{
"source": "Jeremy-Alekai/DigiPay-App-in-Python",
"score": 3
}
|
#### File: DigiPay-App-in-Python/application/algod.py
```python
from algosdk import account, mnemonic
from algosdk.constants import microalgos_to_algos_ratio
from algosdk.future.transaction import PaymentTxn, AssetConfigTxn
from algosdk.v2client import algod
def algod_client():
"""Initialise and return an algod client"""
algod_address = "https://testnet-algorand.api.purestake.io/ps2"
# FIXME: Put your API key in
algod_token = "YOUR API KEY GOES HERE"
headers = {
"X-API-Key": algod_token,
}
return algod.AlgodClient(algod_token, algod_address, headers)
def create_account():
"""Create account and return its mnemonic"""
private_key, address = account.generate_account()
return mnemonic.from_private_key(private_key)
def get_balance(address):
"""Returns the given address balance in algos converted from microalgos"""
account_info = algod_client().account_info(address)
balance = account_info.get('amount') / microalgos_to_algos_ratio
return balance
def send_txn(sender, quantity, receiver, note, sk):
"""Create and sign a transaction. Quantity is assumed to be in algorands, not microalgos"""
quantity = int(quantity * microalgos_to_algos_ratio)
params = algod_client().suggested_params()
note = note.encode()
try:
unsigned_txn = PaymentTxn(sender, params, receiver, quantity, None, note)
except Exception as err:
print(err)
return False
signed_txn = unsigned_txn.sign(sk)
try:
txid = algod_client().send_transaction(signed_txn)
except Exception as err:
print(err)
return False
# wait for confirmation
try:
wait_for_confirmation(txid, 4)
return True
except Exception as err:
print(err)
return False
# utility for waiting on a transaction confirmation
def wait_for_confirmation(transaction_id, timeout):
"""
Wait until the transaction is confirmed or rejected, or until 'timeout'
number of rounds have passed.
Args:
transaction_id (str): the transaction to wait for
timeout (int): maximum number of rounds to wait
Returns:
dict: pending transaction information, or throws an error if the transaction
is not confirmed or rejected in the next timeout rounds
"""
start_round = algod_client().status()["last-round"] + 1
current_round = start_round
while current_round < start_round + timeout:
try:
pending_txn = algod_client().pending_transaction_info(transaction_id)
except Exception as err:
print(err)
return
if pending_txn.get("confirmed-round", 0) > 0:
return pending_txn
elif pending_txn["pool-error"]:
raise Exception(
'pool error: {}'.format(pending_txn["pool-error"]))
algod_client().status_after_block(current_round)
current_round += 1
raise Exception(
'pending tx not found in timeout rounds, timeout value = : {}'.format(timeout))
def create_asset(
creator,
asset_name,
unit_name,
total,
decimals,
default_frozen,
url,
sk
):
"""Creates an asset, returns the newly created asset ID"""
params = algod_client().suggested_params()
txn = AssetConfigTxn(
sender=creator,
sp=params,
total=total,
default_frozen=default_frozen,
unit_name=unit_name,
asset_name=asset_name,
manager=creator,
reserve=creator,
freeze=creator,
clawback=creator,
url=url,
decimals=decimals)
# Sign with secret key of creator
stxn = txn.sign(sk)
# Send the transaction to the network and retrieve the txid.
txid = algod_client().send_transaction(stxn)
try:
wait_for_confirmation(txid, 4)
except Exception as err:
print(err)
return None
try:
ptx = algod_client().pending_transaction_info(txid)
asset_id = ptx["asset-index"]
return asset_id
except Exception as err:
print(err)
return None
```
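A minimal usage sketch (not part of the repository) showing how these helpers fit together; it assumes the module is importable as `application.algod`, that a valid PureStake API key has been filled into `algod_client()`, and that the sender account is already funded on TestNet.
```python
# Hypothetical usage of the helpers above (assumptions: importable as
# application.algod, API key configured, sender funded on TestNet).
from algosdk import mnemonic
from application.algod import create_account, get_balance, send_txn

passphrase = create_account()                      # 25-word mnemonic for a new account
address = mnemonic.to_public_key(passphrase)       # account address
private_key = mnemonic.to_private_key(passphrase)  # signing key

print("Balance (Algos):", get_balance(address))

# Send 1 Algo with a note to a placeholder receiver address.
confirmed = send_txn(address, 1, "RECEIVER_ADDRESS", "hello", private_key)
print("Confirmed:", confirmed)
```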
#### File: DigiPay-App-in-Python/application/indexer.py
```python
from algosdk.constants import microalgos_to_algos_ratio
from algosdk.v2client import indexer
def myindexer():
"""Initialise and return an indexer"""
algod_address = "https://testnet-algorand.api.purestake.io/idx2"
# FIXME: Put your API key in
algod_token = "YOUR API KEY GOES HERE"
headers = {
"X-API-Key": algod_token,
}
return indexer.IndexerClient("", algod_address, headers)
def get_transactions(address, substring):
"""Returns a list of transactions related to the given address"""
response = myindexer().search_transactions(address=address, txn_type="pay")
txns = []
for txn in response["transactions"]:
sender = txn["sender"]
fee = txn["fee"]
amount = txn["payment-transaction"]["amount"]
if sender == address:
# if the current account is the sender, add fee and display transaction as negative
amount += fee
amount *= -1
other_address = txn["payment-transaction"]["receiver"]
else:
other_address = sender
amount /= microalgos_to_algos_ratio
# check for searched address
if substring not in other_address:
continue
txns.append({"amount": amount, "address": other_address})
return txns
def get_assets(address, name):
"""Returns a list of assets that have been created by the given address"""
response = myindexer().search_assets(creator=address, name=name)
assets = response["assets"]
return assets
```
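A hedged usage sketch for the indexer helpers; it assumes the module is importable as `application.indexer` and that the PureStake key has been configured, and the address is a placeholder.
```python
# Hypothetical usage of the indexer helpers above.
from application.indexer import get_transactions, get_assets

address = "SOME_TESTNET_ADDRESS"  # placeholder

# Signed amounts: negative for outgoing payments (fee included), positive for incoming.
for txn in get_transactions(address, substring=""):
    print(txn["amount"], "Algos <->", txn["address"])

# Assets created by this address whose name matches the given string.
for asset in get_assets(address, name="DigiCoin"):
    print(asset["index"], asset["params"]["name"])
```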
#### File: DigiPay-App-in-Python/application/__init__.py
```python
from flask import Flask
from . import auth
from . import views
def create_app():
app = Flask(__name__)
app.config["SECRET_KEY"] = "put any long random string here"
auth.login_manager.init_app(app)
app.register_blueprint(views.main_bp)
app.register_blueprint(auth.auth_bp)
return app
```
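A possible entry point (not shown in this excerpt) that uses the factory above to run the development server:
```python
# Hypothetical run.py: builds the app via the factory and starts Flask's dev server.
from application import create_app

app = create_app()

if __name__ == "__main__":
    app.run(debug=True)
```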
#### File: DigiPay-App-in-Python/application/models.py
```python
from algosdk import mnemonic
from flask_login import UserMixin
from .algod import get_balance, send_txn, create_asset
from .indexer import get_transactions, get_assets
class User(UserMixin):
"""User account model"""
def __init__(self, passphrase):
"""Creates a user using the 25-word mnemonic"""
self.passphrase = passphrase
@property
def id(self):
"""Returns private key from mnemonic"""
return mnemonic.to_private_key(self.passphrase)
@property
def public_key(self):
"""Returns public key from mnemonic. This is the same as the user's address"""
return mnemonic.to_public_key(self.passphrase)
def get_balance(self):
"""Returns user balance, in algos"""
return get_balance(self.public_key)
def send(self, quantity, receiver, note):
"""Returns True for a succesful transaction. Quantity is given in algos"""
return send_txn(self.public_key, quantity, receiver, note, self.id)
def create(
self,
asset_name,
unit_name,
total,
decimals,
default_frozen,
url
):
"""Creates an asset, with the user as the creator"""
return create_asset(
self.public_key,
asset_name,
unit_name,
total,
decimals,
default_frozen,
url,
self.id
)
def get_transactions(self, substring):
"""Returns a list of the user's transactions"""
return get_transactions(self.public_key, substring)
def get_assets(self, name):
"""Returns a list of the user's assets"""
return get_assets(self.public_key, name)
```
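A hypothetical sketch of the `User` model in use; it assumes an existing 25-word mnemonic for a funded TestNet account, and the mnemonic and receiver below are placeholders.
```python
# Hypothetical usage of the User model above.
from application.models import User

user = User("your twenty five word mnemonic goes here ...")
print("Address:", user.public_key)
print("Balance:", user.get_balance(), "Algos")

# Payment and asset creation, both signed with the key derived from the mnemonic.
user.send(0.5, "RECEIVER_ADDRESS", note="lunch")
asset_id = user.create("DigiCoin", "DGC", total=1000, decimals=0, default_frozen=False, url="https://example.com")
print("Created asset:", asset_id)
```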
|
{
"source": "JeremyAndress/cobnut",
"score": 3
}
|
#### File: cobnut/tests/test_exceptions.py
```python
from unittest import TestCase
from arsene import Arsene
from arsene.exceptions import ValidationBroker
class arseneTestCase(TestCase):
def test_without_broker(self):
with self.assertRaises(ValidationBroker) as context:
Arsene()
self.assertTrue('Need a broker to use Arsene' in str(context.exception))
```
|
{
"source": "JeremyAndress/fastapi-auth2",
"score": 2
}
|
#### File: alembic/versions/c0b790895c18_add_user_and_role_models.py
```python
from alembic import op
import sqlalchemy as sa
from core.role import ROLE
# revision identifiers, used by Alembic.
revision = 'c0b790895c18'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
role_table = op.create_table(
'rol',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=30), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table(
'user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=25), nullable=True),
sa.Column('email', sa.String(length=255), nullable=False),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('rol_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['rol_id'], ['rol.id'], onupdate='CASCADE', ondelete='RESTRICT'),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
op.bulk_insert(
role_table,
[
{'id': 1, 'name': ROLE.ADMIN.value},
{'id': 2, 'name': ROLE.BASIC.value}
]
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
op.drop_table('rol')
# ### end Alembic commands ###
```
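For context, a plausible shape of the `ROLE` enum imported from `core.role` (hypothetical; the actual definition lives elsewhere in the repo), with values inferred from the role names asserted in the tests further below:
```python
# Hypothetical core/role.py sketch; values inferred from the test expectations.
from enum import Enum

class ROLE(Enum):
    ADMIN = "ADMINISTRATOR"
    BASIC = "BASIC"
```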
#### File: api_v1/role/role.py
```python
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from schemas.user import UserCreate
from db.session import get_db
from api.deps import get_admin_user
from schemas.rol import ListRol, RolBase, Rol
from .service import role_service
router = APIRouter()
# Document
@router.get('/role/{id}', response_model=Rol)
def get_rol(
id: int,
db: Session = Depends(get_db),
current_user: UserCreate = Depends(get_admin_user)
):
return role_service.get(db, id)
@router.post('/role', response_model=Rol, status_code=201)
def create_role(
rol: RolBase,
db: Session = Depends(get_db),
current_user: UserCreate = Depends(get_admin_user)
):
try:
return role_service.create(db, obj_in=rol)
except Exception as e:
raise HTTPException(status_code=400, detail=f'{e}')
@router.put('/role', response_model=Rol)
def update_role(
rol: Rol,
db: Session = Depends(get_db),
current_user: UserCreate = Depends(get_admin_user)
):
try:
return role_service.update(db, obj_in=rol)
except Exception as e:
raise HTTPException(status_code=400, detail=f'{e}')
@router.delete('/role/{id}')
def delete_role(
id: int,
db: Session = Depends(get_db),
current_user: UserCreate = Depends(get_admin_user)
):
try:
return role_service.remove(db, id=id)
except Exception as e:
raise HTTPException(status_code=400, detail=f'{e}')
@router.get('/roles', response_model=ListRol)
def get_all_roles(
page: int,
db: Session = Depends(get_db),
current_user: UserCreate = Depends(get_admin_user)
):
return role_service.get_paginate(db, page=page)
```
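A minimal sketch of how this router might be mounted; the module path and the `/api/v1` prefix are assumptions inferred from the imports above and the tests below.
```python
# Hypothetical wiring; the repo's actual entry point may differ.
from fastapi import FastAPI
from api.api_v1.role.role import router as role_router

app = FastAPI()
app.include_router(role_router, prefix="/api/v1", tags=["roles"])
```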
#### File: tests/api/test_rol.py
```python
def test_create_role(client, super_user_token):
data = {
'name': 'new_role'
}
response = client.post(
'/api/v1/role', json=data, headers={
'token': super_user_token
}
)
assert response.status_code == 201, response.text
response_json = response.json()
expected_data = {'name': 'new_role', 'id': 3}
assert response_json == expected_data
def test_get_role(client, super_user_token):
response = client.get(
'/api/v1/role/1', headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
expected_data = {'name': 'ADMINISTRATOR', 'id': 1}
assert response_json == expected_data
def test_get_roles(client, super_user_token):
response = client.get(
'/api/v1/roles?page=1', headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
expected_data = {
'previous_page': None, 'next_page': None, 'total': 2,
'pages': 1, 'data': [
{'name': 'ADMINISTRATOR', 'id': 1},
{'name': 'BASIC', 'id': 2}
]
}
assert response_json == expected_data
def test_update_role(client, super_user_token):
data = {'name': 'new_name', 'id': 2}
response = client.put(
'/api/v1/role', json=data,
headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
assert response_json == data
def test_delete_role(client, super_user_token):
response = client.delete(
'/api/v1/role/2',
headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
expected_data = {'name': 'BASIC', 'id': 2}
assert response_json == expected_data
```
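These tests rely on `client` and `super_user_token` fixtures defined elsewhere; the following is a hypothetical conftest.py sketch of what they might look like (the real fixtures in the repo will differ in detail, and the app import path and login endpoint are assumptions).
```python
# Hypothetical conftest.py; app import path and login endpoint are assumptions.
import pytest
from fastapi.testclient import TestClient
from main import app

@pytest.fixture
def client():
    return TestClient(app)

@pytest.fixture
def super_user_token(client):
    # Assumes a seeded admin user and a login endpoint returning an access token.
    response = client.post("/api/v1/login", data={"username": "admin", "password": "admin"})
    return response.json()["access_token"]
```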
#### File: tests/api/test_user.py
```python
def test_create_user(client, create_roles):
data = {
'username': 'test_user',
'password': '<PASSWORD>',
'email': '<EMAIL>',
'rol_id': 1
}
response = client.post(
'/api/v1/user', json=data
)
assert response.status_code == 201, response.text
response_json = response.json()
expected_data = {
'username': 'test_user',
'rol_id': 1, 'email': '<EMAIL>', 'id': 1
}
assert response_json == expected_data
def test_get_user(client, super_user_token):
response = client.get(
'/api/v1/user/1', headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
expected_data = {
'username': 'test_super_user', 'rol_id': 1,
'email': '<EMAIL>', 'id': 1
}
assert response_json == expected_data
def test_get_users(client, super_user_token):
response = client.get(
'/api/v1/users?page=1', headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
expected_data = {
'previous_page': None, 'next_page': None, 'total': 1,
'pages': 1, 'data': [
{
'username': 'test_super_user', 'rol_id': 1,
'email': '<EMAIL>', 'id': 1
}
]
}
assert response_json == expected_data
def test_update_user(client, super_user_token, create_basic_user):
data = {
'username': 'test_user',
'password': '<PASSWORD>',
'email': 'test<EMAIL>',
'rol_id': 1,
'id': 2
}
response = client.put(
'/api/v1/user', json=data,
headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
del data['password']
assert response_json == data
def test_delete_user(client, super_user_token, create_basic_user):
response = client.delete(
'/api/v1/user/2',
headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
expected_data = {
'username': 'test_user', 'rol_id': 2,
'email': '<EMAIL>', 'id': 2
}
assert response_json == expected_data
```
|
{
"source": "JeremyAndress/Flask-Celery",
"score": 2
}
|
#### File: src/app/routes.py
```python
from flask import Blueprint
from flask import request
from datetime import datetime
from utils.logging import logger
task_bp = Blueprint('task_bp', __name__)
@task_bp.route('/', methods=['GET'])
def index():
return 'Flask Celery Template!!!'
```
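The blueprint above presumably gets registered by an app factory elsewhere in the project; a minimal hypothetical sketch:
```python
# Hypothetical factory registering the blueprint; names are assumptions.
from flask import Flask
from app.routes import task_bp

def create_app():
    app = Flask(__name__)
    app.register_blueprint(task_bp)
    return app
```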
#### File: src/app/task.py
```python
from app import create_celery
from utils.logging import logger
celery_app = create_celery()
@celery_app.task(max_retries=3,time_limit=7200)
def periodic():
print('Hi! from periodic_task')
```
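To have `periodic` actually run on a schedule, a Celery beat entry is needed; a hypothetical configuration sketch follows (the real project may set this inside `create_celery()` instead).
```python
# Hypothetical beat schedule; task name assumed from the module path.
from app.task import celery_app

celery_app.conf.beat_schedule = {
    "run-periodic-every-minute": {
        "task": "app.task.periodic",
        "schedule": 60.0,  # seconds
    },
}
```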
#### File: src/utils/logging.py
```python
import os
import uuid
import logging
from logging.handlers import RotatingFileHandler
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
LOG_FILENAME_INFO = BASE_DIR+'/logs/info.log'
logging.basicConfig(
handlers=[
logging.StreamHandler(),
RotatingFileHandler(LOG_FILENAME_INFO, maxBytes=20000, backupCount=10)
],
level=logging.INFO,
format= '[%(asctime)s] [%(pathname)s:%(lineno)d] [%(levelname)s] - %(message)s',
datefmt='%d/%m/%Y %H:%M:%S'
)
logger = logging.getLogger("launchpad")
def gene_extra(ms):
return {
'id':str(uuid.uuid4()),
'msisdn': ms
}
```
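Usage sketch for the logger and the per-message extras produced by `gene_extra()`; the msisdn value is a placeholder.
```python
# The extra dict attaches an id and msisdn to the log record; they only appear
# in the output if the log format string references them.
from utils.logging import logger, gene_extra

logger.info("payment accepted", extra=gene_extra("56912345678"))
```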
|
{
"source": "jeremyandrews/netgrasp",
"score": 2
}
|
#### File: netgrasp/utils/cli.py
```python
from netgrasp import netgrasp
from netgrasp.database import database
from netgrasp.utils import pretty
def start(ng):
import os
pid = ng.is_running()
if pid:
ng.debugger.critical("Netgrasp is already running with pid %d.", (pid,))
ng.debugger.warning("Starting netgrasp...")
if os.getuid() != 0:
ng.debugger.critical("netgrasp must be run as root (currently running as %s), exiting", (ng.debugger.whoami()))
netgrasp.netgrasp_instance = ng
# @TODO: use pcap to set and test interface
if not ng.listen["interface"]:
ng.debugger.critical("Required [Listen] 'interface' not defined in configuration file, exiting.")
if not ng.database["filename"]:
ng.debugger.critical("Required [Database] 'filename' not defined in configuration file, exiting.")
# Start netgrasp.
if ng.daemonize:
# Test that we can write to the log.
try:
with open(ng.logging["filename"], "w"):
ng.debugger.info("successfully opened logfile for writing")
except Exception as e:
ng.debugger.dump_exception("start() exception")
ng.debugger.critical("failed to open logfile '%s' for writing: %s", (ng.logging["filename"], e))
import daemonize
# Test that we can write to the pidfile.
try:
with open(ng.logging["pidfile"], "w"):
ng.debugger.info("successfully opened pidfile for writing")
except IOError as e:
ng.debugger.critical("failed to open pidfile '%s' for writing: %s", (ng.logging["pidfile"], e))
ng.debugger.info("daemonizing app=netgrasp, pidfile=%s, user=%s, group=%s, verbose=True", (ng.logging["pidfile"], ng.security["user"], ng.security["group"]))
ng.debugger.warning("daemonizing, output redirected to log file: %s", (ng.logging["filename"],))
try:
ng.debugger.logToFile()
daemon = daemonize.Daemonize(app="netgrasp", pid=ng.logging["pidfile"], privileged_action=netgrasp.get_pcap, user=ng.security["user"], group=ng.security["group"], action=netgrasp.main, keep_fds=[ng.debugger.handler.stream.fileno()], logger=ng.logger, verbose=True)
daemon.start()
except Exception as e:
ng.debugger.critical("Failed to daemonize: %s, exiting", (e,))
else:
netgrasp.main()
def stop(ng, must_be_running=True):
import os
import signal
import errno
pid = ng.is_running()
if not pid:
if must_be_running:
ng.debugger.critical("Netgrasp is not running.")
else:
ng.debugger.info("Netgrasp is not running.")
else:
ng.debugger.warning("Stopping netgrasp...")
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == errno.EPERM:
ng.debugger.critical("Failed (perhaps try with sudo): %s", (e,))
else:
ng.debugger.critical("Failed: %s", (e,))
def restart(ng):
import time
stop(ng, False)
running = ng.is_running()
loops = 0
while running:
loops += 1
if loops > 15:
ng.debugger.critical("Failed to stop netgrasp.")
time.sleep(0.2)
running = ng.is_running()
start(ng)
def status(ng):
pid = ng.is_running()
if pid:
ng.debugger.warning("Netgrasp is running with pid %d", (pid,))
else:
ng.debugger.warning("Netgrasp is not running.")
def update(ng):
from netgrasp.update import update
netgrasp.netgrasp_instance = ng
try:
ng.db = database.Database()
except Exception as e:
ng.debugger.error("error: %s", (e,))
ng.debugger.critical("Failed to open or create database file %s (as user %s), exiting.", (ng.database["filename"], ng.debugger.whoami()))
ng.db.cursor = ng.db.connection.cursor()
query = database.SelectQueryBuilder("state")
query.db_select("{%BASE}.value")
query.db_where("{%BASE}.key = 'schema_version'")
ng.db.cursor.execute(query.db_query(), query.db_args())
schema_version = ng.db.cursor.fetchone()
if schema_version:
version = schema_version[0]
else:
version = 0
updates = update.needed(version)
if updates:
ng.debugger.warning("schema updates required: %s", (updates,))
else:
ng.debugger.critical("no schema updates are required.")
pid = ng.is_running()
if pid:
ng.debugger.critical("Netgrasp must be stopped before running updates.")
netgrasp.netgrasp_instance = ng
netgrasp.email.email_instance = None
netgrasp.notify.notify_instance = None
update.run_updates(version)
def list(ng):
import datetime
netgrasp.netgrasp_instance = ng
pid = ng.is_running()
if not pid:
ng.debugger.critical("Netgrasp is not running.")
try:
ng.db = database.Database()
except Exception as e:
ng.debugger.error("error: %s", (e,))
ng.debugger.critical("Failed to open or create database file %s (as user %s), exiting.", (ng.database["filename"], ng.debugger.whoami()))
ng.debugger.info("Opened %s as user %s", (ng.database["filename"], ng.debugger.whoami()))
ng.db.cursor = ng.db.connection.cursor()
if ng.args.type == "device":
# List devices.
query = database.SelectQueryBuilder("activity")
query.db_select("{%BASE}.did")
query.db_select("mac.address")
query.db_select("ip.address")
query.db_select("{%BASE}.updated")
if ng.args.all:
description = "All devices"
else:
description = "Active devices"
query.db_where("{%BASE}.active = ?", 1)
query.db_where("{%BASE}.updated IS NOT NULL")
if not ng.args.all or ng.args.all == 1:
query.db_group("{%BASE}.did")
query.db_order("{%BASE}.updated DESC")
rowfmt = "{:>16}{:>34}{:>22}"
header = ["IP", "Name", "Last seen"]
elif ng.args.type == 'event':
# List events.
query = database.SelectQueryBuilder("event")
query.db_select("{%BASE}.did")
query.db_select("mac.address")
query.db_select("ip.address")
query.db_select("{%BASE}.timestamp")
query.db_select("{%BASE}.type")
if ng.args.all:
description = "All alerts"
# @TODO: this is a bogus WHERE, get rid of altogether
query.db_where("{%BASE}.timestamp >= ?", 1)
else:
description = "Recent alerts"
recent = datetime.datetime.now() - datetime.timedelta(seconds=ng.listen["active_timeout"])
query.db_where("{%BASE}.timestamp >= ?", recent)
if not ng.args.all or ng.args.all == 1:
query.db_group("{%BASE}.did")
query.db_group("{%BASE}.type")
query.db_order("{%BASE}.timestamp DESC")
rowfmt = "{:>16}{:>24}{:>21}{:>18}"
header = ["IP", "Name", "Event", "Last seen"]
query.db_leftjoin("device", "{%BASE}.did = device.did")
query.db_leftjoin("ip", "{%BASE}.iid = ip.iid")
query.db_leftjoin("mac", "device.mid = mac.mid")
if ng.args.mac:
query.db_where("mac.address LIKE ?", "%"+ng.args.mac+"%")
if ng.args.ip:
query.db_where("ip.address LIKE ?", "%"+ng.args.ip+"%")
if ng.args.vendor:
query.db_leftjoin("vendor", "device.vid = vendor.vid")
query.db_where("vendor.name LIKE ?", "%"+ng.args.vendor+"%")
if ng.args.hostname or ng.args.custom:
query.db_leftjoin("host", "device.hid = host.hid")
if ng.args.hostname:
query.db_where("host.name LIKE ?", "%"+ng.args.hostname+"%")
else:
query.db_where("host.custom_name LIKE ?", "%"+ng.args.custom+"%")
ng.db.cursor.execute(query.db_query(), query.db_args())
rows = ng.db.cursor.fetchall()
if rows:
print """ %s:""" % description
print rowfmt.format(*header)
for row in rows:
if ng.args.type == 'device':
print rowfmt.format(pretty.truncate_string(row[2], 15), pretty.truncate_string(pretty.name_did(row[0]), 32), pretty.truncate_string(pretty.time_ago(row[3], False), 20))
else:
print rowfmt.format(pretty.truncate_string(row[2], 15), pretty.truncate_string(pretty.name_did(row[0]), 22), pretty.truncate_string(row[4], 19), pretty.truncate_string(pretty.time_ago(row[3], False), 16))
def identify(ng):
from netgrasp.utils import exclusive_lock
netgrasp.netgrasp_instance = ng
pid = ng.is_running()
if not pid:
ng.debugger.critical("Netgrasp is not running.")
try:
ng.db = database.Database()
except Exception as e:
ng.debugger.error("%s", (e,))
ng.debugger.critical("Failed to open or create database file %s (as user %s), exiting.", (ng.database["filename"], ng.debugger.whoami()))
ng.debugger.info("Opened %s as user %s", (ng.database["filename"], ng.debugger.whoami()))
ng.db.cursor = ng.db.connection.cursor()
if not ng.args.set:
description = "Use --set ID 'CUSTOM NAME' to set a custom name on a device"
header = ["ID", "IP", "Name", "Last seen"]
rowfmt = "{:>7}{:>16}{:>34}{:>22}"
query = database.SelectQueryBuilder("host")
query.db_select("{%BASE}.hid")
query.db_leftjoin("ip", "{%BASE}.iid = ip.iid")
query.db_leftjoin("mac", "ip.mid = mac.mid")
query.db_leftjoin("activity", "{%BASE}.iid = activity.iid")
query.db_select("activity.did")
query.db_select("mac.address")
query.db_select("ip.address")
query.db_select("activity.updated")
query.db_group("activity.did")
query.db_order("activity.updated DESC")
if not ng.args.all and not ng.args.custom:
query.db_where("{%BASE}.custom_name IS NULL")
if ng.args.mac:
query.db_where("mac.address LIKE ?", "%"+ng.args.mac+"%")
if ng.args.ip:
query.db_where("ip.address LIKE ?", "%"+ng.args.ip+"%")
if ng.args.vendor:
query.db_leftjoin("vendor", "mac.vid = vendor.vid")
query.db_where("vendor.name LIKE ?", "%"+ng.args.vendor+"%")
if ng.args.hostname:
query.db_where("host.name LIKE ?", "%"+ng.args.hostname+"%")
if ng.args.custom:
query.db_where("host.custom_name LIKE ?", "%"+ng.args.custom+"%")
ng.db.cursor.execute(query.db_query(), query.db_args())
rows = ng.db.cursor.fetchall()
if rows:
print """ %s:""" % description
print rowfmt.format(*header)
for row in rows:
# @TODO handle IP changes
print rowfmt.format(row[0], pretty.truncate_string(row[3], 15), pretty.truncate_string(pretty.name_did(row[1]), 32), pretty.truncate_string(pretty.time_ago(row[4], False), 20))
else:
if ng.args.verbose > 1:
print "id:", ng.args.set[0], "| custom name:", ng.args.set[1]
ng.db.cursor.execute("SELECT vendor.vid FROM vendor LEFT JOIN mac ON vendor.vid = mac.vid LEFT JOIN host ON mac.mid = host.hid WHERE host.hid = ?", (ng.args.set[0],))
with exclusive_lock.ExclusiveFileLock(ng, 5, "failed to set custom name, please try again"):
db_args = [ng.args.set[1]]
db_args.append(ng.args.set[0])
ng.db.cursor.execute("UPDATE host SET custom_name = ? WHERE hid = ?", db_args)
ng.db.connection.commit()
def template(ng):
import pkg_resources
if ng.args.alert or ng.args.type == 'alert':
template_file = "mail_templates/template." + ng.args.alert + ".json"
if ng.args.alert:
if not pkg_resources.resource_exists("netgrasp", template_file):
tmpl = pkg_resources.resource_string("netgrasp", "mail_templates/template.default.json")
else:
tmpl = pkg_resources.resource_string("netgrasp", "mail_templates/template." + ng.args.alert + ".json")
print tmpl
elif ng.args.type == "config":
tmpl = pkg_resources.resource_string("netgrasp", "template.netgrasp.cfg")
print tmpl
```
|
{
"source": "jeremyandrews/sucklesync",
"score": 2
}
|
#### File: sucklesync/config/config.py
```python
from sucklesync.utils import debug
import ConfigParser
import sys
config_instance = None
class Config:
def __init__(self, debugger):
from sucklesync import sucklesync
self.parser = ConfigParser.ConfigParser()
self.debugger = debugger
self.found = self.parser.read(sucklesync.DEFAULT_CONFIG)
def _GetValue(self, section, option, value, default, required, secret):
if not value and default:
value = default
if required and not value and value != False:
self.debugger.critical("Required [%s] '%s' not defined in configuration file, exiting.", (section, option))
if value != None:
if secret:
self.debugger.info2("configuration [%s] '%s' set", (section, option))
else:
self.debugger.info2("configuration [%s] '%s' set to '%s'", (section, option, value))
else:
if value:
if secret:
self.debugger.info2("configuration [%s] '%s' set to default", (section, option))
else:
if default:
if secret:
self.debugger.info2("configuration [%s] '%s' set to default", (section, option))
else:
self.debugger.info2("configuration [%s] '%s' set to default of '%s'", (section, option, value))
return value
def GetText(self, section, option, default = None, required = True, secret = False):
try:
if (self.parser.has_section(section) and self.parser.has_option(section, option)):
value = self.parser.get(section, option)
else:
value = None
return self._GetValue(section, option, value, default, required, secret)
except Exception as e:
self.debugger.dump_exception("GetText() exception while reading configuration")
def GetInt(self, section, option, default = None, required = True, secret = False):
try:
if (self.parser.has_section(section) and self.parser.has_option(section, option)):
value = self.parser.getint(section, option)
else:
value = None
return self._GetValue(section, option, value, default, required, secret)
except Exception as e:
self.debugger.dump_exception("GetInt() exception while reading configuration")
def GetBoolean(self, section, option, default = None, required = True, secret = False):
try:
if (self.parser.has_section(section) and self.parser.has_option(section, option)):
value = self.parser.getboolean(section, option)
else:
value = None
return self._GetValue(section, option, value, default, required, secret)
except Exception as e:
self.debugger.dump_exception("GetBoolean() exception while reading configuration")
def GetTextList(self, section, option, default = None, required = True, secret = False, quiet = False):
try:
if (self.parser.has_section(section) and self.parser.has_option(section, option)):
text = self.parser.get(section, option)
values = text.split(',')
textlist = []
for value in values:
textlist.append(value.strip())
else:
textlist = None
if quiet:
return textlist
else:
return self._GetValue(section, option, textlist, default, required, secret)
except Exception as e:
self.debugger.dump_exception("GetTextList() exception while reading configuration")
def GetEmailList(self, section, option, default = None, required = True, secret = False):
try:
emails = self.GetTextList(section, option, default, required, secret, True)
addresses = []
if emails:
for email in emails:
pieces = email.split('|')
if len(pieces) == 2:
name, address = pieces
if valid_email_address(address):
addresses.append((name.strip(), address.strip()))
else:
self.debugger.error('ignoring invalid email address (%s)', (address,))
elif len(pieces) == 1:
if valid_email_address(email):
addresses.append(email)
else:
self.debugger.error('ignoring invalid email address (%s)', (email,))
else:
self.debugger.error('ignoring invalid email address (%s)', (email,))
return self._GetValue(section, option, addresses, default, required, secret)
except Exception as e:
self.debugger.dump_exception("GetEmailList() exception while reading configuration")
def GetItemPairs(self, section, keys):
import re
try:
pairs = {}
valid = True
if (self.parser.has_section(section)):
temp = []
items = self.parser.items(section)
for key, value in items:
match = re.match(r"([a-z]+)([0-9]+)", key, re.I)
if match:
groups = match.groups()
if groups:
keytype = groups[0]
keyvalue = groups[1]
if keytype in keys:
temp.append((keytype, keyvalue, value))
temp.sort()
for keytype, keyvalue, value in temp:
if keytype in pairs:
pairs[keytype].append((keyvalue, value))
else:
pairs[keytype] = [(keyvalue, value)]
previous_size = -1
for key in keys:
ordered = []
try:
pairs[key].sort()
previous_id = -1
for id, value in pairs[key]:
if previous_id >= 0 and previous_id + 1 != int(id):
valid = False
self.debugger.error("[%s] '%s' missing key %d", (section, key, previous_id + 1))
print "[%s] '%s' missing key %d" % (section, key, previous_id + 1)
ordered.append(value)
previous_id = int(id)
pairs[key] = ordered
size = len(pairs[key])
if previous_size >= 0 and previous_size != size:
valid = False
self.debugger.error("[%s] '%s' has mismatched number of keys", (section, key,))
print "[%s] '%s' has mismatched number of keys" % (section, key,)
previous_size = size
except:
valid = False
self.debugger.error("one or more '%s' keys missing from configuration", (key,))
for key in keys:
try:
size = len(pairs[key])
except:
valid = False
if valid:
return pairs
else:
return {}
else:
pairs = None
return pairs
except Exception as e:
self.debugger.dump_exception("GetItems() exception while reading configuration")
# Perform simplistic email address validation.
def valid_email_address(address):
try:
from email.utils import parseaddr
if not '@' in parseaddr(address)[1]:
return False
else:
return True
except Exception as e:
self.debugger.dump_exception("valid_email_address() exception while reading configuration")
```
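A hedged usage sketch of `Config`; the class expects a debugger object exposing the methods it calls (the real project passes its own `debug.Debugger`), so a stub is used here purely for illustration.
```python
# Hypothetical usage; StubDebugger only exists for this sketch.
from sucklesync.config.config import Config

class StubDebugger(object):
    def info2(self, msg, args):
        print(msg % args)
    def error(self, msg, args):
        print("error: " + msg % args)
    def critical(self, msg, args):
        raise SystemExit(msg % args)
    def dump_exception(self, msg):
        print(msg)

cfg = Config(StubDebugger())
hostname = cfg.GetText("Remote", "hostname")
port = cfg.GetInt("Remote", "port", 22, required=False)
```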
#### File: sucklesync/sucklesync/sucklesync.py
```python
import logging
from easyprocess import EasyProcess
import sucklesync
from utils import debug
from utils import email
from config import config
sucklesync_instance = None
DEFAULT_CONFIG = ["/etc/sucklesync.cfg", "/usr/local/etc/sucklesync.cfg", "~/.sucklesync.cfg", "./sucklesync.cfg"]
DEFAULT_LOGLEVEL = logging.WARNING
DEFAULT_LOGFILE = "/var/log/sucklesync/sucklesync.log"
DEFAULT_LOGFORMAT = "%(asctime)s [%(levelname)s/%(processName)s] %(message)s"
DEFAULT_PIDFILE = "/var/run/sucklesync.pid"
class SuckleSync:
def __init__(self, config):
self.config = config
self.local = {}
self.remote = {}
self.paths = []
self.logging = {}
self.frequency = {}
self.mail = {}
def _load_debugger(self):
import logging.handlers
try:
self.logger = logging.getLogger(__name__)
self.debugger = debug.Debugger(self.verbose, self.logger, debug.PRINT)
# start by logging to stdout
self.debugger.handler = logging.StreamHandler()
formatter = logging.Formatter(DEFAULT_LOGFORMAT)
self.debugger.handler.setFormatter(formatter)
self.logger.addHandler(self.debugger.handler)
except Exception as e:
self.debugger.dump_exception("_load_debugger() exception")
def _enable_debugger(self):
import logging.handlers
try:
if self.daemonize:
self.debugger.handler = logging.FileHandler(self.logging["filename"])
else:
self.debugger.handler = logging.StreamHandler()
formatter = logging.Formatter(DEFAULT_LOGFORMAT)
self.debugger.handler.setFormatter(formatter)
self.logger.addHandler(self.debugger.handler)
self.logger.setLevel(self.logging["level"])
except Exception as e:
self.debugger.dump_exception("_enable_debugger() exception")
def _load_configuration(self):
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
self.configuration = config.Config(self.debugger)
# load binary paths and associated flags
self.local["rsync"] = cmd_quote(self.configuration.GetText("Local", "rsync", "/usr/bin/rsync"))
self.local["rsync_flags"] = self.configuration.GetText("Local", "rsync_flags", "-aP")
self.local["ssh"] = cmd_quote(self.configuration.GetText("Local", "ssh", "/usr/bin/ssh"))
self.local["ssh_flags"] = self.configuration.GetText("Local", "ssh_flags", "-C")
self.local["delete"] = self.configuration.GetBoolean("Local", "delete")
self.remote["find"] = cmd_quote(self.configuration.GetText("Remote", "find", "/usr/bin/find"))
self.remote["find_flags"] = cmd_quote(self.configuration.GetText("Remote", "find_flags", "-mmin -10 -print"))
# load SSH configuration
self.remote["hostname"] = self.configuration.GetText("Remote", "hostname")
self.remote["port"] = self.configuration.GetInt("Remote", "port", 22, False)
self.remote["ssh_timeout"] = self.configuration.GetInt("Remote", "ssh_timeout", 5, False)
self.remote["username"] = self.configuration.GetText("Remote", "username", False, False)
# load paths that will be suckle-synced
self.paths = self.configuration.GetItemPairs("Sucklepaths", ["source", "destination"])
# load logging preferences
self.logging["filename"] = self.configuration.GetText("Logging", "filename", DEFAULT_LOGFILE, False)
self.logging["pidfile"] = self.configuration.GetText("Logging", "pidfile", DEFAULT_PIDFILE, False)
self.logging["level"] = self.configuration.GetText("Logging", "level", DEFAULT_LOGLEVEL, False)
# load frequency preferences
self.frequency["minimum_poll_delay"] = self.configuration.GetInt("Frequency", "minimum_poll_delay", 60, False)
self.frequency["maximum_poll_delay"] = self.configuration.GetInt("Frequency", "maximum_poll_delay", 60, False)
# load email preferences
self.mail["enabled"] = self.configuration.GetBoolean("Email", "enabled", False, False)
if self.mail["enabled"]:
self.mail["to"] = self.configuration.GetEmailList("Email", "to", None)
self.mail["from"] = self.configuration.GetEmailList("Email", "from", None)
self.mail["hostname"] = self.configuration.GetText("Email", "smtp_hostname", None)
self.mail["port"] = self.configuration.GetInt("Email", "smtp_port", 587)
self.mail["mode"] = self.configuration.GetText("Email", "smtp_mode", None)
self.mail["username"] = self.configuration.GetText("Email", "smtp_username", None)
self.mail["password"] = self.configuration.GetText("Email", "smtp_password", None)
# Determine if pid in pidfile is a running process.
def is_running(self):
import os
import errno
running = False
if self.logging["pidfile"]:
if os.path.isfile(self.logging["pidfile"]):
f = open(self.logging["pidfile"])
pid = int(f.readline())
f.close()
if pid > 0:
self.debugger.info("Found pidfile %s, contained pid %d", (self.logging["pidfile"], pid))
try:
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.EPERM:
running = pid
else:
running = pid
return running
def start(ss):
ss.debugger.warning("starting sucklesync")
sucklesync.sucklesync_instance = ss
# test that we can write to the log
try:
with open(ss.logging["filename"], "w"):
ss.debugger.info("successfully writing to logfile")
except IOError:
ss.debugger.critical("failed to write to logfile: %s", (ss.logging["filename"],))
# test rsync -- run a NOP, only success returns
command = ss.local["rsync"] + " -qh"
_rsync(command)
ss.debugger.info("successfully tested local rsync: %s", (command,))
# test ssh -- run a NOP find, only success returns
command = ss.local["ssh"] + " " + ss.remote["hostname"] + " " + ss.local["ssh_flags"] + " " + ss.remote["find"] + " " + ss.remote["find"] + " -type d"
_ssh(command, True)
ss.debugger.info("successfully tested ssh to remote server: %s", (command,))
if ss.daemonize:
try:
import daemonize
except Exception as e:
ss.debugger.error("fatal exception: %s", (e,))
ss.debugger.critical("failed to import daemonize (as user %s), try 'pip install daemonize', exiting", (ss.debugger.whoami()))
ss.debugger.info("successfully imported daemonize")
# test that we can write to the pidfile
try:
with open(ss.logging["pidfile"], "w"):
ss.debugger.info("successfully writing to pidfile")
except IOError:
ss.debugger.critical("failed to write to pidfile: %s", (ss.logging["pidfile"],))
ss.debugger.warning("daemonizing, output redirected to log file: %s", (ss.logging["filename"],))
try:
ss.debugger.logToFile()
daemon = daemonize.Daemonize(app="sucklesync", pid=ss.logging["pidfile"], action=sucklesync, keep_fds=[ss.debugger.handler.stream.fileno()], logger=ss.logger, verbose=True)
daemon.start()
except Exception as e:
ss.debugger.critical("Failed to daemonize: %s, exiting", (e,))
else:
sucklesync()
def stop(ss, must_be_running = True):
import os
import signal
import errno
pid = ss.is_running()
if not pid:
if must_be_running:
ss.debugger.critical("Sucklesync is not running.")
else:
ss.debugger.info("Sucklesync is not running.")
else:
ss.debugger.warning("Stopping sucklesync...")
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == errno.EPERM:
ss.debugger.critical("Failed (perhaps try with sudo): %s", (e))
else:
ss.debugger.critical("Failed: %s", (e,))
def restart(ss):
import time
stop(ss, False)
running = ss.is_running()
loops = 0
while running:
loops += 1
if (loops > 15):
ss.debugger.critical("Failed to stop sucklesync.")
time.sleep(0.2)
running = ss.is_running()
start(ss)
def status(ss):
pid = ss.is_running()
if pid:
ss.debugger.warning("Sucklesync is running with pid %d", (pid,))
else:
ss.debugger.warning("Sucklesync is not running.")
def _ssh(command, fail_on_error = False):
ss = sucklesync.sucklesync_instance
ss.debugger.debug("_ssh: %s", (command,))
try:
output = EasyProcess(command).call(timeout=ss.remote["ssh_timeout"])
if output.timeout_happened:
ss.debugger.error("failed to ssh to remote server, took longer than %d seconds. Failed command: %s", (ss.remote["timeout"], command))
elif output.return_code:
ss.debugger.error("ssh to remote server returned error code (%d), error (%s). Failed command: %s", (output.return_code, output.stderr, command))
elif output.oserror:
ss.debugger.error("failed to ssh to remote server, error (%s). Failed command: %s", (output.oserror, command))
return output.stdout.splitlines()
except Exception as e:
ss.debugger.error("_ssh exception, failed command: %s", (command,))
ss.debugger.dump_exception("_ssh() exception")
def _rsync(command):
ss = sucklesync.sucklesync_instance
ss.debugger.debug("_rsync: %s", (command,))
try:
output = EasyProcess(command).call()
if output.return_code:
ss.debugger.error("rsync returned error code (%d), error (%s). Failed command: %s", (output.return_code, output.stderr, command))
elif output.oserror:
ss.debugger.error("rsync failed, error (%s). Failed command: %s", (output.oserror, command))
return output.stdout.splitlines()
except Exception as e:
ss.debugger.error("_rsync exception, failed command: %s", (command,))
ss.debugger.dump_exception("_rsync() exception")
def _cleanup(source, key):
import re
ss = sucklesync.sucklesync_instance
ss.debugger.debug("_cleanup: %s (%d)", (source, key))
try:
deleted = []
if ss.local["delete"]:
# Delete files/directories that were deleted on the source.
cleanup = ss.local["rsync"] + " --recursive --delete --ignore-existing --existing --prune-empty-dirs --verbose"
cleanup += " " + ss.remote["hostname"] + ':"' + source + '/"'
cleanup += " " + ss.paths["destination"][key]
output = _rsync(cleanup)
prefix = True
for line in output:
if prefix:
if re.search("receiving file list", line):
prefix = False
else:
ss.debugger.debug("PREFIX: %s", (line,))
else:
try:
if re.search("sent (.*) bytes", line):
# All done with the information we care about.
break
ss.debugger.debug(" %s ...", (line,))
deleted.append(line)
except:
# This shouldn't happen during file deletion.
continue
else:
ss.debugger.debug("local delete disabled")
return deleted
except Exception as e:
ss.debugger.dump_exception("_cleanup() exception")
def sucklesync():
from utils import simple_timer
import re
import time
ss = sucklesync.sucklesync_instance
run = True
timer = None
sleep_delay = 0
if ss.mail["enabled"]:
ss.mail["email"] = email.Email(ss)
try:
while run:
if timer:
# When no files are being transferred, sleep for greater and greater
# periods of time, up to a maximum.
if (timer.elapsed() < ss.frequency["minimum_poll_delay"]):
if sleep_delay < ss.frequency["maximum_poll_delay"]:
sleep_delay += ss.frequency["minimum_poll_delay"]
if sleep_delay > ss.frequency["maximum_poll_delay"]:
sleep_delay = ss.frequency["maximum_poll_delay"]
ss.debugger.debug("sleeping %d seconds", (sleep_delay,))
time.sleep(sleep_delay)
else:
ss.debugger.info("last loop took %d seconds, resetting sleep_delay", (timer.elapsed(),))
sleep_delay = 0
timer = simple_timer.Timer()
key = 0
for source in ss.paths["source"]:
# Build a list of files to transfer.
ss.debugger.info("polling %s ...", (source,))
initial_queue = []
queue = []
transferred = False
include_flags = "! " + ss.remote["find_flags"]
command = ss.local["ssh"] + " " + ss.remote["hostname"] + " " + ss.local["ssh_flags"] + " " + ss.remote["find"] + " " + source + " " + include_flags
include = _ssh(command)
command = ss.local["ssh"] + " " + ss.remote["hostname"] + " " + ss.local["ssh_flags"] + " " + ss.remote["find"] + " " + source + " " + ss.remote["find_flags"]
exclude = _ssh(command)
# We may be having connectivity issues, try again later.
if not include:
break
for line in include:
subpath = re.sub(r"^" + re.escape(source), "", line)
try:
directory = subpath.split("/")[1]
if directory[0] == ".":
continue
elif directory not in initial_queue:
ss.debugger.info(" queueing %s ...", (directory,))
initial_queue.append(directory)
except:
continue
if exclude:
exclude_from_queue = []
for line in exclude:
subpath = re.sub(r"^" + re.escape(source), "", line)
try:
directory = subpath.split("/")[1]
if directory[0] == ".":
continue
elif directory not in exclude_from_queue:
exclude_from_queue.append(directory)
except:
continue
for line in initial_queue:
if line in exclude_from_queue:
ss.debugger.info(" excluding from queue %s ...", (directory,))
else:
queue.append(line)
else:
queue = initial_queue
# Now rsync the list one by one, allowing for useful emails.
subkey = 0
for directory in queue:
# Sync queued list of directories.
sync = ss.local["rsync"] + " " + ss.local["rsync_flags"]
sync += " " + ss.remote["hostname"] + ':"' + source + "/"
sync += re.escape(directory) + '"'
sync += " " + ss.paths["destination"][key]
output = _rsync(sync)
synced = []
transferred = False
mail_text = "Successfully synchronized:\n"
mail_html = "<html><title>successfully synchronized</title><body><p>Successfully synchronized:</p><ul>"
prefix = True
suffix = False
for line in output:
if prefix:
if re.search("receiving(.*)file list", line):
prefix = False
elif suffix:
mail_text += line
mail_html += "<br />" + line
ss.debugger.debug("stats: %s", (line,))
else:
try:
if re.search("sent (.*) bytes", line):
suffix = True
mail_text += "\n" + line
mail_html += "</ul><p>" + line
continue
directory_synced = line.split("/")[0]
if directory_synced and directory_synced not in synced:
transferred = True
mail_text += " - " + directory_synced + "\n"
mail_html += "<li>" + directory_synced
ss.debugger.debug(" synced %s ...", (directory_synced,))
synced.append(directory_synced)
except:
# rsync suffix starts with a blank line
suffix = True
continue
if transferred:
mail_html += "</p>"
# List up to three queued items.
in_list = False
if len(queue) > subkey + 1:
in_list = True
mail_text += "Next download:\n - " + queue[subkey + 1] + "\n"
mail_html += "<hr /><p>Next download:<ul><li>" + queue[subkey + 1] + "</li>"
ss.debugger.debug(" next up %s ... [%d of %d]", (queue[subkey + 1], len(queue), subkey))
if in_list and len(queue) > subkey + 2:
mail_text += queue[subkey + 2] + "\n"
mail_html += "<li>" + queue[subkey + 2] + "</li>"
if in_list and len(queue) > subkey + 3:
mail_text += queue[subkey + 3] + "\n"
mail_html += "<li>" + queue[subkey + 3] + "</li>"
if in_list:
mail_html += "</ul></p>"
if transferred:
mail_html += "</body></html>"
ss.mail["email"].MailSend("[sucklesync] file copied", mail_text, mail_html)
_cleanup(source, key)
subkey += 1
if not transferred:
_cleanup(source, key)
key += 1
except Exception as e:
ss.debugger.dump_exception("sucklesync() exception")
```
|
{
"source": "jeremyarr/younit",
"score": 2
}
|
#### File: jeremyarr/younit/setup.py
```python
from setuptools import setup
import os
import sys
PY_VER = sys.version_info
if not PY_VER >= (3, 6):
print("PYTHON VERSION WHEN RUNNING YOUNIT SETUP = {}".format(PY_VER))
raise RuntimeError("younit doesn't support Python earlier than 3.6")
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, 'younit', '__version__.py'), 'r') as f:
exec(f.read(), about)
def read(fname):
with open(os.path.join(here, fname), 'r') as f:
return str(f.read().strip())
setup(
name='younit',
version=about['__version__'],
packages=['younit'],
description="a collection of helpers for the unittest module",
long_description='\n\n'.join((read('README.rst'), read('CHANGELOG.rst'))),
include_package_data=True,
install_requires=[
],
zip_safe=False,
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
keywords=["asyncio","unittest","mock","testing"],
url="https://github.com/jeremyarr/younit",
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.6',
]
)
```
#### File: younit/test/test_unit.py
```python
import sys
import unittest
from unittest.mock import Mock, patch
import asyncio
import threading
import queue
import os
import xmlrunner
import time
import younit
from test import common
from test import unittest_utils
# @unittest.skip("skipped")
class TestNameDecorator(unittest.TestCase):
def setUp(self):
self.original = sys.stdout
self.stdout_file = open('redirect.txt', 'w')
sys.stdout = self.stdout_file
def tearDown(self):
sys.stdout = self.original
self.stdout_file.close()
expected = '\n******** STARTING TEST: test_that_name_decorator_works *********\n'
with open('redirect.txt', 'r') as f:
actual = f.read()
os.remove("redirect.txt")
self.assertEqual(expected,actual)
@younit.test_name
def test_that_name_decorator_works(self):
pass
# @unittest.skip("skipped")
class TestSetClearHanging(unittest.TestCase):
@younit.set_test_hang_alarm
def setUp(self):
pass
@younit.clear_test_hang_alarm
def tearDown(self):
pass
# @unittest.skip("skipped")
def test_that_test_hangs(self):
with self.assertRaises(younit.TestHang):
time.sleep(2)
# @unittest.skip("skipped")
def test_that_test_doesnt_hang(self):
time.sleep(0.1)
# @unittest.skip("skipped")
class TestHanging(unittest.TestCase):
# @unittest.skip("skipped")
@younit.test_hang_alarm
def test_that_test_hangs(self):
with self.assertRaises(younit.TestHang):
time.sleep(2)
# @unittest.skip("skipped")
@younit.test_hang_alarm
def test_that_test_doesnt_hang(self):
time.sleep(0.1)
# @unittest.skip("skipped")
class TestCloseAllThreads(unittest.TestCase):
def setUp(self):
self.threads_to_close = []
def tearDown(self):
time.sleep(0.01)
self.assertEqual(1,threading.active_count())
@younit.close_all_threads
def test_that_all_threads_are_closed(self):
t = ThreadRunner()
self.threads_to_close.append(t)
t.start()
class AsyncioTestWithMocking(unittest.TestCase):
async def async_setUp(self):
self.x = younit.AsyncMock()
await self.x()
async def async_tearDown(self):
await self.x()
@younit.asyncio_test
async def test_runs_mock(self):
self.x.mock.assert_called_once()
class ThreadRunner(object):
def __init__(self):
self.q = queue.Queue()
def start(self):
t = threading.Thread(target=self.worker)
t.start()
def worker(self):
self.q.get()
def close(self):
self.q.put(None)
if __name__ == '__main__':
unittest.main(
testRunner=xmlrunner.XMLTestRunner(output='test-reports'),
# these make sure that some options that are not applicable
# remain hidden from the help menu.
failfast=False, buffer=False, catchbreak=False)
```
#### File: younit/test/unittest_utils.py
```python
import signal
import unittest
import asyncio
from unittest.mock import MagicMock
class TestHang(Exception):
pass
def test_name(func):
'''
decorator that prints the test name before
starting a test
usage:
@test_name
def test_this(self):
pass
'''
def inner(*args, **kwargs):
print ("\n******** STARTING TEST: %s *********" % func.__name__)
return func(*args, **kwargs)
return inner
def _test_hang_handler(signum, frame):
raise TestHang
def set_test_hang_alarm(func):
'''
decorator that sets an alarm of 1 second before
starting any test.
If a test takes longer than 1 second a TestHang
exception is raised.
usage inside a unittest.TestCase:
@set_test_hang_alarm
def setUp(self):
pass
'''
def inner(*args,**kwargs):
signal.signal(signal.SIGALRM, _test_hang_handler)
signal.alarm(1)
return func(*args,**kwargs)
return inner
def clear_test_hang_alarm(func):
'''
decorator that resets any test hang alarms after
a test is completed.
usage inside a unittest.TestCase:
@clear_test_hang_alarm
def tearDown(self):
pass
'''
def inner(*args,**kwargs):
signal.alarm(0)
return func(*args,**kwargs)
return inner
def close_all_threads(func):
'''
decorator that closes any threads after a test is run.
usage inside a unittest.TestCase:
add an object with a close method to self.threads_to_close.
The close methods instructs the thread to close.
@close_all_threads
def test_this(self):
pass
'''
def inner(self):
try:
return func(self)
finally:
[x.close() for x in self.threads_to_close]
return inner
def asyncio_test(func):
'''
decorator that runs a test as a coroutine including
setup and teardown coroutines.
usage inside a unittest.TestCase:
async def async_setUp(self):
pass
async def async_tearDown(self):
pass
@asyncio_test
async def test_this(self):
pass
'''
def inner(self):
async def run(self,*args,**kwargs):
await self.async_setUp()
try:
return await func(self,*args,**kwargs)
finally:
await self.async_tearDown()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
try:
self.loop.run_until_complete(run(self))
finally:
self.loop.close()
return inner
def AsyncMock(*args, **kwargs):
'''
function that mocks a coroutine.
mock attribute is a MagicMock object
that records mock usage
usage:
x = AsyncMock()
await x()
x.mock.assert_called_once()
'''
m = MagicMock(*args, **kwargs)
async def mock_coro(*args, **kwargs):
return m(*args, **kwargs)
mock_coro.mock = m
return mock_coro
```
#### File: younit/younit/main.py
```python
import signal
import unittest
import asyncio
from unittest.mock import MagicMock
class TestHang(Exception):
pass
def test_name(func):
'''
A decorator that prints the test name before
starting a test.
Convenient if you want to separate the output of
different tests.
Usage::
class MyTestCase(unittest.TestCase):
@test_name
def test_this(self):
print("im testing this")
@test_name
def test_that(self):
print("im testing that")
'''
def inner(*args, **kwargs):
print("\n******** STARTING TEST: {} *********".format(func.__name__))
return func(*args, **kwargs)
return inner
def _test_hang_handler(signum, frame):
raise TestHang
def test_hang_alarm(func):
'''
A decorator that sets an alarm of 1 second before
starting any test.
If a test takes longer than 1 second, a :class:`TestHang`
exception is raised.
If a test takes less than 1 second, the alarm is cancelled.
Usage::
class MyTestCase(unittest.TestCase):
@test_hang_alarm
def test_this(self):
time.sleep(3)
'''
def inner(*args, **kwargs):
signal.signal(signal.SIGALRM, _test_hang_handler)
signal.alarm(1)
try:
return func(*args, **kwargs)
finally:
signal.alarm(0)
return inner
def set_test_hang_alarm(func):
'''
A decorator that sets an alarm of 1 second before
starting any test.
If a test takes longer than 1 second, a :class:`TestHang`
exception is raised.
Should be used during set up and in conjunction with
:func:`clear_test_hang_alarm` during tear down.
Usage::
class MyTestCase(unittest.TestCase):
@set_test_hang_alarm
def setUp(self):
pass
@clear_test_hang_alarm
def tearDown(self):
pass
'''
def inner(*args, **kwargs):
signal.signal(signal.SIGALRM, _test_hang_handler)
signal.alarm(1)
return func(*args, **kwargs)
return inner
def clear_test_hang_alarm(func):
'''
A decorator that resets an alarm set by :func:`set_test_hang_alarm`
Should be used during tear down and in conjunction with
:func:`set_test_hang_alarm` during set up.
Usage::
class MyTestCase(unittest.TestCase):
@set_test_hang_alarm
def setUp(self):
pass
@clear_test_hang_alarm
def tearDown(self):
pass
'''
def inner(*args, **kwargs):
try:
return func(*args, **kwargs)
finally:
signal.alarm(0)
return inner
def close_all_threads(func):
'''
A decorator that closes any threads that are created
as part of running a test.
To use, ensure your threads are able to be closed by
invoking a ``close()`` method on an object related to the
thread. Then add the object to the ``self.threads_to_close``
list.
Usage::
class MyTestCase(unittest.TestCase):
def setUp(self):
self.threads_to_close = []
x = start_a_new_thread()
#x is an object with a close() method
#that closes the thread
self.threads_to_close.append(x)
@close_all_threads
def test_this(self):
y = start_a_new_thread()
self.threads_to_close.append(y)
'''
def inner(self):
try:
return func(self)
finally:
[x.close() for x in self.threads_to_close]
return inner
def asyncio_test(func):
'''
A decorator that runs a test as a coroutine including
any set up and tear down coroutines.
Usage::
class MyTestCase(unittest.TestCase):
async def async_setUp(self):
pass
async def async_tearDown(self):
pass
@asyncio_test
async def test_this(self):
pass
'''
def inner(self):
async def run(self, *args, **kwargs):
await self.async_setUp()
try:
return await func(self, *args, **kwargs)
finally:
await self.async_tearDown()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
try:
self.loop.run_until_complete(run(self))
finally:
self.loop.close()
return inner
def AsyncMock(*args, **kwargs):
'''
A function that can be used to mock a coroutine.
Returns a coroutine function with a mock attribute.
The mock attribute is a :class:`unittest.mock.MagicMock` object that records
usage.
Usage::
class MyTestCase(unittest.TestCase):
async def async_setUp(self):
pass
async def async_tearDown(self):
pass
@asyncio_test
async def test_this(self):
x = AsyncMock()
await x()
x.mock.assert_called_once()
'''
# thanks to <NAME> for this
# https://blog.miguelgrinberg.com/post/unit-testing-asyncio-code
m = MagicMock(*args, **kwargs)
async def mock_coro(*args, **kwargs):
return m(*args, **kwargs)
mock_coro.mock = m
return mock_coro
```
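A short, self-contained example test combining `asyncio_test` and `AsyncMock`, mirroring the `AsyncioTestWithMocking` pattern from the package's own tests (hypothetical example, not from the repo):
```python
# Hypothetical example test; mirrors the AsyncioTestWithMocking pattern above.
import unittest
import younit

class ExampleAsyncTest(unittest.TestCase):
    async def async_setUp(self):
        self.fake_fetch = younit.AsyncMock(return_value=42)

    async def async_tearDown(self):
        pass

    @younit.asyncio_test
    async def test_fetch_is_awaited(self):
        result = await self.fake_fetch()
        self.assertEqual(result, 42)
        self.fake_fetch.mock.assert_called_once()

if __name__ == "__main__":
    unittest.main()
```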
|
{
"source": "JeremyARussell/stephanie-va",
"score": 3
}
|
#### File: Stephanie/AudioManager/audio_recognizer.py
```python
from Stephanie.configurer import config
# noinspection SpellCheckingInspection
class AudioRecognizer:
def __init__(self, recognizer, UnknownValueError, RequestError):
self.UnknownValueError = UnknownValueError
self.RequestError = RequestError
self.r = recognizer
self.c = config
def recognize_from_sphinx(self, audio):
# recognize speech using Sphinx
try:
text = self.r.recognize_sphinx(audio)
print("Sphinx thinks you said " + text)
return text
except self.UnknownValueError:
print("Sphinx could not understand audio")
return False
except self.RequestError as e:
print("Sphinx error; {0}".format(e))
return False
def recognize_from_google(self, audio):
try:
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
text = self.r.recognize_google(audio)
print("Google Speech Recognition thinks you said " + text)
return text
except KeyError:
print("Google Recognition couldn't understand your audio with enough confidence.")
except self.UnknownValueError:
print("Google Speech Recognition could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return False
def recognize_from_google_cloud(self, audio):
# recognize speech using Google Cloud Speech
try:
google_cloud_speech_credentials = self.c.config['STT_KEYS']['google_cloud_speech_api']
except KeyError:
print("Api key not found in the config.ini file.")
return False
try:
text = self.r.recognize_google_cloud(audio,
credentials_json=google_cloud_speech_credentials)
print("Google Cloud Speech thinks you said " + text)
return text
except self.UnknownValueError:
print("Google Cloud Speech could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from Google Cloud Speech service; {0}".format(e))
return False
def recognize_from_wit(self, audio):
# recognize speech using Wit.ai
try:
wit_ai_key = self.c.config['STT_KEYS'][
'wit.ai_speech_api'] # Wit.ai keys are 32-character uppercase alphanumeric strings
except KeyError:
print("Api key not found in the config.ini file.")
return False
try:
text = self.r.recognize_wit(audio, key=wit_ai_key)
print("Wit.ai thinks you said " + text)
return text
except self.UnknownValueError:
print("Wit.ai could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from Wit.ai service; {0}".format(e))
return False
def recognize_from_bing(self, audio):
# recognize speech using Microsoft Bing Voice Recognition
# Microsoft Bing Voice Recognition API keys 32-character lowercase hexadecimal strings
try:
bing_key = self.c.config['STT_KEYS']['bing_speech_api']
except KeyError:
print("Api key not found in the config.ini file.")
return False
try:
text = self.r.recognize_bing(audio, key=bing_key)
print("Microsoft Bing Voice Recognition thinks you said " + text)
return text
except self.UnknownValueError:
print("Microsoft Bing Voice Recognition could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
return False
def recognize_from_houndify(self, audio):
# recognize speech using Houndify
try:
houndify_client_id = self.c.config['STT_KEYS'][
'houndify_client_id'] # Houndify client IDs are Base64-encoded strings
houndify_client_key = self.c.config['STT_KEYS'][
'houndify_client_key'] # Houndify client keys are Base64-encoded strings
except KeyError:
print("Api key not found in the config.ini file.")
return False
try:
text = self.r.recognize_houndify(audio, client_id=houndify_client_id,
client_key=houndify_client_key)
print("Houndify thinks you said " + text)
return text
except self.UnknownValueError:
print("Houndify could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from Houndify service; {0}".format(e))
return False
def recognize_from_ibm(self, audio):
# recognize speech using IBM Speech to Text
try:
# IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
ibm_username = self.c.config['STT_KEYS']['ibm_username']
# IBM Speech to Text passwords are mixed-case alphanumeric strings
ibm_password = self.c.config['STT_KEYS']['ibm_password']
except KeyError:
print("Api key not found in the config.ini file.")
return False
try:
text = self.r.recognize_ibm(audio, username=ibm_username,
password=<PASSWORD>)
print("IBM Speech to Text thinks you said " + text)
return text
except self.UnknownValueError:
print("IBM Speech to Text could not understand audio")
return False
except self.RequestError as e:
print("Could not request results from IBM Speech to Text service; {0}".format(e))
return False
```
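An illustrative wiring sketch, not part of Stephanie itself: the constructor signature suggests the third-party SpeechRecognition package, whose `Recognizer`, `Microphone`, `UnknownValueError` and `RequestError` fit the three parameters; importing the Stephanie package also assumes its `config.ini` is in place.
```python
# Illustrative wiring only; Stephanie builds this object inside its AudioManager.
# Assumes the SpeechRecognition package (plus a microphone backend such as PyAudio)
# and an importable Stephanie package with its config.ini.
import speech_recognition as sr

from Stephanie.AudioManager.audio_recognizer import AudioRecognizer

recognizer = sr.Recognizer()
audio_recognizer = AudioRecognizer(recognizer, sr.UnknownValueError, sr.RequestError)

with sr.Microphone() as source:
    recognizer.adjust_for_ambient_noise(source)
    audio = recognizer.listen(source)

text = audio_recognizer.recognize_from_google(audio)
if text:
    print("Heard:", text)
```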
#### File: stephanie-va/Stephanie/configurer.py
```python
import os
import configparser
import json
class Configurer:
def __init__(self, filename="config.ini", modules_filename="modules.json"):
print("initialised")
self.abs_filename = self.get_abs_filename(filename)
self.abs_mods_filename = self.get_abs_filename(modules_filename)
self.config = configparser.ConfigParser()
self.config.read(self.abs_filename)
self.sections = self.config.sections()
self.modules = self.retreive_modules(self.abs_mods_filename)
@staticmethod
def retreive_modules(abs_mods_filename):
print("modules retreived.")
try:
with open(abs_mods_filename, "r") as file:
modules = json.load(file)
file.close()
except Exception as e:
raise Exception("Modules.json file has been not formatted correctly. check the support tab in case you're integrating some 3rd party module.") from e
return modules
def get_modules(self, filename=None):
if filename:
abs_mods_filename = self.get_abs_filename(filename)
return self.retreive_modules(abs_mods_filename)
return self.modules
@staticmethod
def get_abs_filename(filename):
return os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, filename))
config = Configurer()
```
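A short sketch of how the module-level `config` singleton is consumed elsewhere in Stephanie (for example in `text_sorter.py` and `text_processor.py` below); it assumes a `config.ini` with a `SYSTEM` section and a `modules.json` beside the package, since `Configurer()` runs at import time.
```python
# How the module-level singleton is consumed elsewhere in Stephanie (see text_sorter.py
# and text_processor.py below); assumes config.ini and modules.json exist, since
# Configurer() already ran at import time.
from Stephanie.configurer import config

assistant_name = config.config.get("SYSTEM", "assistant_name")
greet_enabled = config.config.getboolean("SYSTEM", "greet_engine")
modules = config.get_modules()

print(assistant_name, greet_enabled, len(modules))
```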
#### File: Stephanie/EventDispatcher/event_dispatcher.py
```python
from pydispatch import dispatcher
class EventDispatcher:
def __init__(self):
self.dispatcher = dispatcher
self.sleep_status = False
self.active_status = False
def close(self):
return self.sleep_status
def sleep(self, sender):
self.sleep_status = True
print("The virtual assistant is going to sleep by %s method" % sender)
return self
def quit(self, sender):
self.active_status = True
print("The virtual assistant is being quit by %s method" % sender)
def add(self, handle_name):
handle_event = getattr(self, handle_name)
self.dispatcher.connect(handle_event, signal=handle_name, sender=self.dispatcher.Any)
return self
def trigger(self, handle):
self.dispatcher.send(signal=handle, sender=handle)
```
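A minimal usage sketch mirroring how `SystemModule` drives the dispatcher further below (`events.add("sleep").trigger("sleep")`); it assumes the pydispatcher package is installed.
```python
# Mirrors how SystemModule drives the dispatcher below: events.add("sleep").trigger("sleep").
# Assumes the pydispatcher package is installed.
from Stephanie.EventDispatcher.event_dispatcher import EventDispatcher

events = EventDispatcher()
events.add("sleep").trigger("sleep")   # prints the "going to sleep" message
events.add("quit").trigger("quit")     # prints the "being quit" message

print(events.close(), events.active_status)  # True True
```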
#### File: Stephanie/local_libs/numbers_format.py
```python
import re
class NumberService(object):
__small__ = {
'zero': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'five': 5,
'six': 6,
'seven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90
}
__magnitude__ = {
'thousand': 1000,
'million': 1000000,
'billion': 1000000000,
'trillion': 1000000000000,
'quadrillion': 1000000000000000,
'quintillion': 1000000000000000000,
'sextillion': 1000000000000000000000,
'septillion': 1000000000000000000000000,
'octillion': 1000000000000000000000000000,
'nonillion': 1000000000000000000000000000000,
'decillion': 1000000000000000000000000000000000,
}
__ordinals__ = {
'first': 'one',
'second': 'two',
'third': 'three',
'fourth': 'four',
'fifth': 'five',
'sixth': 'six',
        'seventh': 'seven',
'eighth': 'eight',
'ninth': 'nine',
'tenth': 'ten',
'eleventh': 'eleven',
        'twelfth': 'twelve',
        'thirteenth': 'thirteen',
        'fourteenth': 'fourteen',
        'fifteenth': 'fifteen',
'sixteenth': 'sixteen',
'seventeenth': 'seventeen',
'eighteenth': 'eighteen',
'nineteenth': 'nineteen',
'twentieth': 'twenty',
'thirtieth': 'thirty',
'fortieth': 'forty',
'fiftieth': 'fifty',
'sixtieth': 'sixty',
'seventieth': 'seventy',
'eightieth': 'eighty',
'ninetieth': 'ninety',
'hundredth': 'hundred'
}
__fractions__ = {
'quarter': 'four',
'half': 'two',
'halve': 'two'
}
class NumberException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def parse(self, words):
"""A general method for parsing word-representations of numbers.
Supports floats and integers.
Args:
words (str): Description of an arbitrary number.
Returns:
A double representation of the words.
"""
def exact(words):
"""If already represented as float or int, convert."""
try:
return float(words)
            except (ValueError, TypeError):
return None
guess = exact(words)
if guess is not None:
return guess
split = words.split(' ')
# Replace final ordinal/fraction with number
if split[-1] in self.__fractions__:
split[-1] = self.__fractions__[split[-1]]
elif split[-1] in self.__ordinals__:
split[-1] = self.__ordinals__[split[-1]]
parsed_ordinals = ' '.join(split)
return self.parseFloat(parsed_ordinals)
def parseFloat(self, words):
"""Convert a floating-point number described in words to a double.
Supports two kinds of descriptions: those with a 'point' (e.g.,
"one point two five") and those with a fraction (e.g., "one and
a quarter").
Args:
words (str): Description of the floating-point number.
Returns:
A double representation of the words.
"""
def pointFloat(words):
m = re.search(r'(.*) point (.*)', words)
if m:
whole = m.group(1)
frac = m.group(2)
total = 0.0
coeff = 0.10
for digit in frac.split(' '):
total += coeff * self.parse(digit)
coeff /= 10.0
return self.parseInt(whole) + total
return None
def fractionFloat(words):
m = re.search(r'(.*) and (.*)', words)
if m:
whole = self.parseInt(m.group(1))
frac = m.group(2)
# Replace plurals
                frac = re.sub(r'(\w+)s(\b)', r'\g<1>\g<2>', frac)
# Convert 'a' to 'one' (e.g., 'a third' to 'one third')
                frac = re.sub(r'(\b)a(\b)', r'\g<1>one\g<2>', frac)
split = frac.split(' ')
# Split fraction into num (regular integer), denom (ordinal)
num = split[:1]
denom = split[1:]
while denom:
try:
# Test for valid num, denom
num_value = self.parse(' '.join(num))
denom_value = self.parse(' '.join(denom))
return whole + float(num_value) / denom_value
except:
# Add another word to num
num += denom[:1]
denom = denom[1:]
return None
# Extract "one point two five"-type float
result = pointFloat(words)
if result:
return result
# Extract "one and a quarter"-type float
result = fractionFloat(words)
if result:
return result
# Parse as integer
return self.parseInt(words)
def parseInt(self, words):
"""Parses words to the integer they describe.
Args:
words (str): Description of the integer.
Returns:
An integer representation of the words.
"""
# Remove 'and', case-sensitivity
words = words.replace(" and ", " ").lower()
# 'a' -> 'one'
        words = re.sub(r'(\b)a(\b)', r'\g<1>one\g<2>', words)
def textToNumber(s):
"""
Converts raw number string to an integer.
Based on text2num.py by <NAME>.
"""
a = re.split(r"[\s-]+", s)
n = 0
g = 0
for w in a:
x = NumberService.__small__.get(w, None)
if x is not None:
g += x
elif w == "hundred":
g *= 100
else:
x = NumberService.__magnitude__.get(w, None)
if x is not None:
n += g * x
g = 0
else:
raise NumberService.NumberException(
"Unknown number: " + w)
return n + g
return textToNumber(words)
def isValid(self, inp):
try:
self.parse(inp)
return True
except:
return False
@staticmethod
def parseMagnitude(m):
"""Parses a number m into a human-ready string representation.
For example, crops off floats if they're too accurate.
Arguments:
m (float): Floating-point number to be cleaned.
Returns:
Human-ready string description of the number.
"""
m = NumberService().parse(m)
def toDecimalPrecision(n, k):
return float("%.*f" % (k, round(n, k)))
# Cast to two digits of precision
digits = 2
magnitude = toDecimalPrecision(m, digits)
# If value is really small, keep going
while not magnitude:
digits += 1
magnitude = toDecimalPrecision(m, digits)
# If item is less than one, go one beyond 'necessary' number of digits
if m < 1.0:
magnitude = toDecimalPrecision(m, digits + 1)
# Ignore decimal accuracy if irrelevant
if int(magnitude) == magnitude:
magnitude = int(magnitude)
# Adjust for scientific notation
magString = str(magnitude)
        magString = re.sub(r'(\d)e-(\d+)',
                           r'\g<1> times ten to the negative \g<2>', magString)
        magString = re.sub(r'(\d)e\+(\d+)',
                           r'\g<1> times ten to the \g<2>', magString)
        magString = re.sub(r'-(\d+)', r'negative \g<1>', magString)
        magString = re.sub(r'\b0(\d+)', r'\g<1>', magString)
return magString
def longestNumber(self, inp):
"""Extracts the longest valid numerical description from a string.
Not guaranteed to return a result even if some valid numerical
description exists (i.e., method is not particularly advanced).
Args:
inp (str): An arbitrary string, hopefully containing a number.
Returns:
The number with the longest string description in input,
or None if not found.
"""
split = inp.split(' ')
# Assume just a single number
numStart = None
numEnd = None
for i, w in enumerate(split):
if self.isValid(w):
if numStart is None:
numStart = i
numEnd = i
else:
# Check for ordinal, which would signify end
                w = re.sub(r'(\w+)s(\b)', r'\g<1>\g<2>', w)
if w in self.__ordinals__:
if self.isValid(' '.join(split[numStart:i + 1])):
numEnd = i
break
description = ' '.join(split[numStart:numEnd + 1])
return self.parse(description)
```
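A few illustrative calls, with expected results traced by hand from the parser above; the import path simply follows the file heading.
```python
# Illustrative calls against the word-to-number parser defined above.
from Stephanie.local_libs.numbers_format import NumberService

ns = NumberService()
print(ns.parse("twenty two"))                         # 22
print(ns.parse("one point two five"))                 # 1.25
print(ns.parse("one and a quarter"))                  # 1.25
print(ns.longestNumber("pay me forty two dollars"))   # 42
```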
#### File: Stephanie/local_libs/search_module.py
```python
class SearchModule:
def __init__(self):
pass
def search_for_competition_by_name(self, competitions, query):
m, answer = self.search(competitions, attribute_name="caption", query=query)
if m == 0:
return False
return answer
def search_for_competition_by_code(self, competitions, query):
return self.search_by_code(competitions, attribute_name="league", query=query)
def search_for_team_by_name(self, teams, query):
m, answer = self.search(teams, attribute_name="name", query=query)
if m == 0:
return False
return answer
def search_for_team_by_code(self, teams, query):
return self.search_by_code(teams, attribute_name="code", query=query)
def search_for_player_by_name(self, players, query):
m, answer = self.search(players, attribute_name="name", query=query)
if m == 0:
return False
return answer
def search_for_team_from_standing_by_name(self, teams, query):
m, answer = self.search(teams, attribute_name="team_name", query=query)
if m == 0:
return False
return answer
@staticmethod
def search_by_code(dataset, attribute_name, query):
search = query.lower()
for index, data in enumerate(dataset):
code = getattr(data, attribute_name).lower()
if code == search:
return dataset[index]
return False
@staticmethod
def search(dataset, attribute_name, query):
values = [0 for _ in range(0, len(dataset))]
search = query.lower().split()
upper_threshold = len(search)
for index, data in enumerate(dataset):
data_name = getattr(data, attribute_name).lower()
search_array = data_name.split()
for index2, text in enumerate(search_array):
if index2 >= upper_threshold:
break
threshold = len(search[index2])
for i in range(0, len(text)):
if i >= threshold - 1:
break
if text[i] == search[index2][i]:
values[index] += 1
max_value = max(values)
max_index = values.index(max_value)
return max_value, dataset[max_index]
```
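An illustrative call with stand-in data: `search` only needs objects exposing the attribute it is told to read, so a `namedtuple` is enough; the import path follows the file heading.
```python
# Stand-in data: search() only needs objects exposing the attribute it is told to read.
from collections import namedtuple

from Stephanie.local_libs.search_module import SearchModule

Team = namedtuple("Team", ["name", "code"])
teams = [Team("Arsenal FC", "AFC"), Team("Chelsea FC", "CFC")]

sm = SearchModule()
print(sm.search_for_team_by_name(teams, "arsenal"))   # Team(name='Arsenal FC', code='AFC')
print(sm.search_for_team_by_code(teams, "cfc"))       # Team(name='Chelsea FC', code='CFC')
```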
#### File: Stephanie/Modules/facebook_module.py
```python
import pytz
import datetime
import requests
import facebook
from Stephanie.Modules.base_module import BaseModule
class FacebookModule(BaseModule):
def __init__(self, *args):
super(FacebookModule, self).__init__(*args)
self.oauth_access_token = self.get_configuration('facebook_oauth_token')
self.requests = requests
if self.oauth_access_token:
self.graph = None
self.set_graph()
else:
status = self.do_init()
if not status:
                # __init__ cannot return a value; flag the failed init by leaving self.graph as None
                self.graph = None
def do_init(self):
app_id = self.get_configuration('facebook_app_id')
app_secret = self.get_configuration('facebook_app_secret')
app_access_token = self.get_configuration('facebook_access_token')
if app_id and app_secret and app_access_token:
params = {
'client_id': app_id,
'client_secret': app_secret,
'grant_type': 'fb_exchange_token',
'fb_exchange_token': app_access_token
}
r = self.requests.get("https://graph.facebook.com/oauth/access_token", params=params)
if r.ok:
oauth_access_token = r.json()['access_token']
self.oauth_access_token = self.write_configuration('facebook_oauth_token', oauth_access_token)
self.graph = facebook.GraphAPI(oauth_access_token)
return True
return False
def set_graph(self, oauth_access_token=None):
if oauth_access_token:
self.oauth_access_token = oauth_access_token
self.graph = facebook.GraphAPI(self.oauth_access_token)
def get_birthday_reminders(self):
"""
Responds to user-input, typically speech text, by listing the user's
Facebook friends with birthdays today.
Arguments:
text -- user-input, typically transcribed speech
self.assistant -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
try:
results = self.graph.request("me/friends",
args={'fields': 'id,name,birthday'})
except facebook.GraphAPIError:
response = ("I have not been authorized to query your Facebook. If you " +
"would like to check birthdays in the future, please visit " +
"the Jasper dashboard.")
return response
except:
return "I apologize, there's a problem with that service at the moment."
needle = datetime.datetime.now(tz=pytz.utc).strftime("%m/%d")
people = []
for person in results['data']:
try:
if needle in person['birthday']:
people.append(person['name'])
except:
continue
if len(people) > 0:
if len(people) == 1:
output = people[0] + " has a birthday today."
else:
output = "Your friends with birthdays today are " + \
", ".join(people[:-1]) + " and " + people[-1] + "."
else:
output = "None of your friends have birthdays today."
return output
def get_notifications(self):
"""
Not working since facebooks new update which doesn't allow notifications to be fetched. :(
Responds to user-input, typically speech text, with a summary of
the user's Facebook notifications, including a count and details
related to each individual notification.
Arguments:
text -- user-input, typically transcribed speech
self.assistant -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
try:
results = self.graph.request("me/notifications")
except facebook.GraphAPIError:
response = ("I have not been authorized to query your Facebook. If you " +
"would like to check your notifications in the future, " +
"please visit the Stephanie facebook module configuraton.")
return response
except:
return "I apologize, there's a problem with that service at the moment."
if not len(results['data']):
return "You have no Facebook notifications."
updates = []
for notification in results['data']:
updates.append(notification['title'])
count = len(results['data'])
response = ("You have " + str(count) +
" Facebook notifications. " + " ".join(updates) + ". ")
return response
def status_update(self):
self.assistant.say("What's in your mind?")
text = self.assistant.listen().decipher()
try:
self.graph.put_wall_post(text)
self.assistant.say("You have successully put up a wall post.")
except facebook.GraphAPIError:
response = ("I have not been authorized to query your Facebook. If you " +
"would like to check your notifications in the future, " +
"please visit the Stephanie facebook module configuraton.")
return response
except:
return "I apologize, there's a problem with that service at the moment."
```
#### File: Stephanie/Modules/football_module.py
```python
from Stephanie.Modules.base_module import BaseModule
from Stephanie.local_libs.football_manager import FootballManager
class FootballModule(BaseModule):
modules = (
("FooballModule@GetAllCompetitions", ("all", "competitions")),
("FooballModule@GetEnglishLeague", ("english", "league")),
("FooballModule@GetEnglishSecondLeague", ("english", "second", "league")),
("FooballModule@GetGermanLeague", ("german", "league")),
("FooballModule@GetGermanSecondLeague", ("german", "second", "league")),
("FooballModule@GetFrenchLeague", ("french", "league")),
("FooballModule@GetFrenchSecondLeague", ("french", "second", "league")),
("FooballModule@GetSpanishLeague", ("spanish", "league")),
("FooballModule@GetSpanishSecondLeague", ("spanish", "second", "league")),
("FooballModule@GetGermanCup", ("german", "cup")),
("FooballModule@GetChampionsLeague", ("champions", "league")),
("FooballModule@GetNetherlandsLeague", ("netherlands", "league")),
("FooballModule@GetPortugueseLeague", ("portuguese", "league")),
("FooballModule@GetItalianLeague", ("italian", "league")),
("FooballModule@TeamHandle", ("team", "information")),
("FooballModule@GetNews", ("latest", "news")),
)
def __init__(self, *args):
super(FootballModule, self).__init__(*args)
self.API_KEY = self.get_configuration("api.football.org.key")
self.fm = FootballManager(self.API_KEY)
self.team_id = self.get_configuration("favorite_football_team_id")
self.team_name = self.get_configuration("favorite_football_team_name")
self.competition_name = self.get_configuration("favorite_football_competition_name")
def handle(self):
self.assistant.say("which competition would you like to know about? or maybe your team information? or perhaps some news?")
text = self.assistant.listen().decipher()
module_func = self.assistant.understand(self.modules, text)
getattr(self, module_func)()
def get_all_competitions(self):
return self.fm.get_all_competitions()
def get_english_league(self):
self.get_general_league(426)
def get_english_second_league(self):
self.get_general_league(427)
def get_german_league(self):
self.get_general_league(430)
def get_german_second_league(self):
self.get_general_league(431)
def get_spanish_league(self):
self.get_general_league(439)
def get_spanish_second_league(self):
self.get_general_league(437)
def get_french_league(self):
self.get_general_league(434)
def get_french_second_league(self):
self.get_general_league(435)
def get_netherlands_league(self):
self.get_general_league(433)
def get_portuguese_league(self):
self.get_general_league(436)
def get_italian_league(self):
self.get_general_league(438)
def get_champions_league(self):
self.get_general_league(440)
def get_general_league(self, competition_id):
active = False
modules = (
("FootballModule@LeagueSpecificNews", ("get", "news")),
("FootballModule@LeagueSpecificTable", ("get", "league", "table")),
("FootballModule@LeagueSpecificNext_fixtures", ("get", "next", "fixtures")),
("FootballModule@LeagueSpecificPrevious_fixtures", ("get", "previous", "fixtures")),
)
while not active:
response = self.fm.get_specific_competition(competition_id)
self.assistant.say("%s, would you like to know about it's latest news, league table or "
" maybe fixtures?" % response)
text = self.assistant.listen().decipher()
module_func = self.assistant.understand(modules, text)
active = getattr(self, module_func)()
return active
def league_specific_table(self):
response = self.fm.get_league_table()
self.assistant.say(response)
self.assistant.say("Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def league_specific_next_fixtures(self):
response = self.fm.get_fixtures()
self.assistant.say(response)
self.assistant.say("Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def league_specific_previous_fixtures(self):
response = self.fm.get_fixtures(prev=True)
self.assistant.say(response)
self.assistant.say("Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def team_handle(self):
active = False
modules = (
("FooballModule@TeamNews", ("get", "news")),
("FooballModule@TeamInjuryNews", ("get", "injury", "news")),
("FooballModule@TeamTransferTalk", ("get", "transfer", "talk")),
("FooballModule@TeamPlayers", ("get", "players")),
("FooballModule@TeamNextFixtures", ("get", "next", "fixtures")),
("FooballModule@TeamPreviousFixtures", ("get", "previous", "fixtures")),
)
while not active:
response = self.fm.get_team(self.team_id)
self.assistant.say("%s, would you like to know about it's latest news, transfer talks or "
" maybe fixtures?" % response)
text = self.assistant.listen().decipher()
module_func = self.assistant.understand(modules, text)
active = getattr(self, module_func)()
return active
def team_next_fixtures(self):
response = self.fm.get_team_fixtures()
self.assistant.say(response)
self.assistant.say("Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def team_previous_fixtures(self):
response = self.fm.get_team_fixtures(prev=True)
self.assistant.say(response)
self.assistant.say("Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def league_specific_news(self):
response = self.fm.get_competition_news(self.competition_name)
self.assistant.say(response)
self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def team_specific_news(self):
response = self.fm.get_competition_news(self.competition_name)
self.assistant.say(response)
self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def team_news(self):
response = self.fm.get_team_news(self.team_name)
self.assistant.say(response)
self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def team_injury_news(self):
response = self.fm.get_team_injury_news(self.team_name)
self.assistant.say(response)
self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def team_transfer_talk(self):
response = self.fm.get_team_news(self.team_name)
self.assistant.say(response)
self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def get_news(self):
response = self.fm.get_news()
self.assistant.say(response)
self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
```
#### File: Stephanie/Modules/system_module.py
```python
import datetime as dt
from Stephanie.Modules.base_module import BaseModule
class SystemModule(BaseModule):
def __init__(self, *args):
super(SystemModule, self).__init__(*args)
self.name = self.get_configuration(section="USER", key="name")
self.gender = self.get_configuration(section="USER", key="gender")
def default(self):
return "Repeat back your command!."
def meaning_of_life(self):
return "42 is the meaning of life."
def time_right_now(self):
t = dt.datetime.now()
return self.time_teller(t)
def date_today(self):
t = dt.datetime.now()
return self.date_teller(t)
def wake_up(self):
t = dt.datetime.now()
if self.gender:
gender = self.gender.lower()
if gender == "male":
return "%s, sir!" % self.phase_of_the_day(t)
elif gender == "female":
return "%s, sir!" % self.phase_of_the_day(t)
else:
return "%s, sir!" % self.phase_of_the_day(t)
elif self.name:
return "%s, %s!" % (self.phase_of_the_day(t), self.name)
else:
return "%s!" % self.phase_of_the_day(t)
# Example to access assistant instance
# def wake_up(self):
# self.assistant.say("What time is it again?")
# text = self.assistant.listen().decipher()
# return "Good %s, sir!" % text
def go_to_sleep(self):
self.assistant.events.add("sleep").trigger("sleep")
return "Sleep for the weak!"
def quit(self):
self.assistant.events.add("quit").trigger("quit")
return "I will come back stronger!"
def tell_system_status(self):
import psutil
import platform
import datetime
os, name, version, _, _, _ = platform.uname()
version = version.split('-')[0]
cores = psutil.cpu_count()
cpu_percent = psutil.cpu_percent()
memory_percent = psutil.virtual_memory()[2]
disk_percent = psutil.disk_usage('/')[3]
boot_time = datetime.datetime.fromtimestamp(psutil.boot_time())
running_since = boot_time.strftime("%A %d. %B %Y")
response = "I am currently running on %s version %s. " % (os, version)
response += "This system is named %s and has %s CPU cores. " % (name, cores)
response += "Current disk_percent is %s percent. " % disk_percent
response += "Current CPU utilization is %s percent. " % cpu_percent
response += "Current memory utilization is %s percent. " % memory_percent
response += "it's running since %s." % running_since
return response
@staticmethod
def time_teller(time):
# t = time.strftime('%I %M %H')
# phase = time.strftime("%p")
t = time.strftime("%I:%M:%p")
d = {0: "oh",
1: "one",
2: "two",
3: "three",
4: "four",
5: "five",
6: "six",
7: "seven",
8: "eight",
9: "nine",
10: "ten",
11: "eleven",
12: "twelve",
13: "thirteen",
14: "fourteen",
15: "fifteen",
16: "sixteen",
17: "seventeen",
18: "eighteen",
19: "nineteen",
20: "twenty",
30: "thirty",
40: "forty",
50: "fifty",
60: "sixty"}
time_array = t.split(":")
hour, minute, phase = int(time_array[0]), int(time_array[1]), time_array[2]
# hour = d[hour]
# minute = d[minute]
return "The time is %s %s %s" % (hour, minute, phase)
#
# hour = d[int(t[0:2])] if t[0:2] != "00" else d[12]
# # suffix = 'a.m.' if d[int(t[7:9])] == hour else 'p.m.'
# suffix = phase
#
# if t[3] == "0":
# if t[4] == "0":
# minute = ""
# else:
# minute = d[0] + " " + d[int(t[4])]
# else:
# minute = d[int(t[3]) * 10] + '-' + d[int(t[4])]
# return 'The time is %s %s %s.' % (hour, minute, suffix)
@staticmethod
def date_teller(date):
return date.strftime("It's %A, %d %B %Y today!")
@staticmethod
def phase_of_the_day(time):
hour = time.hour
if hour < 12:
return 'Good Morning'
elif 12 <= hour < 18:
return 'Good Afternoon'
        else:
            return 'Good Evening'
```
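The formatting helpers are static methods, so they can be exercised without the assistant framework; a small sketch, assuming the Stephanie package (and the config its base module pulls in) is importable:
```python
# The formatting helpers are @staticmethods, so no assistant framework is needed to call them.
import datetime as dt

from Stephanie.Modules.system_module import SystemModule  # assumes the Stephanie package and its config are importable

stamp = dt.datetime(2017, 5, 4, 14, 30)
print(SystemModule.time_teller(stamp))       # The time is 2 30 PM
print(SystemModule.date_teller(stamp))       # It's Thursday, 04 May 2017 today!
print(SystemModule.phase_of_the_day(stamp))  # Good Afternoon
```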
#### File: Stephanie/TextProcessor/text_processor.py
```python
from Stephanie.TextProcessor.text_sorter import TextSorter
from Stephanie.TextProcessor.text_learner import TextLearner
from Stephanie.TextProcessor.module_router import ModuleRouter
from Stephanie.configurer import config
class TextProcessor:
def __init__(self, events):
self.sorter = TextSorter()
self.learner = TextLearner()
self.router = ModuleRouter(events)
self.c = config
def process(self, raw_text):
try:
explicit = self.c.config.getboolean("SYSTEM", "greet_engine")
sub_words, key_words = self.sorter.sort(raw_text, explicit=explicit)
module_info = self.learner.learn(key_words)
result_speech_text = self.router.inject(module_info, raw_text, sub_words, key_words)
except Exception as e:
print(e)
return None
return result_speech_text
```
#### File: Stephanie/TextProcessor/text_sorter.py
```python
from difflib import SequenceMatcher as sm
from metaphone import doublemetaphone as dm
from Stephanie.configurer import config
class TextSorter:
def __init__(self):
self.raw_text_array = []
self.sub_words = []
self.key_words = []
self.reserved_sub_words = self.get_reserved_sub_words()
self.c = config
def sort(self, raw_text, explicit=True):
return self.clean(raw_text, explicit).process()
def clean(self, raw_text, explicit):
raw_text = raw_text.lower()
self.raw_text_array = raw_text.split()
if explicit:
self.greet_engine()
self.key_words = self.raw_text_array.copy()
return self
def process(self):
for index, raw_text in enumerate(self.raw_text_array):
if raw_text in self.reserved_sub_words:
self.sub_words.append(raw_text)
self.key_words.remove(raw_text)
return self.sub_words, self.key_words
@staticmethod
def get_reserved_sub_words():
return {
"what", "where", "which", "how", "when", "who",
"is", "are", "makes", "made", "make", "did", "do",
"to", "the", "of", "from", "against", "and", "or",
"you", "me", "we", "us", "your", "my", "mine", 'yours',
"could", "would", "may", "might", "let", "possibly",
'tell', "give", "told", "gave", "know", "knew",
'a', 'am', 'an', 'i', 'like', 'has', 'have', 'need',
'will', 'be', "this", 'that', "for"
}
def greet_engine(self):
assistant_name = self.c.config.get('SYSTEM', 'assistant_name')
meta_name = dm(assistant_name)[0]
for index, raw_text in enumerate(self.raw_text_array):
meta_text = dm(raw_text)[0]
chances = sm(None, meta_name, meta_text).ratio()
if chances > 0.7:
self.raw_text_array = self.raw_text_array[index+1:]
return
```
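A small sketch of the reserved-word split; with `explicit=False` the wake-word check (and its `assistant_name` lookup) is skipped, though constructing `TextSorter` still imports the shared `config` singleton and the metaphone package.
```python
# Reserved-word split only; explicit=False skips the metaphone wake-word check.
# Importing the module still pulls in the shared config singleton (config.ini) and metaphone.
from Stephanie.TextProcessor.text_sorter import TextSorter

sub_words, key_words = TextSorter().sort("what is the time", explicit=False)
print(sub_words)   # ['what', 'is', 'the']
print(key_words)   # ['time']
```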
#### File: stephanie-va/Stephanie/updater.py
```python
import requests
from Stephanie.configurer import config
class Updater:
def __init__(self, speaker):
self.speaker = speaker
self.c = config
self.current_version = self.c.config.get("APPLICATION", "version")
self.update_url = "https://raw.githubusercontent.com/SlapBot/va-version-check/master/version.json"
self.requests = requests
self.data = None
def check_for_update(self):
try:
self.data = self.get_update_information()
except Exception:
print("Couldn't access stephanie's version update information.")
return
try:
if str(self.current_version) != str(self.data['version']):
print("Your virtual assistant's version is %s, while the latest one is %s" % (self.current_version, self.data['version']))
if int(self.data['print_status']):
print("Kindly visit the main website of stephanie at www.github.com/slapbot/stephanie-va to update the software to it's latest version.")
if int(self.data['speak_status']):
self.speaker.speak(self.data['message'])
for message in self.data['additional_information']:
print(message)
if self.data['speak_announcement']:
self.speaker.speak(self.data['speak_announcement'])
except Exception:
print("There's some problem in recieving version update information.")
return
def get_update_information(self):
r = self.requests.get(self.update_url)
data = r.json()
return data
```
|
{
"source": "jeremyatia/mini_datathon",
"score": 3
}
|
#### File: jeremyatia/mini_datathon/utils.py
```python
import base64
# @st.cache
# def load_data():
# df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom.data',
# sep=' ', header=None)
# labels = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom_labels.data',
# header=None, sep=' ', names=['target', 'time']).iloc[:, 0]
# X_train = df.sample(**train_test_sampling)
# y_train = labels.loc[X_train.index]
# X_test = df.loc[~df.index.isin(X_train.index), :]
# y_test = labels.loc[X_test.index]
# return X_train, y_train, X_test, y_test
# st.markdown(data, unsafe_allow_html=True)
# if st.button('Load data'):
# X_train, y_train, X_test, y_test = load_data()
# st.markdown(get_table_download_link(X_train, filename='X_train.csv'), unsafe_allow_html=True)
# st.markdown(get_table_download_link(y_train, filename='y_train.csv'), unsafe_allow_html=True)
# st.markdown(get_table_download_link(X_test, filename='X_test.csv'), unsafe_allow_html=True)
def get_table_download_link(df, filename):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
return f'<a href="data:file/csv;base64,{b64}" download="{filename}">Download {filename}</a>'
```
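A sketch of the intended Streamlit usage, mirroring the commented-out calls above; the `from utils import ...` path is an assumption about how the app imports this file.
```python
# Sketch of the intended Streamlit usage, mirroring the commented-out calls above.
# The "from utils import ..." path is an assumption about how the app imports this file.
import pandas as pd
import streamlit as st

from utils import get_table_download_link

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
st.markdown(get_table_download_link(df, filename="data.csv"), unsafe_allow_html=True)
```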
|
{
"source": "Jeremyb83/makers-bras-robotique",
"score": 3
}
|
#### File: Jeremyb83/makers-bras-robotique/cinematique.py
```python
import itertools
import numpy as np
import time
import math
import pypot.dynamixel
def degToRad(deg):
return math.pi * deg / 180
def radToDeg(rad):
return 180 * rad / math.pi
#lower_limit = -180
#upper_limit = 180
ports = pypot.dynamixel.get_available_ports()
if not ports:
raise IOError('no port found!')
print('ports found', ports)
print('connecting on the first available port:', ports[0])
dxl_io = pypot.dynamixel.DxlIO(ports[0])
found_ids = dxl_io.scan()
print('Found ids:', found_ids)
ids = found_ids[:5]
dxl_io.enable_torque(ids)
#dxl_io.set_angle_limit(dict(zip(ids, itertools.repeat((lower_limit, upper_limit)))))
print('get angle limit', dxl_io.get_angle_limit(ids))
print('get present position', dxl_io.get_present_position(ids))
xJ = 3
yJ = 0
zJ = 0
l1 = 1
l2 = 1
l3 = 1
l4 = 1
l5 = 1
alpha = degToRad(135)
if yJ == 0:
theta1 = math.pi / 2
else:
tantheta1 = xJ / yJ
if tantheta1 >= 1:
tantheta1 = 1
elif tantheta1 <= -1:
tantheta1 = -1
theta1 = np.arctan2(xJ, yJ)
sintheta2 = (yJ - np.cos(alpha - math.pi)*(l3 + l4 + l5)) / l2
if sintheta2 >= 1:
sintheta2 = 1
elif sintheta2 <= -1:
sintheta2 = -1
theta2 = np.arcsin(sintheta2)
if theta2 < 0:
theta2 = -theta2
theta3 = alpha - theta2
print("theta1 = {}".format(theta1))
print("theta2 = {}".format(theta2))
print("theta3 = {}".format(theta3))
print("theta1 = {}".format(radToDeg(theta1)))
print("theta2 = {}".format(radToDeg(theta2)))
print("theta3 = {}".format(radToDeg(theta3)))
theta1 = -theta1
theta2 = -theta2
theta3 = -theta3 + math.pi /2
dxl_io.set_goal_position({ids[0]: radToDeg(theta1)})
dxl_io.set_goal_position({ids[1]: radToDeg(theta2)})
dxl_io.set_goal_position({ids[2]: radToDeg(theta3)})
print('get present position', dxl_io.get_present_position(ids))
```
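A pure-math sanity check of the angle-conversion helpers used by the script; the functions are copied verbatim so no Dynamixel hardware or pypot setup is needed.
```python
# Pure-math sanity check of the conversion helpers used by the script above;
# the helpers are copied verbatim so no Dynamixel hardware or pypot setup is needed.
import math

def degToRad(deg):
    return math.pi * deg / 180

def radToDeg(rad):
    return 180 * rad / math.pi

print(degToRad(135))             # ~2.356 rad, the alpha used by the script
print(radToDeg(degToRad(135)))   # 135.0 round trip
```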
#### File: Jeremyb83/makers-bras-robotique/cinematique_robotique.py
```python
import itertools
import numpy as np
import time
import math
import cv2
import cv2.aruco as aruco
import yaml
import scanner
import pypot.dynamixel
from picamera import PiCamera
from picamera.array import PiRGBArray
def degToRad(deg):
return math.pi * deg / 180
def radToDeg(rad):
return 180 * rad / math.pi
def searchObject(i,sens):
#startT = time.time()
#currentT = time.time()
dxl_io.set_goal_position({ids[1]: 20})
dxl_io.set_goal_position({ids[2]: -60})
if sens ==1:
dxl_io.set_goal_position({ids[0]: i})
else:
dxl_io.set_goal_position({ids[0]: -i-(150-120)})
#time.sleep(0.2)
def moveTo(xcam, ycam, zcam):
l1 = 44
l2 = 53
l3 = 47
l4 = 43
l5 = 141
lcam = 290
thetaCam = degToRad(45)
print("ycam = {}".format((ycam)))
print("zcam = {}".format((zcam)))
print("thetaCam = {}".format((thetaCam)))
print("np.cos(thetaCam) = {}".format((np.cos(thetaCam))))
print("np.sin(thetaCam) = {}".format((np.sin(thetaCam))))
#changement repère camera:
xJ = xcam
yJ = np.cos(thetaCam)*ycam + np.sin(thetaCam)*zcam
zJ = np.cos(thetaCam)*zcam - np.sin(thetaCam)*ycam - lcam
print("yJ = {}".format(radToDeg(yJ)))
print("zJ = {}".format(radToDeg(zJ)))
if yJ == 0:
theta1 = math.pi / 2
else:
tantheta1 = xJ / yJ
if tantheta1 >= 1:
tantheta1 = 1
elif tantheta1 <= -1:
tantheta1 = -1
theta1 = np.arctan2(xJ, yJ)
xC = 0
yC = 0
zC = l1
CJ = np.sqrt(xJ**2 + yJ**2 + (zJ - zC)**2)
cosAlpha = (-(l3+l4+l5)**2 + l2**2 + CJ**2)/float(2*l2*CJ)
if cosAlpha >= 1:
cosAlpha = 1
elif cosAlpha <= -1:
cosAlpha = -1
alpha = np.arccos(cosAlpha)
sinBeta = (zJ - zC)/float(CJ)
if sinBeta >= 1:
sinBeta = 1
elif sinBeta <= -1:
sinBeta = -1
beta = np.arcsin(sinBeta)
print("alpha = {}".format(radToDeg(alpha)))
print("beta = {}".format(radToDeg(beta)))
theta2 = math.pi - alpha - beta
cosTheta3 = (-CJ**2 + (l3+l4+l5)**2 + l2**2)/float(2*(l3+l4+l5)*l2)
if cosTheta3 >= 1:
cosTheta3 = 1
elif cosTheta3 <= -1:
cosTheta3 = -1
theta3 = np.arccos(cosTheta3)
print("theta1 = {}".format(theta1))
print("theta2 = {}".format(theta2))
print("theta3 = {}".format(theta3))
print("theta1 = {}".format(radToDeg(theta1)))
print("theta2 = {}".format(radToDeg(theta2)))
print("theta3 = {}".format(radToDeg(theta3)))
theta1 = -theta1
theta2 = -theta2 + math.pi /2
theta3 = theta3 - math.pi /2
print("theta1 = {}".format(radToDeg(theta1)))
print("theta2 = {}".format(radToDeg(theta2)))
print("theta3 = {}".format(radToDeg(theta3)))
#seuil = 10
print('get present position', dxl_io.get_present_position(ids))
#if not ((dxl_io.get_present_position({ids[0]}) > (radToDeg(theta1)-seuil)) and (dxl_io.get_present_position({ids[0]}) < (radToDeg(theta1)+seuil))):
#dxl_io.set_goal_position({ids[0]: radToDeg(theta1)})
seuil = 20
if(xcam < 0 - seuil or xcam > 0 + seuil):
dxl_io.set_goal_position({ids[0]: radToDeg(theta1)})
print("PASSSSSSSSSSSSSSSSSSSSSSSSSOKKKKKKKKKKK")
else:
print("okkkkkkkkkkkkkkkkkkkkkkkkk")
dxl_io.set_goal_position({ids[1]: radToDeg(theta2)})
dxl_io.set_goal_position({ids[2]: radToDeg(theta3)})
def catch():
dxl_io.set_goal_position({ids[4]: 150})
time.sleep(2)
dxl_io.set_goal_position({ids[2]: 0})
dxl_io.set_goal_position({ids[1]: 0})
time.sleep(2)
dxl_io.set_goal_position({ids[4]: 100})
#lower_limit = -180
#upper_limit = 180
ports = pypot.dynamixel.get_available_ports()
if not ports:
raise IOError('no port found!')
print('ports found', ports)
print('connecting on the first available port:', ports[0])
dxl_io = pypot.dynamixel.DxlIO(ports[0])
found_ids = dxl_io.scan()
print('Found ids:', found_ids)
ids = found_ids[:5]
dxl_io.enable_torque(ids)
#dxl_io.set_angle_limit(dict(zip(ids, itertools.repeat((lower_limit, upper_limit)))))
print('get angle limit', dxl_io.get_angle_limit(ids))
v = 70
dxl_io.set_moving_speed({ids[0]: v, ids[1]: v, ids[2]: v, ids[3]: v, ids[4]: v})
print('get present position', dxl_io.get_present_position(ids))
xJ = 284
yJ = 0
zJ = 44
resolution = (1280, 720)
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = resolution
camera.framerate = 30
rawCapture = PiRGBArray(camera, size=resolution)
# allow the camera to warmup
time.sleep(0.1)
skip_lines = 6
data = None
with open('piCamMatrix.yml') as infile:
for i in range(skip_lines):
_ = infile.readline()
data = yaml.load(infile)
mtx, dist = [data[i] for i in ('Camera_Matrix','Distortion_Coefficients')]
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()
debut = time.time()
i = -150
sens = 1
lost = 30
stable = 0
# capture frames from the camera
dxl_io.set_goal_position({ids[4]: 100})
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image, then initialize the timestamp
# and occupied/unoccupied text
image = frame.array
coord = scanner.findMarker(image, aruco_dict, parameters, mtx, dist, 0.048, True)
duree = time.time() - debut
if coord is not None and duree > 0.5 :
debut = time.time()
moveTo(coord[0]*10**3, coord[1]*10**3, coord[2]*10**3)
lost = 0
stable += 1
else:
lost+=1
stable = 0
if lost > 30:
print("searchObject")
if coord is None and i < 120:
searchObject(i, sens)
i+=5
else:
i = -150
sens = (sens+1)%2
if stable == 5:
print("catchhhh")
catch()
rawCapture.truncate(0)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# When everything done, release the capture
#cap.release()
cv2.destroyAllWindows()
#searchObject()
```
|
{
"source": "jeremybai/Easylink",
"score": 2
}
|
#### File: qiniu/test/io_test.py
```python
import os
import unittest
import string
import random
import urllib
try:
import zlib
binascii = zlib
except ImportError:
zlib = None
import binascii
import cStringIO
from qiniu import conf
from qiniu import rs
from qiniu import io
conf.ACCESS_KEY = os.getenv("QINIU_ACCESS_KEY")
conf.SECRET_KEY = os.getenv("QINIU_SECRET_KEY")
bucket_name = os.getenv("QINIU_TEST_BUCKET")
policy = rs.PutPolicy(bucket_name)
extra = io.PutExtra()
extra.mime_type = "text/plain"
extra.params = {'x:a': 'a'}
def r(length):
lib = string.ascii_uppercase
return ''.join([random.choice(lib) for i in range(0, length)])
class TestUp(unittest.TestCase):
def test(self):
def test_put():
key = "test_%s" % r(9)
# params = "op=3"
data = "hello bubby!"
extra.check_crc = 2
extra.crc32 = binascii.crc32(data) & 0xFFFFFFFF
ret, err = io.put(policy.token(), key, data, extra)
assert err is None
assert ret['key'] == key
def test_put_same_crc():
key = "test_%s" % r(9)
data = "hello bubby!"
extra.check_crc = 2
ret, err = io.put(policy.token(), key, data, extra)
assert err is None
assert ret['key'] == key
def test_put_no_key():
data = r(100)
extra.check_crc = 0
ret, err = io.put(policy.token(), key=None, data=data, extra=extra)
assert err is None
assert ret['hash'] == ret['key']
def test_put_quote_key():
data = r(100)
key = '<KEY>' + r(9)
ret, err = io.put(policy.token(), key, data)
assert err is None
assert ret['key'].encode('utf8') == key
data = r(100)
key = u'a\\b\\c"你好' + r(9)
ret, err = io.put(policy.token(), key, data)
assert err is None
assert ret['key'] == key
def test_put_unicode1():
key = "test_%s" % r(9) + '你好'
data = key
ret, err = io.put(policy.token(), key, data, extra)
assert err is None
assert ret[u'key'].endswith(u'你好')
def test_put_unicode2():
key = "test_%s" % r(9) + '你好'
data = key
data = data.decode('utf8')
ret, err = io.put(policy.token(), key, data)
assert err is None
assert ret[u'key'].endswith(u'你好')
def test_put_unicode3():
key = "test_%s" % r(9) + '你好'
data = key
key = key.decode('utf8')
ret, err = io.put(policy.token(), key, data)
assert err is None
assert ret[u'key'].endswith(u'你好')
def test_put_unicode4():
key = "test_%s" % r(9) + '你好'
data = key
key = key.decode('utf8')
data = data.decode('utf8')
ret, err = io.put(policy.token(), key, data)
assert err is None
assert ret[u'key'].endswith(u'你好')
def test_put_StringIO():
key = "test_%s" % r(9)
data = cStringIO.StringIO('hello buddy!')
ret, err = io.put(policy.token(), key, data)
assert err is None
assert ret['key'] == key
def test_put_urlopen():
key = "test_%s" % r(9)
data = urllib.urlopen('http://pythonsdk.qiniudn.com/hello.jpg')
ret, err = io.put(policy.token(), key, data)
assert err is None
assert ret['key'] == key
def test_put_no_length():
class test_reader(object):
def __init__(self):
self.data = 'abc'
self.pos = 0
def read(self, n=None):
if n is None or n < 0:
newpos = len(self.data)
else:
newpos = min(self.pos + n, len(self.data))
r = self.data[self.pos: newpos]
self.pos = newpos
return r
key = "test_%s" % r(9)
data = test_reader()
extra.check_crc = 2
extra.crc32 = binascii.crc32('abc') & 0xFFFFFFFF
ret, err = io.put(policy.token(), key, data, extra)
assert err is None
assert ret['key'] == key
test_put()
test_put_same_crc()
test_put_no_key()
test_put_quote_key()
test_put_unicode1()
test_put_unicode2()
test_put_unicode3()
test_put_unicode4()
test_put_StringIO()
test_put_urlopen()
test_put_no_length()
def test_put_file(self):
localfile = "%s" % __file__
key = "test_%s" % r(9)
extra.check_crc = 1
ret, err = io.put_file(policy.token(), key, localfile, extra)
assert err is None
assert ret['key'] == key
def test_put_crc_fail(self):
key = "test_%s" % r(9)
data = "hello bubby!"
extra.check_crc = 2
extra.crc32 = "wrong crc32"
ret, err = io.put(policy.token(), key, data, extra)
assert err is not None
def test_put_fail_reqid(self):
key = "test_%s" % r(9)
data = "hello bubby!"
ret, err = io.put("", key, data, extra)
assert "reqid" in err
class Test_get_file_crc32(unittest.TestCase):
def test_get_file_crc32(self):
file_path = '%s' % __file__
data = None
with open(file_path, 'rb') as f:
data = f.read()
io._BLOCK_SIZE = 4
assert binascii.crc32(
data) & 0xFFFFFFFF == io._get_file_crc32(file_path)
if __name__ == "__main__":
unittest.main()
```
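A standalone sketch of the CRC pattern these tests rely on: masking with `0xFFFFFFFF` normalises `crc32` (which can be negative on Python 2) to the unsigned 32-bit value passed via `extra.crc32`.
```python
# binascii.crc32 can be negative on Python 2, so the tests mask it to an unsigned
# 32-bit value before handing it to the uploader via extra.crc32 / check_crc.
import binascii

payload = b"hello bubby!"
crc = binascii.crc32(payload) & 0xFFFFFFFF
print(crc)  # unsigned CRC-32 of the payload
```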
|
{
"source": "JeremyBakker/nss-backendCapstone",
"score": 3
}
|
#### File: Prometheus/views/search_view.py
```python
from collections import Counter
from django.shortcuts import render
from django.apps import apps
from nltk import word_tokenize
from nltk.util import ngrams
from statistics import median
import datetime
import re
def search (request):
'''
This function renders the natural language data for the corporate
transcript selected by the user.
---Arguments---
request: the full HTTP request object
---Return Value---
request: the full HTTP request object
template: index.html
    context: the linguistic metrics computed for the selected transcript
'''
# These regex patterns lists contain the possible renderings of the names
# of CEOs and CFOs of the corporations whose transcripts we analyze. The
# transcripts lack this data in any reliable pattern to pull when parsing.
# Thus, a hard-coded list is necessary for now. Each row correlates to a
# company: Apple, Adobe, Amazon, HP, IBM, Microsoft, Oracle, and Samsung.
# Within this data set, there are three instances of a single name
# appearing in two different roles over time. <NAME> served as an EVP and
# COO at Apple before becoming CEO in 2011. I include him in the CEO list
# because <NAME> rarely, if ever, spoke on earnings calls. <NAME>
# filled that role in practice even before he became CEO. <NAME>
# briefly served as interim CEO of HP while maintaining her position as
# CFO. I leave her exclusively in the CFO category since she never
# abandoned that role. Safra Catz has served as President and CFO, the
# the latter role multiple times. I leave her in the CFO category because
# Larry Ellison has been CEO during Catz's entire tenure. Samsung is a bit
# of an anomaly in this list. The corporation often had an executive vice
# president and several senior vice presidents of product lines on the
# call. I categorized the EVP as CEO and the SVP as CFOs. Interpreters will
# need to consider this when comparing Samsung to other companies.
c_exec_o_list = [
'^(Steve)? ?Jobs', '^(Tim)?(othy)? ?Cook',
'^(Bruce)? ?Chizen', '^(Shantanu)? ?Narayen',
'^(Jeff)?(rey)? ?P?.? ?Bezos',
'^(Mark)? ?(V\.)?Hurd', '^(L.o)? ?Apotheker',
'^(Sam)(uel)? ?(J\.)? ?Palmisano',
'^(Bill)? ?Gates',
'^(Lawrence)?(Larry)? ?(J\.)? ?Ellison',
'^(Dr.)? ?(Woo)? ?[Ss]+?(ik)? ?Chu'
]
c_financ_o_list = [
'^(Peter)? ?Oppenheimer', '^(Luca)? ?Maestri',
'^(Murray)? ?Demo', '^(Mark)? ?Garrett',
'^Th?(om)?(as)? (J\.)? ?Szkutak',
'^(Bob)? ?Wayman', '^(Cath)?(erine)?(ie)? ?Lesjak',
'^(Mark)? ?Loughridge',
'^(Chris)?(topher)? ?(P\.)? ?Liddell', '^(Pete)(r)? ?Klein',
'^(Greg)?(ory)? ?(B\.)? ?Maffei', '^(Safra)? ?(A\.)? ?Catz?',
'^(Jeff)?(rey)? ?Epstein',
'^(Dr.)? ?(Yeong) ?[Dd]+?(uk)? ?Cho', '^(Dr.)? ?(David)? ?Steel',
'(Il)? ?(Ung)? ?Kim', '^(Yeongho)? ?Kang', '^(Sangheung)? ?Shin',
'^(Namseong)? ?Cho?', '^(Hyungdo)? ?Kim', '^(Hwan)? ?Kim',
'^(Yangkyu)? Kim?', '^(Myung)(kun)?(ho)? ?Kim', '^(Jungryul)? ?Lee',
'^(You?ng-?[Hh]ee)? ?Lee', '^(Bongku)? ?Kang', '(Wanhoon)? ?Hong',
'^(Dr.)? (Youngcho)? ?Chi', '(Jaeyong)? ?Lee'
]
c_exec_o_list_regex = re.compile(r'\b(?:%s)\b' % '|'.join(
c_exec_o_list))
c_financ_o_list_regex = re.compile(r'\b(?:%s)\b' % '|'.join(
c_financ_o_list))
# Here we pull the transcript name from the select box to get the
# relevant data we will use to render the page.
transcript = request.GET['transcript']
corporation = transcript.split('-')[0]
transcript_date = transcript.split('-')[1]
# Transform the date both for the SQL query and for display on the
# page.
transcript_date = datetime.datetime.strptime(
(transcript_date), "%d %B %y")
transcript_date_for_db = datetime.datetime.strftime(
(transcript_date), "%Y-%m-%d")
transcript_date_for_display = datetime.datetime.strftime(
(transcript_date), "%B %d, %Y")
# Pull the appropriate corporation model according to the string passed
# from the transcript select box on index.html.
model = apps.get_model('Prometheus', corporation)
# Query the Database
answers_query_set = model.objects.filter(
date_of_call = transcript_date_for_db, question=0).order_by("name")
c_exec_o_answer_list = list()
c_financ_o_answer_list = list()
c_exec_o_answer_length_list = list()
c_financ_o_answer_length_list = list()
number_of_shared_words = 0
c_exec_o_negative_words = 0
c_financ_o_negative_words = 0
c_exec_o_positive_words = 0
c_financ_o_positive_words = 0
c_exec_o_bigram_refs_to_general_knowledge = 0
c_exec_o_trigram_refs_to_general_knowledge = 0
c_financ_o_bigram_refs_to_general_knowledge = 0
c_financ_o_trigram_refs_to_general_knowledge = 0
c_exec_o_bigram_refs_to_shareholders_value = 0
c_exec_o_trigram_refs_to_shareholders_value = 0
c_financ_o_bigram_refs_to_shareholders_value = 0
c_financ_o_trigram_refs_to_shareholders_value = 0
c_exec_o_bigram_refs_to_value_creation = 0
c_financ_o_bigram_refs_to_value_creation = 0
c_exec_o_number_of_indefinite_instances = 0
c_financ_o_number_of_indefinite_instances = 0
# For each answer, we determine whether it correlates to the CEO or
# CFO. Then, after tokenizing the text, removing punctuation, and
# capitalizing each word, we split the text into bigrams and trigrams
# for searching by phrase. From there, we compare n-grams (1, 2, or 3)
# to relevant lectionaries stored as .txt files to determine text
# characteristics (positive_words, general_knowledge, etc.). We then
# calculate the appropriate proportional number and pass the data to
# the context for rendering.
for answer in answers_query_set:
if c_exec_o_list_regex.search(answer.name):
# CEO Answers
c_exec_o_filtered_answer = clean_text(
answer.question_answer_text)
c_exec_o_answer_list.append(
c_exec_o_filtered_answer)
c_exec_o_answer_length_list.append(len(
c_exec_o_filtered_answer))
# CEO Bigrams
c_exec_o_bigrams = list(ngrams(
c_exec_o_filtered_answer, 2))
c_exec_o_bigrams_strings = ["%s %s" % bigram for
bigram in c_exec_o_bigrams]
# CEO Trigrams
c_exec_o_trigrams = list(ngrams(
c_exec_o_filtered_answer, 3))
c_exec_o_trigrams_strings = ["%s %s %s" % trigram for
trigram in c_exec_o_trigrams]
with open('Prometheus/static/lexicons/negative_words.txt',
'r') as file:
lines = [line.strip() for line in file.readlines()]
if set(c_exec_o_filtered_answer).intersection(lines):
negative_intersection_length = \
find_number_of_shared_words(
c_exec_o_filtered_answer, lines,
number_of_shared_words)
c_exec_o_negative_words += \
negative_intersection_length
with open('Prometheus/static/lexicons/positive_words.txt',
'r') as file:
lines = [line.strip() for line in file.readlines()]
if set(c_exec_o_filtered_answer).intersection(lines):
positive_intersection_length = \
find_number_of_shared_words(
c_exec_o_filtered_answer, lines,
number_of_shared_words)
c_exec_o_positive_words += \
positive_intersection_length
with open('Prometheus/static/lexicons/general_knowledge.txt',
'r') as file:
lines = [line.strip().upper() for line in file.readlines()]
if set(c_exec_o_bigrams_strings).intersection(lines):
c_exec_o_bigram_refs_to_general_knowledge += 1
if set(c_exec_o_trigrams_strings).intersection(
lines):
c_exec_o_trigram_refs_to_general_knowledge += 1
with open('Prometheus/static/lexicons/shareholders_value.txt',
'r') as file:
lines = [line.strip().upper() for line in file.readlines()]
if set(c_exec_o_bigrams_strings).intersection(
lines):
c_exec_o_bigram_refs_to_shareholders_value += 1
if set(c_exec_o_trigrams_strings).intersection(
lines):
c_exec_o_trigram_refs_to_shareholders_value += 1
with open('Prometheus/static/lexicons/value_creation.txt',
'r') as file:
lines = [line.strip().upper() for line in
file.readlines()]
if set(c_exec_o_bigrams_strings).intersection(lines):
c_exec_o_bigram_refs_to_value_creation += 1
if c_financ_o_list_regex.search(answer.name):
# CFO Answers
c_financ_o_filtered_answer = clean_text(
answer.question_answer_text)
c_financ_o_answer_list.append(
c_financ_o_filtered_answer)
c_financ_o_answer_length_list.append(
len(c_financ_o_filtered_answer))
# CFO Bigrams
c_financ_o_bigrams = list(ngrams(
c_financ_o_filtered_answer, 2))
c_financ_o_bigrams_strings = ["%s %s" % bigram for
bigram in c_financ_o_bigrams]
# CFO Trigrams
c_financ_o_trigrams = list(ngrams(
c_financ_o_filtered_answer, 3))
c_financ_o_trigrams_strings = ["%s %s %s" % trigram for
trigram in c_financ_o_trigrams]
with open('Prometheus/static/lexicons/negative_words.txt',
'r') as file:
lines = [line.strip() for line in file.readlines()]
if set(c_financ_o_filtered_answer).intersection(lines):
negative_intersection_length = \
find_number_of_shared_words(
c_financ_o_filtered_answer, lines,
number_of_shared_words)
c_financ_o_negative_words += \
negative_intersection_length
with open('Prometheus/static/lexicons/positive_words.txt',
'r') as file:
lines = [line.strip() for line in file.readlines()]
if set(c_financ_o_filtered_answer).intersection(lines):
positive_intersection_length = \
find_number_of_shared_words(
c_financ_o_filtered_answer, lines,
number_of_shared_words)
c_financ_o_positive_words += \
positive_intersection_length
with open('Prometheus/static/lexicons/general_knowledge.txt',
'r') as file:
lines = [line.strip().upper() for line in file.readlines()]
if set(c_financ_o_bigrams_strings).intersection(lines):
c_financ_o_bigram_refs_to_general_knowledge += 1
if set(c_financ_o_trigrams_strings).intersection(lines):
c_financ_o_trigram_refs_to_general_knowledge += 1
with open('Prometheus/static/lexicons/shareholders_value.txt',
'r') as file:
lines = [line.strip().upper() for line in file.readlines()]
if set(c_financ_o_bigrams_strings).intersection(lines):
c_financ_o_bigram_refs_to_shareholders_value += 1
if set(c_financ_o_trigrams_strings).intersection(lines):
c_financ_o_trigram_refs_to_shareholders_value += 1
with open('Prometheus/static/lexicons/value_creation.txt',
'r') as file:
lines = [line.strip().upper() for line in file.readlines()]
if set(c_financ_o_bigrams_strings).intersection(lines):
c_financ_o_bigram_refs_to_value_creation += 1
c_exec_o_answer_length_sum = sum(c_exec_o_answer_length_list)
c_financ_o_answer_length_sum = sum(c_financ_o_answer_length_list)
    # In the following try/except blocks, I only account for the absence of the
    # CEO from the transcript; I want to be alerted to the absence of the CFO.
    # I know of transcripts in which the CEO does not speak, but I have not
    # found an instance in which a CFO does not speak on an earnings call, and
    # I cannot think of a logical reason for such an absence. The most
    # reasonable explanation for an error being raised here due to missing CFO
    # data is that I have not properly formatted the opening regex above to
    # account for different spellings of CFO names (Tom vs. Thomas, for
    # instance). Thus, I would like the program to throw an error so I can
    # adjust the preceding code as needed.
# Median
try:
c_exec_o_answer_length_median = median(
c_exec_o_answer_length_list)
except ValueError:
c_exec_o_answer_length_median = 0
c_financ_o_answer_length_median = median(c_financ_o_answer_length_list)
# Negative Words
try:
c_exec_o_negative_proportion = c_exec_o_negative_words / \
c_exec_o_answer_length_sum
except ZeroDivisionError:
c_exec_o_negative_proportion = 0
c_financ_o_negative_proportion = c_financ_o_negative_words / \
c_financ_o_answer_length_sum
# Positive Words
try:
c_exec_o_positive_proportion = c_exec_o_positive_words / \
c_exec_o_answer_length_sum
except ZeroDivisionError:
c_exec_o_positive_proportion = 0
c_financ_o_positive_proportion = c_financ_o_positive_words / \
c_financ_o_answer_length_sum
# General Knowledge
try:
c_exec_o_proportion_refs_to_general_knowledge = \
(c_exec_o_bigram_refs_to_general_knowledge +
c_exec_o_trigram_refs_to_general_knowledge) / \
len(c_exec_o_answer_list)
except ZeroDivisionError:
c_exec_o_proportion_refs_to_general_knowledge = 0
c_financ_o_proportion_refs_to_general_knowledge = \
(c_financ_o_bigram_refs_to_general_knowledge +
c_financ_o_trigram_refs_to_general_knowledge) / \
len(c_financ_o_answer_list)
# Shareholders Value
try:
c_exec_o_proportion_refs_to_shareholders_value = \
(c_exec_o_bigram_refs_to_shareholders_value +
c_exec_o_trigram_refs_to_shareholders_value) / \
len(c_exec_o_answer_list)
except ZeroDivisionError:
c_exec_o_proportion_refs_to_shareholders_value = 0
c_financ_o_proportion_refs_to_shareholders_value = \
(c_financ_o_bigram_refs_to_shareholders_value +
c_financ_o_trigram_refs_to_shareholders_value) / \
len(c_financ_o_answer_list)
# Value Creation
try:
c_exec_o_proportion_refs_to_value_creation = \
c_exec_o_bigram_refs_to_value_creation / \
len(c_exec_o_answer_list)
except ZeroDivisionError:
c_exec_o_proportion_refs_to_value_creation = 0
c_financ_o_proportion_refs_to_value_creation = \
c_financ_o_bigram_refs_to_value_creation / \
len(c_financ_o_answer_list)
# Pronouns
    c_exec_o_number_of_i_instances = 0
    c_financ_o_number_of_i_instances = 0
    c_exec_o_number_of_we_instances = 0
    c_financ_o_number_of_we_instances = 0
    c_exec_o_number_of_indefinite_instances = 0
    c_financ_o_number_of_indefinite_instances = 0
    i = re.compile("I[ ']")
    we = re.compile("WE[ ']")
    # Match indefinite pronouns (ANYBODY, EVERYONE, SOMETHING, NOBODY, etc.)
    # as well as EACH, EITHER and NEITHER.
    indefinite = re.compile(
        "(ANY|EVERY|SOME|NO)(BODY|ONE|THING) |EACH |N?EITHER")
#CEO
for answer in c_exec_o_answer_list:
string_answer = (' ').join(answer)
if i.search(string_answer):
c_exec_o_number_of_i_instances += len([m.start() for m in
re.finditer(i, string_answer)])
if we.search(string_answer):
c_exec_o_number_of_we_instances += len([m.start() for m in
re.finditer(we, string_answer)])
if indefinite.search(string_answer):
print(string_answer)
c_exec_o_number_of_indefinite_instances += len([m.start() for m in
re.finditer(indefinite, string_answer)])
print(c_exec_o_number_of_indefinite_instances)
try:
proportion_c_exec_o_number_of_i_instances = \
round(c_exec_o_number_of_i_instances/c_exec_o_answer_length_sum, 4)
except ZeroDivisionError:
proportion_c_exec_o_number_of_i_instances = 0
try:
proportion_c_exec_o_number_of_we_instances = \
round(c_exec_o_number_of_we_instances/c_exec_o_answer_length_sum, 4)
except ZeroDivisionError:
proportion_c_exec_o_number_of_we_instances = 0
try:
proportion_c_exec_o_number_of_indefinite_instances = \
round(c_exec_o_number_of_indefinite_instances/c_exec_o_answer_length_sum, 4)
except ZeroDivisionError:
proportion_c_exec_o_number_of_indefinite_instances = 0
#CFO
for answer in c_financ_o_answer_list:
string_answer = (' ').join(answer)
if i.search(string_answer):
c_financ_o_number_of_i_instances += len([m.start() for m in
re.finditer(i, string_answer)])
if we.search(string_answer):
c_financ_o_number_of_we_instances += len([m.start() for m in
re.finditer(we, string_answer)])
if indefinite.search(string_answer):
c_financ_o_number_of_indefinite_instances += len([m.start() for m in
re.finditer(indefinite, string_answer)])
proportion_c_financ_o_number_of_i_instances = \
round(c_financ_o_number_of_i_instances/
c_financ_o_answer_length_sum, 4)
proportion_c_financ_o_number_of_we_instances = \
round(c_financ_o_number_of_we_instances/
c_financ_o_answer_length_sum, 4)
proportion_c_financ_o_number_of_indefinite_instances = \
round(c_financ_o_number_of_indefinite_instances/
c_financ_o_answer_length_sum, 4)
template = 'index.html'
context = {
"search_view": True,
"corporation": corporation,
"date": transcript_date_for_display,
"cFo_median": c_financ_o_answer_length_median,
"cFo_sum": c_financ_o_answer_length_sum,
"cEo_median": c_exec_o_answer_length_median,
"cEo_sum": c_exec_o_answer_length_sum,
"cEo_negative": round(c_exec_o_negative_proportion, 4),
"cFo_negative": round(c_financ_o_negative_proportion, 4),
"cEo_positive": round(c_exec_o_positive_proportion, 4),
"cFo_positive": round(c_financ_o_positive_proportion, 4),
"cEo_knowledge": round(
c_exec_o_proportion_refs_to_general_knowledge, 4),
"cFo_knowledge": round(
c_financ_o_proportion_refs_to_general_knowledge, 4),
"cEo_shareholders_value": round(
c_exec_o_proportion_refs_to_shareholders_value, 4),
"cFo_shareholders_value": round(
c_financ_o_proportion_refs_to_shareholders_value, 4),
"cEo_value_creation": round(
c_exec_o_proportion_refs_to_value_creation, 4),
"cFo_value_creation": round(
c_financ_o_proportion_refs_to_value_creation, 4),
"cEo_I": proportion_c_exec_o_number_of_i_instances,
"cFo_I": proportion_c_financ_o_number_of_i_instances,
"cEo_we": proportion_c_exec_o_number_of_we_instances,
"cFo_we": proportion_c_financ_o_number_of_we_instances,
"cEo_indef": proportion_c_exec_o_number_of_indefinite_instances,
"cFo_indef": proportion_c_financ_o_number_of_indefinite_instances
}
return render(request, template, context)
def clean_text(question_answer_text):
tokenized_answer = word_tokenize(question_answer_text)
without_punctuation = re.compile('.*[A-Za-z0-9].*')
filtered_answer = [word.upper() for word in tokenized_answer if
without_punctuation.match(word)]
return filtered_answer
def find_number_of_shared_words(filtered_answer, lines,
number_of_shared_words):
shared_words = list()
for word in filtered_answer:
if word in lines:
shared_words.append(word)
number_of_shared_words += len(shared_words)
return number_of_shared_words
```
|
{
"source": "jeremyBanks-archive/chii",
"score": 3
}
|
#### File: chii/commands/retard.py
```python
from chii import config, command
BRAIN = config['retard_brain']
CHATTINESS = 0
WORD_COUNT = 10
WORD_MAX = 1000
SENTENCE_SEPS = ('. ', '! ', '? ', '\n')
if BRAIN:
import random, re, os
from collections import defaultdict
class MarkovChain:
chain = defaultdict(list)
def add_to_brain(self, line, write_to_file=False):
if write_to_file:
with open(BRAIN, 'a') as f:
f.write(line + '\n')
w1 = w2 = "\n"
for word in line.split(' '):
self.chain[(w1, w2)].append(word)
w1, w2 = w2, word
self.chain[w1, w2].append('\n')
def get_key(self, msg=None):
if msg and len(msg.split()) > 1:
words = msg.split()
w1, w2 = words[0:2]
for word in words:
if self.chain[(w1, w2)] != []:
return w1, w2
w1, w2 = w2, word
return random.choice(self.chain.keys())
def generate_sentence(self, msg):
sentence = ''
w1, w2 = self.get_key(msg)
for i in xrange(WORD_MAX):
try:
word = random.choice(self.chain[(w1, w2)])
except IndexError:
word = random.choice(self.chain[self.get_key()])
word = word.strip()
if not word:
break
sentence = ' '.join((sentence, word))
w1, w2 = w2, word
if len(sentence) < 20:
return self.generate_sentence(None)
return sentence
@command
def retard(self, channel, nick, host, *args):
msg = ' '.join(args)
def clean_sentence(sentence):
sentence = sentence.replace('"', '')
if sentence[-1] in (',', ';'):
sentence = sentence[:-1]
if sentence[-1] not in ('!', '.', '?'):
sentence += '.'
return sentence.upper()
prefix = "%s:" % nick
markov_chain.add_to_brain(msg, write_to_file=True)
return prefix + clean_sentence(markov_chain.generate_sentence(msg))
markov_chain = MarkovChain()
if os.path.exists(BRAIN):
with open(BRAIN) as f:
for line in f.readlines():
markov_chain.add_to_brain(line)
print 'Retard Brain Loaded'
```
#### File: chii/events/misc.py
```python
from chii import event
import random
@event('action')
def trout(self, channel, nick, host, action):
if 'trout' in action:
self.me(channel, 'slaps %s around with a large carp' % nick)
@event('msg')
def the_best(self, channel, nick, host, msg):
if (msg.startswith('who is') or msg.startswith('who the')) and (msg.endswith('best?') or msg.endswith('best')):
if self.config['owner'] in '!'.join((nick, host)):
if msg.startswith('who the'):
response = "%s's the best!" % nick
else:
response = '%s is the %s best!' % (nick, ' '.join(msg.split()[2:-1]))
else:
response = 'not you'
if self.nickname == channel:
self.msg(nick, response)
else:
self.msg(channel, response)
@event('msg')
def ya(self, channel, nick, host, msg):
if msg.strip() == 'ya':
self.msg(channel, 'ya')
@event('msg')
def xaimus(self, channel, nick, host, msg):
if 'xaimus' in msg:
self.msg(channel, 'huang')
@event('msg')
def muse(self, channel, nick, host, msg):
if 'muse' in msg:
self.msg(channel, 'U RANG %s' % nick)
@event('msg')
def cool(self, channel, nick, host, msg):
if msg.strip() == 'cool':
self.msg(channel, 'cool')
@event('msg')
def anders(self, channel, nick, host, msg):
ANDERS_IS_GAY = (
'haha what a fag',
'haha anders',
'what a gay',
'...eventually culminating in buttfuckery',
'oh no look anders got penis stuck in his face'
)
if 'anders' in msg.lower():
self.msg(channel, random.choice(ANDERS_IS_GAY))
```
#### File: quoth/quotes/models.py
```python
from django.db import models
class GetOrNoneManager(models.Manager):
"""Adds get_or_none method to objects"""
def get_or_none(self, **kwargs):
try:
return self.get(**kwargs)
except self.model.DoesNotExist:
return None
class Quote(models.Model):
nick = models.CharField(max_length=50)
host = models.CharField(max_length=100)
channel = models.CharField(max_length=50)
quote = models.TextField()
added = models.DateField()
objects = GetOrNoneManager()
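# Illustrative use of the custom manager (the lookup value is hypothetical):
#
#     quote = Quote.objects.get_or_none(nick='some_nick')
#
# returns the matching Quote, or None instead of raising Quote.DoesNotExist.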
```
|
{
"source": "jeremyBanks-archive/kia",
"score": 3
}
|
#### File: src/kia/json_serialization.py
```python
from __future__ import division, print_function, unicode_literals
import base64
import json
class JSONSerializer(object):
"""Provides JSON serialization for a set of classes."""
default_type_property = "__type"
default_indent = None
default_separators = (",", ":")
def __init__(self, types=None, type_property=None, indent=None,
separators=None, ensure_ascii=True, **root_options):
if types:
self.types = dict(types)
else:
self.types = None
self.root_options = root_options
        if type_property is None:
            self.type_property = self.default_type_property
        else:
            self.type_property = type_property
if separators is None:
separators = self.default_separators
if indent is None:
indent = self.default_indent
self.raw_encoder = json.JSONEncoder(
allow_nan=False,
sort_keys=True,
indent=indent,
ensure_ascii=ensure_ascii,
separators=separators,
default=self.produce_json_equivalent
)
self.raw_decoder = json.JSONDecoder(
object_hook=self.parse_json_equivalent,
parse_constant=self._parse_constant
)
def dump(self, o, fp):
fp.write(self.dumps(o))
def dumps(self, o):
return self.raw_encoder.encode(o)
def load(self, fp, req_type=None):
return self.loads(fp.read(), req_type)
def loads(self, s, req_type=None):
result = self.raw_decoder.decode(s)
if req_type is not None and not isinstance(result, req_type):
raise TypeError("Decoded JSON object does not match required type.")
return result
_constants = {
"true": True,
"false": False,
"null": None
}
def _parse_constant(self, name):
return self._constants[name]
def parse_json_equivalent(self, o):
if self.type_property in o and o[self.type_property] in self.types:
return (self.types[o[self.type_property]]
.from_json_equivalent(o))
else:
return o
def produce_json_equivalent(self, o, options=None):
for type_name, cls in self.types.items():
if isinstance(o, cls):
json_type = type_name
break
else:
return o
if hasattr(o, "to_dynamic_json_equivalent"):
if options is None:
options = self.root_options
def recur(o, **changes):
return self.produce_json_equivalent(o, dict(options, **changes))
result = o.to_dynamic_json_equivalent(recur, **options)
elif hasattr(o, "to_json_equivalent"):
result = o.to_json_equivalent()
else:
raise TypeError("{}s can not be JSON-serialized."
.format(type(o).__name__))
result[self.type_property] = json_type
return result
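# Minimal usage sketch (the Point class and its field names are hypothetical;
# only the JSONSerializer behaviour above is real). A registered type supplies
# to_json_equivalent() plus a from_json_equivalent() classmethod and is keyed
# by its type tag in the `types` mapping:
#
#     class Point(object):
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#         def to_json_equivalent(self):
#             return {"x": self.x, "y": self.y}
#         @classmethod
#         def from_json_equivalent(cls, o):
#             return cls(o["x"], o["y"])
#
#     serializer = JSONSerializer(types={"point": Point})
#     s = serializer.dumps(Point(1, 2))       # '{"__type":"point","x":1,"y":2}'
#     p = serializer.loads(s, req_type=Point)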
```
|
{
"source": "jeremybanks/ChatExchange",
"score": 3
}
|
#### File: stackchat/client/__init__.py
```python
import asyncio
from contextlib import contextmanager
import logging
import time
import random
import aiohttp
import sqlalchemy.orm
from .. import parse
from .._version import __version__
from .._util import async
from ..parse.json import events
from ..data import models, _seed
from . import _request
logger = logging.getLogger(__name__)
class _HttpClientSession(aiohttp.ClientSession):
def __init__(self, *a, **kw):
if 'connector' not in kw:
kw['connector'] = aiohttp.TCPConnector(limit_per_host=2)
super().__init__(*a, **kw)
def _request(self, method, url, **kwargs):
# see https://stackoverflow.com/a/45590516/1114
logger.debug('%s %r', method, url)
return super()._request(method, url, **kwargs)
class _SQLSession(sqlalchemy.orm.session.Session):
pass
class Client:
# Defaults used to control caching:
max_age_now = float('-inf')
max_age_current = 60 # one minute until a datum is no longer "current"
max_age_fresh = 60 * 60 * 4 # four hours until a datum is no longer "fresh"
max_age_alive = 60 * 60 * 24 * 64 # two months until a datum is no longer "alive"
max_age_dead = float('inf')
# These should be function options but we'll just set them here for now:
desired_max_age = max_age_fresh
required_max_age = max_age_dead
offline = False
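    # Illustrative reading of the tiers above (all values in seconds): an entity
    # whose meta_updated timestamp is two hours old has meta_update_age == 7200,
    # below max_age_fresh (14400), so the room()/message() accessors below return
    # the cached row as-is; beyond desired_max_age a network refresh is attempted,
    # and only data older than required_max_age is reported as a failed lookup.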
def __init__(self, db_path='sqlite:///:memory:', se_email=None, se_password=None):
self.sql_engine = sqlalchemy.create_engine(db_path)
if db_path.startswith('sqlite:'):
self._prepare_sqlite_hacks()
self._sql_sessionmaker = sqlalchemy.orm.sessionmaker(
bind=self.sql_engine,
expire_on_commit=False,
class_=_SQLSession)
self._init_db()
self._web_session = _HttpClientSession(
read_timeout=20,
raise_for_status=True,
headers={'User-Agent': f'stack.chat/{__version__} (+https://stack.chat)'}
)
self._request_throttle = async.Throttle(interval=0.5)
self._authenticated = asyncio.ensure_future(self._authenticate(se_email, se_password))
async def _authenticate(self, se_email, se_password):
so = self.server('so')
so_fkey = await _request.StackOverflowFKey.request(self, None)
        await _request.StackOverflowLogin.request(self, None, email=se_email, password=se_password, fkey=so_fkey.fkey)
# XXX: SE/MSE auth is currently broken
# mse_fkey = await _request.MetaStackExchangeFKey.request(self, None)
        # await _request.MetaStackExchangeLogin.request(self, None, email=se_email, password=se_password, fkey=mse_fkey.fkey)
def _init_db(self):
models.Base.metadata.create_all(self.sql_engine)
with self.sql_session() as sql:
for row in _seed.data():
try:
sql.add(row)
sql.commit()
except sqlalchemy.exc.IntegrityError:
sql.rollback()
continue
def _prepare_sqlite_hacks(self):
# via http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html
@sqlalchemy.event.listens_for(self.sql_engine, 'connect')
def do_connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
# enable foreign key constraint checking
# XXX: lol it already breaks us
# cursor = dbapi_connection.cursor()
# cursor.execute("PRAGMA foreign_keys=ON")
# cursor.close()
@sqlalchemy.event.listens_for(self.sql_engine, 'begin')
def do_begin(conn):
# emit our own BEGIN
conn.execute('BEGIN')
_closed = False
def close(self):
if self._closed: raise Exception('already closed')
self._web_session.close()
self._closed = True
def __enter__(self):
return self
def __exit__(self, *exc_details):
self.close()
@contextmanager
def sql_session(self):
if self._closed:
raise Exception('already closed')
session = self._sql_sessionmaker()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
def server(self, slug_or_host):
with self.sql_session() as sql:
server = sql.query(Server).filter(
(models.Server.slug == slug_or_host) |
(models.Server.host == slug_or_host)).one()
async def f():
await self._authenticated
return await _request.StackChatFKey.request(self, server)
server.set(
_client=self,
_fkey_request=asyncio.ensure_future(f()))
return server
@property
def se(self):
return self.server('se')
@property
def mse(self):
return self.server('mse')
@property
def so(self):
return self.server('so')
class Server(models.Server):
_client = None
_fkey_request = None
async def me(self):
await self._client._authenticated
raise NotImplementedError()
def _get_or_create_user(self, sql, user_id):
assert self.meta_id
assert user_id
user = sql.query(User).filter(
(models.User.server_meta_id == self.meta_id) &
(models.User.user_id == user_id)
).one_or_none()
if not user:
user = User(server_meta_id=self.meta_id, user_id=user_id)
sql.add(user)
sql.flush()
assert user.meta_id
user._server = self
return user
async def user(self, user_id):
await self._client._authenticated
with self._client.sql_session() as sql:
user = self._get_or_create_user(sql, user_id)
if user.meta_update_age < self._client.desired_max_age:
return user
if not self._client.offline:
NotImplemented
if user.meta_update_age <= self._client.required_max_age:
return user
logger.warning("%s failed to load user %s, %s > %s", self, user_id, user.meta_update_age, self._client.required_max_age)
return None
def _get_or_create_room(self, sql, room_id):
assert self.meta_id
assert room_id
room = sql.query(Room).filter(
(models.Room.server_meta_id == self.meta_id) &
(models.Room.room_id == room_id)
).one_or_none()
if not room:
room = Room(server_meta_id=self.meta_id, room_id=room_id)
sql.add(room)
sql.flush()
assert room.meta_id
room._server = self
return room
async def room(self, room_id):
await self._client._authenticated
with self._client.sql_session() as sql:
room = self._get_or_create_room(sql, room_id)
if room.meta_update_age < self._client.desired_max_age:
return room
if not self._client.offline:
transcript = await _request.TranscriptDay.request(self._client, self, room_id=room_id)
room = transcript.room
if room.meta_update_age <= self._client.required_max_age:
return room
logger.warning("%s failed to load room %s, %s > %s", self, room_id, room.meta_update_age, self._client.required_max_age)
return None
def _get_or_create_message(self, sql, message_id):
assert self.meta_id
assert message_id
message = sql.query(Message).filter(
(models.Message.server_meta_id == self.meta_id) &
(models.Message.message_id == message_id)
).one_or_none()
if not message:
message = Message(server_meta_id=self.meta_id, message_id=message_id)
sql.add(message)
sql.flush()
assert message.meta_id
message._server = self
return message
async def message(self, message_id):
await self._client._authenticated
with self._client.sql_session() as sql:
message = self._get_or_create_message(sql, message_id)
if message.meta_update_age < self._client.desired_max_age:
return message
if not self._client.offline:
transcript = await _request.TranscriptDay.request(self._client, self, message_id=message_id)
message = transcript.messages[message_id]
if message.meta_update_age <= self._client.required_max_age:
return message
logger.warning("%s failed to load message %s, %s > %s", self, message_id, message.meta_update_age, self._client.required_max_age)
return None
async def rooms(self):
await self._client._authenticated
logger.warning("Server.rooms() only checks locally for now.")
rooms = []
with self._client.sql_session() as sql:
query = sql.query(Room) \
.filter(models.Room.server_meta_id == self.meta_id) \
.order_by(models.Room.server_meta_id)
for room in query:
room._server = self
rooms.append(room)
return rooms
raise NotImplementedError()
response = self._client._web_session.get('https://%s/rooms?tab=all&sort=active&nohide=true' % (self.host))
class User(models.User):
_server = None
@property
def server(self):
return self._server
class Room(models.Room):
_server = None
@property
def server(self):
return self._server
async def old_messages(self):
await self._server._client._authenticated
response = await _request.RoomMessages.request(
self._server._client,
self._server,
room_id=self.room_id)
while True:
if response.messages:
messages = list(sorted(response.messages, key=lambda m: -m.message_id))
for message in messages:
yield message
response = await _request.RoomMessages.request(
self._server._client,
self._server,
room_id=self.room_id,
before_message_id=messages[-1].message_id)
else:
break
async def new_messages(self):
await self._server._client._authenticated
response = await _request.RoomWSAuth.request(
self._server._client,
self._server,
room_id=self.room_id)
url = response.data._data['url'] + '?l=0' # not sure what this is
logger.info(url)
async with self._server._client._web_session.ws_connect(url, headers={'Origin': 'https://chat.stackoverflow.com'}) as socket:
async for msg in socket:
if msg.type == aiohttp.WSMsgType.TEXT:
parsed = parse.WSEvents(msg.data)
for event in parsed.events:
if isinstance(event, events.MessagePosted):
data = event._data
if data['room_id'] != self.room_id:
continue
with self._server._client.sql_session() as sql:
message = self._server._get_or_create_message(sql, data['id'])
message.mark_updated()
message.content_html = data['content']
owner = self._server._get_or_create_user(sql, data['id'])
owner.mark_updated()
owner.name = data['user_name']
message.owner_meta_id = owner.meta_id
yield message
async def send_message(self, content_markdown):
await self._server._client._authenticated
response = await _request.SendMessage.request(
self._server._client,
self._server,
room_id=self.room_id,
text=content_markdown
)
assert response.id # handle failure for real once we've figured out how to succeed
class Message(models.Message):
_server = None
@property
def server(self):
return self._server
@property
def owner(self):
if self.owner_meta_id:
with self._server._client.sql_session() as sql:
user = sql.query(User).filter(models.User.meta_id == self.owner_meta_id).one()
user._server = self._server
return user
else:
return None
@property
def room(self):
return self._server.room(self.room_id)
async def replies(self):
logger.warning("Message.replies() only checks locally for now.")
with self._server._client.sql_session() as sql:
messages = list(
sql.query(Message)
.filter(models.Message.parent_message_id == self.message_id)
.order_by(models.Message.message_id))
for message in messages:
message._server = self._server
return messages
```
#### File: stackchat/cli/__init__.py
```python
import asyncio
import getpass
import importlib
import inspect
import logging
import os
import docopt
import toml
from .._version import __version__
from ..client import Client
logger = logging.getLogger(__name__)
def main(*argv):
opts = docopt.docopt(
__doc__.replace('stack.chat', argv[0]),
argv[1:],
True,
"stack.chat version %s" % (__version__),
True)
# docopt() will exit when it handles --version and --help for us.
    # we also alias them as these pseudo-commands.
if opts['COMMAND'] == 'version':
return main(argv[0], '--version')
if opts['COMMAND'] == 'help':
command_arg = opts['ARGS'][:1]
return main(argv[0], *command_arg, '--help')
if opts['--quiet']:
level = logging.ERROR
elif opts['--verbose']:
level = logging.DEBUG
else:
level = logging.WARNING
logging.basicConfig(format="%(e)32s %(relative)6s ms%(n)s%(levelled_name)32s %(message)s", level=level)
for handler in logging.getLogger().handlers:
handler.addFilter(Filter())
logger.debug("optparse opts: %s" % opts)
subcommand = opts['COMMAND']
subcommand_module = importlib.import_module('.' + subcommand, 'stackchat.cli')
no_chat = getattr(subcommand_module, 'NO_CHAT', False)
logger.debug('subcommand_module == %r', subcommand_module)
se_email, se_password = None, None
try:
with open(os.path.expanduser('~/.stack.chat.toml')) as f:
global_conf = toml.load(f)
logger.debug("read global config: %r", global_conf)
except IOError:
global_conf = {'credentials': {'stack-exchange': {}}}
try:
with open('./.stack.chat.toml') as f:
local_conf = toml.load(f)
logger.debug("read local config: %r", local_conf)
except IOError:
local_conf = {'credentials': {'stack-exchange': {}}}
if not se_email:
se_email = os.environ.get('STACK_EXCHANGE_EMAIL')
if not se_email:
se_email = local_conf['credentials']['stack-exchange'].get('email')
if not se_email:
se_email = global_conf['credentials']['stack-exchange'].get('email')
if not se_email:
se_email = os.environ.get('ChatExchangeU')
if not se_email:
se_email = input("stack exchange login email: ")
if not se_password:
se_password = os.environ.get('STACK_EXCHANGE_PASSWORD')
if not se_password:
se_password = local_conf['credentials']['stack-exchange'].get('password')
if not se_password:
se_password = global_conf['credentials']['stack-exchange'].get('password')
if not se_password:
se_password = os.environ.get('ChatExchangeP')
if not se_password:
se_password = getpass.getpass("stack exchange password: ")
db_path = 'sqlite:///./.stack.chat.sqlite'
# re-construct without flags we handle above
sub_argv = [argv[0], subcommand, *opts['ARGS']]
if getattr(subcommand_module, '__doc__', None):
sub_opts = docopt.docopt(
doc=subcommand_module.__doc__.replace('stack.chat', argv[0]),
argv=sub_argv[1:],
help=True,
version=None,
options_first=False)
logger.debug("subcommand optparse opts: %s" % opts)
else:
sub_opts = None
if not no_chat:
with Client(db_path, se_email, se_password) as chat:
r = subcommand_module.main(chat, sub_opts)
else:
r = subcommand_module.main(dict(locals()), sub_opts)
if inspect.iscoroutinefunction(subcommand_module.main):
asyncio.get_event_loop().run_until_complete(r)
class Filter(logging.Filter):
last = 0
def filter(self, record):
# see https://stackoverflow.com/a/43052949/1114
delta = record.relativeCreated - self.last
record.relative = '+{0:.0f}'.format(delta)
record.e = ''
record.n = '\n'
record.levelled_name = '%s/%-5s' % (record.name, record.levelname)
self.last = record.relativeCreated
return True
```
#### File: stackchat/data/models.py
```python
import datetime
import hashlib
import hmac
import sqlalchemy
import sqlalchemy.ext.declarative
from sqlalchemy import (Boolean, Column, DateTime, ForeignKey, Index, Integer,
String, UniqueConstraint)
import hashids
from .._util import obj_dict
from ._constants import *
_obfuscation_key = 'adbbf3aa342bc82736d0ee71b2a0650e05b2edd21082e1291ae161777550ba0c71002b9ce3ad7aa19c8a4641223f8f4e82bab7ebbf5335d01046cdc5a462bdfe'
class Base:
__tablename__ = None
meta_id = Column(Integer, primary_key=True)
meta_created = Column(DateTime, default=datetime.datetime.now)
meta_updated = Column(DateTime, default=EPOCH)
meta_deleted = Column(DateTime, default=None)
__init__ = obj_dict.update
set = obj_dict.updated
__repr__ = obj_dict.repr
def mark_updated(self):
self.meta_updated = datetime.datetime.now()
def mark_deleted(self):
if self.meta_deleted is None:
self.meta_deleted = datetime.datetime.now()
@property
def meta_update_age(self):
return (datetime.datetime.now() - self.meta_updated).total_seconds()
@property
def meta_slug(self):
"""
Produces a short obfuscated (NOT SECURE!) slug encoding .meta_id.
I like to use these so that we have an identifier for these instances
that is clearly not their official room/message IDs.
"""
        salt = ''.join(chr(n) for n in hmac.new(_obfuscation_key.encode(), self.__tablename__.encode(), hashlib.sha512).digest())
min_length = 4
slugger = hashids.Hashids(salt=salt, min_length=min_length)
        meta_slug, = slugger.encode(self.meta_id)
return meta_slug
@classmethod
def meta_id_from_meta_slug(cls, meta_slug):
        salt = ''.join(chr(n) for n in hmac.new(_obfuscation_key.encode(), cls.__tablename__.encode(), hashlib.sha512).digest())
min_length = 4
slugger = hashids.Hashids(salt=salt, min_length=min_length)
meta_id, = slugger.decode(meta_slug)
return meta_id
Base = sqlalchemy.ext.declarative.declarative_base(cls=Base)
class Server(Base):
__tablename__ = 'Server'
meta_id = Column(Integer, primary_key=True)
name = Column(String)
host = Column(String, nullable=False)
slug = Column(String, nullable=False)
_slug_is_unique = UniqueConstraint('slug')
class User(Base):
__tablename__ = 'User'
server_meta_id = Column(Integer, ForeignKey('Server.meta_id'))
user_id = Column(Integer, nullable=False)
name = Column(String)
about = Column(String)
is_moderator = Column(Boolean)
message_count = Column(Integer)
room_count = Column(Integer)
reputation = Column(Integer)
last_seen = Column(DateTime)
last_message = Column(DateTime)
__table_args__ = (
Index('ix_server_meta_id_user_id_name', server_meta_id, user_id, name),
UniqueConstraint('server_meta_id', 'user_id'),
)
class Room(Base):
__tablename__ = 'Room'
server_meta_id = Column(Integer, ForeignKey('Server.meta_id'))
room_id = Column(Integer, nullable=False)
name = Column(String)
default_access = Column(Integer)
ACCESS_PRIVATE = 0b_00000000
ACCESS_GALLERY = 0b_00000001
ACCESS_PUBLIC = 0b_00000011
__table_args__ = (
Index('ix_server_meta_id_room_id_name', server_meta_id, room_id, name),
UniqueConstraint('server_meta_id', 'room_id'),
)
class Message(Base):
__tablename__ = 'Message'
server_meta_id = Column(Integer, ForeignKey('Server.meta_id'))
room_meta_id = Column(Integer, ForeignKey('Room.meta_id'))
owner_meta_id = Column(Integer, ForeignKey('User.meta_id'))
message_id = Column(Integer, nullable=False)
parent_message_id = Column(Integer, ForeignKey('Message.message_id'), nullable=True)
content_html = Column(String)
content_text = Column(String)
content_markdown = Column(String)
__table_args__ = (
Index('ix_server_meta_id_message_id', server_meta_id, message_id),
Index('ix_parent_message_id_room_meta_id', parent_message_id, room_meta_id),
Index('ix_room_meta_idowner_meta_id_message_id', room_meta_id, owner_meta_id, message_id),
Index('ix_owner_meta_id_room_meta_id_message_id', owner_meta_id, room_meta_id, message_id),
UniqueConstraint('server_meta_id', 'message_id'),
)
```
|
{
"source": "jeremybennett/force-riscv",
"score": 2
}
|
#### File: examples/riscv/basic_random_01_force.py
```python
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
from base.InstructionMap import InstructionMap
from DV.riscv.trees.instruction_tree import *
class MainSequence(Sequence):
"""Generate a sequence of instructions that are randomly selected from the specified subset
of RISC-V instructions.
1 specify the number of instructions to be generated by setting instruction_count
    2 specify the desired subset by setting the instruction_group. Some of the predefined
instruction subsets are listed below with the statement for all but one commented out. Just
uncomment the subset you wish to use, while commenting out the others. For more details
on the specific instructions in each subset and a list of the other available predefined
subsets, see force/py/DV/riscv/trees/instruction_tree.py.
    3 if you want to generate a specific instruction, set the_instruction2 to the appropriate string and
use the_instruction2 in the genInstruction call.
"""
def generate(self, **kargs):
# 1 - set the number of instructions to generate,
# can configure via control file with generator option - 'instruction_count'
(count_opt, count_opt_valid) = self.getOption("instruction_count")
if count_opt_valid:
instruction_count = count_opt
else:
instruction_count = 100
# 2 - Choose the subset of RISCV instruction you wish to use
instruction_group = RV_G_instructions
# instruction_group = BranchJump_instructions
# instruction_group = LDST_All_instructions
# instruction_group = ALU_Int_All_instructions
# instruction_group = ALU_Float_All_instructions
# instruction_group = RV32I_instructions
# instruction_group = RV_A_instructions
# 3 - If you want to specify a specific instruction, set the_instruction2 to the appropriate string here
# and replace the argument in the genInstruction call to the_instruction2. For the string values to
# use for a given instruction, search for that instruction in force/py/DV/riscv/trees/instruction_tree.py.
# the_instruction2 = "ADD##RISCV"
for _ in range(instruction_count):
# select a specific instruction from the instruction group
the_instruction = self.pickWeighted(instruction_group)
# create the instruction
record_id = self.genInstruction(the_instruction)
## Points to the MainSequence defined in this file
MainSequenceClass = MainSequence
## Using GenThreadRISCV by default, can be overridden with extended classes
GenThreadClass = GenThreadRISCV
## Using EnvRISCV by default, can be overridden with extended classes
EnvClass = EnvRISCV
```
#### File: examples/riscv/um_sequences_01_force.py
```python
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
from DV.riscv.trees.instruction_tree import ALU_Int32_instructions
from DV.riscv.trees.instruction_tree import ALU_Float_Double_instructions
class MyMainSequence(Sequence):
# Main sequence which calls the other sequences.
def generate(self, **kwargs):
i100_seq = I100Sequence(self.genThread)
i100_seq.run()
f100_seq = F100Sequence(self.genThread)
f100_seq.run()
class I100Sequence(Sequence):
# generate 100 random integer 32 ALU ops
def generate(self, **kargs):
for _ in range(100):
the_instruction = self.pickWeighted(ALU_Int32_instructions)
self.genInstruction(the_instruction)
self.notice(">>>>> The instruction: {}".format(the_instruction))
class F100Sequence(Sequence):
# Generate 100 random floating point alu ops
def generate(self, **kargs):
for _ in range(100):
the_instruction = self.pickWeighted(ALU_Float_Double_instructions)
self.genInstruction(the_instruction)
self.notice(">>>>> The instruction: {}".format(the_instruction))
## Points to the MainSequence defined in this file
MainSequenceClass = MyMainSequence
## Using GenThreadRISCV by default, can be overridden with extended classes
GenThreadClass = GenThreadRISCV
## Using EnvRISCV by default, can be overridden with extended classes
EnvClass = EnvRISCV
```
#### File: py/base/Bitstream.py
```python
class Bitstream(object):
def __init__(self, bitstream=""):
self.bitstream = bitstream
def stream(self):
if self.bitstream.startswith('0x'):
            return self.convertHexToBin()
else:
return self.convert()
def append(self, bitstream):
self.bitstream += bitstream
return self
def prepend(self, bitstream):
self.bitstream = bitstream + self.bitstream
return self
def value(self):
val = ""
for i in self.bitstream:
if i == "1" or i == "0":
val += i
elif i == "X":
val += "0000"
elif i == "x":
val += "0"
return int(val, 2)
def mask(self):
m = ""
for i in self.bitstream:
if i == "1" or i == "0":
m += "1"
elif i == "X":
m += "0000"
elif i == "x":
m += "0"
return int(m, 2)
def valueMask(self):
val = ""
m = ""
for i in self.bitstream:
if i == "1" or i == "0":
val += i
m += "1"
elif i == "X":
val += "0000"
m += "0000"
elif i == "x":
val += "0"
m += "0"
val_str = "0x{0:x}".format(int(val,2))
m_str = "0x{0:x}".format(int(m,2))
return val_str+"/"+m_str
def convert(self):
stream = ""
for i in self.bitstream:
if i == "1" or i == "0" or i == "x":
stream += i
elif i == "X":
stream += "xxxx"
return stream
def bits(self, bits_string):
stream = ""
for item in bits_string.split(","):
if item.find("-") != -1:
myList = item.split("-")
stream += self.strWithin(int(myList[0]), int(myList[1]))
else:
stream += self.strAt(int(item))
return stream
def strWithin(self, begin, end):
stream = self.convert()
start = len(stream)-1-begin
stop = len(stream)-1-end
step = 1
if start > stop:
step = -1
stop -= 1
if stop <= -1:
stop = None
else:
stop += 1
return stream[start:stop:step]
def strAt(self, pos):
stream = self.convert()
if pos < len(stream) and pos >= 0:
return stream[len(stream)-1-pos]
return ""
def __getitem__(self, args):
if isinstance(args, str):
return self.bits(args)
#elif isinstance(args, int):
# return self.strAt(args)
return ""
def convertHexToBin(self):
stream = ""
for i in self.bitstream[2:]:
if i == "_":
continue
stream += "{0:04b}".format(int(i,16))
self.bitstream = stream
return stream
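# Worked example (illustrative): '1'/'0' are fixed bits, 'x' is a single
# don't-care bit, and 'X' expands to four don't-care bits.
#
#     bs = Bitstream("10xX")
#     bs.value()       # int('1000000', 2) == 64  (don't-care bits read as 0)
#     bs.mask()        # int('1100000', 2) == 96  (1s mark the fixed positions)
#     bs.valueMask()   # '0x40/0x60'
#
# Hex input is also accepted: Bitstream("0x_f").stream() == '1111'.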
```
#### File: base/exception_handlers/ExceptionHandlerAssignmentParser.py
```python
import json
import os
## This class parses JSON data files that map exception classes to default exception handler
# classes.
class ExceptionHandlerAssignmentParser(object):
## Parse the specified file and return a map from an exception class name and optional
# subexception class name to a tuple consisting of the module name and class name of an
# exception handler. Only two levels of exception codes are currently supported; files
# specifying more than two levels will not parse correctly.
#
# @param aAssignmentFilePath The path to the file to parse.
def parseHandlerAssignments(self, aAssignmentFilePath):
handler_assignments = {}
with open(aAssignmentFilePath) as assignment_file:
assignments = json.loads(assignment_file.read())
# We skip the first element, which is the license string
for assignment in assignments[1:]:
if 'ExceptionSubhandlers' in assignment:
subassignment_file_path = os.path.join(os.path.dirname(aAssignmentFilePath), assignment['ExceptionSubhandlers'])
handler_subassignments = self.parseHandlerAssignments(subassignment_file_path)
exception_class_name = assignment['ExceptionCode']
for ((subexception_class_name, _), (handler_module_name, handler_class_name)) in handler_subassignments.items():
handler_assignments[(exception_class_name, subexception_class_name)] = (handler_module_name, handler_class_name)
else:
exception_class_name = assignment['ExceptionCode']
handler_module_name = assignment['ExceptionHandler']['ExceptionHandlerModule']
handler_class_name = assignment['ExceptionHandler']['ExceptionHandlerClass']
handler_assignments[(exception_class_name, None)] = (handler_module_name, handler_class_name)
return handler_assignments
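# Sketch of the expected assignment-file layout, inferred from the parsing code
# above (the exception codes, module and class names shown are placeholders):
#
#     [
#         "license text ...",
#         {"ExceptionCode": "SomeException",
#          "ExceptionHandler": {"ExceptionHandlerModule": "SomeHandlersModule",
#                               "ExceptionHandlerClass": "SomeHandlerClass"}},
#         {"ExceptionCode": "SomeGroupedException",
#          "ExceptionSubhandlers": "sub_assignments.json"}
#     ]
#
# parseHandlerAssignments() returns a dict keyed by
# (exception_class_name, subexception_class_name or None), mapping to
# (handler_module_name, handler_class_name).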
```
#### File: base/exception_handlers/ReusableSequence.py
```python
from base.Sequence import Sequence
## This class is intended to serve as a base class for creating sequences that can be generated once
# and called as routines and/or branched to from multiple locations. The caller must ensure that the
# routine has been generated prior to attempting to call it or branch to it.
class ReusableSequence(Sequence):
def __init__(self, aGenThread, aFactory, aStack):
super().__init__(aGenThread)
self.mFactory = aFactory
self.mStack = aStack
self.mAssemblyHelper = self.mFactory.createAssemblyHelper(self)
self._mRoutineStartAddresses = {}
## Generate the specified routine and record its starting address. The routine will only be
# generated once, even if this method is called multiple times. If the specified routine is
# dependent on any other routines being generated first, this method will also generate those
# routines.
#
# @param aRoutineName The name of the routine to generate.
# @param kwargs The arguments to pass to the generate<RoutineName>() method.
def generateRoutine(self, aRoutineName, **kwargs):
routine_names = [aRoutineName]
for routine_name in routine_names:
for prereq_routine_name in self.getPrerequisiteRoutineNames(routine_name):
if prereq_routine_name not in routine_names:
routine_names.append(prereq_routine_name)
# Generate with the prerequisite routines first
for routine_name in reversed(routine_names):
if routine_name not in self._mRoutineStartAddresses:
self._generateValidatedRoutine(routine_name, **kwargs)
## Call a routine that has previously been generated. A branch with link instruction will be
# generated to jump to the routine.
#
# @param aRoutineName The name of the routine to call.
# @param aSaveRegIndices The indices of any registers that need to be preserved. The link
# register will always be preserved, so it should not be specified.
def callRoutine(self, aRoutineName, aSaveRegIndices=None):
save_reg_indices = []
if aSaveRegIndices:
save_reg_indices = aSaveRegIndices
routine_start_addr = self._mRoutineStartAddresses.get(aRoutineName)
if routine_start_addr is not None:
self.mStack.newStackFrame(save_reg_indices)
self.mAssemblyHelper.genRelativeBranchWithLinkToAddress(routine_start_addr)
self.mStack.freeStackFrame()
else:
raise ValueError('Routine %s has not been generated' % aRoutineName)
## Jump to a routine that has previously been generated. A branch instruction will be generated
# to jump to the routine.
#
# @param aRoutineName The name of the routine to jump to.
def jumpToRoutine(self, aRoutineName):
routine_start_addr = self._mRoutineStartAddresses.get(aRoutineName)
if routine_start_addr:
self.mAssemblyHelper.genRelativeBranchToAddress(routine_start_addr)
else:
raise ValueError('Routine %s has not been generated' % aRoutineName)
## Return whether the specified routine has been generated
#
# @param aRoutineName The name of the routine.
def hasGeneratedRoutine(self, aRoutineName):
return (aRoutineName in self._mRoutineStartAddresses)
## Return whether this object defines the specified routine.
#
# @param aRoutineName The name of the routine.
def hasRoutine(self, aRoutineName):
return (hasattr(self, ('generate%s' % aRoutineName)))
## Return the names of all routines that the specified routine directly depends on. This method
# is used to determine whether any other routines need to be generated prior to generating the
# specified routine.
def getPrerequisiteRoutineNames(self, aRoutineName):
return tuple()
## Generate the specified routine and record its starting address. This method should only be
# called after ensuring the routine has not been previously generated and ensuring all
# prerequisite routines have already been generated.
#
# @param aRoutineName The name of the routine to generate.
# @param kwargs The arguments to pass to the generate<RoutineName>() method.
def _generateValidatedRoutine(self, aRoutineName, **kwargs):
self._mRoutineStartAddresses[aRoutineName] = self.getPEstate('PC')
routine_gen_method = self.__getattribute__('generate%s' % aRoutineName)
routine_gen_method(**kwargs)
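# Minimal subclass sketch (routine names are hypothetical) showing the intended
# pattern: one generate<Name>() method per routine, optional prerequisites, and
# generateRoutine() before callRoutine()/jumpToRoutine():
#
#     class MyRoutines(ReusableSequence):
#         def getPrerequisiteRoutineNames(self, aRoutineName):
#             return ('Helper',) if aRoutineName == 'Outer' else tuple()
#
#         def generateHelper(self, **kwargs):
#             ...  # emit the helper routine's instructions
#
#         def generateOuter(self, **kwargs):
#             self.callRoutine('Helper')  # Helper was generated beforehand
#
#     # my_routines.generateRoutine('Outer') generates Helper first, then Outer;
#     # my_routines.callRoutine('Outer') later branches-with-link to it.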
```
#### File: base/exception_handlers/ThreadHandlerSet.py
```python
from base.Sequence import Sequence
import copy
#-------------------------------------------------------------------------------------------------------
# ThreadHandlerSet
#
# this class is responsible for generating specific exception handlers for all
# exception-level/exception-vector-offsets/error-codes.
#
# ASSUME: this is called for each thread (or set of threads, say a pool of handlers)
#
# process:
#
# foreach exception level:
# pick sync-exceptions exc handler table address
#
# pick exception-handlers code block address
#
# exc_handlers = ExceptionHandlers(exc-level, handlers-code-block-addr, 64k, SP_index)
#
# foreach sync-excep error-code:
# NOTE: not generating unique sync exc handler for each vector offset (but we could)
# exc handler table[error-code] = exc_handlers.generate(0, err_code)
#
# NOTE: only one sync-exc dispatcher, for all vector offsets
#
# generate sync exc dispatch code(exc-level, exc handler table address)
#
# pick, set vector base address register value
#
# foreach exc vector offset:
# if sync-offset:
# branch to dispatcher code; exception handlers already generated
# generate-branch-to(vector base address + offset, exc dispatch code)
# else:
# generate async exc handler here for this exc vector offset
# async_handler_address = exc_handlers.generate(offset)
# branch directly to the handler
# generate-branch-to(vector base address + offset, async_handler_address)
#-------------------------------------------------------------------------------------------------------
class ThreadHandlerSet(Sequence):
def __init__(self, gen_thread, memBankHandlerRegistryRepo, factory, exceptionsStack):
super().__init__(gen_thread)
self.memBankHandlerRegistryRepo = memBankHandlerRegistryRepo
self.factory = factory
self.exceptions_stack = exceptionsStack
self.vector_offset_tables = {}
self.handler_memory = {}
self.scratch_registers = None # all generated handlers in set will use the same set of scratch registers
self.default_set_name = None # may be 'Fast', 'Comprehensive', etc.
self.user_sync_dispatcher = None
self.memBankHandlerRegistries = None
self.address_table = None
self.priv_level_handler_sets = {}
for priv_level in self.getPrivilegeLevels():
self.priv_level_handler_sets[priv_level] = self.factory.createPrivilegeLevelHandlerSet(gen_thread, priv_level, memBankHandlerRegistryRepo, exceptionsStack)
def generate(self, **kwargs):
self.address_table = kwargs['address_table'] # handlers can use address table to get recovery address
self.memBankHandlerRegistries = self.memBankHandlerRegistryRepo.getMemoryBankHandlerRegistries()
for mem_bank_handler_registry in self.memBankHandlerRegistries:
self.debugPrint('MEMORY POOL ADDR: (%s) 0x%x' % (mem_bank_handler_registry.mMemBank, mem_bank_handler_registry.mStartAddr))
self.handler_memory[mem_bank_handler_registry.mMemBank] = mem_bank_handler_registry.mStartAddr
self.default_set_name = kwargs['default_set_name'] # default handler set impacts scratch registers, handler generation
# generate exception handlers, vector offset branches, etc. for all
# exception levels/memory-bank combinations
self._genExcepHandlerCombos()
# Notify the backend about the generated handlers and their addresses
info_set = {}
address_pair_format = '%s:%s:%s'
for mem_bank_handler_registry in self.memBankHandlerRegistries:
handler_boundaries = ''
for (handler_name, handler_start_addr, handler_end_addr) in mem_bank_handler_registry.getHandlerBoundaries():
handler_boundaries += address_pair_format % (handler_name, handler_start_addr, handler_end_addr)
handler_boundaries += ';'
# Trim the last separator
handler_boundaries = handler_boundaries.rstrip(';')
info_set[('%s_bounds' % mem_bank_handler_registry.mMemBank.name)] = handler_boundaries
info_set['Function'] = 'RecordExceptionSpecificAddressBounds'
self.exceptionRequest('UpdateHandlerInfo', info_set)
# register any custom exception handlers BEFORE generate is called
def assignSynchronousExceptionHandler(self, aAssignmentRequest):
for priv_level in aAssignmentRequest.mPrivLevels:
self.debugPrint('[ThreadHandlerSet:assignSynchronousExceptionHandler] priv_level: %s, exception_class: %s, handler_class_name: %s\n' % (priv_level, aAssignmentRequest.mExcClass, aAssignmentRequest.mHandlerClassName))
self.priv_level_handler_sets[priv_level].assignSynchronousExceptionHandler(aAssignmentRequest)
def assignAsynchronousExceptionHandler(self, aHandlerClassName):
for priv_level in self.getPrivilegeLevels():
self.priv_level_handler_sets[priv_level].assignAsynchronousExceptionHandler(aHandlerClassName)
## return set of scratch (gpr) registers for a handler set.
## NOTE: call this method after handlers are generated
def getScratchRegisterSets(self):
raise NotImplementedError
def getVectorBaseAddressSets(self):
raise NotImplementedError
def getVectorBaseAddress(self, privLevel, securityState):
return self.vector_offset_tables[(privLevel, securityState)]
def savePrivilegeLevel(self):
raise NotImplementedError
# set privilege level. may affect translation
def setPrivilegeLevel(self, newSecurityState):
raise NotImplementedError
def restorePrivilegeLevel(self):
raise NotImplementedError
def getPrivilegeLevels(self):
raise NotImplementedError
def getSupportedSecurityStates(self, aPrivLevel):
raise NotImplementedError
def getMemoryBanks(self):
raise NotImplementedError
def getVectorTableSize(self):
raise NotImplementedError
def getVectorOffsetIncrement(self):
raise NotImplementedError
def isSynchronousVectorEntry(self, aVectorOffset):
raise NotImplementedError
def getVectorEntryErrorCode(self):
raise NotImplementedError
# use this method to lay down a relative branch
def genRelativeBranchAtAddr(self, br_address, br_target_address):
raise NotImplementedError
def genRelativeBranch(self, br_target_address):
raise NotImplementedError
def fastMode(self):
return self.default_set_name == 'Fast'
def debugPrint(self, msg):
self.debug('DEBUG [ThreadHandlerSet]: %s' % msg)
# Generate a minimal number of handler sets: one set for each memory bank. Then map the handler
# sets to each privilege level/security state combination. This is done to minimize the amount
# of instructions and memory the exception handler sets will take.
def _genExcepHandlerCombos(self):
self.savePrivilegeLevel()
# Reverse the order of privilege levels to start with the highest, so that we can do the
# bulk of the generation with full permissions.
for priv_level in reversed(self.getPrivilegeLevels()):
for security_state in self.getSupportedSecurityStates(priv_level):
self.setPrivilegeLevel(security_state)
self._genPrivilegeLevelSecurityStateHandlerSet(priv_level, security_state)
if self.fastMode():
self._reserveScratchRegisters()
self.restorePrivilegeLevel()
def _genPrivilegeLevelSecurityStateHandlerSet(self, privLevel, securityState):
default_mem_bank = securityState.getDefaultMemoryBank()
priv_level_security_state = (privLevel, securityState)
# exception vectors and handlers all in same block of memory, to allow PC-relative branches
# to be used at each exception vector.
vector_base_address = self.getNextVectorBaseAddress(self.handler_memory[default_mem_bank])
self.vector_offset_tables[priv_level_security_state] = vector_base_address
self.handler_memory[default_mem_bank] = vector_base_address + self.getVectorTableSize()
self.debug('HANDLER MEM(%s): 0x%x' % (priv_level_security_state, self.handler_memory[default_mem_bank]))
priv_level_handler_set = self.priv_level_handler_sets[privLevel]
priv_level_handler_set.generate(address_table=self.address_table,
handler_memory=copy.deepcopy(self.handler_memory),
scratch_regs=self.scratch_registers,
default_set_name=self.default_set_name)
priv_level_handler_set.setupScratchRegisters()
self.scratch_registers = priv_level_handler_set.scratchRegisters()
priv_level_handler_set.generateHandlerSubroutines(securityState)
if self.fastMode():
if self.user_sync_dispatcher is not None:
sync_dispatch_addr = priv_level_handler_set.generateUserSyncDispatch(securityState, self.user_sync_dispatcher)
else:
sync_dispatcher = self.factory.createDefaultFastSynchronousExceptionDispatcher(self.genThread)
sync_dispatch_addr = priv_level_handler_set.generateSynchronousHandlers(securityState, sync_dispatcher)
else:
sync_dispatcher = self.factory.createDefaultSynchronousExceptionDispatcher(self.genThread)
sync_dispatch_addr = priv_level_handler_set.generateSynchronousHandlers(securityState, sync_dispatcher)
for mem_bank in self.getMemoryBanks():
self.handler_memory[mem_bank] = priv_level_handler_set.getNextCodeAddress(mem_bank)
# at each exception vector offset, generate branch to either the synchronous exception
# dispatcher, or to an asynchronous exception handler
vector_base_addr = self.vector_offset_tables[priv_level_security_state]
for vec_offset in range(0, self.getVectorTableSize(), self.getVectorOffsetIncrement()): # for each exception vector offset
branch_addr = vector_base_addr + vec_offset
if self.isSynchronousVectorEntry(vec_offset):
self.notice('EXCEPTION HANDLER: sync vector base 0x%x, offset 0x%x, set %s/%s' % (vector_base_addr, vec_offset, privLevel, securityState))
self.debugPrint('%s VECTOR SYNC OFFSET 0x%x, BR ADDR: 0x%x, DISPATCH ADDR: 0x%x' %
(priv_level_security_state, vec_offset, branch_addr, sync_dispatch_addr))
self.genRelativeBranchAtAddr(branch_addr, sync_dispatch_addr)
self._recordSpecificHandlerBoundary(default_mem_bank, self.getVectorEntryErrorCode(), branch_addr, branch_addr)
else:
priv_level_handler_set.generateAsynchronousHandler(securityState)
self.notice('EXCEPTION HANDLER: async vector base 0x%x, offset 0x%x, set %s/%s' % (vector_base_addr, vec_offset, privLevel, securityState))
self.debugPrint('%s VECTOR ASYNC OFFSET 0x%x, BR ADDR: 0x%x' %
(priv_level_security_state, vec_offset, branch_addr))
save_pc = self.getPEstate('PC')
self.setPEstate('PC', branch_addr)
priv_level_handler_set.genJumpToAsynchronousHandler(securityState)
self.setPEstate('PC', save_pc)
self._recordSpecificHandlerBoundary(default_mem_bank, self.getVectorEntryErrorCode(), branch_addr, branch_addr)
# TODO(Noah): Try just collecting the handler boundaries for the memory bank corresponding
# to memBank after getting this class working correctly. As this method is only
# generating handlers for one memory bank, it seems like it should only be necessary to
# collect the handler boundaries for one memory bank.
# collect the handler boundaries from the PrivilegeLevelHandlerSets.
for mem_bank_handler_registry in self.memBankHandlerRegistries:
for (handler_name, handler_start_addr, handler_end_addr) in priv_level_handler_set.getHandlerBoundaries(mem_bank_handler_registry.mMemBank):
self._recordSpecificHandlerBoundary(mem_bank_handler_registry.mMemBank, handler_name, handler_start_addr, handler_end_addr)
# reserve registers (if required) only after all handlers, all modes, have been generated
def _reserveScratchRegisters(self):
for scratch_reg in self.scratch_registers:
self.reserveRegisterByIndex(64, scratch_reg, 'GPR', 'ReadWrite')
def _recordSpecificHandlerBoundary(self, memBank, handler_name, start_addr, end_addr):
mem_bank_handler_registry = self.memBankHandlerRegistryRepo.getMemoryBankHandlerRegistry(memBank)
mem_bank_handler_registry.addHandlerBoundary(handler_name, start_addr, end_addr)
```
#### File: py/base/FunctionWrapper.py
```python
from base.SortableObject import SortableObject
#
# Provide a FunctionWrapper to wrap a non-sortable object
# so it can be sortable
#
class FunctionWrapper(SortableObject):
def __init__(self,function):
super().__init__()
self.function = function
self.sortableName = str(function)
def returnFunction(self):
return self.function
def display(self):
print ("function name=%s"%self.sortableName)
```
#### File: py/base/GenSemaphore.py
```python
from base.Sequence import Sequence
class GenSemaphore(Sequence):
def __init__(self, aGenThread, aName, aCounter, **kwargs):
super().__init__(aGenThread)
self.mAddrReg = None # register for address
self.mCounterReg = None # register for counter
self.mStatusReg = None # register for status
self.mSemaVA = None # semaphore virtual address
self.mVaAttr = kwargs.get('MemAttrImpl', 'Normal_WBWA') # semaphore va attribute
self.mName = aName # semaphore name
self.mCounter = aCounter # semaphore initial value
self.mBank = kwargs.get('Bank', 0) # which bank to allocate semaphore
self.mSize = kwargs.get('Size', 8) # semaphore size
        self.mSharedPA = None             # physical address allocated
self.mReverseEndian = None # Whether or not Reverse data endian
self.setup()
def _acquireSemaphore(self):
pass
def _releaseSemaphore(self):
pass
def _reloadSemaphore(self):
(self.mSharedPA, self.mReverseEndian, valid) = self.genThread.genSemaphore(self.mName, self.mCounter, self.mBank, self.mSize) # Shared PA has been initialized with the counter
if not valid:
self.error("Thread %d failed to generate semaphore as the PA 0x%x is out of address size" %(self._threadId(), self.mSharePA))
self.mSemaVA = self.genVAforPA(Size=self.mSize, Align=self.mSize, Type="D",
PA = self.mSharedPA, Bank=self.mBank, MemAttrImpl=self.mVaAttr, CanAlias=1)
if (self.mSemaVA & 0x00ffffffffff0000) == 0:
self.error("ERROR VA=%x is invalid"%self.mSemaVA)
shared_va_page_info = self.getPageInfo(self.mSemaVA, "VA", self.mBank)
if not shared_va_page_info["Page"]["MemoryAttr"] == self.mVaAttr:
self.error("ERROR VA=%x is set to %s instead of %s"%(self.mSemaVA,shared_va_page_info["Page"]["MemoryAttr"],self.mVaAttr))
self.notice("Thread %d map va 0x%x to [%d] pa 0x%x" % (self._threadId(), self.mSemaVA, self.mBank, self.mSharedPA))
load_gpr = LoadGPR64(self.genThread)
load_gpr.load(self.mAddrReg, self.mSemaVA)
def _threadId(self):
return self.genThread.genThreadID
def _handleLowPower(self):
gen_mode = self.getPEstate("GenMode")
while gen_mode & (1 << 9): # low power mode
restart_pc = self.getPEstate("PC")
gen_mode &= ~(1 << 9)
self.setPEstate("GenMode", gen_mode)
self.genSequence("ReExecution", {"Address" : restart_pc})
gen_mode = self.getPEstate("GenMode")
```
#### File: py/base/ItemMap.py
```python
from base.SortableObject import SortableObject
## A thin wrapper around an item dict, so that it can be used as a dict key
#
class ItemMap(SortableObject):
def __init__(self, aName, aItemDict):
super().__init__()
self._mItemDict = aItemDict
self._mSortableName = aName
def pick(self, aGenThread):
return aGenThread.pickWeighted(self._mItemDict)
def getPermutated(self, aGenThread, skip_weight_check=False):
return aGenThread.getPermutated(self._mItemDict, skip_weight_check)
def __add__(self, aOther):
if self.isCompatible(aOther):
newDict = {k:v for k, v in self._mItemDict.items()}
newDict.update(aOther._mItemDict)
            return self.__class__(self._mSortableName + aOther._mSortableName, newDict)
else:
return NotImplemented
def __sub__(self, aOther):
if self.isCompatible(aOther):
return self.substract(aOther)
else:
return NotImplemented
def itemType(self):
return NotImplemented
def isCompatible(self, aOther):
return isinstance(aOther, ItemMap)
    ## Return a string description of the ItemMap
def __str__(self):
return "%sMap(%s)" % (self.itemType(), self._mSortableName)
def toSimpleString(self):
simple_string = " %s map name: " % self.itemType() + self._mSortableName +"\n"
if len(self._mItemDict) == 0:
raise Exception("Empty %s map %s", (self.itemType(), self._mSortableName))
for k,v in self._mItemDict.items():
if isinstance(k, str):
simple_string += " (key:value) = (%s : %d)" % (k, v)
elif isinstance(k, ItemMap):
simple_string += k.toSimpleString()
else:
return NotImplemented
return simple_string
def substract(self, aOther):
self_set = set()
self.getItemIdSet(self_set)
other_set = set()
aOther.getItemIdSet(other_set)
sub_set = self_set - other_set
if len(sub_set) == 0:
return None
if (sub_set == self_set):
return self
return self.clone(sub_set)
# get all item IDs and push them to set
def getItemIdSet(self, aIdSet):
for k, v in self._mItemDict.items():
if isinstance(k, str):
aIdSet.add(k)
elif isinstance(k, ItemMap):
k.getItemIdSet(aIdSet)
else:
return NotImplemented
def clone(self, aIdSet):
sort_name = self._mSortableName
item_dict = self.deepCopyDict(aIdSet)
if len(item_dict) == 0:
return None
return self.__class__(sort_name, item_dict)
def deepCopyDict(self, aIdSet):
copy_dict = {}
for k, v in self._mItemDict.items():
if isinstance(k, ItemMap):
cloned_map = k.clone(aIdSet)
if cloned_map:
copy_dict[cloned_map] = v
elif isinstance(k, str):
if k in aIdSet:
copy_dict[k] = v
else:
return NotImplemented
return copy_dict
def size(self, skip_weight_check=False):
size = 0
for k, v in self._mItemDict.items():
if isinstance(k, ItemMap):
size += k.size(skip_weight_check)
elif isinstance(k, str):
if skip_weight_check:
size += 1
elif v > 0:
size += 1
else:
return NotImplemented
return size
## A thin wrapper around a register dict, so that it can be used as a dict key
#
class RegisterMap(ItemMap):
def __init__(self, aName, aItemDict):
super().__init__(aName, aItemDict)
def itemType(self):
return "Register"
def isCompatible(self, aOther):
return isinstance(aOther, RegisterMap)
```
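A small sketch of how these maps compose. It is a guess at typical usage that exercises only the methods defined above; `pick()` and `getPermutated()` are omitted because they need a generator thread, and the register names and weights are placeholders.

```python
# Hypothetical usage sketch for the ItemMap/RegisterMap classes defined above.
from base.ItemMap import RegisterMap

gprs = RegisterMap("GPR_subset", {"x5": 10, "x6": 10, "x7": 0})
fprs = RegisterMap("FPR_subset", {"f0": 5, "f1": 5})

combined = gprs + fprs                          # same-type maps can be merged with '+'
print(combined)                                 # RegisterMap(GPR_subsetFPR_subset)
print(combined.size())                          # 4 -- zero-weight entries are skipped
print(combined.size(skip_weight_check=True))    # 5 -- count entries regardless of weight
print(combined.toSimpleString())                # dump the (key : weight) pairs
```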
#### File: riscv/sequences/BasicSequences.py
```python
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
from DV.riscv.trees.instruction_tree import ALU_Int_All_instructions
from DV.riscv.trees.instruction_tree import LDST_All_instructions
class Bunch_of_ALU_Int(Sequence):
def generate(self, **kwargs):
self.notice("Generating in 'Bunch_of_ALU_Int'")
for _ in range(self.random32(5, 20)):
instr = self.pickWeighted(ALU_Int_All_instructions)
self.genInstruction(instr)
class Bunch_of_LDST(Sequence):
def generate(self, **kwargs):
self.notice('Generating in "Bunch_of_LDST"')
for _ in range(self.random32(5, 20)):
instr = self.pickWeighted(LDST_All_instructions)
instr_rec_id = self.genInstruction(instr)
```
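For context, a sketch of how such reusable sequences could be pulled into a test template, following the MainSequence/GenThreadClass/EnvClass pattern used by the other riscv test files in this document. The import path for BasicSequences is an assumption, and invoking the sub-sequences by calling `generate()` directly is only one possible way to compose them.

```python
# Hypothetical test template sketch; only names shown elsewhere in this document are
# used, except the BasicSequences import path, which is an assumption.
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
from DV.riscv.sequences.BasicSequences import Bunch_of_ALU_Int, Bunch_of_LDST

class MainSequence(Sequence):
    def generate(self, **kwargs):
        # Alternate the two reusable sequences a few times.
        for _ in range(self.random32(2, 4)):
            Bunch_of_ALU_Int(self.genThread).generate()
            Bunch_of_LDST(self.genThread).generate()

MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
```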
#### File: riscv/exception_handlers/AsynchronousHandlers.py
```python
from base.exception_handlers.ReusableSequence import ReusableSequence
#-------------------------------------------------------------------------------------------------------
# AsynchronousHandlerRISCV - service asynchronous exceptions, ie, interrupts...
#-------------------------------------------------------------------------------------------------------
class AsynchronousHandlerRISCV(ReusableSequence):
def __init__(self, aGenThread, aFactory, aStack):
super().__init__(aGenThread, aFactory, aStack)
def generateHandler(self, **kwargs):
pass # TODO
```
#### File: riscv/exception_handlers/PrivilegeLevelHandlerSetRISCV.py
```python
from base.exception_handlers.PrivilegeLevelHandlerSet import PrivilegeLevelHandlerSet
from riscv.SecurityState import SecurityStateRISCV
from riscv.exception_handlers.ExceptionClass import ExceptionClassRISCV
from riscv.exception_handlers.ExceptionHandlerContext import ExceptionHandlerContext, ComprehensiveExceptionHandlerContext
class PrivilegeLevelHandlerSetRISCV(PrivilegeLevelHandlerSet):
def __init__(self, gen_thread, privLevel, memBankHandlerRegistryRepo, factory, exceptionsStack):
super().__init__(gen_thread, privLevel, memBankHandlerRegistryRepo, factory, exceptionsStack)
self.handlersBoundaries = []
#------------------------------------------------------------------------
# pick scratch registers to use in exception handlers
#------------------------------------------------------------------------
def setupScratchRegisters(self):
if len(self.scratchRegs) > 0:
return
# Exclude the zero register, implied register operands, the stack pointer and the address
# table pointer
excluded_regs = '0,1,2,%d' % self.address_table.tableIndex()
if not self.fastMode():
excluded_regs = '%d,%s' % (self.exceptions_stack.pointerIndex(), excluded_regs)
self.scratchRegs = self.getRandomGPRs(self._scratchRegisterCount(), exclude=excluded_regs)
if not self.scratchRegs:
raise RuntimeError('Unable to allocate scratch registers required by exception handlers.')
def generateHandlerSubroutines(self, aSecurityState):
if not self.fastMode():
default_mem_bank = aSecurityState.getDefaultMemoryBank()
save_pc = self.getPEstate("PC")
start_addr = self.nextCodeAddresses[default_mem_bank]
self.setPEstate("PC", start_addr)
mem_bank_handler_registry = self.memBankHandlerRegistryRepo.getMemoryBankHandlerRegistry(default_mem_bank)
handler_context = self.createExceptionHandlerContext(0, default_mem_bank)
mem_bank_handler_registry.mHandlerSubroutineGenerator.generateRoutine('TableWalk', handler_context=handler_context)
end_addr = self.getPEstate("PC")
self.setPEstate("PC", save_pc)
self.nextCodeAddresses[default_mem_bank] = end_addr
def getHandlerBoundaries(self, mem_bank):
return self.handlersBoundaries
def getSecurityStates(self):
return tuple(SecurityStateRISCV)
def getExceptionCodeClass(self):
return ExceptionClassRISCV
def recordSpecificHandlerBoundary(self, mem_bank, handler_name, start_addr, end_addr):
self.handlersBoundaries.append((handler_name, start_addr, end_addr))
def getAsynchronousHandlerErrorCode(self):
return 63
def getDispatchErrorCode(self):
return 64
def createExceptionHandlerContext(self, err_code, mem_bank):
mem_bank_handler_registry = self.memBankHandlerRegistryRepo.getMemoryBankHandlerRegistry(mem_bank)
if self.fastMode():
handler_context = ExceptionHandlerContext(err_code, self.scratchRegs, self.privLevel.name, self.exceptions_stack, self.address_table, mem_bank_handler_registry)
else:
handler_context = ComprehensiveExceptionHandlerContext(err_code, self.scratchRegs, self.privLevel.name, self.exceptions_stack, self.address_table, mem_bank_handler_registry)
return handler_context
def getInstructionLength(self):
return 4
```
#### File: riscv/APIs/api_genPA_01_force.py
```python
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
class MainSequence(Sequence):
"""Exercise different combinations of values for the parameters for the genPA instruction.
Focus in this test is to try values of the Size, Align and CanAlias parameters.
Type is always 'D'; Bank is always '0'.
"""
def generate(self, **kargs):
ldstr_byte_ops = ['LB##RISCV', 'SB##RISCV']
ldstr_half_ops = ['LH##RISCV', 'SH##RISCV']
ldstr_word_ops = ['LW##RISCV', 'SW##RISCV']
ldstr_double_ops = ['LD##RISCV', 'SD##RISCV']
theType = 'D'
theBank = 0
theCanAlias = 0
loopCount = 1
set_of_PAs = set()
# Iterate through Size and Align values. Force requires Align to be a power of 2.
        # This 1st block tests smaller values of size - 1 byte to 16 bytes.
for theSize in [2 ** x for x in range(0, 5)]:
for theAlign in [2 ** x for x in range(0, 16)]:
if theAlign < theSize: continue
for _ in range(loopCount):
rand_PA = self.genPA(Size=theSize, Align=theAlign, Type=theType, Bank=theBank, CanAlias=theCanAlias)
if rand_PA in set_of_PAs:
self.error(">>>>>>>>> Error -- Received a duplicate PA from self.genPA.")
else:
set_of_PAs.add(rand_PA)
# self.notice(">>>>>> set_of_PAs: {}".format(set_of_PAs))
rand_VA = self.genVAforPA(PA=rand_PA, Bank=theBank, FlatMap=0, Type=theType, Size=theSize)
self.notice(">>>>>> Requested Alignment: {:6d} Requested Size: {:6d} PA target= {:16X} VA target= {:16X}".format(theAlign, theSize, rand_PA, rand_VA))
instr_id = self.genInstruction(self.choice(ldstr_byte_ops), {'LSTarget':rand_VA})
# Iterate through Size and Align values. Force requires Align to be a power of 2.
        # This 2nd block tests larger values of size - 32KB to 128KB.
for theSize in [2 ** x for x in range(15, 18)]:
for theAlign in [2 ** x for x in range(15, 25)]:
if theAlign < theSize: continue
for _ in range(loopCount):
rand_PA = self.genPA(Size=theSize, Align=theAlign, Type=theType, Bank=theBank, CanAlias=theCanAlias)
rand_VA = self.genVAforPA(PA=rand_PA, Bank=theBank, FlatMap=0, CanAlias=0, ForceNewAddress=1, Type=theType, Size=theSize)
self.notice(">>>>>> Requested Alignment: {:6d} Requested Size: {:6d} PA target= {:16X} VA target= {:16X}".format(theAlign, theSize, rand_PA, rand_VA))
instr_id = self.genInstruction(self.choice(ldstr_byte_ops), {'LSTarget':rand_VA})
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
```
#### File: riscv/masterRun/optionsTest_force.py
```python
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
from DV.riscv.trees.instruction_tree import ALU_Int_All_instructions
class MyMainSequence(Sequence):
def generate(self, **kargs):
#option = self.getOption("loopCount")
loopCount, valid = self.getOption("loopCount")
#self.error(">>>>> Option Retrieved: ".format(option))
if not valid:
self.error(">>>>> No 'loopCount' option was specified. Value is {}.".format(loopCount))
else:
self.notice(">>>>> Value specified for 'loopCount' option is: {}".format(loopCount))
for _ in range(loopCount):
instr = self.pickWeighted(ALU_Int_All_instructions)
self.genInstruction(instr)
MainSequenceClass = MyMainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
```
#### File: riscv/state_transition/state_transition_broad_random_instructions_force.py
```python
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
from DV.riscv.instruction_list import instructions
import state_transition_test_utils
from Enums import EStateElementType
from State import State
import RandomUtils
import StateTransition
## This test verifies that StateTransitions yield the expected State with a wide variety of
# interleaved instructions.
class MainSequence(Sequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mExpectedStateData = {}
def generate(self, **kargs):
for _ in range(3):
state = self._createState()
self._genRandomInstructions()
StateTransition.transitionToState(state)
state_transition_test_utils.verifyState(self, self._mExpectedStateData)
self._genRandomInstructions()
## Generate a random number of a wide variety of instructions.
def _genRandomInstructions(self):
for _ in range(RandomUtils.random32(100, 200)):
instr = self.choice(instructions)
# TODO(Noah): Permit instructions that begin with E and F when exception handlers are
# implemented. The excluded instructions, EBREAK, ECALL and floating point instructions,
# are generally prone to triggering exceptions.
if not (instr.startswith('E') or instr.startswith('F')):
self.genInstruction(instr)
## Create a random State to test an explicit StateTransition.
def _createState(self):
state = State()
self._mExpectedStateData[EStateElementType.Memory] = state_transition_test_utils.addRandomMemoryStateElements(self, state, RandomUtils.random32(0, 20))
self._mExpectedStateData[EStateElementType.GPR] = state_transition_test_utils.addRandomGprStateElements(self, state, RandomUtils.random32(0, 20))
self._mExpectedStateData[EStateElementType.FloatingPointRegister] = state_transition_test_utils.addRandomFloatingPointRegisterStateElements(self, state, RandomUtils.random32(0, 20))
self._mExpectedStateData[EStateElementType.PC] = state_transition_test_utils.addRandomPcStateElement(self, state)
return state
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
```
#### File: riscv/state_transition/state_transition_memory_default_force.py
```python
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
import state_transition_test_utils
from Enums import EStateElementType
from State import State
import RandomUtils
import StateTransition
## This test verifies that the default MemoryStateElement StateTransitionHandler alters the memory
# State as expected.
class MainSequence(Sequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mMemBlockStartAddr = None
self._mMemBlockEndAddr = None
self._mExpectedStateData = {}
def generate(self, **kargs):
state = self._createState()
target_addr_range = '%d-%d' % (self._mMemBlockStartAddr, self._mMemBlockEndAddr)
instructions = ('LD##RISCV', 'SD##RISCV')
for _ in range(RandomUtils.random32(2, 5)):
for _ in range(RandomUtils.random32(50, 100)):
self.genInstruction(self.choice(instructions), {'LSTarget': target_addr_range})
StateTransition.transitionToState(state)
state_transition_test_utils.verifyState(self, self._mExpectedStateData)
## Create a simple State to test an explicit StateTransition.
def _createState(self):
state = State()
expected_mem_state_data = []
mem_block_size = RandomUtils.random32(0x8, 0x20) * 16
self._mMemBlockStartAddr = self.genVA(Size=mem_block_size, Align=16, Type='D')
self._mMemBlockEndAddr = self._mMemBlockStartAddr + mem_block_size - 1
cur_addr = self._mMemBlockStartAddr
while cur_addr <= self._mMemBlockEndAddr:
mem_val = RandomUtils.random64()
state.addMemoryStateElement(cur_addr, 8, mem_val)
expected_mem_state_data.append((cur_addr, mem_val))
cur_addr += 8
self._mExpectedStateData[EStateElementType.Memory] = expected_mem_state_data
return state
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
```
#### File: riscv/state_transition/state_transition_partial_force.py
```python
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
from base.StateTransitionHandler import StateTransitionHandler
import state_transition_test_utils
from Enums import EStateElementType, EStateTransitionType
from State import State
import RandomUtils
import StateTransition
## A test StateTransitionHandler that defers to the default StateTransitionHandler some of the time.
class PartialStateTransitionHandlerTest(StateTransitionHandler):
## Execute the State change represented by the StateElement. Only instances of the StateElement
# types for which the StateTransitionHandler has been registered will be passed to this method.
# Other StateTransitionHandlers will process the other StateElement types. It is important to
# avoid making changes to entities represented by StateElements that have already been
# processed. Changes to entities represented by StateElements that will be processed later are
# permitted.
#
# @param aStateElem A StateElement object.
def processStateElement(self, aStateElem):
processed = False
# Randomly decide whether to process the StateElement or defer to the default implementation
if RandomUtils.random32(0, 1) == 1:
(mem_block_ptr_index,) = self.getArbitraryGprs(1, aExclude=(0,))
self.initializeMemoryBlock(mem_block_ptr_index, (aStateElem,))
self.genInstruction('FLD##RISCV', {'rd': aStateElem.getRegisterIndex(), 'rs1': mem_block_ptr_index, 'simm12': 0, 'NoRestriction': 1})
processed = True
return processed
## This test verifies that a StateTransition handler can process some of the StateElements and defer
# to the default StateTransitionHandler for the remaining StateElements.
class MainSequence(Sequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mExpectedStateData = {}
def generate(self, **kargs):
state_trans_handler = PartialStateTransitionHandlerTest(self.genThread)
StateTransition.registerStateTransitionHandler(state_trans_handler, EStateTransitionType.Explicit, (EStateElementType.FloatingPointRegister,))
state = self._createState()
StateTransition.transitionToState(state)
state_transition_test_utils.verifyState(self, self._mExpectedStateData)
## Create a simple State to test an explicit StateTransition.
def _createState(self):
state = State()
self._mExpectedStateData[EStateElementType.FloatingPointRegister] = state_transition_test_utils.addRandomFloatingPointRegisterStateElements(self, state, RandomUtils.random32(0, 15))
return state
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
```
#### File: builder/test_builder/base_test_builder.py
```python
import sys
sys.path.insert(0, '..')
class BaseTestBuilder:
def __init__(self, archName, groupByNum, groupSize, additionalImport, postMainSeq, debugOutputPath):
#input and output paths and filenames
self.mInputPath = "input/"
self.mOutputPath = "output/"
self.mXmlPath = "xml/"
self.mTxtPath = "txt/"
self.mXmlFiles = []
self.mTxtFiles = []
#architecture name
self.mArchName = archName
#additional hook strings into template to allow for custom imports/post sequence specifiers (such as gen thread init)
self.mAdditionalImport = additionalImport
self.mPostMainSeq = postMainSeq
#data structures for reading input data, key=filename w/o suffix val=data (either instr file obj/list of strings)
self.mInstrFiles = {}
#data structures to manage subgrouping inside of instruction files, contains input from txt files
self.mUnsupportedInstructions = []
self.mSubgroupInstructions = {}
#instruction grouping switches
self.mGroupByNum = groupByNum
self.mGroupSize = groupSize
#if debugOutputPath is defined, messages will be printed to outfile instead of stdout
self.fDebugOutput = open(debugOutputPath, 'w') if (debugOutputPath != "") else None
def debug(self, s):
print('DEBUG: {}'.format(s), file=self.fDebugOutput)
def get_xml_input_path(self):
return self.mInputPath + self.mXmlPath
def get_txt_input_path(self):
return self.mInputPath + self.mTxtPath
def make_output_test_dir(self, dir_name):
import os
try:
os.makedirs(self.mOutputPath + dir_name)
except FileExistsError:
pass
except OSError:
self.debug("Error creating directory {}{}, exiting".format(self.mOutputPath, dir_name))
sys.exit(1)
return self.mOutputPath + dir_name + "/"
def delete_output_test_dir(self, dir_name):
import os
try:
os.rmdir(dir_name)
except OSError:
self.debug("Error removing directory {}, exiting".format(dir_name))
sys.exit(1)
def read_inputs(self):
from shared.instruction_file import InstructionFile
from shared.instruction_file_parser import InstructionFileParser
for xml_file in self.mXmlFiles:
instr_file = InstructionFile()
file_parser = InstructionFileParser(instr_file)
file_parser.parse(self.get_xml_input_path() + xml_file)
            # Strip the '.xml' suffix (rstrip removes a character set, not a suffix).
            self.mInstrFiles[xml_file[:-len('.xml')]] = instr_file
for txt_file in self.mTxtFiles:
with open(self.get_txt_input_path() + txt_file, "r") as txt_handle:
                instr_subgroup_name = txt_file[:-len('.txt')]  # strip the '.txt' suffix
txt_lines = txt_handle.read().splitlines()
if instr_subgroup_name == "unsupported":
self.mUnsupportedInstructions = txt_lines
else:
self.mSubgroupInstructions[instr_subgroup_name] = txt_lines
def process_instr_group(self, instrFile, instrFileName, testOutputDir, subgroupPrefix, skipInstructions, validInstructions):
instr_grp = self.gen_grouped_instr_file(instrFile, testOutputDir, subgroupPrefix, skipInstructions, validInstructions)
if instr_grp.has_any_instructions():
with open(testOutputDir + "instruction_grouping.txt", "w") as group_handle:
instr_grp.write(group_handle)
with open(testOutputDir + instrFileName + ".txt", "w") as test_handle:
instr_grp.print_tests(test_handle)
return True
else:
self.delete_output_test_dir(testOutputDir)
return False
def gen_grouped_instr_file(self, instrFile, testOutputDir, subgroupPrefix, skipInstructions, validInstructions):
instr_grp = None
if self.mGroupByNum:
from shared.instruction_file_grouping import NumGroupedInstructionFile
instr_grp = NumGroupedInstructionFile(instrFile, self.mArchName, testOutputDir, self.mGroupSize, self.mAdditionalImport, self.mPostMainSeq, skipInstructions, validInstructions, subgroupPrefix)
else:
from shared.instruction_file_grouping import FormatGroupedInstructionFile
instr_grp = FormatGroupedInstructionFile(instrFile, self.mArchName, testOutputDir, self.mAdditionalImport, self.mPostMainSeq, skipInstructions, validInstructions, subgroupPrefix)
return instr_grp
def write_tests(self):
for instr_file_name, instr_file in self.mInstrFiles.items():
valid_subgroups = []
test_output_dir = self.make_output_test_dir(instr_file_name)
import copy
default_skip_instrs = copy.deepcopy(self.mUnsupportedInstructions)
for subgroup_name, subgroup_instrs in self.mSubgroupInstructions.items():
default_skip_instrs += subgroup_instrs
subgroup_output_dir = self.make_output_test_dir(instr_file_name + "/" + subgroup_name)
valid_subgroup = self.process_instr_group(instr_file, instr_file_name + "_" + subgroup_name, subgroup_output_dir, subgroup_name + "_", self.mUnsupportedInstructions, subgroup_instrs)
if valid_subgroup:
valid_subgroups.append(subgroup_name)
self.process_instr_group(instr_file, instr_file_name, test_output_dir, "", default_skip_instrs, None)
self.write_ctrl_files(valid_subgroups, test_output_dir)
def write_ctrl_files(self, subgroupNames, outputDir):
from shared.ctrl_file_builder import CtrlFileBuilder
ctrl_file_builder = CtrlFileBuilder(self.mArchName.lower(), subgroupNames)
ctrl_file_builder.gen_ctrl_files(outputDir)
def run(self):
self.read_inputs()
self.write_tests()
```
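A sketch of how this builder might be driven. The architecture name, the input file names, and the flat import path are placeholders; a real caller would more likely subclass BaseTestBuilder and populate the input lists there. Only the constructor signature and `run()` shown above are relied on.

```python
# Hypothetical driver sketch; constructor arguments follow the signature shown above.
# "RISCV", the file names, and the import path are placeholders, not taken from this file.
from base_test_builder import BaseTestBuilder

builder = BaseTestBuilder(
    archName="RISCV",          # architecture name used in the generated templates
    groupByNum=True,           # group instructions into fixed-size chunks...
    groupSize=25,              # ...of this many instructions per generated test
    additionalImport="",       # extra import lines to splice into the template
    postMainSeq="",            # extra text emitted after the main sequence
    debugOutputPath="",        # empty string keeps debug output on stdout
)
builder.mXmlFiles = ["g_C.xml"]          # instruction definition files under input/xml/
builder.mTxtFiles = ["unsupported.txt"]  # subgroup/skip lists under input/txt/
builder.run()                            # read inputs, then write the grouped tests
```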
#### File: utils/enum_classes/create_enum_files.py
```python
import abc
import collections
import enum_string_hash
import importlib
import os
import string
import sys
class _EnumFileSpec:
def __init__(self, input_file_name, base_output_dir, output_file_name, unit_test_output_dir):
self._base_output_dir = base_output_dir
self._input_file_name = input_file_name
self._output_file_name = output_file_name
self._unit_test_output_dir = unit_test_output_dir
@property
def base_output_dir(self):
return self._base_output_dir
@property
def input_file_name(self):
return self._input_file_name
@property
def output_file_name(self):
return self._output_file_name
@property
def unit_test_output_dir(self):
return self._unit_test_output_dir
class _EnumSpec:
def __init__(self, class_name, raw_type, comments, values):
self._class_name = class_name
self._raw_type = raw_type
self._comments = comments
self._values = values
@property
def class_name(self):
return self._class_name
@property
def raw_type(self):
return self._raw_type
@property
def comments(self):
return self._comments
@property
def values(self):
return self._values
class _CppHeaderGenerator:
def __init__(self, script_path, file_name):
self._script_path = script_path
self._file_name = file_name
self._header_template = _TemplateLoader.load_template('cpp_header.txt')
self._declaration_template = _TemplateLoader.get_enum_declaration_template()
self._indent = ' '*2
def generate(self, enum_specs):
declarations = self._generate_declarations(enum_specs)
return self._header_template.substitute(file_name=self._file_name, enum_declarations=declarations, script_path=self._script_path)
def _generate_declarations(self, enum_specs):
declarations = ''
for enum_spec in enum_specs:
declarations += self._generate_declaration(enum_spec)
return declarations[:-1]
def _generate_declaration(self, enum_spec):
value_settings = self._generate_value_settings(enum_spec.values)
declaration_template_mapping = {
'class_name': enum_spec.class_name,
'raw_type': enum_spec.raw_type,
'comments': enum_spec.comments,
'value_settings': value_settings
}
return self._declaration_template.substitute(declaration_template_mapping)
def _generate_value_settings(self, enum_values):
value_settings = ''
for enum_value in enum_values:
value_settings += self._indent*2 + '%s = %s,\n' % enum_value
return value_settings[:-1]
class _CppSourceGenerator:
def __init__(self, script_path, file_name):
self._script_path = script_path
self._file_name = file_name
self._source_template = _TemplateLoader.load_template('cpp_source.txt')
self._definition_template = _TemplateLoader.get_enum_definition_template()
self._indent = ' '*2
def generate(self, enum_specs):
definitions = self._generate_definitions(enum_specs)
return self._source_template.substitute(file_name=self._file_name, enum_definitions=definitions, script_path=self._script_path)
def _generate_definitions(self, enum_specs):
definitions = ''
for enum_spec in enum_specs:
definitions += self._generate_definition(enum_spec)
return definitions[:-1]
def _generate_definition(self, enum_spec):
(to_enum_function_body, try_to_enum_function_body) = self._generate_to_enum_function_bodies(enum_spec)
to_string_cases = self._generate_to_string_cases(enum_spec)
definition_template_mapping = {
'class_name': enum_spec.class_name,
'raw_type': enum_spec.raw_type,
'comments': enum_spec.comments,
'enum_size': len(enum_spec.values),
'to_enum_function_body': to_enum_function_body,
'try_to_enum_function_body': try_to_enum_function_body,
'to_string_cases': to_string_cases,
'default_string_value': enum_spec.values[0][0]
}
return self._definition_template.substitute(definition_template_mapping)
# Combine generation of string_to_<enum_type> and try_string_to_<enum_type> functions into a single method to avoid
# the cost of having to generate the nested hash table twice.
def _generate_to_enum_function_bodies(self, enum_spec):
nested_hash_table = enum_string_hash.find_limited_char_nested_hash(enum_spec.values)
to_enum_function_generator = _CppStringToEnumFunctionGenerator(enum_spec, nested_hash_table)
to_enum_function_body = to_enum_function_generator.generate_function_body()
try_to_enum_function_generator = _CppTryStringToEnumFunctionGenerator(enum_spec, nested_hash_table)
try_to_enum_function_body = try_to_enum_function_generator.generate_function_body()
return (to_enum_function_body, try_to_enum_function_body)
def _generate_to_string_cases(self, enum_spec):
to_string_cases = ''
for enum_value in enum_spec.values:
to_string_cases += self._indent*2 + 'case E%s::%s: return "%s";\n' % (
enum_spec.class_name, enum_value[0], enum_value[0])
return to_string_cases[:-1]
class _CppUnitTestGenerator:
def __init__(self, script_path, file_name):
self._script_path = script_path
self._file_name = file_name
self._unit_test_template = _TemplateLoader.load_template('cpp_unit_test.txt')
self._unit_test_case_template = _TemplateLoader.get_unit_test_case_template()
self._indent = ' '*2
def generate(self, enum_specs):
unit_test_cases = self._generate_unit_test_cases(enum_specs)
return self._unit_test_template.substitute(file_name=self._file_name, unit_test_cases=unit_test_cases, script_path=self._script_path)
def _generate_unit_test_cases(self, enum_specs):
unit_test_cases = ''
for enum_spec in enum_specs:
unit_test_cases += self._generate_unit_test_case(enum_spec)
return unit_test_cases[:-1]
def _generate_unit_test_case(self, enum_spec):
to_string_tests = self._generate_to_string_tests(enum_spec)
to_enum_tests = self._generate_to_enum_tests(enum_spec)
to_enum_fail_tests = self._generate_to_enum_fail_tests(enum_spec)
try_to_enum_tests = self._generate_try_to_enum_tests(enum_spec)
try_to_enum_fail_tests = self._generate_try_to_enum_fail_tests(enum_spec)
unit_test_case_template_mapping = {
'class_name': enum_spec.class_name,
'to_string_tests': to_string_tests,
'to_enum_tests': to_enum_tests,
'to_enum_fail_tests': to_enum_fail_tests,
'try_to_enum_tests': try_to_enum_tests,
'try_to_enum_fail_tests': try_to_enum_fail_tests
}
return self._unit_test_case_template.substitute(unit_test_case_template_mapping)
def _generate_to_string_tests(self, enum_spec):
to_string_tests = ''
for enum_value in enum_spec.values:
to_string_tests += self._indent*3 + 'EXPECT(E%s_to_string(E%s::%s) == "%s");\n' % (
enum_spec.class_name, enum_spec.class_name, enum_value[0], enum_value[0])
return to_string_tests[:-1]
def _generate_to_enum_tests(self, enum_spec):
to_enum_tests = ''
for enum_value in enum_spec.values:
to_enum_tests += self._indent*3 + 'EXPECT(string_to_E%s("%s") == E%s::%s);\n' % (
enum_spec.class_name, enum_value[0], enum_spec.class_name, enum_value[0])
return to_enum_tests[:-1]
def _generate_to_enum_fail_tests(self, enum_spec):
fail_test_string = self._generate_fail_test_string(enum_spec.values[0][0])
to_enum_fail_tests = self._indent*3 + 'EXPECT_THROWS_AS(string_to_E%s("%s"), EnumTypeError);' % (
enum_spec.class_name, fail_test_string)
return to_enum_fail_tests
def _generate_try_to_enum_tests(self, enum_spec):
try_to_enum_tests = ''
for enum_value in enum_spec.values:
try_to_enum_tests += self._indent*3 + 'EXPECT(try_string_to_E%s("%s", okay) == E%s::%s);\n' % (
enum_spec.class_name, enum_value[0], enum_spec.class_name, enum_value[0])
try_to_enum_tests += self._indent*3 + 'EXPECT(okay);\n'
return try_to_enum_tests[:-1]
def _generate_try_to_enum_fail_tests(self, enum_spec):
fail_test_string = self._generate_fail_test_string(enum_spec.values[0][0])
try_to_enum_fail_tests = self._indent*3 + 'try_string_to_E%s("%s", okay);\n' % (
enum_spec.class_name, fail_test_string)
try_to_enum_fail_tests += self._indent*3 + 'EXPECT(!okay);'
return try_to_enum_fail_tests
# Generate a string that doesn't correspond to any enum value. However, it is intended to be similar enough to a
# string that does match an enum value such that in many cases, the generated string will hash to one of the case
# values and fail at the full equality check.
def _generate_fail_test_string(self, enum_value_name):
if len(enum_value_name) > 1:
fail_test_string = enum_value_name[0] + '_' + enum_value_name[2:]
else:
fail_test_string = '_' + enum_value_name[1:]
return fail_test_string
class _CppStringToEnumFunctionGeneratorBase(abc.ABC):
def __init__(self, enum_spec):
self._enum_spec = enum_spec
self._indent = ' '*2
@property
@abc.abstractmethod
def error_statements(self):
...
@property
def enum_spec(self):
return self._enum_spec
@property
def indent(self):
return self._indent
@abc.abstractmethod
def generate_function_body(self):
...
@abc.abstractmethod
def generate_validation(self, class_name, string_value, context):
...
def generate_nested_function_body(self, nested_hash_table, context):
to_enum_function_body = self._generate_hash_computation(nested_hash_table.char_indexes, context)
to_enum_function_body += self._indent*(
2 + context.indent_level) + 'switch (hash_value%s) {\n' % context.var_name_suffix
for table_entry in nested_hash_table.entries:
to_enum_function_body += self._indent*(2 + context.indent_level) + 'case %d:\n' % table_entry.key
if table_entry.has_multiple_values():
to_enum_function_body += self._indent*(3 + context.indent_level) + '{\n'
nested_context = _CppFunctionGenerationContext(context.indent_level + 2, '_%d' % table_entry.key)
to_enum_function_body += self.generate_nested_function_body(
table_entry.inner_hash_table, nested_context)
to_enum_function_body += self._indent*(3 + context.indent_level) + '}\n'
else:
# Validate the input string inside the case statement to guard against hash collisions for strings that
# don't match any of the enum values.
to_enum_function_body += self.generate_validation(
self._enum_spec.class_name, table_entry.get_only_value(), context)
to_enum_function_body += self._indent*(2 + context.indent_level) + 'default:\n'
for error_statement in self.error_statements:
to_enum_function_body += self._indent*(3 + context.indent_level) + error_statement
to_enum_function_body += self._indent*(2 + context.indent_level) + '}\n'
return to_enum_function_body
def _generate_hash_computation(self, char_indexes, context):
hash_computation = self._generate_string_size_expression(char_indexes, context)
hash_computation += self._indent*(2 + context.indent_level) + 'char hash_value%s = ' % context.var_name_suffix
hash_computation += self._generate_char_retrieval_expression(char_indexes[0], context)
for char_index in char_indexes[1:]:
char_retrieval_expression = self._generate_char_retrieval_expression(char_index, context)
hash_computation += ' ^ ' + char_retrieval_expression
hash_computation += ';\n\n'
        return hash_computation
def _generate_string_size_expression(self, char_indexes, context):
if (len(char_indexes) == 1) and (char_indexes[0] == 0):
string_size_expression = ''
else:
string_size_expression = self._indent*(2 + context.indent_level) + 'size_t size%s = in_str.size();\n' % context.var_name_suffix
return string_size_expression
def _generate_char_retrieval_expression(self, char_index, context):
if char_index == 0:
char_retrieval_expression = 'in_str.at(%d)' % char_index
else:
# Use the modulo operator in case the character index is larger than the string size, but not otherwise, as
# it degrades performance.
char_retrieval_expression = 'in_str.at(%d < size%s ? %d : %d %% size%s)' % (char_index, context.var_name_suffix, char_index, char_index, context.var_name_suffix)
return char_retrieval_expression
class _CppStringToEnumFunctionGenerator(_CppStringToEnumFunctionGeneratorBase):
def __init__(self, enum_spec, nested_hash_table):
super().__init__(enum_spec)
self._nested_hash_table = nested_hash_table
self._error_statements = ['unknown_enum_name(enum_type_name, in_str);\n']
@property
def error_statements(self):
return self._error_statements
def generate_function_body(self):
context = _CppFunctionGenerationContext(0, '')
to_enum_function_body = self.generate_nested_function_body(self._nested_hash_table, context)
return to_enum_function_body[:-1]
def generate_validation(self, class_name, string_value, context):
validation = self.indent*(3 + context.indent_level) + 'validate(in_str, "%s", enum_type_name);\n' % (
string_value)
validation += self.indent*(3 + context.indent_level) + 'return E%s::%s;\n' % (class_name, string_value)
return validation
class _CppTryStringToEnumFunctionGenerator(_CppStringToEnumFunctionGeneratorBase):
def __init__(self, enum_spec, nested_hash_table):
super().__init__(enum_spec)
self._nested_hash_table = nested_hash_table
self._error_statements = [
'okay = false;\n', 'return E%s::%s;\n' % (enum_spec.class_name, enum_spec.values[0][0])]
@property
def error_statements(self):
return self._error_statements
def generate_function_body(self):
try_to_enum_function_body = self.indent*2 + 'okay = true;\n'
context = _CppFunctionGenerationContext(0, '')
try_to_enum_function_body += self.generate_nested_function_body(self._nested_hash_table, context)
return try_to_enum_function_body[:-1]
def generate_validation(self, class_name, string_value, context):
validation = self.indent*(3 + context.indent_level) + 'okay = (in_str == "%s");\n' % string_value
validation += self.indent*(3 + context.indent_level) + 'return E%s::%s;\n' % (class_name, string_value)
return validation
class _TemplateLoader:
@classmethod
def load_template(cls, template_file_name):
template_dir = 'templates'
with open(os.path.join(template_dir, template_file_name)) as template_file:
template_file_contents = template_file.read()
return string.Template(template_file_contents)
@classmethod
def get_enum_declaration_template(cls):
import templates.cpp_enum_declaration
return string.Template(templates.cpp_enum_declaration.template_string)
@classmethod
def get_enum_definition_template(cls):
import templates.cpp_enum_definition
return string.Template(templates.cpp_enum_definition.template_string)
@classmethod
def get_unit_test_case_template(cls):
import templates.cpp_unit_test_case
return string.Template(templates.cpp_unit_test_case.template_string)
class _CppFunctionGenerationContext:
def __init__(self, indent_level, var_name_suffix):
self._indent_level = indent_level
self._var_name_suffix = var_name_suffix
@property
def indent_level(self):
return self._indent_level
@property
# Used to generate unique names for variables serving similar purposes within the same method.
def var_name_suffix(self):
return self._var_name_suffix
def generate_enum_files(app_name):
enum_file_specs = _create_enum_file_specs(app_name)
for enum_file_spec in enum_file_specs:
_generate_enum_file(enum_file_spec)
def _generate_enum_file(enum_file_spec):
script_path = os.path.join('utils', 'enum_classes', 'create_enum_files.py')
enum_specs = _load_enum_specs(enum_file_spec.input_file_name)
cpp_header_generator = _CppHeaderGenerator(script_path, enum_file_spec.output_file_name)
cpp_header_contents = cpp_header_generator.generate(enum_specs)
cpp_source_generator = _CppSourceGenerator(script_path, enum_file_spec.output_file_name)
cpp_source_contents = cpp_source_generator.generate(enum_specs)
cpp_unit_test_generator = _CppUnitTestGenerator(script_path, enum_file_spec.output_file_name)
cpp_unit_test_contents = cpp_unit_test_generator.generate(enum_specs)
cpp_header_path = os.path.join(enum_file_spec.base_output_dir, 'inc', ('%s.h' % enum_file_spec.output_file_name))
_write_enum_file(cpp_header_path, cpp_header_contents)
cpp_source_path = os.path.join(enum_file_spec.base_output_dir, 'src', ('%s.cc' % enum_file_spec.output_file_name))
_write_enum_file(cpp_source_path, cpp_source_contents)
cpp_unit_test_path = os.path.join(enum_file_spec.unit_test_output_dir, enum_file_spec.output_file_name, ('%s_test.cc' % enum_file_spec.output_file_name))
_write_enum_file(cpp_unit_test_path, cpp_unit_test_contents)
def _create_enum_file_specs(app_name):
enum_file_specs = []
force_path = os.path.join('..', '..')
if app_name == 'Force':
base_enum_file_spec = _EnumFileSpec('base_enum_classes', os.path.join(force_path, 'base'), 'Enums', os.path.join(force_path, 'unit_tests', 'tests', 'base'))
enum_file_specs.append(base_enum_file_spec)
riscv_enum_file_spec = _EnumFileSpec('riscv_enum_classes', os.path.join(force_path, 'riscv'), 'EnumsRISCV', os.path.join(force_path, 'unit_tests', 'tests', 'riscv'))
enum_file_specs.append(riscv_enum_file_spec)
elif app_name == 'Fpix':
base_enum_file_spec = _EnumFileSpec('fpix_base_enum_classes', os.path.join(force_path, 'fpix'), 'EnumsFPIX', os.path.join(force_path, 'unit_tests', 'tests', 'fpix'))
enum_file_specs.append(base_enum_file_spec)
else:
raise ValueError('Unknown application name %s' % app_name)
return enum_file_specs
def _load_enum_specs(source_file_name):
# Import the list used to define the enums
source_module = importlib.import_module(source_file_name)
enum_specs = []
for class_name, raw_type, comments, values in source_module.enum_classes_details:
enum_specs.append(_EnumSpec(class_name, raw_type, comments, values))
return enum_specs
def _write_enum_file(file_path, file_contents):
with open(file_path, 'w') as enum_file:
enum_file.write(file_contents)
if __name__ == '__main__':
app_name = 'Force'
if len(sys.argv) > 1:
app_name = sys.argv[1]
generate_enum_files(app_name)
```
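For reference, `_load_enum_specs()` above imports the module named by each `_EnumFileSpec` (for example `base_enum_classes`) and expects it to expose an `enum_classes_details` list of `(class_name, raw_type, comments, values)` tuples, with `values` holding `(name, number)` pairs. A minimal, hypothetical example of such an input module; the enum shown is illustrative only:

```python
# Hypothetical example of an enum definition module (e.g. base_enum_classes.py)
# in the shape _load_enum_specs() expects: (class_name, raw_type, comments, values),
# where values is a sequence of (value_name, numeric_value) pairs.
enum_classes_details = [
    (
        "Endianness",                      # becomes class EEndianness in the generated C++
        "unsigned char",                   # underlying raw type of the generated enum class
        "//!< Byte order of a memory access.",
        [("LittleEndian", 0), ("BigEndian", 1)],
    ),
]
```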
#### File: applications/compile/CompileInit.py
```python
from classes.ApplicationOption import CommandLineOption, ParameterProcessor
from common.path_utils import PathUtils
from common.sys_utils import SysUtils
## Define additional specific command line parameters
#
class CompileCmdLineOptions(object):
cGroupName = 'Compile related options'
cGroupDescription = 'Useful Compile options to control Compile usage'
# 'option name' 'number of value arguments' 'help text'
# | 'default value' | 'additional arguments' |
# | | | | |
cOptions = [CommandLineOption(aName='compile', aDefault='', aNumArgs=0, aAdditionalArgs={'action':'store_true'}, aHelpText='- When present, overrides the default (False) and enables compiling'),
                CommandLineOption(aName='compile.path', aDefault='$PROJ_ROOT/verif/top/sim', aNumArgs=1, aAdditionalArgs={}, aHelpText='- When present, overrides the default path ($PROJ_ROOT/verif/top/sim)'),
CommandLineOption(aName='compile.options', aDefault='', aNumArgs=1, aAdditionalArgs={'type':str}, aHelpText='- When present, adds the specified option string for compilation'),
CommandLineOption(aName='compile.mp', aDefault='', aNumArgs=0, aAdditionalArgs={'action':'store_true'}, aHelpText='- When present, overrides the default (False) and triggers mp specific before and after processes'),
]
## Used to process application specific parameters
#
class CompileParametersProcessor(ParameterProcessor):
def __init__(self, aCmdLineOptions):
super().__init__(CompileCmdLineOptions.cOptions, aCmdLineOptions)
default_path = '$PROJ_ROOT/verif/top/sim'
if self.mAppParameters.parameter('compile') or self.mAppParameters.parameter('compile.options') or self.mAppParameters.parameter('compile.path') != default_path:
compile_path = self.mAppParameters.parameter('compile.path')
compile_path = PathUtils.expandVars(compile_path)
compile_makefile = PathUtils.include_trailing_path_delimiter(compile_path) + 'Makefile'
if not PathUtils.check_file(compile_makefile):
raise Exception(compile_makefile + ' does not exist.')
self.mAppParameters.setParameter('compile.path', compile_path)
self.mAppParameters.setParameter('compile.options', PathUtils.expandVars(self.mAppParameters.parameter('compile.options')))
## Process compile control data
#
def processCompileControlData(aControlData, aAppParameters):
if aAppParameters is None: return # TODO Temporary, to avoid failing in forrest run, to remove.
keys = ['compile', 'compile.options'] # Ignore compile.path for now because it has a default value and will always be found, forcing a compile every time master run runs
for key in keys:
if aAppParameters.parameter(key):
aControlData['run'] = True
aControlData['path'] = aAppParameters.parameter('compile.path')
aControlData['options'] = aAppParameters.parameter('compile.options')
aControlData['mp'] = aAppParameters.parameter('compile.mp')
return
# Check compile.path here to determine if it has been changed and, if so, make sure we run the compile app on that non-default path
default_path = '$PROJ_ROOT/verif/top/sim'
if aAppParameters.parameter('compile.path') != default_path:
aControlData['run'] = True
aControlData['path'] = aAppParameters.parameter('compile.path')
aControlData['options'] = aAppParameters.parameter('compile.options')
aControlData['mp'] = aAppParameters.parameter('compile.mp')
```
#### File: applications/force/ForceInit.py
```python
from classes.ApplicationOption import AppPathCmdLineOption, ParameterProcessor
from common.path_utils import PathUtils
from common.version_ctrl_utils import VersionCtrlUtils
from common.msg_utils import Msg
## Define additional FORCE specific command line parameters
#
class ForceCmdLineOptions(object):
cGroupName = "FORCE related options"
cGroupDescription = "Useful FORCE options to control FORCE usage"
# "number of value arguments"
# "option name" | "additional arguments"
# | "default value" | | "help text"
# | | | | |
cOptions = [AppPathCmdLineOption('path', "../../bin/force", 1, None, "- Path to FORCE binary", None, "FORCE_PATH")]
## Used to process application specific parameters
#
class ForceParametersProcessor(ParameterProcessor):
def __init__(self, aCmdLineOptions):
super().__init__(ForceCmdLineOptions.cOptions, aCmdLineOptions)
force_path = self.mAppParameters.parameter('path')
force_bin_dir, _ = PathUtils.split_path(force_path)
force_dir = PathUtils.real_path(PathUtils.include_trailing_path_delimiter(force_bin_dir) + '../')
if not PathUtils.check_exe(force_path):
raise Exception(force_path + " does not exist or is not executable, confirm valid exe")
        # determine source control revision information and store it as a parameter
version_data = VersionCtrlUtils.get_scm_revisions(force_dir)
version_output = VersionCtrlUtils.get_version_output(version_data)
Msg.info("Force Version Data:\n%s" % version_output)
self.mAppParameters.setParameter("version", version_data)
self.mAppParameters.setParameter("version_dir", force_dir)
## Process force control data
#
def processForceControlData(aControlData, aAppParameters):
"""
:param object aControlData:
:param object aAppParameters:
:return:
"""
if aAppParameters is None:
return # TODO Temporary, to avoid failing in forrest run, to remove.
key = 'path'
if aAppParameters.parameter(key):
aControlData[key] = aAppParameters.parameter(key)
```
#### File: applications/fruntoctrl/FrunToCtrlInit.py
```python
from classes.ApplicationOption import CommandLineOption, ParameterProcessor
from common.path_utils import PathUtils
## Define additional FRUN_TO_CTRL specific command line parameters
#
class FrunToCtrlCmdLineOptions(object):
cGroupName = "Frun to ctrl related options"
cGroupDescription = "Useful FRUN_TO_CTRL options to control FRUN_TO_CTRL usage"
# "number of value arguments"
# "option name" | "additional arguments"
# | "default value" | | "help text"
# | | | | |
cOptions = [ CommandLineOption('frun-to-ctrl', "", 0, {"action":"store_true"}, "- When present, overrides the default (False), and enables conversion from the frun file to an additional control file\non tests that have an frun file"),
]
## Used to process application specific parameters
#
class FrunToCtrlParametersProcessor(ParameterProcessor):
def __init__(self, aCmdLineOptions):
super().__init__(FrunToCtrlCmdLineOptions.cOptions, aCmdLineOptions)
if self.mAppParameters.parameter('frun-to-ctrl'):
frun_to_ctrl_path = PathUtils.include_trailing_path_delimiter( aCmdLineOptions.mProgramPath ) + "../frun_to_ctrl/frun_to_ctrl.py"
if not PathUtils.check_exe( frun_to_ctrl_path ):
raise Exception( frun_to_ctrl_path + " does not exist or is not executable, confirm valid exe" )
frun_to_ctrl_path = PathUtils.real_path(frun_to_ctrl_path)
self.mAppParameters.setParameter('path', frun_to_ctrl_path)
## Process fruntoctrl control data
#
def processFrunToCtrlControlData(aControlData, aAppParameters):
if aAppParameters is None: return # TODO Temporary, to avoid failing in forrest run, to remove.
key = 'frun-to-ctrl'
if aAppParameters.parameter(key):
aControlData['run'] = aAppParameters.parameter(key)
aControlData['path'] = aAppParameters.parameter('path')
```
#### File: regression/classes/ApplicationsInfo.py
```python
class ApplicationParameters(object):
def __init__(self):
self._mParameters = dict()
def resolveParameters(self, aAppOptions, aCmdLineOptions):
for app_option in aAppOptions:
opt_value, opt_specified = app_option.resolveParameter(aCmdLineOptions)
self._mParameters[app_option.name()] = opt_value
self._mParameters[app_option.name() + " from cmdline"] = opt_specified
def parameter(self, aParmName):
return self._mParameters[aParmName]
def setParameter(self, aParmName, aParmValue):
self._mParameters[aParmName] = aParmValue
def commandLineSpecified(self, aParmName):
return self._mParameters[aParmName + " from cmdline"]
## Configuration information for individual application
#
class ApplicationConfig(object):
def __init__(self, aName, aAppModule):
self._mName = aName
self._mModule = aAppModule
if hasattr(self._mModule, 'Tag'):
self._mTag = self._mModule.Tag
else:
self._mTag = self._mName
self.mAppParameters = None
## Return application name
#
def name(self):
return self._mName
## Return application config's tag
#
def tag(self):
return self._mTag
## Return application specific command line options if any.
#
def getCmdLineOptions(self):
return self._mModule.CmdLineOptions
## Return application specific parameters processor if any.
#
def getParametersProcessorClass(self):
return self._mModule.ParametersProcessorClass
## Return certain application parameter value.
#
def parameter(self, aParmName):
return self.mAppParameters.parameter(aParmName)
## Call upon application specific facility to process control data
#
def processControlData(self, aControlData):
self._mModule.ProcessControlData(aControlData, self.mAppParameters)
## Create executor
#
def createExecutor(self):
return self._mModule.ExecutorClass()
## Create reporter
#
def createReporter(self):
return self._mModule.ReporterClass()
## Container of all application components.
#
class ApplicationsInfo(object):
## Init with LSF info passed in.
#
def __init__(self, aLsfInfo):
self.mLsfInfo = aLsfInfo
self.mAllAppsOrder = list()
self.mAllAppsOrder.append(self.mLsfInfo)
self.mSingleRunApps = list()
self.mSequenceApps = list()
self.mCmdLineOpts = None
self.mTagToApp = dict()
self._registerTag(aLsfInfo)
self.mMainAppPath = None
self.mProcessMax = None
self.mTestBaseDir = None
self.mToolPath = None
self.mMode = None
self.mNumTestsCount = 0
self._mNamesWithIndex = dict()
self.mTagToReportInfo = dict()
self.mConfigPath = None
## Add single run application
#
def addSingleRunApplication(self, aSingleRunApp):
self.mSingleRunApps.append(aSingleRunApp)
self._addApplication(aSingleRunApp)
## Add application
def _addApplication(self, aApp):
self.mAllAppsOrder.append(aApp)
self._registerTag(aApp)
## Add sequence application.
#
def addSequenceApplication(self, aSeqApp):
self.mSequenceApps.append(aSeqApp)
self._addApplication(aSeqApp)
## Register the applications' tag.
def _registerTag(self, aAppCfg):
if aAppCfg.tag() in self.mTagToApp.keys():
raise Exception("Registering application %s with tag %s that already exist......" % (aAppCfg.name(), aAppCfg.tag()))
self.mTagToApp[aAppCfg.tag()] = aAppCfg
## Look up an application config by using tag.
#
def getAppConfig(self, aAppTag):
return self.mTagToApp[aAppTag]
## Increment the test count for use with count mode
#
# Note, this is not an atomic add because we stay single threaded in count mode
#
def incrementTestCount(self):
self.mNumTestsCount += 1
## Mainly used in creating indices for sub-tasks, but somewhat generalized to handle names with index.
#
def getNextIndex(self, aName):
next_index = 0
if aName in self._mNamesWithIndex:
next_index = self._mNamesWithIndex[aName] + 1
# update saved index.
self._mNamesWithIndex[aName] = next_index
return next_index
```
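A tiny illustration of the parameter bookkeeping above. The option object here is a stand-in exposing only the `name()`/`resolveParameter()` interface that `resolveParameters()` relies on; its values are invented for the example.

```python
# Hypothetical illustration of ApplicationParameters bookkeeping; _FakeOption mimics the
# small option interface used by resolveParameters() above.
from classes.ApplicationsInfo import ApplicationParameters

class _FakeOption:
    def name(self):
        return "path"
    def resolveParameter(self, aCmdLineOptions):
        return ("../../bin/force", False)  # (resolved value, specified on command line?)

params = ApplicationParameters()
params.resolveParameters([_FakeOption()], aCmdLineOptions=None)
print(params.parameter("path"))              # ../../bin/force
print(params.commandLineSpecified("path"))   # False
```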
#### File: regression/classes/extractor.py
```python
from common.path_utils import PathUtils
from common.msg_utils import Msg
from common.sys_utils import SysUtils
from common.datetime_utils import DateTime
from common.errors import *
# from classes.control_item import ControlItem, CtrlItmKeys
class Extractor( object ):
def __init__( self ):
        super().__init__()
    # load both the command info and the result info from a specific task
def load( self, arg_request, arg_response ):
Msg.error_trace()
raise AbstractionError( "Abstract Method Error: Extractor::load(...) not implemented in descendent [%s]" % ( str( type( self ))))
# publish string that is displayed in scrolling results output
def report( self ):
Msg.error_trace()
raise AbstractionError( "Abstract Method Error: Extractor::report(...) not implemented in descendent [%s]" % ( str( type( self ))))
# publish line based on desired output directives
def publish( self, arg_sum_lev ):
Msg.error_trace()
raise AbstractionError( "Abstract Method Error: Extractor::publish(...) not implemented in descendent [%s]" % ( str( type( self ))))
# returns true if extractor detects error
def has_error( self ):
Msg.error_trace()
raise AbstractionError( "Abstract Method Error: Extractor::has_error(...) not implemented in descendent [%s]" % ( str( type( self ))))
# assembles and returns error line if extractor detects error
def error_msg( self ):
Msg.error_trace()
raise AbstractionError( "Abstract Method Error: Extractor::error_msg(...) not implemented in descendent [%s]" % ( str( type( self ))))
# returns tuple
# first element: 1 if successful otherwise returns 0
# second element: process message
def result( self ):
Msg.error_trace()
raise AbstractionError( "Abstract Method Error: Extractor::result(...) not implemented in descendent [%s]" % ( str( type( self ))))
```
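Since every method above is abstract, a concrete extractor has to supply them all. A minimal sketch follows; the response field names ("retcode", "stdout") and the dict-like response object are invented for illustration and are not part of the base API.

```python
# Hypothetical concrete extractor sketch; field names are illustrative assumptions only.
from classes.extractor import Extractor

class SimpleExtractor(Extractor):
    def load(self, arg_request, arg_response):
        # assumes arg_response behaves like a dict for this sketch
        self.retcode = arg_response.get("retcode", 1)
        self.stdout = arg_response.get("stdout", "")

    def report(self):
        return "retcode=%d" % self.retcode

    def publish(self, arg_sum_lev):
        return self.report()

    def has_error(self):
        return self.retcode != 0

    def error_msg(self):
        return self.stdout if self.has_error() else ""

    def result(self):
        return (0 if self.has_error() else 1, self.report())
```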
#### File: regression/classes/performance_summary.py
```python
import re
from common.path_utils import PathUtils
from common.sys_utils import SysUtils
from common.msg_utils import Msg
from common.datetime_utils import DateTime
from classes.summary import Summary, SummaryItem, SummaryGroups, SummaryLevel
class PerformanceInstructionType():
Total = 0
Secondary = 1
Default = 2
performance_instruction_type_strs = [ "Total","Secondary","Default"]
@classmethod
def instruction_type( arg_class, arg_str ):
if arg_str == "Total":
return PerformanceInstructionType.Total
if arg_str == "Secondary":
return PerformanceInstructionType.Secondary
if arg_str == "Default":
return PerformanceInstructionType.Default
raise Exception( "Unable to indentify the instruction type" )
class PerformanceSummaryItem( SummaryItem ):
def unpack( self, arg_queue_item ):
super().unpack( arg_queue_item )
# save the generation results for this task item
def commit_generate( self ):
self.force_result = SysUtils.ifthen( SysUtils.success( self.force_retcode ), "PASS", "FAIL" )
self.force_level = SummaryLevel.Any
if SysUtils.failed( self.force_retcode ):
self.force_level = SummaryLevel.Fail
Msg.lout( self, "user" , "Performance Summary Item Commit Generate" )
if SysUtils.success( self.force_retcode ):
Msg.info( "Instructions: %d, Default: %d, Secondary: %d, Elapsed Time: %0.5f Seconds\n\n" % ( self.total, self.default, self.secondary, self.force_end - self.force_start ))
return 1
return 0
class PerformanceSummary( Summary ):
def __init__( self, arg_summary_path, arg_keep ):
        super().__init__( arg_summary_path, arg_keep )
self.gen_total = 0
self.gen_passed = 0
self.iss_total = 0
self.iss_passed = 0
self.task_total = 0
self.start_time = DateTime.Time()
def create_summary_item( self ):
return PerformanceSummaryItem( self )
def commit_item( self, arg_item ):
self.task_total += 1
        if arg_item.task_id not in self.tasks:
self.tasks[arg_item.task_id] = list()
self.tasks[ arg_item.task_id ].append( arg_item )
self.groups.add_item( arg_item )
my_results = arg_item.commit()
self.gen_total += my_results[0]
self.gen_passed += my_results[1]
def process_summary( self, sum_level = SummaryLevel.Fail ):
my_file_name = "%sperformance_summary.log" % ( PathUtils().include_trailing_path_delimiter( self.summary_dir ))
Msg.dbg( "Master Log File: %s" % ( my_file_name ))
my_utcdt = DateTime.UTCNow()
try:
# First try to open file
with open( my_file_name, "w" ) as my_ofile:
my_ofile.write( "Date: %s\n" % ( DateTime.DateAsStr( my_utcdt )))
my_ofile.write( "Time: %s\n" % ( DateTime.TimeAsStr( my_utcdt )))
self.process_errors( my_ofile )
my_total_count, my_total_elapsed = self.process_groups( my_ofile )
Msg.blank( "info" )
my_line = "Total Instructions Generated: %3d\n" % ( my_total_count )
my_line += "Total Elapsed Time: %0.3f\n" % ( my_total_elapsed )
my_line += "Overall Instructions per Second: %0.3f\n" % ( SysUtils.ifthen( bool( my_total_elapsed ), my_total_count / my_total_elapsed, 0 ))
Msg.info( my_line )
my_ofile.write( my_line )
except Exception as arg_ex:
Msg.error_trace()
Msg.err( "Error Processing Summary, " + str( arg_ex ) )
def process_groups( self, arg_ofile ):
my_total_count = 0
my_total_elapsed = 0
        my_groups = self.groups.task_groups()
# Msg.trace("PerformanceSummaryItem::process_groups")
for my_group, my_items in my_groups.items():
try:
my_str = "\nBegin Group: %s\n" % ( my_group )
arg_ofile.write( my_str )
Msg.blank( "info" )
Msg.info( my_str )
my_grp_count, my_grp_elapsed = self.process_group_items( arg_ofile, my_items )
my_total_count += my_grp_count
my_total_elapsed += my_grp_elapsed
my_line = "\nGroup Instructions: %3d\n" % ( my_grp_count )
my_line += "Group Elapsed Time: %0.3f\n" % ( my_grp_elapsed )
my_line += "Group Instructions per Second: %0.3f\n" % ( SysUtils.ifthen( bool( my_grp_elapsed ), my_grp_count / my_grp_elapsed, 0 ))
my_line += "End Group: %s\n" % ( my_group )
Msg.info( my_line )
arg_ofile.write( my_line )
except Exception as arg_ex:
Msg.error_trace()
Msg.err( "Unable to process, Group: %s, Reason: %s" % ( my_group, type( arg_ex )))
return my_total_count, my_total_elapsed
def process_group_items( self, arg_ofile, arg_items ):
# Msg.trace("PerformanceSummaryItem::process_group_items")
my_grp_count = 0
my_grp_elapsed = 0
try:
for my_item in arg_items:
my_item_elapsed = my_item.force_end - my_item.force_start
my_item_count = my_item.total
my_grp_elapsed += my_item_elapsed
my_grp_count += my_item_count
my_line = "\nTask: %s, Instructions: %d, Elapsed: %0.3f\n" % ( my_item.task_id, my_item_count, my_item_elapsed )
arg_ofile.write( my_line )
# Msg.dbg( my_line )
#end: for my_task in my_group["tasks"]:
except Exception as arg_ex:
Msg.error_trace()
Msg.err( "Error Processing Summary, Reason: %s" % ( str( arg_ex ) ))
return my_grp_count, my_grp_elapsed
```
#### File: regression/common/threads.py
```python
import threading
import sys
from threading import Thread
from common.sys_utils import SysUtils
from common.msg_utils import Msg
class HiThread( threading.Thread ):
def __init__( self, arg_options ):
        # A HiThread has two states. In state 1 there is no thread loop, so control does not need to be halted
        # to allow other thread processes to continue. In state 2 the thread executes a loop until the shutdown
        # sequence is invoked, which determines whether the thread (a) has more work to do, or (b) will wait for
        # another process to finish. A thread remains in the shutdown sequence until the terminate flag is set;
        # once the terminate flag is set, the thread executes the terminate sequence.
my_target = SysUtils.ifthen( arg_options.get( "noloop", False ), self.run_once, self.run_loop )
super().__init__( name = arg_options.get( "name", None ), target = my_target )
Msg.dbg( "Created Thread[%s] Processing...." % ( self.name ))
# useful Thread events
self.on_start = arg_options.get( "on-start" , None )
self.on_execute = arg_options.get( "on-execute" , None )
self.on_done = arg_options.get( "on-done" , None )
self.on_shutdown = arg_options.get( "on-shutdown", None )
self.on_finished = arg_options.get( "on-finished", None )
# thread flags
self.shutdown = False # shutdown request has been received
self.finished = False # thread has completed all work and has exited
        # the thread heartbeat is set to write a debug message every 30 seconds if the thread
        # is waiting. I do not like the term blocking because waiting is a more controlled event.
        # The waiting thread asks for permission to proceed rather than requiring the busy thread
        # to create a block. Blocks are notorious for causing deadlocks that force a less than graceful
        # shutdown
self.heartbeat_rate = arg_options.get( "heartbeat-rate", 30 )
self.sleep_period = arg_options.get( "sleep-period" , 1000 )
self.current_tick = 0
# not a good idea to use daemons as a blanket rule, makes bad designs seem to function
# but there are always underlying issues with these. The daemon attribute exists in the
# abstract as does the handler. Setting it here will ensure the thread acts as we expect
self.daemon = arg_options.get( "daemon", False )
if arg_options.get( "active", False ):
self.start_thread()
# perform any remaining initialization outside the thread space
# using a callback handler if initialized then starts the thread
def start_thread( self ):
if self.on_start is not None:
self.on_start( )
self.start()
# waits for the thread to exit then executes a notify callback if initialized
def wait_for( self ):
Msg.dbg( "Before Thread[%s] Join" % ( self.name ))
self.join()
Msg.dbg( "After Thread[%s] Join" % ( self.name ))
        # thread has finished its work, trigger notify thread done
if self.on_finished is not None:
self.on_finished( )
# general thread loop
def run_loop( self ):
Msg.info( "Entering HIThread[%s] Loop Processing...." % ( self.name ) )
while not self.terminated():
# initialize iteration for work performed in execute
Msg.dbg( "HiThread[%s]::run_loop(1)" % ( self.name ) )
if self.on_execute is not None:
Msg.dbg( "HiThread[%s]::run_loop(2)" % ( self.name ) )
self.on_execute( )
# perform the iteration work
Msg.dbg( "HiThread[%s]::run_loop(3)" % ( self.name ) )
self.execute()
            # finish work prior to the next iteration
Msg.dbg( "HiThread[%s]::run_loop(4)" % ( self.name ) )
if self.on_done is not None:
Msg.dbg( "HiThread[%s]::run_loop(5)" % ( self.name ) )
self.on_done( )
Msg.dbg( "HiThread[%s]::run_loop(6)" % ( self.name ) )
Msg.info( "Leaving HIThread[%s] Loop Processing...." % ( self.name ) )
# general thread execute
def run_once( self ):
Msg.dbg( "Entering Thread[%s] Processing...." % ( self.name ))
# initialize thread for work performed in execute
if self.on_execute is not None:
self.on_execute( )
# perform the thread work
self.execute()
# perform any remaining work prior to exit thread space
if self.on_done is not None:
self.on_done( )
Msg.dbg( "Leaving Thread[%s] Processing...." % ( self.name ))
# returns True once Thread has exited
def terminated( self ):
# Msg.info( "HiThread::terminated() - self.finished: %s, self.is_alive(): %s, returns: [%s]" % (str(self.finished),str(self.is_alive())),str(self.finished or ( not self.is_alive() )))
Msg.info( "HiThread[%s]::terminated() - self.finished: %s, self.is_alive(): %s" % (self.name, str(self.finished),str(self.is_alive())) )
        my_retval = self.finished or ( not self.is_alive())
        Msg.info( "HiThread[%s]::terminated() - returns: [%s]" % ( self.name, str( my_retval )))
        return my_retval
def heartbeat( self ):
        # increment the heartbeat; when debug messages are enabled, a heartbeat message will be
        # posted every self.heartbeat_rate ticks. Whenever the heartbeat method is called the current_tick
        # is updated
self.current_tick += 1
if not bool( self.current_tick % self.heartbeat_rate ):
Msg.dbg( "HiThread[%s] Still Working" % ( self.name ) )
        # the sleep in SysUtils uses milliseconds, as does the rest of the computing world, instead of
        # fractions of a second. This pauses the thread for self.sleep_period milliseconds (one second by
        # default), allowing the process thread some processor time
SysUtils.sleep(self.sleep_period)
return False
def trigger_shutdown( self ):
Msg.dbg( "HiThread[%s]::trigger_shutdown() - enter " % ( self.name ))
if self.on_shutdown is not None:
self.on_shutdown(self)
else:
self.shutdown = True
Msg.dbg( "HiThread[%s]::trigger_shutdown() - exit " % ( self.name ))
def execute( self ):
raise NotImplementedError( "Thread::execute() must be implemented" )
class HiOldThread( threading.Thread ):
def __init__( self, arg_create_active = False ):
super().__init__( name = "HiThread-01" )
# Ensure that all threads are killed when master thread is exited for any reason by marking it as a daemon
self.daemon = True
if arg_create_active:
self.start()
#pass
def run( self ):
pass
def HeartBeat( self ):
# Enable this if it's a good idea to have a periodic printing heartbeat
#Msg.dbg("[Thread %s]: Heartbeat" % (self.threadName))
pass
class HiEvent( threading.Event ):
def __init__( self, arg_options ):
super().__init__()
# default to return immediately
self.timeout = arg_options.get( "timeout" , None )
self.before_signal = arg_options.get( "before-sig" , None ) # use this to perform some action prior to setting event
self.after_signal = arg_options.get( "after-sig" , None ) # use this to perform some action after to setting event
self.before_unsignal = arg_options.get( "before-unsig" , None ) # use this to perform some action prior to unsetting event
self.after_unsignal = arg_options.get( "after-unsig" , None ) # use this to perform some action after to unsetting event
def Signal( self, arg_sender = None ):
# perform any activities prior to notification, this could include finishing some work
# that could make the system unstable. This is a callback that is part of the dictionary
# used to initialize
if self.before_signal is not None:
            self.before_signal( self )
# signal the event
self.set()
        # perform any activities once notification has been dispatched, this can be used to notify the caller the event has
        # been signaled
        # This is a callback that is part of the dictionary used to initialize
        if self.after_signal is not None:
            self.after_signal( self )
def Unsignal( self, arg_sender = None ):
# perform any activities prior to notification the event has been cleared and will block, this could include initializing to
# prevent system instability. This is a callback that is part of the dictionary used to initialize the Event
if self.before_unsignal is not None:
self.before_unsignal( self )
self.clear( )
if self.after_unsignal is not None:
self.after_unsignal( self )
def Signaled( self, arg_sender = None ):
return self.isSet()
def Reset( self, arg_sender = None ):
self.clear()
## {{{{ TTTTTTTT OOOOOO DDDDDD OOOOOO }}}}
## {{ TTTTTTTT OOOOOOOO DDDDDDD OOOOOOOO }}
## {{ TT OO OO DD DDD OO OO }}
## {{ TT OO OO DD DD OO OO }}
## {{ TT OO OO DD DDD OO OO }}
## {{ TT OOOOOOOO DDDDDDD OOOOOOOO }}
## {{{{ TT OOOOOO DDDDDD OOOOOO }}}}
##
## GET RID OF THIS INSANITY, replace with proper thread management
# This event is signalled when all the worker threads are completed.
workers_done_event = HiEvent({})
# Summary thread signals master thread that it's done
summary_done_event = HiEvent({})
class HiSemaphore( threading.Semaphore ):
def test( self ):
my_lock = threading.Lock()
# class HiMutex( threading.Lock ):
# def test( self ):
# pass
#
#
# class HiCriticalSection( threading.Lock ):
# pass
#
#
```
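The constructor comments above describe HiThread's two states (a single `run_once` pass versus a `run_loop` that continues until `terminated()` returns true) together with the callback and flag options it accepts. The sketch below shows the minimal subclass a caller would write: override `execute`, select the mode with the `noloop` option, and mark completion by setting `finished`. The class name, the `common.threads` import path, and the callback name are illustrative assumptions, not part of the original code.

```python
from common.threads import HiThread
from common.msg_utils import Msg

class OneShotWorker(HiThread):
    """Runs execute() exactly once because the thread is created with noloop=True."""

    def __init__(self):
        super().__init__({"name": "one-shot-worker",
                          "noloop": True,      # run_once is used instead of run_loop
                          "on-start": self.before_start,
                          "active": True})     # start_thread() is invoked immediately

    def before_start(self):
        Msg.info("about to start the one-shot worker")

    def execute(self):
        Msg.info("doing the one-shot work")
        self.finished = True   # lets terminated() report completion

worker = OneShotWorker()
worker.wait_for()              # joins the thread, then fires on-finished if one was supplied
```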
#### File: regression/executors/generate_executor.py
```python
from common.path_utils import PathUtils
from common.msg_utils import Msg
from common.sys_utils import SysUtils
from common.errors import *
from classes.control_item import ControlItem
from classes.executor import *
from executors.app_executor import *
# tuple index for information extracted from the generation log file
class GenerateResult( ProcessResult ):
gen_seed = 0
gen_total = 1
gen_secondary = 2
gen_default = 3
# dictionary keys for the generate result
class GenerateKeys( object ):
gen_retcode = "retcode"
gen_stdout = "stdout"
gen_stderr = "stderr"
gen_start = "start"
gen_end = "end"
gen_seed = "seed"
gen_total = "total"
gen_secondary = "secondary"
gen_default = "default"
gen_cmd = "command"
gen_log = "log"
gen_elog = "elog"
gen_app = "app"
gen_message = "message"
# Generate Executor abstract class
class GenerateExecutor( AppExecutor ):
def __init__( self ):
super().__init__()
def extract_results( self, arg_result, arg_log, arg_elog ):
Msg.user( "Suffix: %s" % ( str( self.ctrl_item.suffix )), "GENERATE")
if self.ctrl_item.suffix is not None:
self.rename_elfs( self.ctrl_item.suffix )
my_ret_code = int( arg_result[ GenerateResult.process_retcode ])
if SysUtils.success( my_ret_code ):
# extract information from the generate log
arg_elog = None
my_result, my_error = self.query_logs( arg_log, arg_elog )
Msg.user( "Process: %s" % ( str( arg_result )), "GENERATE")
Msg.user( "Log[%s]: %s" % ( str( arg_log ), str( my_result )), "GENERATE")
self.ctrl_item.seed = my_result[ GenerateResult.gen_seed ]
my_process_data = { GenerateKeys.gen_retcode : int(arg_result[ GenerateResult.process_retcode ])
, GenerateKeys.gen_stdout : str(arg_result[ GenerateResult.process_stdout ])
, GenerateKeys.gen_stderr : str(arg_result[ GenerateResult.process_stderr ])
, GenerateKeys.gen_start : str(arg_result[ GenerateResult.process_start ])
, GenerateKeys.gen_end : str(arg_result[ GenerateResult.process_end ])
, GenerateKeys.gen_seed : self.ctrl_item.seed
, GenerateKeys.gen_total : my_result[ GenerateResult.gen_total ]
, GenerateKeys.gen_secondary : my_result[ GenerateResult.gen_secondary]
, GenerateKeys.gen_default : my_result[ GenerateResult.gen_default ]
}
if my_error is not None:
my_process_data[ GenerateKeys.gen_message ] = my_error
return my_process_data
def rename_elfs( self, arg_suffix ):
# before proceeding it is necessary to remove any existing renamed files to eliminate the possibility of causing
# a chain and messing up the results
for my_mask in ["*.ELF","*.S","*.img"]:
my_match_files = PathUtils.list_files( my_mask )
Msg.lout( my_match_files, "user", "Simulate File List" )
for my_src_file in my_match_files:
Msg.user( "Match File: %s, Suffix: %s" % ( str( my_src_file ), str( arg_suffix )), "MATCH")
if SysUtils.found( my_src_file.find( "_%s_force" % ( str( arg_suffix )))):
PathUtils.remove( my_src_file )
continue
# Now rename all the files that are found
for my_mask in ["*.ELF","*.S","*.img"]:
my_match_files = PathUtils.list_files( my_mask )
for my_src_file in my_match_files:
my_tgt_file= my_src_file.replace( "_force", "_%s_force" % ( str( arg_suffix )))
PathUtils.rename( my_src_file, my_tgt_file )
# raise Exception("Type %s test case for base test: %s not found." % (arg_extension, arg_test_name))
return True
```
#### File: regression/launchers/std_launcher.py
```python
from common.sys_utils import SysUtils
from common.path_utils import PathUtils
from common.msg_utils import Msg
from common.datetime_utils import DateTime
from common.errors import *
from classes.launcher import Launcher
class StdLauncher( Launcher ):
def __init__( self, aSequenceRunner ):
super().__init__( aSequenceRunner )
self.process = None
# def __del__(self):
# Msg.user( "Launcher Id 5: %s" % ( str( id( self ))), "STD-LAUNCHER" )
# Msg.trace( )
# Msg.error_trace( )
#
def launch( self ):
Msg.user( "Timeout: %s" % ( str( self.timeout )), "STD-LAUNCHER" )
self.process_cmd, self.process_log = self.build()
Msg.user( "Process Cmd: %s, Process Log: %s" % ( str( self.process_cmd ), str( self.process_log )), "STD-LAUNCHER" )
Msg.user( "Launcher Id 1: %s" % ( str( id( self ))), "STD-LAUNCHER" )
# enable timeout but only trigger shutdown of spawned process allow that process to kill the child processes.
self.process_result = SysUtils.exec_process( self.process_cmd, self.process_log, self.process_log, self.timeout, False, self.set_process )
Msg.user( "Launcher Id 2: %s" % ( str( id( self ))), "STD-LAUNCHER" )
Msg.user( "Process Results: %s" % ( str( self.process_result )), "STD-LAUNCHER" )
def validate( self ):
pass
def set_process( self, arg_process ):
Msg.user( "Setting Process: %s" % ( str( arg_process )), "STD-LAUNCHER" )
Msg.user( "Launcher Id 3: %s" % ( str( id( self ))), "STD-LAUNCHER" )
self.process = arg_process
Msg.user( "Launcher Id 4: %s" % ( str( id( self ))), "STD-LAUNCHER" )
Msg.user( "Saved Process: %s" % ( str( self.process )), "STD-LAUNCHER" )
def terminate( self ):
Msg.user( "Terminating Process: %s" % ( str( self.process )), "STD-LAUNCHER" )
if self.process is not None:
self.process.kill()
def status( self ):
pass
```
#### File: utils/regression/quick_summary.py
```python
import os
import os.path
import sys, traceback
import datetime
from shared.msg_utils import Msg
from shared.path_utils import PathUtils
class SummaryLevel:
Silent = 0
Fail = 1
Any = 2
class QuickSummary( object ) :
def __init__( self, arg_summary_dir, arg_num_instr ):
self.summary_dir = arg_summary_dir
self.results = {}
self.total_count = 0
self.failed_count = 0
self.curr_test_id = None
self.curr_test_log = None
self.sim_cmd = None
self.num_instr = arg_num_instr
#self.test_path = arg_summary_path
def summary_directory( self ):
return self.summary_dir
def set_current_test( self, arg_test_id, arg_test_log, arg_sim_cmd ):
Msg.dbg( "RegressionSummary:: set_current_test( %s, %s, %s " % ( str( arg_test_id ), str( arg_test_log ), str( arg_sim_cmd)))
self.curr_test_id = arg_test_id
self.curr_test_log = arg_test_log
self.sim_cmd = arg_sim_cmd
def add_result( self, arg_ret_code ):
my_tup = None
self.total_count += 1
Msg.dbg( "Adding Simulation results, \"%s\"" % ( str( self.sim_cmd )))
if arg_ret_code != 0:
# only need to create a tuple with the return code
Msg.dbg( "Simulation Failed, return code: " + str( arg_ret_code ))
self.failed_count += 1
#my_tup = ( 1, arg_ret_code, self.curr_test_log, None, "FAIL", SysUtils.ifthen( self.out_dir is None, self.summary_dir, self.out_dir ))
my_tup = ( 1, arg_ret_code, self.curr_test_log, None, "FAIL" )
else:
my_lines = None
try:
Msg.dbg( "Attempting to Open: " + self.curr_test_log )
with open( self.curr_test_log, "r" ) as my_log:
Msg.dbg( "File Open: " + self.curr_test_log )
my_lines = my_log.readlines()
Msg.dbg( "Line %d: %s" % ( len( my_lines ), my_lines[-1]))
my_lastline = my_lines[-1]
my_segs = my_lastline.split()
my_num_instr = int( my_segs[0] )
if my_num_instr < self.num_instr:
# Simulation worked return code = 0 and the instruction limit was not reached
Msg.dbg( "Simulation Success, return code = 0 and the instruction limit was not reached" )
# my_tup = ( 0, arg_ret_code, self.curr_test_log, None, "PASS", SysUtils.ifthen( self.out_dir is None, self.summary_dir, self.out_dir ) )
my_tup = ( 0, arg_ret_code, self.curr_test_log, None, "PASS" )
else:
#
Msg.dbg( "Simulation Failed, Instruction Limit Reached ")
self.failed_count += 1
#my_tup = ( 2, arg_ret_code, self.curr_test_log, "Instruction Limit Reached: Failed at " + str( self.num_instr ) + " Instructions ", "FAIL", SysUtils.ifthen( self.out_dir is None, self.summary_dir, self.out_dir ) )
my_tup = ( 2, arg_ret_code, self.curr_test_log, "Instruction Limit Reached: Failed at " + str( self.num_instr ) + " Instructions ", "FAIL" )
except:
if Msg._debug():
traceback.print_exc( file=sys.stdout )
my_tup = ( arg_ret_code, "Unsupported", "Unable to Extract Test Failure Information", "FAIL", " " )
            finally:
                # the "with" statement already closes the log file
                pass
self.results[ self.curr_test_id ] = my_tup
Msg.dbg( "Results Tuple: " + str( my_tup ))
self.curr_test_id = None
self.curr_test_log = None
    # deprecated - use view() for new work
def summarize( self, sum_level = None ):
self.view( sum_level )
def view( self, sum_level = SummaryLevel.Fail ):
# Instruction Over Flow Failure Count
# print( "Regression::view() " )
from datetime import date, time, datetime, timedelta, timezone, tzinfo
my_utcdt = datetime.utcnow()
my_file_name = PathUtils().include_trailing_path_delimiter( self.summary_dir ) \
+ "regression_summary_" \
+ str( my_utcdt.year ) \
+ str( my_utcdt.month ) \
+ str( my_utcdt.day ) \
+ "-" \
+ str( my_utcdt.hour ) \
+ str( my_utcdt.minute ) \
+ str( my_utcdt.second ) \
+ ".log"
print ( my_file_name )
try:
my_ofile = None
myLines = []
# First try to open file
with open( my_file_name, "w" ) as my_ofile:
my_ofile.write( "Date: " + str( my_utcdt.date()) + "\n" )
my_ofile.write( "Time: " + str( my_utcdt.time()) + "\n" )
print( "\n\n" )
for my_key, my_val in self.results.items():
# print( str( my_val ))
my_line = my_val[4] + " - Test Name: " + my_key + ", Return Code: " + str( my_val[1] ) + ", Log File: " + my_val[2]
if my_val[3]:
my_line += my_val[3]
my_ofile.write( my_line + "\n" )
if sum_level > 2:
print( my_line )
elif sum_level == 1 and my_val[0] in [2]:
print( my_line )
elif sum_level == 2 and my_val[0] in [1, 2]:
print( my_line )
my_ofile.write( "Total Simulations: " + str( self.total_count ) + "\n" )
my_ofile.write( "Total Fails: " + str( self.failed_count ) + "\n" )
if self.total_count > 0:
my_ofile.write( "Success Rate: " + "{0:.2f}".format( 100 * ( self.total_count - self.failed_count ) / self.total_count ) + "%\n" )
else:
my_ofile.write( "Success Rate: 0.00%\n" )
my_ofile.write( "Test Suite Complete\n" )
print( "Total Simulations: " + str( self.total_count ) + "\n")
print( "Total Fails: " + str( self.failed_count )+ "\n")
if self.total_count > 0:
print( "Success Rate: " + "{0:.2f}".format( 100 * ( self.total_count - self.failed_count ) / self.total_count ) + "%" )
else:
print( "Success Rate: 0.00%" )
print( "Test Suite Complete\n" )
except Exception as arg_ex:
if Msg._debug():
traceback.print_exception( "Exception", arg_ex, None )
print( "Error Processing Summary, " + str( arg_ex ))
finally:
pass
```
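QuickSummary above derives pass/fail from the simulator return code and, for a return code of 0, from the instruction count parsed out of the first token of the last line of the test log. A minimal driver is sketched below; the import path, directory, file names, and instruction limit are illustrative assumptions.

```python
from quick_summary import QuickSummary, SummaryLevel

# summary directory (assumed to exist) and per-test instruction limit are illustrative values
summary = QuickSummary("./summary", 10000)

# register the test being simulated, then record its outcome;
# a return code of 0 makes add_result() parse the log to check the instruction limit
summary.set_current_test("test_001", "./summary/test_001.log", "simulate test_001.elf")
summary.add_result(0)

# write regression_summary_<timestamp>.log under ./summary and echo selected results
summary.view(SummaryLevel.Fail)
```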
#### File: tests/smoke_check/test_event.py
```python
from unit_test import UnitTest
from shared.kernel_objs import HiEvent
from shared.path_utils import PathUtils
from shared.sys_utils import SysUtils
from shared.msg_utils import Msg
from shared.errors import *
from test_classes import *
# "pre-sig" - callback triggered before state is changed to signaled
# "post-sig" - callback triggered after state is changed to signaled
# "pre-unsig" - callback triggered before state is changed to unsignaled
# "post-unsig" - callback triggered after state is changed to unsignaled
class EventWrapper( object ):
def __init__( self, arg_name ):
self.name = arg_name
self.timeout = 1000 # milliseconds
self.event = HiEvent( { "name" : arg_name
, "timeout" : self.timeout
, "pre-sig" : self.on_before_signal
, "post-sig" : self.on_after_signal
, "pre-unsig" : self.on_before_unsignal
, "post-unsig": self.on_after_unsignal
} )
        self.event.Unsignal() # confirm that the event is in an unsignaled state
def on_before_signal( self, arg_sender ):
if type( arg_sender ) is not HiEvent:
raise EIncompatableTypeError( "Event Notification Contained Incompatable Type" )
Msg.info( "Event[%s] is about to be signaled ... " % ( arg_sender.name ))
def on_after_signal( self, arg_sender ):
if type( arg_sender ) is not HiEvent:
raise EIncompatableTypeError( "Event Notification Contained Incompatable Type" )
Msg.info( "Event[%s] has been signaled ... " % ( arg_sender.name ))
def on_before_unsignal( self, arg_sender ):
if type( arg_sender ) is not HiEvent:
raise EIncompatableTypeError( "Event Notification Contained Incompatable Type" )
Msg.info( "Event[%s] is about to be unsignaled ... " % ( arg_sender.name ))
def on_after_unsignal( self, arg_sender ):
if type( arg_sender ) is not HiEvent:
raise EIncompatableTypeError( "Event Notification Contained Incompatable Type" )
Msg.info( "Event[%s] has been unsignaled ... " % ( arg_sender.name ))
# HiEvent methods
# Signal() - set the state of the event to signaled
# Unsignal() - set the state of the event to unsignaled
# Signaled() - check to see if Event is signaled
# WaitFor() - Pause thread until this returns. Returns True if Signaled, returns False if a timeout was
# specified and the specified time elapsed
#
# HiEvent event callback process
# "pre-sig" - callback triggered before state is changed to signaled
# "post-sig" - callback triggered after state is changed to signaled
# "pre-unsig" - callback triggered before state is changed to unsignaled
# "post-unsig" - callback triggered after state is changed to unsignaled
#
# HiEvent default option
# self.timeout = arg_options.get( "timeout", None )
class ThreadWrapper( object ):
def __init__( self ):
self.start_event = EventWrapper( "start-event" ).event
self.execute_event = EventWrapper( "execute-event" ).event
self.done_event = EventWrapper( "done-event" ).event
self.finished_event = EventWrapper( "finished-event" ).event
self.init_thread()
def init_thread( self ):
Msg.info( "HiEvent: Creating Test Thread ..." )
myThreadProcs = { "on-start" : self.thread_start # start thread sequence (outside thread space)
, "on-execute" : self.thread_execute # thread termination handler (inside thread space)
, "on-done" : self.thread_done # thread terminated handler (outside thread space)
, "on-finished": self.thread_finished # thread before finished handler (inside thread space)
}
self.thread = ThreadFactory( "EventTestThread", True, myThreadProcs )
def run_thread( self ):
Msg.info( "UnitTest_HiEvent >> Initializing Thread ... " )
self.thread.start_thread()
# wait for thread to terminate
self.thread.wait_for()
Msg.info( "UnitTest_HiEvent >> Thread Completed ... " )
def thread_start( self ):
Msg.info( "UnitTest_NoLoopThread >> Started .... " )
if self.start_event is not None:
self.start_event.Signaled()
self.start_event.Signal()
self.start_event.Signaled( True )
def thread_execute( self ):
Msg.info( "UnitTest_NoLoopThread << Executing .... " )
if self.execute_event is not None:
self.execute_event.Signaled()
self.execute_event.Signal()
self.execute_event.Signaled( True )
def thread_done ( self ):
Msg.info( "UnitTest_NoLoopThread << Execute Done .... " )
if self.done_event is not None:
self.done_event.Signaled()
self.done_event.Signal()
self.done_event.Signaled( True )
def thread_finished( self ):
Msg.info( "UnitTest_NoLoopThread >> Exiting .... " )
if self.finished_event is not None:
self.finished_event.Signaled()
self.finished_event.Signal()
self.finished_event.Signaled( True )
class UnitTest_HiEvent( UnitTest ):
    # need a separate
def run_test( self ):
Msg.info( "HiEvent: Start Unit Test ..." )
my_wrapper = ThreadWrapper()
my_wrapper.run_thread()
    def process_result( self ):
        Msg.info( "HiEvent: Process Test Result ..." )
```
#### File: utils/shared/file_controller.py
```python
import sys
from shared.controller import Controller
from shared.task_controller import TaskController
from shared.summary_core import SummaryErrorQueueItem
from shared.control_item import ControlItem, CtrlItmKeys, ControlItemType
from shared.path_utils import PathUtils
from shared.sys_utils import SysUtils
from shared.msg_utils import Msg
# from shared.errors import EInvalidTypeError
class FileController(Controller):
def __init__( self ):
super().__init__( )
# Msg.dbg( "FileController::__init__( )" )
def load(self, arg_ctrl_item ):
super().load( arg_ctrl_item )
# Msg.dbg( "FileController::load()" )
self.parent_fctrl = self.ctrl_item.file_path()
# Msg.dbg( "File Path: %s" % ( self.parent_fctrl ))
#my_content = open( self.parent_fctrl ).read()
# Msg.dbg( "\n" + my_content )
try:
my_content = open( self.parent_fctrl ).read()
except Exception as arg_ex:
Msg.err( "Message: %s, Control File Path: %s" % ( str( arg_ex ), self.parent_fctrl ))
my_err_queue_item = SummaryErrorQueueItem( { "error" : arg_ex
, "message": "Control File Not Found ..."
, "path" : self.ctrl_item.file_path()
, "type" : str( type( arg_ex ))
} )
self.ctrl_item.summary().queue.enqueue( my_err_queue_item )
return False
finally:
pass
try:
my_glb, my_loc = SysUtils.exec_content( my_content )
except Exception as arg_ex:
my_exc_type, my_exc_val, my_exc_tb = sys.exc_info()
my_ex = arg_ex
Msg.err( "Message: %s, Control File Path: %s" % ( str( arg_ex ), self.parent_fctrl))
Msg.blank()
my_err_queue_item = SummaryErrorQueueItem( { "error" : arg_ex
, "message": "Control File not processed..."
, "path" : self.ctrl_item.file_path()
, "type" : str( my_exc_type )
} )
self.ctrl_item.summary().queue.enqueue( my_err_queue_item )
return False
finally:
pass
self.fcontrol = my_loc["control_items"]
return True
def process(self):
# Msg.dbg( "FileController::process()" )
# Msg.dbg( "FileController Contents: \n\"" + str( self.fcontrol ) + "\n" )
try:
# Msg.lout( my_options["work-dir"], MsgLevel.dbg, "Work Director Stack" )
# a control file may iterate through to completion according to the amount set in num_runs
for my_ndx in range( self.ctrl_item.iterations ):
# my_usr_lbl = Msg.set_label( "user", "FILE-ITERATION" )
# Msg.user( "Executing %d of %d Iterations, Control File: %s" % ( my_ndx + 1, self.ctrl_item.iterations, self.ctrl_item.file_path() ))
# Msg.set_label( "user", my_usr_lbl )
try:
my_item_ndx = 0
# each item in the control set exists as a dictionary
for my_item_dict in self.fcontrol:
try:
my_item_ndx += 1
my_usr_lbl = Msg.set_label( "user", "CTRL-FILE" )
Msg.blank( "user" )
Msg.user( "Processing Line: %s" % (str( my_item_dict )))
my_ctrl_item = ControlItem()
my_ctrl_item.parent_fctrl = self.parent_fctrl
my_ctrl_item.fctrl_item = str( my_item_dict )
my_item_dict[CtrlItmKeys.parent_vals], my_parent_data = self.ctrl_item.values()
Msg.lout( my_parent_data, "user", "Result Parent Data" )
Msg.lout( my_item_dict , "user", "Result Item Dictionary" )
Msg.set_label( "user", my_usr_lbl )
# Msg.user( "Processing Updated Line: %s" % (str( my_item_dict )), "CTRL-FILE" )
# Msg.user( str( my_parent_data.data() ), "CTRL-FILE" )
my_ctrl_item.load( my_item_dict, my_parent_data )
my_item_type = my_ctrl_item.item_type()
my_controller = None
if my_item_type == ControlItemType.TaskItem:
# Msg.dbg( "\nControl Item is a Control Task ..." )
my_controller = TaskController()
elif my_item_type == ControlItemType.FileItem:
# Msg.dbg( "\nControl Item is a Control File ..." )
my_controller = FileController()
else:
raise Exception( "\"" + my_fctrl_name + "\": Unknown Item Type ...\nUnable to Process ... " )
# Whew! everything is set up and ready to rock and roll, let the dogs out
# my_controller.load( my_ctrl_item )
# my_controller.process()
if my_controller.load( my_ctrl_item ):
my_controller.process()
# my_controller.load( my_ctrl_item )
# my_controller.process()
# Msg.dbg( "%s: Controller Creation Complete" % ( my_ctrl_item.fctrl_name ))
except TypeError as arg_ex:
Msg.err( str( arg_ex ))
my_err_queue_item = SummaryErrorQueueItem( { "error" : "Item #%s Contains an Invalid Type" % ( str( my_item_ndx ))
, "message": arg_ex
, "path" : self.ctrl_item.file_path()
, "type" : str( type( arg_ex ))
} )
self.ctrl_item.summary().queue.enqueue( my_err_queue_item )
Msg.blank()
except FileNotFoundError as arg_ex:
Msg.err( str( arg_ex ))
my_err_queue_item = SummaryErrorQueueItem( { "error" : arg_ex
, "message": "Control File Not Found ..."
, "path" : self.ctrl_item.file_path()
, "type" : str( type( arg_ex ))
} )
self.ctrl_item.summary().queue.enqueue( my_err_queue_item )
Msg.blank()
except Exception as arg_ex:
Msg.error_trace( str(arg_ex) )
Msg.err( str(arg_ex))
Msg.blank()
finally:
my_controller = None
my_item_dict = None
except Exception as arg_ex:
Msg.error_trace( "[ERROR] - " + str(arg_ex) )
Msg.err( str(arg_ex))
finally:
pass
finally:
pass
# Msg.dbg()
return True
```
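FileController.load() above executes the control file with SysUtils.exec_content() and then reads the `control_items` name from the resulting locals, so a control file is ordinary Python source that defines that list. The placeholder sketch below only illustrates that shape; the keys of each item dictionary come from CtrlItmKeys and are not shown in this listing, so they are deliberately left as a placeholder rather than invented here.

```python
# Minimal shape of a control file consumed by FileController (keys intentionally omitted).
control_items = [
    # { <CtrlItmKeys entries describing a task item or a nested control file> },
]
```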
#### File: utils/shared/threads.py
```python
import threading
import sys
class HiThread( threading.Thread ):
def __init__( self, arg_create_active = False ):
super().__init__( name = "HiThread-01" )
# Ensure that all threads are killed when master thread is exited for any reason by marking it as a daemon
self.daemon = True
if arg_create_active:
self.start()
#pass
def run( self ):
pass
def HeartBeat( self ):
# Enable this if it's a good idea to have a periodic printing heartbeat
#Msg.dbg("[Thread %s]: Heartbeat" % (self.threadName))
pass
# def do_onsig( ... ):
#     # do stuff after the event is signaled
#
# def do_beforeunsig( ... ):
#     # do stuff before the event is unsignaled
#
# def do_onpreunsig( ... ):
#     # do stuff before unsignaling
# my_event = HiEvent( {"timeout": 5000, "post-sig":do_onsig, "pre-unsig": do_beforeunsig } )
# these can also be created afterwards
class HiEvent( threading.Event ):
def __init__( self, arg_options ):
super().__init__()
# default to return immediately
self.timeout = arg_options.get( "timeout" , None )
self.pre_signal = arg_options.get( "pre-sig" , None )
self.post_signal = arg_options.get( "post-sig" , None )
self.pre_unsignal = arg_options.get( "pre-unsig" , None )
self.post_unsignal = arg_options.get( "post-unsig", None )
def Signal( self, arg_stat = None ):
# perform any activities prior to notification, this could include finishing some work
# that could make the system unstable. This is a callback that is part of the dictionary
# used to initialize
if self.pre_signal is not None:
self.pre_signal( arg_stat )
# signal the event
self.set()
        # perform any activities once notification has been dispatched, this can be used to notify the caller the event has
        # been signaled
# This is a callback that is part of the dictionary used to initialize
if self.post_signal is not None:
self.post_signal( arg_stat )
def Unsignal( self, arg_stat = None ):
# perform any activities prior to notification the event has been cleared and will block, this could include initializing to
# prevent system instability. This is a callback that is part of the dictionary used to initialize the Event
if self.pre_unsignal is not None:
self.pre_unsignal( arg_stat )
self.clear( )
if self.post_unsignal is not None:
self.post_unsignal( arg_stat )
# This event is signalled when all the worker threads are completed.
workers_done_event = HiEvent({})
# Summary thread signals master thread that it's done
summary_done_event = HiEvent({})
class HiSemaphore( threading.Semaphore ):
def test( self ):
my_lock = threading.Lock()
# class HiMutex( threading.Lock ):
# def test( self ):
# pass
#
#
# class HiCriticalSection( threading.Lock ):
# pass
#
#
```
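HiEvent above is a thin wrapper over threading.Event whose Signal and Unsignal methods bracket set() and clear() with the optional callbacks named in the constructor dictionary. The sketch below shows the intended hand-off between a worker thread and the thread that waits on the event; the `shared.threads` import path, the callback name, and the worker are illustrative assumptions.

```python
import threading
import time

from shared.threads import HiEvent

def report_done(status=None):
    # "post-sig" callback: runs immediately after the event is set
    print("worker reported done, status:", status)

work_done = HiEvent({"timeout": 5000, "post-sig": report_done})

def worker():
    time.sleep(0.1)         # stand-in for real work
    work_done.Signal("ok")  # set() plus the post-sig callback

threading.Thread(target=worker).start()
work_done.wait()            # plain threading.Event wait, inherited
work_done.Unsignal()        # clear() so the event can be reused
```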
|
{
"source": "jeremybergen/csci000-astudent",
"score": 4
}
|
#### File: egypt/test/egyptunittest.py
```python
import unittest
import sys
sys.path.append("..") # append parent folder into the system path
import egypt
class Test(unittest.TestCase):
# optional - executes before runing each test function
def setUp(self):
print('Running unittest on egypt module')
self.input1 = [8, 6, 10]
self.input2 = [5, 4, 3]
self.input3 = [5, 12, 13]
self.input4 = [1, 2, 3]
self.input5 = [2000, 100, 30000]
def testAnswer1(self):
expect = "right"
result = egypt.answer(self.input1)
self.assertEqual(expect, result)
def testAnswer2(self):
expect = "right"
result = egypt.answer(self.input2)
self.assertEqual(expect, result)
def testAnswer3(self):
expect = "right"
result = egypt.answer(self.input3)
self.assertEqual(expect, result)
def testAnswer4(self):
expect = "wrong"
result = egypt.answer(self.input4)
self.assertEqual(expect, result)
def testAnswer5(self):
expect = "wrong"
result = egypt.answer(self.input5)
self.assertEqual(expect, result)
def tearDown(self):
# optional - executes after each test function
print('Done running unittest')
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "JeremyBernier/airflow",
"score": 2
}
|
#### File: airflow/example_dags/example_papermill_operator.py
```python
import os
from datetime import timedelta
import scrapbook as sb
import airflow
from airflow.lineage import AUTO
from airflow.models import DAG
from airflow.operators.papermill_operator import PapermillOperator
from airflow.operators.python_operator import PythonOperator
def check_notebook(inlets, execution_date):
"""
Verify the message in the notebook
"""
notebook = sb.read_notebook(inlets[0].url)
message = notebook.scraps['message']
print(f"Message in notebook {message} for {execution_date}")
if message.data != f"Ran from Airflow at {execution_date}!":
return False
return True
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(2)
}
dag = DAG(
dag_id='example_papermill_operator', default_args=args,
schedule_interval='0 0 * * *',
dagrun_timeout=timedelta(minutes=60))
run_this = PapermillOperator(
task_id="run_example_notebook",
dag=dag,
input_nb=os.path.join(os.path.dirname(os.path.realpath(__file__)),
"input_notebook.ipynb"),
output_nb="/tmp/out-{{ execution_date }}.ipynb",
parameters={"msgs": "Ran from Airflow at {{ execution_date }}!"}
)
check_output = PythonOperator(
task_id='check_out',
python_callable=check_notebook,
dag=dag,
inlets=AUTO)
check_output.set_upstream(run_this)
if __name__ == "__main__":
dag.cli()
```
|
{
"source": "JeremyBloom/Optimization-Examples",
"score": 3
}
|
#### File: JeremyBloom/Optimization-Examples/Tableau.py
```python
__author__ = 'bloomj'
from OPLCollector import *
from Optimizer import *
try:
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, Row, functions
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType
except ImportError as e:
print ("Error importing Spark Modules", e)
sys.exit(1)
class Tableau:
"""
    Demonstrates equivalence between OPL and SQL using Spark and the Warehousing example.
The context and theory of this class are presented in an IBM DSX sample notebook entitled
Optimization Modeling and Relational Data (which uses the code from this file).
"""
def controller(self):
# Create the warehousing data model
networkDataModel = ADMBuilder() \
.addSchema("warehouses", buildSchema(
("location", StringType()),
("fixedCost", DoubleType()),
("capacityCost", DoubleType()))) \
.addSchema("routes", buildSchema(
("location", StringType()),
("store", StringType()),
("shippingCost", DoubleType()))) \
.addSchema("stores", buildSchema(
("storeId", StringType()))) \
.addSchema("mapCoordinates", buildSchema(
("location", StringType()),
("lon", DoubleType()),
("lat", DoubleType()))) \
.build()
demandDataModel = ADMBuilder() \
.addSchema("demands", buildSchema(
("store", StringType()),
("scenarioId", StringType()),
("amount", DoubleType()))) \
.addSchema("scenarios", buildSchema(
("id", StringType()),
("totalDemand", DoubleType()),
("periods", DoubleType()))) \
.build()
warehousingResultDataModel = ADMBuilder() \
.addSchema("objectives", buildSchema(
("problem", StringType()),
("dExpr", StringType()),
("scenarioId", StringType()),
("iteration", IntegerType()),
("value", DoubleType()))) \
.addSchema("openWarehouses", buildSchema(
("location", StringType()),
("scenarioId", StringType()),
("iteration", IntegerType()),
("open", IntegerType()),
("capacity", DoubleType()))) \
.addSchema("shipments", buildSchema(
("location", StringType()),
("store", StringType()),
("scenarioId", StringType()),
("iteration", IntegerType()),
("amount", DoubleType()))) \
.build()
# Note: the "MapCoordinates table and the "scenarioId" and "iteration" fields are not used in this notebook but are included for use in other contexts. URL
credentials_1= {}
networkDataSource = getFromObjectStorage(credentials_1, filename="Warehousing-data.json")
demandDataSource = getFromObjectStorage(credentials_1, filename="Warehousing-sales_data-nominal_scenario.json")
warehousingData = OPLCollector("warehousingData", networkDataModel).setJsonSource(networkDataSource).fromJSON()
warehousingData.addTables(OPLCollector("demandData", demandDataModel).setJsonSource(demandDataSource).fromJSON())
warehousingData.displayTable("warehouses", sys.stdout)
#Create the tableau data model
tableauData= OPLCollector("tableauData")
tableauADMBuilder= tableauData.buildADM()
tableauADMBuilder.addSchema("integerColumns", buildSchema(
("variable", StringType()),
("lower", IntegerType()),
("upper", IntegerType()),
("value", IntegerType())))
tableauADMBuilder.addSchema("booleanColumns", SchemaBuilder()\
.copyFields(tableauADMBuilder.referenceSchema("integerColumns"))\
.buildSchema())
tableauADMBuilder.addSchema("floatColumns", buildSchema(
("variable", StringType()),
("lower", DoubleType()),
("upper", DoubleType()),
("value", DoubleType())))
tableauADMBuilder.addSchema("rows", buildSchema(
("cnstraint", StringType()),
("sense", StringType()),
("rhs", DoubleType())))
tableauADMBuilder.addSchema("entries", buildSchema(
("cnstraint", StringType()),
("variable", StringType()),
("coefficient", DoubleType())))
tableauADMBuilder.addSchema("objectives", buildSchema(
("name", StringType()),
("sense", StringType()),
("value", DoubleType())))
tableauDataModel = tableauADMBuilder.build()
# Create the data model to transform the warehousing data into the tableau
tableauTransformations = OPLCollector("tableauTransformations")
tableauTransformationsADMBuilder = tableauTransformations.buildADM()
tableauTransformationsADMBuilder.addSchema("columns_open", SchemaBuilder()\
.copyFields(tableauData.getADM().get("booleanColumns"))\
.addField("location", StringType())\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("columns_capacity", SchemaBuilder()\
.copyFields(tableauData.getADM().get("floatColumns"))\
.addField("location", StringType())\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("columns_ship", SchemaBuilder()\
.copyFields(tableauData.getADM().get("floatColumns")) \
.addField("location", StringType())\
.addField("store", StringType())\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("rows_ctCapacity", SchemaBuilder()\
.copyFields(tableauData.getADM().get("rows"))\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("rows_ctDemand", SchemaBuilder()\
.copyFields(tableauData.getADM().get("rows"))\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("rows_ctSupply", SchemaBuilder()\
.copyFields(tableauData.getADM().get("rows"))\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("rows_dexpr", SchemaBuilder()\
.copyFields(tableauData.getADM().get("rows"))\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("entries_ctCapacity_capacity", SchemaBuilder()\
.copyFields(tableauData.getADM().get("entries"))\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("entries_ctCapacity_ship", SchemaBuilder()\
.copyFields(tableauData.getADM().get("entries"))\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("entries_ctDemand_ship", SchemaBuilder()\
.copyFields(tableauData.getADM().get("entries"))\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("entries_ctSupply_open", SchemaBuilder()\
.copyFields(tableauData.getADM().get("entries"))\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("entries_ctSupply_ship", SchemaBuilder()\
.copyFields(tableauData.getADM().get("entries"))\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("entries_dexpr_open", SchemaBuilder()\
.copyFields(tableauData.getADM().get("entries"))\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("entries_dexpr_capacity", SchemaBuilder()\
.copyFields(tableauData.getADM().get("entries"))\
.buildSchema())
tableauTransformationsADMBuilder.addSchema("entries_dexpr_ship", SchemaBuilder()\
.copyFields(tableauData.getADM().get("entries"))\
.buildSchema())
tableauTransformationsADMBuilder.build()
tableauTransformer = tableauTransformations.buildData()
# Create input dataframes
warehouses = warehousingData.getTable("warehouses")
stores = warehousingData.getTable("stores")
routes = warehousingData.getTable("routes")
demands = warehousingData.getTable("demands")
scenarios = warehousingData.getTable("scenarios")
scenarioId = scenarios.first()["id"]
# Encode the columns
tableauTransformer.addTable("columns_open",
warehouses.select("location")\
.withColumn("variable", functions.concat(functions.lit("open_"), warehouses["location"]))\
.withColumn("upper", functions.lit(1))\
.withColumn("lower", functions.lit(0))\
.withColumn("value", functions.lit(0)))
tableauTransformer.addTable("columns_capacity",
warehouses.select("location")\
.withColumn("variable", functions.concat(functions.lit("capacity_"), warehouses["location"]))\
.withColumn("upper", functions.lit(1.0e20))\
.withColumn("lower", functions.lit(0.0))\
.withColumn("value", functions.lit(0.0)))
tableauTransformer.addTable("columns_ship",
routes.select("location", "store")\
.withColumn("variable", functions.concat(functions.lit("ship_"), routes["location"], functions.lit("_"),
routes["store"]))\
.withColumn("upper", functions.lit(1.0))\
.withColumn("lower", functions.lit(0.0))\
.withColumn("value", functions.lit(0.0)))
# Encode the Constraints
tableauTransformer.addTable("rows_ctCapacity",
warehouses.select("location")\
.withColumn("cnstraint", functions.concat(functions.lit("ctCapacity_"), warehouses["location"]))\
.withColumn("sense", functions.lit("GE"))\
.withColumn("rhs", functions.lit(0.0)))
tableauTransformer.addTable("rows_ctDemand",
stores.select("storeId")\
.withColumn("cnstraint", functions.concat(functions.lit("ctDemand_"), stores["storeId"]))\
.withColumn("sense", functions.lit("GE"))\
.withColumn("rhs", functions.lit(1.0))\
.withColumnRenamed("storeId", "store"))
tableauTransformer.addTable("rows_ctSupply",
routes.select("location", "store")\
.withColumn("cnstraint", functions.concat(functions.lit("ctSupply_"), routes["location"], functions.lit("_"),
routes["store"]))\
.withColumn("sense", functions.lit("GE"))\
.withColumn("rhs", functions.lit(0.0)))
tableauTransformer.addTable("rows_dexpr",
SPARK_SESSION.createDataFrame(
[ Row(cnstraint= "capitalCost", sense= "dexpr", rhs= 0.0),
Row(cnstraint= "operatingCost", sense= "dexpr", rhs= 0.0),
Row(cnstraint= "totalCost", sense= "dexpr", rhs= 0.0)],
tableauTransformations.getADM().get("rows_dexpr"))\
.select("cnstraint", "sense", "rhs")) #orders the columns properly
# Reshape the Coefficient Data into the Tableau
tableauTransformer.addTable(
"entries_ctCapacity_capacity",
tableauTransformer.referenceTable("rows_ctCapacity")\
.join(tableauTransformer.referenceTable("columns_capacity"), "location")\
.select("cnstraint", "variable")\
.withColumn("coefficient", functions.lit(1.0)))
# demand at the store at the end of each route
demandOnRoute = routes.join(
demands.where(demands["scenarioId"] == functions.lit(scenarioId)), "store")\
.select("location", "store", "amount").withColumnRenamed("amount", "demand")
tableauTransformer.addTable(
"entries_ctCapacity_ship",
tableauTransformer.referenceTable("rows_ctCapacity")\
.join(tableauTransformer.referenceTable("columns_ship"), "location")\
.join(demandOnRoute, ["location", "store"])\
.withColumn("coefficient", -demandOnRoute["demand"])\
.select("cnstraint", "variable", "coefficient"))
tableauTransformer.addTable(
"entries_ctDemand_ship",
tableauTransformer.referenceTable("rows_ctDemand")\
.join(tableauTransformer.referenceTable("columns_ship"), "store")\
.select("cnstraint", "variable")\
.withColumn("coefficient", functions.lit(1.0)))
tableauTransformer.addTable(
"entries_ctSupply_open",
tableauTransformer.referenceTable("rows_ctSupply")\
.join(tableauTransformer.referenceTable("columns_open"), "location")\
.select("cnstraint", "variable")\
.withColumn("coefficient", functions.lit(1.0)))
tableauTransformer.addTable(
"entries_ctSupply_ship",
tableauTransformer.referenceTable("rows_ctSupply")\
.join(tableauTransformer.referenceTable("columns_ship"), ["location", "store"])\
.select("cnstraint", "variable")\
.withColumn("coefficient", functions.lit(-1.0)))
rows_dexpr = tableauTransformer.referenceTable("rows_dexpr")
tableauTransformer.addTable(
"entries_dexpr_open",
(rows_dexpr.where((rows_dexpr["cnstraint"] == functions.lit("capitalCost"))\
| (rows_dexpr["cnstraint"] == functions.lit("totalCost"))))\
.join(tableauTransformer.referenceTable("columns_open")\
.join(warehouses, "location"), how="cross")\
.select("cnstraint", "variable", "fixedCost")\
.withColumnRenamed("fixedCost", "coefficient"))
tableauTransformer.addTable(
"entries_dexpr_capacity",
(rows_dexpr.where((rows_dexpr["cnstraint"] == functions.lit("capitalCost"))\
| (rows_dexpr["cnstraint"] == functions.lit("totalCost"))))\
.join(tableauTransformer.referenceTable("columns_capacity")\
.join(warehouses, "location"), how="cross")\
.select("cnstraint", "variable", "capacityCost")\
.withColumnRenamed("capacityCost", "coefficient"))
tableauTransformer.addTable(
"entries_dexpr_ship",
(rows_dexpr.where((rows_dexpr["cnstraint"] == functions.lit("operatingCost"))\
| (rows_dexpr["cnstraint"] == functions.lit("totalCost"))))\
.join(
(tableauTransformer.referenceTable("columns_ship")\
.join((routes.join(demandOnRoute, ["location", "store"])\
.withColumn("coefficient", demandOnRoute["demand"] * routes["shippingCost"])),
["location", "store"])), how="cross")\
.select("cnstraint", "variable", "coefficient"))
tableauTransformer.build()
# Build the input data for the tableau optimization problem
# Drop the instance-specific keys (location and store), which are not supported in the tableau model
tableauData.buildData()\
.addTable("booleanColumns",
tableauTransformations.getTable("columns_open").drop("location"))\
.addTable("floatColumns",
tableauTransformations.getTable("columns_capacity").drop("location")\
.union(tableauTransformations.getTable("columns_ship").drop("location").drop("store")))\
.addEmptyTable("integerColumns")\
.addTable("rows",
tableauTransformations.getTable("rows_ctCapacity").drop("location")\
.union(tableauTransformations.getTable("rows_ctDemand").drop("store"))\
.union(tableauTransformations.getTable("rows_ctSupply").drop("location").drop("store"))\
.union(tableauTransformations.getTable("rows_dexpr")))\
.addTable("entries",
tableauTransformations.getTable("entries_ctSupply_open")\
.union(tableauTransformations.getTable("entries_ctSupply_ship"))\
.union(tableauTransformations.getTable("entries_ctCapacity_capacity"))\
.union(tableauTransformations.getTable("entries_ctCapacity_ship"))\
.union(tableauTransformations.getTable("entries_ctDemand_ship"))\
.union(tableauTransformations.getTable("entries_dexpr_open"))\
.union(tableauTransformations.getTable("entries_dexpr_capacity"))\
.union(tableauTransformations.getTable("entries_dexpr_ship")))\
.addTable("objectives",
SPARK_SESSION.createDataFrame(
[Row(name= "totalCost", sense= "minimize", value= 0.0)],
tableauData.getADM().get("objectives"))
.select("name", "sense", "value"))\
.build()
# Replace with actual items
TableauDotMod = None
url = None
key = None
        tableau_data_model = tableau_inputs = tableau_optimization_problem = tableau_outputs = None
tableauProblem = Optimizer("TableauProblem", credentials={"url": url, "key": key})\
.setOPLModel("TableauProblem.mod",
modelText=[tableau_data_model, tableau_inputs, tableau_optimization_problem, tableau_outputs])\
.setResultDataModel(ADMBuilder()\
.addSchema("booleanDecisions", tableauData.getSchema("booleanColumns"))\
.addSchema("integerDecisions", tableauData.getSchema("integerColumns"))\
.addSchema("floatDecisions", tableauData.getSchema("floatColumns"))\
.addSchema("optimalObjectives", tableauData.getSchema("objectives"))\
.build())
tableauResult = tableauProblem.solve(tableauData)
tableauProblem.getSolveStatus()
# Recover the solution
warehousingResult = OPLCollector("warehousingResult", warehousingResultDataModel)
resultsBuilder = warehousingResult.buildData()
resultsBuilder.addTable("objectives",
tableauResult.getTable("optimalObjectives").select("name", "value")\
.withColumnRenamed("name", "dExpr")\
.withColumn("problem", functions.lit("warehousing"))\
.withColumn("scenarioId", functions.lit(scenarioId))\
.withColumn("iteration", functions.lit(0)))
resultsBuilder.addTable("openWarehouses",
(tableauResult.getTable("booleanDecisions").select("variable", "value").withColumnRenamed("value", "open")\
.join(tableauTransformations.getTable("columns_open"), "variable")).drop("variable")\
.join(
tableauResult.getTable("floatDecisions").select("variable", "value").withColumnRenamed("value", "capacity")\
.join(tableauTransformations.getTable("columns_capacity"), "variable").drop("variable"),
"location")
.select("location", "open", "capacity")\
.withColumn("scenarioId", functions.lit(scenarioId))\
.withColumn("iteration", functions.lit(0)))
floatDecisions = tableauResult.getTable("floatDecisions").select("variable", "value")
resultsBuilder.addTable("shipments",
floatDecisions\
.join(tableauTransformations.getTable("columns_ship"), "variable").drop("variable")\
.join(demandOnRoute, ["location", "store"])\
.withColumn("amount", demandOnRoute["demand"]*(floatDecisions["value"]))\
.select("location", "store", "amount")\
.withColumn("scenarioId", functions.lit(scenarioId))\
.withColumn("iteration", functions.lit(0)))
resultsBuilder.build()
warehousingResult.displayTable("objectives")
warehousingResult.displayTable("openWarehouses")
# to see the lengthy shipments table, uncomment the next line
# warehousingResult.displayTable("shipments")
# end controller
# end class Tableau
```
|
{
"source": "JeremyBloom/Optimization-OPLInterface",
"score": 3
}
|
#### File: JeremyBloom/Optimization-OPLInterface/OPLCollector.py
```python
__author__ = 'bloomj'
'''
Created on Feb 8, 2017
@author: bloomj
'''
import sys
import os
import json
import requests
try:
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, Row, functions
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType
except ImportError as e:
print ("Error importing Spark Modules", e)
sys.exit(1)
SPARK_CONTEXT = SparkContext(conf=SparkConf().setAppName("OPL").setMaster("local[4]"))
#SQL_CONTEXT = sqlContext # sqlContext is predefined
SPARK_SESSION = SparkSession.builder.config("spark.sql.crossJoin.enabled", "true").getOrCreate()
class OPLCollector(object):
'''
Represents an OPL data model in Spark.
Note: Use of this class does not depend on OPL, and in particular, it can be used with the DOcplex Python API.
An application data model (ADM) consists of a set of tables (OPL Tuplesets), each with its own schema.
An ADM is represented by a dictionary in which the keys are the table names and the values are the table schemas.
A builder is provided to create the ADM.
The OPLCollector holds the actual data in Spark Datasets. There are several ways to populate
the data.
- Spark SQL operations can transform tables into other tables.
- A builder is provided when the data is generated programmatically.
- JSON deserialization and serialization are provided when data is exchanged with external applications or stores.
The design of the OPLCollector class aims to reduce the amount of data that must be
manipulated outside of Spark. Where possible, data is streamed among applications without
creating auxiliary in-memory structures or files.
The design of OPLCollector also aims to minimize the amount
of custom coding required to build an application. Collectors are configured
by specifying their schemas through builders rather than by extending with subclasses.
'''
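    # A minimal usage sketch (illustrative table and field names), assuming the ADMBuilder, DataBuilder,
    # buildSchema, Row, and SPARK_SESSION facilities used elsewhere in this module:
    #
    #   collector = OPLCollector("plants")
    #   collector.buildADM() \
    #       .addSchema("plants", buildSchema(("name", StringType()), ("capacity", DoubleType()))) \
    #       .build()
    #   collector.buildData() \
    #       .addTable("plants", SPARK_SESSION.createDataFrame([Row(name="p1", capacity=100.0)],
    #                                                         collector.getSchema("plants"))) \
    #       .build()
    #   collector.displayTable("plants", sys.stdout)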
def __init__(self, collectorName, applicationDataModel={}, sparkData={}):
'''
Creates a new OPLCollector instance.
:param collectorName: the name of this collector.
:type collectorName: String
:param applicationDataModel: holds the table schemas for this collector. Each schema is a Spark StructType.
Note that each collector has one and only one application data model.
:type applicationDataModel: dict<String, StructType>
:param sparkData: holds the actual data tables of this collector as a set of Spark datasets.
:type sparkData: dict<String, Dataframe>
'''
self.name = collectorName
self.applicationDataModel = applicationDataModel
self.sparkData = sparkData
self.size = {name: None for name in applicationDataModel.keys()}
self.jsonDestination = None
self.jsonSource = None
    def copy(self, name, *tables):
        """
        Creates a new OPLCollector instance with copies of the application data model and Spark datasets of this collector.
        The ADM copy is immutable. The Spark datasets themselves are immutable, but the copy supports the addTable, addData, and replaceTable methods.
        Does not copy the JSONSource or JSONDestination fields.
        :param name: name of the new collector
        :param tables: tables to be copied (all tables in this collector, if absent)
        :return: a new OPLCollector instance
        """
        if not tables:
            tables = tuple(self.applicationDataModel.keys())
        result = OPLCollector(name)
        admBuilder = ADMBuilder(result)
        for table in tables:
            admBuilder.addSchema(table, self.getSchema(table))
        admBuilder.build()
        dataBuilder = DataBuilder(result.applicationDataModel, collector=result)
        for table in tables:
            dataBuilder.addTable(table, self.getTable(table))
            result.size[table] = self.size[table]
        dataBuilder.build()
        return result
def getName(self):
"""
Returns the name of this collector.
:return collector name as a string
"""
return self.name
def addTables(self, other):
"""
Adds a set of tables of data from another collector.
An individual table can be set only once.
:param other: another collector
:type other: OPLCollector
:raise ValueError: if the other ADM is empty or if a table name duplicates a name already present in this collector.
"""
if not other.applicationDataModel: # is empty
raise ValueError("empty collector")
        for tableName in other.applicationDataModel.keys():
if tableName in self.applicationDataModel:
raise ValueError("table " + tableName + " has already been defined")
self.applicationDataModel.update(other.applicationDataModel)
self.sparkData.update(other.sparkData)
self.size.update(other.size)
return self
def replaceTable(self, tableName, table, size=None):
"""
Replaces an individual table of data.
:param tableName:
:type String
:param table:
:type Spark Dataframe
:param size: number of rows in table (None if omitted)
:return: this collector
:raise ValueError: if the table is not already defined in the ADM
"""
if tableName not in self.applicationDataModel:
raise ValueError("table " + tableName + "has not been defined")
self.sparkData[tableName] = table
if size is not None:
self.size[tableName] = size
else:
self.size[tableName] = table.count()
        return self
def addData(self, tableName, table, size=None):
"""
Adds data to an existing table.
Use when a table has several input sources.
Does not deduplicate the data (i.e. allows duplicate rows).
:param tableName:
:type String
:param table:
:type Spark Dataframe
:param size: number of rows in table (None if omitted)
:return: this collector
:raise ValueError: if the table is not already defined in the ADM
"""
        if tableName not in self.applicationDataModel:
            raise ValueError("table " + tableName + " has not been defined")
self.sparkData[tableName] = self.sparkData[tableName].union(table)
count = (self.size[tableName] + size) if (self.size[tableName] is not None and size is not None) else None
self.size[tableName] = count
return self
#NEW
def getADM(self):
"""
Exposes the application data model for this OPLCollector.
The ADM is represented by a map in which the keys are the table names
and the values are the table schemas held in Spark StructType objects.
:return: the application data model
:rtype: dict<String, StructType>
"""
return self.applicationDataModel
def setADM(self, applicationDataModel):
"""
Sets the application data model for this OPLCollector.
The ADM cannot be changed once set.
"""
if (self.applicationDataModel): # is not empty or None
raise ValueError("ADM has already been defined")
self.applicationDataModel = applicationDataModel
return self
def getTable(self, tableName):
return self.sparkData[tableName]
def getSchema(self, tableName):
return self.applicationDataModel[tableName]
def selectSchemas(self, *tableNames):
"""
Returns a subset of the application data model.
"""
return {tableName: self.applicationDataModel[tableName] for tableName in tableNames}
def selectTables(self, collectorName, *tableNames):
"""
Creates a new OPLCollector from a subset of the tables in this collector.
The tables in the new collector are copies of the tables in the original.
"""
adm = self.selectSchemas(*tableNames)
data = {tableName: SPARK_SESSION.createDataFrame(self.sparkData[tableName], self.getSchema(tableName))
for tableName in tableNames}
size = {tableName: self.size[tableName] for tableName in tableNames}
return OPLCollector(collectorName, adm, data, size)
def getSize(self, tableName):
"""
Returns the number of rows in a table.
Note: the Spark data set count method is fairly expensive,
so it is used only if there is no other way to count the number of rows.
It is best to count the rows as the table is being deserialized, as is done in the fromJSON method.
Once counted, the number is stored in the size map for future use.
"""
if tableName not in self.size:
raise ValueError("size not defined for table " + tableName)
if self.size[tableName] is None:
self.size[tableName] = self.sparkData[tableName].count()
return self.size[tableName]
def buildADM(self):
"""
Creates the application data model for this collector
"""
if (self.applicationDataModel): # is not empty
raise ValueError("application data model has already been defined")
return ADMBuilder(self)
def buildData(self):
"""
Creates a builder for the data tables for this collector.
Uses this collector's application data model.
:return: a new DataBuilder instance
:raise ValueError: if the application data model has not been defined or if data tables have already been loaded
"""
if not self.applicationDataModel: # is empty
raise ValueError("application data model has not been defined")
if self.sparkData: # is not empty
raise ValueError("data tables have already been loaded")
return DataBuilder(self.applicationDataModel, collector=self)
def setJsonSource(self, source):
"""
Sets the source for the JSON text that populates the collector.
There is a one-to-one correspondence between an OPLCollector instance and its JSON representation;
that is, the JSON source file must fully include all the data tables to be populated in the collector instance.
Thus, it makes no sense to have more than one JSON source for a collector or to change JSON sources.
:param source: a file-like object containing the JSON text.
:return: this collector instance
:raise ValueError: if JSON source has already been set
"""
if self.jsonSource is not None:
raise ValueError("JSON source has already been set")
self.jsonSource = source
return self
#REVISED
def fromJSON(self):
"""
Provides a means to create a collector from JSON.
You must first set the source (an input stream, file, URL, or string) from which the JSON will be read.
Then you call the deserializer fromJSON method.
The application data model for the collector must already have been created.
There is a one-to-one correspondence between an OPLCollector instance and its JSON representation;
that is, the JSON source file must fully include all the data tables to be populated in the collector instance.
Methods are provided to merge two collectors with separate JSON sources (addTables),
add a data set to a collector (addTable), and add data from a data set to an existing table in a collector (addData).
:return: this collector with its data tables filled
:raise ValueError: if the data tables have already been loaded
"""
if self.sparkData: # is not empty
raise ValueError("data tables have already been loaded")
# data: dict {tableName_0: [{fieldName_0: fieldValue_0, ...}, ...], ...}
data = json.load(self.jsonSource)
builder = self.buildData()
for tableName, tableData in data.viewitems():
count = len(tableData)
tableRows = (Row(**fields) for fields in tableData)
builder = builder.addTable(tableName,
SPARK_SESSION.createDataFrame(tableRows, self.getADM()[tableName]),
count) # would like to count the rows as they are read instead,
# but don't see how
builder.build()
return self
def setJsonDestination(self, destination):
"""
Sets the destination for the JSON serialization.
Replaces an existing destination if one has been set previously.
:param destination: an output string, stream, file, or URL
:return: this collector
"""
self.jsonDestination = destination
return self
#REVISED
def toJSON(self):
"""
Provides a means to write the application data as JSON.
You must first set the destination (an output stream, file, url, or string) where the JSON will be written.
Then you call the serializer toJSON method.
"""
self.jsonDestination.write("{\n") # start collector object
firstTable = True
for tableName in self.sparkData:
if not firstTable:
self.jsonDestination.write(',\n')
else:
firstTable = False
self.jsonDestination.write('"' + tableName + '" : [\n') # start table list
firstRow = True
for row in self.sparkData[tableName].toJSON().collect():# better to use toLocalIterator() but it gives a timeout error
if not firstRow:
self.jsonDestination.write(",\n")
else:
firstRow= False
self.jsonDestination.write(row) # write row object
self.jsonDestination.write("\n]") # end table list
self.jsonDestination.write("\n}") # end collector object
#REVISED
def displayTable(self, tableName, out=sys.stdout):
"""
Prints the contents of a table.
:param tableName: String
:param out: a file or other print destination where the table will be written
"""
out.write("collector: " + self.getName() + "\n")
out.write("table: " + tableName + "\n")
self.getTable(tableName).show(self.getSize(tableName), truncate=False)
# REVISED
def display(self, out=sys.stdout):
"""
Prints the contents of all tables in this collector.
:param out: a file or other print destination where the tables will be written
"""
for tableName in self.sparkData:
self.displayTable(tableName, out=out)
# end class OPLCollector
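# Illustrative sketch only (not part of the original module): one way the JSON workflow
# described in setJsonSource/fromJSON/toJSON might be wired together. The schema, the
# "warehouse" table, and the file-like inputFile/outputFile arguments are hypothetical;
# StringType/DoubleType are assumed to come from the module's pyspark.sql.types imports.
def _example_json_round_trip(inputFile, outputFile):
    collector = OPLCollector("example")
    collector.buildADM() \
        .addSchema("warehouse", buildSchema(("location", StringType()),
                                            ("capacity", DoubleType()))) \
        .build()
    collector.setJsonSource(inputFile).fromJSON()  # deserialize: fills the data tables
    collector.setJsonDestination(outputFile)
    collector.toJSON()  # serialize the same tables back out as JSON
    return collector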
def getFromObjectStorage(credentials, container=None, filename=None):
"""
Returns a stream containing a file's content from Bluemix Object Storage.
:param credentials a dict generated by the Insert to Code service of the host Notebook
:param container the name of the container as specified in the credentials (defaults to the credentials entry)
:param filename the name of the file to be accessed (note: if there is more than one file in the container,
you might prefer to enter the names directly; otherwise, defaults to the credentials entry)
"""
if not container:
container = credentials['container']
if not filename:
filename = credentials['filename']
url1 = ''.join([credentials['auth_url'], '/v3/auth/tokens'])
data = {'auth': {'identity': {'methods': ['password'],
'password': {
'user': {'name': credentials['username'], 'domain': {'id': credentials['domain_id']},
'password': credentials['password']}}}}}
headers1 = {'Content-Type': 'application/json'}
resp1 = requests.post(url=url1, data=json.dumps(data), headers=headers1)
resp1_body = resp1.json()
for e1 in resp1_body['token']['catalog']:
if (e1['type'] == 'object-store'):
for e2 in e1['endpoints']:
if (e2['interface'] == 'public' and e2['region'] == credentials['region']):
url2 = ''.join([e2['url'], '/', container, '/', filename])
s_subject_token = resp1.headers['x-subject-token']
headers2 = {'X-Auth-Token': s_subject_token, 'accept': 'application/json'}
resp2 = requests.get(url=url2, headers=headers2, stream=True)
return resp2.raw
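# Illustrative sketch only (an assumption, not from the original source): feeding the raw
# stream returned by getFromObjectStorage straight into a collector's JSON deserializer.
# The collector is assumed to already have its application data model built.
def _load_collector_from_object_storage(collector, credentials):
    stream = getFromObjectStorage(credentials)  # file-like urllib3 response body
    return collector.setJsonSource(stream).fromJSON()  # json.load accepts file-like objects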
class DataBuilder(object):
"""
Builds the Spark datasets to hold the application data.
Used when the data are created programmatically.
"""
def __init__(self, applicationDataModel, collector=None):
"""
Creates a builder for loading the Spark datasets.
:param applicationDataModel
:param collector: if present, loads the data tables and their sizes directly into the collector;
if not present or null, the Spark data dict is returned directly
:return: a new DataBuilder instance
:raise ValueError: if the application data model has not been defined
"""
if not applicationDataModel: # is empty
raise ValueError("application data model has not been defined")
self.applicationDataModel = applicationDataModel
self.collector = collector
self.result = {}
self.length = {}
def addTable(self, tableName, data, size=None):
"""
Get the external data and create the corresponding application dataset.
Assumes that the schema of this table is already present in the ADM.
:param data: a Spark dataset
:param size: number of rows in the table (None if omitted)
:return this builder instance
:raise ValueError: if the table is not included in the ADM or if the table has already been loaded
"""
if tableName not in self.applicationDataModel:
raise ValueError("table " + tableName + " has not been defined")
if tableName in self.result:
raise ValueError("table " + tableName + " has already been loaded")
self.result[tableName] = data
self.length[tableName] = size
return self
def copyTable(self, tableName, data, size=None):
return self.addTable(tableName,
SPARK_SESSION.createDataFrame(data.rdd, self.applicationDataModel[tableName]), size)
def addEmptyTable(self, tableName):
return self.addTable(tableName,
SPARK_SESSION.createDataFrame(SPARK_CONTEXT.emptyRDD(),
self.applicationDataModel[tableName]), 0)
#NEW
def referenceTable(self, tableName):
"""
Enables referring to a table in the collector under construction to create a new table.
Can be used in SQL statements.
:param tableName:
:type tableName: String
:return: the Spark dataset registered for tableName in this builder
:rtype: DataFrame
"""
if tableName not in self.result:
raise ValueError(tableName + " does not exist")
return self.result.get(tableName)
def build(self):
"""
Completes building the Spark data.
Registers the application data sets as Spark SQL tables.
If an OPLCollector has been supplied in the constructor, loads the data tables and their sizes into it.
:return a dict of table names to Spark data sets containing the application data
:raise ValueError: if a table in the ADM has no associated data or if data tables have already been loaded into the collector
"""
for tableName in self.applicationDataModel:
if tableName not in self.result:
raise ValueError("table " + tableName + "has no data")
for tableName in self.result:
self.result[tableName].createOrReplaceTempView(tableName)
if self.collector is not None:
if self.collector.sparkData: # is not empty
raise ValueError("data tables have already been loaded")
self.collector.sparkData = self.result
self.collector.size = self.length
return self.result
def retrieveSize(self):
"""
:return the size dict created by this builder
Note: calling this method before the build method could return an inaccurate result
"""
return self.length
# end class DataBuilder
class ADMBuilder(object):
"""
Builds an Application Data Model that associates a set of Spark Datasets with their schemas.
Usage:
adm = ADMBuilder()\
    .addSchema("warehouse", buildSchema(
        ("location", StringType()),
        ("capacity", DoubleType())))\
    .addSchema("route", buildSchema(
        ("from", StringType()),
        ("to", StringType()),
        ("capacity", DoubleType())))\
    .build()
"""
def __init__(self, collector=None):
"""
Creates a new builder.
:param collector if present, loads the application data model directly into the collector;
if not present or null, the ADM map is returned directly
"""
self.collector = collector
self.result = {}
def addSchema(self, tableName, tupleSchema):
"""
Adds a new table schema to the ADM.
:param tupleSchema can be built with the buildSchema function
:return this builder
:raise ValueError: if a schema for tableName has already been defined
"""
if tableName in self.result:
raise ValueError("tuple schema " + tableName + " has already been defined")
self.result[tableName] = tupleSchema
return self
#NEW
def referenceSchema(self, tableName):
"""
Enables referring to a schema in the ADM under construction to create a new schema.
:param tableName
:return the schema
"""
if tableName not in self.result:
raise ValueError("tuple schema " + tableName + " does not exist")
return self.result[tableName]
def build(self):
"""
Completes building the application data model.
If an OPLCollector has been supplied in the constructor, loads the ADM into it.
:return the ADM
:raise ValueError: if the ADM for the collector has already been defined
"""
if self.collector is not None:
if self.collector.applicationDataModel: # is not empty
raise ValueError("application data model has already been defined")
self.collector.applicationDataModel = self.result
return self.result
# end class ADMBuilder
def buildSchema(*fields):
"""
Creates a schema from a list of field tuples.
The resulting schema is an instance of a Spark StructType.
:param fields: (fieldName, fieldType) pairs
:type fields: tuple<String, DataType>
:return: the assembled schema
:rtype: StructType
"""
schema = StructType()
for fieldName, fieldType in fields:
schema = schema.add(fieldName, fieldType, False, None)
return schema
# end buildSchema
#NEW
class SchemaBuilder:
"""
Builds a tuple schema.
Strictly speaking, this builder is not needed since the StructType class provides the necessary functionality.
However, it is provided as a convenience.
Only the following data types are supported in the schema: String, integer, float (represented as Double), and 1-dimensional arrays of integer or float.
The array types are supported only for internal use and cannot be serialized to or deserialized from JSON.
Note: the fields in the resulting schema are sorted in dictionary order by name to ensure correct matching with data elements.
Usage:
warehouseSchema = SchemaBuilder().addField("location", StringType()).addField("capacity", DoubleType()).buildSchema()
The fields of the resulting StructType are not nullable and have no metadata.
"""
def __init__(self):
# fields is a dictionary<String, StructField>
self.fields= {}
def addField(self, fieldName, fieldType):
if fieldName in self.fields:
raise ValueError("field " + fieldName + " has already been set")
self.fields[fieldName] = StructField(fieldName, fieldType, False)
return self
def addFields(self, *fields):
"""
Adds fields from a list field tuples
:param fields: tuple<String, DataType>
:return: StructType
"""
for field in fields:
fieldName, fieldType= field
self.addField(fieldName, fieldType)
return self
def copyField(self, otherSchema, fieldName):
"""
Copies fields from another schema
:param otherSchema: StructType
:param fieldName: String
:return: StructType
"""
if fieldName in self.fields:
raise ValueError("field " + fieldName + " has already been set")
self.fields[fieldName] = otherSchema[fieldName]
return self
def copyFields(self, otherSchema):
for fieldName in otherSchema.names:
self.copyField(otherSchema, fieldName)
return self
def buildSchema(self):
return StructType([self.fields[name] for name in sorted(self.fields)])
# end class SchemaBuilder
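# Illustrative sketch only (not part of the original module): building a small collector
# entirely in code with SchemaBuilder, ADMBuilder and DataBuilder. The "warehouse" table,
# its rows, and the field names are hypothetical; Row, StringType, DoubleType and
# SPARK_SESSION are assumed to be the module's existing imports/globals.
def _example_programmatic_collector():
    schema = SchemaBuilder().addFields(("location", StringType()),
                                       ("capacity", DoubleType())).buildSchema()
    collector = OPLCollector("demo")
    ADMBuilder(collector).addSchema("warehouse", schema).build()
    rows = [Row(capacity=100.0, location="Brooklyn"),
            Row(capacity=250.0, location="Queens")]
    collector.buildData() \
        .addTable("warehouse", SPARK_SESSION.createDataFrame(rows, schema), len(rows)) \
        .build()
    return collector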
```
|
{
"source": "jeremybmerrill/bigappleserialbus",
"score": 3
}
|
#### File: bigappleserialbus/bigappleserialbus/bus.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = 'Apache'
__version__ = '0.1'
from datetime import datetime, timedelta
# import time
from trajectory import Trajectory
from itertools import tee, izip
from collections import OrderedDict
from sqlalchemy import Column, ForeignKey, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm
# from pylab import plot,show
from numpy import vstack,array
from numpy.random import rand
from scipy.cluster.vq import kmeans,vq
import kmodes
from sklearn.neighbors import NearestNeighbors
import logging #magically the same as the one in bigappleserialbus.py
default_bus_speed = 4 # m/s, roughly 9 miles per hour
#sometimes the bus, at the terminal where it starts, reports itself as, e.g. 0.2 meters along the route.
#this is used to decide that, yes, it's still at the start of the route.
max_gps_error = 20 #meters
# any segment longer than this disqualifies the trajectory, since something went wonky here
MAX_SEGMENT_TIME = 300
MIN_SEGMENT_TIME = 20
class Bus:
def __init__(self, number, journey, route_name, end_stop_id, session):
self.number = number
self.time_location_pairs = []
self.stop_time_pairs = OrderedDict() #store time along the route
self.start_time = None
self.stops = []
self.stop_distances = {}
self.previous_bus_positions = []
self.db_session = session
self.route_name = route_name
self.end_stop_id = end_stop_id
self.red_light_time = None
self.green_light_time = None
self.seconds_away = None
self.error = None
self.first_projected_arrival = datetime.min
self.first_projected_arrival_speeds = 0
self.set_trajectory_points(journey)
def __repr__(self):
seconds_away_str = ''
if self.seconds_away :
seconds_away_str = " %(sec)i s/a" % { 'sec': self.seconds_away }
if self.first_projected_arrival and self.seconds_away:
seconds_away_str += ", FP: %(fp)s" % {'fp': str(self.first_projected_arrival)[11:19]}
return "<Bus #%(number)s%(full_data)s %(route)s/%(stop)s%(sec)s>" % {
'number': self.number,
'full_data': '' if self.has_full_data else '*',
'route': self.route_name,
'stop': self.end_stop_id,
'sec': seconds_away_str
}
def add_observed_position(self, journey, recorded_at_str):
"""tk"""
bus_position = {
'recorded_at': datetime.strptime(recorded_at_str[:19], "%Y-%m-%dT%H:%M:%S"), #recorded_at
'next_stop': journey["OnwardCalls"]["OnwardCall"][0]["StopPointRef"], #next_stop_ref
'next_stop_name': journey["OnwardCalls"]["OnwardCall"][0]["StopPointName"],
'distance_along_route': journey["MonitoredCall"]["Extensions"]["Distances"]["CallDistanceAlongRoute"] - journey["MonitoredCall"]["Extensions"]["Distances"]["DistanceFromCall"],
'distance_to_end': journey["MonitoredCall"]["Extensions"]["Distances"]["DistanceFromCall"], #distance_from_call
'distance_to_next_stop': journey["OnwardCalls"]["OnwardCall"][0]["Extensions"]["Distances"]["DistanceFromCall"],
'is_at_stop': journey["OnwardCalls"]["OnwardCall"][0]["Extensions"]["Distances"]["PresentableDistance"] == "at stop",
}
self._add_observed_position(bus_position)
def _add_observed_position(self, bus_position):
"""From a bus_position, object, update the bus's internal representation of its location and previous trajectory."""
bus_position['is_underway'] = bus_position['next_stop'] in self.stops and self.stops.index(bus_position['next_stop']) >= 0
#legacy crap, for speed stuff
if not (self.time_location_pairs and self.time_location_pairs[0][0] == bus_position['recorded_at']):
self.time_location_pairs.insert(0, [bus_position['recorded_at'], bus_position['distance_to_end']])
if not bus_position['is_underway']:
return;
if not self.previous_bus_positions:
self.previous_bus_positions.append(bus_position)
return
previous_bus_position = self.previous_bus_positions[-1]
self.previous_bus_positions.append(bus_position)
# if this bus_position hasn't been updated since the last check, skip it.
if previous_bus_position['recorded_at'] == bus_position['recorded_at']:
return
# if the bus hasn't moved (i.e. the current next stop has already been visited)
if self.stop_time_pairs and self.stop_time_pairs[bus_position['next_stop']]:
return
# as soon as the bus starts moving away from its start point.
# (Don't count as its start_time time it spends going the opposite direction)
if not self.start_time and bus_position['is_underway']:
self.start_time = bus_position['recorded_at']
#if we've passed the next stop (i.e. the first key with None as its value), interpolate its value
#TODO: test this real good.
for missed_stop in self.stops[:self.stops.index(bus_position['next_stop'])]:
if self.stop_time_pairs[missed_stop] is None:
distance_traveled = previous_bus_position['distance_to_end'] - bus_position['distance_to_end']
time_elapsed = bus_position['recorded_at'] - previous_bus_position['recorded_at']
assert time_elapsed.seconds > 0
print("%(bus_name)s add_observed_position interpolated; next stop: %(stop_ref)s, so prev_stop: %(missed)s @ %(missed_dist)s away" %
{'bus_name': self.number, 'stop_ref': bus_position['next_stop'], 'missed': missed_stop, 'missed_dist': self.stop_distances[self.stops[-1]] - self.stop_distances[missed_stop]})
# print("distance: prev: %(prev_loc)fm, this: %(this_loc)fm; prev_dist: %(prev_dist)f; curtime: %(currec)s, prev: %(prevrec)s" %
# {'prev_loc': previous_bus_position['distance_to_end'], 'this_loc': bus_position['distance_to_end'],
# 'prev_dist': previous_bus_position['distance_to_next_stop'], 'prevrec':previous_bus_position['recorded_at'], 'currec': bus_position['recorded_at']})
# explanation of what's going on here
#
# bust_pos-----S------S-----bus_pos
# 0sec 100 sec
# 0m 150m 320m 600m
# assume a constant speed
# 100 sec here is time_elapsed.seconds
# 600m is distance_traveled
# 150m is (for first stop) self.stop_distances[missed_stop] - previous_bus_position['distance_along_route']
distance_to_missed_stop = int(self.stop_distances[missed_stop] - previous_bus_position['distance_along_route'])
if distance_to_missed_stop < 0:
print(self.number, missed_stop, bus_position['next_stop'], self.stop_distances[missed_stop], previous_bus_position['distance_along_route'])
assert(distance_to_missed_stop >= 0)
time_to_missed_stop = int(time_elapsed.seconds * (float(distance_to_missed_stop) / distance_traveled) )
if not time_to_missed_stop >= 0:
logging.debug("time_to_missed_stop < 0: " + str(time_to_missed_stop) + " (" + str(time_elapsed.seconds) + " * " + str(distance_to_missed_stop) + " / " + str(distance_traveled) + ")")
assert(time_to_missed_stop >= 0)
print("prev/curr dist: %(prev_dist)f/%(curr_dist)f, time elapsed: %(time_elapsed)i, time to stop: %(time_to)i" %
{'prev_dist': previous_bus_position['distance_to_end'], 'curr_dist': bus_position['distance_to_end'],
'time_elapsed': time_elapsed.seconds, 'time_to': time_to_missed_stop})
interpolated_prev_stop_arrival_time = timedelta(seconds=time_to_missed_stop) + previous_bus_position['recorded_at']
self.stop_time_pairs[missed_stop] = interpolated_prev_stop_arrival_time
#if we're at a stop, add it to the stop_time_pairs
# (being at_stop and needing to interpolate the previous stop are not mutually exclusive.)
if self.stops.index(bus_position['next_stop']) > 0 and bus_position['is_at_stop']:
self.stop_time_pairs[bus_position['next_stop']] = bus_position['recorded_at']
# print("%(bus_name)s add_observed_position at stop" % {'bus_name': self.number})
# Buses often lay over at the first stop, so we record the *last* time it as at the stop.
first_stop = self.stops[0]
if self.stops.index(bus_position['next_stop']) == 1 and self.stop_time_pairs[first_stop] is None:
self.stop_time_pairs[first_stop] = previous_bus_position['recorded_at']
# print("%(bus_name)s add_observed_position at stop 1" % {'bus_name': self.number})
print(self.number + str(self.stop_time_pairs))
print(self.number + " stop_time_pairs at " + str(bus_position['next_stop']) + " set to " + str(self.stop_time_pairs[bus_position['next_stop']]))
# print the progress so far.
# print(self.number + ": ")
# print([(stop_ref, self.stop_time_pairs[stop_ref].strftime("%H:%M:%S")) if self.stop_time_pairs[stop_ref] else (stop_ref,) for stop_ref in self.stops ])
# print('')
def fill_in_last_stop(self, recorded_at_str):
"""Fill in the last element in the stop_time_pairs.
If the bus doesn't stop at the last stop (i.e. the one the user selected as their "home" stop),
(or if the bus stopped, but not when we checked for it), the bus will be ignored and add_observed_position
won't be called, and then the final member of stop_time_pairs won't get set. Then we won't be able to
save the bus as a trajectory. This method fixes the last element in this circumstance.
We don't have a "journey" in that case.
"""
# if a bus stops appearing the API responses, but never got any values filled in
# (e.g. because it ran a route in the other direction than what we're following, then left service)
# don't try to "interpolate" its entire trajectory
if all(t is None for t in self.stop_time_pairs.values()):
print(self.number + " didn't fill in last stop")
return
print(self.number + " filling in last stop")
bus_position = {
'recorded_at': datetime.strptime(recorded_at_str[:19], "%Y-%m-%dT%H:%M:%S"), #recorded_at
'next_stop': self.stops[-1],
'distance_to_end': 0.0,
'distance_along_route': self.stop_distances[self.stops[-1]],
'distance_to_next_stop': 0.0,
'is_at_stop': True,
}
self._add_observed_position(bus_position)
# # if the only None in stop_time_pairs is at the end (for the last stop)
# ordered_times = [self.stop_time_pairs[stop] for stop in self.stops]
# if None in ordered_times and ordered_times.index(None) == (len(self.stop_time_pairs)-1):
# #if we've passed the final stop stop, fill in its value with now
# self.stop_time_pairs[self.stops[-1]] = recorded_at
# this just fills in the keys to self.stop_time_pairs and members of self.stops
# called only on init.
def set_trajectory_points(self, journey):
starting_distance_along_route = journey["OnwardCalls"]["OnwardCall"][0]["Extensions"]["Distances"]["CallDistanceAlongRoute"]
if starting_distance_along_route < max_gps_error:
# print("%(bus_name)s at start: (%(dist)f m away)" % {'bus_name': self.number, 'dist': starting_distance_along_route} )
self.has_full_data = True
else:
print("%(bus_name)s added mid-route: (%(dist)f m along route)" % {'bus_name': self.number, 'dist': starting_distance_along_route} )
self.has_full_data = False
for index, onward_call in enumerate(journey["OnwardCalls"]["OnwardCall"]):
stop_ref = onward_call["StopPointRef"]
distance_along_route = onward_call["Extensions"]["Distances"]["CallDistanceAlongRoute"]
if stop_ref not in self.stops:
# i = stop_ref #IntermediateStop(self.route_name, stop_ref, onward_call["StopPointName"])
self.stops.append(stop_ref)
self.stop_distances[stop_ref] = distance_along_route
self.stop_time_pairs[stop_ref] = None
assert index == 0 or distance_along_route >= self.stop_distances[self.stops[index-1]] #distances should increase, ensuring the stops are in order
if index == 0:
self.stop_time_pairs[stop_ref] = self.start_time
if stop_ref == journey["MonitoredCall"]["StopPointRef"]:
break
# called when we're done with the bus (i.e. it's passed the stop we're interested in)
def convert_to_trajectory(self, route_name, stop_id):
# print("%(bus_name)s converting to trajectory" % {'bus_name': self.number})
segment_intervals = self.segment_intervals()
if None in segment_intervals: # not ready to be converted to trajectory; because a stop doesn't have time data.
print("%(bus_name)s trajectory conversion failed 1: %(segs)s " %{'bus_name': self.number, 'segs': segment_intervals})
return None
if not self.has_full_data:
print("%(bus_name)s trajectory conversion failed 2" % {'bus_name': self.number})
return None
# print("%(bus_name)s converted to trajectory with segment_intervals: " % {'bus_name': self.number})
# print(segment_intervals)
traj = Trajectory(route_name, stop_id, self.start_time)
traj.set_segment_intervals(segment_intervals)
traj.green_light_time = self.green_light_time
traj.red_light_time = self.red_light_time
traj.error = self.error
return traj
def segment_intervals(self):
if not self.stop_time_pairs:
return None
times = []
segment_intervals = []
for stop in self.stops:
times.append(self.stop_time_pairs[stop])
for time1, time2 in pairwise(times):
if time1 is not None and time2 is not None:
segment_intervals.append((time2 - time1).seconds)
else:
segment_intervals.append(None)
return segment_intervals
def find_similar_trajectories(self):
trajs = self.db_session.query(Trajectory.start_time, Trajectory.segment0,Trajectory.segment1,Trajectory.segment2,Trajectory.segment3,Trajectory.segment4,
Trajectory.segment5,Trajectory.segment6,Trajectory.segment7,Trajectory.segment8,Trajectory.segment9,Trajectory.segment10,
Trajectory.segment11,Trajectory.segment12,Trajectory.segment13,Trajectory.segment14,Trajectory.segment15,
Trajectory.segment16,Trajectory.segment17,Trajectory.segment18,Trajectory.segment19,Trajectory.segment20,
Trajectory.segment21,Trajectory.segment22,Trajectory.segment23,Trajectory.segment24,Trajectory.segment25,
Trajectory.segment26,Trajectory.segment27,Trajectory.segment28,Trajectory.segment29,Trajectory.segment30,
Trajectory.segment31,Trajectory.segment32,Trajectory.segment33,Trajectory.segment34,Trajectory.segment35,
Trajectory.segment36,Trajectory.segment37,Trajectory.segment38,Trajectory.segment39).filter(Trajectory.route_name==self.route_name).filter(Trajectory.end_stop_id == self.end_stop_id)
trajs = [traj for traj in trajs if not any(map(lambda x: x != None and (x > MAX_SEGMENT_TIME or x < MIN_SEGMENT_TIME), traj[1:])) ]
# TODO: before filtering based on similarity by segments, filter by time.
similar_trajectories_by_time = self.filter_by_time(trajs)
similar_trajectories_by_time = [traj[1:] for traj in similar_trajectories_by_time] #remove the time item.
if not similar_trajectories_by_time:
return {'similar': [], 'seconds_away': -1}
# this "backup" method was in use until 1/15/15 (Next 19 lines)
# backoff: if there's tons of trajectories, make a maximum of N clusters (max_clusters)
# if N clusters would make "my" cluster contain M trajectories and M < minimum_similar_trajectories
# then try again with N_2 as N_1 / 2
#
# How did I compute max_clusters?
# count of time_periods * count of weather variables * weekday_types * 4
# time_periods = early-morning-rush, late-morning-rush, late-morning, early-afternoon, mid-afternoon, early-evening-rush, late-evening-rush, late-evening, overnight
# weather_variables: hot, cold, rainy, snowy
# weekday_types: weekday, weekend
# max_clusters = 288
# minimum_similar_trajectories = 5
# similar_trajectories = self.filter_by_segment_intervals(similar_trajectories_by_time, max_clusters)
# clusters_cnt = max_clusters
# while clusters_cnt > 1 and len(similar_trajectories) < minimum_similar_trajectories and len(similar_trajectories) > 0:
# # logging.debug(' '.join(map(str, ["backing off, with cluster count", clusters_cnt, "too few similar trajectories", len(similar_trajectories), "from",len(similar_trajectories_by_time), "total"])))
# clusters_cnt = clusters_cnt / 2
# similar_trajectories = self.filter_by_segment_intervals(similar_trajectories_by_time, clusters_cnt)
similar_trajectories = self.filter_by_segment_intervals(similar_trajectories_by_time, 144)
if not similar_trajectories:
return {'similar': [], 'seconds_away': -1}
segment_intervals = self.segment_intervals()
last_defined_segment_index = segment_intervals.index(None) if None in segment_intervals else len(segment_intervals)
# average time-to-home-stop of the similar trajectories
remaining_times_on_similar_trajectories = [sum(traj[last_defined_segment_index:]) for traj in similar_trajectories]
# two methods of determining the remaining time from the similar trajectories
# average the remaining times
seconds_away = sum(remaining_times_on_similar_trajectories) / len(similar_trajectories)
# sum the medians for each remaining segment
# seconds_away = sum([median(list(x)) for x in zip(*[traj[last_defined_segment_index:] for traj in similar_trajectories])])
# similar_trajectories_time_elapsed = [sum(traj[:last_defined_segment_index]) for traj in similar_trajectories] #for sanity checking only
# logging.debug('remaining times ['+str(last_defined_segment_index)+' / '+str(seconds_away)+'] ('+self.previous_bus_positions[-1]["next_stop_name"]+'): ' + ', '.join([str(i) for i in remaining_times_on_similar_trajectories]))
# similar_trajectories_time_elapsed = [sum(traj[:last_defined_segment_index]) for traj in similar_trajectories] #for sanity checking only
# logging.debug('elapsed times ['+str(last_defined_segment_index)+' / '+str(seconds_away)+'] ('+self.previous_bus_positions[-1]["next_stop_name"]+'): ' + ', '.join([str(i) for i in similar_trajectories_time_elapsed]))
self.seconds_away = seconds_away
return {'similar': similar_trajectories, 'seconds_away': seconds_away}
def filter_by_time(self, trajs):
if self.start_time is None:
return trajs
def to_time_of_day(time):
if time.hour in [7,8,9]:
return 0
elif time.hour in [17,18,19]:
return 1
elif time.hour in [10,11,12,13,14,15,16]:
return 2
elif time.hour in [20,21,22,23,0,1,2,3,4,5,6]:
return 3
is_a_weekend = self.start_time.weekday() in [5,6]
by_day = filter(lambda traj: (traj[0].weekday() in [5,6]) == is_a_weekend , trajs)
if not is_a_weekend:
time_of_day = to_time_of_day(self.start_time)
by_time_of_day = filter(lambda traj: to_time_of_day(traj[0]) == time_of_day, by_day)
else:
by_time_of_day = by_day
return by_time_of_day
def filter_by_segment_intervals(self, trajs, number_of_clusters):
truncate_trajs_to = trajs[0].index(None)
trajs = [traj[:truncate_trajs_to] for traj in trajs]
segment_intervals = self.segment_intervals()
if segment_intervals is None or all([seg is None for seg in segment_intervals]):
return []
#truncate to last defined point of this bus (i.e. where it is now) to find similar trajectories _so far_.
# print('%(bus_name)s segment_intervals: ' % {'bus_name': self.number} + ', '.join(map(str, segment_intervals)))
last_defined_segment_index = segment_intervals.index(None) if None in segment_intervals else len(segment_intervals)
truncated_trajectories_list = [traj[:last_defined_segment_index] for traj in trajs]
truncated_trajectories_list = map(preprocess_trajectory, truncated_trajectories_list)
truncated_trajectories = array(truncated_trajectories_list)
truncated_segment_intervals = preprocess_trajectory(segment_intervals[:last_defined_segment_index])
if False: # use k-means clustering; disabled in favor of k-nearest neighbors (which is itself mostly untested)
similar_trajectory_indexes = find_similar_by_kmeans(truncated_trajectories, truncated_segment_intervals, number_of_clusters)
else:
similar_trajectory_indexes = find_similar_by_k_nearest_neighbors(truncated_trajectories, truncated_segment_intervals)
similar_trajectories = [trajs[i] for i in similar_trajectory_indexes]
return similar_trajectories
#called when a bus's lights are turned off, when there's not time to make it to the bus
def too_late(self):
pass
#called when a bus's lights are turned red, when there's just enough time to make it to the bus
def imminent(self):
self.red_light_time = self.previous_bus_positions[-1]['recorded_at']
#called when a bus's lights are turned green, when it's time to get ready to go to the bus
def near(self):
self.green_light_time = self.previous_bus_positions[-1]['recorded_at']
#TODO: erase all of this below here (at this indent level)
def get_meters_away(self):
return self.time_location_pairs[0][1]
def get_seconds_away(self):
speed = self.get_speed_mps()
if speed == 0.0:
return 6000 # a big number of seconds
return self.get_meters_away() / speed
def get_minutes_away(self):
return timedelta(seconds=self.get_seconds_away())
def get_speed_mph(self):
return (self.get_speed_mps() * (60 * 60)) / 1609.34
def get_speed_mps(self):
#meters per second
# this is a rolling weighted average over the past distance_to_track time/position values
if len(self.time_location_pairs) < 2:
return default_bus_speed
centroid = 3.0
speed_sum = 0
weight_sum = 0
for i, (time, location) in enumerate(self.time_location_pairs):
if i == 0:
continue;
weight = centroid / (abs(i - centroid) if abs(i - centroid) > 0 else 0.5)
weight_sum += weight
speed_sum += self.naive_speed(0, i) * weight
meters_per_second = speed_sum / weight_sum
return meters_per_second
# def old_get_speed(self):
# if len(self.time_location_pairs) < 2:
# return default_bus_speed
# long_term = self.naive_speed(0, 9)
# medium_term = self.less_naive_speed(0, 4)
# mid_to_short_term = self.less_naive_speed(0, 2)
# short_term = self.less_naive_speed(0, 1) #ignore this, since it might be stuck at a light
# meters_per_second = ( (mid_to_short_term * 2) + (medium_term * 2) + long_term) / 5
# return meters_per_second
def naive_speed(self, start_index, end_index):
if end_index >= len(self.time_location_pairs):
end_index = -1
start = self.time_location_pairs[start_index]
end = self.time_location_pairs[end_index]
distance = float(abs(start[1] - end[1]))
time = abs(start[0] - end[0])
if time.seconds == 0:
return 0
return distance / float(time.seconds)
def less_naive_speed(self, start_index, end_index):
#naive speed, except don't count time the bus spends stopped
if end_index >= len(self.time_location_pairs):
end_index = -1
start = self.time_location_pairs[start_index]
end = self.time_location_pairs[end_index]
distance = float(abs(start[1] - end[1]))
raw_time = abs(start[0] - end[0])
for (a_time, a_dist), (b_time, b_dist) in pairwise(self.time_location_pairs):
if abs(a_dist - b_dist) < 20:
raw_time -= abs(a_time - b_time)
if raw_time.seconds == 0:
return 0
return distance / float(raw_time.seconds)
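# Worked example (illustrative only) of the missed-stop interpolation performed in
# Bus._add_observed_position, using the same figures as the diagram in that method:
# the bus covered 600 m in 100 s, and the missed stop sat 150 m past the previous
# position, so the stop is assumed to have been passed 25 s after that observation.
def _interpolation_example():
    time_elapsed_seconds = 100       # seconds between the two observations
    distance_traveled = 600.0        # metres covered between the two observations
    distance_to_missed_stop = 150.0  # metres from the previous position to the missed stop
    time_to_missed_stop = int(time_elapsed_seconds * (distance_to_missed_stop / distance_traveled))
    assert time_to_missed_stop == 25
    return time_to_missed_stop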
def preprocess_trajectory(traj):
"""Transform/preprocess a trajectory somehow for use in the kmeans algo"""
new_traj = list(traj)
# old = list(traj[0:len(traj)/3])
# medium = list(traj[len(traj)/3:(2*len(traj))/3])
# new = list(traj[(2*len(traj))/3:])
# # # in this, case multiply the trailing intervals to make them have more bearing on the output of the kmeans
# # new_traj = old + medium * 2 + new * 3
# new_traj = [(o / 3.0) for o in old] + [(o / 2.0) for o in old] + new
new_traj = traj[-8:]
# try scaling down the first few
return new_traj
def find_similar_by_kmeans(truncated_trajectories, truncated_segment_intervals, number_of_clusters=144):
print("kmeansing")
centroids,_ = kmeans(truncated_trajectories, number_of_clusters)
print("vqing")
cluster_indices,_ = vq(truncated_trajectories,centroids)
print("vqing again")
my_cluster_indices, _ = vq(array([truncated_segment_intervals]), centroids)
my_cluster_index = my_cluster_indices[0]
print("done with ML")
logging.debug("clusters: [%(sizes)s]" %
{"sizes": ', '.join([str(cluster_indices.tolist().count(idx)) + ("*" if idx == my_cluster_index else "") for idx in set(sorted(cluster_indices))])})
similar_trajectory_indexes = [i for i in range(0, len(cluster_indices)) if cluster_indices[i] == my_cluster_index]
# #find the suspiciously-large cluster... that might be the problem
# large_cluster_indices = [idx for idx in set(sorted(cluster_indices)) if cluster_indices.tolist().count(idx) > 1000]
# for i, traj in enumerate(trajs):
# if cluster_indices[i] in large_cluster_indices:
# if rand() > 0.995: #5 in 1000
# logging.debug("large cluster member: " + str(traj))
return similar_trajectory_indexes
def find_similar_by_k_nearest_neighbors(truncated_trajectories, truncated_segment_intervals, k=None):
if not k:
#k = int(len(truncated_trajectories)**0.5)
k = 10
k = min(k, len(truncated_trajectories))
nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(truncated_trajectories)
distances, indices = nbrs.kneighbors(array([truncated_segment_intervals]))
my_nearest_neighbors_indices = indices[0]
# indices is, for each point in the argument, a list of the index of its nearest neighbors
# in, presumably, what was sent to fit.
return my_nearest_neighbors_indices
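# Tiny illustrative check (not in the original code) of what find_similar_by_k_nearest_neighbors
# returns: row indexes, into the array that was fitted, of the trajectories closest to the query.
# The toy trajectories below are hypothetical.
def _knn_example():
    trajectories = array([[60, 70, 80], [62, 71, 79], [200, 250, 240]])
    query = [61, 70, 81]
    indexes = find_similar_by_k_nearest_neighbors(trajectories, query, k=2)
    return sorted(indexes)  # expected: [0, 1]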
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return izip(a, b)
def median(values):
values = sorted(values)  # a median is only meaningful on ordered values
array_len = len(values)
if not array_len:
return 0
if array_len % 2 == 0:
idx = array_len / 2
idx2 = idx - 1
return (values[idx] + values[idx2]) / 2
else:
idx = array_len / 2
return values[idx]
```
#### File: bigappleserialbus/bigappleserialbus/chart_trajectories.py
```python
import numpy as np
import matplotlib.pyplot as plt
import os
from trajectory import Trajectory, Base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sklearn.decomposition import PCA
from scipy.stats.stats import pearsonr
from sklearn.neighbors.kde import KernelDensity
sqlite_db_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../buses.db")
engine = create_engine('sqlite:///' + sqlite_db_path) #only creates the file if it doesn't exist already
Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
db_session = DBSession()
route_name = "b65"
end_stop_id = "MTA_308054"
traj_objects = db_session.query(Trajectory.start_time, Trajectory.segment0,Trajectory.segment1,Trajectory.segment2,Trajectory.segment3,Trajectory.segment4,
Trajectory.segment5,Trajectory.segment6,Trajectory.segment7,Trajectory.segment8,Trajectory.segment9,Trajectory.segment10,
Trajectory.segment11,Trajectory.segment12,Trajectory.segment13,Trajectory.segment14,Trajectory.segment15,
Trajectory.segment16,Trajectory.segment17,Trajectory.segment18,Trajectory.segment19,Trajectory.segment20,
Trajectory.segment21,Trajectory.segment22,Trajectory.segment23,Trajectory.segment24,Trajectory.segment25,
Trajectory.segment26,Trajectory.segment27,Trajectory.segment28,Trajectory.segment29,Trajectory.segment30,
Trajectory.segment31,Trajectory.segment32,Trajectory.segment33,Trajectory.segment34,Trajectory.segment35,
Trajectory.segment36,Trajectory.segment37,Trajectory.segment38,Trajectory.segment39).filter(Trajectory.route_name==route_name).filter(Trajectory.end_stop_id == end_stop_id)
end_index = traj_objects[0].index(None)
unfiltered_trajs_with_time = [traj[:end_index] for traj in traj_objects ] # remove start_time item and Nones
unfiltered_trajs = [traj[1:] for traj in unfiltered_trajs_with_time]
trajs_with_time = [traj for traj in unfiltered_trajs_with_time if not any(map(lambda x: x != None and (x > 300 or x < 20), traj[1:])) ]
trajs = [traj for traj in unfiltered_trajs if not any(map(lambda x: x != None and (x > 300 or x < 20), traj)) ]
start_times = [traj[0] for traj in traj_objects if not any(map(lambda x: x != None and (x > 300 or x < 20), traj[1:]))]
colors = []
def histogram_lengths():
trip_lengths = [sum(traj) for traj in trajs]
plt.hist(trip_lengths, bins=50)
plt.show()
def histogram_segments():
segments = [item for sublist in trajs for item in sublist]
plt.hist(segments, bins=50)
plt.show()
def rs_by_split_point():
for n in xrange(1,10):
split_point = n/10.0
x = [sum(traj[:int(len(traj) * split_point)]) for traj in trajs]
y = [sum(traj[int(len(traj) * split_point):]) for traj in trajs]
print(n, pearsonr(x,y)[0])
def rs_by_previous(n=5):
rs = []
for i in xrange(n, len(trajs[0])):
x = [sum(traj[i-n:i]) for traj in trajs]
y = [sum(traj[i:]) for traj in trajs] #dependent
rs.append(pearsonr(x,y)[0])
return rs
def rs_by_day_and_time():
# -0.135908180745 correlation between total route time (on b65, downtownbound) and being a weekend
# 0.20212506141277539 correlation between total route time (on b65, downtownbound) and being rush hour (7,8,9, 17,18,19) on a weekday
x = [int(start_time.weekday() in [5,6]) for start_time in start_times] #independent
y = [sum(traj) for traj in trajs] #dependent
#TODO: how much variation is there weekend to weekday?
print( "weekend/day", pearsonr(x,y)[0])
x = [int(start_time.hour in [7,8,9, 17,18,19] and start_time.weekday() not in [5,6]) for start_time in start_times] #independent. rush hour?
y = [sum(traj) for traj in trajs] #dependent
print( "rush hour (weekdays)", pearsonr(x,y)[0] )
def chart_by_day():
#
# On average, trips on the weekend take less time than trips on weekdays
# 1337 sec versus 1446 sec
#
weekend_times = [sum(traj[1:]) for traj in trajs_with_time if traj[0].weekday() in [5,6]]
weekday_times = [sum(traj[1:]) for traj in trajs_with_time if traj[0].weekday() not in [5,6]]
weekend = sum(weekend_times) / float(len(weekend_times))
weekday = sum(weekday_times) / float(len(weekday_times))
print("weekend: ", weekend, "weekday: ", weekday)
x = np.linspace(min(weekend_times + weekday_times), max(weekend_times + weekday_times), 100).reshape(-1, 1)
kde_weekend = KernelDensity(bandwidth=100).fit(np.array(weekend_times).reshape(-1, 1))
density_weekend = np.exp(kde_weekend.score_samples(x))
kde_weekday = KernelDensity(bandwidth=100).fit(np.array(weekday_times).reshape(-1, 1))
density_weekday = np.exp(kde_weekday.score_samples(x))
plt.plot(x, density_weekend, 'r')
plt.plot(x, density_weekday, 'b')
plt.xlabel("Time start to Grand Ave: red: weekend, blue, weekday")
plt.ylabel("Density")
plt.show()
def chart_by_time():
weekday_amrush = [sum(traj[1:]) for traj in trajs_with_time if traj[0].weekday() not in [5,6] and traj[0].hour in [7,8,9]]
weekday_pmrush = [sum(traj[1:]) for traj in trajs_with_time if traj[0].weekday() not in [5,6] and traj[0].hour in [17,18,19]]
weekday_midday = [sum(traj[1:]) for traj in trajs_with_time if traj[0].weekday() not in [5,6] and traj[0].hour in [10,11,12,13,14,15,16]]
weekday_night = [sum(traj[1:]) for traj in trajs_with_time if traj[0].weekday() not in [5,6] and traj[0].hour in [20,21,22,23,0,1,2,3,4,5,6]]
weekend = [sum(traj[1:]) for traj in trajs_with_time if traj[0].weekday() in [5,6]]
weekday_amrush_avg = sum(weekday_amrush) / float(len(weekday_amrush))
weekday_pmrush_avg = sum(weekday_pmrush) / float(len(weekday_pmrush))
weekday_midday_avg = sum(weekday_midday) / float(len(weekday_midday))
weekday_night_avg = sum(weekday_night) / float(len(weekday_night))
weekend_avg = sum(weekend) / float(len(weekend))
print("weekday_amrush_avg: ", weekday_amrush_avg,
"weekday_pmrush_avg: ", weekday_pmrush_avg,
"weekday_midday_avg: ", weekday_midday_avg,
"weekday_night_avg: ", weekday_night_avg,
"weekend_avg: ", weekend_avg)
x = np.linspace(min(weekday_amrush+weekday_pmrush+weekday_midday+weekday_night+weekend), max(weekday_amrush+weekday_pmrush+weekday_midday+weekday_night+weekend), 100).reshape(-1, 1)
kde_weekday_amrush = KernelDensity(bandwidth=70).fit(np.array(weekday_amrush).reshape(-1, 1))
density_weekday_amrush = np.exp(kde_weekday_amrush.score_samples(x))
kde_weekday_pmrush = KernelDensity(bandwidth=70).fit(np.array(weekday_pmrush).reshape(-1, 1))
density_weekday_pmrush = np.exp(kde_weekday_pmrush.score_samples(x))
kde_weekday_midday = KernelDensity(bandwidth=70).fit(np.array(weekday_midday).reshape(-1, 1))
density_weekday_midday = np.exp(kde_weekday_midday.score_samples(x))
kde_weekday_night = KernelDensity(bandwidth=70).fit(np.array(weekday_night).reshape(-1, 1))
density_weekday_night = np.exp(kde_weekday_night.score_samples(x))
kde_weekend = KernelDensity(bandwidth=70).fit(np.array(weekend).reshape(-1, 1))
density_weekend = np.exp(kde_weekend.score_samples(x))
plt.plot(x, density_weekday_amrush, 'r')
plt.plot(x, density_weekday_pmrush, 'y')
plt.plot(x, density_weekday_midday, 'g')
plt.plot(x, density_weekday_night, 'b')
plt.plot(x, density_weekend, 'm')
plt.xlabel("Time start to endpoint")
plt.ylabel("Density")
plt.show()
def scatter_halves():
split_point = 8/10.0
x = [sum(traj[:int(len(traj) * split_point)]) for traj in trajs]
y = [sum(traj[int(len(traj) * split_point):]) for traj in trajs]
# colors = np.random.rand(N)
# area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses
plt.scatter(x,
y,
# s=area,
# c=colors,
alpha=0.5)
plt.show()
def do_pca():
pca = PCA(n_components=2)
pca.fit(np.array(trajs))
reduced_trajs = pca.transform(trajs)
print(reduced_trajs)
reduced_trajs = reduced_trajs.T.tolist()
plt.scatter(reduced_trajs[0], reduced_trajs[1], alpha=0.5)
plt.show()
def per_segment_length():
avg_segment_times = [sum(segment_vals)/float(len(segment_vals)) for segment_vals in np.array(trajs).T]
plt.scatter(list(xrange(0, len(avg_segment_times))), avg_segment_times)
plt.plot(list(xrange(0, len(avg_segment_times))), avg_segment_times, 'g')
too_long_segments = [sum([1 for n in segment_vals if n > 300]) for segment_vals in np.array(unfiltered_trajs).T]
print(too_long_segments)
plt.scatter(list(xrange(0, len(too_long_segments))), too_long_segments)
plt.plot(list(xrange(0, len(too_long_segments))), too_long_segments, 'r')
too_short_segments = [sum([1 for n in segment_vals if n < 20]) for segment_vals in np.array(unfiltered_trajs).T]
print(too_short_segments)
plt.scatter(list(xrange(0, len(too_short_segments))), too_short_segments)
plt.plot(list(xrange(0, len(too_short_segments))), too_short_segments, 'b')
plt.show()
# print(rs_by_previous())
# per_segment_length()
# rs_by_day_and_time()
chart_by_time()
# for i in xrange(3,9):
# rs = rs_by_previous(i)
# print(i, sum(rs)/len(rs))
# # histogram_lengths()
#TODO: histogram segment times per segment (i.e. Classon to Grand, Franklin to Classon, etc.)
```
#### File: bigappleserialbus/bigappleserialbus/kmodes.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = 'MIT'
__version__ = '0.8'
import random
import numpy as np
from collections import defaultdict
class KModes(object):
def __init__(self, k):
"""k-modes clustering algorithm for categorical data.
See:
Huang, Z.: Extensions to the k-modes algorithm for clustering large data sets with
categorical values, Data Mining and Knowledge Discovery 2(3), 1998.
Inputs: k = number of clusters
Attributes: clusters = cluster numbers [no. points]
centroids = centroids [k * no. attributes]
membership = membership matrix [k * no. points]
cost = clustering cost, defined as the sum distance of
all points to their respective clusters
"""
assert k > 1, "Choose at least 2 clusters."
self.k = k
# generalized form with alpha. alpha > 1 for fuzzy k-modes
self.alpha = 1
# init some variables
self.membership = self.clusters = self.centroids = self.cost = None
def cluster(self, x, pre_runs=10, pre_pctl=20, *args, **kwargs):
"""Shell around _perform_clustering method that tries to ensure a good clustering
result by choosing one that has a relatively low clustering cost compared to the
costs of a number of pre-runs. (Huang [1998] states that clustering cost can be
used to judge the clustering quality.)
"""
if pre_runs and 'init_method' in kwargs and kwargs['init_method'] == 'Cao':
print("Initialization method and algorithm are deterministic. Disabling preruns...")
pre_runs = None
if pre_runs:
precosts = np.empty(pre_runs)
for pr in range(pre_runs):
self._perform_clustering(x, *args, verbose=0, **kwargs)
precosts[pr] = self.cost
print("Prerun {0} / {1}, Cost = {2}".format(pr+1, pre_runs, precosts[pr]))
goodcost = np.percentile(precosts, pre_pctl)
else:
goodcost = np.inf
while True:
self._perform_clustering(x, *args, verbose=1, **kwargs)
if self.cost <= goodcost:
break
def _perform_clustering(self, x, init_method='Huang', max_iters=100, verbose=1):
"""Inputs: x = data points [no. points * no. attributes]
init_method = initialization method ('Huang' for the one described in
Huang [1998], 'Cao' for the one in Cao et al. [2009])
max_iters = maximum no. of iterations
verbose = 0 for no and 1 for normal algorithm progress information,
2 for internal algorithm details
"""
# convert to numpy array, if needed
x = np.asanyarray(x)
npoints, nattrs = x.shape
assert self.k < npoints, "More clusters than data points?"
self.initMethod = init_method
# ----------------------
# INIT
# ----------------------
if verbose:
print("Init: initializing centroids")
self.init_centroids(x)
if verbose:
print("Init: initializing clusters")
self.membership = np.zeros((self.k, npoints), dtype='int64')
# self._clustAttrFreq is a list of lists with dictionaries that contain the
# frequencies of values per cluster and attribute
self._clustAttrFreq = [[defaultdict(int) for _ in range(nattrs)] for _ in range(self.k)]
for ipoint, curpoint in enumerate(x):
# initial assigns to clusters
cluster = np.argmin(self.get_dissim(self.centroids, curpoint))
self.membership[cluster, ipoint] = 1
# count attribute values per cluster
for iattr, curattr in enumerate(curpoint):
self._clustAttrFreq[cluster][iattr][curattr] += 1
# perform an initial centroid update
for ik in range(self.k):
for iattr in range(nattrs):
self.centroids[ik, iattr] = self.get_mode(self._clustAttrFreq[ik][iattr])
# ----------------------
# ITERATION
# ----------------------
if verbose:
print("Starting iterations...")
itr = 0
converged = False
while itr <= max_iters and not converged:
itr += 1
moves = 0
for ipoint, curpoint in enumerate(x):
cluster = np.argmin(self.get_dissim(self.centroids, curpoint))
# if necessary: move point, and update old/new cluster frequencies and centroids
if not self.membership[cluster, ipoint]:
moves += 1
oldcluster = np.argwhere(self.membership[:, ipoint])[0][0]
self._add_point_to_cluster(curpoint, ipoint, cluster)
self._remove_point_from_cluster(curpoint, ipoint, oldcluster)
# update new and old centroids by choosing most likely attribute
for iattr, curattr in enumerate(curpoint):
for curc in (cluster, oldcluster):
self.centroids[curc, iattr] = self.get_mode(
self._clustAttrFreq[curc][iattr])
if verbose == 2:
print("Move from cluster {0} to {1}".format(oldcluster, cluster))
# in case of an empty cluster, reinitialize with a random point
# that is not a centroid
if sum(self.membership[oldcluster, :]) == 0:
while True:
rindx = np.random.randint(npoints)
if not np.all(x[rindx] == self.centroids, axis=1).any():
break
self._add_point_to_cluster(x[rindx], rindx, oldcluster)
fromcluster = np.argwhere(self.membership[:, rindx])[0][0]
self._remove_point_from_cluster(x[rindx], rindx, fromcluster)
# all points seen in this iteration
converged = (moves == 0)
if verbose:
print("Iteration: {0}/{1}, moves: {2}".format(itr, max_iters, moves))
self.calculate_clustering_cost(x)
self.clusters = np.array([np.argwhere(self.membership[:, pt])[0] for pt in range(npoints)])
def init_centroids(self, x):
assert self.initMethod in ('Huang', 'Cao')
npoints, nattrs = x.shape
self.centroids = np.empty((self.k, nattrs))
if self.initMethod == 'Huang':
# determine frequencies of attributes
for iattr in range(nattrs):
freq = defaultdict(int)
for curattr in x[:, iattr]:
freq[curattr] += 1
# sample centroids using the probabilities of attributes
# (I assume that's what's meant in the Huang [1998] paper; it works, at least)
# note: sampling using population in static list with as many choices as
# frequency counts this works well since (1) we re-use the list k times here,
# and (2) the counts are small integers so memory consumption is low
choices = [chc for chc, wght in freq.items() for _ in range(wght)]
for ik in range(self.k):
self.centroids[ik, iattr] = random.choice(choices)
# the previously chosen centroids could result in empty clusters,
# so set centroid to closest point in x
for ik in range(self.k):
ndx = np.argsort(self.get_dissim(x, self.centroids[ik]))
# and we want the centroid to be unique
while np.all(x[ndx[0]] == self.centroids, axis=1).any():
ndx = np.delete(ndx, 0)
self.centroids[ik] = x[ndx[0]]
elif self.initMethod == 'Cao':
# Note: O(N * at * k**2), so watch out with k
# determine densities points
dens = np.zeros(npoints)
for iattr in range(nattrs):
freq = defaultdict(int)
for val in x[:, iattr]:
freq[val] += 1
for ipoint in range(npoints):
dens[ipoint] += freq[x[ipoint, iattr]] / float(nattrs)
dens /= npoints
# choose centroids based on distance and density
self.centroids[0] = x[np.argmax(dens)]
dissim = self.get_dissim(x, self.centroids[0])
self.centroids[1] = x[np.argmax(dissim * dens)]
# for the remaining centroids, choose max dens * dissim to the (already assigned)
# centroid with the lowest dens * dissim
for ik in range(2, self.k):
dd = np.empty((ik, npoints))
for ikk in range(ik):
dd[ikk] = self.get_dissim(x, self.centroids[ikk]) * dens
self.centroids[ik] = x[np.argmax(np.min(dd, axis=0))]
return
def _add_point_to_cluster(self, point, ipoint, cluster):
self.membership[cluster, ipoint] = 1
# update frequencies of attributes in cluster
for iattr, curattr in enumerate(point):
self._clustAttrFreq[cluster][iattr][curattr] += 1
return
def _remove_point_from_cluster(self, point, ipoint, cluster):
self.membership[cluster, ipoint] = 0
# update frequencies of attributes in cluster
for iattr, curattr in enumerate(point):
self._clustAttrFreq[cluster][iattr][curattr] -= 1
return
@staticmethod
def get_dissim(a, b):
# simple matching dissimilarity
return (a != b).sum(axis=1)
@staticmethod
def get_mode(dic):
# Fast method (supposedly) to get key for maximum value in dict.
v = list(dic.values())
k = list(dic.keys())
if len(v) == 0:
pass
return k[v.index(max(v))]
def calculate_clustering_cost(self, x):
self.cost = 0
for ipoint, curpoint in enumerate(x):
self.cost += np.sum(self.get_dissim(self.centroids, curpoint) *
(self.membership[:, ipoint] ** self.alpha))
return
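# Illustrative usage sketch (an assumption, not from the original file): clustering a toy
# categorical matrix with KModes. The data below is hypothetical.
def _kmodes_example():
    data = np.array([[0, 1, 1],
                     [0, 1, 2],
                     [5, 6, 6],
                     [5, 6, 7]])
    km = KModes(2)
    km.cluster(data, pre_runs=2, init_method='Huang')
    return km.clusters, km.centroids, km.cost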
class KPrototypes(KModes):
def __init__(self, k):
"""k-protoypes clustering algorithm for mixed numeric and categorical data.
<NAME>.: Clustering large data sets with mixed numeric and categorical values,
Proceedings of the First Pacific Asia Knowledge Discovery and Data Mining Conference,
Singapore, pp. 21-34, 1997.
Inputs: k = number of clusters
Attributes: clusters = cluster numbers [no. points]
centroids = centroids, two lists (num. and cat.) with [k * no. attributes]
membership = membership matrix [k * no. points]
cost = clustering cost, defined as the sum distance of
all points to their respective clusters
gamma = weighing factor that determines relative importance of
num./cat. attributes (see discussion in Huang [1997])
"""
super(KPrototypes, self).__init__(k)
self.gamma = None
def _perform_clustering(self, x, gamma=None, init_method='Huang', max_iters=100, verbose=1):
"""Inputs: xnum = numeric data points [no. points * no. numeric attributes]
                   xcat = categorical data points [no. points * no. categorical attributes]
gamma = weighing factor that determines relative importance of
num./cat. attributes (see discussion in Huang [1997])
initMethod = initialization method ('Huang' for the one described in
Huang [1998], 'Cao' for the one in Cao et al. [2009])
max_iters = maximum no. of iterations
verbose = 0 for no and 1 for normal algorithm progress information,
2 for internal algorithm details
"""
# convert to numpy arrays, if needed
xnum, xcat = x[0], x[1]
xnum = np.asanyarray(xnum)
xcat = np.asanyarray(xcat)
nnumpoints, nnumattrs = xnum.shape
ncatpoints, ncatattrs = xcat.shape
        assert nnumpoints == ncatpoints, "Numerical and categorical data must have the same number of points."
npoints = nnumpoints
assert self.k < npoints, "More clusters than data points?"
self.initMethod = init_method
# estimate a good value for gamma, which determines the weighing of
# categorical values in clusters (see Huang [1997])
if gamma is None:
gamma = 0.5 * np.std(xnum)
self.gamma = gamma
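        # Rough intuition (illustrative numbers, not from the paper): if the numeric columns
        # have an overall std of 2.0, the default gamma is 1.0, so one categorical mismatch
        # then costs as much as a numeric offset of 1.0 in a single attribute (the numeric
        # part of the distance is squared Euclidean).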
# ----------------------
# INIT
# ----------------------
if verbose:
print("Init: initializing centroids")
# list where [0] = numerical part of centroid and [1] = categorical part
self.init_centroids(xcat)
self.centroids = [np.mean(xnum, axis=0) + np.random.randn(self.k, nnumattrs) *
np.std(xnum, axis=0), self.centroids]
if verbose:
print("Init: initializing clusters")
self.membership = np.zeros((self.k, npoints), dtype='int64')
# keep track of the sum of attribute values per cluster
self._clustAttrSum = np.zeros((self.k, nnumattrs), dtype='float')
# self._clustAttrFreq is a list of lists with dictionaries that contain
# the frequencies of values per cluster and attribute
self._clustAttrFreq = [[defaultdict(int) for _ in range(ncatattrs)] for _ in range(self.k)]
for ipoint in range(npoints):
# initial assigns to clusters
cluster = np.argmin(self.get_dissim_num(self.centroids[0], xnum[ipoint]) +
self.gamma * self.get_dissim(self.centroids[1], xcat[ipoint]))
self.membership[cluster, ipoint] = 1
# count attribute values per cluster
for iattr, curattr in enumerate(xnum[ipoint]):
self._clustAttrSum[cluster, iattr] += curattr
for iattr, curattr in enumerate(xcat[ipoint]):
self._clustAttrFreq[cluster][iattr][curattr] += 1
for ik in range(self.k):
# in case of an empty cluster, reinitialize with a random point
# that is not a centroid
if sum(self.membership[ik, :]) == 0:
while True:
rindex = np.random.randint(npoints)
if not np.all(np.vstack((np.all(xnum[rindex] == self.centroids[0], axis=1),
np.all(xcat[rindex] == self.centroids[1], axis=1))),
axis=0).any():
break
self._add_point_to_cluster(xnum[rindex], xcat[rindex], rindex, ik)
fromcluster = np.argwhere(self.membership[:, rindex])[0][0]
self._remove_point_from_cluster(xnum[rindex], xcat[rindex], rindex, fromcluster)
# perform an initial centroid update
for ik in range(self.k):
for iattr in range(nnumattrs):
# TODO: occasionally "invalid value encountered in double_scalars" in following line
self.centroids[0][ik, iattr] = \
self._clustAttrSum[ik, iattr] / sum(self.membership[ik, :])
for iattr in range(ncatattrs):
self.centroids[1][ik, iattr] = self.get_mode(self._clustAttrFreq[ik][iattr])
# ----------------------
# ITERATION
# ----------------------
if verbose:
print("Starting iterations...")
itr = 0
converged = False
while itr <= max_iters and not converged:
itr += 1
moves = 0
for ipoint in range(npoints):
cluster = np.argmin(self.get_dissim_num(self.centroids[0], xnum[ipoint]) +
self.gamma * self.get_dissim(self.centroids[1], xcat[ipoint]))
# if necessary: move point, and update old/new cluster frequencies and centroids
if not self.membership[cluster, ipoint]:
moves += 1
oldcluster = np.argwhere(self.membership[:, ipoint])[0][0]
self._add_point_to_cluster(xnum[ipoint], xcat[ipoint], ipoint, cluster)
self._remove_point_from_cluster(xnum[ipoint], xcat[ipoint], ipoint, oldcluster)
# update new and old centroids by choosing mean for numerical and
# most likely for categorical attributes
for iattr in range(len(xnum[ipoint])):
for curc in (cluster, oldcluster):
if sum(self.membership[curc, :]):
self.centroids[0][curc, iattr] = \
self._clustAttrSum[curc, iattr] / sum(self.membership[curc, :])
else:
self.centroids[0][curc, iattr] = 0
for iattr in range(len(xcat[ipoint])):
for curc in (cluster, oldcluster):
self.centroids[1][curc, iattr] = \
self.get_mode(self._clustAttrFreq[curc][iattr])
if verbose == 2:
print("Move from cluster {0} to {1}".format(oldcluster, cluster))
# in case of an empty cluster, reinitialize with a random point
# that is not a centroid
if sum(self.membership[oldcluster, :]) == 0:
while True:
rindex = np.random.randint(npoints)
if not np.all(np.vstack((
np.all(xnum[rindex] == self.centroids[0], axis=1),
np.all(xcat[rindex] == self.centroids[1], axis=1))),
axis=0).any():
break
self._add_point_to_cluster(xnum[rindex], xcat[rindex], rindex, oldcluster)
fromcluster = np.argwhere(self.membership[:, rindex])[0][0]
self._remove_point_from_cluster(
xnum[rindex], xcat[rindex], rindex, fromcluster)
# all points seen in this iteration
converged = (moves == 0)
if verbose:
print("Iteration: {0}/{1}, moves: {2}".format(itr, max_iters, moves))
self.calculate_clustering_cost(xnum, xcat)
self.clusters = np.array([np.argwhere(self.membership[:, pt])[0] for pt in range(npoints)])
def _add_point_to_cluster(self, point_num, point_cat, ipoint, cluster):
self.membership[cluster, ipoint] = 1
# update sums of attributes in cluster
for iattr, curattr in enumerate(point_num):
self._clustAttrSum[cluster][iattr] += curattr
# update frequencies of attributes in cluster
for iattr, curattr in enumerate(point_cat):
self._clustAttrFreq[cluster][iattr][curattr] += 1
return
def _remove_point_from_cluster(self, point_num, point_cat, ipoint, cluster):
self.membership[cluster, ipoint] = 0
# update sums of attributes in cluster
for iattr, curattr in enumerate(point_num):
self._clustAttrSum[cluster][iattr] -= curattr
# update frequencies of attributes in cluster
for iattr, curattr in enumerate(point_cat):
self._clustAttrFreq[cluster][iattr][curattr] -= 1
return
@staticmethod
def get_dissim_num(anum, b):
# Euclidean distance
return np.sum((anum - b)**2, axis=1)
def calculate_clustering_cost(self, xnum, xcat):
ncost = 0
ccost = 0
for ipoint, curpoint in enumerate(xnum):
ncost += np.sum(self.get_dissim_num(self.centroids[0], curpoint) *
(self.membership[:, ipoint] ** self.alpha))
for ipoint, curpoint in enumerate(xcat):
ccost += np.sum(self.get_dissim(self.centroids[1], curpoint) *
(self.membership[:, ipoint] ** self.alpha))
self.cost = ncost + self.gamma * ccost
if np.isnan(self.cost):
pass
return
class FuzzyKModes(KModes):
def __init__(self, k, alpha=1.5):
"""Fuzzy k-modes clustering algorithm for categorical data.
Uses traditional, hard centroids, following <NAME>., <NAME>.:
A fuzzy k-modes algorithm for clustering categorical data,
IEEE Transactions on Fuzzy Systems 7(4), 1999.
Inputs: k = number of clusters
alpha = alpha coefficient
Attributes: clusters = cluster numbers with max. membership [no. points]
membership = membership matrix [k * no. points]
centroids = centroids [k * no. attributes]
cost = clustering cost
"""
super(FuzzyKModes, self).__init__(k)
assert alpha > 1, "alpha should be > 1 (alpha = 1 equals regular k-modes)."
self.alpha = alpha
self.omega = None
def _perform_clustering(self, x, init_method='Huang', max_iters=200, tol=1e-5,
cost_inter=1, verbose=1):
"""Inputs: x = data points [no. points * no. attributes]
initMethod = initialization method ('Huang' for the one described in
Huang [1998], 'Cao' for the one in Cao et al. [2009]).
max_iters = maximum no. of iterations
tol = tolerance for termination criterion
cost_inter = frequency with which to check the total cost
(for speeding things up, since it is computationally expensive)
verbose = 0 for no and 1 for normal algorithm progress information,
2 for internal algorithm details
"""
# convert to numpy array, if needed
x = np.asanyarray(x)
npoints, nattrs = x.shape
assert self.k < npoints, "More clusters than data points?"
self.initMethod = init_method
# ----------------------
# INIT
# ----------------------
if verbose:
print("Init: initializing centroids")
self.init_centroids(x)
# store for all attributes which points have a certain attribute value
self._domAttrPoints = [defaultdict(list) for _ in range(nattrs)]
for ipoint, curpoint in enumerate(x):
for iattr, curattr in enumerate(curpoint):
self._domAttrPoints[iattr][curattr].append(ipoint)
# ----------------------
# ITERATION
# ----------------------
if verbose:
print("Starting iterations...")
itr = 0
converged = False
lastcost = np.inf
while itr <= max_iters and not converged:
self.update_membership(x)
self.update_centroids()
# computationally expensive, only check every N steps
if itr % cost_inter == 0:
self.calculate_clustering_cost(x)
converged = self.cost >= lastcost * (1-tol)
lastcost = self.cost
if verbose:
print("Iteration: {0}/{1}, cost: {2}".format(itr, max_iters, self.cost))
itr += 1
self.clusters = np.array([int(np.argmax(self.membership[:, pt])) for pt in range(npoints)])
def update_membership(self, x, threshold=1e-3):
npoints = x.shape[0]
self.membership = np.empty((self.k, npoints))
for ipoint, curpoint in enumerate(x):
dissim = self.get_dissim(self.centroids, curpoint)
if np.any(dissim <= threshold):
self.membership[:, ipoint] = np.where(dissim <= threshold, 1, threshold)
else:
for ik in range(len(self.centroids)):
factor = 1. / (self.alpha - 1)
self.membership[ik, ipoint] = 1 / np.sum((float(dissim[ik]) / dissim)**factor)
return
def update_centroids(self):
self.centroids = np.empty((self.k, len(self._domAttrPoints)))
for ik in range(self.k):
for iattr in range(len(self._domAttrPoints)):
# return attribute that maximizes the sum of the memberships
v = list(self._domAttrPoints[iattr].values())
k = list(self._domAttrPoints[iattr].keys())
memvar = [sum(self.membership[ik, x]**self.alpha) for x in v]
self.centroids[ik, iattr] = k[np.argmax(memvar)]
return
class FuzzyCentroidsKModes(KModes):
def __init__(self, k, alpha=1.5):
"""Fuzzy k-modes clustering algorithm for categorical data.
        Uses fuzzy centroids, following <NAME>., <NAME>., <NAME>.:
Fuzzy clustering of categorical data using fuzzy centroids, Pattern
Recognition Letters 25, 1262-1271, 2004.
Inputs: k = number of clusters
alpha = alpha coefficient
Attributes: clusters = cluster numbers with max. membership [no. points]
membership = membership matrix [k * no. points]
omega = fuzzy centroids [dicts with element values as keys,
element memberships as values, inside lists for
attributes inside list for centroids]
cost = clustering cost
"""
super(FuzzyCentroidsKModes, self).__init__(k)
assert k > 1, "Choose at least 2 clusters."
self.k = k
assert alpha > 1, "alpha should be > 1 (alpha = 1 equals regular k-modes)."
self.alpha = alpha
def _perform_clustering(self, x, max_iters=100, tol=1e-5, cost_inter=1, verbose=1):
"""Inputs: x = data points [no. points * no. attributes]
max_iters = maximum no. of iterations
tol = tolerance for termination criterion
cost_inter = frequency with which to check the total cost
(for speeding things up, since it is computationally expensive)
verbose = 0 for no and 1 for normal algorithm progress information,
2 for internal algorithm details
"""
# convert to numpy array, if needed
x = np.asanyarray(x)
npoints, nattrs = x.shape
assert self.k < npoints, "More clusters than data points?"
# ----------------------
# INIT
# ----------------------
if verbose:
print("Init: initializing centroids")
# count all attributes
freqattrs = [defaultdict(int) for _ in range(nattrs)]
for curpoint in x:
for iattr, curattr in enumerate(curpoint):
freqattrs[iattr][curattr] += 1
# omega = fuzzy set (as dict) for each attribute per cluster
self.omega = [[{} for _ in range(nattrs)] for _ in range(self.k)]
for ik in range(self.k):
for iattr in range(nattrs):
                # a bit unclear from the paper, but this is how they do it in their code
# give a random attribute 1.0 membership and the rest 0.0
randint = np.random.randint(len(freqattrs[iattr]))
for iVal, curVal in enumerate(freqattrs[iattr]):
self.omega[ik][iattr][curVal] = float(iVal == randint)
# ----------------------
# ITERATION
# ----------------------
if verbose:
print("Starting iterations...")
itr = 0
converged = False
lastcost = np.inf
while itr <= max_iters and not converged:
# O(k*N*at*no. of unique values)
self.update_membership(x)
# O(k*N*at)
self.update_centroids(x)
# computationally expensive, only check every N steps
if itr % cost_inter == 0:
self.calculate_clustering_cost(x)
converged = self.cost >= lastcost * (1-tol)
lastcost = self.cost
if verbose:
print("Iteration: {0}/{1}, cost: {2}".format(itr, max_iters, self.cost))
itr += 1
self.clusters = np.array([int(np.argmax(self.membership[:, pt])) for pt in range(npoints)])
def update_membership(self, x, threshold=1e-3):
# Eq. 20 from Kim et al. [2004]
npoints = x.shape[0]
self.membership = np.empty((self.k, npoints))
for ipoint, curpoint in enumerate(x):
dissim = self.get_fuzzy_dissim(curpoint)
if np.any(dissim <= threshold):
self.membership[:, ipoint] = np.where(dissim <= threshold, 1, threshold)
else:
# NOTE: squaring the distances is not mentioned in the paper, but it is
# in the code of Kim et al.; seems to improve performance
dissim **= 2
for ik in range(len(self.omega)):
factor = 1. / (self.alpha - 1)
self.membership[ik, ipoint] = 1 / np.sum((float(dissim[ik]) / dissim)**factor)
return
def update_centroids(self, x):
self.omega = [[defaultdict(float) for _ in range(x.shape[1])] for _ in range(self.k)]
for ik in range(self.k):
for iattr in range(x.shape[1]):
for ipoint, curpoint in enumerate(x[:, iattr]):
self.omega[ik][iattr][curpoint] += self.membership[ik, ipoint] ** self.alpha
# normalize so that sum omegas is 1, analogous to k-means
# (see e.g. Yang et al. [2008] who explain better than the original paper)
sumomg = sum(self.omega[ik][iattr].values())
for key in self.omega[ik][iattr].keys():
self.omega[ik][iattr][key] /= sumomg
return
def get_fuzzy_dissim(self, x):
# TODO: slow, could it be faster?
# dissimilarity = sums of all omegas for non-matching attributes
# see Eqs. 13-15 of Kim et al. [2004]
dissim = np.zeros(len(self.omega))
for ik in range(len(self.omega)):
for iattr, curattr in enumerate(self.omega[ik]):
nonmatch = [v for k, v in curattr.items() if k != x[iattr]]
# dissim[ik] += sum(nonmatch)
# following the code of Kim et al., seems to work better
dissim[ik] += sum(nonmatch) / np.sqrt(np.sum(np.array(list(curattr.values())) ** 2))
return dissim
def calculate_clustering_cost(self, x):
self.cost = 0
for ipoint, curpoint in enumerate(x):
self.cost += np.sum(self.get_fuzzy_dissim(curpoint) *
(self.membership[:, ipoint] ** self.alpha))
return
def soybean_test():
# reproduce results on small soybean data set
x = np.genfromtxt('./soybean.csv', dtype=int, delimiter=',')[:, :-1]
y = np.genfromtxt('./soybean.csv', dtype=str, delimiter=',', usecols=35)
# drop columns with single value
x = x[:, np.std(x, axis=0) > 0.]
kmodes_huang = KModes(4)
kmodes_huang.cluster(x, init_method='Huang')
kmodes_cao = KModes(4)
kmodes_cao.cluster(x, init_method='Cao')
kproto = KPrototypes(4)
kproto.cluster([np.random.randn(x.shape[0], 3), x], init_method='Huang')
fkmodes = FuzzyKModes(4, alpha=1.1)
fkmodes.cluster(x)
ffkmodes = FuzzyCentroidsKModes(4, alpha=1.8)
ffkmodes.cluster(x)
for result in (kmodes_huang, kmodes_cao, kproto, fkmodes, ffkmodes):
classtable = np.zeros((4, 4), dtype=int)
for ii, _ in enumerate(y):
classtable[int(y[ii][-1])-1, result.clusters[ii]] += 1
print("\n")
print(" | Cl. 1 | Cl. 2 | Cl. 3 | Cl. 4 |")
print("----|-------|-------|-------|-------|")
for ii in range(4):
prargs = tuple([ii+1] + list(classtable[ii, :]))
print(" D{0} | {1:>2} | {2:>2} | {3:>2} | {4:>2} |".format(*prargs))
if __name__ == "__main__":
soybean_test()
```
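The classes above are all driven through the same `cluster(...)` entry point that `soybean_test` uses. A minimal sketch of that API on synthetic data, with the classes already in scope (the array shapes and `k=3` are illustrative, not taken from the original repository):
```python
import numpy as np

# Hypothetical categorical data: 100 points, 5 attributes coded as small integers.
x_cat = np.random.randint(0, 4, size=(100, 5))
# Hypothetical numeric companion data for k-prototypes.
x_num = np.random.randn(100, 3)

km = KModes(3)
km.cluster(x_cat, init_method='Cao')             # hard k-modes
print(km.clusters[:10], km.cost)

kp = KPrototypes(3)
kp.cluster([x_num, x_cat], init_method='Huang')  # mixed numeric + categorical
print(kp.clusters[:10], kp.cost)
```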
#### File: bigappleserialbus/bigappleserialbus/ticker.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = 'Apache'
__version__ = '0.1'
import time
class Ticker:
def __init__(self, betweenTicks=None):
"""Returns a ticker. Optionally set the amount of time per tick."""
self.tickers = {}
self.ticksSoFar = 0
self.error_callbacks = []
        self.betweenTicks = 1 if betweenTicks is None else betweenTicks  # time in seconds
def __tick__(self):
        for func, frequency in self.tickers.items():
if frequency == 0 or self.ticksSoFar % frequency == 0:
func()
def register(self, function, frequency):
"""Set a function to be executed once per `frequency` ticks"""
self.tickers[function] = frequency
def start(self):
try:
while True:
start_time = time.time()
self.__tick__()
duration = time.time() - start_time
time.sleep(max(self.betweenTicks - duration, 0))
self.ticksSoFar += 1
except Exception as e:
for error_callback in self.error_callbacks:
error_callback(e)
def global_error(self, func):
self.error_callbacks.append(func)
```
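A hedged usage sketch for `Ticker`: the callback names below are hypothetical, and the import assumes the module is importable as `ticker`.
```python
from ticker import Ticker

def check_bus():
    print("polling bus status")

def on_error(exc):
    print("ticker stopped:", exc)

t = Ticker(betweenTicks=1)   # one tick per second
t.register(check_bus, 30)    # run check_bus once every 30 ticks
t.global_error(on_error)     # called with the exception if any tick raises
t.start()                    # blocks and ticks until an exception occurs
```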
|
{
"source": "JeremyBuchanan/psf-photometry-pipeline",
"score": 3
}
|
#### File: JeremyBuchanan/psf-photometry-pipeline/core.py
```python
import astropy.io.fits as fits
import copy
import numpy as np
from astropy import units as u
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.nddata import NDData, CCDData
from astropy.stats import gaussian_sigma_to_fwhm, sigma_clipped_stats
from astropy.table import Table
from astropy.wcs import WCS
from astroquery.astrometry_net import AstrometryNet
from ccdproc import Combiner
from photutils import aperture_photometry, CircularAperture, EPSFBuilder, CircularAnnulus
from photutils.background import MMMBackground
from photutils.detection import DAOStarFinder, IRAFStarFinder
from photutils.psf import IterativelySubtractedPSFPhotometry, DAOGroup, extract_stars
from scipy.optimize import curve_fit
def import_images(im_list, p):
'''
A function that imports the data from an image file, following a given
path to find the image file
    Parameters
    ----------
im_list: list
List containing the names of the image files
p: string
The pathway the script should follow to find the image
files on the computer
Returns
-------
im_data: list
A list of the data arrays containing the pixel data
of images
in_headers: list
A list of all the image headers
'''
im_data = []
im_headers = []
for i in im_list:
x = str(i)
path = p + x
hdu = fits.open(path)
data = hdu[1].data
header = hdu[1].header
im_data.append(data)
im_headers.append(header)
return im_data, im_headers
def find_fwhm(image, size=100):
'''
Fits a 2D gaussian surface to the brightest, non-saturated star
on an image
Parameters
----------
image: array-like
raw pixel data from the image
size: integer
radius of the cutout around the star
Returns
-------
    fwhm: float
        full width at half maximum of the best-fit 2D Gaussian (0 if no good fit is found)
    im_sig: float
        mean of the fitted sig_x and sig_y, used elsewhere as a size scale (0 if no good fit)
'''
mean_val, median_val, std_val = sigma_clipped_stats(image, sigma=2.0)
search_image = image[100:-100,100:-100]
max_peak = np.max(search_image)
count = 0
while max_peak >= 0:
count += 1
rs, cs = np.where(search_image==max_peak)[0][0], np.where(search_image==max_peak)[1][0]
r = rs+100
c = cs+100
if max_peak < 50000:
star = image[r-size:r+size,c-size:c+size]
x = np.arange(2*size)
y = np.arange(2*size)
X, Y = np.meshgrid(x, y)
def gaussian(M, x0, y0, sig_x, sig_y, A, off):
x, y = M
return A * np.exp(-((x-x0)**2)/(2*sig_x**2)-((y-y0)**2)/(2*sig_y**2)) + off
xdata = np.vstack((X.ravel(), Y.ravel()))
ydata = star.ravel()
p = [size, size, 3, 3, 10000, median_val]
try:
popt, pcov = curve_fit(f=gaussian, xdata=xdata, ydata=ydata, p0=p)
im_sig = np.mean(popt[2:4])
fwhm = im_sig*gaussian_sigma_to_fwhm
except:
fwhm = 0
if fwhm > 2:
break
else:
image[r-size:r+size,c-size:c+size] = 0
search_image = image[100:-100,100:-100]
max_peak = np.max(search_image)
else:
image[r-size:r+size,c-size:c+size] = 0
search_image = image[100:-100,100:-100]
max_peak = np.max(search_image)
if count > 100:
fwhm = 0
im_sig = 0
break
if max_peak < 1000:
fwhm = 0
im_sig = 0
break
return fwhm, im_sig
def find_stars(image, sigma, peak=100000):
'''
Searches data from an image to find objects above a given brightness
threshold based off parameters of the ccd chip
Parameters
----------
image: array-like
Array containing the intensity of light for each pixel
on the ccd chip
sigma: float
sets the size tolerance for detected objects. Usually
5.0, more than 5 is statistically unreasonable
peak: int
The max number of counts the chip can handle before the
image starts to smear. Usual ccd can handle 100,000 counts
Returns
-------
stars: table
A table containing all the found stars and their parameters:
id, xcentroid, ycentroid, sharpness, roundness, npix, sky,
peak, flux, mag
'''
sigma_psf = sigma
mean_val, median_val, std_val = sigma_clipped_stats(image, sigma=2.0)
bkg = median_val
daofind = DAOStarFinder(fwhm=sigma_psf*gaussian_sigma_to_fwhm, threshold=bkg+10*std_val,
sky=bkg, peakmax=peak, exclude_border=True)
stars = daofind(image)
return stars
def calculate_shift(stars1, stars2):
'''
Calculates the necessary shift of one image in order to be aligned
with a second image
Parameters
----------
stars1: table
The table returned from using find_stars on an image
stars2: table
Same as stars1, for a different image
Returns
-------
diff: table
Table containing the x, y, and total offset of each star object
found between two images
'''
diff = np.zeros([stars1['xcentroid'].size, 3])*np.nan
for i in range(stars1['xcentroid'].size):
dx = stars1['xcentroid'][i] - stars2['xcentroid']
dy = stars1['ycentroid'][i] - stars2['ycentroid']
distances = np.abs(np.sqrt((dx)**2 + (dy)**2))
match = (distances == np.min(distances))
if distances[match] < 20:
diff[i, 0] = distances[match]
diff[i, 1] = dx[match]
diff[i, 2] = dy[match]
return diff
def roll_image(image, diff, threshold=0.5):
'''
Averages the x and y offset of objects on 2 images to the nearest
integer, and then rolls the image by that number of pixels along each
axis. Good for aligning two images
Parameters
----------
image: array-like
Array containing the intensity of light for each pixel
on the ccd chip
diff: table
Table containing the x, y, and total offset of each star object
found between two images
threshold: float
The minimum pixel offset between images to allow shifting,
usually 0.5 pixels
Returns
-------
image_shift: array-like
The "rolled" version of the same image, now aligned to another
reference image
'''
offset = np.median(diff[:, 0])
if offset >= threshold:
xshift = np.median(diff[:, 1])
yshift = np.median(diff[:, 2])
xshift_int = np.int(np.round(xshift, 0))
yshift_int = np.int(np.round(yshift, 0))
image_shift = np.roll(image, (yshift_int, xshift_int), axis = (0, 1))
return image_shift
else:
return image
def median_combiner(images):
'''
Function that takes the median of multiple images containing the
same stars objects
Parameters
----------
images: list
A list of the data arrays containing the pixel data
of images
Returns
-------
median_image: array-like
Array containing the median intensity of light for each
pixel for a set of images
'''
ccd_image_list = []
for image in images:
ccd_image = CCDData(image, unit=u.adu)
ccd_image_list.append(ccd_image)
c = Combiner(ccd_image_list)
c.sigma_clipping(func = np.ma.median)
median_image = c.median_combine()
median_image = np.asarray(median_image)
return median_image
def image_combiner(im_data, im_sig):
'''
Returns a median combination of a list of images
Parameters
----------
im_data: list
contains all the image data from the image set
im_sig: float
an image customized size parameter for searching an
image for stars
Returns
-------
median_image: array-like
'''
stars = []
for i in im_data:
s = find_stars(image=i, sigma=im_sig, peak=100000)
stars.append(s)
if s is None:
median_image = None
return median_image
else:
diffs = []
for s in range(len(stars)):
diff = calculate_shift(stars1=stars[0], stars2=stars[s])
diffs.append(diff)
images = []
for i in range(len(im_data)):
image_shift = roll_image(image=im_data[i], diff=diffs[i], threshold=0.5)
images.append(image_shift)
median_image = median_combiner(images=images)
return median_image
def image_mask(image, sources, fwhm, bkg, bkg_std):
'''
Masking routine that rejects stars too close to the edge of the
image, too close to each other, and the 5 brightest and 5 dimmest
stars in the image
Parameters
----------
image: array-like
raw pixel data from the image
sources: Table
        contains all the data acquired from the star searching routine
fwhm: float
used for scaling the mask based on how focused the image is
Returns
-------
stars_tbl: Table
condensed form of the sources table, excluding all the masked
stars. columns: xcentroid, ycentroid, flux, peak, id
'''
size = 100
hsize = (size - 1) / 2
x = sources['xcentroid']
y = sources['ycentroid']
flux = sources['flux']
i = sources['id']
p = sources['peak']
mask = ((x > hsize) & (x < (image.shape[1] - 1 - hsize)) &
(y > hsize) & (y < (image.shape[0] - 1 - hsize)))
stars_tbl = Table()
stars_tbl['x'] = x[mask]
stars_tbl['y'] = y[mask]
stars_tbl['flux'] = flux[mask]
stars_tbl['id'] = i[mask]
stars_tbl['peak'] = p[mask]
d = []
idxi = 0
for i in stars_tbl['id']:
idxj = 0
for j in stars_tbl['id']:
if i != j:
threshold = 5*fwhm
dx = stars_tbl['x'][idxi] - stars_tbl['x'][idxj]
dy = stars_tbl['y'][idxi] - stars_tbl['y'][idxj]
distance = np.abs(np.sqrt((dx)**2 + (dy)**2))
if distance <= threshold:
d.append(idxi)
idxj = idxj+1
idxi = idxi + 1
idxp = 0
min_peak = bkg + 10 * bkg_std
for i in stars_tbl['peak']:
if i <= min_peak:
d.append(idxp)
idxp += 1
stars_tbl.remove_rows(d)
stars_tbl.sort('flux', reverse=True)
if len(stars_tbl) > 10:
stars_tbl.remove_rows([-5,-4,-3,-2,-1,0,1,2,3,4])
return stars_tbl
def bkg_sub(image, stars_tbl, fwhm):
'''
Local background subtraction routine for stars on an image
Parameters
----------
image: array-like
raw pixel data of the image
stars_tbl: Table
contains positional and flux data for all the stars
fwhm: float
used for scaling the area to be background subtracted
based on how focused the image is
Returns
-------
image_lbs: array-like
a copy of the original image, with regions around each star
containing no background flux
'''
image_lbs = copy.deepcopy(image)
for s in stars_tbl['x','y']:
position = [s[0],s[1]]
aperture = CircularAperture(position, r=20)
annulus = CircularAnnulus(position, r_in=20, r_out=30)
annulus_mask = annulus.to_mask(method='center')
annulus_data = annulus_mask.multiply(image_lbs)
annulus_data_1d = annulus_data[annulus_mask.data > 0]
_, median_sigclip, _ = sigma_clipped_stats(annulus_data_1d)
bkg_median = median_sigclip
pos_pix = [np.int(np.round(position[0], 0)), np.int(np.round(position[1], 0))]
size = 5*fwhm
for r in range(len(image_lbs)):
if (r > pos_pix[1]-(size/2) and r < pos_pix[1]+(size/2)):
for c in range(len(image_lbs[r])):
if (c > pos_pix[0]-(size/2) and c < pos_pix[0]+(size/2)):
image_lbs[r][c] -= bkg_median
return image_lbs
def build_psf(image, stars_tbl, fwhm):
'''
    Constructs a point spread function (psf) from a sample of stars
on an image
Parameters
----------
image: array-like
raw pixel data of the image
stars_tbl: Table
contains positional and flux data for all the stars
fwhm: float
used for scaling the size of the star cutouts based on
how focused the image is
Returns
-------
epsf: EPSFModel
        the effective psf constructed from the stars
stars: EPSFStars
the star cutouts used to build the psf
fitted_stars: EPSFStars
the original stars, with updated centers and fluxes derived
from fitting the output psf
'''
nddata = NDData(data = image)
stars = extract_stars(nddata, stars_tbl, size = 5*fwhm)
epsf_builder = EPSFBuilder(oversampling=2, maxiters=10, progress_bar=False, smoothing_kernel='quadratic')
epsf, fitted_stars = epsf_builder(stars)
return epsf, stars, fitted_stars
def do_photometry(image, epsf, fwhm):
'''
Iterative photometry routine using a point spread function (psf) model
Parameters
----------
image: array-like
raw pixel data from the image
epsf: EPSFModel
the psf model for finding stars on the image
fwhm: float
used for scaling data collection region around each star based
on how focused the image is
Returns
-------
results: Table
contains all the photometry data: x_0, x_fit, y_0, y_fit, flux_0,
flux_fit, id,group_id, flux_unc, x_0_unc, y_0_unc, iter_detected
photometry:
the iterative search function for performing photometry
'''
mean_val, median_val, std_val = sigma_clipped_stats(image, sigma=2.0)
daofind = DAOStarFinder(fwhm=fwhm, threshold=median_val+20*std_val, sky=median_val, peakmax=100000, exclude_border=True)
daogroup = DAOGroup(2*fwhm)
mmm_bkg = MMMBackground()
fitter = LevMarLSQFitter()
def round_to_odd(f):
return np.ceil(f) // 2 * 2 + 1
size = 5*fwhm
fitshape = np.int(round_to_odd(size))
photometry = IterativelySubtractedPSFPhotometry(finder=daofind, group_maker=daogroup, bkg_estimator=mmm_bkg,
psf_model=epsf, fitter=fitter, niters=5, fitshape=fitshape,
aperture_radius=(size-1)/2)
results = photometry(image)
return results, photometry
def get_residuals(results, photometry, fwhm, image):
'''
Generates residual image cutouts from photometry results
Parameters
----------
results: Table
contains all the photometry data: x_0, x_fit, y_0, y_fit, flux_0,
flux_fit, id,group_id, flux_unc, x_0_unc, y_0_unc, iter_detected
photometry:
the iterative search function for performing photometry
    Returns
    -------
    results_tbl: Table
        condensed table of the photometry results, with just the positional
        and flux data
    final_stars: EPSFStars
        cutouts of the original image around each star found by the photometry routine
    residual_stars: EPSFStars
        cutouts of the residuals of the stars left after photometry is completed
'''
results_tbl = Table()
results_tbl['x'] = results['x_fit']
results_tbl['y'] = results['y_fit']
results_tbl['flux'] = results['flux_fit']
results_tbl.sort('flux', reverse=True)
ndresidual = NDData(data=photometry.get_residual_image())
nddata = NDData(data=image)
final_stars = extract_stars(nddata, results_tbl, size=5*fwhm)
residual_stars = extract_stars(ndresidual, results_tbl, size=5*fwhm)
return results_tbl, final_stars, residual_stars
def get_wcs(results_tbl):
'''
Queries the website astrometry.net with image data, returning a world coordinate
system (wcs) solution, along with a header containing this solution
Parameters
----------
results_tbl: Table
contains positional and flux data for all stars found in the photometry
routine
    Returns
    -------
    sky: SkyCoord
contains all locations for the stars in results_tbl in RA and DEC
instead of pixels
wcs_header: Header
an image header with the RA and DEC included
'''
ast = AstrometryNet()
ast.api_key = '<KEY>'
try_again = True
submission_id = None
image_width = 4096
image_height = 4096
while try_again:
try:
if not submission_id:
wcs_header = ast.solve_from_source_list(results_tbl['x'][:30], results_tbl['y'][:30],
image_width, image_height, submission_id=submission_id,
solve_timeout=600)
else:
wcs_header = ast.monitor_submission(submission_id, solve_timeout=600)
except TimeoutError as e:
submission_id = e.args[1]
else:
try_again = False
if wcs_header:
w = WCS(wcs_header)
sky = w.pixel_to_world(results_tbl['x'], results_tbl['y'])
return sky, wcs_header
else:
return None, wcs_header
```
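Taken together, the functions above form the reduction chain that `psf_phot.py` (further down) drives end to end. A condensed sketch of that flow; the file names and data path are placeholders:
```python
import numpy as np
import core as c
from astropy.stats import sigma_clipped_stats

# Placeholder inputs: a list of FITS file names and the directory holding them.
im_list = ['epoch1_0001.fits', 'epoch1_0002.fits']
im_data, im_headers = c.import_images(im_list=im_list, p='/data/raw/')

fwhm, im_sig = c.find_fwhm(image=np.copy(im_data[0]))       # find_fwhm modifies its input
image = c.image_combiner(im_data=im_data, im_sig=im_sig)

mean_val, median_val, std_val = sigma_clipped_stats(image, sigma=2.0)
image -= median_val
sources = c.find_stars(image=image, sigma=im_sig)
stars_tbl = c.image_mask(image=image, sources=sources, fwhm=fwhm, bkg=median_val, bkg_std=std_val)
image_lbs = c.bkg_sub(image=image, stars_tbl=stars_tbl, fwhm=fwhm)
epsf, stars, fitted_stars = c.build_psf(image=image_lbs, stars_tbl=stars_tbl, fwhm=fwhm)
results, photometry = c.do_photometry(image=image, epsf=epsf, fwhm=fwhm)
```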
#### File: JeremyBuchanan/psf-photometry-pipeline/p_io.py
```python
import astropy.io.fits as fits
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import obj_data as od
import saphires as saph
from astropy.time import Time
from astropy.visualization import ZScaleInterval, SqrtStretch, ImageNormalize, simple_norm
from matplotlib.backends.backend_pdf import PdfPages
ra = od.ra
dec = od.dec
pmra = od.pmra
pmdec = od.pmdec
plx = od.plx
epoch = od.epoch
matplotlib.rcParams.update({'font.size': 12})
def write_fits(fn, data, im_headers, wcs_header):
'''
Writes a new fits file including the image data and
and updated header for the new image
Parameters
----------
fn: string
The desired file name of the new fits file
data: array-like
Contains all the image data
Returns
-------
avg_airmass: float
the amount of atmosphere obscuring the target, found in image header. Here
the airmass for all images is averaged
bjd: float
Barycentric Julian Date, found in the image header
header: Header
'''
for keys in wcs_header:
if keys not in ['HISTORY', 'COMMENT']:
im_headers[0][keys] = wcs_header[keys]
airmass = []
for i in im_headers:
airmass.append(i['AIRMASS'])
avg_airmass = np.mean(airmass)
im_headers[0]['AIRMASS'] = avg_airmass
jd_middle = np.zeros(len(im_headers))
for i in range(len(im_headers)):
jd_middle[i] = Time(im_headers[i]['DATE-OBS'], format='isot').jd
exptime = im_headers[i]['EXPTIME']
jd_middle[i] = jd_middle[i] + (exptime/2.0)/3600.0/24.0
isot_date_obs = Time(np.mean(jd_middle), format='jd').isot
tele = im_headers[0]['SITEID']
brv,bjd,bvcorr = saph.utils.brvc(isot_date_obs,0.0,tele,ra=ra,dec=dec,epoch=epoch, pmra=pmra, pmdec=pmdec, px=plx)
im_headers[0]['BJD'] = bjd[0]
header = im_headers[0]
hdu_p = fits.PrimaryHDU(data=data, header=header)
hdu = fits.HDUList([hdu_p])
hdu.writeto(fn)
return avg_airmass, bjd, header
def write_pdf(name, images, model=None, final_stars=None, residual_stars=None, fluxes=None, plot_res=None):
pp = PdfPages(name)
for i in range(len(images)):
fig, ax = plt.subplots(1, figsize=(10, 10))
norm = ImageNormalize(images[i], interval=ZScaleInterval(), stretch=SqrtStretch())
im = ax.imshow(images[i], norm=norm)
plt.colorbar(im)
plt.tight_layout()
pp.savefig()
plt.close()
if model is not None:
fig, ax = plt.subplots(1, figsize=(10, 10))
psf = ax.imshow(model)
plt.colorbar(psf)
ax.set_title('PSF Model')
plt.tight_layout()
pp.savefig()
plt.close()
if final_stars is not None:
if plot_res == 'y':
nrows = len(final_stars)
ncols = 2
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 800), squeeze=True)
ax = ax.ravel()
index = 0
for i in range(0, nrows*ncols, 2):
norm = simple_norm(final_stars[index],'log')
norm2 = simple_norm(residual_stars[index], 'linear')
im = ax[i].imshow(final_stars[index], norm=norm, origin='lower', cmap='viridis', interpolation='none')
fig.colorbar(im, ax = ax[i])
ax[i].set_title(np.str(fluxes[index]))
im_r = ax[i+1].imshow(residual_stars[index], norm=norm2, origin='lower', cmap='viridis', interpolation='none')
fig.colorbar(im_r, ax = ax[i+1])
index = index + 1
plt.tight_layout()
pp.savefig()
plt.close()
pp.close()
def write_csv(name, im_name, bjd, filt, airmass, results, sky):
f = open(name, 'w')
f.write('NAME, ID, BJD, FLUX, FLUX ERROR, MAG, MAG ERROR, FILTER, X POSITION, Y POSITION, AIRMASS, RA, DEC\n')
for i in range(sky.size):
if results['flux_fit'][i] > 0:
star_id = results['id'][i]
flux = results['flux_fit'][i]
fluxerr = results['flux_unc'][i]
mag = -2.5*np.log10(flux)
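            # 1.08574 is approximately 2.5 / ln(10): first-order propagation of the flux error into magnitudes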
magerr = (1.08574*fluxerr)/(flux)
x_pos = results['x_fit'][i]
y_pos = results['y_fit'][i]
ra = sky[i].ra.degree
dec = sky[i].dec.degree
f.write(im_name+','+np.str(i)+','+np.str(bjd)+','+np.str(flux)+','+np.str(fluxerr)+','+np.str(mag)+','+np.str(magerr)
+','+filt+','+np.str(x_pos)+','+np.str(y_pos)+','+str(airmass)+','+np.str(ra)+','+np.str(dec)+'\n')
f.close()
def write_txt(name, sources, stars_tbl, fwhm, results=None, t0=None,t1=None,t2=None,t3=None,t4=None,t5=None):
'''
Short text file with diagnostic info about each image set, specifically
for a successful run of the image set
Parameters
----------
name: string
name of the saved file
sources: Table
tabulated info about all the stars found on the image
stars_tbl: Table
tabulated info about all the stars used to form a psf
results: Table
tabulated info about all the stars found with the photometry routine
'''
f = open(name, 'w')
f.write('Number of stars in sources: '+np.str(len(sources))+'\nNumber of stars in stars_tbl: '+np.str(len(stars_tbl))
+'\nNumbers of stars in results: '+np.str(len(results))+'\nMin, Max, Median peaks in sources: '
+np.str(np.min(sources['peak']))+', '+np.str(np.max(sources['peak']))+', '+np.str(np.median(sources['peak']))
+'\nMin, Max, Median fluxes in results: '+np.str(np.min(results['flux_fit']))+', '+np.str(np.max(results['flux_fit']))+', '
+np.str(np.median(results['flux_fit']))+'\nFWHM: '+np.str(fwhm)+'\n')
if t5:
t_1 = t1-t0
t_2 = t2-t1
t_3 = t3-t2
t_4 = t4-t3
t_5 = t5-t4
t_f = t5-t0
f.write('Time to combine images: '+np.str(t_1)+'\nTime to find stars: '+np.str(t_2)+'\nTime to build psf: '
+np.str(t_3)+'\nTime to run photometry: '+np.str(t_4)+'\nTime to get wcs: '+np.str(t_5)+'\nTotal time: '
+np.str(t_f)+'\n')
f.close()
```
#### File: JeremyBuchanan/psf-photometry-pipeline/psf_phot.py
```python
import copy as copy
import numpy as np
import core as c
import time as t
import p_io
from astropy.stats import sigma_clipped_stats
from astropy.table import Table
def pipeline(fn_path, fits_path, res_path):
'''
This is a pipeline for reducing raw imaging data using point spread function
photometry
Parameters
----------
fn_path: string
pathway to the csv containing the names of all image files in one
column, and the grouping id of each image in the second column
fits_path: string
pathway to a folder containing all the FITS files of the imaging
        data
res_path: string
pathway to a directory for all the output files of the photometry
routine
Returns
-------
N/A
Outputs
-------
CSV: tabulated data of the photometry results
FITS: contains the header as well as the imaging data for the median
combined images of each image set
PDF: images of all the images in the image set, as well as the
constructed PSF and (per input) the residual images of all the stars
included in the calculations
TXT: text file containing a bunch of diagnostic information about the
script
'''
filenames, epochid = np.loadtxt(fn_path, unpack=True, delimiter=',', usecols=(0,1), dtype='U100,f')
nepochs = np.int(np.max(epochid))
plot_residuals = input('Plot residuals? [y/n] ')
for i in range(0, nepochs+1):
images = filenames[(epochid == i)]
x = str(images[0])
t0 = t.perf_counter()
set_name = x[:22]
path = fits_path
im_data, headers = c.import_images(im_list=images, p=path)
im_copy = copy.deepcopy(im_data[0])
fwhm, im_sig = c.find_fwhm(image=im_copy)
if im_sig == 0:
print('No stars were detected in this image set.')
p_io.write_pdf(name=res_path+set_name+'_'+np.str(i)+'.pdf', images=im_data)
else:
if len(im_data) > 1:
median_image = c.image_combiner(im_data=im_data, im_sig=im_sig)
if median_image is None:
image = im_data[0]
print('No stars were detected in this image set.')
p_io.write_pdf(name=res_path+set_name+'_'+np.str(i)+'.pdf', images=im_data)
continue
else:
image = median_image
mean_val, median_val, std_val = sigma_clipped_stats(image, sigma=2.0)
image -= median_val
stars_tbl = Table()
t1 = t.perf_counter()
sources = c.find_stars(image=image, sigma=im_sig)
t2 = t.perf_counter()
stars_tbl = c.image_mask(image=image, sources=sources, fwhm=fwhm, bkg=median_val, bkg_std=std_val)
image_lbs = c.bkg_sub(image=image, stars_tbl=stars_tbl, fwhm=fwhm)
epsf, stars, fitted_stars = c.build_psf(image=image_lbs, stars_tbl=stars_tbl, fwhm=fwhm)
t3 = t.perf_counter()
if len(stars_tbl) <= 10 or fwhm > 30:
print('Not enough stars were detected.')
results = []
p_io.write_pdf(name=res_path+set_name+'_'+np.str(i)+'.pdf', images=im_data, model=epsf.data)
p_io.write_txt(name=res_path+set_name+'_'+np.str(i)+'_diag.txt', sources=sources, stars_tbl=stars_tbl, fwhm=fwhm)
else:
results, photometry = c.do_photometry(image=image, epsf=epsf, fwhm=fwhm)
                results_tbl, final_stars, residual_stars = c.get_residuals(results=results, photometry=photometry, fwhm=fwhm, image=image)
results.sort('flux_fit', reverse=True)
t4 = t.perf_counter()
sky, wcs_header = c.get_wcs(results_tbl=results_tbl)
if sky:
t5 = t.perf_counter()
avg_airmass, bjd, header = p_io.write_fits(fn=res_path+set_name+'_'+np.str(i)+'.fits', data=image, im_headers=headers, wcs_header=wcs_header)
p_io.write_pdf(name=res_path+set_name+'_'+np.str(i)+'.pdf', images=im_data, model=epsf.data, final_stars=final_stars, residual_stars=residual_stars, fluxes=results_tbl['flux'], plot_res=plot_residuals)
p_io.write_txt(name=res_path+set_name+'_'+np.str(i)+'_diag.txt', sources=sources, stars_tbl=stars_tbl, results=results, fwhm=fwhm,t0=t0,t1=t1,t2=t2,t3=t3,t4=t4,t5=t5)
p_io.write_csv(name=res_path+set_name+'_'+np.str(i)+'.csv', im_name=set_name, bjd=bjd[0], filt=header['FILTER'], airmass=avg_airmass, results=results, sky=sky)
else:
print('Failed to retrieve WCS transformation')
p_io.write_pdf(name=res_path+set_name+'_'+np.str(i)+'.pdf', images=im_data, model=epsf.data, final_stars=final_stars, residual_stars=residual_stars, fluxes=results_tbl['flux'], plot_res=plot_residuals)
p_io.write_txt(name=res_path+set_name+'_'+np.str(i)+'_diag.txt', sources=sources, stars_tbl=stars_tbl, results=results, fwhm=fwhm)
```
|
{
"source": "jeremybusk/demoflaskpgsqlnginxdocker",
"score": 3
}
|
#### File: portal/portal/cli.py
```python
import click
from flask_sqlalchemy import SQLAlchemy
from portal import app
@app.cli.command()
@click.argument('name')
def example(name):
print(name)
@app.cli.command()
def initdata():
with open('data/demoapp1-uuids.txt', 'r') as f:
uuids = f.read().splitlines()
db = SQLAlchemy(app)
app.config['SESSION_SQLALCHEMY'] = db
app.config['SESSION_TYPE'] = 'sqlalchemy'
sql = "INSERT INTO products (name) VALUES ('demoapp1')"
print(sql)
db.engine.execute(sql)
for uuid in uuids:
sql = f"INSERT INTO licenses (product_id, uuid) VALUES (1, '{uuid}')"
print(sql)
db.engine.execute(sql)
```
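One way to exercise these commands without a shell is Flask's CLI test runner; a hedged sketch, assuming the `portal` package imports cleanly and the database behind `initdata` is reachable:
```python
from portal import app

runner = app.test_cli_runner()
result = runner.invoke(args=['example', 'world'])
print(result.output)                 # -> "world"
# runner.invoke(args=['initdata'])   # would insert the demoapp1 product and license rows
```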
#### File: portal/portal/utils.py
```python
from cryptography.hazmat.primitives import serialization \
as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend \
as crypto_default_backend
from email.mime.text import MIMEText
from cryptography.hazmat.backends import default_backend
# from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import serialization
import random
import string
import smtplib
def gen_random_string(chars=32, lower=False):
s = ''.join(
random.choice(
string.ascii_uppercase + string.ascii_lowercase + string.digits
) for _ in range(chars))
if lower:
return s.lower()
return s
def send_email(recipients, subject, body):
email_username = '<EMAIL>'
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = f'no-reply <<EMAIL>>'
msg['To'] = recipients
msg = msg.as_string()
session = smtplib.SMTP('smtp.uvoo.io', 587)
session.ehlo()
session.starttls()
session.login(email_username, '$mtpRelayUser123@')
session.sendmail(email_username, recipients, msg)
session.quit()
def create_key_pair(key_cipher='rsa', key_format='openssh'):
if key_cipher == 'rsa' and key_format == 'openssh':
rsa_key = rsa.generate_private_key(
backend=crypto_default_backend(),
public_exponent=65537,
key_size=4096
)
private_key = rsa_key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.PKCS8,
crypto_serialization.NoEncryption())
public_key = rsa_key.public_key().public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
)
elif key_cipher == 'rsa' and key_format == 'pem':
rsa_key = rsa.generate_private_key(
backend=crypto_default_backend(),
public_exponent=65537,
key_size=4096
)
private_key = rsa_key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.PKCS8,
crypto_serialization.NoEncryption())
public_key = rsa_key.public_key().public_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PublicFormat.SubjectPublicKeyInfo
)
elif key_cipher == 'ec' and key_format == 'pem':
# Ciphers: SECP384R1, SECP521R1
ec_key = ec.generate_private_key(
ec.SECP521R1(),
default_backend()
)
private_key = ec_key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.PKCS8,
crypto_serialization.NoEncryption())
public_key = ec_key.public_key().public_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PublicFormat.SubjectPublicKeyInfo
)
else:
s = f"Unsupported key cipher {key_cipher} and/or format {key_format}."
print(s)
return -1
return {'private_key': private_key.decode('utf-8'),
'public_key': public_key.decode('utf-8'),
'key_cipher': key_cipher,
'key_format': key_format}
def load_private_key(filename, file_format='pem'):
with open(filename, 'rb') as pem_in:
pemlines = pem_in.read()
private_key = crypto_serialization.load_pem_private_key(
pemlines, None, default_backend())
return private_key
def load_key(filename, key_type='private', file_format='pem'):
with open(filename, 'rb') as f:
key_lines = f.read()
if key_type == 'private':
private_key = crypto_serialization.load_pem_private_key(
key_lines, default_backend(), password=<PASSWORD>)
return private_key
elif key_type == 'public':
public_key = crypto_serialization.load_pem_public_key(
key_lines, default_backend())
return public_key
else:
raise Exception('E: Unsupported key type.')
def save_private_key(private_key, filename):
pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
with open(filename, 'wb') as pem_out:
pem_out.write(pem)
def save_public_key(public_key, filename):
pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.OpenSSH
)
with open(filename, 'wb') as pem_out:
pem_out.write(pem)
```
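A short usage sketch for the key helpers above, assuming the package imports as `portal.utils`; the output file names and token length are arbitrary:
```python
from portal.utils import create_key_pair, gen_random_string

keys = create_key_pair(key_cipher='ec', key_format='pem')
with open('portal_key.pem', 'w') as f:
    f.write(keys['private_key'])
with open('portal_key.pub', 'w') as f:
    f.write(keys['public_key'])

token = gen_random_string(chars=16, lower=True)
print(keys['key_cipher'], keys['key_format'], token)
```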
#### File: migrations/versions/393219824c73_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '393219824c73'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
pass
# keyciphers = postgresql.ENUM('active', 'inactive', 'archive', name='keyciphers')
# banner_status.create(op.get_bind())
# # ### commands auto generated by Alembic - please adjust! ###
# op.add_column('key', sa.Column('key_cipher', sa.Enum('ed25519', 'rsa', name='keyciphers'), nullable=True))
# op.add_column('key', sa.Column('key_format', sa.Enum('openssh', 'pem', 'pkcs12', name='keyformats'), nullable=True))
# ### end Alembic commands ###
def downgrade():
pass
# ### commands auto generated by Alembic - please adjust! ###
# op.drop_column('key', 'key_format')
# op.drop_column('key', 'key_cipher')
# ### end Alembic commands ###
```
#### File: migrations/versions/45faee087b6e_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '45faee087b6e'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('api_logs',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('name', sa.String(length=128), nullable=True),
sa.Column('ts_created', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column('licenses', sa.Column('token', sa.String(length=128), nullable=True))
op.add_column('licenses', sa.Column('user_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'licenses', 'users', ['user_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'licenses', type_='foreignkey')
op.drop_column('licenses', 'user_id')
op.drop_column('licenses', 'token')
op.drop_table('api_logs')
# ### end Alembic commands ###
```
#### File: migrations/versions/4c3eea649e45_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4c3eea649e45'
down_revision = '5<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('container_port_map',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('container_id', sa.Integer(), nullable=True),
sa.Column('ipv4_address', sa.String(length=128), nullable=True),
sa.Column('ipv6_address', sa.String(length=128), nullable=True),
sa.Column('transport_protocol', sa.Enum('tcp', 'udp', name='transportprotocol'), nullable=True),
sa.Column('port', sa.Integer(), nullable=True),
sa.Column('app_protocol', sa.Enum('ssh', 'http', 'https', name='appprotocol'), nullable=True),
sa.Column('app_name', sa.Enum('demoapp1', name='appname'), nullable=True),
sa.Column('note', sa.String(length=255), nullable=True),
sa.ForeignKeyConstraint(['container_id'], ['container.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.drop_constraint('container_ssh_key_id_fkey', 'container', type_='foreignkey')
op.drop_column('container', 'ssh_key_id')
op.add_column('ssh_key', sa.Column('add_to_authorized_keys', sa.Boolean(), nullable=True))
op.add_column('ssh_key', sa.Column('key_cipher', sa.String(length=32), nullable=True))
op.add_column('ssh_key', sa.Column('key_format', sa.String(length=32), nullable=True))
op.add_column('ssh_key', sa.Column('private_key', sa.String(length=128), nullable=True))
op.add_column('ssh_key', sa.Column('public_key', sa.String(length=128), nullable=True))
op.drop_column('ssh_key', 'private')
op.drop_column('ssh_key', 'public')
op.drop_column('ssh_key', 'type')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('ssh_key', sa.Column('type', sa.VARCHAR(length=24), autoincrement=False, nullable=True))
op.add_column('ssh_key', sa.Column('public', sa.VARCHAR(length=128), autoincrement=False, nullable=True))
op.add_column('ssh_key', sa.Column('private', sa.VARCHAR(length=128), autoincrement=False, nullable=True))
op.drop_column('ssh_key', 'public_key')
op.drop_column('ssh_key', 'private_key')
op.drop_column('ssh_key', 'key_format')
op.drop_column('ssh_key', 'key_cipher')
op.drop_column('ssh_key', 'add_to_authorized_keys')
op.add_column('container', sa.Column('ssh_key_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('container_ssh_key_id_fkey', 'container', 'ssh_key', ['ssh_key_id'], ['id'])
op.drop_table('container_port_map')
# ### end Alembic commands ###
```
#### File: migrations/versions/ab2f1cdac54c_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '1<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('container_port_map', sa.Column('created', sa.DateTime(), nullable=True))
op.add_column('container_port_map', sa.Column('deleted', sa.DateTime(), nullable=True))
op.add_column('container_port_map', sa.Column('status', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('container_port_map', 'status')
op.drop_column('container_port_map', 'deleted')
op.drop_column('container_port_map', 'created')
# ### end Alembic commands ###
```
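These revision files are normally applied through Alembic's command layer (or a Flask-Migrate `flask db upgrade` wrapper). A hedged sketch of driving an upgrade programmatically; the ini path and script location are assumptions about the project layout:
```python
from alembic import command
from alembic.config import Config

cfg = Config('migrations/alembic.ini')                # assumed config location
cfg.set_main_option('script_location', 'migrations')  # assumed parent of the versions/ directory
command.upgrade(cfg, 'head')            # run every pending upgrade() up to the latest revision
command.downgrade(cfg, '45faee087b6e')  # or step back down to a named revision
```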
|
{
"source": "JeremyButt/GDMC",
"score": 3
}
|
#### File: ChirpVillage/Mocks/Box.py
```python
class BoundingBox(object):
def __init__(self, minx, miny, minz, maxx, maxy, maxz):
self.minx = minx
self.miny = miny
self.minz = minz
self.maxx = maxx
self.maxy = maxy
self.maxz = maxz
```
#### File: ChirpVillage/Mocks/Level.py
```python
from random import randrange
class Level(object):
def __init__(self, x_min=0, y_min=0, z_min=0, x_max=300, y_max=254, z_max=300):
self.RandomSeed = 2523870351887443968
self.x_min = x_min
self.y_min = y_min
self.z_min = z_min
self.x_max = x_max
self.y_max = y_max
self.z_max = z_max
self.world = self.init_world()
def init_world(self):
"""
Initialize the world with stone or air blocks at random height (30->254)
:return: initialized 3d world filled with generated data
"""
world = []
for i in range(self.x_max):
j_column = []
for j in range(self.z_max):
height = randrange(60, 80)
                # add river (water) down the center
                if j == (self.z_max - self.z_min) // 2:
                    j_column.append([0 if h >= height else 8 for h in range(self.y_max)])
                # add lava down the quarter line
                elif j == (self.z_max - self.z_min) // 4:
j_column.append([0 if h >= height else 10 for h in range(self.y_max)])
else:
j_column.append([0 if h >= height else 1 for h in range(self.y_max)])
world.append(j_column)
return world
def blockAt(self, x, y, z):
"""
Fetch the block that is at the coords
:param x, y, z: x, y, z coords to the block required
:return: block at the x, y, z coords in the 3d world matrix member
"""
return self.world[x][z][y]
```
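A quick sketch of how the mocks might be exercised in a test, with both classes above in scope; the coordinates are arbitrary:
```python
# Build a small mock world and query a block inside it.
level = Level(x_max=50, y_max=254, z_max=50)
box = BoundingBox(0, 60, 0, 50, 80, 50)
block_id = level.blockAt(10, 64, 10)   # 0 (air) or a solid id, depending on the random height
print(box.maxy, block_id)
```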
|
{
"source": "JeremyBYU/FastGaussianAccumulator",
"score": 2
}
|
#### File: python/archive/run_normals copy.py
```python
import time
import sys
import argparse
import numpy as np
import open3d as o3d
from fastgac import GaussianAccumulatorS2Beta, MatX3d, convert_normals_to_hilbert, IcoCharts
from fastgac.peak_and_cluster import find_peaks_from_accumulator, find_peaks_from_ico_charts
from fastgac.o3d_util import get_arrow, get_pc_all_peaks, get_arrow_normals, plot_meshes, assign_vertex_colors, plot_meshes, get_colors, create_open_3d_mesh
import matplotlib.pyplot as plt
from tests.python.helpers.setup_helper import cluster_normals, sort_by_distance_from_point
np.random.seed(1)
np.set_printoptions(suppress=True, precision=3)
def integrate_normals_and_visualize(to_integrate_normals, ga):
to_integrate_normals_mat = MatX3d(to_integrate_normals)
t0 = time.perf_counter()
neighbors_idx = np.asarray(ga.integrate(to_integrate_normals_mat))
t1 = time.perf_counter()
elapsed_time = (t1 - t0) * 1000
normalized_counts = np.asarray(ga.get_normalized_bucket_counts())
color_counts = get_colors(normalized_counts)[:, :3]
refined_icosahedron_mesh = create_open_3d_mesh(np.asarray(ga.mesh.triangles), np.asarray(ga.mesh.vertices))
# Colorize normal buckets
colored_icosahedron = assign_vertex_colors(refined_icosahedron_mesh, color_counts, None)
return colored_icosahedron
def example_normals(normals:np.ndarray):
LEVEL = 4
kwargs_base = dict(level=LEVEL)
kwargs_s2 = dict(**kwargs_base)
axis_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(0.5).translate([-2.0, 0, 0])
# Create Gaussian Accumulator
ga_cpp_s2 = GaussianAccumulatorS2Beta(**kwargs_s2)
# Integrate the normals and get open3d visualization
colored_icosahedron = integrate_normals_and_visualize(normals, ga_cpp_s2)
o3d.visualization.draw_geometries([colored_icosahedron, axis_frame])
# Create the IcoChart for unwrapping
ico_chart_ = IcoCharts(LEVEL)
normalized_bucket_counts_by_vertex = ga_cpp_s2.get_normalized_bucket_counts_by_vertex(True)
ico_chart_.fill_image(normalized_bucket_counts_by_vertex)
average_vertex_normals = np.asarray(ga_cpp_s2.get_average_normals_by_vertex(True))
# 2D Peak Detection
find_peaks_kwargs = dict(threshold_abs=20, min_distance=1, exclude_border=False, indices=False)
cluster_kwargs = dict(t=0.05, criterion='distance')
average_filter = dict(min_total_weight=0.2)
# New simplified API for finding peaks
res = np.array(ga_cpp_s2.find_peaks(threshold_abs=find_peaks_kwargs['threshold_abs'], cluster_distance=cluster_kwargs['t'], min_cluster_weight=average_filter['min_total_weight']))
print("New Detected Peaks:")
res = sort_by_distance_from_point(res)
print(res)
# Old Way of finding peaks
_, _, avg_peaks, _ = find_peaks_from_ico_charts(ico_chart_, np.asarray(normalized_bucket_counts_by_vertex), vertices=average_vertex_normals, find_peaks_kwargs=find_peaks_kwargs, cluster_kwargs=cluster_kwargs)
avg_peaks = sort_by_distance_from_point(avg_peaks)
print("Detected Peaks:")
print(avg_peaks)
full_image = np.asarray(ico_chart_.image)
plt.imshow(full_image)
plt.xticks(np.arange(0, full_image.shape[1], step=1))
plt.yticks(np.arange(0, full_image.shape[0], step=1))
plt.show()
# Don't forget to reset the GA
ga_cpp_s2.clear_count()
def main():
parser = argparse.ArgumentParser(description='Integrate some Normals')
parser.add_argument('--path', type=str, help='Specify an optional file')
args = parser.parse_args()
if args.path is None:
clusters, normals = cluster_normals(10, 1000, patch_deg=5)
combined =np.concatenate(clusters)
else:
print("loading data from ", args.path)
data = np.load(args.path)
combined = data['clusters']
normals = data['normals']
print(sort_by_distance_from_point(normals))
# sys.exit()
# normals = np.asarray([
# [0.0, 0.0, 0.95],
# [0.0, 0.0, 0.98],
# [0.95, 0.0, 0],
# [0.98, 0.0, 0]
# ])
example_normals(combined)
if __name__ == "__main__":
main()
```
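Distilled from the example above, the minimal peak-detection flow with the newer `find_peaks` API looks roughly like this; the .npy path is a placeholder for any (N, 3) array of unit normals, and the threshold/cluster values mirror `example_normals`:
```python
import numpy as np
from fastgac import GaussianAccumulatorS2Beta, MatX3d

normals = np.load('fixtures/normals/basement.npy')   # placeholder: any (N, 3) unit normals
ga = GaussianAccumulatorS2Beta(level=4)
ga.integrate(MatX3d(normals))
peaks = np.array(ga.find_peaks(threshold_abs=20, cluster_distance=0.05, min_cluster_weight=0.2))
print(peaks)        # one row per detected dominant surface normal
ga.clear_count()    # reset the accumulator before integrating the next point cloud
```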
#### File: python/paper/plot_mesh_beta.py
```python
import time
from pathlib import Path
from collections import namedtuple
import numpy as np
import open3d as o3d
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
from fastgac import GaussianAccumulatorKD, GaussianAccumulatorOpt, GaussianAccumulatorS2, MatX3d, convert_normals_to_hilbert, IcoCharts, GaussianAccumulatorS2Beta
from fastgac.peak_and_cluster import find_peaks_from_accumulator, find_peaks_from_ico_charts
from fastgac.o3d_util import get_arrow, get_pc_all_peaks, get_arrow_normals
from examples.python.run_meshes import visualize_gaussian_integration, plot_meshes
from examples.python.util.mesh_util import ALL_MESHES, ALL_MESHES_ROTATIONS
def main():
EXAMPLE_INDEX = 1
kwargs_base = dict(level=4)
kwargs_s2 = dict(**kwargs_base)
kwargs_opt_integrate = dict(num_nbr=12)
query_max_phi = 175
# Get an Example Mesh
ga_cpp_s2 = GaussianAccumulatorS2Beta(**kwargs_s2)
example_mesh = o3d.io.read_triangle_mesh(str(ALL_MESHES[EXAMPLE_INDEX]))
r = ALL_MESHES_ROTATIONS[EXAMPLE_INDEX]
example_mesh_filtered = example_mesh
if r is not None:
example_mesh_filtered = example_mesh_filtered.rotate(r.as_matrix())
example_mesh_filtered = example_mesh_filtered.filter_smooth_laplacian(5)
example_mesh_filtered.compute_triangle_normals()
# np.save('fixtures/normals/basement.npy', np.asarray(example_mesh_filtered.triangle_normals))
colored_icosahedron_s2, normals, neighbors_s2 = visualize_gaussian_integration(
ga_cpp_s2, example_mesh_filtered, max_phi=query_max_phi, integrate_kwargs=kwargs_opt_integrate)
o3d.visualization.draw_geometries([example_mesh_filtered])
o3d.visualization.draw_geometries([colored_icosahedron_s2])
# Visualize unwrapping
ico_chart_ = IcoCharts(kwargs_base['level'])
t2 = time.perf_counter()
normalized_bucket_counts_by_vertex = ga_cpp_s2.get_normalized_bucket_counts_by_vertex(True)
ico_chart_.fill_image(normalized_bucket_counts_by_vertex)
average_bucket_normals = np.asarray(ga_cpp_s2.get_bucket_average_normals(True))
pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(average_bucket_normals))
pcd.paint_uniform_color([1, 0, 0])
average_vertex_normals = np.asarray(ga_cpp_s2.get_average_normals_by_vertex(True))
find_peaks_kwargs = dict(threshold_abs=50, min_distance=1, exclude_border=False, indices=False)
print(np.asarray(ico_chart_.image).shape)
cluster_kwargs = dict(t=0.1, criterion='distance')
_, _, avg_peaks, avg_weights = find_peaks_from_ico_charts(ico_chart_, np.asarray(normalized_bucket_counts_by_vertex), vertices=average_vertex_normals, find_peaks_kwargs=find_peaks_kwargs, cluster_kwargs=cluster_kwargs)
t3 = time.perf_counter()
print(t3 - t2)
print(avg_peaks)
# import ipdb; ipdb.set_trace()
arrow_avg_peaks = get_arrow_normals(avg_peaks, avg_weights)
wireframe = o3d.geometry.LineSet.create_from_triangle_mesh(colored_icosahedron_s2)
o3d.visualization.draw_geometries([colored_icosahedron_s2, *arrow_avg_peaks, wireframe])
# o3d.visualization.draw_geometries([colored_icosahedron_s2, *arrow_avg_peaks, pcd])
full_image = np.asarray(ico_chart_.image)
plt.imshow(full_image)
plt.axis('off')
# plt.xticks(np.arange(0, full_image.shape[1], step=1))
# plt.yticks(np.arange(0, full_image.shape[0], step=1))
plt.show()
if __name__ == "__main__":
main()
"""Mesh
{
"class_name" : "ViewTrajectory",
"interval" : 29,
"is_loop" : false,
"trajectory" :
[
{
"boundingbox_max" : [ 1.8764505760969685, 3.0280973667097442, 3.045776668203259 ],
"boundingbox_min" : [ -2.2365574934452548, -3.6804227036671078, 0.51828136237409295 ],
"field_of_view" : 60.0,
"front" : [ -0.43966986583569911, 0.57136927624194478, 0.69298453030552898 ],
"lookat" : [ 0.30001921841467899, -0.99779994278506134, 1.5071575255263165 ],
"up" : [ 0.44135525764305411, -0.53453483690843095, 0.72074825333268089 ],
"zoom" : 0.31999999999999978
}
],
"version_major" : 1,
"version_minor" : 0
}
"""
"""
{
"class_name" : "ViewTrajectory",
"interval" : 29,
"is_loop" : false,
"trajectory" :
[
{
"boundingbox_max" : [ 1.1339119391275889, 1.1343327326857235, 1.1998729449684717 ],
"boundingbox_min" : [ -1.1353148374296551, -1.0, -1.1999606130137823 ],
"field_of_view" : 60.0,
"front" : [ -0.59564118276660283, 0.48513744010499366, 0.6401978175538996 ],
"lookat" :
[
-0.00070144915103309557,
0.067166366342861772,
-4.3834022655286908e-05
],
"up" : [ 0.47207151576167344, -0.43341779039025202, 0.76765715197587236 ],
"zoom" : 0.69999999999999996
}
],
"version_major" : 1,
"version_minor" : 0
}
"""
```
#### File: examples/python/run_ga.py
```python
import numpy as np
import time
from fastgac import GaussianAccumulatorKD, MatX3d
np.set_printoptions(suppress=True, precision=2)
def main():
normals = np.array([
[-1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, -1, 0]
])
t0 = time.perf_counter()
ga = GaussianAccumulatorKD(level=0, max_phi=180)
t1 = time.perf_counter()
print("Number of Buckets: {}\n".format(len(ga.buckets)))
print("Bucket representations:\n {}\n".format(ga.buckets))
print("Bucket Cell Surface Normals: \n {}\n".format(np.asarray(ga.get_bucket_normals())))
normals_mat = MatX3d(normals) # need to convert a format we understand
t2 = time.perf_counter()
bucket_indexes = ga.integrate(normals_mat)
t3 = time.perf_counter()
print("These normals: \n {} \n are most similar to these cell normlas: \n {} \n".format(normals, np.asarray(ga.get_bucket_normals())[bucket_indexes,:]))
print(np.asarray(bucket_indexes))
print("Building Index Took (ms): {}; Query Took (ms): {}".format((t1-t0) * 1000, (t3 - t2)* 1000))
print("Change the level see a better approximation")
if __name__ == "__main__":
main()
```
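The example above relies on `GaussianAccumulatorKD.integrate` to map each query normal to its most similar bucket normal. Purely for intuition, the sketch below reproduces that assignment with brute-force NumPy (largest dot product between unit vectors); it is not the library's implementation, which builds a spatial index, and the bucket and query values are made up for the example.
```python
import numpy as np

def assign_buckets_brute_force(query_normals, bucket_normals):
    """Return, for each query normal, the index of the most similar bucket normal.

    Both inputs are assumed to be arrays of unit vectors, so the largest dot
    product corresponds to the smallest angular distance.
    """
    similarity = query_normals @ bucket_normals.T  # cosine of the angle between pairs
    return np.argmax(similarity, axis=1)

if __name__ == "__main__":
    buckets = np.array([[1.0, 0, 0], [-1.0, 0, 0], [0, 1.0, 0], [0, -1.0, 0], [0, 0, 1.0]])
    queries = np.array([[0.9, 0.1, 0.0], [0.0, 0.1, 0.99]])
    queries = queries / np.linalg.norm(queries, axis=1, keepdims=True)
    print(assign_buckets_brute_force(queries, buckets))  # -> [0 4]
```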
#### File: python/util/mesh_util.py
```python
from os import path, listdir
import numpy as np
import open3d as o3d
from scipy.spatial.transform import Rotation as R
DIR_NAME = path.dirname(__file__)
FIXTURES_DIR = path.join(DIR_NAME, '../../..', 'fixtures')
MESHES_DIR = path.join(FIXTURES_DIR, 'meshes')
DENSE_MESH = path.join(MESHES_DIR, 'dense_first_floor_map_smoothed.ply')
BASEMENT_CHAIR = path.join(MESHES_DIR, 'basement_chair_5cm.ply')
ALL_MESHES = [DENSE_MESH, BASEMENT_CHAIR]
ALL_MESHES_ROTATIONS = [None, None]
def get_mesh_data_iterator():
for i, (mesh_fpath, r) in enumerate(zip(ALL_MESHES, ALL_MESHES_ROTATIONS)):
example_mesh = o3d.io.read_triangle_mesh(str(mesh_fpath))
if r is not None:
example_mesh = example_mesh.rotate(r.as_matrix())
example_mesh_filtered = example_mesh
example_mesh_filtered.compute_triangle_normals()
yield example_mesh_filtered
def main():
for i, mesh in enumerate(get_mesh_data_iterator()):
if i < 1:
continue
colors = np.asarray(mesh.vertex_colors)
colors2 = np.column_stack((colors[:, 2], colors[:, 1], colors[:, 0]))
mesh.vertex_colors = o3d.utility.Vector3dVector(colors2)
o3d.io.write_triangle_mesh('test.ply', mesh)
if __name__ == "__main__":
main()
```
#### File: Python/slowga/peak_and_cluster.py
```python
import time
import numpy as np
from scipy.signal import find_peaks
from scipy.cluster.hierarchy import linkage, fcluster
from .helper import normalized
def find_peaks_from_accumulator(gaussian_normals_sorted, accumulator_normalized_sorted,
find_peaks_kwargs=dict(height=0.05, threshold=None, distance=4, width=None, prominence=0.07),
cluster_kwargs=dict(t=0.15, criterion='distance')):
t0 = time.perf_counter()
peaks, _ = find_peaks(accumulator_normalized_sorted, **find_peaks_kwargs)
t1 = time.perf_counter()
gaussian_normal_1d_clusters = gaussian_normals_sorted[peaks,:]
Z = linkage(gaussian_normal_1d_clusters, 'single')
clusters = fcluster(Z, **cluster_kwargs)
t2 = time.perf_counter()
weights_1d_clusters = accumulator_normalized_sorted[peaks]
average_peaks, average_weights = average_clusters(gaussian_normal_1d_clusters, weights_1d_clusters, clusters)
print("Peak Detection - Find Peaks Execution Time (ms): {:.1f}; Hierarchical Clustering Execution Time (ms): {:.1f}".format((t1-t0) * 1000, (t2-t1) * 1000))
return peaks, clusters, average_peaks, average_weights
def get_point_clusters(points, point_weights, clusters):
point_clusters = []
cluster_groups = np.unique(clusters)
for cluster in cluster_groups:
temp_mask = clusters == cluster
point_clusters.append((points[temp_mask, :], point_weights[temp_mask]))
return point_clusters
def average_clusters(peaks, peak_weights, clusters, average_filter=dict(min_total_weight=0.2)):
cluster_points = get_point_clusters(peaks, peak_weights, clusters)
clusters_averaged = []
clusters_total_weight = []
for points, point_weights in cluster_points:
total_weight = np.sum(point_weights)
avg_point = np.average(points, axis=0, weights=point_weights)
if total_weight < average_filter['min_total_weight']:
continue
clusters_averaged.append(avg_point)
clusters_total_weight.append(total_weight)
normals = np.array(clusters_averaged)
normals, _ = normalized(normals)
return normals, np.array(clusters_total_weight)
```
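`find_peaks_from_accumulator` above combines `scipy.signal.find_peaks` with single-linkage hierarchical clustering and a weighted average per cluster. The standalone sketch below exercises just the clustering and averaging half on synthetic peak normals, mirroring `average_clusters`; the data, weights, and the `t=0.15` cut are illustrative only.
```python
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

# Two tight groups of "peak" normals on the unit sphere, with made-up weights.
peaks = np.array([
    [0.00, 0.00, 1.00],
    [0.05, 0.00, 0.99],
    [1.00, 0.00, 0.00],
    [0.99, 0.05, 0.00],
])
weights = np.array([0.4, 0.3, 0.2, 0.1])

Z = linkage(peaks, 'single')                           # single-linkage dendrogram
clusters = fcluster(Z, t=0.15, criterion='distance')   # cut at Euclidean distance 0.15

for label in np.unique(clusters):
    mask = clusters == label
    avg = np.average(peaks[mask], axis=0, weights=weights[mask])
    avg = avg / np.linalg.norm(avg)                    # re-normalize the averaged normal
    print(label, avg, weights[mask].sum())
```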
|
{
"source": "JeremyBYU/img-filter",
"score": 3
}
|
#### File: img-filter/scripts/view_image.py
```python
import argparse
from PIL import Image, ImageOps
import numpy as np
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file')
args = parser.parse_args()
if args.file is None:
raise ValueError("Missing File Arguments")
return args
def scale_depth(image, max_depth=3.0, depth_scale=10000):
"""Scale Z16 image to Float between 0-1
Arguments:
image {PIL.Image} -- Pillow Image
Keyword Arguments:
max_depth {float} -- Maximum Depth (default: {3.0})
"""
data = np.asarray(image)
scale_factor = (max_depth / 10.0) * depth_scale
data1 = ((data / scale_factor) * 255).astype(np.uint8)
scaled_image = Image.fromarray(data1, mode='L')
color_image = ImageOps.colorize(scaled_image, 'blue', 'red')
return color_image
def show_image(fpath):
with open(fpath, 'rb') as f:
image_bytes = f.read()
image = Image.frombytes("I;16", (848, 480), image_bytes, 'raw')
color_image = scale_depth(image)
color_image.show()
def main():
args = parse_args()
print(args)
show_image(args.file)
if __name__ == "__main__":
main()
```
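For readers without a raw Z16 dump at hand, the hedged sketch below applies the same scaling arithmetic as `scale_depth` to a synthetic depth array; the random data is made up, and `np.clip` is added here for illustration (the original relies on the `uint8` cast).
```python
import numpy as np
from PIL import Image, ImageOps

depth = (np.random.rand(480, 848) * 30000).astype(np.uint16)  # synthetic 16-bit depth frame

max_depth = 3.0
scale_factor = (max_depth / 10.0) * 10000                     # same arithmetic as scale_depth
depth_u8 = np.clip((depth / scale_factor) * 255, 0, 255).astype(np.uint8)

gray = Image.fromarray(depth_u8, mode='L')
color = ImageOps.colorize(gray, 'blue', 'red')                # near -> blue, far -> red
color.show()
```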
|
{
"source": "JeremyBYU/polylidar",
"score": 3
}
|
#### File: examples/python/basic2d.py
```python
import time
import numpy as np
from polylidar import MatrixDouble, Polylidar3D
from polylidar.polylidarutil import (generate_test_points, plot_triangles, get_estimated_lmax,
plot_triangle_meshes, get_triangles_from_list, get_colored_planar_segments, plot_polygons)
import matplotlib.pyplot as plt
np.random.seed(1)
def main():
kwargs = dict(num_groups=2, group_size=1000, dist=100.0, seed=1)
# generate 2 random normally distributed clusters of points, 200 X 2 numpy array.
points = generate_test_points(**kwargs)
lmax = get_estimated_lmax(**kwargs)
polylidar_kwargs = dict(alpha=0.0, lmax=lmax, min_triangles=5)
# Convert points to matrix format (no copy) and make Polylidar3D Object
points_mat = MatrixDouble(points, copy=False)
polylidar = Polylidar3D(**polylidar_kwargs)
# Extract the mesh, planes, polygons, and time
t1 = time.perf_counter()
mesh, planes, polygons = polylidar.extract_planes_and_polygons(points_mat)
t2 = time.perf_counter()
print("Took {:.2f} milliseconds".format((t2 - t1) * 1000))
# Convert to numpy format, no copy with np.asarray()
triangles = np.asarray(mesh.triangles)
fig, ax = plt.subplots(figsize=(10, 10), nrows=1, ncols=1)
# plot points
ax.scatter(points[:, 0], points[:, 1], c='k')
# plot all triangles
# plt.triplot(points[:,0], points[:,1], triangles) # better alternative
plot_triangles(get_triangles_from_list(triangles, points), ax)
# plot separated planar triangular segments
triangle_meshes = get_colored_planar_segments(planes, triangles, points)
plot_triangle_meshes(triangle_meshes, ax)
# plot polygons
plot_polygons(polygons, points, ax)
plt.axis('equal')
plt.show()
if __name__ == "__main__":
main()
```
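As the commented line in `main` suggests, matplotlib's `triplot` can draw the triangulation directly. A minimal sketch, assuming you either pass the `(M, 3)` `triangles` array from Polylidar or let matplotlib compute a Delaunay triangulation itself:
```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
points = rng.random((50, 2))          # any (N, 2) point set

fig, ax = plt.subplots(figsize=(6, 6))
# With an existing (M, 3) index array: ax.triplot(points[:, 0], points[:, 1], triangles)
ax.triplot(points[:, 0], points[:, 1], linewidth=0.8)  # Delaunay computed internally
ax.scatter(points[:, 0], points[:, 1], c='k', s=8)
ax.set_aspect('equal')
plt.show()
```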
#### File: examples/python/bilateral.py
```python
import time
import logging
import warnings
import numpy as np
from scipy.spatial.transform import Rotation as R
import open3d as o3d
from examples.python.util.mesh_util import get_mesh_data_iterator
from polylidar import bilateral_filter_normals
from polylidar.polylidarutil.open3d_util import open_3d_mesh_to_trimesh
def main():
for i, mesh in enumerate(get_mesh_data_iterator()):
if i < 0:
continue
mesh.compute_vertex_normals()
mesh.compute_triangle_normals()
print("Before")
o3d.visualization.draw_geometries([mesh])
tri_mesh = open_3d_mesh_to_trimesh(mesh)
t1 = time.perf_counter()
bilateral_filter_normals(tri_mesh, iterations=20, sigma_length=0.1, sigma_angle=0.1)
t2 = time.perf_counter()
print(t2-t1)
normals_smooth = np.asarray(tri_mesh.triangle_normals)
mesh.triangle_normals = o3d.utility.Vector3dVector(normals_smooth)
print("After")
o3d.visualization.draw_geometries([mesh])
if __name__ == "__main__":
main()
```
#### File: python/for_paper/polygon_example_research_statement.py
```python
import time
import logging
import warnings
import numpy as np
from copy import deepcopy
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore", message="Optimal rotation is not uniquely or poorly defined ")
np.set_printoptions(precision=4, suppress=True)
from examples.python.util.realsense_util import (get_realsense_data, get_frame_data, R_Standard_d400, prep_mesh,
create_open3d_pc, extract_mesh_planes, COLOR_PALETTE, create_open_3d_mesh)
from examples.python.util.mesh_util import get_mesh_data_iterator
from examples.python.util.helper_polylidar import extract_all_dominant_plane_normals
from polylidar import (Polylidar3D, MatrixDouble, MatrixFloat, MatrixInt,
create_tri_mesh_copy, bilateral_filter_normals)
from polylidar.polylidarutil.open3d_util import construct_grid, create_lines, flatten
from polylidar.polylidarutil.plane_filtering import filter_planes_and_holes
from fastga import GaussianAccumulatorS2Beta, MatX3d, IcoCharts
from fastga.peak_and_cluster import find_peaks_from_accumulator
import open3d as o3d
def filter_and_create_open3d_polygons(points, polygons, rm=None, line_radius=0.005):
" Apply polygon filtering algorithm, return Open3D Mesh Lines "
config_pp = dict(filter=dict(hole_area=dict(min=0.01, max=100.0), hole_vertices=dict(min=4), plane_area=dict(min=0.05)),
positive_buffer=0.000, negative_buffer=0.01, simplify=0.01)
# config_pp = dict(filter=dict(hole_area=dict(min=0.00, max=100.0), hole_vertices=dict(min=3), plane_area=dict(min=0.05)),
# positive_buffer=0.00, negative_buffer=0.0, simplify=0.01)
t1 = time.perf_counter()
planes, obstacles = filter_planes_and_holes(polygons, points, config_pp, rm=rm)
t2 = time.perf_counter()
logging.info("Plane Filtering Took (ms): %.2f", (t2 - t1) * 1000)
all_poly_lines = create_lines(planes, obstacles, line_radius=line_radius)
return all_poly_lines, (t2 - t1) * 1000
def open_3d_mesh_to_trimesh(mesh: o3d.geometry.TriangleMesh):
triangles = np.asarray(mesh.triangles)
vertices = np.asarray(mesh.vertices)
triangles = np.ascontiguousarray(triangles)
vertices_mat = MatrixDouble(vertices)
triangles_mat = MatrixInt(triangles)
tri_mesh = create_tri_mesh_copy(vertices_mat, triangles_mat)
return tri_mesh
def extract_all_dominant_planes(tri_mesh, vertices, polylidar_kwargs, ds=50, min_samples=10000):
ga = GaussianAccumulatorS2Beta(level=4)
ico = IcoCharts(level=4)
fast_ga_kwargs = dict(find_peaks_kwargs=dict(threshold_abs=15, min_distance=1, exclude_border=False, indices=False),
cluster_kwargs=dict(t=0.28, criterion='distance'),
average_filter=dict(min_total_weight=0.1))
avg_peaks, _, _, _, alg_timings = extract_all_dominant_plane_normals(
tri_mesh, ga_=ga, ico_chart_=ico, **fast_ga_kwargs)
logging.info("Dominant Plane Normals")
print(avg_peaks)
avg_peaks_selected = np.copy(avg_peaks[[0, 1, 2, 3, 4], :])
pl = Polylidar3D(**polylidar_kwargs)
avg_peaks_mat = MatrixDouble(avg_peaks_selected)
tri_set = pl.extract_tri_set(tri_mesh, avg_peaks_mat)
t0 = time.perf_counter()
all_planes, all_polygons = pl.extract_planes_and_polygons_optimized(tri_mesh, avg_peaks_mat)
t1 = time.perf_counter()
polylidar_time = (t1 - t0) * 1000
all_poly_lines = []
for i in range(avg_peaks_selected.shape[0]):
avg_peak = avg_peaks[i, :]
rm, _ = R.align_vectors([[0, 0, 1]], [avg_peak])
polygons_for_normal = all_polygons[i]
# print(polygons_for_normal)
if len(polygons_for_normal) > 0:
poly_lines, _ = filter_and_create_open3d_polygons(vertices, polygons_for_normal, rm=rm)
all_poly_lines.extend(poly_lines)
return all_planes, tri_set, all_poly_lines, polylidar_time
def split_triangles(mesh):
"""
Split the mesh into independent triangles
"""
triangles = np.asarray(mesh.triangles).copy()
vertices = np.asarray(mesh.vertices).copy()
triangles_3 = np.zeros_like(triangles)
vertices_3 = np.zeros((len(triangles) * 3, 3), dtype=vertices.dtype)
for index_triangle, t in enumerate(triangles):
index_vertex = index_triangle * 3
vertices_3[index_vertex] = vertices[t[0]]
vertices_3[index_vertex + 1] = vertices[t[1]]
vertices_3[index_vertex + 2] = vertices[t[2]]
triangles_3[index_triangle] = np.arange(index_vertex, index_vertex + 3)
mesh_return = deepcopy(mesh)
mesh_return.triangles = o3d.utility.Vector3iVector(triangles_3)
mesh_return.vertices = o3d.utility.Vector3dVector(vertices_3)
mesh_return.triangle_normals = mesh.triangle_normals
mesh_return.paint_uniform_color([0.5, 0.5, 0.5])
return mesh_return
def assign_some_vertex_colors(mesh, triangle_indices, triangle_colors, mask=None):
"""Assigns vertex colors by given normal colors
NOTE: New mesh is returned
Arguments:
mesh {o3d:TriangleMesh} -- Mesh
normal_colors {ndarray} -- Normals Colors
Returns:
o3d:TriangleMesh -- New Mesh with painted colors
"""
split_mesh = split_triangles(mesh)
vertex_colors = np.asarray(split_mesh.vertex_colors)
triangles = np.asarray(split_mesh.triangles)
if mask is not None:
triangles = triangles[mask, :]
if isinstance(triangle_indices, list):
for triangle_set, color in zip(triangle_indices, triangle_colors):
triangle_set = np.asarray(triangle_set)
for i in range(np.asarray(triangle_set).shape[0]):
# import ipdb; ipdb.set_trace()
t_idx = triangle_set[i]
p_idx = triangles[t_idx, :]
vertex_colors[p_idx] = color
else:
for i in range(triangle_indices.shape[0]):
# import ipdb; ipdb.set_trace()
t_idx = triangle_indices[i]
color = triangle_colors[i, :]
p_idx = triangles[t_idx, :]
vertex_colors[p_idx] = color
if not split_mesh.has_triangle_normals():
split_mesh.compute_triangle_normals()
split_mesh.compute_vertex_normals()
return split_mesh
def paint_planes(o3d_mesh, planes):
# colors = np.arange(0, 0+ len(planes))
colors = [0, 3]
all_colors = plt.cm.get_cmap('tab10')(colors)[:, :3]
# planes_list = [np.copy(plane) for plane in planes]
# planes_list = np.
new_mesh = assign_some_vertex_colors(o3d_mesh, planes, all_colors)
return new_mesh
def run_test(mesh, callback=None, stride=2):
# Create Pseudo 3D Surface Mesh using Delaunay Triangulation and Polylidar
polylidar_kwargs = dict(alpha=0.0, lmax=0.15, min_triangles=100,
z_thresh=0.20, norm_thresh=0.95, norm_thresh_min=0.90, min_hole_vertices=6)
# Create Polylidar TriMesh
tri_mesh = open_3d_mesh_to_trimesh(mesh)
# bilateral_filter_normals(tri_mesh, 3, 0.1, 0.1)
vertices = np.asarray(tri_mesh.vertices)
normals_smooth = np.asarray(tri_mesh.triangle_normals)
mesh.triangle_normals = o3d.utility.Vector3dVector(normals_smooth)
o3d.visualization.draw_geometries([mesh], width=600, height=500)
planes, tri_set, all_poly_lines, polylidar_time = extract_all_dominant_planes(tri_mesh, vertices, polylidar_kwargs)
time_polylidar3D = polylidar_time
polylidar_3d_alg_name = 'Polylidar3D with Provided Mesh'
# planes_tri_set = [np.argwhere(np.asarray(tri_set) == i) for i in range(1, 3)]
# # import ipdb; ipdb.set_trace()
# mesh_tri_set = paint_planes(mesh, planes_tri_set)
# callback(polylidar_3d_alg_name, time_polylidar3D, mesh_tri_set)
# mesh_segment = paint_planes(mesh, planes)
# callback(polylidar_3d_alg_name, time_polylidar3D, mesh_segment)
mesh_3d_polylidar = []
mesh_3d_polylidar.extend(flatten([line_mesh.cylinder_segments for line_mesh in all_poly_lines]))
mesh_3d_polylidar.append(mesh)
callback(polylidar_3d_alg_name, time_polylidar3D, mesh_3d_polylidar)
def callback(alg_name, execution_time, mesh=None):
axis_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2)
axis_frame.translate([0, 0.8, -0.7])
grid_ls = construct_grid(size=2, n=20, plane_offset=-0.8, translate=[0, 1.0, 0.0])
logging.info("%s took (ms): %.2f", alg_name, execution_time)
if mesh:
if isinstance(mesh, list):
o3d.visualization.draw_geometries(
[*mesh, axis_frame], width=600, height=500)
else:
o3d.visualization.draw_geometries([mesh, axis_frame], width=600, height=500)
def main():
axis_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2)
axis_frame.translate([0, 0.8, -1.0])
grid_ls = construct_grid(size=2, n=20, plane_offset=-1.0, translate=[0, 0.0, 0.0])
mesh = o3d.io.read_triangle_mesh('fixtures/meshes/Table_edit.ply')
mesh.compute_vertex_normals()
run_test(mesh, callback=callback, stride=2)
if __name__ == "__main__":
main()
"""
{
"class_name" : "ViewTrajectory",
"interval" : 29,
"is_loop" : false,
"trajectory" :
[
{
"boundingbox_max" : [ 4.0542712211608887, 1.2899999618530273, 2.809999942779541 ],
"boundingbox_min" : [ -2.2899999618530273, -1.0299999713897705, -2.5627658367156982 ],
"field_of_view" : 60.0,
"front" : [ 0.061353428751916628, -0.93672755075531344, 0.34464075852448922 ],
"lookat" : [ 0.232231386497287, 0.6505503162493752, 0.45416176227377059 ],
"up" : [ -0.006158775652966561, -0.34563996918805912, -0.93834699401774302 ],
"zoom" : 0.25999999999999956
}
],
"version_major" : 1,
"version_minor" : 0
}
"""
```
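`split_triangles` above builds the per-triangle vertex copies with a Python loop. Under the assumption that `triangles` is an `(M, 3)` integer index array into an `(N, 3)` vertex array, the same unshared-vertex layout can be produced with NumPy fancy indexing; this is only an alternative sketch, not part of the original script.
```python
import numpy as np

def split_triangles_arrays(vertices, triangles):
    """Return (vertices_3, triangles_3) where every triangle gets its own 3 vertices."""
    vertices_3 = vertices[triangles.reshape(-1)]                    # (M*3, 3), duplicated per face
    triangles_3 = np.arange(triangles.shape[0] * 3).reshape(-1, 3)  # [[0,1,2], [3,4,5], ...]
    return vertices_3, triangles_3

if __name__ == "__main__":
    verts = np.array([[0.0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]])
    tris = np.array([[0, 1, 2], [1, 3, 2]])
    v3, t3 = split_triangles_arrays(verts, tris)
    print(v3.shape, t3)   # (6, 3)  [[0 1 2] [3 4 5]]
```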
#### File: examples/python/realsense_mesh.py
```python
import time
import logging
import sys
import numpy as np
from examples.python.util.realsense_util import (get_realsense_data, get_frame_data, R_Standard_d400, prep_mesh,
create_open3d_pc, extract_mesh_planes, COLOR_PALETTE)
from polylidar import (Polylidar3D, MatrixDouble, MatrixFloat, extract_tri_mesh_from_float_depth,
extract_point_cloud_from_float_depth)
from polylidar.polylidarutil.open3d_util import construct_grid, create_lines, flatten, create_open_3d_mesh_from_tri_mesh
from polylidar.polylidarutil.plane_filtering import filter_planes_and_holes
from polylidar.polylidarutil.line_mesh import o3d_major_version
import open3d as o3d
def filter_and_create_open3d_polygons(points, polygons):
" Apply polygon filtering algorithm, return Open3D Mesh Lines "
config_pp = dict(filter=dict(hole_area=dict(min=0.025, max=100.0), hole_vertices=dict(min=6), plane_area=dict(min=0.5)),
positive_buffer=0.00, negative_buffer=0.02, simplify=0.01)
planes, obstacles = filter_planes_and_holes(polygons, points, config_pp)
all_poly_lines = create_lines(planes, obstacles, line_radius=0.01)
return all_poly_lines
def run_test(pcd, rgbd, intrinsics, extrinsics, bp_alg=dict(radii=[0.02, 0.02]), poisson=dict(depth=8), callback=None, stride=2):
"""Demonstrate Polygon Extraction on both unorganized and organized point cloud
Args:
pcd (o3d.geometry.PointCloud): Open3D point cloud
rgbd (np.ndarray): MXN Numpy array
intrinsics (np.ndarray): 3X3 numpy array of camera intrinsics (assume pin hole model)
extrinsics (np.ndarray): 4X4 numpy array of extrinsics of camera
bp_alg (dict, optional): Arguments to Open3D ball pivot alg. Defaults to dict(radii=[0.02, 0.02]).
poisson (dict, optional): Arguments to Open3D Poisson surface reconstruction. Defaults to dict(depth=8).
callback (function, optional): Callback function for visualization. Defaults to None.
stride (int, optional): Skip rows/columns in rgbd. Defaults to 2.
"""
points = np.asarray(pcd.points)
polylidar_kwargs = dict(alpha=0.0, lmax=0.10, min_triangles=100,
z_thresh=0.04, norm_thresh=0.90, norm_thresh_min=0.90, min_hole_vertices=6)
pl = Polylidar3D(**polylidar_kwargs)
################################################################################
##### Treat data as an unorganized point clouds
##### Create Surface Mesh using 2.5D Delaunay Triangulation and extract Polygons
################################################################################
points_mat = MatrixDouble(points)
t1 = time.perf_counter()
mesh, planes, polygons = pl.extract_planes_and_polygons(points_mat)
t2 = time.perf_counter()
# Visualization of mesh and polygons
all_poly_lines = filter_and_create_open3d_polygons(points, polygons)
triangles = np.asarray(mesh.triangles)
mesh_2d_polylidar = extract_mesh_planes(points, triangles, planes, mesh.counter_clock_wise, COLOR_PALETTE[0])
mesh_2d_polylidar.extend(flatten([line_mesh.cylinder_segments for line_mesh in all_poly_lines]))
time_mesh_2d_polylidar = (t2 - t1) * 1000
polylidar_alg_name = 'Treated as **Unorganized** Point Cloud - 2.5D Delaunay Triangulation with Polygon Extraction'
callback(polylidar_alg_name, time_mesh_2d_polylidar, pcd, mesh_2d_polylidar)
################################################################################
###### Treat data as an **Organized** 3D Point Cloud #########
###### Creates a true 3D mesh and is much faster using the organized structure of the point cloud
################################################################################
tri_mesh, t_mesh_creation = make_uniform_grid_mesh(np.asarray(
rgbd.depth), np.ascontiguousarray(intrinsics.intrinsic_matrix), extrinsics, stride=stride)
# Visualization of only the mesh
tri_mesh_o3d = create_open_3d_mesh_from_tri_mesh(tri_mesh)
uniform_alg_name = 'Treated as **Organized** Point Cloud - Right-Cut Triangulation/Uniform Mesh (Mesh only)'
callback(uniform_alg_name, t_mesh_creation, pcd, tri_mesh_o3d)
# Extract Polygons with Polylidar3D using the Uniform Mesh. Dominant Plane normal is 0,0,1.
t1 = time.perf_counter()
planes, polygons = pl.extract_planes_and_polygons(tri_mesh)
t2 = time.perf_counter()
# Visualization of mesh and polygons
vertices_np = np.asarray(tri_mesh.vertices)
triangles_np = np.asarray(tri_mesh.triangles)
all_poly_lines = filter_and_create_open3d_polygons(vertices_np, polygons)
mesh_3d_polylidar = extract_mesh_planes(vertices_np, triangles_np, planes, tri_mesh.counter_clock_wise)
mesh_3d_polylidar.extend(flatten([line_mesh.cylinder_segments for line_mesh in all_poly_lines]))
time_polylidar3D = (t2 - t1) * 1000
polylidar_3d_alg_name = 'Polygon Extraction on Uniform Mesh (only one dominant plane normal)'
callback(polylidar_3d_alg_name, time_polylidar3D,
create_open3d_pc(vertices_np), mesh_3d_polylidar)
##### Uncomment if you are interested in other mesh creation techniques #####
# # Estimate Point Cloud Normals
# t3 = time.perf_counter()
# pcd.estimate_normals(
# search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.10, max_nn=20))
# t4 = time.perf_counter()
# time_estimate_point_normals = (t4 - t3) * 1000
# point_normal_alg_name = 'Point Normal Estimation'
# callback(point_normal_alg_name, time_estimate_point_normals, pcd, None)
# # Create True 3D Surface Mesh using Ball Pivot Algorithm
# radii = o3d.utility.DoubleVector(bp_alg['radii'])
# t5 = time.perf_counter()
# mesh_ball_pivot = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
# pcd, radii)
# prep_mesh(mesh_ball_pivot)
# t6 = time.perf_counter()
# time_mesh_ball_pivot = (t6 - t5) * 1000
# ball_point_alg_name = 'Ball Pivot'
# callback(ball_point_alg_name, time_mesh_ball_pivot, pcd, mesh_ball_pivot)
# # Create True 3D Surface Mesh using Poisson Reconstruction Algorithm
# t7 = time.perf_counter()
# mesh_poisson, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
# pcd, **poisson)
# vertices_to_remove = densities < np.quantile(densities, 0.1)
# mesh_poisson.remove_vertices_by_mask(vertices_to_remove)
# t8 = time.perf_counter()
# prep_mesh(mesh_poisson)
# time_mesh_poisson = (t8 - t7) * 1000
# poisson_alg_name = 'Poisson'
# callback(poisson_alg_name, time_mesh_poisson, pcd, mesh_poisson)
# results = [
# dict(alg=polylidar_alg_name, mesh=mesh_2d_polylidar,
# execution_time=time_mesh_2d_polylidar),
# dict(alg=point_normal_alg_name, mesh=None,
# execution_time=time_estimate_point_normals),
# dict(alg=ball_point_alg_name, mesh=mesh_ball_pivot,
# execution_time=time_mesh_ball_pivot),
# dict(alg=poisson_alg_name, mesh=mesh_poisson,
# execution_time=time_mesh_poisson)
# ]
# return results
def make_uniform_grid_mesh(im, intrinsics, extrinsics, stride=2, **kwargs):
"""Create a Unifrom Grid Mesh from an RGBD Image
Arguments:
img {ndarray} -- MXN Float Depth Image
intrinsics {ndarray} -- 3X3 intrinsics matrix
extrinsics {ndarray} -- 4X4 matrix
Keyword Arguments:
stride {int} -- Stride for creating point cloud (default: {2})
Returns:
tuple(dict, dict) - Mesh and timings
"""
t0 = time.perf_counter()
tri_mesh = extract_tri_mesh_from_float_depth(MatrixFloat(
im), MatrixDouble(intrinsics), MatrixDouble(extrinsics), stride=stride)
t1 = time.perf_counter()
return tri_mesh, (t1-t0) * 1000
def callback(alg_name, execution_time, pcd, mesh=None):
axis_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2)
axis_frame.translate([0, 0.8, -0.7])
grid_ls = construct_grid(size=2, n=20, plane_offset=-1.0, translate=[0, 1.0, 0.0])
logging.info("%s took %.2f milliseconds", alg_name, execution_time)
if mesh:
if isinstance(mesh, list):
o3d.visualization.draw_geometries(
[*mesh, pcd, grid_ls, axis_frame])
else:
o3d.visualization.draw_geometries([mesh, pcd, grid_ls, axis_frame])
def main():
color_files, depth_files, traj, intrinsics = get_realsense_data()
axis_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2)
axis_frame.translate([0, 0.8, -0.7])
grid_ls = construct_grid(size=2, n=20, plane_offset=-0.8, translate=[0, 1.0, 0.0])
for idx in range(len(color_files)):
if idx < 2:
continue
pcd, rgbd, extrinsics = get_frame_data(idx, color_files, depth_files, traj, intrinsics, stride=2)
center = pcd.get_center() if o3d_major_version > 9 else True
pcd = pcd.rotate(R_Standard_d400[:3, :3], center=center)
logging.info("File %r - Point Cloud; Size: %r", idx, np.asarray(pcd.points).shape[0])
o3d.visualization.draw_geometries([pcd, grid_ls, axis_frame])
results = run_test(pcd, rgbd, intrinsics, extrinsics, callback=callback, stride=2)
if __name__ == "__main__":
main()
```
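`make_uniform_grid_mesh` hands the float depth image and intrinsics straight to `extract_tri_mesh_from_float_depth`. As a conceptual aside (not the library routine), the standard pinhole back-projection it builds on can be written in a few lines of NumPy; the camera matrix and depth values below are made up.
```python
import numpy as np

def depth_to_points(depth, intrinsics, stride=2):
    """Back-project an (H, W) float depth image (meters) to a (K, 3) point cloud.

    `intrinsics` is assumed to be the usual 3x3 pinhole matrix
    [[fx, 0, cx], [0, fy, cy], [0, 0, 1]].
    """
    fx, fy = intrinsics[0, 0], intrinsics[1, 1]
    cx, cy = intrinsics[0, 2], intrinsics[1, 2]
    v, u = np.mgrid[0:depth.shape[0]:stride, 0:depth.shape[1]:stride]
    z = depth[::stride, ::stride]
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    points = np.column_stack((x.ravel(), y.ravel(), z.ravel()))
    return points[points[:, 2] > 0]   # drop invalid (zero-depth) pixels

if __name__ == "__main__":
    depth = np.full((480, 848), 1.5, dtype=np.float32)
    K = np.array([[600.0, 0, 424.0], [0, 600.0, 240.0], [0, 0, 1.0]])
    print(depth_to_points(depth, K).shape)
```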
#### File: python/util/helper_polylidar.py
```python
import time
import logging
from copy import deepcopy
import numpy as np
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
from polylidar.polylidarutil.plane_filtering import filter_planes_and_holes
from polylidar import MatrixDouble, Polylidar3D
from polylidar.polylidarutil.open3d_util import create_lines
from fastga import GaussianAccumulatorS2, MatX3d, IcoCharts
from fastga.peak_and_cluster import find_peaks_from_ico_charts
from fastga.o3d_util import get_arrow, get_pc_all_peaks, get_arrow_normals
import open3d as o3d
def down_sample_normals(triangle_normals, down_sample_fraction=0.12, min_samples=100, flip_normals=False, **kwargs):
num_normals = triangle_normals.shape[0]
to_sample = int(down_sample_fraction * num_normals)
to_sample = max(min([num_normals, min_samples]), to_sample)
ds_step = int(num_normals / to_sample)
triangle_normals_ds = np.ascontiguousarray(triangle_normals[:num_normals:ds_step, :])
if flip_normals:
triangle_normals_ds = triangle_normals_ds * -1.0
return triangle_normals_ds
def get_image_peaks(ico_chart, ga, level=2, with_o3d=False,
find_peaks_kwargs=dict(threshold_abs=2, min_distance=1, exclude_border=False, indices=False),
cluster_kwargs=dict(t=0.10, criterion='distance'),
average_filter=dict(min_total_weight=0.01),
**kwargs):
normalized_bucket_counts_by_vertex = ga.get_normalized_bucket_counts_by_vertex(True)
t1 = time.perf_counter()
ico_chart.fill_image(normalized_bucket_counts_by_vertex) # this takes microseconds
# plt.imshow(np.asarray(ico_chart.image))
# plt.show()
average_vertex_normals = np.asarray(ga.get_average_normals_by_vertex(
True)) if hasattr(ga, 'get_average_normals_by_vertex') else None
peaks, clusters, avg_peaks, avg_weights = find_peaks_from_ico_charts(ico_chart, np.asarray(
normalized_bucket_counts_by_vertex), average_vertex_normals, find_peaks_kwargs, cluster_kwargs, average_filter)
t2 = time.perf_counter()
gaussian_normals_sorted = np.asarray(ico_chart.sphere_mesh.vertices)
# Create Open3D structures for visualization
if with_o3d:
pcd_all_peaks = get_pc_all_peaks(peaks, clusters, gaussian_normals_sorted)
arrow_avg_peaks = get_arrow_normals(avg_peaks, avg_weights)
else:
pcd_all_peaks = None
arrow_avg_peaks = None
elapsed_time = (t2 - t1) * 1000
timings = dict(t_fastga_peak=elapsed_time)
logging.debug("Peak Detection - Took (ms): %.2f", (t2 - t1) * 1000)
return avg_peaks, pcd_all_peaks, arrow_avg_peaks, timings
def extract_all_dominant_plane_normals(tri_mesh, level=5, with_o3d=False, ga_=None, ico_chart_=None, **kwargs):
# Reuse objects if provided
if ga_ is not None:
ga = ga_
else:
ga = GaussianAccumulatorS2(level=level)
if ico_chart_ is not None:
ico_chart = ico_chart_
else:
ico_chart = IcoCharts(level=level)
triangle_normals = np.asarray(tri_mesh.triangle_normals)
triangle_normals_ds = down_sample_normals(triangle_normals, **kwargs)
# np.savetxt('bad_normals.txt', triangle_normals_ds)
triangle_normals_ds_mat = MatX3d(triangle_normals_ds)
t1 = time.perf_counter()
ga.integrate(triangle_normals_ds_mat)
t2 = time.perf_counter()
logging.debug("Gaussian Accumulator - Normals Sampled: %d; Took (ms): %.2f",
triangle_normals_ds.shape[0], (t2 - t1) * 1000)
avg_peaks, pcd_all_peaks, arrow_avg_peaks, timings_dict = get_image_peaks(
ico_chart, ga, level=level, with_o3d=with_o3d, **kwargs)
# Create Open3D structures for visualization
if with_o3d:
# Visualize the Sphere
accumulator_counts = np.asarray(ga.get_normalized_bucket_counts())
refined_icosahedron_mesh = create_open_3d_mesh(np.asarray(ga.mesh.triangles), np.asarray(ga.mesh.vertices))
color_counts = get_colors(accumulator_counts)[:, :3]
colored_icosahedron = assign_vertex_colors(refined_icosahedron_mesh, color_counts)
else:
colored_icosahedron = None
elapsed_time_fastga = (t2 - t1) * 1000
elapsed_time_peak = timings_dict['t_fastga_peak']
elapsed_time_total = elapsed_time_fastga + elapsed_time_peak
timings = dict(t_fastga_total=elapsed_time_total,
t_fastga_integrate=elapsed_time_fastga, t_fastga_peak=elapsed_time_peak)
ga.clear_count()
return avg_peaks, pcd_all_peaks, arrow_avg_peaks, colored_icosahedron, timings
def filter_and_create_polygons(points, polygons, rm=None, line_radius=0.005,
postprocess=dict(filter=dict(hole_area=dict(min=0.025, max=100.0), hole_vertices=dict(min=6), plane_area=dict(min=0.05)),
positive_buffer=0.00, negative_buffer=0.00, simplify=0.0)):
" Apply polygon filtering algorithm, return Open3D Mesh Lines "
t1 = time.perf_counter()
planes, obstacles = filter_planes_and_holes(polygons, points, postprocess, rm=rm)
t2 = time.perf_counter()
return planes, obstacles, (t2 - t1) * 1000
def extract_planes_and_polygons_from_mesh(tri_mesh, avg_peaks,
polylidar_kwargs=dict(alpha=0.0, lmax=0.1, min_triangles=2000,
z_thresh=0.1, norm_thresh=0.95, norm_thresh_min=0.95, min_hole_vertices=50, task_threads=4),
filter_polygons=True, pl_=None, optimized=False,
postprocess=dict(filter=dict(hole_area=dict(min=0.025, max=100.0), hole_vertices=dict(min=6), plane_area=dict(min=0.05)),
positive_buffer=0.00, negative_buffer=0.00, simplify=0.0)):
if pl_ is not None:
pl = pl_
else:
pl = Polylidar3D(**polylidar_kwargs)
avg_peaks_mat = MatrixDouble(avg_peaks)
t0 = time.perf_counter()
if optimized:
all_planes, all_polygons = pl.extract_planes_and_polygons_optimized(tri_mesh, avg_peaks_mat)
else:
all_planes, all_polygons = pl.extract_planes_and_polygons(tri_mesh, avg_peaks_mat)
t1 = time.perf_counter()
# tri_set = pl.extract_tri_set(tri_mesh, avg_peaks_mat)
# planes_tri_set = [np.argwhere(np.asarray(tri_set) == i) for i in range(1, 2)]
# o3d_mesh_painted = paint_planes(o3d_mesh, planes_tri_set)
polylidar_time = (t1 - t0) * 1000
# logging.info("Polygon Extraction - Took (ms): %.2f", polylidar_time)
all_planes_shapely = []
all_obstacles_shapely = []
time_filter = []
# all_poly_lines = []
if filter_polygons:
vertices = np.asarray(tri_mesh.vertices)
for i in range(avg_peaks.shape[0]):
avg_peak = avg_peaks[i, :]
rm, _ = R.align_vectors([[0, 0, 1]], [avg_peak])
polygons_for_normal = all_polygons[i]
# print(polygons_for_normal)
if len(polygons_for_normal) > 0:
planes_shapely, obstacles_shapely, filter_time = filter_and_create_polygons(
vertices, polygons_for_normal, rm=rm, postprocess=postprocess)
all_planes_shapely.extend(planes_shapely)
all_obstacles_shapely.extend(obstacles_shapely)
time_filter.append(filter_time)
# all_poly_lines.extend(poly_lines)
timings = dict(t_polylidar_planepoly=polylidar_time, t_polylidar_filter=np.array(time_filter).sum())
# all_planes_shapely, all_obstacles_shapely, all_poly_lines, timings
return all_planes_shapely, all_obstacles_shapely, timings
def extract_planes_and_polygons_from_classified_mesh(tri_mesh, avg_peaks,
polylidar_kwargs=dict(alpha=0.0, lmax=0.1, min_triangles=2000,
z_thresh=0.1, norm_thresh=0.95, norm_thresh_min=0.95, min_hole_vertices=50, task_threads=4),
filter_polygons=True, pl_=None, optimized=True,
postprocess=dict(filter=dict(hole_area=dict(min=0.025, max=100.0), hole_vertices=dict(min=6), plane_area=dict(min=0.05)),
positive_buffer=0.00, negative_buffer=0.00, simplify=0.0)):
if pl_ is not None:
pl = pl_
else:
pl = Polylidar3D(**polylidar_kwargs)
avg_peaks_mat = MatrixDouble(avg_peaks)
t0 = time.perf_counter()
all_planes, all_polygons = pl.extract_planes_and_polygons_optimized_classified(tri_mesh, avg_peaks_mat)
t1 = time.perf_counter()
polylidar_time = (t1 - t0) * 1000
all_planes_shapely = []
all_obstacles_shapely = []
time_filter = []
# all_poly_lines = []
if filter_polygons:
vertices = np.asarray(tri_mesh.vertices)
for i in range(avg_peaks.shape[0]):
avg_peak = avg_peaks[i, :]
rm, _ = R.align_vectors([[0, 0, 1]], [avg_peak])
polygons_for_normal = all_polygons[i]
# print(polygons_for_normal)
if len(polygons_for_normal) > 0:
planes_shapely, obstacles_shapely, filter_time = filter_and_create_polygons(
vertices, polygons_for_normal, rm=rm, postprocess=postprocess)
all_planes_shapely.extend(planes_shapely)
all_obstacles_shapely.extend(obstacles_shapely)
time_filter.append(filter_time)
# all_poly_lines.extend(poly_lines)
timings = dict(t_polylidar_planepoly=polylidar_time, t_polylidar_filter=np.array(time_filter).sum())
# all_planes_shapely, all_obstacles_shapely, all_poly_lines, timings
return all_planes_shapely, all_obstacles_shapely, timings
```
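Both helpers above flatten each dominant plane with `rm, _ = R.align_vectors([[0, 0, 1]], [avg_peak])`, i.e. the rotation that maps the peak normal onto +Z before 2D polygon filtering. A standalone sanity check of that call, assuming a unit-length peak (older SciPy versions warn that a single vector pair does not define the rotation uniquely):
```python
import numpy as np
from scipy.spatial.transform import Rotation as R

avg_peak = np.array([0.0, 0.70710678, 0.70710678])   # example unit normal

# Rotation that best maps avg_peak onto the +Z axis (second return value is the RMSD).
rm, _ = R.align_vectors([[0, 0, 1]], [avg_peak])

print(rm.apply(avg_peak))      # approximately [0, 0, 1]
print(rm.as_matrix())          # 3x3 rotation matrix
```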
#### File: python/util/mesh_util.py
```python
import time
import math
import sys
from os import path, listdir
from os.path import exists, isfile, join, splitext
import re
import logging
import numpy as np
from polylidar.polylidarutil import COLOR_PALETTE
from polylidar.polylidarutil.line_mesh import o3d_major_version
import open3d as o3d
from scipy.spatial.transform import Rotation as R
DIR_NAME = path.dirname(__file__)
FIXTURES_DIR = path.join(DIR_NAME, '../../..', 'fixtures')
MESHES_DIR = path.join(FIXTURES_DIR, 'meshes')
DENSE_MESH = path.join(MESHES_DIR, 'dense_first_floor_map_smoothed.ply')
SPARSE_MESH = path.join(MESHES_DIR, 'sparse_basement.ply')
BASEMENT_CHAIR = path.join(MESHES_DIR, 'basement_chair_5cm.ply')
ALL_MESHES = [DENSE_MESH, SPARSE_MESH, BASEMENT_CHAIR]
# ALL_MESHES_ROTATIONS = [R.from_rotvec(-np.pi / 2 * np.array([1, 0, 0])),
# R.from_rotvec(-np.pi / 2 * np.array([1, 0, 0]))]
ALL_MESHES_ROTATIONS = [None, None, None]
def get_mesh_data_iterator():
for i, (mesh_fpath, r) in enumerate(zip(ALL_MESHES, ALL_MESHES_ROTATIONS)):
example_mesh = o3d.io.read_triangle_mesh(str(mesh_fpath))
if r is not None:
center = [0, 0, 0] if o3d_major_version > 9 else True
example_mesh = example_mesh.rotate(r.as_matrix(), center=center)
example_mesh_filtered = example_mesh
example_mesh_filtered.compute_triangle_normals()
yield example_mesh_filtered
def main():
for i, mesh in enumerate(get_mesh_data_iterator()):
if i < 1:
continue
colors = np.asarray(mesh.vertex_colors)
colors2 = np.column_stack((colors[:, 2], colors[:, 1], colors[:, 0]))
mesh.vertex_colors = o3d.utility.Vector3dVector(colors2)
o3d.io.write_triangle_mesh('test.ply', mesh)
if __name__ == "__main__":
main()
```
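The channel swap in `main` above (a `np.column_stack` of columns 2, 1, 0) is equivalent to reversing the channel axis with a slice; a tiny sketch for comparison:
```python
import numpy as np

colors = np.array([[0.1, 0.2, 0.3],
                   [0.4, 0.5, 0.6]])

swapped_stack = np.column_stack((colors[:, 2], colors[:, 1], colors[:, 0]))
swapped_slice = colors[:, ::-1]          # reverse the channel axis (BGR <-> RGB)

print(np.array_equal(swapped_stack, swapped_slice))   # True
```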
|
{
"source": "JeremyBYU/polylidar-kitti",
"score": 3
}
|
#### File: kittiground/grounddetector/__init__.py
```python
import sys
import math
import time
import logging
import numpy as np
from scipy import spatial
import cv2
from shapely.geometry import Polygon, JOIN_STYLE
from polylidar import MatrixDouble, Delaunator, bilateral_filter_normals
M2TOCM2 = 10000
CMTOM = 0.01
ORANGE = [249, 115, 6]
ORANGE_BGR = [6, 115, 249]
def axis_angle_rm(axis=np.array([1, 0, 0]), angle=-1.57):
"""
Create rotation matrix given an axis and angle
https://www.euclideanspace.com/maths/geometry/rotations/conversions/angleToMatrix/
"""
c = math.cos(angle)
s = math.sin(angle)
t = 1 - c
x, y, z = axis[0], axis[1], axis[2]
rotation_matrix = np.array(
[
[t*x*x + c, t*x*y - z*s, t*x*z + y*s],
[t*x*y + z*s, t*y*y + c, t*y*z - x*s],
[t*x*z - y*s, t*y*z + x*s, t*z*z + c]
])
return rotation_matrix
def rotate_points(points, rot):
"""
Rotate 3D points given a provided rotation matrix
"""
points_rot = points.transpose()
points_rot = rot @ points_rot
points_rot = points_rot.transpose()
# print(f"Rotation Took {(time.time() - t0) * 1000:.1f} ms")
return points_rot
def plot_points(image, points, color):
""" plot projected velodyne points into camera image """
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
radius = 1
for i in range(points.shape[1]):
pt_2d = (points[0, i], points[1, i])
c = (color[i, :] * 255).astype(dtype=np.uint8).tolist()
cv2.circle(rgb_image, pt_2d, radius, c, -1)
return cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
def align_vector_to_axis(points, vector=np.array([0, 0, 1]), axis=[0, 0, -1]):
"""
Rotates the point cloud so that `vector` is aligned with `axis` (default -Z)
"""
# Rotation axis is the cross product of `vector` and the target `axis`
axis_ = np.cross(vector, np.array(axis))
axis_ = axis_ / np.linalg.norm(axis_)
angle = math.acos(-vector[2])
rm = axis_angle_rm(axis_, angle)
points_rot = rotate_points(points, rm)
return points_rot, rm
def get_points(point_idxs, points):
return points[point_idxs, :]
def create_kd_tree(shell_coords, hole_coords):
hole_coords.append(shell_coords)
all_vertices = np.vstack(hole_coords)
kd_tree = spatial.KDTree(all_vertices, leafsize=100)
return kd_tree
def add_column(array, z_value):
ones = np.ones((array.shape[0], 1)) * z_value
stacked = np.column_stack((array, ones))
return stacked
def recover_3d(poly, kd_tree, z_value):
shell_3D = add_column(np.array(poly.exterior), z_value)
# print(shell_3D.shape)
d, shell_idx = kd_tree.query(shell_3D)
# print(shell_idx.shape)
kd_data = kd_tree.data[shell_idx,:]
# print(kd_data.shape)
shell_3D[:, 2] = kd_data[:, 2]
holes_lr = []
for hole in poly.interiors:
hole_lr = add_column(np.array(hole), z_value)
d, shell_idx = kd_tree.query(hole_lr)
kd_data = kd_tree.data[shell_idx,:]
hole_lr[:, 2] = kd_data[:, 2]
holes_lr.append(hole_lr)
poly_3d = Polygon(shell=shell_3D, holes=holes_lr)
return poly_3d
# print(poly.exterior)
# print(poly_3d.exterior)
def get_polygon(points3D_cam, polylidar, postprocess_config):
"""Gets polygons from point cloud
Arguments:
points3D_cam {ndarray} -- Point cloud in Camera Frame
Returns:
(ndarray, ndarray, list[(Polygon, float)], list[(Polygon, float)], dict) --
Rotated point cloud,
rotation matrix from camera frame to rotated frame,
lists of Shapely polygons (with z heights) for planes and obstacles,
a dict of execution times
"""
t0 = time.perf_counter()
points3D_rot, rm = align_vector_to_axis(
points3D_cam, np.array([0, 1, 0]))
points3D_rot_ = np.ascontiguousarray(points3D_rot[:, :3])
logging.debug(
"Extracting Polygons from point cloud of size: %d", points3D_rot.shape[0])
t1 = time.perf_counter()
points_mat = MatrixDouble(points3D_rot_)
# We need to perform these steps manually if we are going to pass a mesh instead of just the points
# only necessary because I want the timings of just the frontend
mesh = Delaunator(points_mat)
mesh.triangulate()
mesh.compute_triangle_normals()
t1_2 = time.perf_counter()
# bilateral_filter_normals(mesh, 5, 0.25, 0.25)
t1_3 = time.perf_counter()
planes, polygons = polylidar.extract_planes_and_polygons(mesh)
t2 = time.perf_counter()
planes, obstacles = filter_planes_and_holes2(
polygons, points3D_rot_, postprocess_config)
logging.debug("Number of Planes: %d; Number of obstacles: %d",
len(planes), len(obstacles))
t3 = time.perf_counter()
t_rotation = (t1 - t0) * 1000
t_polylidar = (t2 - t1) * 1000
t_polylidar_mesh = (t1_2 - t1) * 1000
t_polylidar_bilateral = (t1_3 - t1_2) * 1000
t_polylidar_planepoly = (t2 - t1_3) * 1000
t_polyfilter = (t3 - t2) * 1000
times = dict(t_rotation=t_rotation, t_polylidar_all=t_polylidar, t_polyfilter=t_polyfilter, t_polylidar_mesh=t_polylidar_mesh, t_polylidar_bilateral=t_polylidar_bilateral, t_polylidar_planepoly=t_polylidar_planepoly)
return points3D_rot, rm, planes, obstacles, times
def filter_planes_and_holes2(polygons, points, config_pp):
"""Extracts the plane and obstacles returned from polylidar
Will filter polygons according to: number of vertices and size
Will also buffer (dilate) and simplify polygons
Arguments:
polygons {list[Polygons]} -- A list of polygons returned from polylidar
points {ndarray} -- MX3 array
config_pp {dict} -- Configuration for post processing filtering
Returns:
tuple -- A list of plane shapely polygons and a list of obstacle polygons
"""
# filtering configuration
post_filter = config_pp['filter']
# will hold the plane(s) and obstacles found
planes = []
obstacles = []
for poly in polygons:
# shell_coords = [get_point(pi, points) for pi in poly.shell]
shell_coords = get_points(poly.shell, points)
hole_coords = [get_points(hole, points) for hole in poly.holes]
poly_shape = Polygon(shell=shell_coords, holes=hole_coords)
area = poly_shape.area
# logging.info("Got a plane!")
if area < post_filter['plane_area']['min']:
# logging.info("Skipping Plane")
continue
z_value = shell_coords[0][2]
if config_pp['simplify']:
poly_shape = poly_shape.simplify(tolerance=config_pp['simplify'], preserve_topology=True)
# Perform 2D geometric operations
if config_pp['buffer'] or config_pp['positive_buffer']:
# poly_shape = poly_shape.buffer(-config_pp['buffer'], 1, join_style=JOIN_STYLE.mitre).buffer(config_pp['buffer'], 1, join_style=JOIN_STYLE.mitre)
poly_shape = poly_shape.buffer(config_pp['positive_buffer'], join_style=JOIN_STYLE.mitre, resolution=4)
poly_shape = poly_shape.buffer(distance=-config_pp['buffer'] * 3, resolution=4)
if poly_shape.geom_type == 'MultiPolygon':
all_poly_shapes = list(poly_shape.geoms)
poly_shape = sorted(all_poly_shapes, key=lambda geom: geom.area, reverse=True)[0]
poly_shape = poly_shape.buffer(distance=config_pp['buffer'] * 2, resolution=4)
if config_pp['simplify']:
poly_shape = poly_shape.simplify(tolerance=config_pp['simplify'], preserve_topology=False)
# It's possible that our polygon has now broken into a multipolygon
# Check for this situation and handle it
all_poly_shapes = [poly_shape]
# iterate through every polygon and check for plane extraction
for poly_shape in all_poly_shapes:
area = poly_shape.area
# logging.info("Plane is big enough still")
if area >= post_filter['plane_area']['min']:
# logging.info("Plane is big enough still")
if config_pp['buffer'] or config_pp['simplify'] or config_pp['positive_buffer']:
# convert back to 3D coordinates
# create kd tree for vertex lookup after buffering operations
kd_tree = create_kd_tree(shell_coords, hole_coords)
poly_shape = recover_3d(poly_shape, kd_tree, z_value)
# Capture the polygon as well as its z height
new_plane_polygon = Polygon(shell=poly_shape.exterior)
planes.append((new_plane_polygon, z_value))
for hole_lr in poly_shape.interiors:
# Filter by number of obstacle vertices, removes noisy holes
if len(hole_lr.coords) > post_filter['hole_vertices']['min']:
hole_poly = Polygon(shell=hole_lr)
area = hole_poly.area
# filter by area
if area >= post_filter['hole_area']['min'] and area < post_filter['hole_area']['max']:
z_value = hole_lr.coords[0][2]
obstacles.append((hole_poly, z_value))
return planes, obstacles
def project_points(pts3D_cam_rect, proj_matrix, img_m, img_n):
pts2D_cam_rect = proj_matrix @ pts3D_cam_rect
# Normalize homogeneous coordinates, then remove pixels that fall outside the image
pts2D_cam_rect[0, :] = pts2D_cam_rect[0, :] / pts2D_cam_rect[2, :]
pts2D_cam_rect[1, :] = pts2D_cam_rect[1, :] / pts2D_cam_rect[2, :]
idx = (pts2D_cam_rect[0, :] >= 0) & (pts2D_cam_rect[0, :] < img_n) & \
(pts2D_cam_rect[1, :] >= 0) & (pts2D_cam_rect[1, :] < img_m)
pts2D_cam_rect_filt = np.ascontiguousarray(
pts2D_cam_rect[:, idx].astype(int))
return pts2D_cam_rect_filt, idx
def get_pix_coordinates(pts, proj_mat, w, h):
"""Get Pixel coordinates of ndarray
Arguments:
pts {ndarray} -- 3D point clouds 3XN
proj_mat {ndarray} -- 3X4 Projection Matrix
w {int} -- width
h {int} -- height
Returns:
ndarray -- Pixel coordinates
"""
points_t = np.ones(shape=(4, pts.shape[1]))
points_t[:3, :] = pts
pixels, idx = project_points(points_t, proj_mat, h, w)
pixels = np.ascontiguousarray(pixels[:2, :])
logging.debug("Pixels Shape %r", pixels.shape)
return pixels
def plot_opencv_polys(polygons, color_image, proj_mat, rot_mat, w, h, color=(0, 255, 0), thickness=3):
for i, (poly, height) in enumerate(polygons):
# Get 2D polygons and assign z component the height value of extracted plane
pts = np.array(poly.exterior.coords) # NX2
# pts = np.column_stack((pts, np.ones((pts.shape[0])) * height)) # NX3
# Transform polylidar plane coordinate system (z-up) back to the original coordinate system of the camera frame
pts = pts.transpose() # 3XN
pts = np.linalg.inv(rot_mat) @ pts
# Project coordinates to image space
pix_coords = get_pix_coordinates(pts, proj_mat, w, h).T
pix_coords = pix_coords.reshape((-1, 1, 2))
cv2.polylines(color_image, [pix_coords],
True, color, thickness=thickness)
return color_image
def plot_planes_and_obstacles(planes, obstacles, proj_mat, rot_mat, color_image, width, height, thickness=3):
"""Plots the planes and obstacles (3D polygons) into the color image
Arguments:
planes {list(Polygons)} -- List of Shapely Polygon with height tuples
obstacles {list[(polygon, height)]} -- List of tuples with polygon with height
proj_mat {ndarray} -- Projection Matrix
rot_mat {ndarray} -- Rotation Matrix
color_image {ndarray} -- Color Image
width {int} -- width of image
height {int} -- height of image
"""
color_image = plot_opencv_polys(
planes, color_image, proj_mat, rot_mat, width,
height, color=(0, 255, 0), thickness=thickness)
color_image = plot_opencv_polys(
obstacles, color_image, proj_mat, rot_mat, width,
height, color=ORANGE_BGR, thickness=thickness)
return color_image
```
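`axis_angle_rm` implements the standard axis-angle (Rodrigues) rotation matrix. Since SciPy is already used throughout these examples, one quick sanity check is to compare it against `Rotation.from_rotvec`, which encodes the same rotation as `axis * angle`; the sketch assumes the `kittiground` package is importable (otherwise paste the function in directly).
```python
import numpy as np
from scipy.spatial.transform import Rotation as R

# Assumes polylidar-kitti is installed; otherwise copy axis_angle_rm from above.
from kittiground.grounddetector import axis_angle_rm

axis = np.array([1.0, 0.0, 0.0])
angle = -1.57

rm_manual = axis_angle_rm(axis, angle)
rm_scipy = R.from_rotvec(axis * angle).as_matrix()   # rotation vector = unit axis * angle

print(np.allclose(rm_manual, rm_scipy))   # True (up to floating point)
```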
|
{
"source": "JeremyBYU/polylidar-plane-benchmark",
"score": 2
}
|
#### File: polylidar_plane_benchmark/scripts/run_param.py
```python
from polylidar_plane_benchmark.scripts.train_core import evaluate_with_params_visualize
def main():
params = {'fname': 'pc_02.pcd', 'tcomp': 0.80, 'variance': 1, 'kernel_size': 5,
'loops_bilateral': 2, 'loops_laplacian': 4, 'sigma_angle': 0.1,
'norm_thresh_min': 0.95, 'min_triangles': 1000, 'stride':1, 'predict_loops_laplacian': False}
evaluate_with_params_visualize(params)
if __name__ == "__main__":
main()
```
#### File: polylidar_plane_benchmark/scripts/run_test.py
```python
from polylidar_plane_benchmark.scripts.train_core import evaluate_with_params
# optimal params for stride=1
# params = dict(kernel_size=5, loops_laplacian=2, loops_bilateral=2, sigma_angle=0.1,
# min_triangles=1000, norm_thresh_min=0.95, stride=1, predict_loops_laplacian=True)
# semi-optimal params for stride=2
# params = dict(kernel_size=3, loops_laplacian=2, loops_bilateral=1, sigma_angle=0.1,
# min_triangles=250, norm_thresh_min=0.95, stride=2, predict_loops_laplacian=True)
def main():
params = dict(kernel_size=5, loops_laplacian=2, loops_bilateral=2, sigma_angle=0.1,
min_triangles=1000, norm_thresh_min=0.95, stride=1, predict_loops_laplacian=True)
dataset = 'test'
print("Evaluating Variance 1")
evaluate_with_params([params], 0, 1, None, dataset)
print("Evaluating Variance 2")
evaluate_with_params([params], 0, 2, None, dataset)
print("Evaluating Variance 3")
evaluate_with_params([params], 0, 3, None, dataset)
print("Evaluating Variance 4")
evaluate_with_params([params], 0, 4, None, dataset)
if __name__ == "__main__":
main()
# Stride 1
# [4 rows x 16 columns]
# n_gt 42.491667
# n_ms_all 25.316667
# f_weighted_corr_seg 0.762061
# rmse 0.009151
# f_corr_seg 0.470380
# n_corr_seg 18.308333
# n_over_seg 0.266667
# n_under_seg 0.400000
# n_missed_seg 23.033333
# n_noise_seg 6.058333
# laplacian 1.227258
# bilateral 2.994489
# mesh 8.497396
# fastga_total 6.612652
# polylidar 14.829927
# dtype: float64
# Stride 2
# n_gt 42.491667
# n_ms_all 26.400000
# f_weighted_corr_seg 0.697639
# rmse 0.009437
# f_corr_seg 0.392567
# n_corr_seg 14.658333
# n_over_seg 0.950000
# n_under_seg 0.341667
# n_missed_seg 26.150000
# n_noise_seg 8.908333
# laplacian 0.472313
# bilateral 0.715748
# mesh 1.793931
# fastga_total 2.583590
# polylidar 4.291157
# dtype: float64
# These parameters are not used, but kept for posterity. **IF** you were to split parameters by variance
# This split would give good results
# var1_params = dict(kernel_size=5, loops_laplacian=2, loops_bilateral=2, sigma_angle=0.1,
# min_triangles=1000, norm_thresh_min=0.95, stride=1)
# var2_params = dict(kernel_size=5, loops_laplacian=4, loops_bilateral=2, sigma_angle=0.1,
# min_triangles=1000, norm_thresh_min=0.95, stride=1)
# var3_params = dict(kernel_size=5, loops_laplacian=6, loops_bilateral=2, sigma_angle=0.1,
# min_triangles=1000, norm_thresh_min=0.95, stride=1)
# var4_params = dict(kernel_size=5, loops_laplacian=8, loops_bilateral=2, sigma_angle=0.1,
# min_triangles=1000, norm_thresh_min=0.95, stride=1)
```
#### File: polylidar_plane_benchmark/scripts/visualize.py
```python
import json
from pathlib import Path
import logging
import time
import numpy as np
import matplotlib.pyplot as plt
import open3d as o3d
import colorcet as cc
import pandas as pd
import seaborn as sns
from polylidar_plane_benchmark import (DEFAULT_PPB_FILE, DEFAULT_PPB_FILE_SECONDARY, logger, SYNPEB_ALL_FNAMES, SYNPEB_DIR, SYNPEB_MESHES_DIR,
SYNPEB_DIR_TEST_GT, SYNPEB_DIR_TRAIN_GT, SYNPEB_DIR_TEST_ALL, SYNPEB_DIR_TRAIN_ALL)
from polylidar_plane_benchmark.utility.o3d_util import create_open_3d_pcd, plot_meshes, get_arrow, create_open_3d_mesh, flatten, mark_invalid_planes
from polylidar_plane_benchmark.utility.helper import (load_pcd_file, convert_planes_to_classified_point_cloud,
extract_all_dominant_plane_normals, load_pcd_and_meshes, convert_polygons_to_classified_point_cloud,
extract_planes_and_polygons_from_mesh, create_open_3d_pcd, paint_planes)
from polylidar_plane_benchmark.utility.evaluate import evaluate
import click
# Variance 1 - Laplacian Smoothing Loops 4
@click.group()
def visualize():
"""Visualize Data"""
pass
@visualize.command()
@click.option('-i', '--input-file', type=click.Path(exists=True), default=DEFAULT_PPB_FILE)
@click.option('-s', '--stride', type=int, default=2)
def pcd(input_file: str, stride):
"""Visualize PCD File"""
pc_raw, pc_image = load_pcd_file(input_file, stride)
# pc_raw_filt = pc_raw[pc_raw[:, 3] == 3.0, :]
# Get just the points, no intensity
pc_points = np.ascontiguousarray(pc_raw[:, :3])
# Create Open3D point cloud
cmap = cc.cm.glasbey_bw
pcd_raw = create_open_3d_pcd(pc_points[:, :3], pc_raw[:, 3], cmap=cmap)
plot_meshes([pcd_raw])
@visualize.command()
@click.option('-i', '--input-file', type=click.Path(exists=True), default=DEFAULT_PPB_FILE)
@click.option('-s', '--stride', type=int, default=2)
@click.option('-l', '--loops', type=int, default=5)
@click.option('--llambda', type=float, default=1.0)
@click.option('-ks', '--kernel-size', type=int, default=3)
@click.option('-lb', '--loops-bilateral', type=int, default=0)
def mesh(input_file: str, stride, loops, llambda, kernel_size, loops_bilateral):
"""Visualize Mesh from PCD File"""
pc_raw, pcd_raw, pc_image, tri_mesh, tri_mesh_o3d, _ = load_pcd_and_meshes(
input_file, stride, loops, llambda, loops_bilateral, kernel_size=kernel_size)
# Write Smoothed Mesh to File, debugging purposes
output_file = str(input_file).replace(str(SYNPEB_DIR), str(SYNPEB_MESHES_DIR))
output_file = output_file.replace('.pcd', '_loops={}.ply'.format(loops))
parent_dir = Path(output_file).parent
parent_dir.mkdir(parents=True, exist_ok=True)
o3d.io.write_triangle_mesh(output_file, tri_mesh_o3d)
# import ipdb; ipdb.set_trace()
# mask = pc_raw[:, 3] == 3.0
# colors = np.asarray(pcd_raw.colors)
# colors[mask] = [0.0,1.0,0]
plot_meshes([pcd_raw, tri_mesh_o3d])
@visualize.command()
@click.option('-i', '--input-file', type=click.Path(exists=True), default=DEFAULT_PPB_FILE)
@click.option('-s', '--stride', type=int, default=2)
@click.option('-l', '--loops', type=int, default=5)
@click.option('--llambda', type=float, default=1.0)
@click.option('-ks', '--kernel-size', type=int, default=3)
@click.option('-lb', '--loops-bilateral', type=int, default=0)
def ga(input_file, stride, loops, llambda, kernel_size, loops_bilateral):
"""Visualize Gaussian Accumulator From PCD File"""
pc_raw, pcd_raw, pc_image, tri_mesh, tri_mesh_o3d, _ = load_pcd_and_meshes(
input_file, stride, loops, llambda, loops_bilateral, kernel_size=kernel_size)
avg_peaks, pcd_all_peaks, arrow_avg_peaks, colored_icosahedron, _ = extract_all_dominant_plane_normals(tri_mesh)
# arrow = get_arrow(origin=[0,0,0], end=[3, 0, 0], cylinder_radius=0.01)
plot_meshes([colored_icosahedron, pcd_all_peaks, *arrow_avg_peaks], [pcd_raw, tri_mesh_o3d])
@visualize.command()
@click.option('-i', '--input-file', type=click.Path(exists=True), default=DEFAULT_PPB_FILE)
@click.option('-s', '--stride', type=int, default=2)
@click.option('-l', '--loops', type=int, default=5)
@click.option('--llambda', type=float, default=1.0)
@click.option('-ks', '--kernel-size', type=int, default=3)
@click.option('-lb', '--loops-bilateral', type=int, default=0)
def polygons(input_file, stride, loops, llambda, kernel_size, loops_bilateral):
"""Visualize Polygon Extraction PCD File"""
pc_raw, pcd_raw, pc_image, tri_mesh, tri_mesh_o3d, _ = load_pcd_and_meshes(
input_file, stride, loops, llambda, loops_bilateral, kernel_size=kernel_size)
avg_peaks, pcd_all_peaks, arrow_avg_peaks, colored_icosahedron, _ = extract_all_dominant_plane_normals(tri_mesh)
    # use these parameters for visualization. Open3D's visualizer will chug if there are too many vertices in the `LineMesh` for polygons
# Note that for the benchmark test there is no visualization or evaluation on polygons; only on planes.
config_pp = dict(filter=dict(hole_area=dict(min=0.00, max=100.0), hole_vertices=dict(min=6), plane_area=dict(min=0.0001)),
positive_buffer=0.01, negative_buffer=0.01, simplify=0.02)
_, _, all_poly_lines, _ = extract_planes_and_polygons_from_mesh(tri_mesh, avg_peaks, config_pp=config_pp)
mesh_3d_polylidar = []
mesh_3d_polylidar.extend(flatten([line_mesh.cylinder_segments for line_mesh in all_poly_lines]))
plot_meshes([pcd_raw, tri_mesh_o3d, *mesh_3d_polylidar])
def plot_triangle_normals(normals: np.ndarray):
colors = ((normals * 0.5 + 0.5) * 255).astype(np.uint8)
im = colors.reshape((249, 249, 2, 3))
im = im[:, :, 1, :]
plt.imshow(im, origin='upper')
plt.show()
@visualize.command()
@click.option('-i', '--input-file', type=click.Path(exists=True), default=DEFAULT_PPB_FILE)
@click.option('-s', '--stride', type=int, default=2)
@click.option('-l', '--loops', type=int, default=5)
@click.option('--llambda', type=float, default=1.0)
@click.option('-ks', '--kernel-size', type=int, default=3)
@click.option('-lb', '--loops-bilateral', type=int, default=0)
def planes(input_file, stride, loops, llambda, kernel_size, loops_bilateral):
"""Visualize Plane Extraction from PCD File"""
pc_raw, pcd_raw, pc_image, tri_mesh, tri_mesh_o3d, mesh_timings = load_pcd_and_meshes(
input_file, stride, loops, llambda, loops_bilateral, kernel_size=kernel_size)
avg_peaks, pcd_all_peaks, arrow_avg_peaks, colored_icosahedron, fastga_timings = extract_all_dominant_plane_normals(
tri_mesh)
# print(avg_peaks)
# print((avg_peaks * 0.5 + 0.5) * 255)
# tri_mesh_normals = np.asarray(tri_mesh.triangle_normals)
# plot_triangle_normals(tri_mesh_normals)
all_planes, all_polygons, _, polylidar_timings = extract_planes_and_polygons_from_mesh(
tri_mesh, avg_peaks, filter_polygons=False)
all_timings = dict(**mesh_timings, **fastga_timings, **polylidar_timings)
all_planes_classified = convert_planes_to_classified_point_cloud(all_planes, tri_mesh, avg_peaks)
# paint the planes
# all_planes_classified.append(dict(triangles=np.array([51032])))
# del all_planes_classified[-1]
# can be evaluated by polygons (using downsampled image) or just the planes
# for evaluation we need the full point cloud, not downsampled
# _, gt_image = load_pcd_file(input_file, stride=1)
# all_planes_classified = convert_polygons_to_classified_point_cloud(all_polygons, tri_mesh, avg_peaks, gt_image, stride,)
# results, auxiliary = evaluate(gt_image, all_planes_classified)
# get results
results, auxiliary = evaluate(pc_image, all_planes_classified, tcomp=0.8)
tri_mesh_o3d_painted = paint_planes(all_planes_classified, auxiliary, tri_mesh_o3d)
    # create invalid plane markers: green=gt_label_missed, red=ms_labels_noise, blue=gt_label_over_seg, gray=ms_label_under_seg
invalid_plane_markers = mark_invalid_planes(pc_raw, auxiliary, all_planes_classified)
# invalid_plane_markers = []
# plot_meshes([tri_mesh_o3d_painted])
plot_meshes([pcd_raw, tri_mesh_o3d_painted, *invalid_plane_markers])
@visualize.command()
@click.option('-v', '--variance', type=click.Choice(['0', '1', '2', '3', '4']), default='1')
@click.option('-d', '--data', default="train")
@click.option('-s', '--stride', type=int, default=2)
@click.option('-l', '--loops', type=int, default=5)
@click.option('--llambda', type=float, default=1.0)
@click.option('-ks', '--kernel-size', type=int, default=3)
@click.option('-lb', '--loops-bilateral', type=int, default=0)
@click.pass_context
def planes_all(ctx, variance, data, stride, loops, llambda, kernel_size, loops_bilateral):
"""Visualize Plane Extraction from training/testing/gt set"""
if int(variance) == 0:
base_dir = SYNPEB_DIR_TRAIN_GT if data == "train" else SYNPEB_DIR_TEST_GT
else:
base_dir = SYNPEB_DIR_TRAIN_ALL[int(
variance) - 1] if data == "train" else SYNPEB_DIR_TEST_ALL[int(variance) - 1]
all_fnames = SYNPEB_ALL_FNAMES
if int(variance) != 0:
all_fnames = all_fnames[0:10]
for fname in all_fnames:
fpath = str(base_dir / fname)
logger.info("File: %s; stride=%d, loops=%d", fpath, stride, loops)
ctx.invoke(planes, input_file=fpath, stride=stride, loops=loops, llambda=llambda,
kernel_size=kernel_size, loops_bilateral=loops_bilateral)
@visualize.command()
@click.option('-v', '--variance', type=click.Choice(['0', '1', '2', '3', '4']), default='1')
@click.option('-d', '--data', default="train")
@click.option('-s', '--stride', type=int, default=2)
@click.option('-l', '--loops', type=int, default=5)
@click.option('--llambda', type=float, default=1.0)
@click.pass_context
def polygons_all(ctx, variance, data, stride, loops, llambda):
"""Visualize Polygon Extraction from training/testing/gt set"""
if int(variance) == 0:
base_dir = SYNPEB_DIR_TRAIN_GT if data == "train" else SYNPEB_DIR_TEST_GT
else:
base_dir = SYNPEB_DIR_TRAIN_ALL[int(
variance) - 1] if data == "train" else SYNPEB_DIR_TEST_ALL[int(variance) - 1]
all_fnames = SYNPEB_ALL_FNAMES
if int(variance) != 0:
all_fnames = all_fnames[0:10]
for fname in all_fnames:
fpath = str(base_dir / fname)
logger.info("File: %s; stride=%d, loops=%d", fpath, stride, loops)
ctx.invoke(polygons, input_file=fpath, stride=stride, loops=loops, llambda=llambda)
def main():
visualize()
if __name__ == "__main__":
main()
```
|
{
"source": "JeremyBYU/pypcd",
"score": 2
}
|
#### File: pypcd/examples/simple_pcd.py
```python
from pypcd.pypcd import PointCloud
PCD_FILE = "test_data/pc_01.pcd"
def main():
a = PointCloud.from_path(PCD_FILE)
print(a.pc_data['x'])
if __name__ == "__main__":
main()
```
|
{
"source": "JeremyBYU/simplifyline",
"score": 2
}
|
#### File: tests/python/bench_test.py
```python
import pytest
import numpy as np
from simplifyline import MatrixDouble, simplify_line_2d, simplify_line_3d, simplify_radial_dist_2d
from simplification.cutil import simplify_coords
def test_example1_simplifyline_lowquality(benchmark, example1):
mat = MatrixDouble(example1)
result = benchmark(simplify_line_2d, mat, 0.1, False)
def test_example1_simplifyline(benchmark, example1):
mat = MatrixDouble(example1)
result = benchmark(simplify_line_2d, mat, 0.1, True)
def test_example1_simplification(benchmark, example1):
result = benchmark(simplify_coords, example1, 0.1)
# @pytest.mark.parametrize("max_distance", [0.1, 1.0, 10.0])
# @pytest.mark.parametrize("high_quality", [True, False])
# def test_bench_example1_simplifyline_parmetersweep(benchmark, example1,max_distance, high_quality):
# # benchmark something
# mat = MatrixDouble(example1)
# result = benchmark(simplify_line_2d, mat, max_distance, high_quality)
```
|
{
"source": "JeremyBYU/UnrealRooftopLanding",
"score": 2
}
|
#### File: airsimcollect/scripts/generatepoi.py
```python
import sys
import logging
from os import path
import math
from functools import partial
import json
import numpy as np
from shapely.algorithms.polylabel import polylabel
from shapely.geometry import Point
import click
click.option = partial(click.option, show_default=True)
from airsimcollect.helper.helper import import_world, plot_collection_points
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger("GeneratePoi")
logger.setLevel(logging.INFO)
def num_collection_points(yaw_range, yaw_delta, pitch_range, pitch_delta):
# Whether to include endpoint of the yaw and pitch range
yaw_endpoint = True
theta_endpoint = True
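    # Worked example with assumed inputs: yaw_range=[0, 360] and yaw_delta=15
    # give int(360 / 15) + 1 = 25 yaw samples, trimmed to 24 below because
    # 0 deg and 360 deg describe the same heading.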
if yaw_delta < .01:
num_yaw = 1
else:
num_yaw = int((abs(yaw_range[0] - yaw_range[1])) / yaw_delta) + 1
        # Don't sample both 0 and 360 (they describe the same heading)
if int(abs(yaw_range[0] - yaw_range[1])) == 360:
num_yaw -= 1
yaw_endpoint = False
if pitch_delta is None or pitch_delta < .01:
num_phi = 1
else:
num_phi = int((abs(pitch_range[0] - pitch_range[1])) / pitch_delta) + 1
return num_yaw, num_phi, num_phi * num_yaw, yaw_endpoint
def remove_collision(collection_points, collisions):
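    # A candidate point is masked out when it lies inside a feature's 2D
    # bounding box (minx..maxx, miny..maxy) and its z value is below the
    # feature's stored height, i.e. it would collide with known geometry.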
x = collection_points[:, 0]
y = collection_points[:, 1]
z = collection_points[:, 2]
obstacle_mask = np.zeros_like(x, dtype='bool')
for (minx, miny, maxx, maxy), height in collisions:
z_m = z < height
x_m = (x > minx) & (x < maxx)
y_m = (y > miny) & (y < maxy)
obstacle_mask = obstacle_mask | (z_m & x_m & y_m)
return collection_points[~obstacle_mask]
def sample_circle(focus_point, radius, yaw_range, yaw_delta, fixed_phi=np.pi/2):
num_yaw, num_phi, _, yaw_endpoint = num_collection_points(yaw_range, yaw_delta, None, None)
theta = np.linspace(math.radians(
yaw_range[0]), math.radians(yaw_range[1]), num_yaw, endpoint=yaw_endpoint)
phi = np.ones_like(theta) * fixed_phi
roll = np.zeros_like(theta)
x = np.cos(theta) * radius + focus_point[0]
y = np.sin(theta) * radius + focus_point[1]
z = np.ones_like(phi) * focus_point[2]
collection_points = np.stack((x, y, z, phi, roll, theta), axis=1)
collection_points = np.append(collection_points,[[*focus_point, fixed_phi, 0, 0]], axis=0)
    logger.debug("fixed_phi: %s", fixed_phi)
return collection_points
def sample_sphere(focus_point, radius, pitch_range, pitch_delta, yaw_range, yaw_delta):
num_yaw, num_phi, _, yaw_endpoint = num_collection_points(yaw_range, yaw_delta, pitch_range, pitch_delta)
theta = np.linspace(math.radians(
yaw_range[0]), math.radians(yaw_range[1]), num_yaw, endpoint=yaw_endpoint)
phi = np.linspace(math.radians(
pitch_range[0]), math.radians(pitch_range[1]), num_phi)
theta = np.repeat(theta, num_phi)
phi = np.tile(phi, num_yaw)
roll = np.zeros_like(phi)
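    # Standard spherical-to-Cartesian conversion centered on the focus point:
    # theta is the yaw (azimuth) and phi is the polar angle measured from +Z.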
x = np.cos(theta) * np.sin(phi) * radius + focus_point[0]
y = np.sin(theta) * np.sin(phi) * radius + focus_point[1]
z = np.cos(phi) * radius + focus_point[2]
collection_points = np.stack((x, y, z, phi, roll, theta), axis=1)
return collection_points
def random_points_within(poly, num_points):
min_x, min_y, max_x, max_y = poly.bounds
points = []
while len(points) < num_points:
random_point = Point(
[np.random.uniform(min_x, max_x), np.random.uniform(min_y, max_y)])
if (random_point.within(poly)):
points.append(random_point)
return points
def genereate_radii(feature, radius_min=0.0, radius_increase=0.0, num_spheres=1, radius_delta=200.0):
"""Generates a list of radii for collection spheres
Arguments:
feature {GeoJSON} -- GeoJSON Feature
Keyword Arguments:
        radius_min {float} -- Minimum Radius. If 0 takes on different defaults (default: {0.0})
        radius_increase {float} -- Bias added to the minimum radius (default: {0.0})
        num_spheres {int} -- Number of collection spheres (default: {1})
        radius_delta {float} -- How much to expand each radius from the previous (default: {200.0})
    Returns:
        list -- list of radii
"""
radius_min_default_point = 500
geom = feature['geometry']
if geom.geom_type == 'Point' or geom.geom_type == 'LineString':
radius_min_ = radius_min_default_point if radius_min == 0.0 else radius_min
radius_min_ += radius_increase
else:
minx, miny, maxx, maxy = geom.bounds
radius_geom = min(maxx - minx, maxy - miny) / 2.0
radius_min_ = radius_geom if radius_min == 0.0 else radius_min
radius_min_ += radius_increase
return [radius_min_ + radius_delta * i for i in range(num_spheres)]
def generate_line_points(geom, num_focus_points):
sections = len(geom.coords) - 1
point_per_section = max(int(math.floor(num_focus_points / sections)), 1)
x_points = []
y_points = []
for i, (x_prev, y_prev) in enumerate(geom.coords[:-1]):
x_next, y_next = geom.coords[i + 1]
x_points.append(np.linspace(x_prev, x_next, num=point_per_section, endpoint=False))
y_points.append(np.linspace(y_prev, y_next, num=point_per_section, endpoint=False))
# Must add the last point
last_point = geom.coords[-1]
x_points.append(np.array([last_point[0]]))
y_points.append(np.array([last_point[1]]))
    # Flatten and combine data
x = np.concatenate(x_points)
y = np.concatenate(y_points)
points = np.column_stack((x, y))
return points
def generate_focus_points(feature, focus_point, num_focus_points, height_offset=0.0):
geom = feature['geometry']
height = feature['properties']['height'] + height_offset
# Check if LineString Feature, return early
if geom.geom_type == 'LineString':
points = generate_line_points(geom, num_focus_points)
return [[point[0], point[1], height] for point in points]
# Point or Polygon Feature
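    # Polygon features support three focus-point strategies below: random
    # interior points, the centroid, or the pole of inaccessibility (polylabel).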
if geom.geom_type == 'Point':
points = [geom]
else:
if focus_point == 'random':
points = random_points_within(geom, num_focus_points)
elif focus_point == 'centroid':
points = [geom.centroid]
elif focus_point == 'pia':
points = [polylabel(geom)]
return [[point.x, point.y, height] for point in points]
@click.group()
def cli():
"""Generates points of interest from geojson file from unreal world"""
pass
@cli.command()
@click.option('-m', '--map-path', type=click.Path(exists=True), required=True,
help='GeoJSON map file of points of interests (features) in the UE4 world.')
@click.option('-pr', '--pitch-range', nargs=2, type=float, default=[30, 90],
help='Range in pitch (phi) on a collection sphere to sample each collection point')
@click.option('-pd', '--pitch-delta', type=float, default=15.0,
help='Change in pitch angle (degrees) on collection sphere for each collection point')
@click.option('-yr', '--yaw-range', nargs=2, type=float, default=[0, 360],
help='Range in yaw (theta) on a collection sphere to sample each collection point')
@click.option('-yd', '--yaw-delta', type=float, default=15.0,
help='Change in yaw angle (degrees) on collection sphere for each collection point')
@click.option('-ho', '--height-offset', type=float, default=0.0,
help='Add a height offset to each feature')
@click.option('-ns', '--num-spheres', type=int, default=1,
help='Number of collection spheres to generate and sample from.')
@click.option('-rm', '--radius-min', type=float, default=0.0,
help="Fixed minimum radius of collection sphere (distance from the focus point). " +
"If 0 and map feature is a polygon, will use smallest sized circle to circumscribe polygon. " +
"If 0 and map feature is a point, set to 500.")
@click.option('-ri', '--radius-increase', type=float, default=0.0,
help="Increase (bias) from minimum radius of collection sphere (distance from the focus point). ")
@click.option('-rd', '--radius-delta', type=float, default=500.0,
help='Change in growing collection sphere radius. Only applicable for -ns > 1.')
@click.option('-fp', '--focus-point', type=click.Choice(['pia', 'centroid', 'random']), default='centroid',
help='Only applicable to polygon features. Determines what point on a 2D polygon ' +
'should be used as the center of the collection sphere')
@click.option('-nf', '--num-focus-points', type=int, default=1,
help='Number of focus points to randomly generate on 2D polygon. Only applicable to -fp random.')
@click.option('-rfn', '--record-feature-name', type=str, default=None,
help='Set to geojson property name if you want to record a label associated to each point')
@click.option('-o', '--out', type=click.Path(exists=False), default="collection_points.npy",
help="Output numpy array of position and angles")
@click.option('-ao', '--append-out', is_flag=True,
help="If output file already exists, just append to it")
@click.option('--seed', type=int, default=1, help="Random seed")
@click.option('-ic', '--ignore-collision', is_flag=True,
help="By default this module ensures the collection point does not collide with any known features " +
"in the map. Set this flag to ignore this check.")
@click.option('-sc', '--sampling-method', type=click.Choice(['sphere', 'circle']), default='sphere',
help='Whether we are sampling on a sphere or on a 2D circle at a height offset from the focus point')
@click.option('-pp', '--plot-points', is_flag=True,
help="Whether to plot points for viewing. Debug only.")
@click.option('-d', '--debug', is_flag=True,
help="Whether to print debug statements")
def generate(map_path, pitch_range, pitch_delta, yaw_range, yaw_delta, height_offset, num_spheres, radius_min, radius_increase, radius_delta,
focus_point, num_focus_points, record_feature_name, out, append_out, seed, ignore_collision, sampling_method, plot_points, debug):
if debug:
logger.setLevel(logging.DEBUG)
logger.debug("{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}".format(
map_path, pitch_range, pitch_delta, yaw_delta, num_spheres, radius_min, radius_increase, radius_delta, focus_point,
num_focus_points, ignore_collision, out, seed, sampling_method, plot_points))
click.secho("Generating collection points...")
# generate_collection_points(map_path, pitch_range, pitch_delta, yaw_delta, num_spheres, radius_min, radius_delta, focus_point, ignore_collision)
try:
features, collisions = import_world(map_path)
except Exception as e:
click.secho("Error parsing GeoJSON file. Is it valid?", fg='red')
logger.exception(e)
sys.exit()
all_points = []
all_feature_names = []
for feature in features:
logger.debug("Inspecting feature: %s", feature)
focus_points = generate_focus_points(feature, focus_point, num_focus_points, height_offset=height_offset)
radii = genereate_radii(feature, radius_min, radius_increase, num_spheres, radius_delta)
for focus_point_ in focus_points:
logger.debug("At focus point: %s", focus_point_)
for radius in radii:
if sampling_method == 'sphere':
collection_points = sample_sphere(focus_point_, radius, pitch_range,
pitch_delta, yaw_range, yaw_delta)
else:
fixed_phi = pitch_range[0]
                    logger.debug("fixed_phi: %s", fixed_phi)
collection_points = sample_circle(focus_point_, radius, yaw_range, yaw_delta, fixed_phi=fixed_phi)
logger.debug("At radius level: %s", radius)
if not ignore_collision:
prev_shape = collection_points.shape
collection_points = remove_collision(collection_points, collisions)
if collection_points.shape != prev_shape:
logger.debug("Collisions removed for feature %r", feature['properties']['class_label'])
all_points.append(collection_points)
if record_feature_name:
all_feature_names.extend([feature['properties'][record_feature_name]] * collection_points.shape[0])
if plot_points:
plot_collection_points(collection_points, focus_point_, radius, feature, sampling_method)
all_points = np.vstack(all_points)
click.echo(
"Finished generating {:d} collection points for {:d} points of interests!".format(
all_points.shape[0],
len(features)))
if append_out and path.isfile(out):
old_data = np.load(out)
all_points = np.vstack((old_data, all_points))
np.save(out, all_points)
if all_feature_names:
out_feature_names = out[:-4] + '.json'
with open(out_feature_names, 'w') as f:
json.dump(all_feature_names, f, indent=2)
```
#### File: scripts/archive/check_saved_processed.py
```python
import time
from pathlib import Path
import argparse
import joblib
import os
from descartes import PolygonPatch
import matplotlib.pyplot as plt
import numpy as np
from shapely.affinity import affine_transform
from scipy.spatial.transform import Rotation as R
from airsimcollect.helper.helper_logging import logger
from airsimcollect.helper.helper_transforms import get_seg2rgb_map, project_points_img
from airsimcollect.helper.helper_mesh import (create_meshes_cuda)
from airsimcollect.helper.helper_metrics import load_records, select_building, load_map, compute_metric
from airsimcollect.helper.o3d_util import create_frustum
from airsimcollect.helper.helper_confidence_maps import (create_fake_confidence_map_seg,
create_confidence_map_planarity,
create_confidence_map_combined,
get_homogenous_projection_matrices)
from airsimcollect.helper.helper_polylidar import extract_polygons
ROOT_DIR = Path(__file__).parent.parent
SAVED_DATA_DIR = ROOT_DIR / 'AirSimCollectData/LidarRoofManualTest'
PROCESSED_DATA_DIR = SAVED_DATA_DIR / 'Processed'
GEOSON_MAP = ROOT_DIR / Path("assets/maps/roof-lidar-manual.geojson")
O3D_VIEW = ROOT_DIR / Path("assets/o3d/o3d_view_default.json")
RESULTS_DIR = ROOT_DIR / Path("assets/results")
# def plot_polygons(polygons, points, ax, linewidth=2, shell_color='green', hole_color='orange'):
# for poly in polygons:
# shell_coords = poly.exteror
# outline = Polygon(shell=shell_coords)
# outlinePatch = PolygonPatch(outline, ec=shell_color, fill=False, linewidth=linewidth)
# ax.add_patch(outlinePatch)
# for hole_poly in poly.holes:
# shell_coords = [get_point(pi, points) for pi in hole_poly]
# outline = Polygon(shell=shell_coords)
# outlinePatch = PolygonPatch(outline, ec=hole_color, fill=False, linewidth=linewidth)
# ax.add_patch(outlinePatch)
def transform_points(points, hom_transform):
temp = np.ones(shape=(4, points.shape[0]))
temp[:3, :] = points.transpose()
point_cam_ned = hom_transform.dot(temp)
return point_cam_ned
def transform_ponts_raw(points, hom_transform):
pass
def convert_points(points, hom, proj, width, height):
cam_poly_points = transform_points(points, hom)
pixels, _ = project_points_img(cam_poly_points, proj, width, height, None)
return pixels
def rot_to_hom(rot, invert=False):
rot_ = rot.T if invert else rot
ans = np.identity(4)
ans[:3,:3] = rot_
return ans
def affine_mat(hom):
    return [hom[0, 0], hom[0, 1], hom[0, 2],
            hom[1, 0], hom[1, 1], hom[1, 2],
            hom[2, 0], hom[2, 1], hom[2, 2],
            hom[0, 3], hom[1, 3], hom[2, 3]]
# def poly_rotate(rm, )
def main():
records = sorted(os.listdir(PROCESSED_DATA_DIR), key=lambda x: 100 * int(x[:-4].split('-')[0]) + int(x[:-4].split('-')[1]) )
for record_str in records:
record = joblib.load(PROCESSED_DATA_DIR / record_str)
conf_map_comb = record['conf_map_comb']
hom = record['hom']
proj = record['proj']
poly = record['poly']
poly_normal = record['poly_normal']
rm, _ = R.align_vectors([[0, 0, 1]], poly_normal)
rm = rot_to_hom(rm.as_matrix(), invert=True)
# poly = affine_transform(poly, affine_mat(rm))
poly_points = np.array(poly.exterior)
# if in lidar local frame where xy is not flat
new_hom = hom @ rm
pixels = convert_points(poly_points, hom, proj, conf_map_comb.shape[0], conf_map_comb.shape[1])
fig, ax = plt.subplots(nrows=1, ncols=2)
conf_map_comb[pixels[:, 1], pixels[:, 0]] = 0.5
ax[0].imshow(conf_map_comb)
ax[1].add_patch(PolygonPatch(poly, ec='k', alpha=0.5, zorder=2),)
ax[1].axis('equal')
ax[1].scatter(poly_points[:, 0], poly_points[:, 1])
plt.show()
def parse_args():
parser = argparse.ArgumentParser(description="Check LiDAR")
parser.add_argument('--gui', dest='gui', action='store_true')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main()
# Check the center, if less than 0.5, then take max of 4 corners, if all 4 corners bad then stop
```
#### File: UnrealRooftopLanding/scripts/check_saved_lidar.py
```python
from pathlib import Path
from os import listdir
from os.path import isfile, join
from functools import partial
import argparse
import time
import yaml
# import matplotlib.pyplot as plt
from rich import print as rprint
import numpy as np
from shapely.geometry import shape, Polygon
import shapely
from airsim.types import Vector3r, Quaternionr
import cv2
import pandas as pd
from airsimcollect.helper.helper_logging import logger
from airsimcollect.helper.o3d_util import (update_linemesh, handle_linemeshes, init_vis, create_frustum,
update_o3d_colored_point_cloud, create_linemesh_from_shapely,
update_frustum, load_view_point, save_view_point, toggle_visibility, BLUE, PURPLE)
from airsimcollect.helper.helper_transforms import classify_points, polygon_to_pixel_coords
from airsimcollect.helper.helper_metrics import (update_projected_image, BLUE_NORM, GOLD_NORM, PURPLE_NORM,
load_map, select_building, load_map, load_records, compute_metric, update_state, get_inscribed_circle_polygon)
from airsimcollect.helper.helper_polylidar import extract_polygons
from fastga import GaussianAccumulatorS2Beta, IcoCharts
from polylidar import MatrixDouble, extract_tri_mesh_from_organized_point_cloud, HalfEdgeTriangulation, Polylidar3D, MatrixUInt8
ROOT_DIR = Path(__file__).parent.parent
SAVED_DATA_DIR = ROOT_DIR / 'AirSimCollectData/LidarRoofManualTest'
GEOSON_MAP = ROOT_DIR / Path("assets/maps/roof-lidar-manual.geojson")
RESULTS_DIR = ROOT_DIR / Path("assets/results")
O3D_VIEW = ROOT_DIR / Path("assets/o3d/o3d_view_default.json")
FOV = 90
# Nice Pictures UIDs - 31, 34, 38, 39, 42, 45
# Bad Segmentation Drone but still success - 40, 41,
def main(save_data_dir: Path, geoson_map, results_fname, gui=True, segmented=False, computer='desktop', save_images=True):
records, lidar_paths_dict, scene_paths_dict, segmentation_paths_dict, seg_infer_path_dict, seg_infer_dict = load_records(
save_data_dir)
output_dir = (save_data_dir / "Output")
output_dir.mkdir(parents=False, exist_ok=True)
# Load yaml file
with open('./assets/config/PolylidarParams.yaml') as file:
config = yaml.safe_load(file)
start_offset_unreal = np.array(records['start_offset_unreal'])
map_features_dict = load_map(geoson_map, start_offset_unreal)
airsim_settings = records.get('airsim_settings', dict())
lidar_beams = airsim_settings.get('lidar_beams', 64)
range_noise = airsim_settings.get('range_noise', 0.05)
# have to turn some json keys into proper objects, quaternions...
update_state(airsim_settings, position='lidar_to_camera_pos',
rotation='lidar_to_camera_quat')
# Create Polylidar Objects
pl = Polylidar3D(**config['polylidar'])
ga = GaussianAccumulatorS2Beta(level=config['fastga']['level'])
ico = IcoCharts(level=config['fastga']['level'])
# Initialize 3D Viewer, Map and Misc geometries
if gui:
vis, geometry_set = init_vis()
line_meshes = [feature['line_meshes']
for features in map_features_dict.values() for feature in features]
line_meshes = [
line_mesh for line_mesh_set in line_meshes for line_mesh in line_mesh_set]
geometry_set['map_polys'].line_meshes = handle_linemeshes(
vis, geometry_set['map_polys'].line_meshes, line_meshes)
load_view_point(vis, str(O3D_VIEW))
vis.register_key_callback(ord("X"), partial(
toggle_visibility, geometry_set, 'pcd'))
vis.register_key_callback(ord("C"), partial(
toggle_visibility, geometry_set, 'map_polys'))
vis.register_key_callback(ord("V"), partial(
toggle_visibility, geometry_set, 'pl_polys'))
vis.register_key_callback(ord("B"), partial(
toggle_visibility, geometry_set, 'frustum'))
vis.register_key_callback(ord("N"), partial(
toggle_visibility, geometry_set, 'pl_isec'))
vis.register_key_callback(ord("M"), partial(
toggle_visibility, geometry_set, 'gt_isec'))
vis.register_key_callback(ord(","), partial(
toggle_visibility, geometry_set, 'circle_polys'))
vis.register_key_callback(ord("U"), partial(
save_view_point, filename='temp.json'))
vis.register_key_callback(ord("Y"), partial(
load_view_point, filename='temp.json'))
else:
geometry_set = dict(pl_polys=None)
result_records = []
for record in records['records']:
path_key = f"{record['uid']}-{record['sub_uid']}-0"
bulding_label = record['label'] # building name
if record['uid'] in [5, 6, 25, 26, 27, 28, 29]:
            logger.warning("Skipping record; UID: %s; SUB-UID: %s; Building Name: %s. Rooftop assets don't match map. Rooftop assets randomness wasn't fixed on this asset!",
record['uid'], record['sub_uid'], bulding_label)
continue
# uid #45 is best segmentation example
# if record['uid'] < 8:
# continue
logger.info("Inspecting record; UID: %s; SUB-UID: %s; Building Name: %s",
record['uid'], record['sub_uid'], bulding_label)
# Get camera data
img_meta = record['sensors'][0]
update_state(img_meta)
camera_position = img_meta['position'].to_numpy_array()
# map feature of the building
building_features = map_features_dict[bulding_label]
building_feature = select_building(building_features, camera_position)
distance_to_camera = building_feature['ned_height'] - camera_position[2]
# Load LiDAR Data
pc_np = np.load(str(lidar_paths_dict[path_key]))
# Load Images
img_scene = cv2.imread(str(scene_paths_dict[path_key]))
img_seg = cv2.imread(str(segmentation_paths_dict[path_key]))
img_seg_infer = cv2.imread(str(seg_infer_path_dict[path_key]))
img_meta['data'] = seg_infer_dict[path_key]
# Combine all images
img = np.concatenate((img_scene, img_seg, img_seg_infer), axis=1)
# Update LIDAR Data to use inference from neural network
pc_np_infer = np.copy(pc_np)
t1 = time.perf_counter()
point_classes, mask, pixels = classify_points(
img_meta['data'], pc_np[:, :3], img_meta, airsim_settings)
t2 = time.perf_counter()
t_classify_pointcloud = (t2-t1) * 1000
pc_np_infer[:, 3] = point_classes
# Update projected image to have lidar data
img_projected = np.copy(img_scene)
# Points that define the camera FOV frustum
frustum_points = create_frustum(
distance_to_camera, camera_position, hfov=FOV, vfov=FOV)
# Polygon Extraction of surface
# Only Polylidar3D
pl_planes, alg_timings, _, _, _ = extract_polygons(pc_np, geometry_set['pl_polys'] if not segmented else None, pl, ga,
ico, config, segmented=False, lidar_beams=lidar_beams, drone_pose=camera_position)
# Polylidar3D with Perfect (GT) Segmentation
pl_planes_seg_gt, alg_timings_seg, _, _, _ = extract_polygons(pc_np, geometry_set['pl_polys'] if segmented else None, pl, ga,
ico, config, segmented=True, lidar_beams=lidar_beams, drone_pose=camera_position,
prefilter=True)
# Polylidar3D with Inferred (NN) Segmentation
pl_planes_seg_infer, alg_timings_seg, _, _, _ = extract_polygons(pc_np_infer, geometry_set['pl_polys'] if segmented else None, pl, ga,
ico, config, segmented=True, lidar_beams=lidar_beams, drone_pose=camera_position,
prefilter=True)
alg_timings_seg.update(t_classify_pointcloud=t_classify_pointcloud)
if pl_planes and True:
base_iou, pl_poly_baseline, gt_poly = compute_metric(
building_feature, pl_planes, frustum_points)
seg_gt_iou, pl_poly_estimate_seg, _ = compute_metric(
building_feature, pl_planes_seg_gt, frustum_points)
seg_infer_iou, pl_poly_estimate_seg, _ = compute_metric(
building_feature, pl_planes_seg_infer, frustum_points)
logger.info("Polylidar3D Base IOU - %.1f; Seg GT IOU - %.1f; Seg Infer IOU - %.1f",
base_iou * 100, seg_gt_iou * 100, seg_infer_iou * 100)
# Get Largest Inscribed Circle
circle_poly, circle = get_inscribed_circle_polygon(pl_poly_estimate_seg, config['polylabel']['precision'])
circle_poly_baseline, circle_baseline = get_inscribed_circle_polygon(
pl_poly_baseline, config['polylabel']['precision'])
alg_timings_seg.update(t_polylabel=circle['t_polylabel'])
result_records.append(dict(uid=record['uid'], sub_uid=record['sub_uid'],
building=bulding_label, pl_base_iou=base_iou,
pl_seg_gt_iou=seg_gt_iou, pl_seg_infer_iou=seg_infer_iou,
computer=computer,
**alg_timings_seg))
img_projected_baseline = np.copy(img_projected)
# img_projected[pixels[:,1], pixels[:, 0]] = [0, 255,0]
update_projected_image(img_projected, circle_poly, pl_poly_estimate_seg,
gt_poly, pixels, img_meta, airsim_settings)
update_projected_image(img_projected_baseline, circle_poly_baseline,
pl_poly_baseline, gt_poly, pixels, img_meta, airsim_settings)
# Visualize these intersections
if gui:
# Visualize the polylidar with segmentation results
if segmented:
pl_poly_baseline = pl_poly_estimate_seg
update_linemesh([pl_poly_baseline], geometry_set['pl_isec'])
update_linemesh([gt_poly], geometry_set['gt_isec'], color=PURPLE)
update_linemesh([circle_poly], geometry_set['circle_polys'], color=BLUE)
elif gui:
update_linemesh([], geometry_set['pl_isec'])
update_linemesh([], geometry_set['gt_isec'])
update_linemesh([], geometry_set['circle_polys'])
if save_images:
baseline_fname = output_dir / "{}-projected-baseline.png".format(path_key)
semantic_fname = output_dir / "{}-projected-semantic.png".format(path_key)
cv2.imwrite(str(baseline_fname), img_projected_baseline)
cv2.imwrite(str(semantic_fname), img_projected)
if gui:
# Create Frustum
update_frustum(vis, distance_to_camera, camera_position,
hfov=FOV, vfov=FOV,
frustum=geometry_set['frustum'])
# Load Lidar Data
update_o3d_colored_point_cloud(pc_np_infer, geometry_set['pcd'].geometry)
# Update Drone Position
geometry_set['drone'].geometry.translate(img_meta['position'].to_numpy_array(), relative=False)
# Update Plot Images
img = np.concatenate((img, img_projected, img_projected_baseline), axis=1)
cv2.imshow('Scene View'.format(record['uid']), img)
# Update geometry and view
vis.update_geometry(geometry_set['pcd'].geometry)
vis.update_geometry(geometry_set['drone'].geometry)
vis.update_renderer()
            while True:
vis.poll_events()
vis.update_renderer()
res = cv2.waitKey(10)
if res != -1:
break
df = pd.DataFrame.from_records(result_records)
print(df)
df['iou_diff'] = df['pl_base_iou'] - df['pl_seg_gt_iou']
df.to_csv(RESULTS_DIR / results_fname)
print(df.mean())
def parse_args():
parser = argparse.ArgumentParser(description="Check LiDAR")
parser.add_argument('--data', type=str, default=SAVED_DATA_DIR)
parser.add_argument('--map', type=str, default=GEOSON_MAP)
parser.add_argument('--results', type=str, default='results.csv')
parser.add_argument('--gui', dest='gui', action='store_true')
parser.add_argument('--seg', dest='seg', action='store_true')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(Path(args.data), Path(args.map), args.results, args.gui, args.seg)
```
#### File: UnrealRooftopLanding/scripts/collect_decision_point_data.py
```python
import argparse
import json
import logging
import time
from os import path
import numpy as np
from numpy.core.fromnumeric import squeeze
from airsim import Vector3r, Pose, to_quaternion, ImageRequest
from airsimcollect.helper.helper import update, update_collectors, DEFAULT_CONFIG
from airsimcollect import AirSimCollect
from airsimcollect.helper.helper_logging import logger
def parse_args():
parser = argparse.ArgumentParser(description="Collect Decision Point Data")
parser.add_argument('--config', type=str, help='Configuration file for collection',
default='./assets/config/collect_lidar_decision_point.json')
args = parser.parse_args()
return args
def setup_airsimcollect(config_file):
with open(config_file) as file:
config = json.load(file)
config = update(DEFAULT_CONFIG, config)
config['collectors'] = update_collectors(config['collectors'])
asc = AirSimCollect(
**config, manual_collect=True) # pylint: disable=E1132,
fpath = path.normpath(path.join(asc.save_dir, 'records.json'))
if path.exists(fpath):
with open(fpath, 'r') as outfile:
data = json.load(outfile)
try:
global_id = data['records'][-1]['uid'] + 1
            except (KeyError, IndexError):
global_id = 0
records = data['records']
else:
records = []
global_id = 0
return asc, records, global_id
def create_square_pose(altitude=-5, pose=np.array([0, 0, 0]), square_size=4, grid_size=4):
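    # With the defaults (square_size=4, grid_size=4) this yields a 5x5 grid of
    # 25 poses spaced 1 unit apart, centered on `pose` at the given altitude.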
half_square = square_size / 2.0
poses = []
delta = square_size / grid_size
for x in range(grid_size + 1):
half_square_x = half_square - x * delta
for y in range(grid_size + 1):
half_square_y = half_square - y * delta
new_pose = pose + \
np.array([half_square_x, half_square_y, altitude])
poses.append(new_pose.tolist())
return poses
def main(config_file):
heights = [5, 10, 15, 20, 25, 30]
# heights= [5]
asc, records, global_id = setup_airsimcollect(config_file)
for height in heights:
poses = create_square_pose(-height)
for sub_id, pose in enumerate(poses):
asc.client.simSetVehiclePose(
Pose(Vector3r(*pose), to_quaternion(0, 0, 0)), True)
time.sleep(asc.min_elapsed_time)
extra_data = dict(lidar_beams=asc.airsim_settings['lidar_beams'],
range_noise=asc.airsim_settings['range_noise'],
horizontal_noise=asc.airsim_settings['horizontal_noise'],
height=height)
record = None
while record is None:
try:
record = asc.collect_data_at_point(
global_id, sub_id, label='Building2_Example3', extra_data=extra_data)
records.append(record)
                except Exception:
logger.exception("Error getting data from point, retrying..")
time.sleep(asc.min_elapsed_time)
global_id += 1
asc.save_records(records)
if __name__ == "__main__":
args = parse_args()
main(args.config)
```
|
{
"source": "jeremycare/serverless-patterns",
"score": 2
}
|
#### File: cloudfront-lambda-edge-cdk-python/lambda/index.py
```python
import datetime
# The generated page contains some dynamic data, so we don't want
# it to stay in cache for long
cache_control_max_age = 3 # in seconds
def handler(event, context):
today = datetime.datetime.now()
date_time = today.strftime("%m/%d/%Y, %H:%M:%S")
html = "<html><title>Content generated by Lambda@Edge</title><body><h1>This content is generated by Lambda@Edge.</h1> <h3>Content generated at {}</h3></body></html>".format(date_time)
response = {
'status': 200,
'headers' : {
"cache-control": [ { "key": "Cache-Control", "value": "max-age={}".format(date_time) }],
"content-type": [{ "key": "Content-Type", "value": 'text/html;charset=UTF-8' }]
},
'body': html
}
return response
```
#### File: tests/unit/test_cloudfront_lambda_edge_cdk_python_stack.py
```python
import aws_cdk as core
import aws_cdk.assertions as assertions
from cloudfront_lambda_edge_cdk_python.cloudfront_lambda_edge_cdk_python_stack import CloudfrontLambdaEdgeCdkPythonStack
# example tests. To run these tests, uncomment this file along with the example
# resource in cloudfront_lambda_edge_cdk_python/cloudfront_lambda_edge_cdk_python_stack.py
def test_sqs_queue_created():
app = core.App()
stack = CloudfrontLambdaEdgeCdkPythonStack(app, "cloudfront-lambda-edge-cdk-python")
template = assertions.Template.from_stack(stack)
# template.has_resource_properties("AWS::SQS::Queue", {
# "VisibilityTimeout": 300
# })
```
#### File: lambda-sns-cdk/src/app.py
```python
from aws_cdk import (
aws_lambda as _lambda,
aws_logs as logs,
aws_sns as sns,
core as cdk
)
from constructs import Construct
class LambdaSnsCdkStack(cdk.Stack):
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
# Create SNS Topic
# SNS topic
topic = sns.Topic(self, 'sns-to-lambda-topic',
display_name='My SNS topic')
# Create Lambda function
lambdaFn = _lambda.Function(self, "SNSPublisher",
runtime=_lambda.Runtime.PYTHON_3_9,
code=_lambda.Code.from_asset("lambda"),
handler="handler.main",
timeout=cdk.Duration.seconds(10))
# Set Lambda Logs Retention and Removal Policy
logs.LogGroup(
self,
'logs',
log_group_name=f"/aws/lambda/{lambdaFn.function_name}",
removal_policy=cdk.RemovalPolicy.DESTROY,
retention=logs.RetentionDays.ONE_DAY
)
# Grant publish to lambda function
topic.grant_publish(lambdaFn)
cdk.CfnOutput(self, 'snsTopicArn',
value=topic.topic_arn,
description='The arn of the SNS topic')
cdk.CfnOutput(self, 'functionName',
value=lambdaFn.function_name,
description='The name of the handler function')
app = cdk.App()
LambdaSnsCdkStack(app, "LambdaSnsCdkStack")
app.synth()
```
#### File: sfn-inside-sfn-cdk-python/sfn_inside_sfn_cdk/sfn_inside_sfn_cdk_stack.py
```python
from aws_cdk import (
Duration,
Stack,
CfnOutput,
aws_stepfunctions as sfn,
aws_stepfunctions_tasks as tasks,
)
from constructs import Construct
class SfnInsideSfnCdkStack(Stack):
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
        innerSfnPassState = sfn.Pass(self, 'PassState')
innerSfn = sfn.StateMachine(self, 'InnerStepFunction',
definition = innerSfnPassState,
timeout=Duration.minutes(60)
)
task1 = tasks.StepFunctionsStartExecution(self, "StepFunction1",
state_machine=innerSfn,
integration_pattern=sfn.IntegrationPattern.RUN_JOB,
input=sfn.TaskInput.from_object({
"input.$": "$.Output.input"
}),
output_path="$",
result_selector = {
"Output.$": "$.Output"
}
)
task2 = tasks.StepFunctionsStartExecution(self, "StepFunction2",
state_machine=innerSfn,
integration_pattern=sfn.IntegrationPattern.RUN_JOB,
input=sfn.TaskInput.from_object({
"input.$": "$.Output.input"
}),
output_path="$",
result_selector = {
"Output.$": "$.Output"
}
)
task3 = tasks.StepFunctionsStartExecution(self, "StepFunction3",
state_machine=innerSfn,
integration_pattern=sfn.IntegrationPattern.RUN_JOB,
input=sfn.TaskInput.from_object({
"input.$": "$.Output.input"
}),
output_path="$",
result_selector = {
"Output.$": "$.Output"
}
)
outer_sfn = sfn.StateMachine(self, "OuterStepFunction",
definition=task1.next(task2).next(task3),
timeout=Duration.minutes(60)
)
CfnOutput(self, "StepFunctionArn",
value = outer_sfn.state_machine_arn,
export_name = 'OuterStepFunctionArn',
description = 'Outer Step Function arn')
```
#### File: sqs-lambda-eb-cdk-python/sqs_lambda_eb_cdk/sqs_lambda_eb_cdk_stack.py
```python
from aws_cdk import (
Duration,
Stack,
CfnOutput,
RemovalPolicy,
aws_sqs as _sqs,
aws_lambda as _lambda,
aws_logs as logs,
aws_events as events,
aws_events_targets as events_target
)
from constructs import Construct
class SqsLambdaEbCdkStack(Stack):
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
queue = _sqs.Queue(
self, "MyQueue",
visibility_timeout=Duration.seconds(300)
)
# Create the AWS Lambda function to subscribe to Amazon SQS queue
# The source code is in './lambda' directory
lambda_function = _lambda.Function(
self, "MyLambdaFunction",
runtime=_lambda.Runtime.PYTHON_3_9,
handler="submit_job.handler",
code=_lambda.Code.from_asset("lambda"),
environment = {
'QUEUE_URL': queue.queue_url
}
)
# Set Lambda Logs Retention and Removal Policy
logs.LogGroup(
self,
'logs',
log_group_name = f"/aws/lambda/{lambda_function.function_name}",
removal_policy = RemovalPolicy.DESTROY,
retention = logs.RetentionDays.ONE_DAY
)
#Event Bridge rule
#Change the rate according to your needs
rule = events.Rule(self, 'Rule',
description = "Trigger Lambda function every 2 minutes",
schedule = events.Schedule.expression('rate(2 minutes)')
)
        rule.add_target(events_target.LambdaFunction(lambda_function))
#Grant permission to AWS Lambda function to consume messages from the Amazon SQS queue
queue.grant_consume_messages(lambda_function)
CfnOutput(self, "FunctionName",
value = lambda_function.function_name,
export_name = 'FunctionName',
description = 'Function name')
CfnOutput(self, "QueueName",
value = queue.queue_name,
export_name = 'QueueName',
description = 'SQS queue name')
CfnOutput(self, "QueueArn",
value = queue.queue_arn,
export_name = 'QueueArn',
description = 'SQS queue ARN')
CfnOutput(self, "QueueUrl",
value = queue.queue_url,
export_name = 'QueueUrl',
description = 'SQS queue URL')
CfnOutput(self, "RuleName",
value = rule.rule_name,
export_name = 'RuleName',
description = 'EventBridge rule name')
```
|
{
"source": "JeremyCCHsu/CapsNet-tf",
"score": 2
}
|
#### File: JeremyCCHsu/CapsNet-tf/capsule.py
```python
import json
import numpy as np
import tensorflow as tf
from helper import *
class CapsuleNet(object):
def __init__(self, arch):
self.arch = arch
self._C = tf.make_template('Recognizer', self._recognize)
self._G = tf.make_template('Generator', self._generate)
def _get_shape_JDUV(self):
J = self.arch['num_class'] # 10
D = self.arch['Primary Capsule']['depth'] # 32
U = self.arch['Primary Capsule']['dim'] # 8
V = self.arch['Digit Capsule']['dim'] # 16
return J, D, U, V
def _recognize(self, x):
'''`x`: [b, h, w, c]
'''
J, D, U, V = self._get_shape_JDUV()
net = self.arch['recognizer']
assert D * U == net['output'][-1]
self.W = tf.get_variable('W', shape=[J, V, U], dtype=tf.float32)
for i, (o, k, s) in enumerate(zip(net['output'], net['kernel'], net['stride'])):
if i + 1 == len(net['output']):
activation = None
else:
activation = tf.nn.relu
x = tf.layers.conv2d(x, o, k, s, activation=activation)
S = tf.shape(x) # [n, h', w', c']
I = S[1] * S[2] * D
primary = tf.reshape(x, [-1, S[1], S[2], D, U])
primary = tf.reshape(primary, [-1, I, U])
u = primary # NOTE: iteratively process the previous capsule `u`
B = tf.zeros([tf.shape(x)[0], I, J]) # the "attention" matrix
for _ in range(self.arch['Digit Capsule']['nRouting']):
v, B = self._stack(u, B)
return v
def _stack(self, u, B):
'''
I, J, Nu, Nv = 6*6*32, 10, 8, 16
Input:
`u`: [n, I, Nu]
`b`: [n, I, J]
Return:
`V`: [n, J, V]
`B`: [n, I, J]
'''
with tf.name_scope('Capsule'):
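            # One routing-by-agreement iteration: project the primary capsules
            # through W, weight the predictions by softmax(B), squash the
            # weighted sum into v, then update B by the agreement between the
            # predictions and v.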
# [n, I, U] dot [J, V, U] => [n, I, J, V]
uji = tf.tensordot(u, self.W, [[2], [2]])
C = tf.nn.softmax(B, dim=1) # [n, I, J]
# [n, I, J, 1] (necessary for broadcasting)
C = tf.expand_dims(C, -1)
S = tf.reduce_sum(C * uji, 1) # [n, J, V]
v_ = squash(S) # [n, J, V]
v = tf.expand_dims(v_, 1) # [n, 1, J, V]
dB = tf.reduce_sum(uji * v, -1)
B = B + dB
return v_, B
def _generate(self, v, y):
'''
Input:
`y`: [n,]
`v`: Mask. [n, J=10, V=16]
Return:
`x`: Image [n, h, w, c]
'''
J, _, _, V = self._get_shape_JDUV()
Y = tf.one_hot(y, J) # [n, J]
Y = tf.expand_dims(Y, -1) # [n, J, 1]
x = v * Y
x = tf.reshape(x, [-1, J * V])
net = self.arch['generator']
for o in net['output']:
x = tf.layers.dense(x, o, tf.nn.relu)
h, w, c = self.arch['hwc']
x = tf.layers.dense(x, h * w * c, tf.nn.tanh)
return tf.reshape(x, [-1, h, w, c])
def _get_loss_parameter(self):
        return self.arch['loss']
def loss(self, x, y):
'''
`x`: [b, 28, 28, 1]
`y`: label [b]
'''
v = self._C(x) # [n, J=10, V=16]
xh = self._G(v, y) # [n, h, w, c]
J, _, _, _ = self._get_shape_JDUV()
with tf.name_scope('Loss'):
tf.summary.image('x', x, 4)
tf.summary.image('xh', xh, 4)
tf.summary.image('V', tf.expand_dims(v, -1), 4)
hparam = self.arch['loss']
l_reconst = hparam['reconst weight'] * \
tf.reduce_mean(tf.reduce_sum(tf.square(x - xh), [1, 2, 3]))
tf.summary.scalar('l_reconst', l_reconst)
v_norm = tf.norm(v, axis=-1) # [n, J=10]
tf.summary.histogram('v_norm', v_norm)
Y = tf.one_hot(y, J) # [n, J=10]
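            # Margin loss (CapsNet-style): the true class norm is pushed above
            # m+, every other class norm below m-, with the absent-class term
            # down-weighted by lambda.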
loss = Y * tf.square(tf.maximum(0., hparam['m+'] - v_norm)) \
+ hparam['lambda'] * (1. - Y) * \
tf.square(tf.maximum(0., v_norm - hparam['m-']))
loss = tf.reduce_mean(tf.reduce_sum(loss, -1))
tf.summary.scalar('loss', loss)
loss += l_reconst
acc = tf.reduce_mean(
tf.cast(
tf.equal(y, tf.argmax(v_norm, 1)),
tf.float32
))
return {'L': loss, 'acc': acc, 'reconst': l_reconst}
def inspect(self, x):
        with tf.name_scope('Inspector'):
J, _, _, V = self._get_shape_JDUV()
R = self.arch['valid']['spacing']
m = self.arch['valid']['magnitude']
h, w, c = self.arch['hwc']
v = self._C(x) # 10, J=10, V=16, generated from exemplar images
v_eps, y_eps = make_linear_perturbation(J, V, R, m)
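            # Perturbation sweep: `make_linear_perturbation` (helper, not shown
            # here) supplies the offsets; the decoded grid below has R rows of
            # perturbation magnitudes by V columns of DigitCaps dimensions.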
for i in range(10):
vi = tf.expand_dims(v[i], 0) # [1, J=10, V=16]
vi = vi + v_eps # [V*21, 10, V]
xh = self._G(vi, i * y_eps)
xh = tf.reshape(xh, [1, R, V, h, w, c])
xh = tf.transpose(xh, [0, 1, 3, 2, 4, 5])
xh = tf.reshape(xh, [1, R * h, V * w, c])
tf.summary.image('xh{}'.format(i), xh)
def train(self, loss, loss_t):
global_step = tf.Variable(0)
hparam = self.arch['training']
maxIter = hparam['num_epoch'] * 60000 // hparam['batch_size']
learning_rate = tf.train.exponential_decay(
1e-3, global_step,
hparam['decay_step'], 0.99, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
opt = optimizer.minimize(loss['L'], global_step=global_step)
tf.summary.scalar('lr', learning_rate)
sv = tf.train.Supervisor(
logdir=self.arch['logdir'],
# save_summaries_secs=120,
global_step=global_step,
)
sess_config = tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True)
)
with sv.managed_session(config=sess_config) as sess:
for it in range(maxIter):
if it % hparam['update_freq'] == 0:
a = list()
for _ in range(100):
a_ = sess.run(loss_t['acc'])
a.append(a_)
a_t = np.mean(a)
l, a, _ = sess.run([loss['L'], loss['acc'], opt])
print(
'\rIter {}/{}: loss = {:.4e}, acc={:.2f}%; test acc={:.2f}'.format(
it, maxIter, l, a * 100., a_t * 100.),
end=''
)
else:
sess.run(opt)
print()
class CapsuleMultiMNIST(CapsuleNet):
def _pick(self, v, y):
''' v: [b, J, V]
`y`: [b,]
'''
i = tf.expand_dims(tf.range(tf.shape(v)[0]), -1)
y = tf.expand_dims(tf.cast(y, tf.int32), -1)
return tf.gather_nd(v, tf.concat([i, y], -1))
def loss(self, x, y, xi, xj):
v = self._C(x) # [n, J=10, V=16]
tf.summary.image('V', tf.expand_dims(v, -1), 4)
xh_ = self._G(
tf.concat([v, v], 0),
tf.concat([y[:, 0], y[:, 1]], 0)
)
# TODO: an exp on rescaling the DigitCaps
with tf.name_scope('Experiment'):
v_norm = tf.norm(v + 1e-6, axis=-1, keep_dims=True)
v_ = v / v_norm
xh_exp = self._G(
tf.concat([v_, v_], 0),
tf.concat([y[:, 0], y[:, 1]], 0)
)
xhie, xhje = tf.split(xh_exp, 2)
tf.summary.image('xei', xhie, 4)
tf.summary.image('xej', xhje, 4)
with tf.name_scope('Loss'):
with tf.name_scope('Images'):
xhi, xhj = tf.split(xh_, 2)
# pad by -1 (float) = 0 (uint8)
xhx = tf.concat([xhi, xhj, - tf.ones_like(xhi)], -1)
tf.summary.image('x', x, 4)
tf.summary.image('xhx', xhx, 4)
tf.summary.image('xhi', xhi, 4)
tf.summary.image('xhj', xhj, 4)
tf.summary.image('xi', xi, 4)
tf.summary.image('xj', xj, 4)
hparam = self.arch['loss']
r = hparam['reconst weight']
x_ = tf.concat([xi, xj], 0)
l_reconst = r * \
tf.reduce_mean(tf.reduce_sum(tf.square(xh_ - x_), [1, 2, 3]))
tf.summary.scalar('l_reconst', l_reconst)
v_norm = tf.norm(v, axis=-1) # [n, J=10]
tf.summary.histogram('v_norm', v_norm)
J, _, _, _ = self._get_shape_JDUV()
Yi = tf.one_hot(y[:, 0], J)
Yj = tf.one_hot(y[:, 1], J) # [n, J]
Y = Yi + Yj
tf.summary.histogram('v_norm_i', self._pick(v_norm, y[:, 0]))
tf.summary.histogram('v_norm_j', self._pick(v_norm, y[:, 1]))
l, m, M = hparam['lambda'], hparam['m-'], hparam['m+']
with tf.name_scope('Classification'):
# <sol 1> According to Sec. 3, this is it.
loss = Y * tf.square(tf.maximum(0., M - v_norm)) \
+ l * (1. - Y) * tf.square(tf.maximum(0., v_norm - m))
loss = tf.reduce_mean(tf.reduce_sum(loss, -1))
tf.summary.scalar('loss', loss)
loss = loss + l_reconst
# NOTE: the convergence rate of MNIST is astonishingly fast
# (after 1K, the reconst is already pretty good)
# TODO: HOW TO CALCULATE THE "ACCURACY" in MultiMNIST?
acc = tf.cast(tf.nn.in_top_k(v_norm, y[:, 0], 2), tf.float32) \
+ tf.cast(tf.nn.in_top_k(v_norm, y[:, 1], 2), tf.float32)
acc = tf.reduce_mean(acc) / 2.
tf.summary.scalar('UR', acc)
acc = tf.cast(tf.nn.in_top_k(v_norm, y[:, 0], 2), tf.float32) \
* tf.cast(tf.nn.in_top_k(v_norm, y[:, 1], 2), tf.float32)
acc = tf.reduce_mean(acc)
tf.summary.scalar('EM', acc)
return {'L': loss, 'acc': acc, 'reconst': l_reconst}
def train(self, loss, loss_t):
global_step = tf.Variable(0, name='global_step')
hparam = self.arch['training']
maxIter = hparam['num_epoch'] * \
60000000 // hparam['batch_size'] # TODO
learning_rate = tf.train.exponential_decay(
1e-3, global_step,
hparam['decay_step'], 0.99, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
opt = optimizer.minimize(loss['L'], global_step=global_step)
tf.summary.scalar('lr', learning_rate)
sv = tf.train.Supervisor(
logdir=self.arch['logdir'],
# save_summaries_secs=120,
global_step=global_step,
)
sess_config = tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True)
)
with sv.managed_session(config=sess_config) as sess:
for it in range(maxIter):
if it % hparam['update_freq'] == 0:
# a = list()
# for _ in range(100):
# a_ = sess.run(loss_t['acc'])
# a.append(a_)
# a_t = np.mean(a)
l, a, _ = sess.run([loss['L'], loss['acc'], opt])
print(
'\rIter {}/{}: loss = {:.4e}, acc={:.2f}%;'.format(
it, maxIter, l, a * 100.), # , a_t * 100.),
end=''
)
else:
sess.run(opt)
print()
```
|
{
"source": "JeremyCCHsu/jutil",
"score": 3
}
|
#### File: JeremyCCHsu/jutil/image.py
```python
import tensorflow as tf
def nchw_to_nhwc(x):
return tf.transpose(x, [0, 2, 3, 1])
def nhwc_to_nchw(x):
return tf.transpose(x, [0, 3, 1, 2])
def make_png_thumbnail(x, n):
'''
Input:
`x`: Tensor, value range=[-1, 1), shape=[n*n, h, w, c]
`n`: sqrt of the number of images
Return:
`tf.string` (bytes) of the PNG.
(write these binary directly into a file)
'''
with tf.name_scope('MakeThumbnail'):
_, h, w, c = x.get_shape().as_list()
x = tf.reshape(x, [n, n, h, w, c])
x = tf.transpose(x, [0, 2, 1, 3, 4])
x = tf.reshape(x, [n * h, n * w, c])
x = x / 2. + .5
x = tf.image.convert_image_dtype(x, tf.uint8, saturate=True)
x = tf.image.encode_png(x)
return x
```
|
{
"source": "JeremyCCHsu/test-debug",
"score": 2
}
|
#### File: src/wavegrad/inference.py
```python
import numpy as np
import os
import torch
import torchaudio
from argparse import ArgumentParser
from wavegrad.params import AttrDict, params as base_params
from wavegrad.model import WaveGrad
from time import time
models = {}
# TODO: add `severity=1000` and initial `audio`
def predict(spectrogram, model_dir=None, params=None, device=torch.device('cuda'), audio=None, severity=None):
# Lazy load model.
if not model_dir in models:
if os.path.exists(f'{model_dir}/weights.pt'):
checkpoint = torch.load(f'{model_dir}/weights.pt')
else:
checkpoint = torch.load(model_dir)
model = WaveGrad(AttrDict(base_params)).to(device)
model.load_state_dict(checkpoint['model'])
model.eval()
models[model_dir] = model
model = models[model_dir]
model.params.override(params)
with torch.no_grad():
beta = np.array(model.params.noise_schedule)
alpha = 1 - beta
alpha_cum = np.cumprod(alpha)
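    # A `severity` smaller than the schedule length keeps only the last
    # `severity` steps, i.e. denoising starts from a partially-noised `audio`
    # rather than from pure Gaussian noise.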
if severity is None:
severity = len(model.params.noise_schedule)
alpha = alpha[-severity:]
alpha_cum = alpha_cum[-severity:]
# Expand rank 2 tensors by adding a batch dimension.
if len(spectrogram.shape) == 2:
spectrogram = spectrogram.unsqueeze(0)
spectrogram = spectrogram.to(device)
length = model.params.hop_samples * spectrogram.shape[-1]
if audio is None:
audio = torch.randn(spectrogram.shape[0], length, device=device)
else:
      # TODO FROM HERE: padding or truncation
      if audio.shape[-1] > length:
        audio = audio[..., :length].to(device)  # truncate and move to target device
else:
audio = audio.to(device)
padding = (
torch.zeros([audio.shape[0], length - audio.shape[1]])
.to(device)
)
audio = torch.cat([audio, padding], -1)
noise_scale = torch.from_numpy(alpha_cum**0.5).float().unsqueeze(1).to(device)
ti = time()
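    # Reverse diffusion: at each step n the model's noise estimate is removed
    # (scaled by c1 and c2 derived from the noise schedule) and, for every step
    # except the last, fresh Gaussian noise with std sigma is re-injected.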
for n in range(severity - 1, -1, -1):
print(f"{n}/{len(alpha)}", end="\r")
c1 = 1 / alpha[n]**0.5
c2 = (1 - alpha[n]) / (1 - alpha_cum[n])**0.5
prediction = model(audio, spectrogram, noise_scale[n]).squeeze(1)
audio = c1 * (audio - c2 * prediction)
if n > 0:
noise = torch.randn_like(audio)
sigma = ((1.0 - alpha_cum[n-1]) / (1.0 - alpha_cum[n]) * beta[n])**0.5
audio += sigma * noise
audio = torch.clamp(audio, -1.0, 1.0) # TODO: J: I disagree with this step
print(f"\nFinished {spectrogram.shape} in {time() - ti:.2f} secs.")
return audio, model.params.sample_rate
def main(args):
spectrogram = torch.from_numpy(np.load(args.spectrogram_path))
params = {}
if args.noise_schedule:
params['noise_schedule'] = torch.from_numpy(np.load(args.noise_schedule))
audio, sr = predict(spectrogram, model_dir=args.model_dir, params=params)
torchaudio.save(args.output, audio.cpu(), sample_rate=sr)
if __name__ == '__main__':
parser = ArgumentParser(description='runs inference on a spectrogram file generated by wavegrad.preprocess')
parser.add_argument('model_dir',
help='directory containing a trained model (or full path to weights.pt file)')
parser.add_argument('spectrogram_path',
help='path to a spectrogram file generated by wavegrad.preprocess')
parser.add_argument('--noise-schedule', '-n', default=None,
help='path to a custom noise schedule file generated by wavegrad.noise_schedule')
parser.add_argument('--output', '-o', default='output.wav',
help='output file name')
main(parser.parse_args())
```
#### File: src/wavegrad/__main__.py
```python
from argparse import ArgumentParser
from wavegrad.learner import train
from wavegrad.params import params
from wavegrad.dataset import from_path as dataset_from_path
def main(args):
train(dataset_from_path(args.data_dirs, params), args, params)
if __name__ == '__main__':
parser = ArgumentParser(description='train (or resume training) a WaveGrad model')
parser.add_argument('model_dir',
help='directory in which to store model checkpoints and training logs')
parser.add_argument('data_dirs', nargs='+',
help='space separated list of directories from which to read .wav files for training')
parser.add_argument('--max_steps', default=None, type=int,
help='maximum number of training steps')
parser.add_argument('--fp16', action='store_true', default=False,
help='use 16-bit floating point operations for training')
main(parser.parse_args())
```
#### File: src/wavegrad/model.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import log as ln
CONTENT_EMB_DIM = 32
SPEAKER_EMB_DIM = 96
N_SPEAKER = 5
class Conv1d(nn.Conv1d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset_parameters()
def reset_parameters(self):
nn.init.orthogonal_(self.weight)
nn.init.zeros_(self.bias)
class PositionalEncoding(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x, noise_level):
"""
Arguments:
x:
(shape: [N,C,T], dtype: float32)
noise_level:
(shape: [N], dtype: float32)
Returns:
noise_level:
(shape: [N,C,T], dtype: float32)
"""
N = x.shape[0]
T = x.shape[2]
return (x + self._build_encoding(noise_level)[:, :, None])
def _build_encoding(self, noise_level):
count = self.dim // 2
step = torch.arange(count, dtype=noise_level.dtype, device=noise_level.device) / count
encoding = noise_level.unsqueeze(1) * torch.exp(-ln(1e4) * step.unsqueeze(0))
encoding = torch.cat([torch.sin(encoding), torch.cos(encoding)], dim=-1)
return encoding
class FiLM(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.encoding = PositionalEncoding(input_size)
self.input_conv = nn.Conv1d(input_size, input_size, 3, padding=1)
self.output_conv = nn.Conv1d(input_size, output_size * 2, 3, padding=1)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.input_conv.weight)
nn.init.xavier_uniform_(self.output_conv.weight)
nn.init.zeros_(self.input_conv.bias)
nn.init.zeros_(self.output_conv.bias)
def forward(self, x, noise_scale):
x = self.input_conv(x)
x = F.leaky_relu(x, 0.2)
x = self.encoding(x, noise_scale)
shift, scale = torch.chunk(self.output_conv(x), 2, dim=1)
return shift, scale
class UBlock(nn.Module):
def __init__(self, input_size, hidden_size, factor, dilation):
super().__init__()
assert isinstance(dilation, (list, tuple))
assert len(dilation) == 4
self.factor = factor
self.block1 = Conv1d(input_size, hidden_size, 1)
self.block2 = nn.ModuleList([
Conv1d(input_size, hidden_size, 3, dilation=dilation[0], padding=dilation[0]),
Conv1d(hidden_size, hidden_size, 3, dilation=dilation[1], padding=dilation[1])
])
self.block3 = nn.ModuleList([
Conv1d(hidden_size, hidden_size, 3, dilation=dilation[2], padding=dilation[2]),
Conv1d(hidden_size, hidden_size, 3, dilation=dilation[3], padding=dilation[3])
])
def forward(self, x, film_shift, film_scale):
block1 = F.interpolate(x, size=x.shape[-1] * self.factor)
block1 = self.block1(block1)
block2 = F.leaky_relu(x, 0.2)
block2 = F.interpolate(block2, size=x.shape[-1] * self.factor)
block2 = self.block2[0](block2)
block2 = film_shift + film_scale * block2
block2 = F.leaky_relu(block2, 0.2)
block2 = self.block2[1](block2)
x = block1 + block2
block3 = film_shift + film_scale * x
block3 = F.leaky_relu(block3, 0.2)
block3 = self.block3[0](block3)
block3 = film_shift + film_scale * block3
block3 = F.leaky_relu(block3, 0.2)
block3 = self.block3[1](block3)
x = x + block3
return x
class DBlock(nn.Module):
def __init__(self, input_size, hidden_size, factor):
super().__init__()
self.factor = factor
self.residual_dense = Conv1d(input_size, hidden_size, 1)
self.conv = nn.ModuleList([
Conv1d(input_size, hidden_size, 3, dilation=1, padding=1),
Conv1d(hidden_size, hidden_size, 3, dilation=2, padding=2),
Conv1d(hidden_size, hidden_size, 3, dilation=4, padding=4),
])
def forward(self, x):
size = x.shape[-1] // self.factor
residual = self.residual_dense(x)
residual = F.interpolate(residual, size=size)
x = F.interpolate(x, size=size)
for layer in self.conv:
x = F.leaky_relu(x, 0.2)
x = layer(x)
return x + residual
class WaveGrad(nn.Module):
def __init__(self, params):
super().__init__()
self.params = params
self.downsample = nn.ModuleList([
Conv1d(1, 32, 5, padding=2),
DBlock(32, 128, 2),
DBlock(128, 128, 2),
DBlock(128, 256, 3),
DBlock(256, 512, 5),
])
self.film = nn.ModuleList([
FiLM(32, 128),
FiLM(128, 128),
FiLM(128, 256),
FiLM(256, 512),
FiLM(512, 512),
])
self.upsample = nn.ModuleList([
UBlock(768, 512, 5, [1, 2, 1, 2]),
UBlock(512, 512, 5, [1, 2, 1, 2]),
UBlock(512, 256, 3, [1, 2, 4, 8]),
UBlock(256, 128, 2, [1, 2, 4, 8]),
UBlock(128, 128, 2, [1, 2, 4, 8]),
])
self.first_conv = Conv1d(128, 768, 3, padding=1)
self.last_conv = Conv1d(128, 1, 3, padding=1)
def forward(self, audio, spectrogram, noise_scale):
# FIXME: EXPERIMENTAL: UNCONDITIONAL ===============
spectrogram = 0 * spectrogram
# ==================================================
x = audio.unsqueeze(1)
downsampled = []
for film, layer in zip(self.film, self.downsample):
x = layer(x)
downsampled.append(film(x, noise_scale))
x = self.first_conv(spectrogram)
for layer, (film_shift, film_scale) in zip(self.upsample, reversed(downsampled)):
x = layer(x, film_shift, film_scale)
x = self.last_conv(x)
return x
# Jadd =======================================
class SpeakerFiLM(nn.Module):
def __init__(self, output_size, n_speakers=N_SPEAKER):
super().__init__()
self.shift = nn.Embedding(n_speakers, output_size)
self.scale = nn.Embedding(n_speakers, output_size)
def forward(self, x, index):
"""
:param x: [B, ]
"""
shift = self.shift(index).unsqueeze(-1)
scale = self.scale(index).unsqueeze(-1)
return scale * x + shift
class DownCleansingBlock(nn.Module):
def __init__(self, input_size, hidden_size, factor):
super().__init__()
self.factor = factor
self.residual_dense = Conv1d(input_size, hidden_size, 1)
self.conv = nn.ModuleList([
Conv1d(input_size, hidden_size, 3, dilation=1, padding=1),
Conv1d(hidden_size, hidden_size, 3, dilation=2, padding=2),
Conv1d(hidden_size, hidden_size, 3, dilation=4, padding=4),
])
self.instance_norm = nn.ModuleList([
nn.InstanceNorm1d(hidden_size, affine=False),
nn.InstanceNorm1d(hidden_size, affine=False),
nn.InstanceNorm1d(hidden_size, affine=False),
])
def forward(self, x):
size = x.shape[-1] // self.factor
residual = self.residual_dense(x)
residual = F.interpolate(residual, size=size)
x = F.interpolate(x, size=size)
for layer, normalize in zip(self.conv, self.instance_norm):
x = normalize(x)
x = F.leaky_relu(x, 0.2)
x = layer(x)
return x + residual
class ContentEncoder(nn.Module):
"""
Enc(waveform, speaker) --> phone embedding
"""
def __init__(self): #, params):
super().__init__()
# self.params = params
self.downsample = nn.ModuleList([
Conv1d(1, 32, 5, padding=2),
DownCleansingBlock(32, 128, 2),
DownCleansingBlock(128, 128, 2),
DownCleansingBlock(128, 256, 3),
DownCleansingBlock(256, 512, 5),
DownCleansingBlock(512, CONTENT_EMB_DIM, 5),
])
self.film = nn.ModuleList([
SpeakerFiLM(32,),
SpeakerFiLM(128),
SpeakerFiLM(128),
SpeakerFiLM(256),
SpeakerFiLM(512),
SpeakerFiLM(CONTENT_EMB_DIM),
])
# self.first_conv = Conv1d(128, 768, 3, padding=1)
# self.last_conv = Conv1d(128, 1, 3, padding=1)
def forward(self, audio, speaker):
"""
:param audio: [B, T]
:param speaker: [B,]
"""
x = audio.unsqueeze(1)
# downsampled = []
for film, layer in zip(self.film, self.downsample):
x = layer(x)
# shift, scale = film(x)
# x = shift + scale * x
x = film(x, speaker)
# downsampled.append(x)
# x = self.first_conv(spectrogram)
# for layer, (film_shift, film_scale) in zip(self.upsample, reversed(downsampled)):
# x = layer(x, film_shift, film_scale)
# x = self.last_conv(x)
return x
class LatentEmbedding(nn.Module):
"""
Enc(waveform, speaker) --> phone embedding
"""
def __init__(self): #, params):
super().__init__()
self.speaker_emb = nn.Embedding(N_SPEAKER, SPEAKER_EMB_DIM)
# self.conv = nn.ModuleList([
# nn.Conv1d()
# ])
dim = SPEAKER_EMB_DIM + CONTENT_EMB_DIM
self.conv = nn.Sequential(
nn.Conv1d(dim, 256, 3, padding=1),
nn.LeakyReLU(0.2),
nn.Conv1d(256, dim, 3, padding=1)
)
final_layer = nn.Conv1d(256, dim, 3, padding=1)
final_layer.bias.data.fill_(2.)
# torch.nn.init.xavier_uniform(conv1.weight)
self.gate = nn.Sequential(
nn.Conv1d(dim, 256, 3, padding=1),
nn.LeakyReLU(0.2),
# nn.Conv1d(256, dim, 3, padding=1),
final_layer,
nn.Sigmoid()
)
# augment -> conv -> ReLU -> conv -> linear = conv
# \-> conv -> ReLU -> conv -> sigmoid = gate
# gate * conv
def forward(self, content_emb, speaker):
"""
:param content_emb: [B, c, L]
:param speaker: [B]
"""
L = content_emb.shape[2]
speaker_emb = self.speaker_emb(speaker).unsqueeze(-1).repeat([1, 1, L])
embeddings = torch.cat([content_emb, speaker_emb], 1)
conv = self.conv(embeddings)
gate = self.gate(embeddings)
return gate * conv
class WaveVCTraining(nn.Module):
def __init__(self, params):
super().__init__()
self.params = params
self.wavegrad = WaveGrad(params)
self.fuse_latent = LatentEmbedding()
self.encoder = ContentEncoder()
def encode(self, audio, source_speakers):
"""
:param audio: [B, T]
:param speaker: [B, (S)]
:return pseudospecs: [S, B, c, T]
"""
if len(source_speakers.shape) == 1:
source_speakers = source_speakers.unsqueeze(-1)
# target_speakers = target_speakers.unsqueeze(-1)
# pseudospecs = []
content_emb = []
n_speaker = source_speakers.shape[1]
for i in range(n_speaker):
content = self.encoder(audio, source_speakers[:, i])
content_emb.append(content)
# pseudospec = self.fuse_latent(content, target_speakers[:, i])
# pseudospecs.append(pseudospec)
# return pseudospecs
return content_emb
def sample(self, content):
"""
:param content: [b, 2c, T] c for mean and log var, respectively.
"""
        # TODO: left unfinished in the original; split `content` into mean/log-variance halves and reparameterize.
        raise NotImplementedError
def blende(self, content, speaker):
# equiv to `fuse_latent`
pass
def forward(self, audio, speaker, noise_scale):
if len(speaker.shape) == 1:
speaker = speaker.unsqueeze(-1)
embeddings = []
n_speaker = speaker.shape[1]
for i in range(n_speaker):
content = self.encoder(audio, speaker[:, i])
pseudospec = self.fuse_latent(content, speaker[:, i])
embeddings.append(pseudospec)
pseudospec = sum(embeddings)
# NOTE: the output is grad log p(x|z, y)
output = self.wavegrad(audio, pseudospec, noise_scale)
return output
class WaveConvert(WaveVCTraining):
def __init__(self, params):
super().__init__(params)
def decode(self, audio, pseudospecs, noise_scale): # speaker,
"""
:param audio: [B, T]
:param pseudospecs: [S, B, c, T]
# :param speaker: [B, (S)]
:return gradient: [B, c=1, T]
"""
# if len(speaker.shape) == 1:
# speaker = speaker.unsqueeze(-1)
# assert speaker.shape[1] == len(pseudospecs)
# TODO: maybe change its format
pseudospecs = torch.stack(pseudospecs)
n_speakers, batch_size, c, n_frames = pseudospecs.shape
pseudospecs = pseudospecs.view(-1, c, n_frames)
_, time_length = audio.shape
audio = audio.unsqueeze(1).repeat(1, n_speakers, 1).view(-1, time_length)
# speaker = speaker.repeat(batch_size, 1).view(-1)
return self.wavegrad(audio, pseudospecs, noise_scale)
# output = []
# n_speaker = speaker.shape[1]
# for i in range(n_speaker):
# output = self.wavegrad(audio, pseudospecs[i], noise_scale)
def encode(self, audio, source_speakers, target_speakers):
"""
:param audio: [B, T]
:param speaker: [B, (S)]
:return pseudospecs: [S, B, c, T]
"""
if len(source_speakers.shape) == 1:
source_speakers = source_speakers.unsqueeze(-1)
target_speakers = target_speakers.unsqueeze(-1)
pseudospecs = []
n_speaker = source_speakers.shape[1]
for i in range(n_speaker):
content = self.encoder(audio, source_speakers[:, i])
pseudospec = self.fuse_latent(content, target_speakers[:, i])
pseudospecs.append(pseudospec)
return pseudospecs
```
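A quick way to sanity-check the shapes flowing through `WaveGrad` is a forward pass on random tensors. The sketch below assumes a hop of 300 samples (the product of the upsampling factors 5*5*3*2*2), a 128-channel spectrogram to match `first_conv`, and a minimal stand-in for `params`, which the forward pass only stores.
```python
import torch
from types import SimpleNamespace
from wavegrad.model import WaveGrad  # assumes the package layout shown above

params = SimpleNamespace(sample_rate=22050, hop_samples=300)  # stand-in params object
model = WaveGrad(params)
batch, frames, hop = 2, 24, 300
spectrogram = torch.randn(batch, 128, frames)   # 128 channels to match first_conv
audio = torch.randn(batch, frames * hop)        # audio length must equal frames * hop
noise_scale = torch.rand(batch)
with torch.no_grad():
    out = model(audio, spectrogram, noise_scale)
print(out.shape)  # expected: torch.Size([2, 1, 7200])
```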
|
{
"source": "jeremycclo/msci_reionisation",
"score": 3
}
|
#### File: msci_reionisation/code2/hk3d.py
```python
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
verbose = False
def hoshen_kopelman(box):
"""
Given binary input file in 3D, return labelled points identifying
clusters
"""
mask = box.copy()
#list of labels initially set to labels[x] = x i.e. each element in array
# has a unique identifier
labels = np.arange(mask.size)
label = np.zeros(mask.shape)
largest_label = 0;
for x in range(mask.shape[0]):
if verbose:
print ("ID clusters: %i of %i" % (x, mask.shape[0]) )
for y in range(mask.shape[1]):
for z in range(mask.shape[2]):
if mask[x,y,z]:
                    #find values to the left, up and behind, being careful about edges
#Note that removing the if statements here, should allow
#for periodic boundary conditions
if x>0:
left = mask[x-1, y, z].astype(int)
else:
left = 0
if y>0:
up = mask[x, y-1, z].astype(int)
else:
up = 0
if z>0:
behind = mask[x, y, z-1].astype(int)
else:
behind = 0
#Check for neighbours
                    #next line counts which of left, up and behind are non-zero
check = (not not left) + (not not up) + (not not behind)
if check == 0: #No occupied neighbours
#Make a new, as-yet-unused cluster label.
mask[x,y,z] = make_set(labels)
                    elif check == 1: # One occupied neighbour: left, up or behind.
#use whichever is non-zero
mask[x,y,z] = max(max(up, left), behind)
elif check == 2: #Two neighbours to be linked together
if up and left:
mask[x,y,z] = union(left, up, labels)
elif up and behind:
mask[x,y,z] = union(behind, up, labels)
elif left and behind:
mask[x,y,z] = union(behind, left, labels)
else:
raise Exception("Something's gone wrong!")
elif check == 3: #Three neighbours to be linked
#Link all three via two unions of two
#Is ordering of this important? Doesn't seem to be
mask[x,y,z] = union(left, up, labels)
mask[x,y,z] = union(left, behind, labels)
else:
raise Exception("Invalid value for check")
#Now at the end relabel to ensure consistency i.e. that clusters are
#numbered sequentially
#In 3D a checker pattern gives the largest number of possible clusters
    max_labels = mask.size // 2
n_labels = max_labels
new_labels = np.zeros(n_labels)
for i in range(mask.shape[0]):
if verbose:
print ("Resequencing: %i of %i" % (i, mask.shape[0]) )
for j in range(mask.shape[1]):
for k in range(mask.shape[2]):
if mask[i, j, k]:
x = find(mask[i, j, k].astype(int), labels)
if new_labels[x] == 0:
new_labels[0] += 1
new_labels[x] = new_labels[0]
mask[i, j, k] = new_labels[x]
    #cast to int so cluster labels can be used directly for bincount/indexing downstream
    return mask.astype(int)
def find_simple(x, labels):
while(labels[x] != x):
x = labels[x]
return x
def union(x, y, labels):
#Make two labels equivalent by linking their respective chains of aliases
labels[find(x, labels)] = find(y, labels)
return find(y, labels)
def find(x, labels):
"""
Better version of find - Much faster
"""
y = x.copy()
z = 0
while (labels[y] != y):
y = labels[y]
#second part collapses labels
while (labels[x] !=x):
z = labels[x].copy()
labels[x] = y
x = z
return y
def make_set(labels):
"""
Create a new equivalence class and return its class label
"""
labels[0] += 1
labels[labels[0]] = labels[0]
return labels[0].astype(int)
def check_labelling(mask):
"""
Check identification of clusters i.e. that all neighbours of a pixel
have the same label
"""
for i in range(mask.shape[0]):
print ("Checking labels: %i of %i" % (i, mask.shape[0]) )
for j in range(mask.shape[1]):
for k in range(mask.shape[2]):
if mask[i, j, k]:
N = (0 if i == 0 else mask[i-1][j][k])
S = (0 if i == mask.shape[0]-1 else mask[i+1][j][k])
E = (0 if j == mask.shape[1]-1 else mask[i][j+1][k])
W = (0 if j == 0 else mask[i][j-1][k])
U = (0 if k == 0 else mask[i][j][k-1])
D = (0 if k == mask.shape[2]-1 else mask[i][j][k+1])
assert( N==0 or mask[i][j][k]==N )
assert( S==0 or mask[i][j][k]==S )
assert( E==0 or mask[i][j][k]==E )
assert( W==0 or mask[i][j][k]==W )
assert( U==0 or mask[i][j][k]==U )
assert( D==0 or mask[i][j][k]==D )
print ("Labelling checks out")
##########################################
# Analysis functions
##########################################
def cluster_sizes(clusters):
"""
Returns number of cells in different size clusters
0 = not occupied
1 - N are cluster labels
Output has v_clusters[cluster_index] = number of pixels in that cluster
"""
v_clusters = np.bincount(clusters.reshape(clusters.size))
return v_clusters
def locate_largest_cluster(clusters):
"""
pick out the largest cluster for easy plotting
ignore the 0-index stuff
Output labels all clusters as 1 and largest cluster as 2 for easy
plotting
"""
#Find cluster label of largest cluster ignoring 0-index stuff
volumes = cluster_sizes(clusters)
largest = np.where(volumes[1:] == np.max(volumes[1:]))[0][0] + 1
print ("largest cluster is ", largest)
mask = np.zeros(clusters.shape)
mask[clusters>0] = 1
mask[clusters == largest] = 2
return mask
def find_spanning_cluster(clusters):
"""
Look for any clusters that have pixels on any two parallel edges
Will return 0-index as well as
Use set functionality to look for the intersection of points of edge1
and edge 2 to find any clusters that are present at both edges. Since
    clusters are contiguous this must mean those clusters span the space.
"""
side = clusters.shape[0] * clusters.shape[1]
edge1 = set(clusters[:, :, 0].reshape(side))
edge2 = set(clusters[:, :, -1].reshape(side))
spanningz = edge1.intersection(edge2)
#print "z: ", spanningz
edge1 = set(clusters[:, 0, :].reshape(side))
edge2 = set(clusters[:, -1, :].reshape(side))
spanningx = edge1.intersection(edge2)
#print "x: ", spanningx
edge1 = set(clusters[0, :, :].reshape(side))
edge2 = set(clusters[-1, :, :].reshape(side))
spanningy = edge1.intersection(edge2)
#print "y: ", spanningy
#combine for all spanning clusters
spanning = spanningx.union(spanningy)
spanning = spanning.union(spanningz)
#print "spanning cluster is: ", spanning
return spanning
def summary_statistics(box, clusters):
"""
box is a binary box
clusters is output of HK algorithm containing cluster labels
    Calculate volume distribution, identity of spanning cluster
and order parameter
Order parameter is defined as
(no. pixels in spanning cluster) / (no. pixels in all clusters)
"""
volumes = cluster_sizes(clusters)
spanning = find_spanning_cluster(clusters)
if max(spanning) > 0:
order = volumes[max(spanning)] / float(box.sum())
else:
order = 0.0
return volumes, spanning, order
def size_distribution(volumes, nbins = 20):
"""
Standardise construction of size histogram
Sizes can become huge so need to assemble this carefully and using
log bins
"""
if len(volumes) == 1:
#Handle case that there are no clusters at all
return [], []
#linear bins - becomes very slow if have large clusters
bins = np.arange(int(np.max(volumes[1:])+3))
#log bins
delta = ( np.log(np.max(volumes[1:])) - np.log(1) ) / float(nbins-1)
bins = np.exp( np.arange(nbins) * delta )
hist, bin_edges = np.histogram(volumes[1:], bins = bins)
return bin_edges[0:-1], hist
def summary_clusters(box, clusters):
#pretty display
p = box.sum() / float(box.size)
print ("HK found %i clusters" % (np.max(clusters)) )
    MID = box.shape[0] // 2
volumes = cluster_sizes(clusters)
print ("Largest cluster has size = ", np.max(volumes[1:]) )
spanning = find_spanning_cluster(clusters)
if max(spanning) > 0:
print ("Spanning cluster exists and has size =", volumes[max(spanning)] )
print ("Number of ionized pixels is =", box.sum() )
print ("Order parameter is ", volumes[max(spanning)] / float(box.sum()) )
#print clusters[:,:,MID]
plt.figure(figsize=(6, 2.2))
plt.subplot(121)
plt.imshow(box[:,:,MID], cmap=plt.cm.gray)
plt.title("Input box f=%0.2f" % (p))
plt.subplot(122)
plt.imshow(locate_largest_cluster(clusters)[:,:,MID])
plt.title("Clusters N=%i" %(np.max(clusters)))
#size distribution
plt.figure()
plt.subplot(121)
plt.plot(volumes[1:], 'ko-',linewidth=2,drawstyle='steps-mid')
plt.ylabel("Size of cluster")
plt.xlabel("Cluster label")
plt.subplot(122)
hist, bin_edges = np.histogram(volumes[1:], bins = np.arange(int(np.max(volumes[1:])+3)))
plt.plot(bin_edges[0:-1], hist, 'ko-',linewidth=2,drawstyle='steps-mid')
plt.ylim([0.1, max(1, np.max(hist)+1)])
plt.ylabel("Number of clusters")
plt.xlabel("Size of cluster")
plt.title("Max size=%i" % (np.max(volumes[1:])))
plt.show()
plt.close()
def main():
"""
Test example with random field
"""
import matplotlib.pyplot as plt
reset = True
m = n = l = 10
ntrials = 1
box = np.zeros([m, n, l])
for trial in range(ntrials):
p = npr.uniform()
#create a random matrix thresholded on p
if reset:
mask = (npr.uniform(size = box.shape) < p)
box[mask] = int(1)
if reset:
filename = "temp.dat"
file = open(filename, "wb")
file.write(box.astype(int))
file.close()
else:
filename = "temp.dat"
f = open(filename, "rb")
dtype = np.int64
data = f.read()
f.close()
DIM = m
            _data = np.frombuffer(data, dtype)
_data.shape = (DIM, DIM, DIM)
_data = _data.reshape((DIM, DIM, DIM), order='F')
box = _data
#print box
#run the algorithm
clusters = hoshen_kopelman(box)
check_labelling(clusters)
#print probability and cluster output
#print p, clusters
print ("prob = %f" % (p) )
summary_clusters(box, clusters)
if __name__ == "__main__":
main()
```
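A minimal way to exercise the labelling code above, assuming the file is importable as `hk3d`, is to threshold a small random box and inspect the resulting cluster statistics:
```python
import numpy as np
import numpy.random as npr
import hk3d  # assumes the module above is on the Python path

npr.seed(0)
box = (npr.uniform(size=(16, 16, 16)) < 0.3).astype(int)  # ~30% filling fraction
clusters = hk3d.hoshen_kopelman(box)
hk3d.check_labelling(clusters)
volumes = hk3d.cluster_sizes(clusters)
spanning = hk3d.find_spanning_cluster(clusters)
print("number of clusters:", int(np.max(clusters)))
print("largest cluster volume:", int(np.max(volumes[1:])))
print("labels touching two parallel faces:", spanning)
```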
#### File: msci_reionisation/code2/minkowski.py
```python
import subprocess
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
from numpy import sqrt, pi
from scipy.special import erf
new = True
def run_minkowski_frombox(datacube,
DIM = 256,
nbins = 1, low_threshold = 0.5, high_threshold = 0.5, smoothing = 0):
"""
"""
if new:
box = datacube.copy()
else:
box = np.insert(datacube, 0, datacube.shape)
#write the input data file
infile = "input_mink.dat"
dtype = np.float32
of = open(infile, "wb")
of.write(box.astype(dtype))
of.close()
outfile = "output_mink.dat"
#run the code
run_minkowski_new(infile, outfile, DIM, nbins, low_threshold, high_threshold, smoothing)
#Read in data
data = np.loadtxt(outfile)
threshold = data[:, 0]
V0 = data[:, 1]
V1 = data[:, 2]
V2 = data[:, 3]
V3 = data[:, 4]
return threshold, V0, V1, V2, V3
def run_minkowski(infile, outfile = "output.dat",
DIM = 256,
nbins = 1, low_threshold = 0.5, high_threshold = 0.5, smoothing = 0):
#new = True
if new:
run_minkowski_new(infile,outfile,DIM,nbins, low_threshold, high_threshold, smoothing)
else:
run_minkowski_old(infile,outfile,DIM,nbins, low_threshold, high_threshold, smoothing)
def run_minkowski_new(infile, outfile = "output.dat",
DIM = 256,
nbins = 1, low_threshold = 0.5, high_threshold = 0.5, smoothing = 0):
"""
run ./beyond
This is the Minkowski-3 code from Buchert
Assumes that infile contains a cubic datacube in binary format
Will output nbins values on interval [lo, high]
"""
#Evaluate Minkowski functional at nbins different thresholds between
#low_threshold and high_threshold
#nbins = 1
#low_threshold = 0.5
#high_threshold = 0.5
#integer ideally a power of 2 to allow for Gaussian smoothing
#smoothing = 2
#oversampling
intervals = 2
#Note that beyond calculates thresholds internally on basis of
# [lo, high] with nbin+1 values. This is inconsistent with the
# Readme, but stems from loops like for(j=0;j<=nbins;j++), which
# count nbins+1 values. Hence subtract one from nbins when passing
# to beyond to ensure more sensible meaning to nbins.
# Done this way output should match np.linspace(lo, high, nbins) and
# python conventions for loops
#assemble command string
cmd = "/Users/jpritcha/Documents/current/projects/Solene/MinkowskiFunctionals/beyond/"
cmd = cmd + "beyond -x%d -y%d -z%d -b%i -l%f -h%f -m%i -s%i -i%s -o%s -N -t" % (DIM, DIM, DIM, np.max(nbins-1, 1), low_threshold, high_threshold, intervals, smoothing, infile, outfile)
print cmd
subprocess.call(cmd, shell=True)
def run_minkowski_old(infile, outfile = "output.dat",
DIM = 256,
nbins = 1, low_threshold = 0.5, high_threshold = 0.5, smoothing = 0):
"""
run ./Minkowski
This makes use of the modified (and I think older) version of the code
received from Suman by way of Martina
"""
#Evaluate Minkowski functional at nbins different thresholds between
#low_threshold and high_threshold
#nbins = 1
#low_threshold = 0.5
#high_threshold = 0.5
#integer ideally a power of 2 to allow for Gaussian smoothing
#smoothing = 0
#Note that there is a slight inconsistency in how minkowski has been
#hacked and the threshold values. Internally the thresholds are
#calculated on interval [lo, high] with nbins+1 values, but
#the output is hacked to output only [lo, high) & exclude the top bin.
cmd = "/Users/jpritcha/Documents/current/projects/Solene/MinkowskiFunctionals/Minkowski/"
cmd = cmd + "minkowski -x%d -y%d -z%d -b%i -l%f -h%f -m2 -s%i -i%s -o%s -N -f -c -t" % (DIM, DIM, DIM, nbins, low_threshold, high_threshold, smoothing, infile, outfile)
    print(cmd)
subprocess.call(cmd, shell=True)
########################
# Simple script to make a Gaussian random field and output to a file
# in the format required by the Minkowski codes
########################
def make_test_box(DIM = 128):
"""
Gaussian box to test Minkowski code.
Produces a [DIM, DIM, DIM] box with values chosen from a unit Gaussian.
    First three integers of the output binary file are the dimensions of
the box.
"""
#Gaussian random field
box = npr.normal(size = [DIM, DIM, DIM])
#write out in a form that minkowski can read
#Needs dimensions of box as first three ints
box = np.insert(box, 0, [DIM, DIM, DIM])
outfilename = "test_gaussian.dat"
dtype = np.float32
of = open(outfilename, "wb")
of.write(box.astype(dtype))
of.close()
######################
# Theoretical prediction
#####################
def gaussian_theory(sigma = 1.0, box = None):
"""
Evaluate analytic expressions for Gaussian minkowski functionals
from Schmalzing and Buchert (1997) and Gleser+ (2006)
if box is not None will normalise to a box
"""
if box is not None:
sigma = sqrt(np.var(box))
sigma1 = sqrt(np.var(np.gradient(box)))
else:
sigma1 = 1.0
#dimensionless threshold
threshold = np.linspace(-4.0, 4.0, 40) * sqrt(sigma)
u = threshold / sqrt(sigma)
#lambda parameter
xi = sigma * sigma
xipp = sigma1 * sigma1
lam = sqrt(xipp / (6.0 * pi * xi))
#now calculate the Minkowski functionals
V0 = 0.5 - 0.5 * erf(u / sqrt(2.0))
V1 = (2.0 / 3.0) * (lam / sqrt(2.0 * pi)) * np.exp(- u * u /2.0)
V2 = (2.0 / 3.0) * (lam * lam / sqrt(2.0 * pi)) * u * np.exp(- u * u /2.0)
V3 = (2.0 / 3.0) * (lam * lam * lam / sqrt(2.0 * pi)) * (u*u -1.0) * np.exp(- u * u /2.0)
return V0, V1, V2, V3
def output_plot(infile, box = None):
"""
Example plot of Minkowski functionals
"""
#Theory calculation
#if box is not None:
# threshold, V0, V1, V2, V3 = gaussian_theory()
#Read in data
data = np.loadtxt(infile)
threshold = data[:, 0]
V0 = data[:, 1]
V1 = data[:, 2]
V2 = data[:, 3]
V3 = data[:, 4]
plt.figure()
plt.subplot(221)
plt.plot(threshold, V0)
plt.subplot(222)
plt.plot(threshold, V1)
plt.subplot(223)
plt.plot(threshold, V2)
plt.subplot(224)
plt.plot(threshold, V3)
plt.show()
```
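The analytic curves from `gaussian_theory` give a quick reference point. The sketch below, assuming the file is importable as `minkowski`, rebuilds the dimensionless threshold grid the function uses internally (it returns only V0 to V3) and plots the four functionals:
```python
import numpy as np
import matplotlib.pyplot as plt
import minkowski  # assumes the module above is on the Python path

V0, V1, V2, V3 = minkowski.gaussian_theory(sigma=1.0)
u = np.linspace(-4.0, 4.0, 40)  # same grid as used inside gaussian_theory
fig, axes = plt.subplots(2, 2, figsize=(8, 6))
for ax, V, name in zip(axes.flat, [V0, V1, V2, V3], ['V0', 'V1', 'V2', 'V3']):
    ax.plot(u, V)
    ax.set_title(name)
    ax.set_xlabel('threshold (sigma units)')
plt.tight_layout()
plt.show()
```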
|
{
"source": "jeremycfd/CytoMod",
"score": 2
}
|
#### File: CytoMod/cytomod/example.py
```python
import pandas as pd
import cytomod as cy
import os.path as op
import numpy as np
import palettable
import matplotlib.pyplot as plt
from scipy import stats
from custom_legends import colorLegend
import seaborn as sns
from hclusterplot import *
sns.set_context('paper')
DATA_PATH = './data'  # placeholder: point this at the folder containing the LEGENDplex csv
dataFilename = op.join(DATA_PATH, '170615_LEGENDplex_ADAMTS4_DB.csv')
"""A long df has one analyte measurement per row"""
longDf = pd.read_csv(dataFilename)
longDf.loc[:,'ptid'] = ['%s-%d-%d' % c for c in zip(longDf.genotype, longDf['sample'], longDf['dpi'])]
"""Print table of sample count"""
print(longDf.loc[longDf.cytokine=='mcp1'].groupby(['genotype', 'dpi'])['ptid'].count())
"""Identify primary day for clustering"""
df = longDf.set_index(['ptid', 'dpi','cytokine'])['log10_conc'].unstack(['cytokine','dpi'])
#plt.plot([0, 3, 6, 9, 12], df['ifng'].values.T, '-o')
"""A wide df has one sample per row (analyte measurements across the columns)"""
# dayDf = longDf.loc[longDf.dpi == 9]
dayDf = longDf.loc[longDf.dpi.isin([3, 6, 9])]
tmp = dayDf.pivot_table(index='ptid', columns='cytokine', values='log10_conc')
noVar = tmp.columns[np.isclose(tmp.std(), 0)].tolist()
naCols = tmp.columns[(~tmp.isnull()).sum() < 5].tolist() + ['il21', 'il9']
keepCols = [c for c in tmp.columns if not c in (noVar + naCols)]
def _prepCyDf(dayDf, keepCols, K=3, normed=False):
dayDf = dayDf.pivot_table(index='ptid', columns='cytokine', values='log10_conc')[keepCols]
"""By setting normed=True the data our normalized based on correlation with mean analyte concentration"""
rcyc = cy.cytomod_class(studyStr='ADAMTS', sampleStr='LUNG', adjusted=normed, rCyDf=dayDf)
rcyc.cluster_cytokines(K=K, metric='spearman-signed', minN=0)
rcyc.printModules()
return rcyc
rcyc = _prepCyDf(dayDf, keepCols, normed=True)
wt = _prepCyDf(dayDf.loc[dayDf.genotype == 'WT'], keepCols, normed=True)
ko = _prepCyDf(dayDf.loc[dayDf.genotype == 'KO'], keepCols, normed=True)
"""Now you can use attributes in nserum for plots and testing: cyDf, modDf, dmatDf, etc."""
plt.figure(41, figsize=(15.5, 9.5))
colInds = plotHColCluster(rcyc.cyDf,
method='complete',
metric='pearson-signed',
col_labels=rcyc.labels,
col_dmat=rcyc.dmatDf,
tickSz='large',
vRange=(0,1))
plt.figure(43, figsize = (15.5, 9.5))
colInds = cy.plotting.plotHierClust(1 - rcyc.pwrel,
rcyc.Z,
labels=rcyc.labels,
titleStr='Pairwise reliability (%s)' % rcyc.name,
vRange=(0, 1),
tickSz='large')
plt.figure(901, figsize=(13, 9.7))
cy.plotting.plotModuleEmbedding(rcyc.dmatDf, rcyc.labels, method='kpca', txtSize='large')
colors = palettable.colorbrewer.get_map('Set1', 'qualitative', len(np.unique(rcyc.labels))).mpl_colors
colorLegend(colors, ['%s%1.0f' % (rcyc.sampleStr, i) for i in np.unique(rcyc.labels)], loc='lower left')
"""df here should have one column per module and the genotype column"""
ptidDf = longDf[['ptid', 'sample', 'genotype', 'dpi']].drop_duplicates().set_index('ptid')
df = rcyc.modDf.join(ptidDf)
ind = df.genotype == 'WT'
col = 'SERUM1'
stats.ranksums(df[col].loc[ind], df[col].loc[~ind])
```
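The last three lines test a single module by genotype. As an illustrative extension that reuses `df`, `rcyc` and `stats` from the script above, the same rank-sum test can be looped over every module score:
```python
# Illustrative extension: Wilcoxon rank-sum test for every module score by genotype.
ind = df.genotype == 'WT'
for col in rcyc.modDf.columns:
    z, p = stats.ranksums(df[col].loc[ind].dropna(), df[col].loc[~ind].dropna())
    print('%s: z = %.2f, p = %.3g' % (col, z, p))
```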
#### File: cytomod/otherTools/corrplots.py
```python
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from scipy import polyfit, polyval, stats
import pandas as pd
# from mytext import textTL, textTR
import statsmodels.api as sm
from patsy import dmatrices,ModelDesc,Term,LookupFactor
from copy import deepcopy
import itertools
import warnings
import palettable
__all__ = ['partialcorr',
'combocorrplot',
'scatterfit',
'heatmap',
'crosscorr',
'pwpartialcorr',
'corrheatmap',
'validPairwiseCounts',
'removeNARC',
'permcorr']
"""Red --> Green colormap with 1024 interpolated values"""
_cdict = {'green' : ((0, 1, 1), (0.5, 0, 0), (1, 0, 0)),
'red': ((0, 0, 0), (0.5, 0, 0), (1, 1, 1)),
'blue' : ((0, 0, 0), (1, 0, 0))}
#_heatCmap = matplotlib.colors.LinearSegmentedColormap('my_colormap', _cdict, 1024)
_heatCmap = palettable.colorbrewer.diverging.RdBu_11_r.mpl_colormap
def partialcorr(x, y, adjust=[], method='pearson', minN=None):
"""Finds partial correlation of x with y adjusting for variables in adjust
This function is index aware (i.e. uses index of x, y and adjust for joining).
Rho and p-value match those from stats.spearmanr, and stats.pearsonr when adjust = [].
TODO:
(1) Compute CIs
(2) Make into its own testable module
(3) Include partial_corr gist
(4) Include function to compute whole partial correlation matrix
(5) Add second method which takes correlation of residuals (should be equivalent, but is nice test)
Parameters
----------
x,y : pd.Series
Each contains data for assessing correlation.
adjust : list of pd.Series objects
Correlation is assessed between x and y adjusting for all variables in z (default: [])
method : string
Method can be 'pearson' (default) or 'spearman', which uses rank-based correlation and adjustment.
minN : int
Minimum number of non-nan paired observations. If N < minN then returns pc = nan and p = 1
Returns
-------
partial_rho : float
Partial correlation coefficient between x and y after adjustment.
pvalue : float
P-value for the partial correlation coefficient."""
if not isinstance(x, pd.Series):
x = pd.Series(x, name = 'X')
if not isinstance(y, pd.Series):
y = pd.Series(y, name = 'Y')
assert x.shape[0] == y.shape[0]
if x.name == y.name:
x.name += '_X'
y.name += '_Y'
"""Make one big DataFrame out of x, y and adjustment variables"""
tmpDf = pd.concat((x,y), join='inner', axis=1)
for a in adjust:
tmpDf = tmpDf.join(a, how='left')
tmpDf = tmpDf.dropna(axis=0, how='any')
if not minN is None and tmpDf.shape[0] < minN:
return np.nan, 1.
m = np.zeros((tmpDf.shape[0], 2+len(adjust)))
if method == 'spearman':
"""Convert data to ranks"""
m[:,0] = tmpDf[x.name].rank()
m[:,1] = tmpDf[y.name].rank()
for i,a in enumerate(adjust):
m[:,i+2] = tmpDf[a.name].rank()
else:
m[:,0] = tmpDf[x.name]
m[:,1] = tmpDf[y.name]
for i,a in enumerate(adjust):
m[:,i+2] = tmpDf[a.name]
if all(m[:,0] == m[:,1]):
"""Testing for perfect correlation avoids SingularMatrix exception"""
return 1,0.0
"""Take the inverse of the covariance matrix including all variables
pc = -p_ij / sqrt(p_ii * p_ij)
where p is the inverse covariance matrix"""
try:
icv = np.linalg.inv(np.cov(m,rowvar=0))
pc = -icv[0,1] / np.sqrt(icv[0,0] * icv[1,1])
n = m.shape[0]
gn = len(adjust)
statistic = pc * np.sqrt((n-2-gn)/(1-pc**2))
#pvalue = 2*stats.norm.cdf(-abs(statistic))
        #SAS and pearsonr look the statistic up in a t distribution while R uses the normal
pvalue = 2*stats.t.cdf(-np.abs(statistic),n-2-gn)
except:
"""These were used to check that non-partial rho's and pvalues match those of their scipy equivalents
They do! Use them if the other fails and warn the caller"""
if method == 'pearson':
pc,pvalue = stats.pearsonr(tmpDf[x.name].values,tmpDf[y.name].values)
else:
pc,pvalue = stats.spearmanr(tmpDf[x.name].values,tmpDf[y.name].values)
if len(adjust) > 0:
warnings.warn('Error computing %s and %s correlation: using scipy equivalent to return UNADJUSTED results' % (x.name,y.name))
else:
warnings.warn('Error computing %s and %s correlation: using scipy equivalent' % (x.name,y.name))
#raise
"""Below verifies that the p-value for the coefficient in the multivariate model including adjust
is the same as the p-value of the partial correlation"""
"""formula_like=ModelDesc([Term([LookupFactor(y.name)])],[Term([]),Term([LookupFactor(x.name)])]+[Term([LookupFactor(a.name)]) for a in adjust])
Y, X = dmatrices(formula_like, data=tmpDf, return_type='dataframe')
model=sm.GLM(Y,X,family=sm.families.Gaussian())
print model.fit().summary()"""
return pc, pvalue
def combocorrplot(data,method='spearman',axLimits='variable',axTicks=False,axTicklabels=False,valueFlag=True,ms=2, plotLine = False):
"""Shows correlation scatter plots in combination with a heatmap for small sets of variables.
Parameters
----------
data : pd.DataFrame
method : string
Correlation method, can be 'pearson' or 'spearman'
axLimits : string
If 'variable' then allows the limits to be different for each pair of variables.
axTicks : bool
Display axis tick marks on each square?
axTicklabels : bool
Display axis tick labels on each square?
valueFlag : bool
Display correlation coefficient in each square?
ms : int
Scatter plot marker size in points.
plotLine : bool
Plot fit-line on the subplots?"""
border = 0.05
pad = 0.02
cbwidth = 0.1
labels = data.columns
"""Use pd.DataFrame method to compute the pairwise correlations"""
coef = data.corr(method=method)
n = coef.shape[0]
axh = np.empty((n,n), dtype=object)
plth = np.empty((n,n), dtype=object)
mx = None
mn = None
for col in data.columns:
        if mx is None:
mx = data[col].max()
mn = data[col].min()
mx = max(data[col].max(),mx)
mn = min(data[col].min(),mn)
plt.clf()
fh = plt.gcf()
gs = GridSpec(n,n, left=border,bottom=border,right=1.-border-cbwidth,top=1.-border,wspace=pad,hspace=pad)
#cbgs=GridSpec(1,1,left=1.-cbwidth,bottom=border,right=1.-border,top=1.-border,wspace=pad,hspace=pad)
for r in range(n):
for c in range(n):
if r == c:
axh[r,c] = fh.add_subplot(gs[r,c],yticklabels=[],xticklabels=[],xticks=[],yticks=[])# LIEL axisbg = 'gray')
plt.text(0,0,'%s' % (data.columns[r]),ha='center',va='center')
plt.axis([-1,1,-1,1])
elif r>c:
if axTicks:
if axTicklabels:
if r < len(labels)-1 and c>0:
axh[r,c] = fh.add_subplot(gs[r,c],xticklabels=[],yticklabels=[])
elif r < len(labels)-1 and c==0:
axh[r,c] = fh.add_subplot(gs[r,c],xticklabels=[])
elif r == len(labels)-1 and c>0:
axh[r,c] = fh.add_subplot(gs[r,c],yticklabels=[])
elif r == len(labels)-1 and c==0:
axh[r,c] = fh.add_subplot(gs[r,c])
else:
axh[r,c] = fh.add_subplot(gs[r,c],xticklabels=[],yticklabels=[])
else:
axh[r,c] = fh.add_subplot(gs[r,c],xticks=[],yticks=[])
plotx = data[labels[r]]
ploty = data[labels[c]]
validInd = (~np.isnan(plotx)) & (~np.isnan(ploty))
plotx,ploty = plotx[validInd], ploty[validInd]
if method == 'pearson' and plotLine:
ar,br = polyfit(plotx,ploty,1)
xfit = np.array([min(plotx),max(plotx)])
yfit = polyval([ar,br],xfit)
plt.plot(xfit,yfit,'-',lw=1,color='gray')
plt.plot(plotx, ploty, 'ok', ms = ms)
if axLimits == 'variable':
rmax,rmin = max(plotx),min(plotx)
cmax,cmin = max(ploty),min(ploty)
else:
rmax,cmax = mx,mx
rmin,cmin = mn,mn
plt.axis([rmin-0.1*(rmax-rmin), rmax+0.1*(rmax-rmin),cmin-0.1*(cmax-cmin), cmax+0.1*(cmax-cmin)])
elif r < c:
axh[r,c] = fh.add_subplot(gs[r,c],yticklabels=[],xticklabels=[],xticks=[],yticks=[])
val = coef[labels[r]][labels[c]]
plth[r,c] = plt.pcolor(np.ones((2,2))*val, cmap=_heatCmap, vmin=-1., vmax=1.)
plt.axis([0,1,0,1])
if valueFlag:
if val<0.5 and val>-0.5:
txtcol = 'white'
else:
txtcol = 'black'
plt.text(0.5,0.5,'%1.2f' % (val),ha='center',va='center',family='monospace',color=txtcol)
cbax = fh.add_axes([1.-cbwidth-border/2,border,cbwidth-border-0.02,1.-2*border])
cb = plt.colorbar(plth[0,0],cax=cbax)
method = method[0].upper() + method[1:]
plt.annotate('%s correlation' % (method),[0.98,0.5],xycoords='figure fraction',ha='right',va='center',rotation='vertical')
def pwpartialcorr(df, rowVars=None, colVars=None, adjust=[], method='pearson', minN=0, adjMethod='fdr_bh'):
"""Pairwise partial correlation.
Parameters
----------
df : pd.DataFrame [samples, variables]
Data for correlation assessment (Nans will be ignored for each column pair)
rowVars, colVars : lists
List of column names to incude on heatmap axes.
adjust : list
List of column names that will be adjusted for in the pairwise correlations.
method : string
        Specifies whether a pearson or spearman correlation is performed. (default: 'pearson')
minN : int
If a correlation has fewer than minN samples after dropping Nans
it will be reported as rho = 0, pvalue = 1 and will not be included in the multiplicity adjustment.
Returns
-------
rho : pd.DataFrame [rowVars, colVars]
Correlation coefficients.
pvalue : pd.DataFrame [rowVars, colVars]
Pvalues for pairwise correlations.
qvalue : pd.DataFrame [rowVars, colVars]
Multiplicity adjusted q-values for pairwise correlations."""
if rowVars is None:
rowVars = df.columns
if colVars is None:
colVars = df.columns
pvalue = np.zeros((len(rowVars),len(colVars)))
qvalue = np.nan * np.zeros((len(rowVars),len(colVars)))
rho = np.zeros((len(rowVars),len(colVars)))
"""Store p-values in dict with keys that are unique pairs (so we only adjust across these)"""
pairedPvalues = {}
allColumns = df.columns.tolist()
for i,rowv in enumerate(rowVars):
for j,colv in enumerate(colVars):
if not rowv == colv:
if not df[[rowv,colv]].dropna().shape[0] < minN:
rho[i,j],pvalue[i,j] = partialcorr(df[rowv],df[colv],adjust=[df[a] for a in adjust], method=method)
else:
"""Pvalue = nan excludes these from the multiplicity adjustment"""
rho[i,j],pvalue[i,j] = 1,np.nan
"""Define unique key for the pair by sorting in order they appear in df columns"""
key = tuple(sorted([rowv,colv], key = allColumns.index))
pairedPvalues.update({key:pvalue[i,j]})
else:
"""By setting these pvalues to nan we exclude them from multiplicity adjustment"""
rho[i,j],pvalue[i,j] = 1,np.nan
"""Now only adjust using pvalues in the unique pair dict"""
keys = pairedPvalues.keys()
qvalueTmp = _pvalueAdjust(np.array([pairedPvalues[k] for k in keys]), method=adjMethod)
"""Build a unique qvalue dict from teh same unique keys"""
pairedQvalues = {k:q for k,q in zip(keys,qvalueTmp)}
"""Assign the unique qvalues to the correct comparisons"""
for i,rowv in enumerate(rowVars):
for j,colv in enumerate(colVars):
if not rowv == colv:
key = tuple(sorted([rowv,colv], key = allColumns.index))
qvalue[i,j] = pairedQvalues[key]
else:
pvalue[i,j] = 0.
qvalue[i,j] = 0.
pvalue = pd.DataFrame(pvalue, index=rowVars, columns=colVars)
qvalue = pd.DataFrame(qvalue, index=rowVars, columns=colVars)
rho = pd.DataFrame(rho, index=rowVars, columns=colVars)
return rho, pvalue, qvalue
def crosscorr(dfA, dfB, method='pearson', minN=0, adjMethod='fdr_bh'):
"""Pairwise correlations between A and B after a join,
when there are potential column name overlaps.
Parameters
----------
dfA,dfB : pd.DataFrame [samples, variables]
DataFrames for correlation assessment (Nans will be ignored in pairwise correlations)
method : string
        Specifies whether a pearson or spearman correlation is performed. (default: 'pearson')
minN : int
If a correlation has fewer than minN samples after dropping Nans
it will be reported as rho = 0, pvalue = 1 and will not be included in the multiplicity adjustment.
Returns
-------
rho : pd.DataFrame [rowVars, colVars]
Correlation coefficients.
pvalue : pd.DataFrame [rowVars, colVars]
Pvalues for pairwise correlations.
qvalue : pd.DataFrame [rowVars, colVars]
Multiplicity adjusted q-values for pairwise correlations."""
colA = dfA.columns
colB = dfB.columns
dfA = dfA.rename_axis(lambda s: s + '_A', axis=1)
dfB = dfB.rename_axis(lambda s: s + '_B', axis=1)
joinedDf = pd.merge(dfA, dfB, left_index=True, right_index=True)
rho, pvalue, qvalue = pwpartialcorr(joinedDf, rowVars=dfA.columns, colVars=dfB.columns, method=method, minN=minN, adjMethod=adjMethod)
rho.index = colA
rho.columns = colB
pvalue.index = colA
pvalue.columns = colB
qvalue.index = colA
qvalue.columns = colB
return rho, pvalue, qvalue
def corrheatmap(df, rowVars=None, colVars=None, adjust=[], annotation=None, cutoff=None, cutoffValue=0.05, method='pearson', labelLookup={}, xtickRotate=True, labelSize='medium', minN=0, adjMethod='fdr_bh'):
"""Compute pairwise correlations and plot as a heatmap.
Parameters
----------
df : pd.DataFrame [samples, variables]
Data for correlation assessment (Nans will be ignored for each column pair)
rowVars, colVars : lists
        List of column names to include on heatmap axes.
adjust : list
List of column names that will be adjusted for in the pairwise correlations.
annotation : string
Specify what is annotated in each square of the heatmap (e.g. pvalue, qvalue, rho, rho2)
cutoff : str
Specify how to apply cutoff (e.g. pvalue, qvalue, rho, rho2)
cutoffValue : float
Absolute minimum threshold for squares whose color is displayed (color is proportional to rho).
method : string
        Specifies whether a pearson or spearman correlation is performed. (default: 'pearson')
labelLookup : dict
Used to translate column names into appropriate label strings.
xtickRotate : bool
Specify whether to rotate the labels along the x-axis
labelSize : str or int
Size of x- and y-ticklabels by string (e.g. "large") or points
minN : int
If a correlation has fewer than minN samples after dropping Nans
it will be reported as rho = 0, pvalue = 1 and will not be included in the multiplicity adjustment.
Returns
-------
rho : ndarray [samples, variables]
Matrix of correlation coefficients.
pvalue : ndarray [samples, variables]
Matrix of pvalues for pairwise correlations.
qvalue : ndarray [samples, variables]
Matrix of multiplicity adjusted q-values for pairwise correlations."""
if rowVars is None:
rowVars = df.columns
if colVars is None:
colVars = df.columns
if cutoff is None:
cutoff = 'pvalue'
rho,pvalue,qvalue = pwpartialcorr(df, rowVars=rowVars, colVars=colVars, adjust=adjust, method=method, minN=minN)
plt.clf()
fh = plt.gcf()
pvalueTxtProp = dict(family='monospace',
size='large',
weight='bold',
color='white',
ha='center',
va='center')
axh = fh.add_subplot(111, yticks = np.arange(len(rowVars))+0.5,
xticks = np.arange(len(colVars))+0.5)
if xtickRotate:
rotation = 'vertical'
else:
rotation = 'horizontal'
_ = axh.set_xticklabels(map(lambda key: labelLookup.get(key,key),colVars),rotation=rotation,size=labelSize)
_ = axh.set_yticklabels(map(lambda key: labelLookup.get(key,key),rowVars),size=labelSize)
tmprho = rho.copy()
if cutoff == 'qvalue':
criticalValue = qvalue
elif cutoff == 'pvalue':
criticalValue = pvalue
elif cutoff == 'rho':
criticalValue = np.abs(rho)
elif cutoff == 'rho2':
criticalValue = rho**2
tmprho[~(criticalValue <= cutoffValue)] = 0.
plt.pcolor(tmprho, cmap=_heatCmap, vmin=-1., vmax=1.)
for i in range(len(rowVars)):
for j in range(len(colVars)):
if criticalValue.iloc[i,j] <= cutoffValue and not rowVars[i] == colVars[j]:
ann = ''
if annotation == 'pvalue':
if pvalue.iloc[i,j] > 0.001:
ann = '%1.3f' % pvalue.iloc[i,j]
else:
ann = '%1.1e' % pvalue.iloc[i,j]
elif annotation == 'rho':
ann = '%1.2f' % rho.iloc[i,j]
elif annotation == 'rho2':
ann = '%1.2f' % (rho.iloc[i,j] ** 2)
elif annotation == 'qvalue':
                    if qvalue.iloc[i,j] > 0.001:
ann = '%1.3f' % qvalue.iloc[i,j]
else:
ann = '%1.1e' % qvalue.iloc[i,j]
if not ann == '':
plt.text(j+0.5, i+0.5, ann, **pvalueTxtProp)
plt.colorbar(fraction=0.05)
method = method[0].upper() + method[1:]
plt.annotate('%s correlation' % method,[0.98,0.5], xycoords='figure fraction', ha='right', va='center', rotation='vertical')
return rho, pvalue, qvalue
def scatterfit(x, y, method='pearson', adjustVars=[], labelLookup={}, plotLine=True, annotateFit=True, annotatePoints=False, returnModel=False, lc='gray', **kwargs):
"""Scatter plot of x vs. y with a fitted line overlaid.
Expects x and y as pd.Series but will accept arrays.
Prints covariate unadjusted AND adjusted rho/pvalues on the figure.
Plots covariate unadjusted data.
Parameters
----------
x,y : ndarrays or pd.Series
method : string
'pearson'
adjustVars : list
labelLookup : dict
plotLine : bool
annotateFit : bool
annotatePoints : bool
returnModel : bool
kwargs : additional keyword arguments
Passed to the plot function for the data points.
Returns
-------
model : statsmodels GLM object
Optionally the fitted model, depending on returnModel."""
k = kwargs.keys()
if not 'mec' in k:
kwargs.update({'mec':'k'})
if not 'mfc' in k:
kwargs.update({'mfc':'k'})
if not 'ms' in k:
kwargs.update({'ms':5})
"""Try to force X and Y into pandas.Series objects"""
if not isinstance(x, pd.core.series.Series):
x = pd.Series(x, name='X')
if not isinstance(y, pd.core.series.Series):
y = pd.Series(y, name='Y')
xlab = x.name
ylab = y.name
if xlab == ylab:
ylab = 'y_'+ylab
xlab = 'x_'+xlab
x.name = xlab
y.name = ylab
tmpDf = pd.concat((x,y,), axis=1, join='inner')
for av in adjustVars:
tmpDf = pd.concat((tmpDf,pd.DataFrame(av)), axis=1)
"""Drop any row with a nan in either column"""
tmpDf = tmpDf.dropna(axis=0, how='any')
plt.gca().set_xmargin(0.2)
plt.gca().set_ymargin(0.2)
unrho,unp = partialcorr(tmpDf[xlab],tmpDf[ylab],method=method)
"""Print unadjusted AND adjusted rho/pvalues
Plot unadjusted data with fit though..."""
if method == 'spearman' and plotLine:
#unrho,unp=stats.spearmanr(tmpDf[xlab],tmpDf[ylab])
if unrho > 0:
plt.plot(sorted(tmpDf[xlab]),sorted(tmpDf[ylab]),'-',color=lc)
else:
plt.plot(sorted(tmpDf[xlab]),sorted(tmpDf[ylab],reverse=True),'-',color=lc)
elif method == 'pearson' and plotLine:
#unrho,unp=stats.pearsonr(tmpDf[xlab],tmpDf[ylab])
formula_like = ModelDesc([Term([LookupFactor(ylab)])],[Term([]),Term([LookupFactor(xlab)])])
Y, X = dmatrices(formula_like, data=tmpDf, return_type='dataframe')
model = sm.GLM(Y,X,family=sm.families.Gaussian())
results = model.fit()
mnmxi = np.array([tmpDf[xlab].idxmin(),tmpDf[xlab].idxmax()])
plt.plot(tmpDf[xlab][mnmxi],results.fittedvalues[mnmxi],'-',color=lc)
plt.plot(tmpDf[xlab],tmpDf[ylab],'o',**kwargs)
if annotatePoints:
annotationParams = dict(xytext=(0,5), textcoords='offset points', size='medium')
for x,y,lab in zip(tmpDf[xlab],tmpDf[ylab],tmpDf.index):
plt.annotate(lab, xy=(x, y), **annotationParams)
if annotateFit:
if unp>0.001:
s = 'p = %1.3f\nrho = %1.2f\nn = %d' % (unp, unrho, tmpDf.shape[0])
else:
s = 'p = %1.1e\nrho = %1.2f\nn = %d' % (unp, unrho, tmpDf.shape[0])
textTL(plt.gca(),s,color='black')
if len(adjustVars) > 0:
rho,p = partialcorr(tmpDf[xlab], tmpDf[ylab], adjust = adjustVars, method = method)
if p>0.001:
s = 'adj-p = %1.3f\nadj-rho = %1.2f\nn = %d' % (p, rho, tmpDf.shape[0])
else:
s = 'adj-p = %1.1e\nadj-rho = %1.2f\nn = %d' % (p, rho, tmpDf.shape[0])
textTR(plt.gca(),s,color='red')
plt.xlabel(labelLookup.get(xlab,xlab))
plt.ylabel(labelLookup.get(ylab,ylab))
if returnModel:
return model
def _pvalueAdjust(pvalues, method = 'fdr_bh'):
"""Convenient function for doing p-value adjustment
Accepts any matrix shape and adjusts across the entire matrix
Ignores nans appropriately
1) Pvalues can be DataFrame or Series or array
2) Turn it into a one-dimensional vector
3) Qvalues intialized at p to copy nans in the right places
4) Drop the nans, calculate qvalues, copy to qvalues vector
5) Reshape qvalues
6) Return same type as pvalues
"""
p = np.array(pvalues).flatten()
qvalues = deepcopy(p)
nanInd = np.isnan(p)
dummy,q,dummy,dummy = sm.stats.multipletests(p[~nanInd], alpha=0.2, method=method)
qvalues[~nanInd] = q
qvalues = qvalues.reshape(pvalues.shape)
if type(pvalues) is pd.core.frame.DataFrame:
return pd.DataFrame(qvalues,columns=[x+'_q' for x in pvalues.columns],index=pvalues.index)
elif type(pvalues) is pd.core.series.Series:
return pd.Series(qvalues,name=pvalues.name+'_q',index=pvalues.index)
else:
return qvalues
def validPairwiseCounts(df, cols=None):
"""Count the number of non-NA data points for
all pairs of cols in df, as would be needed for
generating a correlation heatmap.
Useful for determining a threshold minimum number of
data pairs for a valid correlation.
Parameters
----------
df : pd.DataFrame
cols : list
Column names to consider
Returns
-------
pwCounts : pd.DataFrame
DataFrame with columns and index matching cols"""
if cols is None:
cols = df.columns
n = len(cols)
pwCounts = pd.DataFrame(np.zeros((n,n)), index=cols, columns=cols)
for colA,colB in itertools.product(cols,cols):
if colA == colB:
pwCounts.loc[colA,colA] = df[colA].dropna().shape[0]
elif colA > colB:
n = df[[colA,colB]].dropna().shape[0]
pwCounts.loc[colA,colB] = n
pwCounts.loc[colB,colA] = n
return pwCounts
def heatmap(df, colLabels=None, rowLabels=None, labelSize='medium', **kwargs):
"""Heatmap based on values in df
Parameters
----------
df : pd.DataFrame
All data in df will be included in heatmap
colLabels : list
Strings to replace df column names as x-tick labels
rowLabels : list
Strings to replace df index as y-tick labels
labelSize : fontsize in points or str (e.g. 'large')
kwargs : dict
Passed to pcolor()"""
if not 'cmap' in kwargs:
kwargs['cmap'] = _heatCmap
if colLabels is None:
colLabels = df.columns
if rowLabels is None:
rowLabels = df.index
plt.clf()
axh = plt.subplot(111)
nrows,ncols = df.shape
plt.pcolor(df.values, **kwargs)
axh.xaxis.tick_top()
plt.xticks(np.arange(ncols) + 0.5)
plt.yticks(np.arange(nrows) + 0.5)
xlabelsL = axh.set_xticklabels(colLabels, size=labelSize, rotation=90, fontname='Consolas')
ylabelsL = axh.set_yticklabels(rowLabels, size=labelSize, fontname='Consolas')
plt.ylim((nrows,0))
plt.xlim((0,ncols))
plt.colorbar(fraction=0.05)
plt.tight_layout()
def removeNARC(inDf,minRow=1, minCol=1, minFrac=None):
"""Removes all columns and rows that don't have at least
minX non-NA values. Considers columns then rows iteratively
    until the criteria are met or all columns or rows have been removed."""
def _validCols(df,minCol):
return [col for col in df.columns if (df.shape[0] - df[col].isnull().sum()) >= minCol]
def _validRows(df,minRow):
return [row for row in df.index if (df.shape[1] - df.loc[row].isnull().sum()) >= minRow]
df = inDf.copy()
if not minFrac is None:
minRow = np.round(df.shape[1] * minFrac)
minCol = np.round(df.shape[0] * minFrac)
nRows = df.shape[0] + 1
nCols = df.shape[1] + 1
while (nCols > df.shape[1] or nRows > df.shape[0]) and df.shape[0]>0 and df.shape[1]>0:
nRows, nCols = df.shape
df = df[_validCols(df,minCol)]
df = df.loc[_validRows(df,minRow)]
return df
def permcorr(a,b,corrFunc, nperms = 10000):
"""Use shuffled permutations of a and b (np.ndarrays or pd.Series)
to estimate the correlation p-value and rho with CIs (TODO)
Parameters
----------
a,b : np.ndarray or pd.Series
corrFunc : function
Parameters are a and b with return value rho, p-value
Returns
-------
rho : float
p : float"""
if isinstance(a,pd.Series):
a = a.values
if isinstance(b,pd.Series):
b = b.values
rhoShuff = np.zeros(nperms)
pShuff = np.zeros(nperms)
rho,pvalue = corrFunc(a,b)
L = a.shape[0]
for permi in np.arange(nperms):
rind = np.floor(np.random.rand(L) * L).astype(int)
rhoShuff[permi],pShuff[permi] = corrFunc(a,b[rind])
if rho >= 0:
p = ((rhoShuff >= rho).sum() + 1)/(nperms + 1)
else:
p = ((rhoShuff <= rho).sum() + 1)/(nperms + 1)
return rho, p
```
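A small synthetic check of `partialcorr`, assuming the file is importable as `corrplots`, shows how adjusting for a shared confounder shrinks the apparent correlation between two variables:
```python
import numpy as np
import pandas as pd
import corrplots  # assumes the module above is on the Python path

rng = np.random.RandomState(0)
z = pd.Series(rng.normal(size=200), name='confounder')
x = pd.Series(z + rng.normal(scale=0.5, size=200), name='x')
y = pd.Series(z + rng.normal(scale=0.5, size=200), name='y')
rho_raw, p_raw = corrplots.partialcorr(x, y, method='pearson')
rho_adj, p_adj = corrplots.partialcorr(x, y, adjust=[z], method='pearson')
print('unadjusted rho=%.2f (p=%.2g); adjusted rho=%.2f (p=%.2g)' % (rho_raw, p_raw, rho_adj, p_adj))
```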
#### File: cytomod/otherTools/myboxplot.py
```python
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import permutation,seed
import pandas as pd
#import seaborn as sns
__all__ = ['scatterdots',
'myboxplot',
'manyboxplots']
def scatterdots(data, x, axh=None, width=0.8, returnx=False, rseed=820, **kwargs):
"""Dots plotted with random x-coordinates and y-coordinates from data array.
Parameters
----------
data : ndarray
x : float
Specifies the center of the dot cloud on the x-axis.
axh : matplotlib figure handle
If None then use plt.gca()
width : float
Specifies the range of the dots along the x-axis.
returnx : bool
If True, return the x-coordinates of the plotted data points.
rseed : float
Random seed. Defaults to a constant so that regenerated figures of
the same data are identical.
Returns
-------
Optionally returns the x-coordinates as plotted."""
if axh is None:
axh = plt.gca()
np.random.seed(rseed)
if data is None or len(data) == 0:
if returnx:
return None
return
if not type(data) == np.ndarray:
data = np.array(data)
validi = np.arange(len(data))
if any(np.isnan(data)):
validi = np.where(np.logical_not(np.isnan(data)))[0]
ploty = data[validi]
if len(ploty) == 0:
if returnx:
return None
return
w = width
plotx = np.random.permutation(np.linspace(-w/2., w/2., len(ploty)) + x)
axh.scatter(plotx, ploty, **kwargs)
if returnx:
outx = np.nan * np.ones(data.shape)
outx[validi] = plotx
return outx
def myboxplot(data, x = 1, axh=None, width=0.8, boxcolor='black',scatterwidth=0.6,dotcolor='red',returnx=False,subsetInd=None,altDotcolor='gray',violin=False,**kwargs):
"""Make a boxplot with scatterdots overlaid.
Parameters
----------
data : np.ndarray or pd.Series
x : float
Position of box along x-axis.
axh : matplotlib figure handle
If None then use plt.gca()
width : float
Width of the box.
boxcolor : mpl color
scatterwidth : float
Width of the spread of the data points.
dotcolor : mpl color
subsetInd : boolean or int index
Indicates a subset of the data that should be summarized in the boxplot.
However, all data points will be plotted.
altDotcolor : mpl color
Specify the color of the data points that are not in the subset.
returnx : bool
Return the x-coordinates of the data points.
violin : bool
Specify whether the box is a violin plot.
Returns
-------
outx : np.ndarray
        Optionally, an array of the x-coordinates as plotted."""
if axh is None:
axh = plt.gca()
if type(data) is pd.Series:
data = data.values
if not subsetInd is None:
if not (subsetInd.dtype == np.array([0,1], dtype=bool).dtype):
tmp = np.zeros(data.shape, dtype=bool)
tmp[subsetInd] = True
subsetInd = tmp
else:
subsetInd = np.ones(data.shape, dtype=bool)
subsetInd = np.asarray(subsetInd)
if not 's' in kwargs:
kwargs['s'] = 20
if not 'marker' in kwargs:
kwargs['marker'] = 'o'
if not 'linewidths' in kwargs:
kwargs['linewidths'] = 0.5
"""Boxplot with dots overlaid"""
outx = np.zeros(data.shape)
if subsetInd.sum() > 0:
if not boxcolor == 'none' and not boxcolor is None:
if violin and False:
sns.violinplot(data[subsetInd], color = boxcolor, positions = [x], alpha = 0.5)
else:
bp = axh.boxplot(data[subsetInd], positions = [x], widths = width, sym = '')
for element in bp.keys():
for b in bp[element]:
b.set_color(boxcolor)
kwargs['c'] = dotcolor
subsetx = scatterdots(data[subsetInd], x = x, axh = axh, width = scatterwidth, returnx = True, **kwargs)
outx[subsetInd] = subsetx
if (~subsetInd).sum() > 0:
kwargs['c'] = altDotcolor
subsetx = scatterdots(data[~subsetInd], x = x, axh = axh, width = scatterwidth, returnx = True, **kwargs)
outx[~subsetInd] = subsetx
if returnx:
return outx
def manyboxplots(df, cols=None, axh=None, colLabels=None,annotation='N',horizontal=False,vRange=None,xRot=0, **kwargs):
"""Series of boxplots along x-axis (or flipped horizontally along y-axis [NOT IMPLEMENTED])
WORK IN PROGRESS
Optionally add annotation for each boxplot with:
(1) "N"
(2) "pctpos" (response rate, by additionally specifying responders)
NOT YET IMPLEMENTED
Parameters
----------
df : pd.DataFrame
cols : list
Column names to be plotted
axh : matplotlib figure handle
If None then use plt.gca()
colLabels : list
Column labels (optional)
annotation : str or None
Specifies what the annotation should be: "N" or "pctpos"
horizontal : bool
Specifies whether boxplots should be vertical (default, False) or horizontal (True)
kwargs : additional arguments
Passed to myboxplot function to specify colors etc."""
if axh is None:
axh = plt.gca()
if cols is None:
cols = df.columns
if colLabels is None:
colLabels = cols
    elif len(colLabels) < len(cols):
colLabels += cols[len(colLabels):]
for x,c in enumerate(cols):
myboxplot(df[c].dropna(), x = x, axh = axh, **kwargs)
if not vRange is None:
plt.ylim(vRange)
yl = plt.ylim()
annotationKwargs = dict(xytext = (0,-10), textcoords = 'offset points', ha = 'center', va = 'top', size = 'medium')
for x,c in enumerate(cols):
tmp = df[c].dropna()
if annotation == 'N':
plt.annotate('%d' % len(tmp), xy = (x, yl[1]), **annotationKwargs)
elif annotation == 'pctpos':
pass
plt.xlim((-1,x+1))
plt.xticks(np.arange(x+1))
xlabelsL = axh.set_xticklabels(colLabels, fontsize = 'large', rotation = xRot, fontname = 'Consolas')
```
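A minimal usage sketch for `myboxplot`, assuming the file is importable as `myboxplot`, draws two boxplots with overlaid dot clouds for two random groups:
```python
import numpy as np
import matplotlib.pyplot as plt
import myboxplot  # assumes the module above is on the Python path

np.random.seed(0)
group_a = np.random.normal(loc=0.0, size=40)
group_b = np.random.normal(loc=1.0, size=40)
plt.figure(figsize=(4, 4))
myboxplot.myboxplot(group_a, x=1, dotcolor='red')
myboxplot.myboxplot(group_b, x=2, dotcolor='blue')
plt.xticks([1, 2], ['group A', 'group B'])
plt.show()
```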
|
{
"source": "jeremychauvet/aws-python-sdk-showroom",
"score": 3
}
|
#### File: kms/get-key-rotation-status/main.py
```python
import boto3
import botocore
# List all KMS keys that do not have key rotation enabled.
client = boto3.client("kms", region_name="eu-central-1")
def list_keys():
try:
return client.list_keys(Limit=100)
    except botocore.exceptions.ClientError as e:
        print("[ERROR] Unable to retrieve key list.", e)
if __name__ == "__main__":
output = list_keys()
for row in output["Keys"]:
# Check key rotation status.
try:
response = client.get_key_rotation_status(KeyId=row["KeyId"])
if response["KeyRotationEnabled"] == False:
print(
"Key : "
+ row["KeyId"]
+ " - KeyRotationEnabled : "
+ str(response["KeyRotationEnabled"])
)
except botocore.exceptions.ClientError as e:
print("[ERROR]", e)
```
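The script above fetches at most `Limit=100` keys in a single call. A hedged sketch of the same check using a boto3 paginator so every key is visited; the region and error handling mirror the script and are otherwise assumptions.
```python
# Sketch: walk all KMS keys with a paginator instead of one list_keys call.
import boto3
import botocore

client = boto3.client("kms", region_name="eu-central-1")

def keys_without_rotation():
    paginator = client.get_paginator("list_keys")
    for page in paginator.paginate():
        for key in page["Keys"]:
            try:
                status = client.get_key_rotation_status(KeyId=key["KeyId"])
                if not status["KeyRotationEnabled"]:
                    yield key["KeyId"]
            except botocore.exceptions.ClientError as e:
                print("[ERROR]", e)

if __name__ == "__main__":
    for key_id in keys_without_rotation():
        print("Key :", key_id, "- KeyRotationEnabled : False")
```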
|
{
"source": "jeremychauvet/aws-sam-showroom",
"score": 2
}
|
#### File: aws-sam-showroom/cost-forecast/app.py
```python
import json
import boto3
import datetime
def handler(event, context):
try:
client = boto3.client('ce', region_name='eu-central-1')
response = client.get_usage_forecast(
TimePeriod={
'Start': str(datetime.date.today()),
'End': '2020-11-30'
},
Metric='USAGE_QUANTITY',
Granularity='MONTHLY',
Filter={
'Dimensions': {
'Key': 'USAGE_TYPE',
'Values': [
'hours',
]
}
},
)
except client.exceptions.DataUnavailableException as e:
# Send some context about this error to Lambda Logs
print(e)
raise e
return {
"statusCode": 200,
"body": json.dumps({
"response": response.text.replace("\n", "")
}),
}
```
|
{
"source": "jeremycheong/kcarhome",
"score": 3
}
|
#### File: jeremycheong/kcarhome/kcarhome.py
```python
import os
import config as cfg
from parserpage import htmlparser, getclasses, getclassbrandurl
TEST_URL = 'https://product.360che.com/img/c1_s65_b31_s116.html'
def get_brand_name(brand_url):
    sub_brand_soup = htmlparser(TEST_URL)  # NOTE: parses TEST_URL rather than the brand_url argument
brand_name_tag_list = sub_brand_soup.find('div', 'map_mal').find_all('a')[-3:]
brand_name = '_'.join(list(map(lambda brand_name_tag: brand_name_tag.get_text().replace('图片库', ''), brand_name_tag_list)))
print(brand_name)
if __name__ == "__main__":
get_brand_name(cfg.domain)
```
|
{
"source": "jeremycheong/MNN",
"score": 3
}
|
#### File: tools/MNNPythonOfflineQuant/mnn_offline_quant.py
```python
from __future__ import print_function
import time
import argparse
import numpy as np
import tqdm
import MNN
import yaml
from calibration_dataset import calibration_dataset
from test_dataset import ImagenetDataset
nn = MNN.nn
F = MNN.expr
def get_mnn_format(format_str):
fmt = str.lower(format_str)
if fmt == 'nchw':
return F.NCHW
elif fmt == 'nhwc':
return F.NHWC
elif fmt == 'nc4hw4':
return F.NC4HW4
else:
raise ValueError("unknown format:", format_str)
def quant_func(net, dataloader, opt):
net.train(True)
dataloader.reset()
t0 = time.time()
for i in tqdm.trange(dataloader.iter_number):
example = dataloader.next()
input_data = example[0]
predicts = net.forward(input_data)
# fake update
opt.step(F.const(0.0, []))
for predict in predicts:
predict.read()
t1 = time.time()
cost = t1 - t0
print("Epoch cost: %.3f s." % cost)
def main():
'''
offline quantization using MNN python api.
1. you need to convert your model to mnn model
2. you need to provide a calibration dataset by modifying preprocessing steps in
'calibration_dataset.py' to suit your case.
3. you need to provide a config yaml file in which provide input and output information about your model.
'''
parser = argparse.ArgumentParser()
parser.add_argument("--mnn_model", type=str, required=True,\
help="original float MNN model file")
parser.add_argument("--quant_model", type=str, required=True, \
help="name of quantized model to save")
parser.add_argument("--batch_size", type=int, required=False, default=32,\
help="calibration batch size")
args = parser.parse_args()
mnn_model = args.mnn_model
quant_model = args.quant_model
batch_size = args.batch_size
dataloader = MNN.data.DataLoader(calibration_dataset, batch_size=batch_size, shuffle=True)
m = F.load_as_dict(mnn_model)
inputs_outputs = F.get_inputs_and_outputs(m)
for key in inputs_outputs[0].keys():
print('input names:\t', key)
for key in inputs_outputs[1].keys():
print('output names:\t', key)
config_file = "config.yaml"
f = open(config_file)
config = yaml.load(f)
# get inputs and outputs
inputs = []
for name in config['inputs']['names']:
inputs.append(m[name])
outputs = []
for name in config['output_names']:
outputs.append(m[name])
input_placeholders = []
for i in range(len(inputs)):
shape = config['inputs']['shapes'][i]
fmt = config['inputs']['formats'][i]
        mnn_format = get_mnn_format(fmt)
        input_placeholders.append(F.placeholder(shape, mnn_format))
net = nn.load_module(inputs, outputs, True)
# no use optimizer
opt = MNN.optim.SGD(net, 0.01, 0.9, 0.0005)
nn.compress.train_quant(net, quant_bits=8)
quant_func(net, dataloader, opt)
# save model
net.train(False)
predicts = net.forward(input_placeholders)
print("quantized model save to " + quant_model)
F.save(predicts, quant_model)
if __name__ == "__main__":
main()
```
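The script above expects a `config.yaml` describing the model's inputs and outputs. A hedged sketch of generating one follows; only the key names (`inputs.names`, `inputs.shapes`, `inputs.formats`, `output_names`) come from the code above, and the values are illustrative assumptions for a typical image model.
```python
# Sketch: write a config.yaml with the keys mnn_offline_quant.py reads (values are assumptions).
import yaml

config = {
    "inputs": {
        "names": ["input"],            # must match the model's input tensor names
        "shapes": [[1, 3, 224, 224]],  # one shape per input
        "formats": ["nchw"],           # 'nchw', 'nhwc' or 'nc4hw4'
    },
    "output_names": ["output"],        # must match the model's output tensor names
}

with open("config.yaml", "w") as f:
    yaml.safe_dump(config, f)
```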
|
{
"source": "jeremychonggg/Alpaca-Trading-Bot",
"score": 3
}
|
#### File: jeremychonggg/Alpaca-Trading-Bot/backtest2.py
```python
import requests
import os
import json
import pandas as pd
from bs4 import BeautifulSoup
from copy import deepcopy
import numpy as np
import alpaca_trade_api as tradeapi
import time
endpoint = "https://data.alpaca.markets/v1"
headers = json.loads(open("key.txt", 'r').read())
# Web-scrape the list of top gainer stocks
day_gainers_stock = {}
def find_day_gainers_stock():
url = "https://finance.yahoo.com/gainers?count=100&offset=0"
page = requests.get(url)
page_content = page.content
soup = BeautifulSoup(page_content,'html.parser')
table = soup.find_all("table", {"class" : "W(100%)"})
for t in table:
rows = t.find_all("tr")
for row in rows:
day_gainers_stock[row.get_text(separator='|').split("|")[0]] = row.get_text(separator='|').split("|")[4]
# Web-scrape the list of top volume stocks
most_active_stock = {}
def find_most_active_stock():
url = "https://finance.yahoo.com/most-active?count=100&offset=0"
page = requests.get(url)
page_content = page.content
soup = BeautifulSoup(page_content,'html.parser')
table = soup.find_all("table", {"class" : "W(100%)"})
for t in table:
rows = t.find_all("tr")
for row in rows:
most_active_stock[row.get_text(separator='|').split("|")[0]] = row.get_text(separator='|').split("|")[5]
########## ENTRY STRATEGY - PREPARE LIST ##########
# Sort out 25 Top Gainer Stock from last trading day
find_day_gainers_stock() # Call this function on Monday 1 hour before market start
# Sort out 25 Highest Volume Stock from last trading day
find_most_active_stock() # Call this function on Monday 1 hour before market start
########## ENTRY STRATEGY - PICK TIMING ##########
########## BACKTESTING - PREPARE TOOLS ##########
def hist_data(symbols, timeframe="15Min", limit=200, start="", end="", after="", until=""):
"""
    Returns historical bar data for a string of symbols separated by comma.
Symbols should be in a string format separated by comma e.g. symbols = "MSFT,AMZN,GOOG".
"""
df_data = {}
bar_url = endpoint + "/bars/{}".format(timeframe)
params = {"symbols" : symbols,
"limit" : limit,
"start" : start,
"end" : end,
"after" : after,
"until" : until}
r = requests.get(bar_url, headers=headers, params=params)
json_dump = r.json()
for symbol in json_dump:
temp = pd.DataFrame(json_dump[symbol])
temp.rename({"t": "time",
"o": "open",
"h": "high",
"l": "low",
"c": "close",
"v": "volume"}, axis=1, inplace=True)
temp["time"] = pd.to_datetime(temp["time"], unit="s")
temp.set_index("time", inplace=True)
temp.index = temp.index.tz_localize("UTC").tz_convert("America/New_York")
        temp = temp.between_time("09:31", "16:00")
df_data[symbol] = temp
return df_data
def MACD(df_dict, a=12, b=26, c=9):
"""
function to calculate MACD
typical values: a(fast moving average) = 12;
b(slow moving average) = 26;
c(signal line ma window) = 9
"""
for df in df_dict:
df_dict[df]["ma_fast"] = df_dict[df]["close"].ewm(span=a, min_periods=a).mean()
df_dict[df]["ma_slow"] = df_dict[df]["close"].ewm(span=b, min_periods=b).mean()
df_dict[df]["macd"] = df_dict[df]["ma_fast"] - df_dict[df]["ma_slow"]
df_dict[df]["signal"] = df_dict[df]["macd"].ewm(span=c, min_periods=c).mean()
df_dict[df].drop(["ma_fast", "ma_slow"], axis=1, inplace=True)
def stochastic(df_dict, lookback=14, k=3, d=3):
"""
function to calculate Stochastic Oscillator
lookback = lookback period
k and d = moving average window for %K and %D
"""
for df in df_dict:
df_dict[df]["HH"] = df_dict[df]["high"].rolling(lookback).max()
df_dict[df]["LL"] = df_dict[df]["low"].rolling(lookback).min()
df_dict[df]["%K"] = (100 * (df_dict[df]["close"] - df_dict[df]["LL"]) / (df_dict[df]["HH"] - df_dict[df]["LL"])).rolling(k).mean()
df_dict[df]["%D"] = df_dict[df]["%K"].rolling(d).mean()
df_dict[df].drop(["HH", "LL"], axis=1, inplace=True)
# KPI
def CAGR(df_dict, candle_period='1day'):
"function to calculate the Cumulative Annual Growth Rate; DF should have return column"
absolute_return = (1 + df_dict["return"]).cumprod().iloc[-1]
if candle_period == '1day':
n = len(df_dict["return"]) / 252
elif candle_period == '1hour':
n = len(df_dict["return"]) / (252 * 8)
elif candle_period == '30min':
n = len(df_dict["return"]) / (252 * 8 * 2)
elif candle_period == '15min':
n = len(df_dict["return"]) / (252 * 8 * 4)
elif candle_period == '5min':
n = len(df_dict["return"]) / (252 * 8 * 12)
elif candle_period == '3min':
n = len(df_dict["return"]) / (252 * 8 * 20)
elif candle_period == '1min':
n = len(df_dict["return"]) / (252 * 8 * 60)
cagr = (absolute_return)**(1/n) - 1
# abs_return = (1 + df_dict["return"]).cumprod().iloc[-1]
# n = len(df_dict[df])/252
# cagr[df] = (abs_return)**(1/n) - 1
# for df in df_dict:
# abs_return = (1 + df_dict[df]["return"]).cumprod().iloc[-1]
# n = len(df_dict[df])/252
# cagr[df] = (abs_return)**(1/n) - 1
return cagr
def volatility(df_dict):
"function to calculate annualized volatility; DF should have ret column"
vol = {}
for df in df_dict:
vol[df] = df_dict[df]["return"].std() * np.sqrt(252)
return vol
def sharpe(df_dict, rf_rate):
"function to calculate sharpe ratio ; rf is the risk free rate"
sharpe = {}
cagr = CAGR(df_dict)
vol = volatility(df_dict)
for df in df_dict:
sharpe[df] = (cagr[df] - rf_rate)/vol[df]
return sharpe
def max_dd(df_dict):
"function to calculate max drawdown"
max_drawdown = {}
for df in df_dict:
df_dict[df]["cum_return"] = (1 + df_dict[df]["return"]).cumprod()
df_dict[df]["cum_max"] = df_dict[df]["cum_return"].cummax()
df_dict[df]["drawdown"] = df_dict[df]["cum_max"] - df_dict[df]["cum_return"]
df_dict[df]["drawdown_pct"] = df_dict[df]["drawdown"]/df_dict[df]["cum_max"]
max_drawdown[df] = df_dict[df]["drawdown_pct"].max()
df_dict[df].drop(["cum_return","cum_max","drawdown","drawdown_pct"], axis=1, inplace=True)
return max_drawdown
# INTRADAY KPI
def winRate(DF):
"""
    function to calculate the win rate of an intraday trading strategy
"""
df = DF["return"]
pos = df[df>1]
neg = df[df<1]
    return (len(pos) / (len(pos) + len(neg))) * 100
def meanReturnPerTrade(DF):
df = DF["return"]
df_temp = (df - 1).dropna()
return df_temp[df_temp != 0].mean()
def meanReturnWinRate(DF):
df = DF["return"]
df_temp = (df - 1).dropna()
return df_temp[df_temp > 0].mean()
def meanReturnLostRate(DF):
df = DF["return"]
df_temp = (df - 1).dropna()
return df_temp[df_temp < 0].mean()
def maxConsecutiveLoss(DF):
df = DF["return"]
df_temp = df.dropna(axis=0)
df_temp2 = np.where(df_temp < 1, 1, 0)
count_consecutive = []
seek = 0
for i in range(len(df_temp2)):
if df_temp2[i] == 0:
seek = 0
else:
seek = seek + 1
count_consecutive.append(seek)
if len(count_consecutive) > 0:
return max(count_consecutive)
else:
return 0
########## BACKTESTING - START ##########
# Prepare tickers list
tickers = ""
for ticker in most_active_stock:
tickers = tickers + "," + ticker
tickers = tickers[8:] # Remove the words ",symbol,"
# Get Historical Data
historicalData = hist_data(tickers, limit=1000)
ohlc_dict = deepcopy(historicalData)
stoch_signal = {}
tickers_signal = {}
tickers_return = {}
trade_count = {}
trade_data = {}
high_water_mark = {}
# avg_return = 0 #
MACD(ohlc_dict)
stochastic(ohlc_dict)
for ticker in tickers.split(","):
ohlc_dict[ticker].dropna(inplace=True)
stoch_signal[ticker] = ""
tickers_signal[ticker] = ""
trade_count[ticker] = 0
high_water_mark[ticker] = 0
tickers_return[ticker] = [0]
trade_data[ticker] = {}
# Calculate Return of each stock
for ticker in tickers.split(","):
print("Calculation returns for:", ticker)
for i in range(1, len(ohlc_dict[ticker])-1):
# Strategy 1: Check Stochastic
if ohlc_dict[ticker]["%K"][i] < 20:
stoch_signal[ticker] = "oversold"
elif ohlc_dict[ticker]["%K"][i] > 80:
stoch_signal[ticker] = "overbought"
# ENTRY STRATEGY
if tickers_signal[ticker] == "":
tickers_return[ticker].append(0)
# LONG STRATEGY
if ( ohlc_dict[ticker]["macd"][i] > ohlc_dict[ticker]["signal"][i] ) and \
( ohlc_dict[ticker]["macd"][i-1] > ohlc_dict[ticker]["signal"][i-1] ) and \
stoch_signal[ticker] == "oversold":
tickers_signal[ticker] = "Buy"
trade_count[ticker] += 1
trade_data[ticker][trade_count[ticker]] = [ohlc_dict[ticker]["open"][i+1]]
high_water_mark[ticker] = ohlc_dict[ticker]["open"][i+1]
# EXIT STRATEGY
elif tickers_signal[ticker] == "Buy":
# Check if stop loss triggered
if ohlc_dict[ticker]["low"][i] < 0.985 * high_water_mark[ticker]:
tickers_signal[ticker] = ""
trade_data[ticker][trade_count[ticker]].append(0.985*high_water_mark[ticker])
tickers_return[ticker].append( (0.985*high_water_mark[ticker] / ohlc_dict[ticker]["close"][i-1]) - 1 )
trade_count[ticker] += 1
else:
high_water_mark[ticker] = max(high_water_mark[ticker], ohlc_dict[ticker]["high"][i])
tickers_return[ticker].append( (ohlc_dict[ticker]["close"][i] / ohlc_dict[ticker]["close"][i-1]) - 1 )
if trade_count[ticker] % 2 != 0:
trade_data[ticker][trade_count[ticker]].append(ohlc_dict[ticker]["close"][i+1])
tickers_return[ticker].append(0)
ohlc_dict[ticker]["return"] = np.array(tickers_return[ticker])
# print(ohlc_dict[ticker]["return"][-1] * 100) # print(ohlc_dict[ticker]["return"].sum() * 100) #
# avg_return = avg_return + (ohlc_dict[ticker]["return"].sum() * 100) #
# print(avg_return) #
# Calculate Overall Strategy's KPI
strategy_df = pd.DataFrame()
for ticker in tickers.split(","):
strategy_df[ticker] = ohlc_dict[ticker]["return"]
strategy_df[ticker].fillna(0, inplace=True)
strategy_df["return"] = strategy_df.mean(axis=1)
print("CAGR:", CAGR(strategy_df, candle_period='15min'))
# sharpe(strategy_df, 0.03)
# max_dd(strategy_df)
(1 + strategy_df["return"]).cumprod().plot()
```
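A small worked example of the `candle_period` scaling inside `CAGR()` above, assuming the function is available in the same session; the synthetic 15-minute return series is an assumption used only to illustrate the annualization.
```python
# Sketch: how CAGR() annualizes a 15-minute return series (synthetic returns).
import numpy as np
import pandas as pd

bars_per_year = 252 * 8 * 4  # trading days * trading hours * 15-minute bars per hour, as in CAGR()
returns = pd.Series(np.full(bars_per_year, 0.2 / bars_per_year))  # ~20% spread evenly over one year

print(CAGR({"return": returns}, candle_period='15min'))  # roughly 0.22 once compounding is applied
```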
#### File: jeremychonggg/Alpaca-Trading-Bot/get_stock_data.py
```python
import json
import requests
import pandas as pd
import websocket
# Get Alpaca API Credential
endpoint = "https://data.alpaca.markets/v2"
headers = json.loads(open("key.txt", 'r').read())
def hist_data(symbols, start="2021-01-01", timeframe="1Hour", limit=50, end=""):
"""
    returns historical bar data for an iterable of ticker symbols,
    e.g. symbols = ["MSFT", "AMZN", "GOOG"]
"""
df_data_tickers = {}
for symbol in symbols:
bar_url = endpoint + "/stocks/{}/bars".format(symbol)
params = {"start":start, "limit" :limit, "timeframe":timeframe}
data = {"bars": [], "next_page_token":'', "symbol":symbol}
while True:
r = requests.get(bar_url, headers = headers, params = params)
r = r.json()
if r["next_page_token"] == None:
data["bars"]+=r["bars"]
break
else:
params["page_token"] = r["next_page_token"]
data["bars"]+=r["bars"]
data["next_page_token"] = r["next_page_token"]
df_data = pd.DataFrame(data["bars"])
df_data.rename({"t":"time","o":"open","h":"high","l":"low","c":"close","v":"volume"},axis=1, inplace=True)
df_data["time"] = pd.to_datetime(df_data["time"])
df_data.set_index("time",inplace=True)
df_data.index = df_data.index.tz_convert("America/Indiana/Petersburg")
df_data_tickers[symbol] = df_data
return df_data_tickers
def get_historical_data(ticker_list, start_date, end_date=None, limit=10000, timeframe="1Day"):
"""
    returns historical bar data for a list of ticker symbols,
    e.g. ticker_list = ["MSFT", "AMZN", "GOOG"]
* timeframe - Timeframe for the aggregation. Available values are: `1Min`, `1Hour`, `1Day`
https://alpaca.markets/docs/api-documentation/api-v2/market-data/alpaca-data-api-v2/historical/#bars
"""
df_data_tickers = {}
for symbol in ticker_list:
bar_url = endpoint + "/stocks/{}/bars".format(symbol)
params = {"start":start_date, "end": end_date, "limit": limit, "timeframe":timeframe}
data = {"bars": [], "next_page_token": '', "symbol": symbol}
# r = requests.get(bar_url, headers=headers, params=params)
# r = r.json()
# data["bars"] += r["bars"]
while True:
r = requests.get(bar_url, headers=headers, params=params)
r = r.json()
try:
if r["next_page_token"] == None:
data["bars"] += r["bars"]
break
else:
params["page_token"] = r["next_page_token"]
data["bars"] += r["bars"]
data["next_page_token"] = r["next_page_token"]
except:
break
# Create a DataFrame for the data["bars"] of each stock
df_data = pd.DataFrame(data["bars"])
df_data.rename({"t":"time","o":"open","h":"high","l":"low","c":"close","v":"volume"},axis=1, inplace=True)
try:
df_data["time"] = pd.to_datetime(df_data["time"])
df_data.set_index("time",inplace=True)
df_data.index = df_data.index.tz_convert("America/New_York")
df_data_tickers[symbol] = df_data
except:
pass
print("---- Created for [{}]".format(symbol))
return df_data_tickers
```
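A hedged usage sketch for the helper above; it assumes a valid `key.txt` with Alpaca data credentials in the working directory, and the tickers and dates are arbitrary examples.
```python
# Sketch: fetch daily bars for a couple of tickers (assumes key.txt holds valid Alpaca headers).
if __name__ == "__main__":
    bars = get_historical_data(["AAPL", "MSFT"], start_date="2021-01-01",
                               end_date="2021-06-30", timeframe="1Day")
    for symbol, df in bars.items():
        print(symbol, df.shape)
        print(df[["open", "high", "low", "close", "volume"]].tail())
```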
#### File: jeremychonggg/Alpaca-Trading-Bot/order.py
```python
import requests
import os
import json
import pandas as pd
os.chdir("D:\\SynologyDrive\Programming\AlpacaTradingBot")
endpoint = "https://paper-api.alpaca.markets"
headers = json.loads(open("key.txt", 'r').read())
# https://alpaca.markets/docs/api-documentation/api-v2/orders/
def market_order(symbol, quantity, side="buy", tif="day"):
order_url = endpoint + "/v2/orders"
params = {"symbol" : symbol,
"qty" : quantity, # Number of shares to trade
"side" : side, # 'buy' or 'sell'
"type" : "market", # 'market', 'limit', 'stop', 'stop_limit', 'trailing_stop'
"time_in_force" : tif # 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'
}
r = requests.post(order_url, headers=headers, json=params)
return r.json()
def limit_order(symbol, quantity, limit_price, side="buy", tif="day"):
order_url = endpoint + "/v2/orders"
params = {"symbol" : symbol,
"qty" : quantity, # Number of shares to trade
"side" : side, # 'buy' or 'sell'
"type" : "limit", # 'market', 'limit', 'stop', 'stop_limit', 'trailing_stop'
"limit_price" : limit_price, # price to go in
"time_in_force" : tif # 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'
}
r = requests.post(order_url, headers=headers, json=params)
return r.json()
def stop_order(symbol, quantity, stop_price, side="buy", tif="day"):
order_url = endpoint + "/v2/orders"
params = {"symbol" : symbol,
"qty" : quantity, # Number of shares to trade
"side" : side, # 'buy' or 'sell'
"type" : "stop", # 'market', 'limit', 'stop', 'stop_limit', 'trailing_stop'
"stop_price" : stop_price,
"time_in_force" : tif # 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'
}
r = requests.post(order_url, headers=headers, json=params)
return r.json()
def stop_limit_order(symbol, quantity, stop_price, limit_price, side="buy", tif="day"):
order_url = endpoint + "/v2/orders"
params = {"symbol" : symbol,
"qty" : quantity, # Number of shares to trade
"side" : side, # 'buy' or 'sell'
"type" : "stop_limit", # 'market', 'limit', 'stop', 'stop_limit', 'trailing_stop'
"stop_price" : stop_price,
"limit_price" : limit_price,
"time_in_force" : tif # 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'
}
r = requests.post(order_url, headers=headers, json=params)
return r.json()
def trail_stop_order(symbol, quantity, trail_price, side="buy", tif="day"):
order_url = endpoint + "/v2/orders"
params = {"symbol" : symbol,
"qty" : quantity, # Number of shares to trade
"side" : side, # 'buy' or 'sell'
"type" : "trailing_stop", # 'market', 'limit', 'stop', 'stop_limit', 'trailing_stop'
"trail_price" : trail_price,
"time_in_force" : tif # 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'
}
r = requests.post(order_url, headers=headers, json=params)
return r.json()
def bracket_order(symbol, quantity, tp_limit_price, sl_stop_price, sl_limit_price, side="buy", tif="day"):
order_url = endpoint + "/v2/orders"
params = {"symbol" : symbol,
"qty" : quantity, # Number of shares to trade
"side" : side, # 'buy' or 'sell'
"type" : "market", # 'market', 'limit', 'stop', 'stop_limit', 'trailing_stop'
"time_in_force" : tif, # 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'
"order_class" : "bracket",
"take_profit" : {"limit_price": tp_limit_price},
"stop_loss" : {"stop_price" : sl_stop_price,
"limit_price": sl_limit_price
}
}
r = requests.post(order_url, headers=headers, json=params)
return r.json()
def order_list(status="open", limit=50):
"""
Retrieves a list of orders for the account, filtered by the supplied query parameters.
"""
order_list_url = endpoint + "/v2/orders"
params = {"status": status}
r = requests.get(order_list_url, headers=headers, params=params)
data = r.json()
return pd.DataFrame(data)
def order_cancel(order_id=""):
if len(order_id) > 1:
# Cancel specific order
order_cancel_url = endpoint + "/v2/orders/{}".format(order_id)
else:
        # Cancel all orders
order_cancel_url = endpoint + "/v2/orders"
r = requests.delete(order_cancel_url, headers=headers)
return r.json()
# order_df = order_list()
# order_cancel(order_df[order_df["symbol"]=="CSCO"]["id"].to_list()[0])
def order_replace(order_id, params):
order_cancel_url = endpoint + "/v2/orders/{}".format(order_id)
r = requests.patch(order_cancel_url, headers=headers, json=params)
return r.json()
# order_replace(order_df[order_df["symbol"]=="CSCO"]["id"].to_list()[0],
# {"qty": 10, "trail": 3})
```
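A hedged sketch of calling the order helpers above against the paper-trading endpoint; the symbol, quantities and prices are illustrative assumptions.
```python
# Sketch: place and inspect paper-trading orders (symbol and prices are assumptions).
if __name__ == "__main__":
    print(market_order("AAPL", 1))                    # market buy, day order
    print(limit_order("AAPL", 1, limit_price=150.0))  # limit buy at $150
    print(bracket_order("AAPL", 1,
                        tp_limit_price=170.0,         # take-profit limit
                        sl_stop_price=140.0,          # stop-loss trigger
                        sl_limit_price=139.0))        # stop-loss limit
    print(order_list(status="open"))
    print(order_cancel())                             # cancel all open orders
```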
#### File: jeremychonggg/Alpaca-Trading-Bot/others.py
```python
import requests
import os
import json
import pandas as pd
os.chdir("D:\\SynologyDrive\Programming\AlpacaTradingBot")
endpoint = "https://paper-api.alpaca.markets"
headers = json.loads(open("key.txt", 'r').read())
# https://alpaca.markets/docs/api-documentation/api-v2/positions/
def get_position(symbol=""):
if len(symbol) > 1:
positions_url = endpoint + "/v2/positions/{}".format(symbol)
else:
positions_url = endpoint + "/v2/positions"
r = requests.get(positions_url, headers=headers)
return r.json()
def close_position(symbol="", qty=0):
if len(symbol) > 1:
positions_url = endpoint + "/v2/positions/{}".format(symbol)
params = {"qty": qty}
else:
positions_url = endpoint + "/v2/positions"
params = {}
r = requests.delete(positions_url, headers=headers, json=params)
return r.json()
# https://alpaca.markets/docs/api-documentation/api-v2/account/
def get_account():
account_url = endpoint + "/v2/account"
r = requests.get(account_url, headers=headers)
return r.json()
```
#### File: jeremychonggg/Alpaca-Trading-Bot/prepare_ticker_list.py
```python
import requests
from bs4 import BeautifulSoup
# Web-scrape the list of top gainer stocks
day_gainers_stock = {}
def find_day_gainers_stock():
url = "https://finance.yahoo.com/gainers?count=100&offset=0"
page = requests.get(url)
page_content = page.content
soup = BeautifulSoup(page_content,'html.parser')
table = soup.find_all("table", {"class" : "W(100%)"})
for t in table:
rows = t.find_all("tr")
for row in rows:
day_gainers_stock[row.get_text(separator='|').split("|")[0]] = row.get_text(separator='|').split("|")[4]
# Web-scrape the list of top volume stocks
most_active_stock = {}
def find_most_active_stock():
url = "https://finance.yahoo.com/most-active?count=100&offset=0"
page = requests.get(url)
page_content = page.content
soup = BeautifulSoup(page_content,'html.parser')
table = soup.find_all("table", {"class" : "W(100%)"})
for t in table:
rows = t.find_all("tr")
for row in rows:
most_active_stock[row.get_text(separator='|').split("|")[0]] = row.get_text(separator='|').split("|")[5]
# Get Alpaca API Credential
import json
endpoint = "https://data.alpaca.markets/v2"
headers = json.loads(open("key.txt", 'r').read())
def prepare_ticker_list():
find_day_gainers_stock()
find_most_active_stock()
ticker_list = []
# https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=QQQ
qqq_list = ['AAPL', 'MSFT', 'AMZN', 'GOOG', 'FB', 'GOOGL', 'NVDA', 'TSLA', 'PYPL', 'ADBE', 'CMCSA', 'NFLX', 'INTC', 'PEP', 'AVGO', 'COST', 'TXN', 'TMUS', 'QCOM', 'HON', 'INTU', 'MRNA', 'CHTR', 'SBUX', 'AMD', 'AMGN', 'AMAT', 'ISRG', 'BKNG', 'MELI', 'GILD', 'ADP', 'LRCX', 'MDLZ', 'MU', 'ZM', 'FISV', 'CSX', 'ADSK', 'REGN', 'ILMN', 'ASML', 'ATVI', 'NXPI', 'JD', 'ADI', 'DOCU', 'IDXX', 'CRWD', 'ALGN', 'KLAC', 'EBAY', 'VRTX', 'BIIB', 'MNST', 'WDAY', 'LULU', 'SNPS', 'DXCM', 'MRVL', 'KDP', 'TEAM', 'EXC', 'CDNS', 'AEP', 'KHC', 'MAR', 'MCHP', 'ROST', 'WBA', 'ORLY', 'PAYX', 'CTAS', 'EA', 'CTSH', 'BIDU', 'XLNX', 'MTCH', 'XEL', 'PDD', 'CPRT', 'OKTA', 'VRSK', 'FAST', 'ANSS', 'SWKS', 'SGEN', 'PCAR', 'PTON', 'NTES', 'CDW', 'SIRI', 'SPLK', 'VRSN', 'CERN', 'DLTR', 'CHKP', 'INCY', 'TCOM', 'FOXA', 'FOX']
# # Prepare Day Gainer
# for ticker in day_gainers_stock.keys():
# ticker_list.append(ticker)
# ticker_list.remove('Symbol')
# Prepare Most Active
for ticker in most_active_stock.keys():
ticker_list.append(ticker)
ticker_list.remove('Symbol')
# Add QQQ List
for ticker in qqq_list:
ticker_list.append(ticker)
    # Remove tickers that are not available on Alpaca (iterate over a copy since the list is mutated)
    for ticker in list(ticker_list):
bar_url = endpoint + "/stocks/{}/bars".format(ticker)
params = {"start":'2021-01-01', "limit": 3, "timeframe":'1Day'}
data = {"bars": [], "next_page_token": '', "symbol": ticker}
while True:
r = requests.get(bar_url, headers=headers, params=params)
r = r.json()
try:
if r["next_page_token"] == None:
data["bars"] += r["bars"]
break
else:
params["page_token"] = r["next_page_token"]
data["bars"] += r["bars"]
data["next_page_token"] = r["next_page_token"]
            except Exception:
                # The bars request failed, so the ticker is not available on Alpaca; drop it.
                ticker_list.remove(ticker)
                break
return ticker_list
```
|
{
"source": "jeremyciak/terraform-aws-control_tower_account_factory",
"score": 2
}
|
#### File: lambda/aft-account-provisioning-framework-account-metadata-ssm/aft_account_provisioning_framework_account_metadata_ssm.py
```python
import inspect
import json
from typing import TYPE_CHECKING, Any, Dict, List, Sequence, Union
import aft_common.aft_utils as utils
import boto3
from boto3.session import Session
if TYPE_CHECKING:
from mypy_boto3_ssm import SSMClient
from mypy_boto3_sts import STSClient
else:
SSMClient = object
STSClient = object
AFT_EXEC_ROLE = "AWSAFTExecution"
SSM_PARAMETER_PATH = "/aft/account-request/custom-fields/"
logger = utils.get_logger()
def get_ssm_parameters_names_by_path(session: Session, path: str) -> List[str]:
client = session.client("ssm")
response = client.get_parameters_by_path(Path=path, Recursive=True)
logger.debug(response)
parameter_names = []
for p in response["Parameters"]:
parameter_names.append(p["Name"])
return parameter_names
def delete_ssm_parameters(session: Session, parameters: Sequence[str]) -> None:
if len(parameters) > 0:
client = session.client("ssm")
response = client.delete_parameters(Names=parameters)
logger.info(response)
def create_ssm_parameters(session: Session, parameters: Dict[str, str]) -> None:
client = session.client("ssm")
for key, value in parameters.items():
response = client.put_parameter(
Name=SSM_PARAMETER_PATH + key, Value=value, Type="String", Overwrite=True
)
logger.info(response)
def lambda_handler(event: Dict[str, Any], context: Union[Dict[str, Any], None]) -> None:
try:
account_request = event["payload"]["account_request"]
custom_fields = json.loads(account_request.get("custom_fields", "{}"))
target_account_id = event["payload"]["account_info"]["account"]["id"]
local_session = boto3.session.Session()
aft_session = utils.get_aft_admin_role_session(local_session)
target_account_role_arn = utils.build_role_arn(
aft_session, AFT_EXEC_ROLE, target_account_id
)
# Create the custom field parameters in the AFT home region
target_region = aft_session.region_name
aft_ssm_session_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ssm:GetParametersByPath",
"ssm:PutParameter",
"ssm:DeleteParameters",
],
"Effect": "Allow",
"Resource": f"arn:aws:ssm:{target_region}:{target_account_id}:parameter{SSM_PARAMETER_PATH}*",
}
],
}
target_account_creds = utils.get_assume_role_credentials(
session=aft_session,
role_arn=target_account_role_arn,
session_name="aft_ssm_metadata",
session_policy=json.dumps(aft_ssm_session_policy),
)
target_account_session = utils.get_boto_session(target_account_creds)
params = get_ssm_parameters_names_by_path(
target_account_session, SSM_PARAMETER_PATH
)
existing_keys = set(params)
new_keys = set(custom_fields.keys())
# Delete SSM parameters which do not exist in new custom fields
params_to_remove = list(existing_keys.difference(new_keys))
logger.info(message=f"Deleting SSM params: {params_to_remove}")
delete_ssm_parameters(target_account_session, params_to_remove)
# Update / Add SSM parameters for custom fields provided
logger.info(message=f"Adding/Updating SSM params: {custom_fields}")
create_ssm_parameters(target_account_session, custom_fields)
except Exception as e:
message = {
"FILE": __file__.split("/")[-1],
"METHOD": inspect.stack()[0][3],
"EXCEPTION": str(e),
}
logger.exception(message)
raise
if __name__ == "__main__":
import json
import sys
from optparse import OptionParser
logger.info("Local Execution")
parser = OptionParser()
parser.add_option(
"-f", "--event-file", dest="event_file", help="Event file to be processed"
)
(options, args) = parser.parse_args(sys.argv)
if options.event_file is not None:
with open(options.event_file) as json_data:
event = json.load(json_data)
lambda_handler(event, None)
else:
lambda_handler({}, None)
```
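For local runs via `--event-file`, the handler above only reads a handful of fields. A minimal illustrative event is sketched below; the account id and custom field values are assumptions, not taken from AFT documentation.
```python
# Sketch: minimal event for a local run of lambda_handler (values are illustrative assumptions).
import json

example_event = {
    "payload": {
        "account_request": {
            # custom_fields is stored as a JSON string in the account request
            "custom_fields": json.dumps({"cost_center": "1234", "environment": "dev"})
        },
        "account_info": {
            "account": {"id": "123456789012"}
        },
    }
}
# lambda_handler(example_event, None)  # requires AFT credentials and SSM access
```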
#### File: lambda/aft-account-provisioning-framework-persist-metadata/aft_account_provisioning_framework_persist_metadata.py
```python
import inspect
from typing import Any, Dict, Union
import aft_common.aft_utils as utils
import boto3
from boto3.session import Session
logger = utils.get_logger()
def persist_metadata(
payload: Dict[str, Any], account_info: Dict[str, str], session: Session
) -> Dict[str, Any]:
logger.info("Function Start - persist_metadata")
account_tags = payload["account_request"]["account_tags"]
account_customizations_name = payload["account_request"][
"account_customizations_name"
]
metadata_table_name = utils.get_ssm_parameter_value(
session, utils.SSM_PARAM_AFT_DDB_META_TABLE
)
item = {
"id": account_info["id"],
"email": account_info["email"],
"account_name": account_info["name"],
"account_creation_time": account_info["joined_date"],
"account_status": account_info["status"],
"account_level_tags": account_tags,
"account_customizations_name": account_customizations_name,
"parent_ou": account_info["parent_id"],
"vcs_information": {},
"terraform_workspace": {},
}
logger.info("Writing item to " + metadata_table_name)
logger.info(item)
response = utils.put_ddb_item(session, metadata_table_name, item)
logger.info(response)
return response
def lambda_handler(
event: Dict[str, Any], context: Union[Dict[str, Any], None]
) -> Dict[str, Any]:
try:
logger.info("AFT Account Provisioning Framework Handler Start")
rollback = None
try:
if event["rollback"]:
rollback = True
except KeyError:
pass
payload = event["payload"]
action = event["action"]
session = boto3.session.Session()
if action == "persist_metadata":
account_info = payload["account_info"]["account"]
update_metadata = persist_metadata(payload, account_info, session)
return update_metadata
else:
raise Exception(
"Incorrect Command Passed to Lambda Function. Input: {action}. Expected: 'persist_metadata'"
)
except Exception as e:
message = {
"FILE": __file__.split("/")[-1],
"METHOD": inspect.stack()[0][3],
"EXCEPTION": str(e),
}
logger.exception(message)
raise
if __name__ == "__main__":
import json
import sys
from optparse import OptionParser
logger.info("Local Execution")
parser = OptionParser()
parser.add_option(
"-f", "--event-file", dest="event_file", help="Event file to be processed"
)
(options, args) = parser.parse_args(sys.argv)
if options.event_file is not None:
with open(options.event_file) as json_data:
event = json.load(json_data)
lambda_handler(event, None)
else:
lambda_handler({}, None)
```
#### File: aft-customizations/tests/test_tf_unitests.py
```python
import json
import os
import boto3
import pytest
import workspace_manager
@pytest.mark.unit
def test_new_resources(plan):
    with open(os.path.join(".", "./testdata/expected_resources.json")) as json_file:
response_schema = json.load(json_file)
result = {}
plan_results = plan["plan_analysis"]
for key in plan_results.keys():
table = [[r, len(plan_results[key][r])] for r in plan_results[key]]
result[key] = table
assert json.dumps(response_schema, sort_keys=True) == json.dumps(
result, sort_keys=True
)
@pytest.mark.unit
def test_resource_count_by_modules(plan):
with open(
os.path.join(".", f"./testdata/expected_resources_by_module.json")
) as json_file:
response_schema = json.load(json_file)
modules = plan["modules"]
result = {}
for m in modules:
print(f"******: {m} : {len(modules[m]['resources'])}")
print(f"******: {m} : {len(modules[m]['child_modules'])}")
print([len(modules[m]["child_modules"]), len(modules[m]["resources"])])
result[m] = [len(modules[m]["child_modules"]), len(modules[m]["resources"])]
# val = [{ result[m] : [len(modules[m]["child_modules"]),len(modules[m]["resources"])]} for m in modules]
    # print(json.dumps(result, sort_keys=True))
# print(json.dumps(response_schema, sort_keys=True))
assert json.dumps(response_schema, sort_keys=True) == json.dumps(
result, sort_keys=True
)
@pytest.mark.unit
def test_pipeline_execution():
session = boto3.Session(profile_name="aft-management", region_name="us-west-2")
ssm_client = session.client("ssm")
tf_token = ssm_client.get_parameter(
Name="/aft/config/tf-token", WithDecryption=True
)
workspace_manager.setup_and_run_workspace(
"TLZCloud",
"dev-tfc-cc-544582079943-global",
"arn:aws:iam::544582079943:role/AWSAFTExecution",
tf_token["Parameter"]["Value"],
)
workspace_manager.delete_workspace(
"TLZCloud",
"dev-tfc-cc-544582079943-global",
"arn:aws:iam::544582079943:role/AWSAFTExecution",
tf_token["Parameter"]["Value"],
)
return True
```
#### File: aft-lambda-layer/aft_common/datetime_encoder.py
```python
import json
from datetime import date, datetime
class DateTimeEncoder(json.JSONEncoder):
def default(self, o: object) -> str:
if isinstance(o, (datetime, date)):
serial = o.isoformat()
return serial
raise TypeError("Type %s not serializable" % type(o))
```
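A short usage sketch for the encoder above; the import path is assumed from the layer layout shown in the file header, and the payload values are arbitrary examples.
```python
# Sketch: serializing datetimes with DateTimeEncoder (import path assumed from the layer layout).
import json
from datetime import date, datetime

from aft_common.datetime_encoder import DateTimeEncoder

payload = {"created": datetime(2022, 1, 31, 12, 30), "due": date(2022, 2, 1)}
print(json.dumps(payload, cls=DateTimeEncoder))
# {"created": "2022-01-31T12:30:00", "due": "2022-02-01"}
```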
|
{
"source": "JeremyCJM/Ensemble",
"score": 2
}
|
#### File: JeremyCJM/Ensemble/imagenet_ens_new.py
```python
import numpy as np
import torch
import pdb
import torch.nn as nn
from torch.autograd import Variable
import itertools
num_data = 50000
num_classes = 1000
# Temps = [0.001, 0.01, 0.1, 5, 10, 50, 100, 500, 1000, 5000, 10000]
# Temps = [1.2, 1.4, 1.6, 1.8, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5]
# Temps = np.arange(1.01, 1.2 ,0.02)
Temps = [1,2,3,4,5]
models = ['vgg19_bn', 'resnet152', 'densenet161', 'densenet121', 'densenet201', 'resnet101', 'densenet169', 'resnet50']
target = torch.load('Ensemble/imagenet/vgg19_bn/Targets of vgg19_bn.pt')
data = {}
for m in models:
data[m] = torch.load('Ensemble/imagenet/{}/Logit Outputs of {}.pt'.format(m, m))
def logit_ensemble(models, data, target):
output = torch.zeros(num_data, num_classes).cuda()
for m in models:
output += data[m]
target_exp = target.view(-1, 1).expand(-1, num_classes).cuda()
_, pred = output.topk(num_classes, 1, True, True)
correct = pred.data.eq(target_exp).t()
correct_1 = torch.sum(correct[:1])
correct_5 = torch.sum(correct[:5])
V = torch.Tensor([range(1, num_classes+1)]).t().cuda()
gesNum = V * correct.float()
zero_map = gesNum == 0
zero_map = zero_map.float() * 999
# pdb.set_trace()
gesNum = gesNum + zero_map
gesNum, _ = torch.min(gesNum,0)
# pdb.set_trace()
AverGesNum = torch.mean(gesNum)
if AverGesNum > 50:
pdb.set_trace()
return correct_1 / len(target), correct_5 / len(target), AverGesNum
def temperature_ensemble(models, data, target, Temps):
softmax = nn.Softmax().cuda()
output = Variable(torch.zeros(num_data, num_classes).cuda())
for m,T in zip(models,Temps):
output += softmax(Variable(data[m])/T)
# pdb.set_trace()
target_exp = target.view(-1, 1).expand(-1, num_classes).cuda()
_, pred = output.topk(num_classes, 1, True, True)
correct = pred.data.eq(target_exp).t()
correct_1 = torch.sum(correct[:1])
correct_5 = torch.sum(correct[:5])
V = torch.Tensor([range(1, num_classes+1)]).t().cuda()
gesNum = V * correct.float()
zero_map = gesNum == 0
zero_map = zero_map.float() * 999
# pdb.set_trace()
gesNum = gesNum + zero_map
gesNum, _ = torch.min(gesNum,0)
# pdb.set_trace()
AverGesNum = torch.mean(gesNum)
# if AverGesNum > 50:
# pdb.set_trace()
return correct_1 / len(target), correct_5 / len(target), AverGesNum
def geometric_ensemble(models, data, target):
    softmax = nn.Softmax(dim=1).cuda()
output = Variable(torch.ones(num_data, num_classes).cuda())
for m in models:
output *= softmax(Variable(data[m]))
target = target.view(-1, 1).expand(-1, 5).cuda()
_, pred = output.topk(5, 1, True, True)
correct = pred.data.eq(target).t()
correct_1 = torch.sum(correct[:1])
correct_5 = torch.sum(correct[:5])
return correct_1 / len(target), correct_5 / len(target)
Result = {}
compare_top1 = {}
compare_top5 = {}
# for T in Temps:
# print(T)
compare_top1 = {}
compare_top5 = {}
compare_top1['better'], compare_top1['worse'], compare_top1['equal'], compare_top1['improve'], compare_top1['gesNum'] = 0, 0, 0, [], (-1,-1)
compare_top1['gNumBetter'], compare_top1['gNumWorse'], compare_top1['gNumEqual'] = 0, 0, 0
compare_top5['better'], compare_top5['worse'], compare_top5['equal'], compare_top5['improve'] = 0, 0, 0, []
ground_gesNum = []
gesNum = []
## average improvement
# for r in range(2, len(models)+1):
for submodels in itertools.combinations(models, 5):
submodels = list(submodels)
A1, A5, Anum = temperature_ensemble(submodels, data, target, [1,1,1,1,1])
C1, C5, Cnum = temperature_ensemble(submodels, data, target, Temps)
compare_top1['improve'].append(C1 - A1)
compare_top5['improve'].append(C5 - A5)
ground_gesNum.append(Anum)
gesNum.append(Cnum)
# print('T = {}: ({},{})'.format(T, Anum, Cnum))
if C1 > A1:
compare_top1['better'] += 1
elif C1 < A1:
compare_top1['worse'] += 1
elif C1 == A1:
compare_top1['equal'] += 1
if C5 > A5:
compare_top5['better'] += 1
elif C5 < A5:
compare_top5['worse'] += 1
elif C5 == A5:
compare_top5['equal'] += 1
if Cnum < Anum:
compare_top1['gNumBetter'] += 1
elif Cnum > Anum:
compare_top1['gNumWorse'] += 1
elif Cnum == Anum:
compare_top1['gNumEqual'] += 1
compare_top1['improve'] = sum(compare_top1['improve']) / len(compare_top1['improve'])
compare_top5['improve'] = sum(compare_top5['improve']) / len(compare_top5['improve'])
compare_top1['accBetterRate'] = compare_top1['better'] / (compare_top1['better']+compare_top1['equal']+compare_top1['worse'])
compare_top5['accBetterRate'] = compare_top5['better'] / (compare_top5['better']+compare_top5['equal']+compare_top5['worse'])
compare_top1['numBetterRate'] = compare_top1['gNumBetter'] / (compare_top1['gNumBetter']+compare_top1['gNumEqual']+compare_top1['gNumWorse'])
ground_gesNum = np.mean(ground_gesNum)#sum(ground_gesNum) / len(ground_gesNum)
gesNum = np.mean(gesNum)#sum(gesNum) / len(gesNum)
compare_top1['gesNum'] = (ground_gesNum, gesNum)
# pdb.set_trace()
Result['top1'] = compare_top1
Result['top5'] = compare_top5
torch.save(Result, 'Ensemble/ImageNet_Result_new.pt')
```
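A small sketch of the effect `temperature_ensemble` relies on: dividing logits by a temperature T > 1 flattens the softmax before the per-model probabilities are averaged. The logits below are made up and the snippet runs on CPU.
```python
# Sketch: temperature scaling flattens a softmax distribution (made-up logits, CPU only).
import torch

logits = torch.tensor([2.0, 1.0, 0.1])
for T in (1, 2, 5):
    probs = torch.softmax(logits / T, dim=-1)
    print(T, probs.tolist())
# Higher T pushes the probabilities toward uniform, so a single over-confident
# model contributes less sharply to the averaged ensemble output.
```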
|
{
"source": "jeremyclewell/kings-table",
"score": 3
}
|
#### File: jeremyclewell/kings-table/Brandubh.py
```python
import numpy as np
import pudb
from easyAI import TwoPlayersGame, DictTT
black_pieces = [[0,3],[1,3],[3,0],[3,1],[3,5],[3,6],[5,3],[6,3]]
white_pieces = [[2,3],[3,2],[3,3],[3,4],[4,3]]
king = [3,3]
throne = np.array([[3,3]])
corners = np.array([[0,0],[0,6],[6,0],[6,6]])
pieces = [black_pieces, white_pieces + [king]]
BLACK = 1
WHITE = 2
class Game(TwoPlayersGame):
"""
"""
def __init__(self, players, board_size = (7, 7)):
self.players = players
self.board_size = board_size
self.board = np.zeros(board_size, dtype = int)
for piece in black_pieces:
self.board[piece[0]][piece[1]] = 1
for piece in white_pieces:
self.board[piece[0]][piece[1]] = 2
self.king = np.array([5,5])
self.nplayer = 1 # player 1 starts.
    def pieceIsKing(self, piece):
        return piece == king
    def validMoveFilter(self, boardSlice, piece):
        # NOTE: unused helper stub; 'corner' is not defined in this scope in the original code.
        return corner in boardSlice
def possible_moves_for_piece(self, piece):
v_moves = []
column = self.board[piece[0]]
v_mask = np.ma.masked_where(column != 0, column)
v_slices = np.ma.notmasked_contiguous(v_mask)
v_slices = [slice for slice in v_slices if slice.stop == piece[1] or piece[1]+1 == slice.start]
if len(v_slices) != 0:
v_moves = range(np.amin(v_slices).start, np.amax(v_slices).stop)
if piece[1] in v_moves:
v_moves.remove(piece[1])
v_moves = [[piece[0], val] for val in v_moves]
h_moves = []
row = self.board[:, piece[1]]
h_mask = np.ma.masked_where(row != 0, row)
h_slices = np.ma.notmasked_contiguous(h_mask)
h_slices = [slice for slice in h_slices if slice.start == piece[0]+1 or piece[0]-1 == slice.stop]
if len(h_slices) != 0:
h_moves = range(np.amin(h_slices).start, np.amax(h_slices).stop)
if piece[0] in h_moves:
h_moves.remove(piece[0])
h_moves = [[val, piece[1]] for val in h_moves]
restricted_squares = throne
if piece is not king:
restricted_squares = np.concatenate((throne, corners), axis=0)
moves = [(piece, move) for move in h_moves + v_moves if move not in restricted_squares]
return moves
def get_piece(self, coord):
try:
return self.board[coord[0]][coord[1]]
except:
return None
def get_pieces(self, player):
pieces = np.where(self.board == player)
return np.dstack(pieces)[0]
def capture(self, position):
directions = [np.array([0,1]),np.array([0,-1]),np.array([1,0]),np.array([-1,0])]
for direction in directions:
target = direction + position
if self.get_piece(target) == self.nopponent:
if self.get_piece(direction + target) == self.nplayer or \
any(np.equal(corners, direction + target).all(1)) or \
any(np.equal(throne, direction + target).all(1)):
self.board[target[0]][target[1]] = 0
def possible_moves(self):
moves = []
pieces = self.get_pieces(self.nplayer)
if self.nmove % 3:
pieces = pieces[::-1]
for piece in pieces:
moves.extend(self.possible_moves_for_piece(piece))
if len(moves) == 0:
pudb.set_trace()
return moves
def make_move(self, move):
current_pos = move[0]
next_pos = move[1]
self.board[current_pos[0]][current_pos[1]] = 0
self.board[next_pos[0]][next_pos[1]] = self.nplayer
if (self.king == current_pos).all():
self.king = next_pos
self.capture(next_pos)
def show(self):
print('\n' + '\n'.join([' 1 2 3 4 5 6 7'] +
['ABCDEFG'[k] +
' ' + ' '.join([['∙', '⚫️', '⚪️', '👑'][self.board[k, i]]
for i in range(self.board_size[0])])
for k in range(self.board_size[1])] + ['']))
def lose(self):
if self.nplayer == BLACK:
self.has_lost = (self.king == corners).any()
else:
self.has_lost = self.get_pieces(WHITE).size == 0
# if not (self.king == self.get_pieces(WHITE)).any():
# return True
return self.has_lost
def scoring(self):
if not self.has_lost:
material = len(self.get_pieces(BLACK))/2./len(self.get_pieces(WHITE))
# king_to_corner = min([np.linalg.norm(np.array(self.king)-corner) for corner in corners])
# attackers_to_king = np.array([np.linalg.norm(np.array(self.king)-piece) for piece in self.get_pieces(BLACK)]).mean()
# return king_to_corner + material**10 - attackers_to_king
# return -attackers_to_king + king_to_corner
return material
# return material
else:
return -100
def is_over(self):
return self.lose()
def ttentry(self):
return "".join([".0X"[i] for i in self.board.flatten()])
if __name__ == "__main__":
from easyAI import AI_Player, Negamax
#ai_algo = Negamax(3, None , tt = DictTT())
ai_algo = Negamax(5, None)
game = Game([AI_Player(ai_algo), AI_Player(ai_algo)])
game.play()
print("player %d loses" % (game.nplayer))
```
|
{
"source": "jeremyclewell/peacock",
"score": 2
}
|
#### File: peacock/syntax/lexer.py
```python
from pygments.token import *;
from pygments.lexer import RegexLexer, bygroups;
from pygments.token import Token, STANDARD_TYPES;
DataType = Token.DataType;
class RE:
Domain = r'\b(?:[A-Za-z0-9\-\.]+){1,4}\.(?:com|net|org|biz|gov|edu|mil|aero|asia|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|[A-Za-z]{2})\b';
Email = r'[\w\d!#$%&*._0/=?^_`{|}~\-]+@(?:[A-Za-z0-9\-\.]+){1,}\.[A-Za-z\-]{2,6}';
IPv4_CIDR = r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(/)(\d{1,2})';
IPv4_Address = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}';
Time = r'\d{1,2}:\d{2}(:\d{2})?(\s+(AM|PM))?';
Dates = [
        r'((?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+\d{1,2})(\s+\d{2,4})?(\s+)',
r'\d{1,2}/\d{1,2}/\d{2,4}',
r'\d{4}-\d{1,2}-\d{1,2}',
];
URL = '';
RE.URL = r'(https?|ftp|ssh|git|svn|file)://' + RE.Domain + r'(/\S+)?\b'
class DataTypesLexer(RegexLexer):
"""
Custom formatter for postfix-reject-log by <NAME>
"""
name = 'DataTypes'
aliases = ['DataTypes','datatypes']
filenames = []
mimetypes = ['text/x-datatypes-lexer']
tokens = {
'root': [
(RE.URL, DataType.URL),
(RE.Email, DataType.Email),
(RE.Domain, DataType.Domain),
(RE.IPv4_CIDR, bygroups(DataType.Net.IPv4.CIDR.Address,Operator,DataType.Net.IPv4.CIDR.Mask)),
(RE.IPv4_Address, DataType.Net.IPv4.Address),
(RE.Time, DataType.Time),
(RE.Dates[0], bygroups(DataType.Date, DataType.Date, Literal)),
(RE.Dates[1], DataType.Date),
(RE.Dates[2], DataType.Date),
(r'.', Other),
],
}
def __init__(self, **options):
# print('DataTypesLexer Loaded');
super(DataTypesLexer, self).__init__(**options);
# noinspection PyMethodParameters
def analyse_text(text):
return .3;
```
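A hedged sketch of exercising the lexer above directly through the standard Pygments API; the sample line is an arbitrary example.
```python
# Sketch: tokenize a sample line with DataTypesLexer (sample text is an assumption).
lexer = DataTypesLexer()
sample = "Connection from 192.168.0.1 to https://example.com at 12:34:56 PM"
for token_type, value in lexer.get_tokens(sample):
    if str(token_type).startswith("Token.DataType"):
        print(token_type, repr(value))
```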
|
{
"source": "jeremycline/crochet",
"score": 2
}
|
#### File: jeremycline/crochet/setup.py
```python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import versioneer
def read(path):
"""
Read the contents of a file.
"""
with open(path) as f:
return f.read()
setup(
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
name='crochet',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Use Twisted anywhere!",
install_requires=[
"Twisted>=15.0",
"wrapt",
],
keywords="twisted threading",
license="MIT",
packages=["crochet", "crochet.tests"],
url="https://github.com/itamarst/crochet",
maintainer='<NAME>',
maintainer_email='<EMAIL>',
long_description=read('README.rst') + '\n' + read('docs/news.rst'),
)
```
|
{
"source": "jeremycline/python-sdk",
"score": 2
}
|
#### File: python-sdk/deepgram/_utils.py
```python
from ._constants import DEFAULT_ENDPOINT
from ._types import Options
from ._version import __version__
from typing import Any, Union, Optional, IO, Mapping, Tuple, List
import aiohttp, urllib.parse, json, re, platform
import websockets, websockets.client
Payload = Optional[Union[dict, str, bytes, IO]]
def _prepare_headers(options: Options, headers: Mapping[str, str] = {}) -> dict:
return {**headers,
'Authorization': None if 'api_key' not in options else options.get('auth_method', 'Token') + ' ' + options['api_key'],
'User-Agent': f'deepgram/{__version__} python/{platform.python_version()}'
}
def _normalize_payload(payload: Payload) -> Optional[Union[bytes, IO]]:
if payload is None:
return None
if isinstance(payload, dict):
return json.dumps(payload).encode('utf-8')
if isinstance(payload, str):
return payload.encode('utf-8')
return payload
def _make_query_string(params: Mapping[str, Any] = {}) -> str:
def elem_decomposer(key: str, value: Any) -> List[Tuple[str, str]]:
if value in [None, ""]:
return []
if isinstance(value, list):
return [elem_decomposer(key, item)[0] for item in value] # break into multiple parameters
# just take the first element in the sublist, rather than trying to flatten recursively
# passing nested lists as query parameters isn't really well-defined,
# nor does anything in our API currently take things like that as of 2021-08-10
# so everything coming through this second pass should be a 1-item list
if isinstance(value, bool):
return [(key, str(value).lower())] # make sure False and True stay lowercased in accordance with DG convention
return [(key, str(value))]
unflattened = [elem_decomposer(k, v) for k, v in params.items()] # sublist for each original parameter
flattened = sum(unflattened, []) # flatten
return ('?' if flattened else '') + urllib.parse.urlencode(flattened)
async def _request(path: str, options: Options, method: str = 'GET', payload: Payload = None, headers: Optional[Mapping[str, str]] = {}) -> Optional[dict]:
destination = options.get('api_url', DEFAULT_ENDPOINT) + path
updated_headers = _prepare_headers(options, headers)
try:
async with aiohttp.request(method, destination, data=_normalize_payload(payload), headers=updated_headers, raise_for_status=True) as resp:
content = (await resp.text()).strip()
if not content:
return None
body = json.loads(content)
if body.get('error'):
raise Exception(f'DG: {content}')
return body
except aiohttp.ClientResponseError as e:
raise Exception(f'DG: {e}')
except aiohttp.ClientError as e:
raise e
async def _socket_connect(path: str, options: Options, headers: Optional[Mapping[str, str]] = {}) -> websockets.client.WebSocketClientProtocol:
destination = re.sub(r'^http', 'ws', options.get('api_url', DEFAULT_ENDPOINT)) + path
updated_headers = _prepare_headers(options, headers)
try:
return await websockets.connect(destination, extra_headers=updated_headers, ping_interval=5)
# If we're streaming too much faster than realtime, connection might close without an aggressive ping interval
except websockets.exceptions.InvalidHandshake as e:
raise Exception(f'DG: {e}')
```
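A quick demonstration of the query-string flattening performed by `_make_query_string` above; the parameter names are arbitrary examples rather than a statement about specific Deepgram options.
```python
# Sketch: how _make_query_string flattens lists and drops empty values (example params only).
print(_make_query_string({
    "punctuate": True,              # booleans are lowercased
    "keywords": ["alpha", "beta"],  # lists become repeated parameters
    "tier": None,                   # None / "" values are dropped
}))
# -> ?punctuate=true&keywords=alpha&keywords=beta
```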
|
{
"source": "JeremyCodeClan/spentrack_project",
"score": 3
}
|
#### File: spentrack_project/controllers/merchants_controller.py
```python
from flask import Blueprint, Flask, render_template, request, redirect
from models.merchant import Merchant
import repositories.transaction_repository as transaction_repo
import repositories.merchant_repository as merchant_repo
import repositories.tag_repository as tag_repo
merchants_blueprint = Blueprint("merchants", __name__)
@merchants_blueprint.route("/jeremy_e51/merchants")
def merchants():
merchants = merchant_repo.select_all()
return render_template("merchants/index.html", merchants = merchants, login = 1)
@merchants_blueprint.route("/jeremy_e51/merchants/new")
def new():
merchants = merchant_repo.select_all()
return render_template(
"merchants/new.html",
merchants = merchants, login = 1, new_cancel = 1
)
@merchants_blueprint.route("/jeremy_e51/merchants", methods=['POST'])
def add_merchant():
name = request.form['name']
merchant = Merchant(name)
merchant_repo.save(merchant)
return redirect('/jeremy_e51/merchants')
@merchants_blueprint.route("/jeremy_e51/merchants/<id>")
def show_merchant(id):
one_merchant = merchant_repo.select(id)
merchants = merchant_repo.select_all()
found_transactions = transaction_repo.select_by_merchant(id)
return render_template(
"merchants/show.html",
one_merchant = one_merchant, merchants = merchants, found_transactions = found_transactions, login = 1, id = int(id)
)
@merchants_blueprint.route("/jeremy_e51/merchants/<id>/delete", methods=["POST"])
def delete_merchant(id):
found_transactions = transaction_repo.select_by_merchant(id)
for transaction in found_transactions:
transaction.merchant = None
transaction_repo.update(transaction)
merchant_repo.delete(id)
return redirect("/jeremy_e51/merchants")
```
#### File: spentrack_project/controllers/transactions_controller.py
```python
from flask import Blueprint, Flask, render_template, request, redirect
from models.transaction import Transaction
import repositories.transaction_repository as transaction_repo
import repositories.merchant_repository as merchant_repo
import repositories.tag_repository as tag_repo
transactions_blueprint = Blueprint("transactions", __name__)
@transactions_blueprint.route("/jeremy_e51")
def transactions():
order = 'order_date_desc'
transactions = transaction_repo.select_all()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
@transactions_blueprint.route("/jeremy_e51/new")
def new():
transactions = transaction_repo.select_all()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/new.html",
transactions = transactions, total = total, login = 1, new_cancel = 1
)
@transactions_blueprint.route("/jeremy_e51/<id>")
def transaction_show(id):
order = 'order_date_desc'
show_one = transaction_repo.select(id)
merchant = None
tag = None
if show_one.merchant: merchant = merchant_repo.select(show_one.merchant)
if show_one.tag: tag = tag_repo.select(show_one.tag)
transactions = transaction_repo.select_all()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/show.html",
transactions = transactions, show_one = show_one, merchant = merchant, tag = tag, total = total, login = 1, order = order
)
@transactions_blueprint.route("/jeremy_e51", methods=['POST'])
def add_transaction():
name = request.form['name']
description = request.form['description']
amount = request.form['amount']
date = request.form['date']
transaction = Transaction(name, description, amount, date)
transaction_repo.save(transaction)
return redirect('/jeremy_e51')
@transactions_blueprint.route("/jeremy_e51/<id>/edit")
def edit_transaction(id):
transactions = transaction_repo.select_all()
total = transaction_repo.total_amount(transactions)
merchants = merchant_repo.select_all()
tags = tag_repo.select_all()
return render_template(
'transactions/edit.html',
transactions = transactions, merchants = merchants, tags = tags, id = int(id), total = total, login = 1
)
@transactions_blueprint.route("/jeremy_e51/<id>", methods=['POST'])
def update_transaction(id):
transaction = transaction_repo.select(id)
if "tag_id" in request.form:
if request.form["tag_id"] != "None":
tag_id = request.form["tag_id"]
tag = tag_repo.select(tag_id)
transaction.tag = tag
if "merchant_id" in request.form:
if request.form["merchant_id"] != "None":
merchant_id = request.form["merchant_id"]
merchant = merchant_repo.select(merchant_id)
transaction.merchant = merchant
transaction_repo.update(transaction)
return redirect('/jeremy_e51')
@transactions_blueprint.route("/jeremy_e51/order")
def transactions_by_order():
order_date = request.args['order_date']
order_amount = request.args['order_amount']
order_name = request.args['order_name']
if order_date:
if order_date == 'desc':
order = 'order_date_desc'
transactions = transaction_repo.select_all()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
if order_date == 'asc':
order = 'order_date_asc'
transactions = transaction_repo.select_all_asc()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
if order_amount:
if order_amount == 'desc':
order = 'order_amount_desc'
transactions = transaction_repo.order_by_price_desc()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
if order_amount == 'asc':
order = 'order_amount_asc'
transactions = transaction_repo.order_by_price_asc()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
if order_name:
if order_name == 'desc':
order = 'order_name_desc'
transactions = transaction_repo.order_by_name_desc()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
if order_name == 'asc':
order = 'order_name_asc'
transactions = transaction_repo.order_by_name_asc()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
return redirect('/jeremy_e51')
```
#### File: spentrack_project/models/merchant.py
```python
class Merchant:
def __init__(self, name, total = 0, id = None):
self.name = name
self.total = total
self.id = id
```
|
{
"source": "jeremycole/structy",
"score": 3
}
|
#### File: py/structy/fix16.py
```python
_FIX16_ONE: int = 0x00010000
def _fix16_from_float(x: float) -> int:
return int(x * 65536.0 + 0.5 if x >= 0 else x * 65536.0 - 0.5)
def _fix16_to_float(x: int) -> float:
return float(x) / _FIX16_ONE
def _fix16_clamp(x: int) -> int:
if x > 2 ** 31 - 1:
return 2 ** 31 - 1
elif x < -(2 ** 31):
return -(2 ** 31)
return x
class Fix16:
"""A Q16.16 fixed-point value.
Args:
float_value: Creates a Fix16 that approximates the given float.
raw_value: Creates a Fix16 from the binary representation. This
is useful when deserializing Fix16 values from bytes.
"""
def __init__(self, float_value=None, raw_value=None):
if raw_value is not None:
self._value = _fix16_clamp(raw_value)
else:
self._value = _fix16_clamp(_fix16_from_float(float_value))
def __eq__(self, other):
return self._value == other._value
def __repr__(self):
return f"<Fix16 0x{self._value:08x} {_fix16_to_float(self._value)}>"
def __str__(self):
return str(_fix16_to_float(self._value)) + "Q16.16"
def __add__(self, other):
return Fix16(raw_value=self._value + other._value)
def __sub__(self, other):
return Fix16(raw_value=self._value - other._value)
def __mul__(self, other):
product = self._value * other._value
if product < 0:
product -= 1
result = product >> 16
result += (product & 0x8000) >> 15
return Fix16(raw_value=result)
def __truediv__(self, other):
a = self._value
b = other._value
remainder = a if a >= 0 else (-a)
divider = b if b >= 0 else (-b)
quotient = 0
bit_pos = 17
if divider & 0xFFF00000:
shifted_div = (divider >> 17) + 1
quotient = remainder // shifted_div
remainder -= (quotient * divider) >> 17
while not (divider & 0xF) and bit_pos >= 4:
divider >>= 4
bit_pos -= 4
while remainder and bit_pos >= 0:
shift = 67 - len(bin(-remainder)) & ~remainder >> 64
if shift > bit_pos: # pragma: no cover
shift = bit_pos
remainder <<= shift
bit_pos -= shift
div = remainder // divider
remainder = remainder % divider
quotient += div << bit_pos
remainder <<= 1
bit_pos -= 1
quotient += 1
result = quotient >> 1
if (a ^ b) & 0x80000000:
result = -result
return Fix16(raw_value=result)
def __mod__(self, other):
result = self._value % other._value
return Fix16(raw_value=result)
def __neg__(self):
return Fix16(raw_value=-self._value)
def __index__(self) -> int:
return self._value
```
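A short worked example of the Q16.16 representation above, using the same import path as the tests below. The raw integer is the float scaled by 2^16, and `__mul__` shifts the 32.32-bit product back down by 16 bits with rounding:
```python
from structy import fix16

a = fix16.Fix16(1.5)   # raw value 0x18000 == 1.5 * 65536
b = fix16.Fix16(2.25)  # raw value 0x24000
print(hex(a._value), hex(b._value))  # 0x18000 0x24000

# The raw product is Q32.32, so __mul__ shifts right by 16 (with rounding)
# to land back in Q16.16.
print(a * b)  # 3.375Q16.16
```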
#### File: tests/py/test_fix16.py
```python
from structy import fix16
class TestFix16:
def test_basic(self):
val = fix16.Fix16(1.0)
assert val._value == 65536
val = fix16.Fix16(-1.0)
assert val._value == -65536
val = fix16.Fix16(10.0)
assert val._value == 655360
val = fix16.Fix16(-10.0)
assert val._value == -655360
def test_saturate(self):
a = fix16.Fix16(65530.0)
b = fix16.Fix16(100.0)
assert (a + b) == fix16.Fix16(65535.0)
a = fix16.Fix16(-65530.0)
assert (a - b) == fix16.Fix16(-65535.0)
def test_operations(self):
a = fix16.Fix16(10.5)
b = fix16.Fix16(5.25)
assert (a + b) == fix16.Fix16(15.75)
assert (a - b) == fix16.Fix16(5.25)
assert (a * b) == fix16.Fix16(55.125)
assert (a * -b) == fix16.Fix16(-55.125)
assert (a / b) == fix16.Fix16(2)
assert (a / -b) == fix16.Fix16(-2)
assert (a / fix16.Fix16(raw_value=0xFFFFFFFF)) == fix16.Fix16(
raw_value=0x00000015
)
assert (a / fix16.Fix16(raw_value=0x00000015)) == fix16.Fix16(
raw_value=0xFFFFFFFF
)
assert (a % b) == fix16.Fix16(0)
def test_str_repr(self):
a = fix16.Fix16(10.5)
assert str(a) == "10.5Q16.16"
assert repr(a) == "<Fix16 0x000a8000 10.5>"
```
|
{
"source": "jeremy-compostella/edk2",
"score": 2
}
|
#### File: IntelFsp2Pkg/Tools/GenCfgOpt.py
```python
import os
import re
import sys
import struct
from datetime import date
from functools import reduce
# Generated file copyright header
__copyright_txt__ = """## @file
#
# THIS IS AUTO-GENERATED FILE BY BUILD TOOLS AND PLEASE DO NOT MAKE MODIFICATION.
#
# This file lists all VPD information for a platform collected by build.exe.
#
# Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
"""
__copyright_bsf__ = """/** @file
Boot Setting File for Platform Configuration.
Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
This file is automatically generated. Please do NOT modify !!!
**/
"""
__copyright_h__ = """/** @file
Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
This file is automatically generated. Please do NOT modify !!!
**/
"""
BuildOptionPcd = []
class CLogicalExpression:
def __init__(self):
self.index = 0
self.string = ''
def errExit(self, err = ''):
print ("ERROR: Express parsing for:")
print (" %s" % self.string)
print (" %s^" % (' ' * self.index))
if err:
print ("INFO : %s" % err)
raise SystemExit
def getNonNumber (self, n1, n2):
if not n1.isdigit():
return n1
if not n2.isdigit():
return n2
return None
def getCurr(self, lens = 1):
try:
if lens == -1:
return self.string[self.index :]
else:
if self.index + lens > len(self.string):
lens = len(self.string) - self.index
return self.string[self.index : self.index + lens]
except Exception:
return ''
def isLast(self):
return self.index == len(self.string)
def moveNext(self, len = 1):
self.index += len
def skipSpace(self):
while not self.isLast():
if self.getCurr() in ' \t':
self.moveNext()
else:
return
def normNumber (self, val):
return True if val else False
def getNumber(self, var):
var = var.strip()
if re.match('^0x[a-fA-F0-9]+$', var):
value = int(var, 16)
elif re.match('^[+-]?\d+$', var):
value = int(var, 10)
else:
value = None
return value
def parseValue(self):
self.skipSpace()
var = ''
while not self.isLast():
char = self.getCurr()
if re.match('^[\w.]', char):
var += char
self.moveNext()
else:
break
val = self.getNumber(var)
if val is None:
value = var
else:
value = "%d" % val
return value
def parseSingleOp(self):
self.skipSpace()
if re.match('^NOT\W', self.getCurr(-1)):
self.moveNext(3)
op = self.parseBrace()
val = self.getNumber (op)
if val is None:
self.errExit ("'%s' is not a number" % op)
return "%d" % (not self.normNumber(int(op)))
else:
return self.parseValue()
def parseBrace(self):
self.skipSpace()
char = self.getCurr()
if char == '(':
self.moveNext()
value = self.parseExpr()
self.skipSpace()
if self.getCurr() != ')':
self.errExit ("Expecting closing brace or operator")
self.moveNext()
return value
else:
value = self.parseSingleOp()
return value
def parseCompare(self):
value = self.parseBrace()
while True:
self.skipSpace()
char = self.getCurr()
if char in ['<', '>']:
self.moveNext()
next = self.getCurr()
if next == '=':
op = char + next
self.moveNext()
else:
op = char
result = self.parseBrace()
test = self.getNonNumber(result, value)
if test is None:
value = "%d" % self.normNumber(eval (value + op + result))
else:
self.errExit ("'%s' is not a valid number for comparison" % test)
elif char in ['=', '!']:
op = self.getCurr(2)
if op in ['==', '!=']:
self.moveNext(2)
result = self.parseBrace()
test = self.getNonNumber(result, value)
if test is None:
value = "%d" % self.normNumber((eval (value + op + result)))
else:
value = "%d" % self.normNumber(eval ("'" + value + "'" + op + "'" + result + "'"))
else:
break
else:
break
return value
def parseAnd(self):
value = self.parseCompare()
while True:
self.skipSpace()
if re.match('^AND\W', self.getCurr(-1)):
self.moveNext(3)
result = self.parseCompare()
test = self.getNonNumber(result, value)
if test is None:
value = "%d" % self.normNumber(int(value) & int(result))
else:
self.errExit ("'%s' is not a valid op number for AND" % test)
else:
break
return value
def parseOrXor(self):
value = self.parseAnd()
op = None
while True:
self.skipSpace()
op = None
if re.match('^XOR\W', self.getCurr(-1)):
self.moveNext(3)
op = '^'
elif re.match('^OR\W', self.getCurr(-1)):
self.moveNext(2)
op = '|'
else:
break
if op:
result = self.parseAnd()
test = self.getNonNumber(result, value)
if test is None:
value = "%d" % self.normNumber(eval (value + op + result))
else:
self.errExit ("'%s' is not a valid op number for XOR/OR" % test)
return value
def parseExpr(self):
return self.parseOrXor()
def getResult(self):
value = self.parseExpr()
self.skipSpace()
if not self.isLast():
self.errExit ("Unexpected character found '%s'" % self.getCurr())
test = self.getNumber(value)
if test is None:
self.errExit ("Result '%s' is not a number" % value)
return int(value)
def evaluateExpress (self, Expr):
self.index = 0
self.string = Expr
if self.getResult():
Result = True
else:
Result = False
return Result
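# CGenCfgOpt drives the three GenCfgOpt commands: it parses the platform DSC
# file (DEFINE macros, PCD sections, the PcdsDynamicVpd.Upd section and its
# !BSF/@Bsf/!HDR annotations), then emits the split UPD text files, the
# FspUpd/FsptUpd/FspmUpd/FspsUpd headers, and the BSF configuration file.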
class CGenCfgOpt:
def __init__(self):
self.Debug = False
self.Error = ''
self._GlobalDataDef = """
GlobalDataDef
SKUID = 0, "DEFAULT"
EndGlobalData
"""
self._BuidinOptionTxt = """
List &EN_DIS
Selection 0x1 , "Enabled"
Selection 0x0 , "Disabled"
EndList
"""
self._BsfKeyList = ['FIND','NAME','HELP','TYPE','PAGE','OPTION','ORDER']
self._HdrKeyList = ['HEADER','STRUCT', 'EMBED', 'COMMENT']
self._BuidinOption = {'$EN_DIS' : 'EN_DIS'}
self._MacroDict = {}
self._PcdsDict = {}
self._CfgBlkDict = {}
self._CfgPageDict = {}
self._CfgItemList = []
self._DscFile = ''
self._FvDir = ''
self._MapVer = 0
def ParseMacros (self, MacroDefStr):
# ['-DABC=1', '-D', 'CFG_DEBUG=1', '-D', 'CFG_OUTDIR=Build']
self._MacroDict = {}
IsExpression = False
for Macro in MacroDefStr:
if Macro.startswith('-D'):
IsExpression = True
if len(Macro) > 2:
Macro = Macro[2:]
else :
continue
if IsExpression:
IsExpression = False
Match = re.match("(\w+)=(.+)", Macro)
if Match:
self._MacroDict[Match.group(1)] = Match.group(2)
else:
Match = re.match("(\w+)", Macro)
if Match:
self._MacroDict[Match.group(1)] = ''
if len(self._MacroDict) == 0:
Error = 1
else:
Error = 0
if self.Debug:
print ("INFO : Macro dictionary:")
for Each in self._MacroDict:
print (" $(%s) = [ %s ]" % (Each , self._MacroDict[Each]))
return Error
def EvaulateIfdef (self, Macro):
Result = Macro in self._MacroDict
if self.Debug:
print ("INFO : Eval Ifdef [%s] : %s" % (Macro, Result))
return Result
def ExpandMacros (self, Input):
Line = Input
Match = re.findall("\$\(\w+\)", Input)
if Match:
for Each in Match:
Variable = Each[2:-1]
if Variable in self._MacroDict:
Line = Line.replace(Each, self._MacroDict[Variable])
else:
if self.Debug:
print ("WARN : %s is not defined" % Each)
Line = Line.replace(Each, Each[2:-1])
return Line
def ExpandPcds (self, Input):
Line = Input
Match = re.findall("(\w+\.\w+)", Input)
if Match:
for PcdName in Match:
if PcdName in self._PcdsDict:
Line = Line.replace(PcdName, self._PcdsDict[PcdName])
else:
if self.Debug:
print ("WARN : %s is not defined" % PcdName)
return Line
def EvaluateExpress (self, Expr):
ExpExpr = self.ExpandPcds(Expr)
ExpExpr = self.ExpandMacros(ExpExpr)
LogExpr = CLogicalExpression()
Result = LogExpr.evaluateExpress (ExpExpr)
if self.Debug:
print ("INFO : Eval Express [%s] : %s" % (Expr, Result))
return Result
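# FormatListValue rewrites an array default into per-byte little-endian form;
# e.g. struct 'UINT16' with value '{0x1234, 0x5678}' becomes
# '{0x34,0x12,0x78,0x56}'.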
def FormatListValue(self, ConfigDict):
Struct = ConfigDict['struct']
if Struct not in ['UINT8','UINT16','UINT32','UINT64']:
return
dataarray = []
binlist = ConfigDict['value'][1:-1].split(',')
for each in binlist:
each = each.strip()
if each.startswith('0x'):
value = int(each, 16)
else:
value = int(each)
dataarray.append(value)
unit = int(Struct[4:]) / 8
if int(ConfigDict['length']) != unit * len(dataarray):
raise Exception("Array size is not proper for '%s' !" % ConfigDict['cname'])
bytearray = []
for each in dataarray:
value = each
for loop in range(int(unit)):
bytearray.append("0x%02X" % (value & 0xFF))
value = value >> 8
newvalue = '{' + ','.join(bytearray) + '}'
ConfigDict['value'] = newvalue
return ""
def ParseDscFile (self, DscFile, FvDir):
Hardcode = False
AutoAlign = False
self._CfgItemList = []
self._CfgPageDict = {}
self._CfgBlkDict = {}
self._DscFile = DscFile
self._FvDir = FvDir
IsDefSect = False
IsPcdSect = False
IsUpdSect = False
IsVpdSect = False
IfStack = []
ElifStack = []
Error = 0
ConfigDict = {}
DscFd = open(DscFile, "r")
DscLines = DscFd.readlines()
DscFd.close()
MaxAlign = 32 #Default align to 32, but if there are 64 bit unit, align to 64
SizeAlign = 0 #record the struct max align
Base = 0 #Starting offset of sub-structure.
while len(DscLines):
DscLine = DscLines.pop(0).strip()
Handle = False
Match = re.match("^\[(.+)\]", DscLine)
if Match is not None:
IsDefSect = False
IsPcdSect = False
IsVpdSect = False
IsUpdSect = False
if Match.group(1).lower() == "Defines".lower():
IsDefSect = True
if (Match.group(1).lower() == "PcdsFeatureFlag".lower() or Match.group(1).lower() == "PcdsFixedAtBuild".lower()):
IsPcdSect = True
elif Match.group(1).lower() == "PcdsDynamicVpd.Upd".lower():
ConfigDict = {}
ConfigDict['header'] = 'ON'
ConfigDict['region'] = 'UPD'
ConfigDict['order'] = -1
ConfigDict['page'] = ''
ConfigDict['name'] = ''
ConfigDict['find'] = ''
ConfigDict['struct'] = ''
ConfigDict['embed'] = ''
ConfigDict['comment'] = ''
ConfigDict['subreg'] = []
IsUpdSect = True
Offset = 0
else:
if IsDefSect or IsPcdSect or IsUpdSect or IsVpdSect:
if re.match("^!else($|\s+#.+)", DscLine):
if IfStack:
IfStack[-1] = not IfStack[-1]
else:
print("ERROR: No paired '!if' found for '!else' for line '%s'" % DscLine)
raise SystemExit
elif re.match("^!endif($|\s+#.+)", DscLine):
if IfStack:
IfStack.pop()
Level = ElifStack.pop()
if Level > 0:
del IfStack[-Level:]
else:
print("ERROR: No paired '!if' found for '!endif' for line '%s'" % DscLine)
raise SystemExit
else:
Result = False
Match = re.match("!(ifdef|ifndef)\s+(.+)", DscLine)
if Match:
Result = self.EvaulateIfdef (Match.group(2))
if Match.group(1) == 'ifndef':
Result = not Result
IfStack.append(Result)
ElifStack.append(0)
else:
Match = re.match("!(if|elseif)\s+(.+)", DscLine.split("#")[0])
if Match:
Result = self.EvaluateExpress(Match.group(2))
if Match.group(1) == "if":
ElifStack.append(0)
IfStack.append(Result)
else: #elseif
if IfStack:
IfStack[-1] = not IfStack[-1]
IfStack.append(Result)
ElifStack[-1] = ElifStack[-1] + 1
else:
print("ERROR: No paired '!if' found for '!elif' for line '%s'" % DscLine)
raise SystemExit
else:
if IfStack:
Handle = reduce(lambda x,y: x and y, IfStack)
else:
Handle = True
if Handle:
Match = re.match("!include\s+(.+)", DscLine)
if Match:
IncludeFilePath = Match.group(1)
IncludeFilePath = self.ExpandMacros(IncludeFilePath)
PackagesPath = os.getenv("PACKAGES_PATH")
if PackagesPath:
for PackagePath in PackagesPath.split(os.pathsep):
IncludeFilePathAbs = os.path.join(os.path.normpath(PackagePath), os.path.normpath(IncludeFilePath))
if os.path.exists(IncludeFilePathAbs):
IncludeDsc = open(IncludeFilePathAbs, "r")
break
else:
IncludeDsc = open(IncludeFilePath, "r")
if IncludeDsc == None:
print("ERROR: Cannot open file '%s'" % IncludeFilePath)
raise SystemExit
NewDscLines = IncludeDsc.readlines()
IncludeDsc.close()
DscLines = NewDscLines + DscLines
Offset = 0
else:
if DscLine.startswith('!'):
print("ERROR: Unrecognized directive for line '%s'" % DscLine)
raise SystemExit
if not Handle:
continue
if IsDefSect:
#DEFINE UPD_TOOL_GUID = 8C3D856A-9BE6-468E-850A-24F7A8D38E09
#DEFINE FSP_T_UPD_TOOL_GUID = 34686CA3-34F9-4901-B82A-BA630F0714C6
#DEFINE FSP_M_UPD_TOOL_GUID = 39A250DB-E465-4DD1-A2AC-E2BD3C0E2385
#DEFINE FSP_S_UPD_TOOL_GUID = CAE3605B-5B34-4C85-B3D7-27D54273C40F
Match = re.match("^\s*(?:DEFINE\s+)*(\w+)\s*=\s*([/$()-.\w]+)", DscLine)
if Match:
self._MacroDict[Match.group(1)] = self.ExpandMacros(Match.group(2))
if self.Debug:
print ("INFO : DEFINE %s = [ %s ]" % (Match.group(1), self.ExpandMacros(Match.group(2))))
elif IsPcdSect:
#gSiPkgTokenSpaceGuid.PcdTxtEnable|FALSE
#gSiPkgTokenSpaceGuid.PcdOverclockEnable|TRUE
Match = re.match("^\s*([\w\.]+)\s*\|\s*(\w+)", DscLine)
if Match:
self._PcdsDict[Match.group(1)] = Match.group(2)
if self.Debug:
print ("INFO : PCD %s = [ %s ]" % (Match.group(1), Match.group(2)))
i = 0
while i < len(BuildOptionPcd):
Match = re.match("\s*([\w\.]+)\s*\=\s*(\w+)", BuildOptionPcd[i])
if Match:
self._PcdsDict[Match.group(1)] = Match.group(2)
i += 1
else:
Match = re.match("^\s*#\s+(!BSF|@Bsf|!HDR)\s+(.+)", DscLine)
if Match:
Remaining = Match.group(2)
if Match.group(1) == '!BSF' or Match.group(1) == '@Bsf':
Match = re.match("(?:^|.+\s+)PAGES:{(.+?)}", Remaining)
if Match:
# !BSF PAGES:{HSW:"Haswell System Agent", LPT:"Lynx Point PCH"}
PageList = Match.group(1).split(',')
for Page in PageList:
Page = Page.strip()
Match = re.match("(\w+):\"(.+)\"", Page)
self._CfgPageDict[Match.group(1)] = Match.group(2)
Match = re.match("(?:^|.+\s+)BLOCK:{NAME:\"(.+)\"\s*,\s*VER:\"(.+)\"\s*}", Remaining)
if Match:
self._CfgBlkDict['name'] = Match.group(1)
self._CfgBlkDict['ver'] = Match.group(2)
for Key in self._BsfKeyList:
Match = re.match("(?:^|.+\s+)%s:{(.+?)}" % Key, Remaining)
if Match:
if Key in ['NAME', 'HELP', 'OPTION'] and Match.group(1).startswith('+'):
ConfigDict[Key.lower()] += Match.group(1)[1:]
else:
ConfigDict[Key.lower()] = Match.group(1)
else:
for Key in self._HdrKeyList:
Match = re.match("(?:^|.+\s+)%s:{(.+?)}" % Key, Remaining)
if Match:
ConfigDict[Key.lower()] = Match.group(1)
Match = re.match("^\s*#\s+@Prompt\s+(.+)", DscLine)
if Match:
ConfigDict['name'] = Match.group(1)
Match = re.match("^\s*#\s*@ValidList\s*(.+)\s*\|\s*(.+)\s*\|\s*(.+)\s*", DscLine)
if Match:
if Match.group(2).strip() in self._BuidinOption:
ConfigDict['option'] = Match.group(2).strip()
else:
OptionValueList = Match.group(2).split(',')
OptionStringList = Match.group(3).split(',')
Index = 0
for Option in OptionValueList:
Option = Option.strip()
ConfigDict['option'] = ConfigDict['option'] + str(Option) + ':' + OptionStringList[Index].strip()
Index += 1
if Index in range(len(OptionValueList)):
ConfigDict['option'] += ', '
ConfigDict['type'] = "Combo"
Match = re.match("^\s*#\s*@ValidRange\s*(.+)\s*\|\s*(.+)\s*-\s*(.+)\s*", DscLine)
if Match:
if "0x" in Match.group(2) or "0x" in Match.group(3):
ConfigDict['type'] = "EditNum, HEX, (%s,%s)" % (Match.group(2), Match.group(3))
else:
ConfigDict['type'] = "EditNum, DEC, (%s,%s)" % (Match.group(2), Match.group(3))
Match = re.match("^\s*##\s+(.+)", DscLine)
if Match:
ConfigDict['help'] = Match.group(1)
# Check VPD/UPD
if IsUpdSect:
Match = re.match("^([_a-zA-Z0-9]+).([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+|\*)\s*\|\s*(\d+|0x[0-9a-fA-F]+)\s*\|\s*(.+)",DscLine)
else:
Match = re.match("^([_a-zA-Z0-9]+).([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+)(?:\s*\|\s*(.+))?", DscLine)
if Match:
ConfigDict['space'] = Match.group(1)
ConfigDict['cname'] = Match.group(2)
if Match.group(3) != '*':
Hardcode = True
Offset = int (Match.group(3), 16)
else:
AutoAlign = True
if Hardcode and AutoAlign:
print("Hardcode and auto-align mixed mode is not supported by GenCfgOpt")
raise SystemExit
ConfigDict['offset'] = Offset
if ConfigDict['order'] == -1:
ConfigDict['order'] = ConfigDict['offset'] << 8
else:
(Major, Minor) = ConfigDict['order'].split('.')
ConfigDict['order'] = (int (Major, 16) << 8 ) + int (Minor, 16)
if IsUpdSect:
Value = Match.group(5).strip()
if Match.group(4).startswith("0x"):
Length = int (Match.group(4), 16)
else :
Length = int (Match.group(4))
Offset += Length
else:
Value = Match.group(4)
if Value is None:
Value = ''
Value = Value.strip()
if '|' in Value:
Match = re.match("^.+\s*\|\s*(.+)", Value)
if Match:
Value = Match.group(1)
Length = -1
ConfigDict['length'] = Length
Match = re.match("\$\((\w+)\)", Value)
if Match:
if Match.group(1) in self._MacroDict:
Value = self._MacroDict[Match.group(1)]
ConfigDict['value'] = Value
if (len(Value) > 0) and (Value[0] == '{'):
Value = self.FormatListValue(ConfigDict)
if ConfigDict['name'] == '':
# Clear BSF specific items
ConfigDict['bsfname'] = ''
ConfigDict['help'] = ''
ConfigDict['type'] = ''
ConfigDict['option'] = ''
if IsUpdSect and AutoAlign:
ItemLength = int(ConfigDict['length'])
ItemOffset = int(ConfigDict['offset'])
ItemStruct = ConfigDict['struct']
Unit = 1
if ItemLength in [1, 2, 4, 8] and not ConfigDict['value'].startswith('{'):
Unit = ItemLength
# If there are 64 bit unit, align to 64
if Unit == 8:
MaxAlign = 64
SizeAlign = 8
if ItemStruct != '':
UnitDict = {'UINT8':1, 'UINT16':2, 'UINT32':4, 'UINT64':8}
if ItemStruct in ['UINT8', 'UINT16', 'UINT32', 'UINT64']:
Unit = UnitDict[ItemStruct]
# If there are 64 bit unit, align to 64
if Unit == 8:
MaxAlign = 64
SizeAlign = max(SizeAlign, Unit)
if (ConfigDict['embed'].find(':START') != -1):
Base = ItemOffset
SubOffset = ItemOffset - Base
SubRemainder = SubOffset % Unit
if SubRemainder:
Diff = Unit - SubRemainder
Offset = Offset + Diff
ItemOffset = ItemOffset + Diff
if (ConfigDict['embed'].find(':END') != -1):
Remainder = Offset % (MaxAlign/8) # MaxAlign is either 32 or 64
if Remainder:
Diff = int((MaxAlign/8) - Remainder)
Offset = Offset + Diff
ItemOffset = ItemOffset + Diff
MaxAlign = 32 # Reset to default 32 align when struct end
if (ConfigDict['cname'] == 'UpdTerminator'):
# ItemLength is the size of UpdTerminator
# Itemlength might be 16, 32, or 64
# Struct align to 64 if UpdTerminator
# or struct size is 64 bit, else align to 32
Remainder = Offset % max(ItemLength/8, 4, SizeAlign)
Offset = Offset + ItemLength
if Remainder:
Diff = int(max(ItemLength/8, 4, SizeAlign) - Remainder)
ItemOffset = ItemOffset + Diff
ConfigDict['offset'] = ItemOffset
self._CfgItemList.append(ConfigDict.copy())
ConfigDict['name'] = ''
ConfigDict['find'] = ''
ConfigDict['struct'] = ''
ConfigDict['embed'] = ''
ConfigDict['comment'] = ''
ConfigDict['order'] = -1
ConfigDict['subreg'] = []
ConfigDict['option'] = ''
else:
# It could be a virtual item as below
# !BSF FIELD:{SerialDebugPortAddress0:1}
# or
# @Bsf FIELD:{SerialDebugPortAddress0:1b}
Match = re.match("^\s*#\s+(!BSF|@Bsf)\s+FIELD:{(.+):(\d+)([Bb])?}", DscLine)
if Match:
SubCfgDict = ConfigDict.copy()
if (Match.group(4) == None) or (Match.group(4) == 'B'):
UnitBitLen = 8
elif Match.group(4) == 'b':
UnitBitLen = 1
else:
print("ERROR: Invalid BSF FIELD length for line '%s'" % DscLine)
raise SystemExit
SubCfgDict['cname'] = Match.group(2)
SubCfgDict['bitlength'] = int (Match.group(3)) * UnitBitLen
if SubCfgDict['bitlength'] > 0:
LastItem = self._CfgItemList[-1]
if len(LastItem['subreg']) == 0:
SubOffset = 0
else:
SubOffset = LastItem['subreg'][-1]['bitoffset'] + LastItem['subreg'][-1]['bitlength']
SubCfgDict['bitoffset'] = SubOffset
LastItem['subreg'].append (SubCfgDict.copy())
ConfigDict['name'] = ''
return Error
def GetBsfBitFields (self, subitem, bytes):
start = subitem['bitoffset']
end = start + subitem['bitlength']
bitsvalue = ''.join('{0:08b}'.format(i) for i in bytes[::-1])
bitsvalue = bitsvalue[::-1]
bitslen = len(bitsvalue)
if start > bitslen or end > bitslen:
print ("Invalid bits offset [%d,%d] for %s" % (start, end, subitem['name']))
raise SystemExit
return hex(int(bitsvalue[start:end][::-1], 2))
def UpdateSubRegionDefaultValue (self):
Error = 0
for Item in self._CfgItemList:
if len(Item['subreg']) == 0:
continue
bytearray = []
if Item['value'][0] == '{':
binlist = Item['value'][1:-1].split(',')
for each in binlist:
each = each.strip()
if each.startswith('0x'):
value = int(each, 16)
else:
value = int(each)
bytearray.append(value)
else:
if Item['value'].startswith('0x'):
value = int(Item['value'], 16)
else:
value = int(Item['value'])
idx = 0
while idx < Item['length']:
bytearray.append(value & 0xFF)
value = value >> 8
idx = idx + 1
for SubItem in Item['subreg']:
valuestr = self.GetBsfBitFields(SubItem, bytearray)
SubItem['value'] = valuestr
return Error
def CreateSplitUpdTxt (self, UpdTxtFile):
GuidList = ['FSP_T_UPD_TOOL_GUID','FSP_M_UPD_TOOL_GUID','FSP_S_UPD_TOOL_GUID']
SignatureList = ['0x545F', '0x4D5F','0x535F'] # _T, _M, and _S signature for FSPT, FSPM, FSPS
for Index in range(len(GuidList)):
UpdTxtFile = ''
FvDir = self._FvDir
if GuidList[Index] not in self._MacroDict:
self.Error = "%s definition is missing in DSC file" % (GuidList[Index])
return 1
if UpdTxtFile == '':
UpdTxtFile = os.path.join(FvDir, self._MacroDict[GuidList[Index]] + '.txt')
ReCreate = False
if not os.path.exists(UpdTxtFile):
ReCreate = True
else:
DscTime = os.path.getmtime(self._DscFile)
TxtTime = os.path.getmtime(UpdTxtFile)
if DscTime > TxtTime:
ReCreate = True
if not ReCreate:
# DSC has not been modified yet
# So don't have to re-generate other files
self.Error = 'No DSC file change, skip to create UPD TXT file'
return 256
TxtFd = open(UpdTxtFile, "w")
TxtFd.write("%s\n" % (__copyright_txt__ % date.today().year))
NextOffset = 0
SpaceIdx = 0
StartAddr = 0
EndAddr = 0
Default = 'DEFAULT|'
InRange = False
for Item in self._CfgItemList:
if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == SignatureList[Index]:
StartAddr = Item['offset']
NextOffset = StartAddr
InRange = True
if Item['cname'] == 'UpdTerminator' and InRange == True:
EndAddr = Item['offset']
InRange = False
InRange = False
for Item in self._CfgItemList:
if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == SignatureList[Index]:
InRange = True
if InRange != True:
continue
if Item['cname'] == 'UpdTerminator':
InRange = False
if Item['region'] != 'UPD':
continue
Offset = Item['offset']
if StartAddr > Offset or EndAddr < Offset:
continue
if NextOffset < Offset:
# insert one line
TxtFd.write("%s.UnusedUpdSpace%d|%s0x%04X|0x%04X|{0}\n" % (Item['space'], SpaceIdx, Default, NextOffset - StartAddr, Offset - NextOffset))
SpaceIdx = SpaceIdx + 1
NextOffset = Offset + Item['length']
TxtFd.write("%s.%s|%s0x%04X|%s|%s\n" % (Item['space'],Item['cname'],Default,Item['offset'] - StartAddr,Item['length'],Item['value']))
TxtFd.close()
return 0
def ProcessMultilines (self, String, MaxCharLength):
Multilines = ''
StringLength = len(String)
CurrentStringStart = 0
StringOffset = 0
BreakLineDict = []
if len(String) <= MaxCharLength:
while (StringOffset < StringLength):
if StringOffset >= 1:
if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n':
BreakLineDict.append (StringOffset + 1)
StringOffset += 1
if BreakLineDict != []:
for Each in BreakLineDict:
Multilines += " %s\n" % String[CurrentStringStart:Each].lstrip()
CurrentStringStart = Each
if StringLength - CurrentStringStart > 0:
Multilines += " %s\n" % String[CurrentStringStart:].lstrip()
else:
Multilines = " %s\n" % String
else:
NewLineStart = 0
NewLineCount = 0
FoundSpaceChar = False
while (StringOffset < StringLength):
if StringOffset >= 1:
if NewLineCount >= MaxCharLength - 1:
if String[StringOffset] == ' ' and StringLength - StringOffset > 10:
BreakLineDict.append (NewLineStart + NewLineCount)
NewLineStart = NewLineStart + NewLineCount
NewLineCount = 0
FoundSpaceChar = True
elif StringOffset == StringLength - 1 and FoundSpaceChar == False:
BreakLineDict.append (0)
if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n':
BreakLineDict.append (StringOffset + 1)
NewLineStart = StringOffset + 1
NewLineCount = 0
StringOffset += 1
NewLineCount += 1
if BreakLineDict != []:
BreakLineDict.sort ()
for Each in BreakLineDict:
if Each > 0:
Multilines += " %s\n" % String[CurrentStringStart:Each].lstrip()
CurrentStringStart = Each
if StringLength - CurrentStringStart > 0:
Multilines += " %s\n" % String[CurrentStringStart:].lstrip()
return Multilines
def CreateField (self, Item, Name, Length, Offset, Struct, BsfName, Help, Option):
PosName = 28
PosComment = 30
NameLine=''
HelpLine=''
OptionLine=''
IsArray = False
if Length in [1,2,4,8]:
Type = "UINT%d" % (Length * 8)
if Name.startswith("UnusedUpdSpace") and Length != 1:
IsArray = True
Type = "UINT8"
else:
IsArray = True
Type = "UINT8"
if Item and Item['value'].startswith('{'):
Type = "UINT8"
IsArray = True
if Struct != '':
Type = Struct
if Struct in ['UINT8','UINT16','UINT32','UINT64']:
IsArray = True
Unit = int(Type[4:]) / 8
Length = Length / Unit
else:
IsArray = False
if IsArray:
Name = Name + '[%d]' % Length
if len(Type) < PosName:
Space1 = PosName - len(Type)
else:
Space1 = 1
if BsfName != '':
NameLine=" - %s\n" % BsfName
else:
NameLine="\n"
if Help != '':
HelpLine = self.ProcessMultilines (Help, 80)
if Option != '':
OptionLine = self.ProcessMultilines (Option, 80)
if Offset is None:
OffsetStr = '????'
else:
OffsetStr = '0x%04X' % Offset
return "\n/** Offset %s%s%s%s**/\n %s%s%s;\n" % (OffsetStr, NameLine, HelpLine, OptionLine, Type, ' ' * Space1, Name,)
def PostProcessBody (self, TextBody):
NewTextBody = []
OldTextBody = []
IncludeLine = False
StructName = ''
VariableName = ''
IsUpdHdrDefined = False
IsUpdHeader = False
for Line in TextBody:
SplitToLines = Line.splitlines()
MatchComment = re.match("^/\*\sCOMMENT:(\w+):([\w|\W|\s]+)\s\*/\s([\s\S]*)", SplitToLines[0])
if MatchComment:
if MatchComment.group(1) == 'FSP_UPD_HEADER':
IsUpdHeader = True
else:
IsUpdHeader = False
if IsUpdHdrDefined != True or IsUpdHeader != True:
CommentLine = " " + MatchComment.group(2) + "\n"
NewTextBody.append("/**" + CommentLine + "**/\n")
Line = Line[(len(SplitToLines[0]) + 1):]
Match = re.match("^/\*\sEMBED_STRUCT:(\w+):(\w+):(START|END)\s\*/\s([\s\S]*)", Line)
if Match:
Line = Match.group(4)
if Match.group(1) == 'FSP_UPD_HEADER':
IsUpdHeader = True
else:
IsUpdHeader = False
if Match and Match.group(3) == 'START':
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append ('typedef struct {\n')
StructName = Match.group(1)
VariableName = Match.group(2)
MatchOffset = re.search('/\*\*\sOffset\s0x([a-fA-F0-9]+)', Line)
if MatchOffset:
Offset = int(MatchOffset.group(1), 16)
else:
Offset = None
IncludeLine = True
OldTextBody.append (self.CreateField (None, VariableName, 0, Offset, StructName, '', '', ''))
if IncludeLine:
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append (Line)
else:
OldTextBody.append (Line)
if Match and Match.group(3) == 'END':
if (StructName != Match.group(1)) or (VariableName != Match.group(2)):
print ("Unmatched struct name '%s' and '%s' !" % (StructName, Match.group(1)))
else:
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append ('} %s;\n\n' % StructName)
IsUpdHdrDefined = True
IncludeLine = False
NewTextBody.extend(OldTextBody)
return NewTextBody
def WriteLinesWithoutTailingSpace (self, HeaderFd, Line):
TxtBody2 = Line.splitlines(True)
for Line2 in TxtBody2:
Line2 = Line2.rstrip()
Line2 += '\n'
HeaderFd.write (Line2)
return 0
def CreateHeaderFile (self, InputHeaderFile):
FvDir = self._FvDir
HeaderFileName = 'FspUpd.h'
HeaderFile = os.path.join(FvDir, HeaderFileName)
# Check if header needs to be recreated
ReCreate = False
TxtBody = []
for Item in self._CfgItemList:
if str(Item['cname']) == 'Signature' and Item['length'] == 8:
Value = int(Item['value'], 16)
Chars = []
while Value != 0x0:
Chars.append(chr(Value & 0xFF))
Value = Value >> 8
SignatureStr = ''.join(Chars)
# Signature will be _T / _M / _S for FSPT / FSPM / FSPS accordingly
if '_T' in SignatureStr[6:6+2]:
TxtBody.append("#define FSPT_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr))
elif '_M' in SignatureStr[6:6+2]:
TxtBody.append("#define FSPM_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr))
elif '_S' in SignatureStr[6:6+2]:
TxtBody.append("#define FSPS_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr))
TxtBody.append("\n")
for Region in ['UPD']:
UpdOffsetTable = []
UpdSignature = ['0x545F', '0x4D5F', '0x535F'] #['_T', '_M', '_S'] signature for FSPT, FSPM, FSPS
UpdStructure = ['FSPT_UPD', 'FSPM_UPD', 'FSPS_UPD']
for Item in self._CfgItemList:
if Item["cname"] == 'Signature' and Item["value"][0:6] in UpdSignature:
UpdOffsetTable.append (Item["offset"])
for UpdIdx in range(len(UpdOffsetTable)):
CommentLine = ""
for Item in self._CfgItemList:
if Item["comment"] != '' and Item["offset"] >= UpdOffsetTable[UpdIdx]:
MatchComment = re.match("^(U|V)PD_DATA_REGION:([\w|\W|\s]+)", Item["comment"])
if MatchComment and MatchComment.group(1) == Region[0]:
CommentLine = " " + MatchComment.group(2) + "\n"
TxtBody.append("/**" + CommentLine + "**/\n")
elif Item["offset"] >= UpdOffsetTable[UpdIdx] and Item["comment"] == '':
Match = re.match("^FSP([\w|\W|\s])_UPD", UpdStructure[UpdIdx])
if Match:
TxtBody.append("/** Fsp " + Match.group(1) + " UPD Configuration\n**/\n")
TxtBody.append("typedef struct {\n")
NextOffset = 0
SpaceIdx = 0
Offset = 0
LastVisible = True
ResvOffset = 0
ResvIdx = 0
LineBuffer = []
InRange = False
for Item in self._CfgItemList:
if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == UpdSignature[UpdIdx] or Region[0] == 'V':
InRange = True
if InRange != True:
continue
if Item['cname'] == 'UpdTerminator':
InRange = False
if Item['region'] != Region:
continue
if Item["offset"] < UpdOffsetTable[UpdIdx]:
continue
NextVisible = LastVisible
if LastVisible and (Item['header'] == 'OFF'):
NextVisible = False
ResvOffset = Item['offset']
elif (not LastVisible) and Item['header'] == 'ON':
NextVisible = True
Name = "Reserved" + Region[0] + "pdSpace%d" % ResvIdx
ResvIdx = ResvIdx + 1
TxtBody.append(self.CreateField (Item, Name, Item["offset"] - ResvOffset, ResvOffset, '', '', '', ''))
if Offset < Item["offset"]:
if LastVisible:
Name = "Unused" + Region[0] + "pdSpace%d" % SpaceIdx
LineBuffer.append(self.CreateField (Item, Name, Item["offset"] - Offset, Offset, '', '', '', ''))
SpaceIdx = SpaceIdx + 1
Offset = Item["offset"]
LastVisible = NextVisible
Offset = Offset + Item["length"]
if LastVisible:
for Each in LineBuffer:
TxtBody.append (Each)
LineBuffer = []
Comment = Item["comment"]
Embed = Item["embed"].upper()
if Embed.endswith(':START') or Embed.endswith(':END'):
if not Comment == '' and Embed.endswith(':START'):
Marker = '/* COMMENT:%s */ \n' % Item["comment"]
Marker = Marker + '/* EMBED_STRUCT:%s */ ' % Item["embed"]
else:
Marker = '/* EMBED_STRUCT:%s */ ' % Item["embed"]
else:
if Embed == '':
Marker = ''
else:
self.Error = "Invalid embedded structure format '%s'!\n" % Item["embed"]
return 4
Line = Marker + self.CreateField (Item, Item["cname"], Item["length"], Item["offset"], Item['struct'], Item['name'], Item['help'], Item['option'])
TxtBody.append(Line)
if Item['cname'] == 'UpdTerminator':
break
TxtBody.append("} " + UpdStructure[UpdIdx] + ";\n\n")
# Handle the embedded data structure
TxtBody = self.PostProcessBody (TxtBody)
HeaderTFileName = 'FsptUpd.h'
HeaderMFileName = 'FspmUpd.h'
HeaderSFileName = 'FspsUpd.h'
UpdRegionCheck = ['FSPT', 'FSPM', 'FSPS'] # FSPX_UPD_REGION
UpdConfigCheck = ['FSP_T', 'FSP_M', 'FSP_S'] # FSP_X_CONFIG, FSP_X_TEST_CONFIG, FSP_X_RESTRICTED_CONFIG
UpdSignatureCheck = ['FSPT_UPD_SIGNATURE', 'FSPM_UPD_SIGNATURE', 'FSPS_UPD_SIGNATURE']
ExcludedSpecificUpd = ['FSPT_ARCH_UPD', 'FSPM_ARCH_UPD', 'FSPS_ARCH_UPD']
IncLines = []
if InputHeaderFile != '':
if not os.path.exists(InputHeaderFile):
self.Error = "Input header file '%s' does not exist" % InputHeaderFile
return 6
InFd = open(InputHeaderFile, "r")
IncLines = InFd.readlines()
InFd.close()
for item in range(len(UpdRegionCheck)):
if UpdRegionCheck[item] == 'FSPT':
HeaderFd = open(os.path.join(FvDir, HeaderTFileName), "w")
FileBase = os.path.basename(os.path.join(FvDir, HeaderTFileName))
elif UpdRegionCheck[item] == 'FSPM':
HeaderFd = open(os.path.join(FvDir, HeaderMFileName), "w")
FileBase = os.path.basename(os.path.join(FvDir, HeaderMFileName))
elif UpdRegionCheck[item] == 'FSPS':
HeaderFd = open(os.path.join(FvDir, HeaderSFileName), "w")
FileBase = os.path.basename(os.path.join(FvDir, HeaderSFileName))
FileName = FileBase.replace(".", "_").upper()
HeaderFd.write("%s\n" % (__copyright_h__ % date.today().year))
HeaderFd.write("#ifndef __%s__\n" % FileName)
HeaderFd.write("#define __%s__\n\n" % FileName)
HeaderFd.write("#include <%s>\n\n" % HeaderFileName)
HeaderFd.write("#pragma pack(1)\n\n")
Export = False
for Line in IncLines:
Match = re.search ("!EXPORT\s+([A-Z]+)\s+EXTERNAL_BOOTLOADER_STRUCT_(BEGIN|END)\s+", Line)
if Match:
if Match.group(2) == "BEGIN" and Match.group(1) == UpdRegionCheck[item]:
Export = True
continue
else:
Export = False
continue
if Export:
HeaderFd.write(Line)
HeaderFd.write("\n")
Index = 0
StartIndex = 0
EndIndex = 0
StructStart = []
StructStartWithComment = []
StructEnd = []
for Line in TxtBody:
Index += 1
Match = re.match("(typedef struct {)", Line)
if Match:
StartIndex = Index - 1
Match = re.match("}\s([_A-Z0-9]+);", Line)
if Match and (UpdRegionCheck[item] in Match.group(1) or UpdConfigCheck[item] in Match.group(1)) and (ExcludedSpecificUpd[item] not in Match.group(1)):
EndIndex = Index
StructStart.append(StartIndex)
StructEnd.append(EndIndex)
Index = 0
for Line in TxtBody:
Index += 1
for Item in range(len(StructStart)):
if Index == StructStart[Item]:
Match = re.match("^(/\*\*\s*)", Line)
if Match:
StructStartWithComment.append(StructStart[Item])
else:
StructStartWithComment.append(StructStart[Item] + 1)
Index = 0
for Line in TxtBody:
Index += 1
for Item in range(len(StructStart)):
if Index >= StructStartWithComment[Item] and Index <= StructEnd[Item]:
self.WriteLinesWithoutTailingSpace(HeaderFd, Line)
HeaderFd.write("#pragma pack()\n\n")
HeaderFd.write("#endif\n")
HeaderFd.close()
HeaderFd = open(HeaderFile, "w")
FileBase = os.path.basename(HeaderFile)
FileName = FileBase.replace(".", "_").upper()
HeaderFd.write("%s\n" % (__copyright_h__ % date.today().year))
HeaderFd.write("#ifndef __%s__\n" % FileName)
HeaderFd.write("#define __%s__\n\n" % FileName)
HeaderFd.write("#include <FspEas.h>\n\n")
HeaderFd.write("#pragma pack(1)\n\n")
for item in range(len(UpdRegionCheck)):
Index = 0
StartIndex = 0
EndIndex = 0
StructStart = []
StructStartWithComment = []
StructEnd = []
for Line in TxtBody:
Index += 1
Match = re.match("(typedef struct {)", Line)
if Match:
StartIndex = Index - 1
Match = re.match("#define\s([_A-Z0-9]+)\s*", Line)
if Match and UpdSignatureCheck[item] in Match.group(1):
StructStart.append(Index - 1)
StructEnd.append(Index)
Index = 0
for Line in TxtBody:
Index += 1
for Item in range(len(StructStart)):
if Index == StructStart[Item]:
Match = re.match("^(/\*\*\s*)", Line)
if Match:
StructStartWithComment.append(StructStart[Item])
else:
StructStartWithComment.append(StructStart[Item] + 1)
Index = 0
for Line in TxtBody:
Index += 1
for Item in range(len(StructStart)):
if Index >= StructStartWithComment[Item] and Index <= StructEnd[Item]:
self.WriteLinesWithoutTailingSpace(HeaderFd, Line)
HeaderFd.write("#pragma pack()\n\n")
HeaderFd.write("#endif\n")
HeaderFd.close()
return 0
def WriteBsfStruct (self, BsfFd, Item):
LogExpr = CLogicalExpression()
if Item['type'] == "None":
Space = "gPlatformFspPkgTokenSpaceGuid"
else:
Space = Item['space']
Line = " $%s_%s" % (Space, Item['cname'])
Match = re.match("\s*\{([x0-9a-fA-F,\s]+)\}\s*", Item['value'])
if Match:
DefaultValue = Match.group(1).strip()
else:
DefaultValue = Item['value'].strip()
if 'bitlength' in Item:
BsfFd.write(" %s%s%4d bits $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['bitlength'], DefaultValue))
else:
BsfFd.write(" %s%s%4d bytes $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['length'], DefaultValue))
TmpList = []
if Item['type'] == "Combo":
if not Item['option'] in self._BuidinOption:
OptList = Item['option'].split(',')
for Option in OptList:
Option = Option.strip()
(OpVal, OpStr) = Option.split(':')
test = LogExpr.getNumber (OpVal)
if test is None:
raise Exception("Selection Index '%s' is not a number" % OpVal)
TmpList.append((OpVal, OpStr))
return TmpList
def WriteBsfOption (self, BsfFd, Item):
PcdName = Item['space'] + '_' + Item['cname']
WriteHelp = 0
if Item['type'] == "Combo":
if Item['option'] in self._BuidinOption:
Options = self._BuidinOption[Item['option']]
else:
Options = PcdName
BsfFd.write(' %s $%s, "%s", &%s,\n' % (Item['type'], PcdName, Item['name'], Options))
WriteHelp = 1
elif Item['type'].startswith("EditNum"):
Match = re.match("EditNum\s*,\s*(HEX|DEC)\s*,\s*\((\d+|0x[0-9A-Fa-f]+)\s*,\s*(\d+|0x[0-9A-Fa-f]+)\)", Item['type'])
if Match:
BsfFd.write(' EditNum $%s, "%s", %s,\n' % (PcdName, Item['name'], Match.group(1)))
WriteHelp = 2
elif Item['type'].startswith("EditText"):
BsfFd.write(' %s $%s, "%s",\n' % (Item['type'], PcdName, Item['name']))
WriteHelp = 1
elif Item['type'] == "Table":
Columns = Item['option'].split(',')
if len(Columns) != 0:
BsfFd.write(' %s $%s "%s",' % (Item['type'], PcdName, Item['name']))
for Col in Columns:
Fmt = Col.split(':')
if len(Fmt) != 3:
raise Exception("Column format '%s' is invalid !" % Fmt)
try:
Dtype = int(Fmt[1].strip())
except:
raise Exception("Column size '%s' is invalid !" % Fmt[1])
BsfFd.write('\n Column "%s", %d bytes, %s' % (Fmt[0].strip(), Dtype, Fmt[2].strip()))
BsfFd.write(',\n')
WriteHelp = 1
if WriteHelp > 0:
HelpLines = Item['help'].split('\\n\\r')
FirstLine = True
for HelpLine in HelpLines:
if FirstLine:
FirstLine = False
BsfFd.write(' Help "%s"\n' % (HelpLine))
else:
BsfFd.write(' "%s"\n' % (HelpLine))
if WriteHelp == 2:
BsfFd.write(' "Valid range: %s ~ %s"\n' % (Match.group(2), Match.group(3)))
def GenerateBsfFile (self, BsfFile):
if BsfFile == '':
self.Error = "BSF output file '%s' is invalid" % BsfFile
return 1
Error = 0
OptionDict = {}
BsfFd = open(BsfFile, "w")
BsfFd.write("%s\n" % (__copyright_bsf__ % date.today().year))
BsfFd.write("%s\n" % self._GlobalDataDef)
BsfFd.write("StructDef\n")
NextOffset = -1
for Item in self._CfgItemList:
if Item['find'] != '':
BsfFd.write('\n Find "%s"\n' % Item['find'])
NextOffset = Item['offset'] + Item['length']
if Item['name'] != '':
if NextOffset != Item['offset']:
BsfFd.write(" Skip %d bytes\n" % (Item['offset'] - NextOffset))
if len(Item['subreg']) > 0:
NextOffset = Item['offset']
BitsOffset = NextOffset * 8
for SubItem in Item['subreg']:
BitsOffset += SubItem['bitlength']
if SubItem['name'] == '':
if 'bitlength' in SubItem:
BsfFd.write(" Skip %d bits\n" % (SubItem['bitlength']))
else:
BsfFd.write(" Skip %d bytes\n" % (SubItem['length']))
else:
Options = self.WriteBsfStruct(BsfFd, SubItem)
if len(Options) > 0:
OptionDict[SubItem['space']+'_'+SubItem['cname']] = Options
NextBitsOffset = (Item['offset'] + Item['length']) * 8
if NextBitsOffset > BitsOffset:
BitsGap = NextBitsOffset - BitsOffset
BitsRemain = BitsGap % 8
if BitsRemain:
BsfFd.write(" Skip %d bits\n" % BitsRemain)
BitsGap -= BitsRemain
BytesRemain = int(BitsGap / 8)
if BytesRemain:
BsfFd.write(" Skip %d bytes\n" % BytesRemain)
NextOffset = Item['offset'] + Item['length']
else:
NextOffset = Item['offset'] + Item['length']
Options = self.WriteBsfStruct(BsfFd, Item)
if len(Options) > 0:
OptionDict[Item['space']+'_'+Item['cname']] = Options
BsfFd.write("\nEndStruct\n\n")
BsfFd.write("%s" % self._BuidinOptionTxt)
for Each in OptionDict:
BsfFd.write("List &%s\n" % Each)
for Item in OptionDict[Each]:
BsfFd.write(' Selection %s , "%s"\n' % (Item[0], Item[1]))
BsfFd.write("EndList\n\n")
BsfFd.write("BeginInfoBlock\n")
BsfFd.write(' PPVer "%s"\n' % (self._CfgBlkDict['ver']))
BsfFd.write(' Description "%s"\n' % (self._CfgBlkDict['name']))
BsfFd.write("EndInfoBlock\n\n")
for Each in self._CfgPageDict:
BsfFd.write('Page "%s"\n' % self._CfgPageDict[Each])
BsfItems = []
for Item in self._CfgItemList:
if Item['name'] != '':
if Item['page'] != Each:
continue
if len(Item['subreg']) > 0:
for SubItem in Item['subreg']:
if SubItem['name'] != '':
BsfItems.append(SubItem)
else:
BsfItems.append(Item)
BsfItems.sort(key=lambda x: x['order'])
for Item in BsfItems:
self.WriteBsfOption (BsfFd, Item)
BsfFd.write("EndPage\n\n")
BsfFd.close()
return Error
def Usage():
print ("GenCfgOpt Version 0.55")
print ("Usage:")
print (" GenCfgOpt UPDTXT PlatformDscFile BuildFvDir [-D Macros]")
print (" GenCfgOpt HEADER PlatformDscFile BuildFvDir InputHFile [-D Macros]")
print (" GenCfgOpt GENBSF PlatformDscFile BuildFvDir BsfOutFile [-D Macros]")
def Main():
#
# Parse the options and args
#
i = 1
GenCfgOpt = CGenCfgOpt()
while i < len(sys.argv):
if sys.argv[i].strip().lower() == "--pcd":
BuildOptionPcd.append(sys.argv[i+1])
i += 1
i += 1
argc = len(sys.argv)
if argc < 4:
Usage()
return 1
else:
DscFile = sys.argv[2]
if not os.path.exists(DscFile):
print ("ERROR: Cannot open DSC file '%s' !" % DscFile)
return 2
OutFile = ''
if argc > 4:
if sys.argv[4][0] == '-':
Start = 4
else:
OutFile = sys.argv[4]
Start = 5
if argc > Start:
if GenCfgOpt.ParseMacros(sys.argv[Start:]) != 0:
print ("ERROR: Macro parsing failed !")
return 3
FvDir = sys.argv[3]
if not os.path.exists(FvDir):
os.makedirs(FvDir)
if GenCfgOpt.ParseDscFile(DscFile, FvDir) != 0:
print ("ERROR: %s !" % GenCfgOpt.Error)
return 5
if GenCfgOpt.UpdateSubRegionDefaultValue() != 0:
print ("ERROR: %s !" % GenCfgOpt.Error)
return 7
if sys.argv[1] == "UPDTXT":
Ret = GenCfgOpt.CreateSplitUpdTxt(OutFile)
if Ret != 0:
# No change is detected
if Ret == 256:
print ("INFO: %s !" % (GenCfgOpt.Error))
else :
print ("ERROR: %s !" % (GenCfgOpt.Error))
return Ret
elif sys.argv[1] == "HEADER":
if GenCfgOpt.CreateHeaderFile(OutFile) != 0:
print ("ERROR: %s !" % GenCfgOpt.Error)
return 8
elif sys.argv[1] == "GENBSF":
if GenCfgOpt.GenerateBsfFile(OutFile) != 0:
print ("ERROR: %s !" % GenCfgOpt.Error)
return 9
else:
if argc < 5:
Usage()
return 1
print ("ERROR: Unknown command '%s' !" % sys.argv[1])
Usage()
return 1
return 0
return 0
if __name__ == '__main__':
sys.exit(Main())
```
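`CLogicalExpression` above is a small recursive-descent evaluator for the DSC `!if`/`!elseif` conditions, supporting NOT/AND/OR/XOR and comparisons over decimal or hex literals. A minimal sketch of using it on its own, assuming the file is importable as `GenCfgOpt`; in normal operation `CGenCfgOpt.EvaluateExpress` expands macros and PCDs first:
```python
from GenCfgOpt import CLogicalExpression  # assumed import path

expr = CLogicalExpression()
print(expr.evaluateExpress("0x10 >= 16 AND NOT (2 == 3)"))  # True
print(expr.evaluateExpress("1 XOR 1"))                      # False
```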
|
{
"source": "jeremy-compostella/home-manager",
"score": 2
}
|
#### File: home-manager/src/car_charger.py
```python
import os
import sys
from datetime import datetime, timedelta
from select import select
from time import sleep
import Pyro5
import requests
from cachetools import TTLCache
from wallbox import Wallbox
from car_sensor import CarSensorProxy
from power_sensor import RecordScale
from scheduler import Priority, SchedulerProxy, Task
from sensor import SensorReader
from tools import NameServer, Settings, debug, init, log_exception
from watchdog import WatchdogProxy
DEFAULT_SETTINGS = {'power_sensor_key': 'EV',
'min_available_current': 6,
'cycle_length': 15}
MODULE_NAME = 'car_charger'
class CarCharger(Task):
'''Wallbox car charger Task.
This task handles a Wallbox car charger and automatically adjusts the
charge rate based on produced power availability.
'''
FULLY_CHARGED = 'Connected: waiting for car demand'
PLUGGED_IN = ['Charging', FULLY_CHARGED,
'Connected: waiting for next schedule',
'Paused by user']
def __init__(self, wallbox: Wallbox, charger_id: int, settings: Settings):
Task.__init__(self, Priority.LOW, keys=[settings.power_sensor_key],
auto_adjust=True)
self.wallbox = wallbox
self.charger_id = charger_id
self.settings = settings
self.cache = TTLCache(1, timedelta(seconds=3), datetime.now)
self.state_of_charge = None
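# __call retries each Wallbox API call up to three times, re-authenticating
# after an HTTPError and sleeping briefly after a read timeout, before giving
# up with a RuntimeError.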
def __call(self, name, *args):
for _ in range(3):
try:
method = getattr(self.wallbox, name)
return method(self.charger_id, *args)
except requests.exceptions.HTTPError:
log_exception('%s%s failed' % (name, args), *sys.exc_info())
self.wallbox.authenticate()
except requests.exceptions.ReadTimeout:
log_exception('%s%s failed' % (name, args), *sys.exc_info())
sleep(0.5)
raise RuntimeError('%s%s failed too many times' % (name, args))
@property
def status(self):
'''JSON representation of the charger status.'''
try:
return self.cache['status']
except KeyError:
self.cache['status'] = self.__call('getChargerStatus')
return self.cache['status']
@Pyro5.api.expose
@Pyro5.api.oneway
def start(self):
debug('Starting')
self.__call('resumeChargingSession')
self.cache.clear()
@Pyro5.api.expose
@Pyro5.api.oneway
def stop(self):
debug('Stopping')
self.__call('pauseChargingSession')
self.__call('setMaxChargingCurrent', self.min_available_current)
self.cache.clear()
@property
def status_description(self):
'''String describing the charger status.'''
return self.status['status_description']
@property
def min_available_current(self):
'''Minimum current supported by the charger in Ampere.'''
return self.settings.min_available_current
@property
def max_available_current(self):
'''Maximal current supported by the charger in Ampere.'''
return self.status['config_data']['max_available_current']
@Pyro5.api.expose
def is_running(self) -> bool:
return self.status_description == 'Charging'
@Pyro5.api.expose
def is_stoppable(self):
return True
@Pyro5.api.expose
def is_runnable(self):
'''True if calling the 'start' function would initiate charging.'''
return self.status_description in self.PLUGGED_IN \
and self.status_description != self.FULLY_CHARGED
@Pyro5.api.expose
def meet_running_criteria(self, ratio, power=0) -> bool:
debug('meet_running_criteria(%.3f, %.3f)' % (ratio, power))
if not self.is_runnable():
return False
if self.is_running():
return ratio >= 0.8
return ratio >= 1
@property
@Pyro5.api.expose
def desc(self):
description = '%s(%s' % (self.__class__.__name__, self.priority.name)
if self.state_of_charge is not None:
description += ', %.1f%%' % self.state_of_charge
return description + ')'
@property
@Pyro5.api.expose
def power(self):
return self.min_available_current * .24
def adjust_priority(self, state_of_charge):
'''Update the priority according to the current state of charge'''
self.state_of_charge = state_of_charge
thresholds = {Priority.URGENT: 50, Priority.HIGH: 65,
Priority.MEDIUM: 80, Priority.LOW: 101}
for priority in reversed(Priority):
if state_of_charge < thresholds[priority]:
self.priority = priority
break
def current_rate_for(self, power):
'''Return the appropriate current in Ampere for POWER in kW.'''
rate = max(int(power / .24), self.min_available_current)
return min(rate, self.max_available_current)
def adjust_charge_rate(self, record):
'''Adjust the charging rate according to the instant POWER record.'''
available = -(record['net'] - self.usage(record))
current = self.current_rate_for(available)
if self.status['config_data']['max_charging_current'] != current:
debug('Adjusting to %dA (%.2f kW)' % (current, available))
self.__call('setMaxChargingCurrent', current)
def main():
'''Register and run the car charger task.'''
# pylint: disable=too-many-locals
base = os.path.splitext(__file__)[0]
config = init(base + '.log')['Wallbox']
settings = Settings(base + '.ini', DEFAULT_SETTINGS)
wallbox = Wallbox(config['login'], config['password'],
requestGetTimeout=5)
wallbox.authenticate()
device_id = int(config['device_id'])
if device_id not in wallbox.getChargersList():
raise RuntimeError('%d charger ID does not exist' % device_id)
task = CarCharger(wallbox, device_id, settings)
Pyro5.config.COMMTIMEOUT = 5
daemon = Pyro5.api.Daemon()
nameserver = NameServer()
uri = daemon.register(task)
nameserver.register_task(MODULE_NAME, uri)
sensor = CarSensorProxy()
power_sensor = SensorReader('power')
power_simulator = SensorReader('power_simulator')
scheduler = SchedulerProxy()
watchdog = WatchdogProxy()
debug("... is now ready to run")
while True:
settings.load()
watchdog.register(os.getpid(), MODULE_NAME)
watchdog.kick(os.getpid())
try:
nameserver.register_task(MODULE_NAME, uri)
except RuntimeError:
log_exception('Failed to register the task',
*sys.exc_info())
# Self-testing: on basic operation failure unregister from the
# scheduler.
try:
task.status_description # pylint: disable=pointless-statement
scheduler.register_task(uri)
except RuntimeError:
debug('Self-test failed, unregister from the scheduler')
scheduler.unregister_task(uri)
next_cycle = datetime.now() + timedelta(
# pylint: disable=maybe-no-member
seconds=settings.cycle_length)
while True:
timeout = next_cycle - datetime.now()
sockets, _, _ = select(daemon.sockets, [], [],
timeout.seconds
+ timeout.microseconds / 1000000)
if sockets:
daemon.events(sockets)
if datetime.now() >= next_cycle:
break
try:
task.adjust_priority(sensor.read()['state of charge'])
except RuntimeError:
debug('Could not read current state of charge')
if not task.is_running():
continue
record = power_sensor.read(scale=RecordScale.SECOND)
if not record:
debug('No new power record, use the simulator')
record = power_simulator.read(scale=RecordScale.SECOND)
if not record:
debug('Failed to get a record from the simulator')
if record:
try:
task.adjust_charge_rate(record)
except RuntimeError:
log_exception('adjust_charge_rate() failed', *sys.exc_info())
if __name__ == "__main__":
main()
```
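A worked illustration of how `CarCharger.current_rate_for` maps available power to a charging current: the `0.24` factor treats one ampere as roughly 0.24 kW (about 240 V), and the result is clamped between the charger's minimum and maximum currents. The limits below are hypothetical stand-ins for the Wallbox configuration:
```python
MIN_CURRENT, MAX_CURRENT = 6, 40  # amperes; placeholder charger limits

def current_rate_for(power_kw):
    rate = max(int(power_kw / .24), MIN_CURRENT)
    return min(rate, MAX_CURRENT)

print(current_rate_for(1.0))   # 6  -> below the 6 A floor
print(current_rate_for(4.8))   # 20 -> 4.8 kW / 0.24 kW per ampere
print(current_rate_for(12.0))  # 40 -> clamped to the charger maximum
```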
#### File: home-manager/src/monitor.py
```python
import os
import sys
from select import select
import Pyro5.api
from sensor import Sensor
from tools import NameServer, Settings, debug, init, log_exception
DEFAULT_SETTINGS = {'max_loop_duration': 5}
MODULE_NAME = 'monitor'
class Monitor(Sensor):
def __init__(self):
self._states = {}
@Pyro5.api.expose
def track(self, name, state):
'''Update or start tracking "name" with current value "state"'''
if not isinstance(state, bool):
raise TypeError('state must be a boolean')
self._states[name] = state
@Pyro5.api.expose
def read(self, **kwargs):
return self._states
@Pyro5.api.expose
def units(self, **kwargs):
return {key:'binary' for key, _ in self._states.items()}
class MonitorProxy:
'''Helper class for monitor service users.
This class is a wrapper with exception handler of the monitor service. It
provides convenience for modules using the monitor by suppressing the
burden of locating the monitor and handling the various remote object
related errors.
'''
def __init__(self, max_attempt=2):
self._monitor = None
self.max_attempt = max_attempt
def track(self, *args):
for attempt in range(self.max_attempt):
if not self._monitor:
try:
self._monitor = NameServer().locate_service(MODULE_NAME)
except Pyro5.errors.NamingError:
if attempt == self.max_attempt - 1:
log_exception('Failed to locate the monitor',
*sys.exc_info())
except Pyro5.errors.CommunicationError:
if attempt == self.max_attempt - 1:
log_exception('Cannot communicate with the nameserver',
*sys.exc_info())
if self._monitor:
try:
self._monitor.track(*args)
except Pyro5.errors.PyroError:
if attempt == self.max_attempt - 1:
log_exception('Communication failed with the monitor',
*sys.exc_info())
self._monitor = None
def main():
# pylint: disable=too-many-locals
base = os.path.splitext(__file__)[0]
init(base + '.log')
settings = Settings(base + '.ini', DEFAULT_SETTINGS)
Pyro5.config.MAX_RETRIES = 3
daemon = Pyro5.api.Daemon()
nameserver = NameServer()
uri = daemon.register(Monitor())
nameserver.register_sensor(MODULE_NAME, uri)
nameserver.register_service(MODULE_NAME, uri)
debug("... is now ready to run")
while True:
try:
nameserver.register_sensor(MODULE_NAME, uri)
nameserver.register_service(MODULE_NAME, uri)
except RuntimeError:
log_exception('Failed to register the monitor service',
*sys.exc_info())
sockets, _, _ = select(daemon.sockets, [], [],
# pylint: disable=maybe-no-member
settings.max_loop_duration)
if sockets:
daemon.events(sockets)
if __name__ == "__main__":
main()
```
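`MonitorProxy` hides the Pyro5 name-server lookup and retry handling, so client modules only call `track`. A minimal usage sketch, assuming the monitor service and the Pyro5 name server are running; the service name and states are illustrative:
```python
from monitor import MonitorProxy

monitor = MonitorProxy()
monitor.track('car_charger', True)   # report the module as healthy
monitor.track('car_charger', False)  # or flag a failure on the next cycle
```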
|
{
"source": "jeremycook123/aws-key-disabler-2020",
"score": 2
}
|
#### File: lambda/src/RotateAccessKey.py
```python
import boto3
from datetime import datetime
import dateutil.tz
import json
import ast
import re
import time
BUILD_VERSION = '@@buildversion'
AWS_REGION = '@@deploymentregion'
AWS_ACCOUNT_NAME = '@@awsaccountname'
AWS_ACCOUNT_ID = '@@awsaccountid'
SKIP_USERNAMES = '@@skipusernames'
try:
#package.json via iam.skip_usernames
SKIP_USERNAMES = ast.literal_eval(SKIP_USERNAMES)
except:
#commandline via --skipusers parameter
SKIP_USERNAMES = SKIP_USERNAMES.split(",")
EMAIL_FROM = '@@emailfrom'
EMAIL_ADMIN_ENABLED = ast.literal_eval('@@emailadmin')
EMAIL_ADMIN_TO = '@@emailadminto'
EMAIL_USER_CONFIG = ast.literal_eval('@@emailuser')
EMAIL_REGEX = re.compile(r'[^@]+@[^@]+\.[^@]+')
# Length of mask over the IAM Access Key
MASK_ACCESS_KEY_LENGTH = ast.literal_eval('@@maskaccesskeylength')
# First email warning
FIRST_WARNING_NUM_DAYS = @@first_warning_num_days
FIRST_WARNING_MESSAGE = '@@first_warning_message'
# Last email warning
LAST_WARNING_NUM_DAYS = @@last_warning_num_days
LAST_WARNING_MESSAGE = '@@last_warning_message'
# Max AGE days of key after which it is considered EXPIRED (deactivated)
KEY_MAX_AGE_IN_DAYS = @@key_max_age_in_days
KEY_EXPIRED_MESSAGE = '@@key_expired_message'
KEY_YOUNG_MESSAGE = '@@key_young_message'
try:
THROTTLE = @@throttle
THROTTLE = THROTTLE / 1000
except:
THROTTLE = 0
# ==========================================================
# Character length of an IAM Access Key
ACCESS_KEY_LENGTH = 20
KEY_STATE_ACTIVE = "Active"
KEY_STATE_INACTIVE = "Inactive"
# ==========================================================
#check to see if the MASK_ACCESS_KEY_LENGTH has been misconfigured
if MASK_ACCESS_KEY_LENGTH > ACCESS_KEY_LENGTH:
MASK_ACCESS_KEY_LENGTH = 16
# ==========================================================
def tzutc():
return dateutil.tz.tzutc()
def key_age(key_created_date):
    """Return the age of the key in whole days."""
    tz_info = key_created_date.tzinfo
    age = datetime.now(tz_info) - key_created_date
    # timedelta.days gives whole days directly and avoids parsing str(age),
    # which mishandles the singular "1 day" form.
    return age.days
def send_admin_invaliduseremailaddress_email(userlist):
subject = f'AWS IAM Access Key Rotation for Account: {AWS_ACCOUNT_NAME} / {AWS_ACCOUNT_ID} - Detected Missing/Invalid Email for Users Report'
body = f'The following report contains a list of users who do NOT appear to have a valid email address. Please review username and tags for each user within IAM.\n\n{userlist}'
send_admin_email(subject, body)
def send_admin_deactivate_email(userlist):
subject = f'AWS IAM Access Key Rotation for Account: {AWS_ACCOUNT_NAME} / {AWS_ACCOUNT_ID} - Deactivation of Access Key for Users Report'
body = f'The following report contains a list of users who have had their access key automatically deactivated due to it being too old.\n\n{userlist}'
send_admin_email(subject, body)
def send_admin_completion_email(finished, deactivated_report):
user_list = '<table cellspacing="4" cellpadding="4" border="0">'
for user in deactivated_report["users"]:
if len(user['keys']) > 0:
user_list += '<tr>'
user_list += '<td valign="top">'
user_list += f'User <b>{user["username"]}</b> has keys in the following state:'
user_list += '</td>'
user_list += '<td valign="top">'
user_list += '<table cellspacing="0" cellpadding="0" border="0">'
for key in user["keys"]:
user_list += '<tr><td>'
user_list += f'{key["accesskeyid"]}, age {key["age"]}, {key["state"]}'
user_list += '</td></tr>'
for key in user["keys"]:
user_list += '<tr><td>'
user_list += f'{key["accesskeyid"]}, age {key["age"]}, {key["state"]}'
user_list += '</td></tr>'
for key in user["keys"]:
user_list += '<tr><td>'
user_list += f'{key["accesskeyid"]}, age {key["age"]}, {key["state"]}'
user_list += '</td></tr>'
user_list += '</table>'
user_list += '</td>'
user_list += '</tr>'
user_list += '</table>'
subject = f'AWS IAM Access Key Rotation for Account: {AWS_ACCOUNT_NAME} / {AWS_ACCOUNT_ID} - Completion Report'
body = f"""<html>
<head></head>
<body>
<h1>Deactivation Report</h1>
<p>AWS IAM Access Key Rotation Lambda Function (cron job) finished successfully.</p>
<hr>
{user_list}
</body>
</html>"""
send_admin_email(subject, body)
def send_admin_email(subject, body):
client = boto3.client('ses', region_name=AWS_REGION)
response = client.send_email(
Source=EMAIL_FROM,
Destination={
'ToAddresses': [EMAIL_ADMIN_TO]
},
Message={
'Subject': {
'Charset': 'UTF-8',
'Data': subject
},
'Body': {
'Html': {
'Charset': 'UTF-8',
'Data': body
}
}
})
#Will send email containing one of the following messages:
#Your AWS IAM Access Key (****************34MI) is due to expire in 1 week (7 days) - please rotate.
#Your AWS IAM Access Key (****************34MI) is due to expire in 1 day (tomorrow) - please rotate.
#Your AWS IAM Access Key (****************34MI) is now EXPIRED! Changing key to INACTIVE state - please rotate.
def send_user_email(email_to, key, message):
try:
if not email_to or not EMAIL_REGEX.match(email_to):
return
client = boto3.client('ses', region_name=AWS_REGION)
response = client.send_email(
Source=EMAIL_FROM,
Destination={
'ToAddresses': [email_to]
},
Message={
'Subject': {
'Data': 'AWS IAM Access Key Rotation'
},
'Body': {
'Html': {
'Data': f'Your AWS IAM Access Key {key} {message}.'
}
}
})
except:
pass
def mask_access_key(access_key):
return access_key[-(ACCESS_KEY_LENGTH-MASK_ACCESS_KEY_LENGTH):].rjust(len(access_key), "*")
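# Illustrative example only (hypothetical key, assuming MASK_ACCESS_KEY_LENGTH is 16):
#   mask_access_key("AKIAIOSFODNN7EXAMPLE") -> "****************MPLE"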
def lambda_handler(event, context):
print('*****************************')
print(f'RotateAccessKey v{BUILD_VERSION}: starting...')
print("*****************************")
# Connect to AWS APIs
client = boto3.client('iam')
users = {}
data = client.list_users()
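    # NOTE: list_users() returns a single page of results (IsTruncated/Marker);
    # accounts with many users would need a paginator here.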
userindex = 0
for user in data['Users']:
userid = user['UserId']
username = user['UserName']
usertags = client.list_user_tags(UserName=username)
users[userid] = { "username": username, "tags": usertags}
users_report = []
users_list_first_warning = []
users_list_last_warning = []
users_list_keys_deactivated = []
users_list_email_tag_invalid = []
email_user_enabled = False
try:
email_user_enabled = EMAIL_USER_CONFIG["enabled"]
except:
pass
for user in users:
userindex += 1
user_keys = []
username = users[user]["username"]
usertags = users[user]["tags"]
# check to see if the current user is a special service account
if username in SKIP_USERNAMES:
print(f'detected special username (configured service account etc.) {username}, key rotation skipped for this account...')
continue
# determine if USER based email address is enabled,
# it can be either username based or tag based,
# attempt to extract and set email address for later use
user_email_address = None
if email_user_enabled:
try:
if EMAIL_USER_CONFIG["emailaddressconfig"]["type"] == "username":
if EMAIL_REGEX.match(username):
user_email_address = username
elif EMAIL_USER_CONFIG["emailaddressconfig"]["type"] == "tag":
validuseremailaddress = False
for tag in usertags["Tags"]:
if tag["Key"] == EMAIL_USER_CONFIG["emailaddressconfig"]["tagname"]:
tag_emailaddress = tag["Value"]
if EMAIL_REGEX.match(tag_emailaddress):
user_email_address = tag_emailaddress
validuseremailaddress = True
break
if not validuseremailaddress:
users_list_email_tag_invalid.append(username)
except Exception:
pass
access_keys = client.list_access_keys(UserName=username)['AccessKeyMetadata']
for access_key in access_keys:
access_key_id = access_key['AccessKeyId']
masked_access_key_id = mask_access_key(access_key_id)
existing_key_status = access_key['Status']
key_created_date = access_key['CreateDate']
age = key_age(key_created_date)
# we only need to examine the currently Active and about to expire keys
if existing_key_status == "Inactive":
key_state = 'key is already in an INACTIVE state'
key_info = {'accesskeyid': masked_access_key_id, 'age': age, 'state': key_state, 'changed': False}
user_keys.append(key_info)
continue
key_state = ''
key_state_changed = False
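            # Age thresholds, checked in order: still young, first warning day,
            # last warning day, then expired (key is set to Inactive).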
if age < FIRST_WARNING_NUM_DAYS:
key_state = KEY_YOUNG_MESSAGE
elif age == FIRST_WARNING_NUM_DAYS:
key_state = FIRST_WARNING_MESSAGE
users_list_first_warning.append(username)
if email_user_enabled and user_email_address:
send_user_email(user_email_address, masked_access_key_id, FIRST_WARNING_MESSAGE)
elif age == LAST_WARNING_NUM_DAYS:
key_state = LAST_WARNING_MESSAGE
users_list_last_warning.append(username)
if email_user_enabled and user_email_address:
send_user_email(user_email_address, masked_access_key_id, LAST_WARNING_MESSAGE)
elif age >= KEY_MAX_AGE_IN_DAYS:
key_state = KEY_EXPIRED_MESSAGE
users_list_keys_deactivated.append(username)
client.update_access_key(UserName=username, AccessKeyId=access_key_id, Status=KEY_STATE_INACTIVE)
if email_user_enabled and user_email_address:
send_user_email(user_email_address, masked_access_key_id, KEY_EXPIRED_MESSAGE)
key_state_changed = True
key_info = {'accesskeyid': masked_access_key_id, 'age': age, 'state': key_state, 'changed': key_state_changed}
user_keys.append(key_info)
users_report.append({'userid': userindex, 'username': username, 'keys': user_keys})
if THROTTLE > 0:
time.sleep(THROTTLE)
finished = str(datetime.now())
deactivated_report = {'reportdate': finished, 'users': users_report}
if EMAIL_ADMIN_ENABLED:
try:
send_admin_completion_email(finished, deactivated_report)
if len(users_list_keys_deactivated) > 0:
send_admin_deactivate_email(users_list_keys_deactivated)
if EMAIL_USER_CONFIG["emailaddressconfig"]["reportmissingtag"] and len(users_list_email_tag_invalid) > 0:
send_admin_invaliduseremailaddress_email(users_list_email_tag_invalid)
except:
pass
print(f'List of usernames notified with first warning: {users_list_first_warning}')
print(f'List of usernames notified with last warning: {users_list_last_warning}')
print(f'List of usernames whose keys were deactivated today: {users_list_keys_deactivated}')
    print(f"List of usernames who don't have a valid email tag: {users_list_email_tag_invalid}")
print('*****************************')
print(f'Completed (v{BUILD_VERSION}): {finished}')
print('*****************************')
return deactivated_report
#if __name__ == "__main__":
# event = 1
# context = 1
# lambda_handler(event, context)
```
|
{
"source": "jeremycote/ImageDatabase",
"score": 3
}
|
#### File: ImageDatabase/src/Img2VecResnet18.py
```python
import os
import numpy as np
import pandas as pd
from PIL import Image
import torch
from torchvision import transforms
from tqdm import tqdm
from torchvision import models
from numpy.testing import assert_almost_equal
from typing import List
from constants import PATH_IMAGES_CNN, PATH_IMAGES_RAW
class Img2VecResnet18():
"""
Class responsible for image recognition.
"""
def __init__(self, reload=False):
"""
Initialize class.
Args:
            reload (bool): if True, re-compress the raw images before building the similarity matrix.
"""
#: Torch device to run neural network
self.device = torch.device("cpu")
#: Number of features to extract from images
self.numberFeatures = 512
#: Model to use for similarity
self.modelName = "resnet-18"
self.model, self.featureLayer = self.getFeatureLayer()
self.model = self.model.to(self.device)
self.model.eval()
self.toTensor = transforms.ToTensor()
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.allVectors = {}
        #: Input directory for building the similarity matrix
self.inputDir = PATH_IMAGES_CNN
if reload:
transformImages()
self.updateSimilarityMatrix()
def getFeatureLayer(self):
"""
Gets avgpool layer from `resnet18 <https://pytorch.org/hub/pytorch_vision_resnet/>`_ .
"""
cnnModel = models.resnet18(pretrained=True)
layer = cnnModel._modules.get('avgpool')
self.layer_output_size = 512
return cnnModel, layer
def getVec(self, img: Image):
"""
Converts passed image into a numpy vector
Args:
img (Image): pillow image to convert
Returns:
Tensor as Numpy array
"""
image = self.normalize(self.toTensor(img)).unsqueeze(0).to(self.device)
embedding = torch.zeros(1, self.numberFeatures, 1, 1)
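        # Forward hook: copies the avgpool layer's output into `embedding` while model(image) runs.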
def copyData(m, i, o): embedding.copy_(o.data)
h = self.featureLayer.register_forward_hook(copyData)
self.model(image)
h.remove()
return embedding.numpy()[0, :, 0, 0]
def getSimilarityMatrix(self, vectors):
"""
        Create a pandas DataFrame of similarities from the passed vectors
Args:
            vectors (Numpy.Array): vectors from which to compute pairwise similarities
Returns:
Pandas.DataFrame
"""
v = np.array(list(vectors.values())).T
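        # Pairwise cosine similarity: inner products normalised by the column norms.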
sim = np.inner(v.T, v.T) / ((np.linalg.norm(v, axis=0).reshape(-1,1)) * ((np.linalg.norm(v, axis=0).reshape(-1,1)).T))
keys = list(vectors.keys())
matrix = pd.DataFrame(sim, columns = keys, index = keys)
return matrix
def updateSimilarityMatrix(self, k: int = 10):
"""
Updates self.SimilarityMatrix, self.similarNames, self.similarValues and self.k using parameter k.
Args:
            k (int): Number of recommendations to present when querying for similarities
"""
self.k = k
for image in tqdm(os.listdir(self.inputDir)):
I = Image.open(os.path.join(self.inputDir, image))
vec = self.getVec(I)
self.allVectors[image] = vec
I.close()
self.similarityMatrix = self.getSimilarityMatrix(self.allVectors)
self.similarNames = pd.DataFrame(index = self.similarityMatrix.index, columns = range(self.k))
self.similarValues = pd.DataFrame(index = self.similarityMatrix.index, columns = range(self.k))
for j in tqdm(range(self.similarityMatrix.shape[0])):
kSimilar = self.similarityMatrix.iloc[j, :].sort_values(ascending = False).head(self.k)
self.similarNames.iloc[j, :] = list(kSimilar.index)
self.similarValues.iloc[j, :] = kSimilar.values
def getSimilarImages(self, image: str):
"""
Gets self.k most similar images from self.similarNames.
Args:
image (str): filename of image for which recommendations are desired
"""
if image in set(self.similarNames.index):
imgs = list(self.similarNames.loc[image, :])
vals = list(self.similarValues.loc[image, :])
# Don't recommend passed image
if image in imgs:
assert_almost_equal(max(vals), 1, decimal = 5)
imgs.remove(image)
vals.remove(max(vals))
return imgs, vals
else:
print("'{}' Unknown image".format(image))
def transformImages(inputDir = PATH_IMAGES_RAW, outputDir = PATH_IMAGES_CNN, filenames: List[str] = None):
"""
Process Images inside inputDir for use with neural network.
    Resized images are written to the outputDir.
*Paths are absolute
"""
transformationForCNNInput = transforms.Compose([transforms.Resize((224,224))])
    if filenames is None:
filenames = os.listdir(inputDir)
if not os.path.exists(outputDir):
os.makedirs(outputDir)
for imageName in filenames:
imageOutputPath = os.path.join(outputDir, imageName)
imagePath = os.path.join(inputDir, imageName)
if not os.path.isfile(imageOutputPath):
I = Image.open(imagePath)
newI = transformationForCNNInput(I)
if "exif" in I.info:
exif = I.info['exif']
newI.save(imageOutputPath, exif=exif)
else:
newI.save(imageOutputPath)
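# Illustrative usage (a sketch; "example.jpg" is a hypothetical filename under PATH_IMAGES_RAW):
#   img2vec = Img2VecResnet18(reload=True)
#   names, scores = img2vec.getSimilarImages("example.jpg")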
if __name__ == '__main__':
transformImages()
```
|
{
"source": "jeremy-coulon/conan-package-tools",
"score": 2
}
|
#### File: test/unit/packager_test.py
```python
import os
import platform
import unittest
import sys
from collections import defaultdict
from cpt.builds_generator import BuildConf
from cpt.packager import ConanMultiPackager
from conans import tools
from conans.test.utils.tools import TestBufferConanOutput
from conans.model.ref import ConanFileReference
from cpt.test.unit.utils import MockConanAPI, MockRunner, MockCIManager
def platform_mock_for(so):
class PlatformInfoMock(object):
def system(self):
return so
return PlatformInfoMock()
class AppTest(unittest.TestCase):
def setUp(self):
self.runner = MockRunner()
self.conan_api = MockConanAPI()
self.ci_manager = MockCIManager()
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
reference="lib/1.0",
ci_manager=self.ci_manager)
if "APPVEYOR" in os.environ:
del os.environ["APPVEYOR"]
if "TRAVIS" in os.environ:
del os.environ["TRAVIS"]
def _add_build(self, number, compiler=None, version=None):
self.packager.add({"os": "os%d" % number, "compiler": compiler or "compiler%d" % number,
"compiler.version": version or "4.3"},
{"option%d" % number: "value%d" % number,
"option%d" % number: "value%d" % number})
def test_remove_build_if(self):
self.packager.add({"arch": "x86", "build_type": "Release", "compiler": "gcc", "compiler.version": "6"})
self.packager.add({"arch": "x86", "build_type": "Debug", "compiler": "gcc", "compiler.version": "6"})
self.packager.add({"arch": "x86", "build_type": "Release", "compiler": "gcc", "compiler.version": "7"})
self.packager.add({"arch": "x86", "build_type": "Debug", "compiler": "gcc", "compiler.version": "7"})
self.packager.remove_build_if(lambda build: build.settings["compiler.version"] == "6")
packager_expected = ConanMultiPackager("lasote", "mychannel",
runner=self.runner,
conan_api=self.conan_api,
reference="lib/1.0",
ci_manager=self.ci_manager)
packager_expected.add({"arch": "x86", "build_type": "Release", "compiler": "gcc", "compiler.version": "7"})
packager_expected.add({"arch": "x86", "build_type": "Debug", "compiler": "gcc", "compiler.version": "7"})
self.assertEqual([tuple(a) for a in self.packager.items], packager_expected.items)
def test_update_build_if(self):
self.packager.add({"os": "Windows"})
self.packager.add({"os": "Linux"})
self.packager.update_build_if(lambda build: build.settings["os"] == "Windows",
new_build_requires={"*": ["7zip_installer/0.1.0@conan/stable"]})
packager_expected = ConanMultiPackager("lasote", "mychannel",
runner=self.runner,
conan_api=self.conan_api,
reference="lib/1.0",
ci_manager=self.ci_manager)
packager_expected.add({"os": "Windows"}, {}, {}, {"*": ["7zip_installer/0.1.0@conan/stable"]})
packager_expected.add({"os": "Linux"})
self.assertEqual([tuple(a) for a in self.packager.items], packager_expected.items)
def test_add_common_builds_update_build_if(self):
self.packager.add_common_builds()
self.packager.update_build_if(lambda build: build.settings["build_type"] == "Debug",
new_options={"foo:bar": True})
self.packager.update_build_if(lambda build: build.settings["build_type"] == "Release",
new_options={"foo:qux": False})
for settings, options, _, _, _ in self.packager.items:
if settings["build_type"] == "Release":
self.assertEqual(options, {"foo:qux": False})
else:
self.assertEqual(options, {"foo:bar": True})
def test_full_profile(self):
self.packager.add({"os": "Windows", "compiler": "gcc"},
{"option1": "One"},
{"VAR_1": "ONE",
"VAR_2": "TWO"},
{"*": ["myreference/1.0@lasote/testing"]})
self.packager.run_builds(1, 1)
profile = self.conan_api.get_profile_from_call_index(1)
self.assertEquals(profile.settings["os"], "Windows")
self.assertEquals(profile.settings["compiler"], "gcc")
self.assertEquals(profile.options.as_list(), [("option1", "One")])
self.assertEquals(profile.env_values.data[None]["VAR_1"], "ONE")
self.assertEquals(profile.env_values.data[None]["VAR_2"], "TWO")
self.assertEquals(profile.build_requires["*"],
[ConanFileReference.loads("myreference/1.0@lasote/testing")])
def test_profile_environ(self):
self.packager.add({"os": "Windows", "compiler": "gcc"},
{"option1": "One"},
{"VAR_1": "ONE",
"VAR_2": "TWO"},
{"*": ["myreference/1.0@lasote/testing"]})
with tools.environment_append({"CONAN_BUILD_REQUIRES": "br1/1.0@conan/testing"}):
self.packager.run_builds(1, 1)
profile = self.conan_api.get_profile_from_call_index(1)
self.assertEquals(profile.build_requires["*"],
[ConanFileReference.loads("myreference/1.0@lasote/testing"),
ConanFileReference.loads("br1/1.0@conan/testing")])
def test_pages(self):
for number in range(10):
self._add_build(number)
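        # run_builds(page, total_pages) distributes builds round-robin across pages:
        # build i lands on page (i % total_pages) + 1, as the assertions below illustrate.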
# 10 pages, 1 per build
self.packager.run_builds(1, 10)
self.conan_api.assert_tests_for([0])
# 2 pages, 5 per build
self.conan_api.reset()
self.packager.run_builds(1, 2)
self.conan_api.assert_tests_for([0, 2, 4, 6, 8])
self.conan_api.reset()
self.packager.run_builds(2, 2)
self.conan_api.assert_tests_for([1, 3, 5, 7, 9])
# 3 pages, 4 builds in page 1 and 3 in the rest of pages
self.conan_api.reset()
self.packager.run_builds(1, 3)
self.conan_api.assert_tests_for([0, 3, 6, 9])
self.conan_api.reset()
self.packager.run_builds(2, 3)
self.conan_api.assert_tests_for([1, 4, 7])
self.conan_api.reset()
self.packager.run_builds(3, 3)
self.conan_api.assert_tests_for([2, 5, 8])
def test_deprecation_gcc(self):
with self.assertRaisesRegexp(Exception, "DEPRECATED GCC MINOR VERSIONS!"):
ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["4.3", "5.4"],
use_docker=True,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
def test_32bits_images(self):
packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
use_docker=True,
docker_32_images=True,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
packager.add({"arch": "x86", "compiler": "gcc", "compiler.version": "6"})
packager.run_builds(1, 1)
self.assertIn("docker pull conanio/gcc6-x86", self.runner.calls[0])
self.runner.reset()
packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
use_docker=True,
docker_32_images=False,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
packager.add({"arch": "x86", "compiler": "gcc", "compiler.version": "6"})
packager.run_builds(1, 1)
self.assertNotIn("docker pull conanio/gcc6-i386", self.runner.calls[0])
self.runner.reset()
with tools.environment_append({"CONAN_DOCKER_32_IMAGES": "1"}):
packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
use_docker=True,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
packager.add({"arch": "x86", "compiler": "gcc", "compiler.version": "6"})
packager.run_builds(1, 1)
self.assertIn("docker pull conanio/gcc6-x86", self.runner.calls[0])
self.runner.reset()
        # Test the opposite
packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
use_docker=True,
docker_32_images=False,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
packager.add({"arch": "x86", "compiler": "gcc", "compiler.version": "6"})
packager.run_builds(1, 1)
self.assertIn("docker pull conanio/gcc6", self.runner.calls[0])
def test_docker_gcc(self):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["4.3", "5"],
use_docker=True,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "gcc", "4.3")
self._add_build(2, "gcc", "4.3")
self._add_build(3, "gcc", "4.3")
self.packager.run_builds(1, 2)
self.assertIn("docker pull conanio/gcc43", self.runner.calls[0])
self.assertIn('docker run ', self.runner.calls[1])
self.assertNotIn('sudo pip', self.runner.calls[1])
self.assertIn('pip install', self.runner.calls[1])
self.assertIn('os=os1', self.runner.calls[4])
self.packager.run_builds(1, 2)
self.assertIn("docker pull conanio/gcc43", self.runner.calls[0])
        # Next build from 4.3 is cached, no pulls are performed
self.assertIn('os=os3', self.runner.calls[5])
for the_bool in ["True", "False"]:
self.runner.reset()
with tools.environment_append({"CONAN_DOCKER_USE_SUDO": the_bool}):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["4.3", "5"],
use_docker=True,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "gcc", "4.3")
self.packager.run_builds(1, 2)
if the_bool == "True":
self.assertIn("sudo -E docker run", self.runner.calls[-1])
else:
self.assertNotIn("sudo -E docker run", self.runner.calls[-1])
self.assertIn("docker run", self.runner.calls[-1])
self.runner.reset()
with tools.environment_append({"CONAN_PIP_USE_SUDO": the_bool}):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["4.3", "5"],
use_docker=True,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "gcc", "4.3")
self.packager.run_builds(1, 2)
if the_bool == "True":
self.assertIn("sudo -E pip", self.runner.calls[1])
else:
self.assertNotIn("sudo -E pip", self.runner.calls[1])
self.assertIn("pip", self.runner.calls[1])
def test_docker_clang(self):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
clang_versions=["3.8", "4.0"],
use_docker=True,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "clang", "3.8")
self._add_build(2, "clang", "3.8")
self._add_build(3, "clang", "3.8")
self.packager.run_builds(1, 2)
self.assertIn("docker pull conanio/clang38", self.runner.calls[0])
self.assertIn('docker run ', self.runner.calls[1])
self.assertIn('os=os1', self.runner.calls[4])
        # Next build from 3.8 is cached, no pulls are performed
self.assertIn('os=os3', self.runner.calls[5])
def test_docker_gcc_and_clang(self):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["5", "6"],
clang_versions=["3.9", "4.0"],
use_docker=True,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "gcc", "5")
self._add_build(2, "gcc", "5")
self._add_build(3, "gcc", "5")
self._add_build(4, "clang", "3.9")
self._add_build(5, "clang", "3.9")
self._add_build(6, "clang", "3.9")
self.packager.run_builds(1, 2)
self.assertIn("docker pull conanio/gcc5", self.runner.calls[0])
self.assertIn('docker run ', self.runner.calls[1])
self.assertIn('os=os1', self.runner.calls[4])
self.assertIn('os=os3', self.runner.calls[5])
self.packager.run_builds(2, 2)
self.assertIn("docker pull conanio/clang39", self.runner.calls[16])
self.assertIn('docker run ', self.runner.calls[17])
self.assertIn('os=os4', self.runner.calls[20])
self.assertIn('os=os6', self.runner.calls[21])
def test_upload_false(self):
packager = ConanMultiPackager(username="lasote",
channel="mychannel",
upload=False, reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self.assertFalse(packager._upload_enabled())
def test_docker_env_propagated(self):
# test env
with tools.environment_append({"CONAN_FAKE_VAR": "32"}):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["5", "6"],
clang_versions=["3.9", "4.0"],
use_docker=True,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "gcc", "5")
self.packager.run_builds(1, 1)
self.assertIn('-e CONAN_FAKE_VAR="32"', self.runner.calls[-1])
def test_docker_home_env(self):
with tools.environment_append({"CONAN_DOCKER_HOME": "/some/dir"}):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["5", "6"],
clang_versions=["3.9", "4.0"],
use_docker=True,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "gcc", "5")
self.packager.run_builds(1, 1)
self.assertIn('-e CONAN_DOCKER_HOME="/some/dir"',
self.runner.calls[-1])
self.assertEquals(self.packager.docker_conan_home, "/some/dir")
def test_docker_home_opt(self):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["5", "6"],
clang_versions=["3.9", "4.0"],
use_docker=True,
docker_conan_home="/some/dir",
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "gcc", "5")
self.packager.run_builds(1, 1)
self.assertEquals(self.packager.docker_conan_home, "/some/dir")
def test_docker_invalid(self):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
use_docker=True,
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "msvc", "10")
# Only clang and gcc have docker images
self.assertRaises(Exception, self.packager.run_builds)
def test_assign_builds_retrocompatibility(self):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["4.3", "5"],
use_docker=True,
reference="lib/1.0",
ci_manager=self.ci_manager)
self.packager.add_common_builds()
self.packager.builds = [({"os": "Windows"}, {"option": "value"})]
self.assertEquals(self.packager.items, [BuildConf(settings={'os': 'Windows'},
options={'option': 'value'},
env_vars={}, build_requires={},
reference="lib/1.0@lasote/mychannel")])
def test_only_mingw(self):
mingw_configurations = [("4.9", "x86_64", "seh", "posix")]
builder = ConanMultiPackager(mingw_configurations=mingw_configurations, visual_versions=[],
username="Pepe", platform_info=platform_mock_for("Windows"),
reference="lib/1.0", ci_manager=self.ci_manager)
with tools.environment_append({"CONAN_SHARED_OPTION_NAME": "zlib:shared"}):
builder.add_common_builds(pure_c=True)
expected = [({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++",
'compiler.threads': 'posix', 'compiler.version': '4.9', 'arch': 'x86_64',
'build_type': 'Release', 'compiler': 'gcc'},
{'zlib:shared': True},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]}),
({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++", 'arch': 'x86_64',
'compiler.threads': 'posix', 'compiler.version': '4.9', 'build_type': 'Debug',
'compiler': 'gcc'},
{'zlib:shared': True},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]}),
({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++",
'compiler.threads': 'posix', 'compiler.version': '4.9', 'arch': 'x86_64',
'build_type': 'Release', 'compiler': 'gcc'},
{'zlib:shared': False},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]}),
({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++", 'arch': 'x86_64',
'compiler.threads': 'posix', 'compiler.version': '4.9', 'build_type': 'Debug',
'compiler': 'gcc'},
{'zlib:shared': False},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]})]
self.assertEquals([tuple(a) for a in builder.builds], expected)
def test_named_pages(self):
builder = ConanMultiPackager(username="Pepe", reference="zlib/1.2.11",
ci_manager=self.ci_manager)
named_builds = defaultdict(list)
with tools.environment_append({"CONAN_SHARED_OPTION_NAME": "zlib:shared"}):
builder.add_common_builds(pure_c=True)
for settings, options, env_vars, build_requires, _ in builder.items:
named_builds[settings['arch']].append([settings, options, env_vars, build_requires])
builder.named_builds = named_builds
self.assertEquals(builder.builds, [])
if platform.system() == "Darwin": # Not default x86 in Macos
self.assertEquals(len(builder.named_builds), 1)
self.assertFalse("x86" in builder.named_builds)
self.assertTrue("x86_64" in builder.named_builds)
else:
self.assertEquals(len(builder.named_builds), 2)
self.assertTrue("x86" in builder.named_builds)
self.assertTrue("x86_64" in builder.named_builds)
def test_remotes(self):
runner = MockRunner()
builder = ConanMultiPackager(username="Pepe",
remotes=["url1", "url2"],
runner=runner,
conan_api=self.conan_api,
reference="lib/1.0@lasote/mychannel",
ci_manager=self.ci_manager)
self.assertEquals(self.conan_api.calls[1].args[1], "url1")
self.assertEquals(self.conan_api.calls[1].kwargs["insert"], -1)
self.assertEquals(self.conan_api.calls[3].args[1], "url2")
self.assertEquals(self.conan_api.calls[3].kwargs["insert"], -1)
runner = MockRunner()
self.conan_api = MockConanAPI()
builder = ConanMultiPackager(username="Pepe",
remotes="myurl1",
runner=runner,
conan_api=self.conan_api,
reference="lib/1.0@lasote/mychannel",
ci_manager=self.ci_manager)
self.assertEquals(self.conan_api.calls[1].args[1], "myurl1")
self.assertEquals(self.conan_api.calls[1].kwargs["insert"], -1)
# Named remotes, with SSL flag
runner = MockRunner()
self.conan_api = MockConanAPI()
remotes = [("u1", True, "my_cool_name1"),
("u2", False, "my_cool_name2")]
builder = ConanMultiPackager(username="Pepe",
remotes=remotes,
runner=runner,
conan_api=self.conan_api,
reference="lib/1.0@lasote/mychannel",
ci_manager=self.ci_manager)
self.assertEquals(self.conan_api.calls[1].args[0], "my_cool_name1")
self.assertEquals(self.conan_api.calls[1].args[1], "u1")
self.assertEquals(self.conan_api.calls[1].kwargs["insert"], -1)
self.assertEquals(self.conan_api.calls[1].kwargs["verify_ssl"], True)
self.assertEquals(self.conan_api.calls[3].args[0], "my_cool_name2")
self.assertEquals(self.conan_api.calls[3].args[1], "u2")
self.assertEquals(self.conan_api.calls[3].kwargs["insert"], -1)
self.assertEquals(self.conan_api.calls[3].kwargs["verify_ssl"], False)
def test_visual_defaults(self):
with tools.environment_append({"CONAN_VISUAL_VERSIONS": "10"}):
builder = ConanMultiPackager(username="Pepe",
platform_info=platform_mock_for("Windows"),
reference="lib/1.0@lasote/mychannel",
ci_manager=self.ci_manager)
builder.add_common_builds()
for settings, _, _, _, _ in builder.items:
self.assertEquals(settings["compiler"], "Visual Studio")
self.assertEquals(settings["compiler.version"], "10")
with tools.environment_append({"CONAN_VISUAL_VERSIONS": "10",
"MINGW_CONFIGURATIONS": "4.9@x86_64@seh@posix"}):
builder = ConanMultiPackager(username="Pepe",
platform_info=platform_mock_for("Windows"),
reference="lib/1.0@lasote/mychannel",
ci_manager=self.ci_manager)
builder.add_common_builds()
for settings, _, _, _, _ in builder.items:
self.assertEquals(settings["compiler"], "gcc")
self.assertEquals(settings["compiler.version"], "4.9")
def test_multiple_references(self):
with tools.environment_append({"CONAN_REFERENCE": "zlib/1.2.8"}):
builder = ConanMultiPackager(username="Pepe", ci_manager=self.ci_manager)
builder.add_common_builds(reference="lib/1.0@lasote/mychannel")
for _, _, _, _, reference in builder.items:
self.assertEquals(str(reference), "lib/1.0@lasote/mychannel")
builder.add_common_builds(reference="lib/2.0@lasote/mychannel")
for _, _, _, _, reference in builder.items:
self.assertTrue(str(reference) in ("lib/1.0@lasote/mychannel", "lib/2.0@lasote/mychannel"))
def test_select_defaults_test(self):
with tools.environment_append({"CONAN_REFERENCE": "zlib/1.2.8"}):
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
gcc_versions=["4.8", "5"],
username="foo",
reference="lib/1.0@lasote/mychannel",
ci_manager=self.ci_manager)
self.assertEquals(builder.build_generator._clang_versions, [])
with tools.environment_append({"CONAN_GCC_VERSIONS": "4.8, 5",
"CONAN_REFERENCE": "zlib/1.2.8"}):
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
username="foo",
reference="lib/1.0@lasote/mychannel",
ci_manager=self.ci_manager)
self.assertEquals(builder.build_generator._clang_versions, [])
self.assertEquals(builder.build_generator._gcc_versions, ["4.8", "5"])
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
clang_versions=["4.8", "5"],
username="foo",
reference="lib/1.0",
ci_manager=self.ci_manager)
self.assertEquals(builder.build_generator._gcc_versions, [])
with tools.environment_append({"CONAN_CLANG_VERSIONS": "4.8, 5",
"CONAN_APPLE_CLANG_VERSIONS": " "}):
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
username="foo",
reference="lib/1.0",
ci_manager=self.ci_manager)
self.assertEquals(builder.build_generator._gcc_versions, [])
self.assertEquals(builder.build_generator._clang_versions, ["4.8", "5"])
self.assertEquals(builder.build_generator._clang_versions, ["4.8", "5"])
self.assertEquals(builder.build_generator._apple_clang_versions, [])
def test_upload(self):
runner = MockRunner()
runner.output = "arepo: myurl"
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner,
conan_api=self.conan_api,
remotes="myurl, otherurl",
platform_info=platform_mock_for("Darwin"),
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
# Duplicated upload remote puts upload repo first (in the remotes order)
self.assertEqual(self.conan_api.calls[1].args[0], 'upload_repo')
self.assertEqual(self.conan_api.calls[3].args[0], 'remote1')
# Now check that the upload remote order is preserved if we specify it in the remotes
runner = MockRunner()
self.conan_api = MockConanAPI()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner,
conan_api=self.conan_api,
remotes="otherurl, myurl, moreurl",
platform_info=platform_mock_for("Darwin"),
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
self.assertEqual(self.conan_api.calls[1].args[0], 'remote0')
self.assertEqual(self.conan_api.calls[3].args[0], 'upload_repo')
self.assertEqual(self.conan_api.calls[5].args[0], 'remote2')
runner = MockRunner()
self.conan_api = MockConanAPI()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner,
conan_api=self.conan_api,
remotes="otherurl",
platform_info=platform_mock_for("Darwin"),
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
self.assertEqual(self.conan_api.calls[1].args[0], 'remote0')
self.assertEqual(self.conan_api.calls[3].args[0], 'upload_repo')
def test_build_policy(self):
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=self.runner,
conan_api=self.conan_api,
remotes="otherurl",
platform_info=platform_mock_for("Darwin"),
build_policy="outdated",
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
self.assertEquals(["outdated"], self.conan_api.calls[-1].kwargs["build_modes"])
for build_policy, expected in [("missing", ["missing"]), ("all",[])]:
with tools.environment_append({"CONAN_BUILD_POLICY": build_policy}):
self.conan_api = MockConanAPI()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=self.runner,
conan_api=self.conan_api,
remotes="otherurl",
platform_info=platform_mock_for("Darwin"),
build_policy=build_policy,
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
self.assertEquals(expected, self.conan_api.calls[-1].kwargs["build_modes"])
def test_test_folder(self):
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=self.runner,
conan_api=self.conan_api,
remotes="otherurl",
platform_info=platform_mock_for("Darwin"),
test_folder="foobar",
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
self.assertEquals("foobar", self.conan_api.calls[-1].kwargs["test_folder"])
with tools.environment_append({"CONAN_BUILD_POLICY": "missing"}):
self.conan_api = MockConanAPI()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=self.runner,
conan_api=self.conan_api,
remotes="otherurl",
platform_info=platform_mock_for("Darwin"),
build_policy=None,
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
self.assertEquals(None, self.conan_api.calls[-1].kwargs["test_folder"])
def test_check_credentials(self):
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=self.runner,
conan_api=self.conan_api,
platform_info=platform_mock_for("Darwin"),
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
        # When activated, credentials are checked before the profiles are created
self.assertEqual(self.conan_api.calls[2].name, 'authenticate')
self.assertEqual(self.conan_api.calls[3].name, 'create_profile')
self.conan_api = MockConanAPI()
# If we skip the credentials check, the login will be performed just before the upload
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=self.runner,
conan_api=self.conan_api,
platform_info=platform_mock_for("Darwin"),
skip_check_credentials=True,
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
self.assertNotEqual(self.conan_api.calls[2].name, 'authenticate')
# No upload, no authenticate
self.conan_api = MockConanAPI()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload=None, visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=self.runner,
conan_api=self.conan_api,
platform_info=platform_mock_for("Darwin"),
skip_check_credentials=True,
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
for action in self.conan_api.calls:
self.assertNotEqual(action.name, 'authenticate')
self.assertNotEqual(action.name, 'upload')
def channel_detector_test(self):
for branch, expected_channel in [("testing", "a_channel"),
("dummy", "a_channel"),
("stable", "stable"),
("stable/something", "stable"),
("release", "stable"),
("release/something", "stable"),
("master", "stable"),
("master/something", "a_channel")]:
builder = ConanMultiPackager(username="pepe",
channel="a_channel",
reference="lib/1.0",
ci_manager=MockCIManager(current_branch=branch))
self.assertEquals(builder.channel, expected_channel, "Not match for branch %s" % branch)
def test_pip_conanio_image(self):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["4.3", "5"],
use_docker=True,
docker_image='conanio/gcc43',
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "gcc", "4.3")
self.packager.run_builds(1, 2)
self.assertNotIn("sudo -E pip", self.runner.calls[1])
self.assertIn("pip", self.runner.calls[1])
self.runner.reset()
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["4.3", "5"],
docker_image='conanio/gcc43',
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "gcc", "4.3")
self.packager.run_builds(1, 2)
self.assertNotIn("sudo -E pip", self.runner.calls[1])
self.assertIn("pip", self.runner.calls[1])
@unittest.skipIf(sys.platform.startswith("win"), "Requires Linux")
def test_pip_docker_sudo(self):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["4.3", "5"],
docker_image='foobar/gcc43',
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "gcc", "4.3")
self.packager.run_builds(1, 2)
self.assertIn("sudo -E pip", self.runner.calls[1])
self.runner.reset()
with tools.environment_append({"CONAN_PIP_USE_SUDO": "True"}):
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
runner=self.runner,
conan_api=self.conan_api,
gcc_versions=["4.3", "5"],
docker_image='conanio/gcc43',
reference="zlib/1.2.11",
ci_manager=self.ci_manager)
self._add_build(1, "gcc", "4.3")
self.packager.run_builds(1, 2)
self.assertIn("sudo -E pip", self.runner.calls[1])
def test_regular_pip_command(self):
""" CPT Should call `pip` when CONAN_PIP_PACKAGE or CONAN_PIP_INSTALL are declared.
"""
with tools.environment_append({"CONAN_USERNAME": "foobar",
"CONAN_PIP_PACKAGE": "conan==1.0.0-dev",
"CONAN_PIP_INSTALL": "foobar==0.1.0"}):
output = TestBufferConanOutput()
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
reference="lib/1.0",
ci_manager=self.ci_manager,
out=output.write,
conan_api=self.conan_api,
runner=self.runner,
exclude_vcvars_precommand=True)
self.packager.add_common_builds()
self.packager.run()
self.assertIn("[pip_update]", output)
self.assertIn(" pip install -q conan==1.0.0-dev", self.runner.calls)
self.assertIn(" pip install -q foobar==0.1.0", self.runner.calls)
def test_custom_pip_command(self):
""" CPT should run custom `pip` path when CONAN_PIP_COMMAND is declared.
"""
pip = "pip3" if tools.which("pip3") else "pip2"
with tools.environment_append({"CONAN_USERNAME": "foobar",
"CONAN_PIP_PACKAGE": "conan==0.1.0",
"CONAN_PIP_INSTALL": "foobar==0.1.0",
"CONAN_PIP_COMMAND": pip}):
output = TestBufferConanOutput()
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
reference="lib/1.0",
ci_manager=self.ci_manager,
out=output.write,
conan_api=self.conan_api,
runner=self.runner,
exclude_vcvars_precommand=True)
self.packager.add_common_builds()
self.packager.run()
self.assertIn("[pip_update]", output)
self.assertIn(" {} install -q conan==0.1.0".format(pip), self.runner.calls)
self.assertIn(" {} install -q foobar==0.1.0".format(pip), self.runner.calls)
def test_invalid_pip_command(self):
""" CPT should not accept invalid `pip` command when CONAN_PIP_COMMAND is declared.
"""
with tools.environment_append({"CONAN_USERNAME": "foobar",
"CONAN_PIP_PACKAGE": "conan==0.1.0",
"CONAN_PIP_COMMAND": "/bin/bash"}):
output = TestBufferConanOutput()
with self.assertRaises(Exception) as context:
self.packager = ConanMultiPackager(username="lasote",
channel="mychannel",
reference="lib/1.0",
ci_manager=self.ci_manager,
out=output.write,
conan_api=self.conan_api,
runner=self.runner,
exclude_vcvars_precommand=True)
self.packager.add_common_builds()
self.packager.run()
self.assertTrue("CONAN_PIP_COMMAND: '/bin/bash' is not a valid pip command" in context.exception)
self.assertNotIn("[pip_update]", output)
def test_skip_recipe_export(self):
def _check_create_calls(skip_recipe_export):
not_export = "not_export"
creates = self.conan_api.get_creates()
if skip_recipe_export:
# Only first call should export recipe
                self.assertFalse(creates[0].kwargs[not_export])
for call in creates[1:]:
self.assertTrue(call.kwargs[not_export])
else:
for call in creates:
self.assertFalse(call.kwargs[not_export])
output = TestBufferConanOutput()
packager = ConanMultiPackager(username="lasote",
channel="mychannel",
visual_versions=["12"],
archs=["x86", "x86_64"],
build_types=["Release"],
reference="zlib/1.2.11",
runner=self.runner,
conan_api=self.conan_api,
ci_manager=self.ci_manager,
out=output.write)
packager.add_common_builds()
packager.run()
_check_create_calls(False)
with tools.environment_append({"CONAN_SKIP_RECIPE_EXPORT": "True"}):
self.conan_api.reset()
packager = ConanMultiPackager(username="lasote",
channel="mychannel",
visual_versions=["12"],
archs=["x86", "x86_64"],
build_types=["Release"],
reference="zlib/1.2.11",
runner=self.runner,
conan_api=self.conan_api,
ci_manager=self.ci_manager,
out=output.write)
packager.add_common_builds()
packager.run()
_check_create_calls(True)
self.conan_api.reset()
packager = ConanMultiPackager(username="lasote",
channel="mychannel",
visual_versions=["12"],
archs=["x86", "x86_64"],
build_types=["Release"],
reference="zlib/1.2.11",
runner=self.runner,
conan_api=self.conan_api,
ci_manager=self.ci_manager,
skip_recipe_export=True,
out=output.write)
packager.add_common_builds()
packager.run()
_check_create_calls(True)
def test_skip_recipe_export_docker(self):
def _check_run_calls(skip_recipe_export):
env_var = '-e CPT_SKIP_RECIPE_EXPORT="True"'
run_calls = [call for call in self.runner.calls if "docker run --rm" in call]
if skip_recipe_export:
# Only first call should export recipe
self.assertNotIn(env_var, run_calls[0])
for call in run_calls[1:]:
self.assertIn(env_var, call)
else:
for call in run_calls:
self.assertNotIn(env_var, call)
output = TestBufferConanOutput()
packager = ConanMultiPackager(username="lasote",
channel="mychannel",
gcc_versions=["9"],
archs=["x86", "x86_64"],
build_types=["Release"],
reference="zlib/1.2.11",
use_docker=True,
runner=self.runner,
conan_api=self.conan_api,
ci_manager=self.ci_manager,
out=output.write)
packager.add_common_builds()
packager.run()
_check_run_calls(False)
with tools.environment_append({"CONAN_SKIP_RECIPE_EXPORT": "True"}):
self.runner.reset()
packager = ConanMultiPackager(username="lasote",
channel="mychannel",
gcc_versions=["9"],
archs=["x86", "x86_64"],
build_types=["Release"],
reference="zlib/1.2.11",
use_docker=True,
runner=self.runner,
conan_api=self.conan_api,
ci_manager=self.ci_manager,
out=output.write)
packager.add_common_builds()
packager.run()
_check_run_calls(True)
self.runner.reset()
packager = ConanMultiPackager(username="lasote",
channel="mychannel",
gcc_versions=["9"],
archs=["x86", "x86_64"],
build_types=["Release"],
reference="zlib/1.2.11",
use_docker=True,
runner=self.runner,
conan_api=self.conan_api,
ci_manager=self.ci_manager,
skip_recipe_export=True,
out=output.write)
packager.add_common_builds()
packager.run()
_check_run_calls(True)
def test_lockfile(self):
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=self.runner,
conan_api=self.conan_api,
remotes="otherurl",
platform_info=platform_mock_for("Darwin"),
lockfile="foobar.lock",
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
self.assertEquals("foobar.lock", self.conan_api.calls[-1].kwargs["lockfile"])
with tools.environment_append({"CONAN_LOCKFILE": "couse.lock"}):
self.conan_api = MockConanAPI()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=self.runner,
conan_api=self.conan_api,
remotes="otherurl",
platform_info=platform_mock_for("Darwin"),
build_policy=None,
ci_manager=self.ci_manager)
builder.add_common_builds()
builder.run()
self.assertEquals("couse.lock", self.conan_api.calls[-1].kwargs["lockfile"])
```
|
{
"source": "jeremycryan/7DRL-2021",
"score": 3
}
|
#### File: jeremycryan/7DRL-2021/cue.py
```python
class Cue:
def __init__(self):
pass
def power_to_speed(self, power, ball=None):
# Translate a power (0-100) to the ball's velocity.
        # Ball object passed in, in case this depends on ball weight.
return power*10
def on_hit(self, ball):
# Apply some effect to ball on hit
pass
class BasicCue(Cue):
pass
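# Minimal usage sketch (illustrative; with the linear mapping above, full power maps to a speed of 1000):
#   cue = BasicCue()
#   speed = cue.power_to_speed(50)  # -> 500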
```
#### File: jeremycryan/7DRL-2021/main_menu_scene.py
```python
from scene import Scene
import pygame
import constants as c
from button import Button
from level_scene import LevelScene
class MainMenuScene(Scene):
def __init__(self, game):
super().__init__(game)
self.background = pygame.image.load(c.image_path("splash.png"))
self.background = pygame.transform.scale(self.background, c.WINDOW_SIZE)
button_surf = pygame.image.load(c.image_path("start_button.png"))
self.button = Button(button_surf, (c.WINDOW_WIDTH//2, c.WINDOW_HEIGHT - 100), "play gaem", self.on_button_click)
self.is_over = False
self.shade = pygame.Surface(c.WINDOW_SIZE)
self.shade.fill(c.BLACK)
self.shade.set_alpha(0)
self.shade_alpha = 0
self.target_alpha = 0
self.game.player_lives = 3
self.game.player_max_lives = 3
self.game.music_started = None
self.game.current_floor = 1
def next_scene(self):
return LevelScene(self.game)
def on_button_click(self):
self.target_alpha = 255
def draw(self, surface, offset=(0, 0)):
surface.blit(self.background, (0, 0))
self.button.draw(surface, *offset)
self.shade.set_alpha(self.shade_alpha)
surface.blit(self.shade, (0, 0))
def update(self, dt, events):
self.button.update(dt, events)
if self.target_alpha > self.shade_alpha:
self.shade_alpha += dt * 800
if self.shade_alpha > self.target_alpha:
self.shade_alpha = self.target_alpha
self.is_over = True
```
#### File: jeremycryan/7DRL-2021/player.py
```python
import math
import pygame
from ball import Ball
from primitives import Pose
from cue import Cue, BasicCue
import constants as c
from copy import copy
class Player(Ball):
def __init__(self, game, x=0, y=0):
super().__init__(game, x, y)
self.mass *= 1.05
self.color = (255, 255, 0)
self.active_cue = BasicCue()
self.is_player = True
self.has_collided = False
self.collided_with = None
self.first_spawn = True
self.perfect = pygame.image.load(c.image_path("perfect_room.png"))
self.perfect_alpha = 0
def win_perfect(self):
self.perfect_alpha = 255
def load_back_surface(self):
self.back_surface = pygame.image.load(c.image_path("player_back.png"))
def update(self, dt, events):
for event in events:
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
mouse_pose = Pose(pygame.mouse.get_pos(), 0) + self.game.current_scene.camera.pose
my_pose = self.pose.copy() # TODO once camera movement exists, account for it
self.cue_hit(mouse_pose - my_pose)
super().update(dt, events)
current_room = self.game.current_scene.current_room() #TODO make this check fake player in simulation
floor_num = self.game.current_floor
self.perfect_alpha -= 50 * dt
if self.perfect_alpha < 128:
self.perfect_alpha -= 150*dt
if self.is_completely_in_room() and not current_room.enemies_have_spawned:
self.velocity *= 0.03**dt
if not self.game.in_simulation:
if self.is_completely_in_room() and not current_room.enemies_have_spawned and self.game.current_scene.all_balls_below_speed() and current_room.doors_are_open:
# if(floor_num == 1 and self.first_spawn):
# current_room.doors_close()
# current_room.spawn_enemies_first_room()
# current_room.waves_remaining = 3
if(current_room.is_boss_room and floor_num != 1):
current_room.doors_close()
current_room.waves_remaining = 1
current_room.spawn_boss()
else:
current_room.doors_close()
current_room.set_difficulty()
current_room.spawn_enemies()
elif current_room.enemies_have_spawned and not current_room.doors_are_open and self.game.current_scene.no_enemies() and current_room.waves_remaining >0:
if (floor_num == 1 and self.first_spawn):
current_room.spawn_enemies_first_room()
else:
current_room.spawn_enemies()
elif current_room.enemies_have_spawned and not current_room.doors_are_open and self.game.current_scene.no_enemies():
if(self.first_spawn):
self.first_spawn = False
current_room.doors_open()
def take_turn(self):
pass
def cue_hit(self, hit_vector):
# TODO use self.knock, and account for cue type
# self.velocity = hit_vector.copy()
hit_vector *= -1
if self.turn_phase != c.BEFORE_HIT or not self.turn_in_progress:
return
elif self.turn_in_progress:
self.turn_phase = c.AFTER_HIT
angle = math.atan2(-hit_vector.y, hit_vector.x) * 180/math.pi
power = hit_vector.magnitude()*0.55
if power > 110:
power = 110
self.velocity *= 0
self.knock(self.active_cue, angle, power)
def draw_prediction_line(self, screen, offset=(0, 0)):
if self.sunk:
return
if not self.turn_in_progress or not self.turn_phase == c.BEFORE_HIT:
return
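        # Predict the shot by simulating a throw-away copy of the player ball and
        # drawing dots along its path until it collides, slows to a stop, or sinks.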
self.game.in_simulation = True
player_copy = copy(self)
player_copy.is_simulating = True
player_copy.pose = self.pose.copy()
player_copy.velocity = self.velocity.copy()
player_copy.collide_with_other_ball_2 = player_copy.mock_collision
mouse_pose = Pose(pygame.mouse.get_pos(), 0) + self.game.current_scene.camera.pose
my_pose = self.pose.copy()
player_copy.cue_hit(mouse_pose - my_pose)
traveled = 0
positions = []
velocities = []
old = player_copy.pose.copy()
final_position = None
for i in range(c.SIM_ITERATIONS):
new = player_copy.pose.copy()
traveled += (new - old).magnitude()
old = new
if traveled > c.SIM_MAX_DIST:
break
if(c.VARIABLE_SIM_SPEED):
near_wall = False
if(c.SIM_NEAR_WALL_STEP_REDUCTION != 1):
mapTiles = self.game.current_scene.map.tiles_near(player_copy.pose, player_copy.radius + c.SIM_MOVEMENT);
for mapTile in mapTiles:
if(mapTile.collidable):
near_wall = True
break
if near_wall and player_copy.velocity.magnitude() >3:
sim_update = (c.SIM_MOVEMENT / player_copy.velocity.magnitude() / c.SIM_NEAR_WALL_STEP_REDUCTION)
elif player_copy.velocity.magnitude() > 1:
sim_update = (c.SIM_MOVEMENT/player_copy.velocity.magnitude())
else:
break
                # Cap the variable step so it never exceeds the minimum-FPS step.
                if sim_update > 1 / c.SIM_MIN_FPS:
                    sim_update = 1 / c.SIM_MIN_FPS
#mapTiles = self.game.current_scene.map.tiles_near(self.pose, self.radius + );
else:
sim_update = 1 / c.SIM_FPS
player_copy.update(sim_update, [])
positions.append(player_copy.pose.copy())
velocities.append(player_copy.velocity.magnitude())
if player_copy.has_collided:
final_position = player_copy.pose.copy()
break
if player_copy.velocity.magnitude() < 1:
final_position = player_copy.pose.copy()
break
if player_copy.sunk:
break
if len(positions) > 1:
final_direction = positions[-1] - positions[-2]
else:
final_direction = Pose((1, 0), 0)
extra = c.SIM_MAX_DIST - traveled
surf = pygame.Surface((3, 3))
surf.fill(c.BLACK)
pygame.draw.circle(surf, c.WHITE, (surf.get_width()//2, surf.get_width()//2), surf.get_width()//2)
alpha = 255
surf.set_colorkey(c.BLACK)
i = -1
for pose in positions[::1]:
i += 1
circle_diam = max(3, min(7, (velocities[i]/160)))
surf = pygame.Surface((circle_diam, circle_diam))
surf.fill(c.BLACK)
surf.set_colorkey(c.BLACK)
pygame.draw.circle(surf, c.WHITE, (surf.get_width() // 2, surf.get_width() // 2), surf.get_width() // 2)
surf.set_alpha(alpha)
screen.blit(surf, (pose.x + offset[0] - surf.get_width()//2, pose.y + offset[1] - surf.get_width()//2))
offset_pose = Pose(offset, 0)
if player_copy.collided_with:
other = player_copy.collided_with
to_other = other.pose - player_copy.pose
angle = math.degrees(-math.atan2(to_other.y, to_other.x))
pointer = pygame.transform.rotate(self.pointer, angle)
pointer_length = 100
start = other.pose - to_other*(1/to_other.magnitude())*other.radius + offset_pose
end = start + to_other*(1/to_other.magnitude())*pointer_length
pygame.draw.line(screen, c.WHITE, start.get_position(), end.get_position())
screen.blit(pointer, (end.x - pointer.get_width()//2, end.y - pointer.get_height()//2))
if final_position:
final_position += offset_pose
pygame.draw.circle(screen, c.WHITE, final_position.get_position(), player_copy.radius, 2)
elif len(positions) >= 1:
final = positions[-1] + offset_pose
angle = math.degrees(-math.atan2(final_direction.y, final_direction.x))
pointer = pygame.transform.rotate(self.pointer, angle)
end = final + final_direction*(1/(final_direction.magnitude()*extra + 1))
screen.blit(pointer, (end.x - pointer.get_width() // 2, end.y - pointer.get_height() // 2))
self.game.in_simulation = False
def draw(self, screen, offset=(0, 0)):
super().draw(screen, offset=offset)
if self.perfect_alpha > 0:
x = self.pose.x + offset[0] - self.perfect.get_width()//2
y = self.pose.y + offset[1] - self.perfect.get_height() - self.radius - 5
self.perfect.set_alpha(self.perfect_alpha)
self.perfect.set_colorkey(c.BLACK)
screen.blit(self.perfect, (x, y))
def sink_for_real(self):
super().sink_for_real()
self.game.current_scene.player_just_sunk()
def mock_collision(self, other): #ONLY FOR MOCK BALL COLLISIONS
if self.has_collided or other.is_player:
return
self.has_collided = True
self.collided_with = other
collision_normal = self.pose - other.pose
collision_normal_unscaled = collision_normal.copy()
#offset_required = (collision_normal.magnitude() - (self.radius + other.radius) ) / 1.95
#collision_normal.scale_to(1)
#self.pose -= collision_normal * offset_required
#other.pose += collision_normal * offset_required
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
collision_normal.scale_to(1)
velocity_vector = self.velocity.copy()
velocity_vector.scale_to(1)
# self.pose += velocity_vector * (offset_required * math.cos(math.atan2(velocity_vector.y-collision_normal.y, velocity_vector.x-collision_normal.x)))
dot_product_self_norm = collision_normal.x * velocity_vector.x + collision_normal.y * velocity_vector.y
if (collision_normal.magnitude() * velocity_vector.magnitude()) != 0:
acos_input = dot_product_self_norm / (collision_normal.magnitude() * velocity_vector.magnitude())
acos_input = max(-1, min(1, acos_input))
angle_vel = math.acos(acos_input)
else:
angle_vel = 1
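# Law of sines on the triangle formed by the center-to-center vector, the combined radii, and the velocity direction: find how far to back the ball up along its velocity so the two circles just touch at the moment of impact.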
angle_b = math.asin((math.sin(angle_vel) / (self.radius + other.radius)) * collision_normal_unscaled.magnitude())
angle_c = math.pi - (angle_b + angle_vel)
if math.sin(angle_vel) == 0:
angle_vel = 1
interpolated_offset = ((self.radius + other.radius) / math.sin(angle_vel)) * math.sin(angle_c)
# print("OFFSET :" + str(interpolated_offset) + " angle C: " + str(math.degrees(angle_c)) + " angle vel: " + str(math.degrees(angle_vel)))
if(self.velocity.magnitude() + other.velocity.magnitude()) != 0:
self.pose -= velocity_vector * abs(interpolated_offset) * (self.velocity.magnitude()/(self.velocity.magnitude() + other.velocity.magnitude()))
#other.pose += velocity_vector * abs(interpolated_offset) * (other.velocity.magnitude()/(self.velocity.magnitude() + other.velocity.magnitude()))
```
|
{
"source": "jeremycryan/GameOff2020",
"score": 3
}
|
#### File: GameOff2020/src/achievement_row.py
```python
import random
import pygame
from primitives import GameObject, Pose
import constants as c
class AchievementRow(GameObject):
class AchievementPanel(GameObject):
def __init__(self,
game,
container,
surface,
points,
description,
requires=None,
tags=None):
super().__init__(game)
self.container = container
self.surface = pygame.transform.scale(surface,
(c.ACHIEVEMENT_WIDTH,
surface.get_height())).convert()
self.points = points
if c.DOUBLE_POINTS_MOD in self.game.modifications:
self.points *= 2
self.description = description
self.achieved = False
self.tags = [] if tags is None else tags
self.requires = {} if requires is None else requires
self.blink = self.surface.copy()
self.blink.fill(c.WHITE)
self.blink.set_alpha(0)
self.blink_alpha = 0
def update(self, dt, events):
self.blink_alpha -= 300 * dt
if self.blink_alpha < 0:
self.blink_alpha = 0
pass
def ship_can_score(self, ship):
if self.requires.get(c.MOON, False):
if not ship.has_hit_moon:
return False
required_nuggets = self.requires.get(c.NUGGET, 0)
if len(ship.nuggets) < required_nuggets:
return False
return True
def achieve(self, player):
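# Claim this panel for `player`: blank out the description area, tint the panel with the player's color, stamp their name on it, bank the points, and flash white.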
if self.achieved:
return
self.achieved = True
self.game.current_scene.shake(15)
base_color = self.surface.get_at((0, 0))
cover_surf = pygame.Surface((self.surface.get_width() - c.ACHIEVEMENT_POINTS_WIDTH, self.surface.get_height() - 3))
cover_surf.fill(base_color)
self.surface.blit(cover_surf, (c.ACHIEVEMENT_POINTS_WIDTH, 0))
veil_surf = pygame.Surface((c.ACHIEVEMENT_WIDTH, self.surface.get_height()-3))
veil_surf.fill(c.BLACK)
veil_surf.set_alpha(120)
self.surface.blit(veil_surf, (0, 0))
veil_surf.fill(player.color)
veil_surf.set_alpha(70)
self.surface.blit(veil_surf, (0, 0))
font = self.game.small_font if len(player.name) < 15 else self.game.very_small_font
font_render = font.render(player.name[:23].upper(), 0, player.color)
y = self.surface.get_height()//2 - font_render.get_height()//2
x = (c.ACHIEVEMENT_WIDTH - c.ACHIEVEMENT_POINTS_WIDTH)//2 + c.ACHIEVEMENT_POINTS_WIDTH - font_render.get_width()//2
self.game.temp_scores[player.name] = self.game.temp_scores.get(player.name, 0) + self.points
self.surface.blit(font_render, (x, y))
self.blink_alpha = 255
def draw(self, surface, offset=(0, 0)):
shake_offset = self.game.current_scene.apply_screenshake((0, 0))
x = self.container.pose.x + offset[0]
y = self.container.pose.y + offset[1]
surface.blit(self.surface, (x, y))
if self.blink_alpha > 0:
self.blink.set_alpha(self.blink_alpha)
surface.blit(self.blink, (x, y))
def __init__(self, game, top_left_position=(0, 0)):
super().__init__(game)
self.pose = Pose(top_left_position, 0)
self.achievements = self.default_achievements()
self.label = pygame.image.load(c.IMAGE_PATH + "/achievement_box_header.png")
self.label = pygame.transform.scale(self.label, (c.ACHIEVEMENT_LABEL_WIDTH, self.label.get_height()))
self.body = pygame.image.load(c.IMAGE_PATH + "/achievement_box_body.png")
self.body = pygame.transform.scale(self.body,
(c.ACHIEVEMENT_LABEL_WIDTH,
sum([item.surface.get_height() for item in self.achievements]) + 5 * (len(self.achievements) - 1) + 8))
def default_achievements(self):
achievements = [
AchievementRow.AchievementPanel(self.game,
self,
pygame.image.load(c.IMAGE_PATH + "/achievement_1.png"),
1000,
"land on moon",
requires={c.MOON:True, c.NUGGET:0},
tags=[c.MOON_ACH]),
AchievementRow.AchievementPanel(self.game,
self,
pygame.image.load(c.IMAGE_PATH + "/achievement_2.png"),
1500,
"1 thing and land on moon",
requires={c.MOON:True, c.NUGGET:1},
tags=[c.MOON_1_NUGGET_ACH]),
AchievementRow.AchievementPanel(self.game,
self,
pygame.image.load(c.IMAGE_PATH + "/achievement_3.png"),
2500,
"2 things and land on moon",
requires={c.MOON:True, c.NUGGET:2},
tags=[c.MOON_2_NUGGET_ACH])
]
return achievements
def update(self, dt, events):
for item in self.achievements:
item.update(dt, events)
def get_height(self):
return self.label.get_height() + self.body.get_height()
def draw_box(self, surface, offset=(0, 0)):
x = self.pose.x + offset[0]
y = self.pose.y + offset[1]
surface.blit(self.label, (x, y))
y += self.label.get_height()
surface.blit(self.body, (x, y))
return y - self.pose.y
def draw(self, surface, offset=(0, 0)):
x = self.pose.x + c.SIDE_PANEL_WIDTH//2 - c.ACHIEVEMENT_WIDTH//2
y = self.draw_box(surface, offset)
#surface.blit(self.label, (x, y))
for item in self.achievements:
item.draw(surface, (x, y))
y += item.surface.get_height() + 5
def score_ship(self, ship):
for achievement in self.achievements:
if achievement.ship_can_score(ship):
achievement.achieve(ship.player)
def all_scored(self):
return all([item.achieved for item in self.achievements])
```
#### File: GameOff2020/src/death_particle.py
```python
import random
import pygame
from particle import Particle
import constants as c
from primitives import Pose
class DeathParticle(Particle):
def __init__(self, game, ship):
super().__init__(game)
self.ship = ship
self.pose = ship.pose.copy()
self.velocity = Pose(((random.random() * 2 - 1) * 160,
(random.random() * 2 - 1) * 160),
random.random() * 360) + self.ship.velocity * 0.1
self.start_radius = 40 + random.random()*30
self.duration = 0.6
def get_scale(self):
return 1 - self.through(loading=2.5)
def get_alpha(self):
return 255 * (1 - self.through(loading=1))
def update(self, dt, events):
super().update(dt, events)
self.pose += self.velocity * dt
def draw(self, surface, offset=(0, 0)):
radius = int(self.start_radius * self.get_scale())
surf = pygame.Surface((radius*2, radius*2))
surf.fill(c.BLACK)
surf.set_colorkey(c.BLACK)
pygame.draw.circle(surf, self.ship.player.color, (radius, radius), radius)
x = self.pose.x - offset[0] - surf.get_width()//2
y = self.pose.y - offset[1] - surf.get_height()//2
surf.set_alpha(self.get_alpha())
surface.blit(surf, (x, y))
```
#### File: GameOff2020/src/exhaust_particle.py
```python
import pygame
from particle import Particle
from primitives import Pose
import constants as c
class ExhaustParticle(Particle):
def __init__(self, game, ship):
super().__init__(game)
self.ship = ship
size = 18
if c.DOUBLE_THRUST_MOD in self.game.modifications:
size *= 1.5
self.surface = pygame.Surface((size, size))
self.surface.fill(c.BLACK)
self.surface.set_colorkey(c.BLACK)
pygame.draw.circle(self.surface,
ship.player.color,
(size//2, size//2),
size//2)
self.position = ship.pose.copy()
self.position.add_pose(Pose((-20, 0), 0), 1, self.position)
self.thrust = ship.thrust.copy()
self.thrust_mag = self.thrust.magnitude()
self.thrust.scale_to(-100)
self.duration = 0.4
self.intensity = 1 - (1 - self.thrust_mag/100/c.THRUST)**3
def get_alpha(self):
return (255 - self.through() * 255)*self.intensity
def get_scale(self):
return (1 - self.through())*self.intensity
def update(self, dt, events):
super().update(dt, events)
rotated = self.thrust.copy()
rotated.rotate_position(self.position.angle)
self.position += rotated * dt * 3 * self.intensity
self.position += self.ship.velocity * dt
def draw(self, surface, offset=(0, 0)):
x, y = self.position.x, self.position.y
scale = self.get_scale()
x += offset[0] - self.surface.get_width() * scale/2
y += offset[1] - self.surface.get_width() * scale/2
w = int(self.surface.get_width()*scale)
h = int(self.surface.get_height()*scale)
surf_to_blit = pygame.transform.scale(self.surface, (w, h))
surf_to_blit.set_alpha(self.get_alpha())
surface.blit(surf_to_blit, (x, y))
```
#### File: GameOff2020/src/high_score_scene.py
```python
import pygame
import constants as c
from scene import Scene
from level_scene import LevelScene
from high_score_table import HighScoreTable
from transition_gui import TransitionGui
class HighScoreScene(Scene):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for player in self.game.players:
if player not in [item.name for item in self.game.scoreboard.scores]:
self.game.scoreboard.add_score(player, 0)
self.board_offset = -c.WINDOW_HEIGHT
self.table = HighScoreTable(self.game)
self.table_all = HighScoreTable(self.game, hours_to_display=10**9)
self.table.pose.x = c.WINDOW_WIDTH * 0.3
self.table_all.pose.x = self.table.pose.x
self.age = 0
self.shade = pygame.Surface(c.WINDOW_SIZE)
self.shade.fill(c.BLACK)
self.shade_alpha = 255
self.scene_over = False
self.side_gui = TransitionGui(self.game)
pygame.mixer.music.set_volume(0.25)
def next_scene(self):
pygame.mixer.music.set_volume(1.0)
return LevelScene(self.game)
def update(self, dt, events):
self.age += dt
if self.age > 25 and self.board_offset < 0:
speed = 4
d = abs(self.board_offset)
self.board_offset += min(d * dt * speed, c.WINDOW_HEIGHT*dt*2)
if self.board_offset > 0:
self.board_offset = 0
for event in events:
if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
self.scene_over = True
if self.side_gui.countdown_over():
self.scene_over = True
self.table.update(dt, events)
self.table_all.update(dt, events)
self.side_gui.update(dt, events)
for message in self.game.stream.queue_flush():
if message.text.lower() == '!recolor':
if message.user in self.game.players:
self.game.players[message.user].recolor()
self.game.recolor_flag(message.user)
elif message.text.lower() == '!score':
board = self.game.scoreboard.get_total_by_player(c.SCORE_EXPIRATION)
if message.user in board:
score = self.game.scoreboard.get_total_by_player(c.SCORE_EXPIRATION)[message.user].score
self.game.alertManager.alert("Your score is "+str(score), message.user)
else:
self.game.alertManager.alert("You have not played in the last " + str(c.SCORE_EXPIRATION) + " hours", message.user)
elif message.text.lower()[:5] == "!vote":
split = message.text.lower().split()
if len(split) != 2:
self.game.alertManager.alert("Invalid number of arguments for !vote", message.user)
continue
player_name = message.user
argument = split[1]
self.game.current_scene.side_gui.vote(player_name, argument)
speed = 800
if self.scene_over:
self.shade_alpha += speed*dt
else:
self.shade_alpha -= speed*dt
self.shade_alpha = max(0, min(255, self.shade_alpha))
if self.scene_over and self.shade_alpha == 255:
self.is_running = False
def draw(self, surface, offset=(0, 0)):
surface.fill(c.BLACK)
surface.blit(self.table.background_surface, (0, 0))
self.table.draw(surface, (offset[0], offset[1] + self.board_offset + c.WINDOW_HEIGHT))
self.table_all.draw(surface, (offset[0], offset[1] + self.board_offset))
self.side_gui.draw(surface, offset)
if self.shade_alpha > 0:
self.shade.set_alpha(self.shade_alpha)
surface.blit(self.shade, (0, 0))
```
#### File: GameOff2020/src/scene.py
```python
import pygame
import constants as c
from primitives import GameObject
class Scene(GameObject):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.is_running = True
def main(self):
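# Fixed-timestep loop: accumulate elapsed time in `lag` and run update() in TICK_LENGTH chunks, capping at max_ticks_per_render updates per drawn frame so slow frames don't spiral.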
lag = 0
fps_queue = []
max_ticks_per_render = 5
while self.is_running:
dt, events = self.game.update_globals()
lag += dt
ticks_this_render = 0
while lag > c.TICK_LENGTH:
lag -= c.TICK_LENGTH
self.update(c.TICK_LENGTH, events)
ticks_this_render += 1
if ticks_this_render >= max_ticks_per_render:
lag = 0
break
self.draw(self.game.screen)
fps_queue.append(1/dt)
if len(fps_queue) > 20:
self.game.fps = fps_queue + self.game.fps[:40]
fps_queue = []
self.game.update_screen()
def next_scene(self):
raise NotImplementedError()
```
#### File: GameOff2020/src/ship.py
```python
import random
import pygame
import constants as c
from primitives import PhysicsObject, Pose
from player import Player
from exhaust_particle import ExhaustParticle
from explosion import Explosion
from death_particle import DeathParticle
from wormhole_explosion import WormholeExplosion
class Ship(PhysicsObject):
def __init__(self, game, program_string, player, position=(0, 0), angle=90):
super().__init__(game, position, angle)
self.program_string = program_string
self.program, info = self.parse_program(program_string)
self.player = player
self.age = 0
self.thrust = Pose((0,0), 0)
self.commandIndex = 0
self.delay = 0
self.destroyed = False
self.surface = self.get_surface()
self.since_exhaust = 0
self.radius = 10
self.label = self.game.player_label_font.render(self.player.name,
0,
self.player.color)
self.label_back = pygame.Surface((self.label.get_width() + 10,
self.label.get_height() + 10))
self.label_back.fill(c.BLACK)
self.label_back.set_alpha(100)
self.label_offset = Pose((0, -35), 0)
self.label_pose = self.pose - self.label_offset
self.way_surf = pygame.image.load(c.IMAGE_PATH + "/small_waypoint.png").convert()
h = self.label_back.get_height()
self.way_surf = pygame.transform.scale(self.way_surf, (h-2, h-2))
tint = self.way_surf.copy()
tint.fill(self.player.color)
self.way_surf.blit(tint, (0, 0), special_flags=pygame.BLEND_MULT)
self.way_surf.set_colorkey(self.way_surf.get_at((0, 0)))
self.way_back_surf = pygame.Surface((self.way_surf.get_width() + 5,self.label_back.get_height()))
self.way_back_surf.fill(c.BLACK)
self.way_back_surf.set_alpha(100)
self.scale = 0
self.target_scale = 1
self.nuggets = set()
self.has_hit_moon = False
if self.player.name in self.game.player_flags:
self.flag_surf = self.game.player_flags[self.player.name]
else:
self.flag_surf = pygame.image.load(c.IMAGE_PATH + "/flag.png").convert()
tint = pygame.Surface((self.flag_surf.get_width(), self.flag_surf.get_height()))
tint.fill(self.player.color)
self.flag_surf.blit(tint, (0, 0), special_flags=pygame.BLEND_MULT)
self.flag_surf.set_colorkey(self.flag_surf.get_at((0, 0)))
self.game.player_flags[self.player.name] = self.flag_surf
self.frozen_for = 0
self.last = False
def freeze(self, amt):
self.frozen_for = amt
self.game.current_scene.particles.add(WormholeExplosion(self.game, self))
def is_frozen(self):
frozen = self.frozen_for > 0
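# On the frame the freeze wears off (frozen is False while self.last is still True), spawn another wormhole burst.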
if not frozen and self.last:
self.game.current_scene.particles.add(WormholeExplosion(self.game, self))
self.last = frozen
return frozen
def get_surface(self):
surface = pygame.image.load(c.IMAGE_PATH + "/ship.png").convert()
color_surf = pygame.Surface((surface.get_width(), surface.get_height()))
color_surf.fill(self.player.color)
surface.blit(color_surf, (0, 0), special_flags=pygame.BLEND_MULT)
surface.set_colorkey(surface.get_at((surface.get_width()-1, surface.get_height()-1)))
return surface
def destroy(self):
self.destroyed = True
self.game.current_scene.particles.add(Explosion(self.game, self))
for i in range(8):
self.game.current_scene.particles.add(DeathParticle(self.game, self))
self.game.current_scene.shake(20)
self.game.ship_destroy_sound.play()
def update(self, dt, events):
self.age += dt
self.frozen_for -= dt
if self.is_frozen():
return
super().update(dt, events)
self.since_exhaust += dt
exhaust_period = 0.05
if self.since_exhaust > exhaust_period:
self.since_exhaust -= exhaust_period
self.game.current_scene.particles.add(ExhaustParticle(self.game, self))
if self.delay > 0:
self.delay = max(0, self.delay-dt)
self.runCommands(dt)
self.acceleration.clear()
if c.SOLAR_WIND in self.game.modifications:
if not hasattr(self.game, "solar_wind_direction"):
self.game.solar_wind_direction = random.choice((c.UP, c.DOWN, c.LEFT, c.RIGHT))
wind_accel = Pose((self.game.solar_wind_direction), 0)
self.acceleration += wind_accel * c.WIND_STRENGTH
self.acceleration.add_pose(self.thrust, 1, frame=self.pose)
if c.DOUBLE_THRUST_MOD in self.game.modifications:
self.acceleration.add_pose(self.thrust, 1, frame=self.pose)
for planet in self.game.current_scene.planets:
self.acceleration.add_pose(planet.get_acceleration(self))
for nugget in self.game.current_scene.nuggets:
nugget.test_collision(self)
ds = self.target_scale - self.scale
if ds < 0.01:
self.scale = self.target_scale
self.scale += ds * dt * 5
if self.pose.y < 120:
self.label_offset = Pose((0, 35), 0)
if self.pose.y > 150:
self.label_offset = Pose((0, -35), 0)
dl = self.pose - (self.label_pose - self.label_offset)
self.label_pose += dl * dt * 12
if self.pose.x < 0 or self.pose.x > c.LEVEL_WIDTH or \
self.pose.y < 0 or self.pose.y > c.LEVEL_HEIGHT:
self.destroy()
def runCommands(self, dt):
while self.delay <= 0 and self.commandIndex < len(self.program):
command = self.program[self.commandIndex]
if command[0] == 'd': # delay
self.delay += command[1]/1000
if command[0] == 't': # thrust
self.thrust = Pose((command[1]*c.THRUST, 0), 0)
if command[0] == 'r': # rotate
self.velocity.set_angle(command[1])
self.commandIndex += 1
def recolor(self):
self.surface = self.get_surface()
self.label = self.game.player_label_font.render(self.player.name, 0, self.player.color)
self.way_surf = pygame.image.load(c.IMAGE_PATH + "/small_waypoint.png").convert()
h = self.label_back.get_height()
self.way_surf = pygame.transform.scale(self.way_surf, (h-2, h-2))
tint = self.way_surf.copy()
tint.fill(self.player.color)
self.way_surf.blit(tint, (0, 0), special_flags = pygame.BLEND_MULT)
self.way_surf.set_colorkey(self.way_surf.get_at((0, 0)))
self.way_back_surf = pygame.Surface((self.way_surf.get_width() + 5,self.label_back.get_height()))
self.way_back_surf.fill(c.BLACK)
self.way_back_surf.set_alpha(100)
def draw(self, surface, offset=(0, 0)):
if self.destroyed:
return
if self.label_pose.x < self.label_back.get_width()//2 + 10:
self.label_pose.x = self.label_back.get_width()//2 + 10
if self.label_pose.x > c.LEVEL_WIDTH - self.label_back.get_width()//2 - 10:
self.label_pose.x = c.LEVEL_WIDTH - self.label_back.get_width()//2 - 10
x = self.label_pose.x + offset[0] - self.label_back.get_width()//2 - len(self.nuggets) * self.way_back_surf.get_width()//2
y = self.label_pose.y + offset[1] - self.label_back.get_height()//2
if not self.is_frozen():
surface.blit(self.label_back, (x, y))
x += self.label_back.get_width()
if not self.is_frozen():
for item in self.nuggets:
surface.blit(self.way_back_surf, (x, y))
surface.blit(self.way_surf, (x, y+1))
x += self.way_back_surf.get_width()
x = self.label_pose.x + offset[0] - self.label.get_width()//2 - len(self.nuggets) * self.way_back_surf.get_width()//2
y = self.label_pose.y + offset[1] - self.label.get_height()//2
if not self.is_frozen():
surface.blit(self.label, (x, y))
if self.scale == 0:
return
ship_surf = pygame.transform.scale(self.surface,
(int(self.surface.get_width() * self.scale),
int(self.surface.get_height() * self.scale)))
ship_surf = pygame.transform.rotate(ship_surf, self.pose.angle)
x = self.pose.x + offset[0] - ship_surf.get_width()//2
y = self.pose.y + offset[1] - ship_surf.get_height()//2
surface.blit(ship_surf, (x, y))
@staticmethod
def parse_program(program_string):
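# Parse a chat program string into a list of (command, *args) tuples; returns (program, None) on success or ([], error_message) on failure.
# The leading prefix character is dropped and a sentinel 'A' is appended so the final command/number is flushed by the loop below.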
program_string = program_string[1:].lower().strip() + 'A'
program = []
arguments = []
key = ''
number = ''
isNumber = False
for char in program_string:
if char == '.':
print("Decimals not permitted")
return [], "Decimals not permitted"
elif char.isnumeric() or char == '-':
isNumber = True
number += char
elif char.isalnum():
# terminate previous number
if (len(number) == 1 or number[1:].isnumeric()) and \
(number[0].isdigit() or number[0] == '-') and \
(number != "-"):
arguments.append(int(number))
number = ''
elif number != '':
print('Invalid number, "' + number + '"')
return [], 'Invalid number, "' + number + '"'
# terminate previous command
if isNumber or char == 'A':
if key in c.COMMANDS.values():
command = key
elif key in c.COMMANDS:
command = c.COMMANDS[key]
else:
print('Invalid command, "' + key + '"')
return [], 'Invalid command, "' + key + '"'
if len(arguments) != len(c.COMMANDS_MIN[command]):
print("Invalid number of arguments for " + c.COMMANDS_LONG[command])
return [], "Invalid number of arguments for " + c.COMMANDS_LONG[command]
for i, arg in enumerate(arguments):
if arg < c.COMMANDS_MIN[command][i]:
print(c.COMMANDS_LONG[command] + " was smaller than minimum value")
return [], c.COMMANDS_LONG[command] + " was smaller than minimum value"
if arg > c.COMMANDS_MAX[command][i]:
print(c.COMMANDS_LONG[command] + " was greater than maximum value")
return [], c.COMMANDS_LONG[command] + " was greater than maximum value"
program.append((command, *arguments))
key = ''
arguments = []
isNumber = False
key += char
elif char in " ,;":
isNumber = True
if number[1:].isnumeric() and \
(number[0].isdigit() or number[0] == '-'):
arguments.append(int(number))
number = ''
else:
print('Invalid character, "' + char + '"')
return [], 'Invalid character, "' + char + '"'
return program, None
if __name__ == '__main__':
Ship.parse_program("t100t120 t100")
```
#### File: GameOff2020/src/start_scene.py
```python
import pygame
from scene import Scene
import constants as c
class StartScene(Scene):
""" Display a black screen with a white circle for two seconds. """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.age = 0
def update(self, dt, events):
self.age += dt
if self.age > 2:
self.is_running = False
def draw(self, surf, offset=(0, 0)):
surf.fill(c.BLACK)
pygame.draw.circle(surf,
c.WHITE,
(c.WINDOW_WIDTH//2, c.WINDOW_HEIGHT//2),
c.WINDOW_HEIGHT//4)
def next_scene(self):
return None
```
#### File: GameOff2020/src/transition_gui.py
```python
import math
import random
import pygame
from primitives import GameObject, Pose
from planet import Planet
import constants as c
class AlertBox(GameObject):
def __init__(self, game, position, header, message, side_surface=None):
super().__init__(game)
self.age = 0
self.pose = Pose(position, 0)
self.header = header
self.message = message
self.side_surface = side_surface
self.generate_colors()
self.load_surfs()
self.max_width = self.top_surf.get_width() - c.ALERT_SIDE_PADDING * 2
if self.side_surface is not None:
self.max_width -= self.side_surface.get_width() + c.ALERT_SIDE_PADDING
self.header_surf = self.get_header_surface()
self.message_surface = self.get_message_surface()
def generate_colors(self):
self.header_color = (255, 200, 200)
self.body_color = (190, 160, 160)
def get_header_surface(self):
render = self.game.alert_header_font.render(self.header, 1, self.header_color)
background = pygame.transform.scale(self.middle_surf,
(self.middle_surf.get_width(),
render.get_height() + 8)).convert()
x = background.get_width()//2 - render.get_width()//2
if self.side_surface is not None:
x += (self.side_surface.get_width() + c.ALERT_SIDE_PADDING)//2
background.blit(render, (x, 0))
return background
def get_message_surface(self):
message_surfaces = []
message_lines = self.message.split("\n")
for line in message_lines:
message_words = line.split()
this_line = []
this_width = 0
for word in message_words:
surface = self.game.alert_body_font.render(word, 1, self.body_color)
if this_width + surface.get_width() > self.max_width:
message_surfaces.append(this_line)
this_line = []
this_width = 0
this_line.append(surface)
this_width += surface.get_width() + c.ALERT_BODY_SPACE
message_surfaces.append(this_line)
total_height = c.ALERT_LINE_SPACING*(len(message_surfaces))
if self.side_surface is not None and total_height < self.side_surface.get_height() - self.header_surf.get_height():
total_height = self.side_surface.get_height() - self.header_surf.get_height()
background = pygame.transform.scale(self.middle_surf,
(self.middle_surf.get_width(),
total_height)).convert()
y = 0
for line in message_surfaces:
line_width = sum([item.get_width() + c.ALERT_BODY_SPACE for item in line]) - c.ALERT_BODY_SPACE
x = background.get_width()//2 - line_width//2
if self.side_surface is not None:
x += self.side_surface.get_width()//2 + c.ALERT_SIDE_PADDING//2
for word in line:
background.blit(word, (x, y))
x += word.get_width() + c.ALERT_BODY_SPACE
y += c.ALERT_LINE_SPACING
return background
def load_surfs(self):
self.top_surf = pygame.image.load(c.IMAGE_PATH + "/red_alert_box_top.png")
self.middle_surf = pygame.image.load(c.IMAGE_PATH + "/red_alert_box_middle.png")
self.bottom_surf = pygame.image.load(c.IMAGE_PATH + "/red_alert_box_bottom.png")
def draw(self, surface, offset=(0, 0)):
surfaces = [self.top_surf, self.header_surf, self.message_surface, self.bottom_surf]
x = self.pose.x - self.top_surf.get_width()//2 + offset[0]
y = self.pose.y - sum([item.get_height() for item in surfaces])//2 + offset[1] + 4 * math.sin(self.age * 2)
y0 = y
for piece in surfaces:
surface.blit(piece, (x, y))
y += piece.get_height()
if self.side_surface is not None:
surface.blit(self.side_surface,
(x + c.ALERT_SIDE_PADDING,
y0 + self.top_surf.get_height()
+ self.header_surf.get_height()//2
+ self.message_surface.get_height()//2
- self.side_surface.get_height()//2))
def update(self, dt, events):
self.age += dt
class GreenAlertBox(AlertBox):
def generate_colors(self):
self.header_color = (200, 230, 205)
self.body_color = (150, 180, 160)
def load_surfs(self):
self.top_surf = pygame.image.load(c.IMAGE_PATH + "/green_alert_box_top.png")
self.middle_surf = pygame.image.load(c.IMAGE_PATH + "/green_alert_box_middle.png")
self.bottom_surf = pygame.image.load(c.IMAGE_PATH + "/green_alert_box_bottom.png")
class PlayerMultiplierAlertBox(AlertBox):
def __init__(self, game, position, header, message):
self.background_color = (68, 35, 48)
self.game = game
self.generate_colors()
side_surface = self.generate_multiplier_surface()
super().__init__(game, position, header, message, side_surface=side_surface)
self.age += 2
def generate_multiplier_surface(self):
text = f"x{self.game.player_multiplier()}"
render = self.game.alert_large_font.render(text, 1, self.header_color)
surface = pygame.Surface((render.get_width(), 70))
surface.fill(self.background_color)
surface.blit(render,
(surface.get_width()//2 - render.get_width()//2,
surface.get_height()//2 - render.get_height()//2))
return surface
class VotingObject(GameObject):
def __init__(self, game, parent, position, strings):
super().__init__(game)
self.parent = parent
self.pose = Pose(position, 0)
self.option_keys = c.OPTION_A, c.OPTION_B
self.option_strings = {self.option_keys[i]: strings[i] for i in range(len(self.option_keys))}
self.votes = {option:set() for option in self.option_keys}
self.planet_dict = {option:Planet(self.game, (0, 0), radius=75, surf_det_size=50+i) for i, option in enumerate(self.option_keys)}
self.color_dict = {c.OPTION_A:(255, 225, 200), c.OPTION_B:(200, 210, 255)}
self.label_dict = {option_key:self.get_label_surf(option_key) for option_key in self.option_keys}
self.cover = pygame.Surface((150, 150))
self.cover.fill(c.WHITE)
pygame.draw.circle(self.cover, c.BLACK, (self.cover.get_width()//2, self.cover.get_height()//2), self.cover.get_width()//2)
self.cover.set_colorkey(c.WHITE)
self.cover.set_alpha(80)
self.not_picked_cover = self.cover.copy()
self.not_picked_cover.set_alpha(128)
self.picked = None
self.since_vote = {option:999 for option in self.option_keys}
self.vfam = pygame.image.load(c.IMAGE_PATH + "/vote_for_a_modifier.png")
def vote(self, player_name, option):
option = option.upper()
if option not in self.option_keys:
return 0
for vote_option in self.votes:
cur_votes = self.votes[vote_option]
if player_name in cur_votes:
cur_votes.remove(player_name)
self.votes[option].add(player_name)
self.since_vote[option] = 0
for option in self.label_dict:
self.label_dict[option] = self.get_label_surf(option)
return 1
def get_label_surf(self, option):
text = f"!vote {option}"
color = self.color_dict[option]
render = self.game.alert_header_font.render(text, 1, color)
count_text = str(len(self.votes[option]))
count_render = self.game.alert_body_font.render(count_text, 1, color)
background = pygame.image.load(c.IMAGE_PATH + "/vote_label_background.png").convert()
tint = pygame.Surface((background.get_width(), background.get_height()))
tint.fill(color)
tint.set_alpha(10)
background.blit(tint, (0, 0), special_flags=pygame.BLEND_MULT)
background.set_colorkey(background.get_at((0, 0)))
background.blit(render,
(background.get_width()//2 - render.get_width()//2,
background.get_height()//2 - render.get_height()))
background.blit(count_render,
(background.get_width()//2 - count_render.get_width()//2,
background.get_height()//2))
return background
def determine_winner(self):
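# Tally the !vote counts for each option, break ties at random, and append the winning modifier to game.modifications (seeding a wind direction if SOLAR_WIND won).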
option_a_score = len(self.votes[c.OPTION_A])
option_b_score = len(self.votes[c.OPTION_B])
if option_a_score > option_b_score:
self.picked = c.OPTION_A
elif option_a_score < option_b_score:
self.picked = c.OPTION_B
else:
self.picked = random.choice([c.OPTION_A, c.OPTION_B])
modification = self.option_strings[self.picked]
self.game.modifications.append(modification)
if modification is c.SOLAR_WIND:
self.game.solar_wind_direction = random.choice([c.UP, c.DOWN, c.LEFT, c.RIGHT])
def draw_option(self, option_key, surface, offset=(0, 0)):
planet = self.planet_dict[option_key]
x = offset[0]
y = offset[1]
planet.pose.x = x
planet.pose.y = y
planet.align_graphic_pose()
planet.draw(surface)
surface.blit(self.cover, (x - self.cover.get_width()//2, y - self.cover.get_height()//2))
texts = [self.game.voting_planet_font.render(text, 0, c.BLACK) for text in self.option_strings[option_key].split()]
backs = [pygame.Surface((text.get_width(), text.get_height())) for text in texts]
for back in backs:
back.fill(c.MAGENTA)
for i, text in enumerate(texts):
texts[i] = backs[i]
texts[i].blit(text, (0, 0))
texts[i].set_colorkey(c.MAGENTA)
texts[i].set_alpha(90)
white_texts = [self.game.voting_planet_font.render(text, 1, c.WHITE) for text in self.option_strings[option_key].split()]
total_height = sum([text.get_height() for text in texts])
y -= total_height//2
for white, black in zip(white_texts, texts):
for offset in c.TEXT_BLIT_OFFSETS:
surface.blit(black, (x - black.get_width()//2 + offset[0], y + offset[1]))
surface.blit(white, (x - white.get_width()//2, y - 1))
y += black.get_height()
if self.picked is not None and self.picked is not option_key:
surface.blit(self.not_picked_cover, (planet.pose.x - self.cover.get_width()//2, planet.pose.y - self.cover.get_height()//2))
#if self.picked is None:
y = planet.pose.y
label = self.label_dict[option_key]
label_scale = min(1, self.since_vote[option_key]*1.5 + 0.7)
if self.picked is not None and self.picked != option_key:
label_scale = max(0, 1 + self.time_left()*3)
if label_scale != 0:
label = pygame.transform.scale(label,
(int(label.get_width() * label_scale),
int(label.get_height() * label_scale)))
surface.blit(label, (x - label.get_width()//2, y + 95 - label.get_height()//2))
def time_left(self):
return self.parent.countdown.duration - 5
def draw(self, surface, offset):
x = self.pose.x + offset[0]
y = self.pose.y + offset[1]
dist_apart = 200
self.draw_option(self.option_keys[0], surface, offset=(x-dist_apart//2, y))
self.draw_option(self.option_keys[1], surface, offset=(x+dist_apart//2, y))
surface.blit(self.vfam, (x - self.vfam.get_width()//2, y - c.WINDOW_HEIGHT*0.17))
def update(self, dt, events):
for key in self.planet_dict:
self.planet_dict[key].update(dt, events)
for option in self.since_vote:
self.since_vote[option] += dt
if self.time_left() <= 0 and self.picked is None:
self.determine_winner()
class Countdown(GameObject):
def __init__(self, game, position):
super().__init__(game)
self.duration = 50.999 # seconds
self.pose = Pose(position, 0)
self.color = (100, 110, 135)
def update(self, dt, events):
self.duration -= dt
def over(self):
return self.duration < 0
def to_string(self):
if self.over():
return "0"
else:
return f"{int(self.duration)}"
def draw(self, surface, offset=(0, 0)):
text_surf = self.game.scoreboard_font.render("Next round in ", 1, self.color)
surf = self.game.small_timer_font.render(self.to_string(), 1, self.color)
width = text_surf.get_width() + surf.get_width()
x = self.pose.x + offset[0] - width//2
y = self.pose.y + offset[1]
scale = 0.6 + abs(math.sin(self.duration * math.pi)) * 0.4
scale = 1 - (1 - scale)**1.5
if self.duration < 0:
scale = max(0, 0.7 + self.duration)
scaled_surf = pygame.transform.scale(surf, (int(surf.get_width() * scale), int(surf.get_height() * scale)))
surface.blit(scaled_surf,
(x + text_surf.get_width() + surf.get_width()//2 - scaled_surf.get_width()//2,
y - scaled_surf.get_height()//2))
surface.blit(text_surf, (x, y - text_surf.get_height()//2))
class TransitionGui(GameObject):
def __init__(self, game):
super().__init__(game)
self.age = 0
self.width = c.WINDOW_WIDTH - c.SCORE_TABLE_WIDTH
self.height = c.WINDOW_HEIGHT
self.pose = Pose((c.SCORE_TABLE_WIDTH + self.width//2, c.WINDOW_HEIGHT//2), 0)
self.objects = []
self.background = pygame.image.load(c.IMAGE_PATH + "/trans_gui_back.png")
self.background = pygame.transform.scale(self.background, (self.width, self.height))
self.add_tip_box()
self.add_player_mult_box()
self.objects.append(Countdown(self.game, (0, c.WINDOW_HEIGHT*0.44)))
self.countdown = self.objects[-1]
mod_options = random.sample(c.MODIFICATIONS, 2)
self.voting = VotingObject(self.game, self, (0, 0), mod_options)
self.objects.append(self.voting)
def countdown_over(self):
return self.countdown.over()
def add_tip_box(self):
position = 0, c.WINDOW_HEIGHT*0.30
header = "Helpful hint"
body = random.choice(c.HINTS)
#ss = pygame.image.load(c.IMAGE_PATH + "/bang.png")
self.objects.append(GreenAlertBox(self.game, position, header, body))
def add_player_mult_box(self):
position = 0, -c.WINDOW_HEIGHT * 0.35
header = "Player party multiplier"
if self.game.player_multiplier() == 0:
choices = c.MULT_0_MESSAGES
else:
choices = c.MULT_MESSAGES
body = random.choice(choices).replace("{num}", str(self.game.number_of_players_last_round()))
self.objects.append(PlayerMultiplierAlertBox(self.game, position, header, body))
def vote(self, player, option):
if self.voting.picked is None:
self.game.vote_sound.play()
return self.voting.vote(player, option)
def update(self, dt, events):
self.age += dt
for item in self.objects:
item.update(dt, events)
def draw(self, surface, offset=(0, 0)):
xoff = offset[0] + self.pose.x
yoff = offset[1] + self.pose.y
surface.blit(self.background, (xoff - self.width//2, yoff - self.height//2))
for item in self.objects:
item.draw(surface, (xoff, yoff))
```
|
{
"source": "jeremycryan/GGJ2022",
"score": 3
}
|
#### File: jeremycryan/GGJ2022/game.py
```python
import pygame
import sys
import math
import constants as c
from level_scene import LevelScene
from title_scene import TitleScene
class Game:
def __init__(self):
pygame.init()
self.music = pygame.mixer.Sound(c.sound_path("birdsong.ogg"))
self.music.set_volume(0.7)
self.music.play(-1)
self.place_bird_sound = pygame.mixer.Sound(c.sound_path("place_bird.wav"))
self.place_bird_sound.set_volume(0.1)
self.pick_up_bird_sound = pygame.mixer.Sound(c.sound_path("pick_up_bird.wav"))
self.pick_up_bird_sound.set_volume(0.13)
self.talk_sound = pygame.mixer.Sound(c.sound_path("pigeonhole_talk.wav"))
self.talk_sound.set_volume(0.02)
self.advance_dialogue_noise = pygame.mixer.Sound(c.sound_path("advance_dialog.wav"))
self.advance_dialogue_noise.set_volume(0.1)
self.try_again_noise = pygame.mixer.Sound(c.sound_path("try_again.wav"))
self.try_again_noise.set_volume(0.1)
self.change_bird_type = pygame.mixer.Sound(c.sound_path("change_bird_type.wav"))
self.change_bird_type.set_volume(0.04)
self.victory_sound = pygame.mixer.Sound(c.sound_path("victory.wav"))
self.victory_sound.set_volume(0.2)
self.screen = pygame.display.set_mode(c.WINDOW_SIZE)
pygame.display.set_caption(c.TITLE)
self.clock = pygame.time.Clock()
self.since_shake = 0
self.shake_amt = 0
self.seen_rocks = False
self.main()
def shake(self, amt=15):
self.shake_amt = max(amt, self.shake_amt)
self.since_shake = 0
def get_shake_offset(self):
xoff = math.cos(self.since_shake*40) * self.shake_amt
yoff = math.sin(self.since_shake*25) * self.shake_amt
return (xoff, yoff)
def main(self):
self.clock.tick(c.FPS)
levels = [
"basic_2x2.yaml",
"basic_3x3.yaml",
"basic_4x4.yaml",
"stairs_4x4.yaml",
"circle_4x4.yaml",
"hourglass_4x4.yaml",
"fish_4x4.yaml",
"turtles_3x3.yaml",
"turtles_4x4.yaml",
"turtles_5x5.yaml",
"basic_5x5.yaml",
"raven_5x5.yaml",
"raven_6x6.yaml",
"basic_6x6.yaml",
"test_level.yaml",
]
self.level = TitleScene(self)
while True:
dt = self.clock.tick(c.FPS)/1000
dt, events = self.get_events(dt)
self.level.update(dt, events)
self.level.draw(self.screen, self.get_shake_offset())
if self.level.done:
if self.level.level_path == "basic_4x4.yaml":
solved = self.level.grid.get_4x4_type()
if solved and solved in levels:
levels.remove(solved)
self.level = LevelScene(self, levels.pop(0))
pygame.display.flip()
def get_events(self, dt):
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
self.since_shake += dt
self.shake_amt *= 0.005**dt
self.shake_amt -= 10*dt
self.shake_amt = max(0, self.shake_amt)
return dt, events
if __name__=="__main__":
Game()
```
#### File: jeremycryan/GGJ2022/principal.py
```python
import pygame
import constants as c
class Principal:
def __init__(self, lines, game):
self.lines = lines
self.current_line = 0
self.time_on_current_line = 0
self.game = game
self.age = 0
self.gradient = pygame.image.load(c.image_path("gradient_2.png"))
self.gradient = pygame.transform.scale(self.gradient, (c.WINDOW_WIDTH, self.gradient.get_height()))
self.label = pygame.image.load(c.image_path("pigeonhole_label.png"))
self.principal = pygame.image.load(c.image_path("principal_pigeonhole.png"))
self.font = pygame.font.Font(c.font_path("micross.ttf"), 20)
self.done = False
self.since_tweet = 0
self.at = 1
def update(self, dt, events):
self.age += dt
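# Hold for self.at seconds before the dialogue panel slides in and the text starts ticking out.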
if self.age < self.at:
return
self.since_tweet += dt
if self.lines and int(self.time_on_current_line * c.CHARS_PER_SECOND) < len(self.lines[self.current_line]):
if self.since_tweet > 0.12:
self.game.talk_sound.play()
self.since_tweet -= 0.12
self.time_on_current_line += dt
for event in events:
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
if not self.done and self.time_on_current_line * c.CHARS_PER_SECOND > len(self.lines[self.current_line]):
self.time_on_current_line = 0
if len(self.lines) > 1:
self.since_tweet = 0
self.lines.pop(0)
else:
self.lines = []
self.done = True
self.game.advance_dialogue_noise.play()
def draw(self, surface, offset=(0, 0)):
yoff = self.done * (self.time_on_current_line**2 * 2500 - self.time_on_current_line * 300)
pxoff = (self.time_on_current_line * 400) * self.done
pyoff = (self.time_on_current_line**2 * 2400 - self.time_on_current_line * 1200) * self.done
pspin = self.done * self.time_on_current_line * 600
if self.age < self.at:
youngness = (1 - self.age/self.at)
yoff = 300 * youngness**2
pxoff = -300 * youngness**2
pyoff = -300 * youngness**2
pspin = 60 * youngness**2
psurf = self.principal.copy()
psurf = pygame.transform.rotate(psurf, pspin)
surface.blit(self.gradient, (0, c.WINDOW_HEIGHT - self.gradient.get_height() + 50 + yoff), special_flags=pygame.BLEND_MULT)
surface.blit(self.label, (-50 -yoff * 3, c.WINDOW_HEIGHT - 200), special_flags=pygame.BLEND_ADD)
surface.blit(psurf, (105 + pxoff - psurf.get_width()//2, c.WINDOW_HEIGHT - 235 + pyoff - psurf.get_height()//2))
chars_to_show = int(self.time_on_current_line * c.CHARS_PER_SECOND)
if self.lines:
lines = self.lines[self.current_line][:chars_to_show].split("\n")
else:
lines = []
x = 200
y = c.WINDOW_HEIGHT - 120
for line in lines:
surf = self.font.render(line, 0, (200, 160, 220))
surface.blit(surf, (x, y))
y += 24
pass
```
|
{
"source": "jeremycryan/LD46",
"score": 3
}
|
#### File: jeremycryan/LD46/background.py
```python
import pygame
import constants as c
class Background:
def __init__(self, game):
self.age = 0
self.game = game
def update(self, dt, events):
self.age += dt
pass
def draw(self, surface):
pass
class Caves(Background):
def __init__(self, game):
super().__init__(game)
self.load_layers()
self.upside_down_layers = [pygame.transform.rotate(layer, 180) for layer in self.layers]
self.layer_factors = [0.2, 0.5, 0.8]
self.speed = -300
self.x = 0
self.alpha = 0
self.target_alpha = 0
self.temp_surf = None
self.since_full = 10
self.update_per = 3
def load_layers(self):
rel = "cave"
self.layer_1 = pygame.image.load(c.image_path(f"{rel}_layer_1.png"))
self.layer_2 = pygame.image.load(c.image_path(f"{rel}_layer_2.png"))
self.layer_3 = pygame.image.load(c.image_path(f"{rel}_layer_3.png"))
self.layers = [self.layer_3, self.layer_2, self.layer_1]
def fade_in(self):
self.target_alpha = 150
def fade_out(self):
self.target_alpha = 0
def update(self, dt, events):
super().update(dt, events)
self.since_full += 1
self.x += self.speed * dt
da = self.target_alpha - self.alpha
if da:
self.alpha += da/abs(da) * 200 * dt
if da > 0:
self.alpha = min(self.target_alpha, self.alpha)
else:
self.alpha = max(self.target_alpha, self.alpha)
def draw(self, surface):
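# Each parallax layer scrolls at its own factor and is blitted twice (at x and x - width) so it tiles seamlessly; a vertically mirrored copy fills the lower half.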
y = 0
base = pygame.Surface((self.layers[0].get_width(),
self.layers[0].get_height()*2))
for i, layer in enumerate(self.layers):
x = int((self.x * self.layer_factors[i]) % c.WINDOW_WIDTH)
base.blit(layer, (x, y))
base.blit(layer, (x - layer.get_width(), y))
low_y = y + layer.get_height()
low_layer = self.upside_down_layers[i]
base.blit(low_layer, (x, low_y))
base.blit(low_layer, (x - low_layer.get_width(), low_y))
base.set_alpha(self.alpha)
surface.blit(base, (0, 80))
class Ruins(Caves):
def fade_in(self):
self.target_alpha = 120
def load_layers(self):
rel = "ruins"
self.layer_1 = pygame.image.load(c.image_path(f"{rel}_layer_1.png"))
self.layer_2 = pygame.image.load(c.image_path(f"{rel}_layer_2.png"))
self.layer_3 = pygame.image.load(c.image_path(f"{rel}_layer_3.png"))
self.layers = [self.layer_3, self.layer_2, self.layer_1]
class Dungeon(Caves):
def load_layers(self):
rel = "dungeon"
self.layer_1 = pygame.image.load(c.image_path(f"{rel}_layer_1.png"))
self.layer_2 = pygame.image.load(c.image_path(f"{rel}_layer_2.png"))
self.layer_3 = pygame.image.load(c.image_path(f"{rel}_layer_3.png"))
self.layers = [self.layer_3, self.layer_2, self.layer_1]
def draw(self, surface):
if self.alpha <= 0:
return
y = 0
if not self.temp_surf or self.since_full > 1:
self.temp_surf = pygame.Surface((self.layers[0].get_width(),
self.layers[0].get_height()*2))
self.since_full = 0
for i, layer in enumerate(self.layers):
x = int((self.x * self.layer_factors[i]) % c.WINDOW_WIDTH)
self.temp_surf.blit(layer, (x, y))
self.temp_surf.blit(layer, (x - layer.get_width(), y))
self.temp_surf.set_alpha(self.alpha)
surface.blit(self.temp_surf, (0, 80))
```
#### File: jeremycryan/LD46/particle.py
```python
import pygame
import constants as c
import random
import math
class Particle:
def __init__(self, game, position):
self.x, self.y = position
if self.x > c.WINDOW_WIDTH + 200 or self.x < -200 or self.y > c.WINDOW_HEIGHT + 200 or self.y < -200:
return
game.particles.append(self)
self.game = game
def update(self, dt, events):
pass
def draw(self, surface):
pass
def destroy(self):
if self in self.game.particles:
self.game.particles.remove(self)
class Bit(Particle):
surfs = []
def __init__(self, game, position, color=c.WHITE):
super().__init__(game, position)
self.alpha = 255
self.body_font = pygame.font.Font(c.font_path("Myriad.otf"), 12)
char = random.choice(["0", "1"])
self.surf = self.body_font.render(char, 0, color).convert()
def update(self, dt, events):
self.alpha -= 400 * dt
if self.alpha <= 0:
self.destroy()
if self.x < -200 or self.x > c.WINDOW_WIDTH + 200:
self.destroy()
if self.y < -200 or self.y > c.WINDOW_HEIGHT + 200:
self.destroy()
def draw(self, surface):
x, y = self.game.xy_transform(self.x, self.y)
x -= self.surf.get_width()//2
y -= self.surf.get_height()//2
self.surf.set_alpha(self.alpha)
surface.blit(self.surf, (x, y))
class Spark(Particle):
def __init__(self, game, position, color=c.WHITE, width=3, speed=100, fade=600):
super().__init__(game, position)
self.alpha = 255
self.direction = random.random() * 360
self.speed = random.random() * speed
self.width = width
self.surf = pygame.Surface((width, width))
self.surf.fill(color)
self.fade = fade
def update(self, dt, events):
self.alpha -= self.fade * dt
if self.alpha <= 0:
self.destroy()
self.x += self.speed * dt * math.cos(self.direction * math.pi/180)
self.y += -self.speed * dt * math.sin(self.direction * math.pi/180)
def draw(self, surface):
x, y = self.game.xy_transform(self.x, self.y)
x -= self.surf.get_width()//2
y -= self.surf.get_height()//2
self.surf.set_alpha(self.alpha)
surface.blit(self.surf, (x, y))
class SpeedyFlash(Particle):
def __init__(self, game, direction, duration = 0.2):
y = c.WINDOW_HEIGHT//2
x = c.WINDOW_WIDTH//2
if direction == c.LEFT:
x = 0
elif direction == c.RIGHT:
x = c.WINDOW_WIDTH
elif direction == c.UP:
y = 0
elif direction == c.DOWN:
y = c.WINDOW_HEIGHT
super().__init__(game, (x, y))
self.age = 0
self.duration = duration
self.radius = 40
self.surf = pygame.Surface((self.radius * 2, self.radius * 2))
pygame.draw.circle(self.surf, c.PURPLE, (self.radius, self.radius), self.radius)
self.surf.set_colorkey(c.BLACK)
self.surf.set_alpha(255)
def update(self, dt, events):
super().update(dt, events)
self.age += dt
if self.age > self.duration:
self.destroy()
else:
prop = (self.duration - self.age)/self.duration
self.surf.set_alpha(prop * 150)
self.radius = (1 - prop) * 100 + 40
def draw(self, surface):
prev_alpha = self.surf.get_alpha()
self.surf = pygame.Surface((self.radius * 2, self.radius * 2))
if prev_alpha is not None:
lighten = 50 * prev_alpha/255
else:
lighten = 100
color = min(255, c.PURPLE[0] + lighten), min(255, c.PURPLE[1] + lighten), min(255, c.PURPLE[2] + lighten)
pygame.draw.circle(self.surf, color, (int(self.radius), int(self.radius)), int(self.radius))
self.surf.set_colorkey(c.BLACK)
self.surf.set_alpha(prev_alpha)
x, y = self.game.xy_transform(self.x, self.y)
x -= self.surf.get_width()//2
y -= self.surf.get_height()//2
surface.blit(self.surf, (x, y))
```
|
{
"source": "jeremycryan/LD47",
"score": 3
}
|
#### File: jeremycryan/LD47/particle.py
```python
import pygame
import random
from enemy import Enemy
from sprite_tools import SpriteSheet, Sprite
import constants as c
class Particle:
def __init__(self, game, surface, duration=None, position=(0, 0), velocity=(0, 0), rotation=0, drag = 0.05):
self.game = game
self.surf = surface
self.duration = duration
self.velocity = list(velocity)
self.x, self.y = position
self.rotation = rotation
self.angle = random.random()*360
self.game.particles.add(self)
self.drag = drag
self.radius = 5
self.age = 0
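# Borrow Enemy's tile-collision helpers as plain functions; the wrappers below pass this particle in as `self` so it bounces off tiles the same way an enemy does.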
self._bump_tile = Enemy.bump_tile
self._check_tile_collisions = Enemy.check_tile_collisions
def update(self, dt, events):
self.velocity[0] *= self.drag**dt
self.velocity[1] *= self.drag**dt
self.x += self.velocity[0] * dt
self.y += self.velocity[1] * dt
self.angle += self.rotation * dt
self.rotation *= 0.5**dt
if self.duration is not None:
self.duration -= dt
if self.duration <= 0:
self.destroy()
self.age += dt
self.check_tile_collisions()
def bump_tile(self, *args, **kwargs):
self._bump_tile(self, *args, **kwargs)
def check_tile_collisions(self):
self._check_tile_collisions(self)
def destroy(self):
self.game.particles.remove(self)
def draw(self, surface, offset=(0, 0)):
surf = pygame.transform.rotate(self.surf, self.angle)
x = self.x - surf.get_width()//2 + offset[0]
y = self.y - surf.get_height()//2 + offset[1]
surface.blit(surf, (x, y))
class BulletHit(Particle):
def __init__(self, game, position=(0, 0)):
super().__init__(game, None, position=position)
self.sprite = Sprite(16)
anim = SpriteSheet(c.image_path("bullet_hit.png"), (8, 1), 8)
self.sprite.add_animation({"Default": anim})
self.sprite.start_animation("Default")
self.game.top_particles.add(self)
surface = pygame.Surface((100, 100))
surface.fill((0, 0, 0))
pygame.draw.circle(surface, (255, 255, 255), (50, 50), 50)
self.boom = surface
self.boom.set_colorkey((0, 0, 0))
self.boom_alpha = 200
self.boom_size = 40
def update(self, dt, events):
self.age += dt
self.boom_alpha -= 1600*dt
self.boom_size += 600*dt
self.sprite.update(dt)
if self.age > 0.4:
self.destroy()
def draw(self, surface, offset=(0, 0)):
x = self.x + offset[0]
y = self.y + offset[1]
self.sprite.set_position((x, y))
self.sprite.draw(surface)
boom = pygame.transform.scale(self.boom, (int(self.boom_size), int(self.boom_size)))
boom.set_alpha(self.boom_alpha)
surface.blit(boom, (x - self.boom_size//2, y - self.boom_size//2))
def destroy(self):
super().destroy()
self.game.top_particles.remove(self)
class TrailBit(Particle):
def __init__(self, game, owner, **kwargs):
surface = pygame.Surface((24, 24))
surface.fill((0, 0, 0))
self.owner = owner
color = (255, 255, 255)
pygame.draw.circle(surface, color, (12, 12), 12)
surface.set_colorkey((0, 0, 0))
super().__init__(game, surface, **kwargs)
self.duration = 1
self.start_alpha = 50
def update(self, dt, events):
super().update(dt, events)
alpha = (self.duration - self.age)*self.start_alpha
self.surf.set_alpha(alpha)
if self.age > self.duration:
self.destroy()
```
#### File: jeremycryan/LD47/powerup.py
```python
import pygame
import constants as c
import math
class Powerup:
def __init__(self, game, surface, pos=(0, 0)):
self.radius = 24
self.age = 0
self.game = game
self.x, self.y = pos
self.y_offset = -self.y
self.surface = surface
self.shadow = pygame.Surface((self.radius*2, self.radius*2))
self.shadow.fill((255, 255, 255))
self.shadow.set_colorkey((255, 255, 255))
self.shadow.set_alpha(40)
self.landed = False
pygame.draw.circle(self.shadow, (0, 0, 0), (self.radius, self.radius), self.radius)
self.glow = self.game.get_static(c.image_path("glow.png"))
def update(self, dt, events):
self.age += dt
if self.landed:
self.y_offset = 6 * math.sin(self.age * 6)
else:
self.y_offset += 600*dt
if self.y_offset >= 0:
self.y_offset = 0
self.landed = True
self.game.current_scene.shake(5)
self.game.powerup_land_noise.play()
self.check_collisions()
def draw(self, surface, offset=(0, 0)):
x = self.x + offset[0] - self.glow.get_width()//2
y = self.y + offset[1] - self.glow.get_height()//2 + self.y_offset - 30
surface.blit(self.glow, (x, y), special_flags = pygame.BLEND_RGBA_ADD)
width = self.shadow.get_width()
width -= 10
if (int(width + self.y_offset/2)) > 0:
shadow = pygame.transform.scale(self.shadow, (int(width + self.y_offset/2), int(width + self.y_offset/2)))
x = self.x + offset[0] - shadow.get_width()//2
y = self.y + offset[1] - shadow.get_height()//2
surface.blit(shadow, (x, y))
x = self.x + offset[0] - self.surface.get_width()//2
y = self.y + offset[1] - self.surface.get_height()//2 + self.y_offset - 30
surface.blit(self.surface, (x, y))
def check_collisions(self):
for player in self.game.current_scene.players:
if c.mag(player.x - self.x, player.y - self.y) < player.radius + self.radius and self.landed:
self.collected_by(player)
break
def collected_by(self, player):
self.game.powerup_collect_noise.play()
self.game.powerups.remove(self)
class FastSpinPowerup(Powerup):
def __init__(self, game, pos=(0, 0)):
surface = game.get_static(c.image_path("spin.png"))
super().__init__(game, surface, pos=pos)
def collected_by(self, player):
super().collected_by(player)
FastSpin(player)
class SlipperySocksPowerup(Powerup):
def __init__(self, game, pos=(0, 0)):
surface = game.get_static(c.image_path("socks.png"))
super().__init__(game, surface, pos=pos)
def collected_by(self, player):
super().collected_by(player)
SlipperySocks(player)
class DoubleShotPowerup(Powerup):
def __init__(self, game, pos=(0, 0)):
surface = game.get_static(c.image_path("double.png"))
super().__init__(game, surface, pos=pos)
def collected_by(self, player):
super().collected_by(player)
DoubleShot(player)
class BouncyPowerup(Powerup):
def __init__(self, game, pos=(0, 0)):
surface = game.get_static(c.image_path("bouncy.png"))
super().__init__(game, surface, pos=pos)
def collected_by(self, player):
super().collected_by(player)
Bouncy(player)
class FastShootingPowerup(Powerup):
def __init__(self, game, pos=(0, 0)):
surface = game.get_static(c.image_path("mandible.png"))
super().__init__(game, surface, pos=pos)
def collected_by(self, player):
super().collected_by(player)
FastShooting(player)
class Effect:
def __init__(self, owner):
self.age = 0
self.owner = owner
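# If the owner already has an effect with this id, refresh its timer instead of stacking a duplicate.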
found = False
for item in self.owner.effects:
if item.id == self.id:
item.age = 0
found = True
break
if not found:
self.owner.effects.append(self)
def update(self, dt, events):
self.age += dt
if self.age > self.duration:
self.end()
def end(self):
self.owner.effects.remove(self)
class FastSpin(Effect):
def __init__(self, owner):
self.id=c.FAST_SPINNING
self.duration = 25
super().__init__(owner)
self.name = "Caffeine"
self.description = "Spin to win"
self.mult = 2
self.icon = pygame.transform.scale2x(self.owner.game.get_static(c.image_path("spin_icon.png")))
class SlipperySocks(Effect):
def __init__(self, owner):
self.id=c.SLIPPERY_SOCKS
self.name = "Slippery Socks"
self.description = "There better be a bulk discount"
self.duration = 18
super().__init__(owner)
self.icon = pygame.transform.scale2x(self.owner.game.get_static(c.image_path("socks_icon.png")))
class DoubleShot(Effect):
def __init__(self, owner):
self.id=c.DOUBLE_SHOT
self.name = "Double Shot"
self.description = "For that special someone you really want to shoot twice"
self.duration = 18
super().__init__(owner)
self.icon = pygame.transform.scale2x(self.owner.game.get_static(c.image_path("double_icon.png")))
class Bouncy(Effect):
def __init__(self, owner):
self.id=c.BOUNCY
self.name = "Bouncy Bullets"
self.description = "When the collision code works correctly"
self.duration = 18
super().__init__(owner)
self.icon = pygame.transform.scale2x(self.owner.game.get_static(c.image_path("bouncy_icon.png")))
class FastShooting(Effect):
def __init__(self, owner):
self.id=c.FAST_SHOOTING
self.name = "<NAME>"
self.description = "Improves regurigation efficiency by 80% or more"
self.duration = 25
super().__init__(owner)
self.icon = pygame.transform.scale2x(self.owner.game.get_static(c.image_path("mandible_icon.png")))
```
|
{
"source": "jeremycryan/ScoreSpace8",
"score": 3
}
|
#### File: jeremycryan/ScoreSpace8/enemy.py
```python
import constants as c
import pygame
import math
from particle import Particle, Chunk, Fadeout
import os
import random
import time
lantern_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "lantern.png"))
lantern_touched_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "lantern_touched.png"))
big_lantern_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "big_lantern.png"))
big_lantern_touched_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "big_lantern_touched.png"))
perfect_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "perfect.png"))
perfect_surf_large = pygame.transform.scale(perfect_surf, (perfect_surf.get_width()*2, perfect_surf.get_height()*2))
good_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "good.png"))
okay_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "okay.png"))
nope_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "nope.png"))
class Enemy:
def __init__(self, game, radius = 30, x=c.WINDOW_WIDTH//2, y=c.WINDOW_HEIGHT//2):
self.game = game
self.radius = radius
self.x = x
self.y = y
self.angle = random.random() * 60 + 15
self.surf = lantern_surf
self.draw_surf = pygame.transform.rotate(self.surf, self.angle)
self.touched_surf = lantern_touched_surf
self.touched_surf = pygame.transform.rotate(self.touched_surf, self.angle)
# self.draw_surf.set_colorkey(c.BLACK)
# self.touched_surf.set_colorkey(c.BLACK)
self.touched = False
self.launch_factor=1.0
self.glow = self.generate_glow()
self.age = random.random()
def generate_glow(self, radius=1.7):
glow_radius = int(radius * self.radius)
self.glow = pygame.Surface((glow_radius*2, glow_radius*2))
pygame.draw.circle(self.glow, c.WHITE, (glow_radius, glow_radius), glow_radius)
self.glow.set_alpha(20)
self.glow.set_colorkey(c.BLACK)
return self.glow
def update(self, dt, events):
if self.y < self.game.y_offset - self.radius*3:
self.remove()
self.age += dt
radius = 1.7 + 0.07*math.sin(self.age*25)
if self.y < self.game.y_offset + 1.5*c.WINDOW_HEIGHT:
self.glow = self.generate_glow(radius)
def draw(self, surface):
if self.y > self.game.y_offset + c.WINDOW_HEIGHT*2:
return
x, y = self.game.game_position_to_screen_position((self.x, self.y))
surface.blit(self.glow, (int(x - self.glow.get_width()//2), int(y - self.glow.get_height()//2)))
if not self.touched:
surface.blit(self.draw_surf,
(int(x - self.draw_surf.get_width()/2), int(y - self.draw_surf.get_height()/2)))
else:
surface.blit(self.touched_surf,
(int(x - self.draw_surf.get_width()/2), int(y - self.draw_surf.get_height()/2)))
def touch(self):
self.touched = True
def remove(self):
self.game.enemies.remove(self)
def destroy(self, cut_prop=0.5):
self.remove()
angle = self.game.player.get_angle()
cutoff = int(cut_prop*self.radius*2)
top_offset = self.radius - cutoff//2
bottom_offset = -cutoff//2
angle_rad = -angle/180 * math.pi
top_offset = (top_offset * math.sin(angle_rad), top_offset * math.cos(angle_rad))
bottom_offset = (bottom_offset * math.sin(angle_rad), bottom_offset * math.cos(angle_rad))
particle_surf = pygame.Surface((self.radius*2, cutoff))
particle_surf.blit(self.surf, (0, 0))
top_half = Particle(self.game,
particle_surf,
(self.x + top_offset[0], self.y + top_offset[1]),
rotation=120,
velocity=(-30, 500),
angle=angle)
self.game.particles.append(top_half)
particle_surf = pygame.Surface((self.radius*2, self.radius*2 - cutoff))
particle_surf.blit(self.surf, (0, -cutoff))
bottom_half = Particle(self.game,
particle_surf,
(self.x + bottom_offset[0], self.y + bottom_offset[1]),
rotation=-40,
velocity=(60, 150),
angle=angle)
self.game.particles.append(bottom_half)
self.game.particles.append(Fadeout(self.game, self.glow, (self.x, self.y)))
for i in range(30):
self.game.particles.append(Chunk(self.game, (self.x, self.y)))
if abs(cut_prop - 0.5) < 0.02:
self.glow.set_alpha(100)
surf = perfect_surf.copy().convert()
surf2 = perfect_surf_large.copy().convert()
surf2.set_colorkey((255, 0, 255))
surf2.set_alpha(90)
self.game.text_particles.append(Fadeout(self.game, surf2, (self.x, self.y), rate=200))
self.game.flare_up(60)
self.game.tear_sound()
elif abs(cut_prop - 0.5) < 0.25:
surf = good_surf.copy().convert()
self.game.bad_tear_sound()
else:
surf = okay_surf.copy().convert()
self.game.bad_tear_sound()
surf.set_colorkey((255, 0, 255))
surf.set_alpha(255)
self.game.text_particles.append(Fadeout(self.game, surf, (self.x, self.y), rate=400))
class BigEnemy(Enemy):
def __init__(self, game, x=c.WINDOW_WIDTH//2, y=c.WINDOW_HEIGHT//2):
self.game = game
self.radius = 40
self.x = x
self.y = y
self.angle = random.random() * 60 - 30
self.surf = big_lantern_surf
self.draw_surf = pygame.transform.rotate(self.surf, self.angle)
self.touched_surf = big_lantern_touched_surf
self.touched_surf = pygame.transform.rotate(self.touched_surf, self.angle)
self.touched = False
self.launch_factor = 1.3
self.age = 0
self.glow = self.generate_glow()
class TutorialEnemy(BigEnemy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def draw(self, surface):
super().draw(surface)
def destroy(self, cut_prop=0.5):
if abs(cut_prop - 0.5) < 0.02:
super().destroy(cut_prop=cut_prop)
else:
self.game.nope.play()
self.game.shake_effect(15)
surf = nope_surf.copy().convert()
surf.set_colorkey((255, 0, 255))
surf.set_alpha(255)
self.game.text_particles.append(Fadeout(self.game, surf, (self.x, self.y), rate=400))
self.since_hit = 0
class SmallEnemy(Enemy):
def __init__(self, game, x=c.WINDOW_WIDTH//2, y=c.WINDOW_HEIGHT//2):
self.game = game
self.radius = 35
self.x = x
self.y = y
self.angle = random.random() * 60 + 15
self.surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "small_lantern.png"))
self.draw_surf = pygame.transform.rotate(self.surf, self.angle)
self.touched_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "small_lantern_touched.png"))
self.touched_surf = pygame.transform.rotate(self.touched_surf, self.angle)
self.touched = False
self.launch_factor = 1.15
self.age = 0
self.glow = self.generate_glow()
```
|
{
"source": "jeremyCtown/block-is-hott",
"score": 3
}
|
#### File: hott/hott_api/tests.py
```python
from django.test import TestCase
# Create your tests here.
class BasicAPIViewTests(TestCase):
"""Class for testing views."""
def test_crime_correct(self):
"""Test."""
response = self.client.get('/api/v1/crime/')
self.assertEqual(response.status_code, 200)
def test_entertainment_correct(self):
"""Test."""
response = self.client.get('/api/v1/entertainment/')
self.assertEqual(response.status_code, 200)
def test_events_correct(self):
"""Test."""
response = self.client.get('/api/v1/events/')
self.assertEqual(response.status_code, 200)
def test_art_correct(self):
"""Test."""
response = self.client.get('/api/v1/art/')
self.assertEqual(response.status_code, 200)
def test_dirtiness_correct(self):
"""Test."""
response = self.client.get('/api/v1/dirtiness/')
self.assertEqual(response.status_code, 200)
def test_login_correct(self):
"""Test."""
response = self.client.get('/api/v1/login/')
self.assertEqual(response.status_code, 405)
def test_user_correct(self):
"""Test."""
response = self.client.get('/api/v1/user/')
self.assertEqual(response.status_code, 200)
```
#### File: hott/hott_api/views.py
```python
from rest_framework.authentication import TokenAuthentication
from .serializers import UserSerializer
from rest_framework.response import Response
from rest_framework import generics, status
from rest_framework.views import APIView
from ipywidgets.embed import embed_minimal_html
from traitlets.traitlets import TraitError
from hott_overlays.models import Crimes, Entertainment, Events, Art, Dirtiness
import gmaps
import os
import re
class UserApi(generics.RetrieveAPIView, generics.CreateAPIView):
permission_classes = '' # IsAuthenticated??
authentication_classes = (TokenAuthentication,)
serializer_class = UserSerializer
def retrieve(self, request, pk=None):
if not pk:
return Response(
UserSerializer(request.user).data, status=status.HTTP_200_OK)
return super().retrieve(request, pk)
def post(self, request, format=None):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CrimeMap(APIView):
"""Crime map view that takes in our crime data and serves the response."""
authentication_classes = ''
permission_classes = ''
def get(self, request, format=None):
"""Get route for crime map."""
gmaps.configure(api_key=os.environ.get('MAPS_API'))
locations = []
for each in Crimes.objects.all():
temp = []
temp.append(each.latitude)
temp.append(each.longitude)
locations.append(temp)
try:
heatmap_layer = gmaps.heatmap_layer(locations)
except TraitError:
heatmap_layer = gmaps.heatmap_layer([[47.465568160532435, -122.50131030799446]])
heatmap_layer.gradient = [
(0, 0, 0, 0.7),
(255, 105, 180, 0.4),
(255, 0, 0, 0.8)
]
fig = gmaps.figure()
fig.add_layer(heatmap_layer)
embed_minimal_html('export.html', views=[fig])
export = open('export.html').read()
return Response(export)
class EntertainmentMap(APIView):
"""Entertainment map view that takes in our cultural centers and serves the response."""
authentication_classes = ''
permission_classes = ''
def get(self, request, format=None):
"""Get route for entertainment map."""
gmaps.configure(api_key=os.environ.get('MAPS_API'))
locations = []
for each in Entertainment.objects.all():
temp = []
p = re.compile('[()°,]') # I know this is bad regex
split_location = p.sub('', str(each.location)).split()
try:
if split_location[0] != 'None' or split_location[1] != 'None':
temp.append(float(split_location[0]))
temp.append(float(split_location[1]))
locations.append(temp)
except IndexError:
pass
try:
heatmap_layer = gmaps.heatmap_layer(locations)
except TraitError:
heatmap_layer = gmaps.heatmap_layer([[47.465568160532435, -122.50131030799446]])
heatmap_layer.gradient = [
(0, 0, 0, 0.7),
(255, 178, 102, 0.4),
(255, 128, 0, 0.8)
]
fig = gmaps.figure()
fig.add_layer(heatmap_layer)
embed_minimal_html('export.html', views=[fig])
export = open('export.html').read()
return Response(export)
class EventMap(APIView):
"""Event map view that takes in our Event data and serves the response."""
authentication_classes = ''
permission_classes = ''
def get(self, request, format=None):
"""Get route for Event map."""
gmaps.configure(api_key=os.environ.get('MAPS_API'))
locations = []
for each in Events.objects.all():
temp = []
if each.latitude and each.longitude:
temp.append(each.latitude)
temp.append(each.longitude)
locations.append(temp)
try:
heatmap_layer = gmaps.heatmap_layer(locations)
except TraitError:
heatmap_layer = gmaps.heatmap_layer([[47.465568160532435, -122.50131030799446]])
heatmap_layer.gradient = [
(0, 0, 0, 0.7),
(255, 255, 153, 0.7),
(255, 255, 0, 1)
]
fig = gmaps.figure()
fig.add_layer(heatmap_layer)
embed_minimal_html('export.html', views=[fig])
export = open('export.html').read()
return Response(export)
class ArtMap(APIView):
"""Art map view that takes in our Art data and serves the response."""
authentication_classes = ''
permission_classes = ''
def get(self, request, format=None):
"""Get route for Art map."""
gmaps.configure(api_key=os.environ.get('MAPS_API'))
locations = []
for each in Art.objects.all():
temp = []
if each.latitude and each.longitude:
temp.append(each.latitude)
temp.append(each.longitude)
locations.append(temp)
try:
heatmap_layer = gmaps.heatmap_layer(locations)
except TraitError:
heatmap_layer = gmaps.heatmap_layer([[47.465568160532435, -122.50131030799446]])
heatmap_layer.gradient = [
(0, 0, 0, 0.7),
(0, 153, 0, 0.4),
(102, 255, 102, 0.8)
]
fig = gmaps.figure()
fig.add_layer(heatmap_layer)
embed_minimal_html('export.html', views=[fig])
export = open('export.html').read()
return Response(export)
class DirtinessMap(APIView):
"""Dirtiness map view that takes in our Dirtiness data and serves the response."""
authentication_classes = ''
permission_classes = ''
def get(self, request, format=None):
"""Get route for Dirtiness map."""
gmaps.configure(api_key=os.environ.get('MAPS_API'))
locations = []
for each in Dirtiness.objects.all():
temp = []
if each.latitude and each.longitude:
temp.append(each.latitude)
temp.append(each.longitude)
locations.append(temp)
try:
heatmap_layer = gmaps.heatmap_layer(locations)
except TraitError:
heatmap_layer = gmaps.heatmap_layer([[47.465568160532435, -122.50131030799446]])
heatmap_layer.gradient = [
(0, 0, 0, 0.7),
(255, 178, 102, 0.4),
(102, 51, 0, 0.8)
]
fig = gmaps.figure()
fig.add_layer(heatmap_layer)
embed_minimal_html('export.html', views=[fig])
export = open('export.html').read()
return Response(export)
```
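The project's URL configuration is not included in this excerpt. Below is a minimal sketch of how these views could be wired, using the paths exercised in hott_api/tests.py; the login route is a guess (DRF's `obtain_auth_token` rejects GET with 405, which matches the test's expectation), not the project's actual code.
```python
# Hypothetical urls.py; paths taken from hott_api/tests.py, the names
# and the login view are assumptions rather than the project's code.
from django.urls import path
from rest_framework.authtoken.views import obtain_auth_token
from .views import (UserApi, CrimeMap, EntertainmentMap, EventMap,
                    ArtMap, DirtinessMap)

urlpatterns = [
    path('api/v1/user/', UserApi.as_view()),
    path('api/v1/login/', obtain_auth_token),
    path('api/v1/crime/', CrimeMap.as_view()),
    path('api/v1/entertainment/', EntertainmentMap.as_view()),
    path('api/v1/events/', EventMap.as_view()),
    path('api/v1/art/', ArtMap.as_view()),
    path('api/v1/dirtiness/', DirtinessMap.as_view()),
]
```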
|
{
"source": "jeremyCtown/data-structures-and-algorithms",
"score": 4
}
|
#### File: challenges/breadth-first-traversal/breadth_first_traversal.py
```python
from queue import Queue
from bst import BST as tree
from node_bst import Node as node
def breadth_first_traversal(tree):
"""
Prints out bst from top to bottom, left to right
"""
q = Queue()
q.enqueue(tree.root)
test = [] # for testing purposes only
while q:
temp = q.front
out = q.dequeue().val.val
print(out)
test.append(out) # for testing purposes only
if temp.val.left:
q.enqueue(temp.val.left)
if temp.val.right:
q.enqueue(temp.val.right)
return test
```
#### File: challenges/fifo-animal-shelter/pets.py
```python
class Dog:
"""
Creates dog object
"""
def __init__(self):
self.val = 'dog'
def __repr__(self):
return self.val
class Cat:
"""
Creates cat object
"""
def __init__(self):
self.val = 'cat'
def __repr__(self):
return self.val
```
#### File: challenges/multi-bracket-validation/multi_bracket_validation.py
```python
from stack import Stack
from node import Node as node
def multi_bracket_validation(stuff):
"""
Return False if a closing bracket does not match the most recent unmatched opener; True otherwise.
"""
answer = True
checker = Stack()
openers = ['[', '{', '(']
for i in stuff:
if i in openers:
checker.push(i)
if i == ']':
if checker.top.val == '[':
checker.pop()
continue
else:
answer = False
break
if i == '}':
if checker.top.val == '{':
checker.pop()
continue
else:
answer = False
break
if i == ')':
if checker.top.val == '(':
checker.pop()
continue
else:
answer = False
break
return answer
```
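A small usage sketch for the validator above, assuming the accompanying stack and node modules are importable. Note that only mismatched closers are rejected; a string with leftover unclosed openers still returns True.
```python
# Usage sketch; requires the stack/node modules this challenge ships with.
from multi_bracket_validation import multi_bracket_validation

print(multi_bracket_validation('{[()]}'))                    # True
print(multi_bracket_validation('(round [square] {curly})'))  # True
print(multi_bracket_validation('[(])'))                      # False: ']' meets '(' on top
```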
#### File: challenges/repeated-word/repeated_word.py
```python
from hash_table import HashTable as ht
def repeated_word(string):
"""Return the first word to appear twice in a string."""
new_string = string.split()
table = ht()
if len(new_string) < 1:
return 'Empty string'
for word in new_string:
check_word = table.get(word)
if len(check_word) == 1:
return word
table.set(word, 1)
return 'No words repeat'
```
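A usage sketch for repeated_word, assuming the accompanying hash_table module is importable.
```python
# Usage sketch; requires the hash_table module used by the import above.
from repeated_word import repeated_word

print(repeated_word('it was the best of times it was the worst of times'))  # 'it'
print(repeated_word('every word here is unique'))                           # 'No words repeat'
print(repeated_word(''))                                                    # 'Empty string'
```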
#### File: challenges/shift-array/shift_array.py
```python
test_list = [3, 4, 5, 7, 8, 9]
test_num = 6
class ShiftArray:
"""
new class to complete challenge
"""
def __init__(self, test_list, test_num):
self.counter = 0
self.zero = 0
self.new_list = test_list + [0]
self.insert_index = 0
self.splitter = 0
self.test_list = test_list
self.test_num = test_num
def increment_counter(self):
"""
Increments counter to equal list length
"""
for i in self.test_list:
self.counter += 1
self.splitter = self.counter
self.split_counter(self.splitter)
return self.counter
def split_counter(self, splitter):
"""
Divides counter in half
"""
self.insert_index = float(splitter)/2
self.insert_shift_array(self.test_list, self.test_num, self.insert_index)
return self.insert_index
def insert_shift_array(self, test_list, test_num, splitter):
"""
builds new list
"""
for i in test_list:
if self.zero < splitter:
self.new_list[self.zero] = i
self.zero += 1
elif (splitter + 1) > self.zero >= splitter:
self.new_list[self.zero] = test_num
self.new_list[self.zero + 1] = i
self.zero += 2
else:
self.new_list[self.zero] = i
self.zero += 1
print(self.new_list)
return(self.new_list)
if __name__ == '__main__':
x = ShiftArray(test_list, test_num)
x.increment_counter()
```
#### File: challenges/shift-array/test_shift_array.py
```python
import pytest
# import shift-array
from shift_array import ShiftArray as SA
@pytest.fixture
def test_sa():
test_list = [3, 4, 5, 7, 8]
test_num = 6
return SA(test_list, test_num)
@pytest.fixture
def other_sa():
test_list = [3, 4, 5, 7, 8, 9]
test_num = 6
return SA(test_list, test_num)
def test_sa_counter_list_len_odd(test_sa):
"""
function that tests to see if counter adds up based on list length
"""
assert test_sa.counter == 0
test_sa.increment_counter()
assert test_sa.counter == 5
def test_sa_counter_list_len_even(other_sa):
"""
function that tests to see if counter adds up based on list length
"""
assert other_sa.counter == 0
other_sa.increment_counter()
assert other_sa.counter == 6
def test_sa_insert_index_odd(test_sa):
test_sa.split_counter(5)
assert test_sa.insert_index == 2.5
def test_sa_insert_index_even(other_sa):
other_sa.split_counter(6)
assert other_sa.insert_index == 3
def test_sa_new_list_odd(test_sa):
"""
function that checks insert_shift_array builds the expected list for an odd-length split
"""
test_sa.insert_shift_array(test_sa.test_list, test_sa.test_num, 2.5)
assert test_sa.zero == 6
assert test_sa.new_list == [3, 4, 5, 6, 7, 8]
def test_sa_new_list_even(other_sa):
"""
function that checks insert_shift_array builds the expected list for an even-length split
"""
other_sa.insert_shift_array(other_sa.test_list, other_sa.test_num, 3)
assert other_sa.zero == 7
assert other_sa.new_list == [3, 4, 5, 6, 7, 8, 9]
```
#### File: data_structures/hash_table/hash_table.py
```python
from hash_linked_list import LinkedList as LL
class HashTable:
"""This is a hash table."""
def __init__(self, max_size=1024):
"""Initialize a hash_table."""
self.max_size = max_size
self.buckets = [LL() for _ in range(self.max_size)]
self._size = 0
def hash_key(self, key):
"""Generate a hash_key."""
if type(key) is not str:
raise TypeError
sum = 0
for char in key:
sum += ord(char)
return sum % len(self.buckets)
def set(self, key, val):
"""Set a node into the hash table."""
self.buckets[self.hash_key(key)].append({key:val})
self._size += 1
def get(self, key, filter=None):
"""Return a node from a hash table."""
current = self.buckets[self.hash_key(key)].head
result = []
while current:
if key in current.val.keys():
result.append(current.val[key])
current = current._next
return result
def remove(self, key):
"""Remove value from bucket."""
bucket = self.buckets[self.hash_key(key)]
current = bucket.head
if current is None:
raise ValueError('Key is not in hash table')
if key in bucket.head.val.keys():
deleted = current
bucket.head = current._next
current = None
self._size -= 1
return deleted
while current:
if key in current.val.keys():
deleted = current
last._next = current._next
current = None
self._size -= 1
return deleted
last = current
current = current._next
```
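A usage sketch for the hash table, assuming the accompanying hash_linked_list module is importable; bucket behaviour depends on that linked-list implementation.
```python
# Usage sketch; assumes hash_linked_list provides the LL used above.
from hash_table import HashTable

table = HashTable(max_size=16)
table.set('seattle', 98101)
table.set('portland', 97201)
print(table.get('seattle'))        # [98101]
print(table.get('spokane'))        # [] -- nothing stored under that key
table.remove('portland')           # returns the deleted node
print(table.hash_key('seattle'))   # bucket index from summed character ordinals
```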
#### File: linked_lists/ll-kth-from-end/ll_kth.py
```python
from node import Node
class LinkedList:
"""
initializes LL
"""
def __init__(self, iter=[]):
self.head = None
self._size = 0
for item in reversed(iter):
self.insert(item)
def __repr__(self):
"""
assumes head will have a val and we will need this
"""
return '<head> => {}'.format(self.head.val)
def __str__(self):
""" this is where we can see the list"""
def __len__(self):
"""
returns size of LL
"""
return self._size
def insert(self, val):
"""
basic insertion method for adding to front of LL
"""
self.head = Node(val, self.head)
self._size += 1
def append(self, val):
"""
appends node to the end of the LL
"""
new_node = Node(val, None)
if self.head is None:
    self.head = new_node
else:
    current = self.head
    while current._next is not None:
        current = current._next
    current._next = new_node
self._size += 1
return new_node
def insert_before(self, val, new_val):
"""
inserts node before node at val
"""
new_node = Node(new_val)
current = self.head._next
while current._next is not None:
if current._next.val == val:
new_node._next = current._next
current._next = new_node
self._size += 1
break
current = current._next
if current._next is None:
raise ValueError("Data not in list")
def insert_after(self, val, new_val):
"""
inserts node after node at val
"""
new_node = Node(new_val)
current = self.head._next
while current._next is not None:
if current.val == val:
new_node._next = current._next._next
current._next = new_node
self._size += 1
break
current = current._next
if current._next is None:
raise ValueError("Data not in list")
def kth_from_end(self, k):
"""
returns node at kth from end
"""
if self._size - k < 0:
raise AttributeError
current = self.head
for i in range(self._size - k - 1):
current = current._next
return current
```
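A usage sketch for kth_from_end, assuming the accompanying node module is importable; the module name is taken from the file path above.
```python
# Usage sketch; module name assumed from the file path.
from ll_kth import LinkedList

ll = LinkedList([1, 2, 3, 4, 5])
print(len(ll))                  # 5
print(ll.kth_from_end(0).val)   # 5 -- the last node
print(ll.kth_from_end(2).val)   # 3
```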
#### File: linked_lists/ll-merge-lists/ll_merge.py
```python
from node import Node
class LinkedList:
"""
Linked list used for the merge-lists challenge.
"""
def __init__(self, iter=[]):
self.head = None
self._size = 0
self._current = self.head
for item in reversed(iter):
self.insert(item)
def __repr__(self):
"""
assumes head will have a val and we will need this
"""
return '<head> => {}'.format(self.head.val)
def __str__(self):
""" this is where we can see the list"""
def __len__(self):
"""
returns size of LL
"""
return self._size
def insert(self, val):
"""
basic insertion method for adding to front of LL
"""
self.head = Node(val, self.head)
self._size += 1
def append(self, val):
"""
appends node to the end of the LL
"""
new_node = Node(val, None)
if self.head is None:
    self.head = new_node
else:
    current = self.head
    while current._next is not None:
        current = current._next
    current._next = new_node
self._size += 1
return new_node
def find(self, val):
"""
Searches through a list for val and returns the node with that val
"""
current = self.head
while current:
if val == current.val:
return True
current = current._next
return False
def insert_before(self, val, new_val):
"""
inserts node before node at val
"""
new_node = Node(new_val)
current = self.head._next
while current._next is not None:
if current._next.val == val:
new_node._next = current._next
current._next = new_node
self._size += 1
break
current = current._next
if current._next is None:
raise ValueError("Data not in list")
def insert_after(self, val, new_val):
"""
inserts node after node at val
"""
new_node = Node(new_val)
current = self.head._next
while current._next is not None:
if current.val == val:
new_node._next = current._next._next
current._next = new_node
self._size += 1
break
current = current._next
if current._next is None:
raise ValueError("Data not in list")
def kth_from_end(self, k):
"""
returns node at kth from end
"""
current = self.head
counter = 0
answer = 0
kth = self._size - k
while current._next is not None:
if counter == kth:
answer = current
break
else:
counter += 1
current = current._next
return answer
def merge_lists(ll_1, ll_2):
"""
merges two lists
"""
baselist = 0
zipped = 0
size = 0
if (ll_1._size >= ll_2._size):
baselist = ll_1
zipped = ll_2
else:
baselist = ll_2
zipped = ll_1
if zipped.head is None:
return baselist.head
current = baselist.head
temp = current._next
zipped = zipped.head
while current._next is not None:
current._next = zipped
current = current._next
current._next = temp
temp = temp._next
current = current._next
zipped = zipped._next
size += 1
return baselist.head
```
#### File: sorting_algos/mergesort/test_mergesort.py
```python
import pytest
from mergesort import mergesort, merge
def test_empty_list_returns_empty_list():
"""Test mergesort on empty list returns same."""
empty = []
assert mergesort(empty) == []
def test_list_with_one_value():
"""Test mergesort on empty list returns same."""
lst = [8]
assert mergesort(lst) == [8]
def test_list_with_two_values():
"""Test mergesort on empty list returns same."""
lst = [8, 3]
assert mergesort(lst) == [3, 8]
def test_list_with_odd_number_of_values():
"""Test odd number of values returns ordered list."""
lst = [8, 3, 7, 9, 5]
assert mergesort(lst) == [3, 5, 7, 8, 9]
def test_list_with_unbalanced_halves():
"""Test list heavy weighted on one half returns ordered list."""
lst = [2, 4, 3, 8, 1, 9, 10, 13]
assert mergesort(lst) == [1, 2, 3, 4, 8, 9, 10, 13]
def test_merge_merges_two_pairs():
"""Test merge function separate of mergesort."""
L = [1, 3, 5]
R = [2, 4, 6]
assert merge(L, R) == [1, 2, 3, 4, 5, 6]
def test_merge_merges_uneven_lists():
L = [1, 3, 5]
R = [2, 4]
assert merge(L, R) == [1, 2, 3, 4, 5]
def test_merge_on_unbalanced_lists():
"""Test list heavy weighted on one half returns ordered list."""
L = [2, 3, 4, 8]
R = [1, 9, 10, 13]
assert merge(L, R) == [1, 2, 3, 4, 8, 9, 10, 13]
```
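The mergesort module exercised by these tests is not part of this excerpt. Below is one possible implementation that satisfies the assertions above; the names mergesort and merge come from the test's import, everything else is an assumption rather than the repository's actual code.
```python
# A minimal mergesort/merge pair consistent with the tests above.
def merge(left, right):
    """Merge two already-sorted lists into one sorted list."""
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])
    result.extend(right[j:])
    return result


def mergesort(lst):
    """Split the list in half, sort each half, and merge the results."""
    if len(lst) <= 1:
        return lst
    mid = len(lst) // 2
    return merge(mergesort(lst[:mid]), mergesort(lst[mid:]))
```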
|
{
"source": "jeremyCtown/pyramid-stock-portfolio",
"score": 2
}
|
#### File: portfolio_of_stocks/tests/conftest.py
```python
import pytest
from pyramid import testing
@pytest.fixture
def dummy_request():
"""
Creates empty dummy request to server
"""
return testing.DummyRequest()
```
#### File: portfolio_of_stocks/views/notfound.py
```python
from pyramid.view import (notfound_view_config, forbidden_view_config)
from pyramid.httpexceptions import HTTPFound
@notfound_view_config(renderer='../templates/404.jinja2')
def notfound_view(request):
"""
Returns 404 response if no route
"""
request.response.status = 404
return {}
@forbidden_view_config(renderer='../templates/404.jinja2')
def forbidden_view(request):
"""
Returns 404 response if route is forbidden
"""
request.response.status = 404
return {}
```
|
{
"source": "jeremyCtown/snakes-cafe",
"score": 3
}
|
#### File: jeremyCtown/snakes-cafe/snakes_cafe.py
```python
from uuid import uuid4
import csv
intro = '''
**************************************
** Welcome to the Snakes Cafe! **
** Please see our menu below. **
**************************************
'''
order_prompt = '''
**********************************************************************
** What would you like to order? **
** To add an item to your order, type the item name **
** To see the menu, type "menu" **
** To order from togo menu, type "togo" **
** To remove an item from your order, type the "remove <item name>" **
** To see your current order, type "order" **
** To checkout and pay, type "checkout" **
** To quit at any time, type "quit" **
**********************************************************************
\n'''
full_menu = {
'Appetizers': {
'Wings': [0, 2.00, 10],
'Calamari': [0, 2.00, 10],
'Spring Rolls': [0, 2.00, 10],
'Nachos': [0, 2.00, 10],
'Spinach Dip': [0, 2.00, 10],
'Sampler': [0, 2.00, 10],
'Mozz Sticks': [0, 2.00, 10],
'Corn Doggies': [0, 2.00, 10],
'Hummus': [0, 2.00, 10],
'Almonds': [0, 2.00, 10],
'Chips': [0, 2.00, 10],
'Oreos': [0, 2.00, 10]
},
'Entrees': {
'Salmon': [0, 10.00, 10],
'Steak': [0, 10.00, 10],
'Tacos': [0, 10.00, 10],
'Salad': [0, 10.00, 10],
'Pizza': [0, 10.00, 10],
'Vegetarian Delight': [0, 10.00, 10],
'Pasta': [0, 10.00, 10],
'Ribs': [0, 10.00, 10],
'Burrito': [0, 10.00, 10],
'Grilled Chicken': [0, 10.00, 10],
'Fried Fish': [0, 10.00, 10],
'S\'ghetti': [0, 10.00, 10]
},
'Sides': {
'French Fries': [0, 4.00, 10],
'Hush Puppies': [0, 4.00, 10],
'Green Beans': [0, 4.00, 10],
'Mashed Potatoes': [0, 4.00, 10],
'Corn': [0, 4.00, 10],
'Rolls': [0, 4.00, 10],
'Carrots': [0, 4.00, 10],
'Biscuits': [0, 4.00, 10],
'Mac and Cheese': [0, 4.00, 10],
'Spinach': [0, 4.00, 10],
'Asparagus': [0, 4.00, 10],
'Coleslaw': [0, 4.00, 10]
},
'Desserts': {
'Ice Cream': [0, 5.00, 10],
'Cake': [0, 5.00, 10],
'Pie': [0, 5.00, 10],
'Cookies': [0, 5.00, 10],
'Cheese': [0, 5.00, 10],
'Boozy Milkshake': [0, 5.00, 10],
'Sundae': [0, 5.00, 10],
'Gummi Bears': [0, 5.00, 10],
'Oranges': [0, 5.00, 10],
'Jello': [0, 5.00, 10],
'Boba': [0, 5.00, 10],
'Brownie': [0, 5.00, 10]
},
'Drinks': {
'Coffee': [0, 3.00, 10],
'Tea': [0, 3.00, 10],
'Beer': [0, 5.50, 10],
'<NAME>': [0, 5.50, 10],
'Cider': [0, 5.50, 10],
'Bubbles': [0, 5.50, 10],
'Soda': [0, 3.00, 10],
'Juice': [0, 3.00, 10],
'Evian': [0, 1.00, 10],
'Wine': [0, 5.50, 10],
'Hunch Punch': [0, 5.50, 10],
'Seltzer': [0, 1.00, 10]
}
}
togo_menu = {}
def print_menu():
"""
prints the restaurant menu
"""
menu_string = 'Menu:'
for key, value in menu.items():
menu_string += '\n{}\n\n'.format(key)
for k, v in value.items():
menu_string += k + '${:.2f}\n'.format(v[1]).rjust(25-len(k))
menu_string += '\n'
print(menu_string)
return menu_string
def optional_menu():
"""
allow user to use their own menu
"""
global menu
global togo_menu
filename = input('Enter your menu file: ').strip()
try:
with open(filename) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
    togo_menu.setdefault(row['Category'], {})
    togo_menu[row['Category']][row['Item']] = [0, float(row['Price']), int(row['Quantity'])]
except (KeyError, FileNotFoundError, IOError):
    print('Not a valid menu file; using default menu.')
    menu = full_menu
else:
    menu = togo_menu
new_order.input_item()
class Order:
"""
new class
"""
def __init__(self):
self.order_id = uuid4()
self.subtotal = 0
def __str__(self):
return self.display_order()
def __len__(self):
counter = 0
for key, value in menu.items():
for k, v in value.items():
if v[0] != 0:
counter += v[0]
return counter
def __repr__(self):
return '<Order #{} | Items: {} | Total: ${:.2f}>'.format(self.order_id, len(self), self.subtotal * 1.101)
def print_category(self, order_line):
"""
prints category
"""
category_string = '\n{}\n'.format(order_line)
for key, value in menu[order_line].items():
category_string += key + '${:.2f}\n'.format(value[1]).rjust(25-len(key))
print(category_string)
return category_string
def add_to_order(self, order_line):
"""
adds items to user order
"""
for key, value in menu.items():
if order_line in value:
while True:
try:
order_quantity = int(input('How many orders of ' + order_line + ' would you like?\n> '))
if order_quantity > 0:
self.add_item(order_line, order_quantity)
else:
print('Please enter a number between 1-' + str(value[order_line][2]))
break
except ValueError:
print('Please enter a number between 1-' + str(value[order_line][2]))
break
else:
print('Please enter a valid menu item')
return 'Please enter a valid menu item'
def add_item(self, order_line, order_quantity):
"""
adds items to user order
"""
for key, value in menu.items():
if order_line in value:
if order_quantity != 0:
if value[order_line][2] < order_quantity:
print('Oh no!! We only have ' + str(value[order_line][2]) + ' left. Please order again')
self.add_to_order(order_line)
return
else:
value[order_line][0] += order_quantity
self.subtotal += value[order_line][1] * order_quantity
value[order_line][2] -= order_quantity
else:
value[order_line][0] += order_quantity
self.subtotal += value[order_line][1] * order_quantity
value[order_line][2] -= order_quantity
print('{} x{} has been added. Your total is ${:.2f}\n'.format(order_line, order_quantity, self.subtotal * 1.101))
def remove_prompt(self, order_line):
"""
prompts self.remove from current order
"""
order_line = order_line.replace('Remove ', '')
for key, value in menu.items():
if order_line in value:
if value[order_line][0] != 0:
while True:
try:
remove_quantity = int(input('You currently have {}x {} in your cart. Remove how many?\n>'.format(value[order_line][0], order_line)))
if remove_quantity > value[order_line][0]:
print('You only have {}x {} in your cart.'.format(value[order_line][0], order_line))
else:
self.remove_item(order_line, remove_quantity)
break
except ValueError:
print('Please enter a number between 1-' + str(value[order_line][0]))
else:
print(order_line + ' is not in your order.')
return order_line + ' is not in your order.'
def remove_item(self, order_line, remove_quantity):
"""
removes item from current order
"""
for key, value in menu.items():
if order_line in value:
value[order_line][0] -= remove_quantity
self.subtotal -= value[order_line][1] * remove_quantity
value[order_line][2] += remove_quantity
print('{} x{} has been removed. Your total is ${:.2f}\n'.format(order_line, remove_quantity, self.subtotal * 1.101))
self.input_item()
def display_order(self):
"""
provides display of user order
"""
order_string = '''\n***********************************************
The Snakes Cafe
"Gettin' Slithered Since 1999"
Seattle, WA
Order #{}
===============================================\n'''.format(self.order_id)
for key, value in menu.items():
for k, v in value.items():
if v[0] != 0:
item = '{} x{}'.format(k, v[0])
order_string += item + '${:.2f}\n'.format(v[0] * v[1]).rjust(46-len(item))
order_string += '\n-----------------------------------------------\n'
order_string += 'Subtotal' + '${:.2f}\n'.format(self.subtotal).rjust(46 - 8)
order_string += 'Sales Tax' + '${:.2f}\n'.format(self.subtotal * 0.101).rjust(46 - 9)
order_string += '-----------------------------------------------\n'
order_string += 'Total Due' + '${:.2f}\n'.format(self.subtotal * 1.101).rjust(46 - 9)
order_string += '***********************************************\n'
print(order_string)
return order_string
def print_receipt(self):
"""
creates file of and prints user order
"""
with open('order-{}.txt'.format(self.order_id), 'w') as f:
f.write(self.display_order())
def input_item(self):
"""
changes order according to user input
"""
global menu
order_line = input('What would you like?\n> ').title()
while order_line != 'Quit':
if order_line == 'Order':
self.display_order()
elif 'Remove' in order_line:
self.remove_prompt(order_line)
elif order_line == 'Menu':
if menu == full_menu:
print_menu()
else:
menu = togo_menu
print_menu()
elif order_line == 'Togo':
optional_menu()
elif order_line == 'Checkout':
self.print_receipt()
elif order_line in menu:
self.print_category(order_line)
else:
self.add_to_order(order_line)
order_line = input('What would you like?\n> ').title()
print('Thank you for coming by. See you soon!')
quit()
if __name__ == '__main__':
print(intro)
menu = full_menu
print_menu()
print(order_prompt)
try:
new_order = Order()
new_order.input_item()
except KeyboardInterrupt:
print('\nThanks for visiting the Snakes Cafe.')
```
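optional_menu() expects a CSV with Category, Item, Price and Quantity columns (the DictReader keys above). A sketch of a file it would accept; the file name is an arbitrary choice.
```python
# Writes a sample togo menu; column names come from optional_menu's
# DictReader keys, the file name is arbitrary.
sample = (
    "Category,Item,Price,Quantity\n"
    "Appetizers,Wings,2.00,10\n"
    "Entrees,Tacos,10.00,10\n"
    "Drinks,Coffee,3.00,10\n"
)
with open('togo_menu.csv', 'w') as f:
    f.write(sample)
# At the prompt, type "togo" and enter togo_menu.csv when asked for a menu file.
```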
|
{
"source": "jeremycward/ipp-core",
"score": 2
}
|
#### File: arctic_broker/broker_impl/arctic_storage_method.py
```python
from isharp.datahub.core import StorageMethod,MatrixHeader,Revision,AcquireContentReturnValue,MemStyles,RevisionInfo
from typing import List
import logging
import dataclasses
logging.basicConfig(level=logging.INFO)
class ArcticStorageMethod(StorageMethod):
def __init__(self,store):
self.store = store
super().__init__("arctic")
def _lib_ticker(self,path):
tokens = [ i for i in list(path.split('/')) if len(i)>0]
library = tokens[0]
ticker = '.'.join(tokens[1:])
logging.info("Arctic storage method converted path to libaray [{}], ticker [{}]".format(library,ticker))
if library in self.store.list_libraries():
lib = self.store[library]
if lib.has_symbol(ticker):
versioned = lib.read(ticker)
return (library,ticker)
else:
raise StorageMethod.ResourceException("ticker {} not found".format(ticker))
else:
raise StorageMethod.ResourceException("library {} not found in library list {}".format(library,self.store.list_libraries()))
return (library, ticker)
def acquireContent(self, path, params, version_id=None)->AcquireContentReturnValue:
library, ticker = self._lib_ticker(path)
lib = self.store[library]
v = None if version_id is None else int(version_id)
versioned = lib.read(ticker, v)
header = MatrixHeader(
name= path,
revision_id= str(versioned.version),
storage_method=self.name,
path= path,
memory_style=MemStyles.DATA_FRAME,
description=None
)
return AcquireContentReturnValue(content=versioned.data,header=header)
def storeContent(self, path, params, content,revision_info)->Revision:
library, ticker = self._lib_ticker(path)
_store_content(self.store[library],ticker,content,revision_info)
def history(self,matrix_url)->List[Revision]:
library, ticker = self._lib_ticker(matrix_url.url_components.path)
lib = self.store[library]
meta = lib.read_metadata(ticker)
logging.info("attempted to get history for : {},{} result = [{}]".format(library, ticker,meta))
if meta.metadata is None:
return []
else:
return get_revisions_from_metadata(meta.metadata)
def list(self) -> List[MatrixHeader]:
ret_val = []
for this_lib_name in self.store.list_libraries():
library = self.store[this_lib_name]
for this_symbol in library.list_symbols():
versions = library.list_versions(this_symbol)
filtered = [version for version in versions if not version['deleted']]
max_version = max(map(lambda v: v['version'], filtered))
symbol_with_slashes = this_symbol.replace('.','/')
ret_val.append(MatrixHeader(name=symbol_with_slashes,
description="don't know yet",
storage_method = self.name,
memory_style = MemStyles.DATA_FRAME,
revision_id = str(max_version),
path="{}/{}".format(this_lib_name,symbol_with_slashes)))
return ret_val
history_tag = "revision_history"
def add_revision_to_metadata(revision:Revision,metadata:dict,dict_key:str=history_tag):
if metadata.get(dict_key) is None:
metadata[dict_key] = []
metadata[dict_key].append(dataclasses.asdict(revision))
def _revision_from_dict(dict:dict)->Revision:
# TODO: there must be a better way of doing this.
revision_id = dict['id']
revision_info = dict["revision_info"]
return Revision(revision_id,RevisionInfo(who=revision_info['who'],what=revision_info['what'],when=revision_info['when']))
def get_revisions_from_metadata(metadata:dict,dict_key:str=history_tag)->List[Revision]:
revision_list = metadata[dict_key]
logging.info("retrieved revision list from metadata: {}".format(revision_list))
if revision_list is not None:
return [_revision_from_dict(i) for i in revision_list]
else:
return []
def import_pandas(lib, pd, symbol_name,revision_info):
meta = {}
add_revision_to_metadata(Revision('1', revision_info), meta)
lib.write(symbol_name, pd, meta)
def _store_content(lib,ticker,content,revision_info)->Revision:
logging.info("storing content {} {}".format(lib, ticker))
original_meta_d = lib.read_metadata(ticker).metadata
old_revisions = get_revisions_from_metadata(original_meta_d)
last_revision = old_revisions[-1]
next_revision_id = str(int(last_revision.id)+1)
new_revision = Revision(next_revision_id,revision_info)
add_revision_to_metadata(new_revision,original_meta_d)
ret = lib.write(ticker,content,original_meta_d)
return new_revision
```
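A sketch of the revision-metadata round trip that _store_content relies on, assuming the isharp package is importable and that the module path below matches the file header; the who/what/when values are placeholders.
```python
# Round-trip sketch for the metadata helpers above; the module path and
# the who/what/when values are assumptions.
from isharp.datahub.core import Revision, RevisionInfo
from isharp.datahub.arctic_broker.broker_impl.arctic_storage_method import (
    add_revision_to_metadata, get_revisions_from_metadata)

meta = {}
add_revision_to_metadata(Revision('1', RevisionInfo(who='alice', what='initial load', when='2020-01-01')), meta)
add_revision_to_metadata(Revision('2', RevisionInfo(who='bob', what='correction', when='2020-01-02')), meta)
for rev in get_revisions_from_metadata(meta):
    print(rev.id, rev.revision_info.who, rev.revision_info.what)
```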
#### File: datahub/broker_client/client_utils.py
```python
import pandas as pd
def header_cols(header):
return (header.name,
header.storage_method,
header.path,
str(header.memory_style),
header.description)
def mtx_headers_as_dataframe(matrix_headers):
record_data = [header_cols(i) for i in matrix_headers]
return pd.DataFrame.from_records(data= record_data, columns= ["name", "storage method", "path", "mem style", "description"])
```
#### File: datahub/broker_client/remote_proxy.py
```python
from typing import List
from isharp.datahub.core import DataBroker, MatrixHeader, Matrix, AcquireContentReturnValue, Revision
from isharp.datahub.core import MatrixPreview
from isharp.datahub.core import RevisionInfo
from urllib.parse import urlparse
from nameko.standalone.rpc import ClusterRpcProxy
import logging
logger = logging.getLogger(__name__)
def remote_config(net_location: str):
return {
'serializer': 'pickle',
'AMQP_URI': 'pyamqp://guest:guest@{}'.format(net_location),
'rpc_exchange': 'nameko-rpc',
'max_workers': 10,
'parent_calls_tracked': 10
}
class PooledBrokerConnection(DataBroker):
def __init__(self, net_location: str):
logger.info("creating remote client at {}".format(net_location))
conf = remote_config(net_location)
self.rpc_proxy = ClusterRpcProxy(conf)
self.proxy = self.rpc_proxy.start()
self.net_location = net_location
def releaseAll(self) -> None:
logger.info("release all for remote client at {}".format(self.net_location))
self.proxy.data_broker_service.releaseAll()
def stop(self):
logger.info("closing remote client at {}".format(self.net_location))
self.rpc_proxy.stop()
def commit(self, matrix: Matrix, revisionInfo: RevisionInfo) -> Revision:
return self.proxy.data_broker_service.commit(matrix, revisionInfo)
def history(self,url: str) -> List[Revision]:
return self.proxy.data_broker_service.history(url)
def list(self) -> List[MatrixHeader]:
return self.proxy.data_broker_service.list()
def checkout(self, url: str, version_id=None) -> Matrix:
return self.proxy.data_broker_service.checkout(url)
def view(self, url: str, version_id=None) -> Matrix:
return self.proxy.data_broker_service.view(url)
def release(self, matrix) -> None:
self.proxy.data_broker_service.release(matrix)
return None
def peek(self, url) -> MatrixPreview:
return self.proxy.data_broker_service.peek(url)
class BrokerConnectionPool(DataBroker):
def __init__(self):
self.pool = {}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
for thisConnection in self.pool.values():
thisConnection.stop()
def _acquire_connection(self, connection_key):
if connection_key not in self.pool.keys():
self.pool[connection_key] = PooledBrokerConnection(connection_key)
return self.pool[connection_key]
def _connect(self, url: str):
connection_key = self._conn_details(url)
return self._acquire_connection(connection_key)
def _conn_details(self, url: str) -> str:
url_components = urlparse(url)
return url_components.netloc
def checkout(self, url: str, version_id=None) -> Matrix:
return self._connect(url).checkout(url, version_id)
def history(self,url: str) -> List[Revision]:
return self._connect(url).history(url)
def view(self, url: str, version_id=None) -> Matrix:
return self._connect(url).view(url, version_id)
def commit(self, matrix: Matrix, revisionInfo: RevisionInfo) -> Revision:
return self._connect(matrix.url.url).commit(matrix, revisionInfo)
def release(self, matrix) -> None:
self._connect(matrix.url.url).release(matrix)
def releaseAll(self) -> None:
for thisConnection in self.pool.values():
thisConnection.releaseAll()
def peek(self, url) -> MatrixPreview:
return self._connect(url).peek(url)
def list(self, network_location):
return self._acquire_connection(network_location).list()
```
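A usage sketch for the connection pool, mirroring how webconsole.py (later in this collection) drives it; the AMQP host and the matrix URL are placeholders.
```python
# Usage sketch; host and matrix URL are placeholders.
from isharp.datahub.broker_client.remote_proxy import BrokerConnectionPool

with BrokerConnectionPool() as pool:
    for header in pool.list('localhost:5672'):
        print(header.name, header.path)
    mtx = pool.view('arctic://localhost:5672/some_library/some_ticker')
    print(mtx.content.head())
```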
#### File: datahub/broker_service/datahub_main.py
```python
import re
import sys
import isharp.datahub.yaml_support as iYaml
import nameko.cli.main
import isharp.datahub.web.webconsole as web
from multiprocessing import Process
def main():
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
iYaml.set_up_unsafe_loader()
sys.exit(nameko.cli.main.main())
if __name__== "__main__" :
main()
```
#### File: datahub/csv_files/simple_file_broker.py
```python
from .file_storage_method import FileStorageMethod
from isharp.datahub.core import AbstractDataBroker
import logging
class SimpleFileBroker(AbstractDataBroker):
def __init__(self,root_directory):
super().__init__(FileStorageMethod(root_directory))
```
#### File: datahub/web/webconsole.py
```python
from isharp.datahub.broker_client.remote_proxy import BrokerConnectionPool
from flask import Flask, render_template
import os
import socket
import json
hostname=socket.gethostname()
from flask import request
templates_dir = os.getenv('isharp_web_templates', 'templates')
static_dir = os.getenv('isharp_web_static', '/isharp-core/docs')
tableContent = [
[ "plant/plough.gif"
,"plant/seed_and_sprout.gif"
,"plant/seed_sprouting.gif"
,"plant/sunny_sprout.gif"
,"plant/seeding_trailer.gif"
,"plant/wheat_seedling.gif"
]
,[ "feed/raindrops.gif"
,"feed/irrigation_pipe.gif"
,"feed/airplane_irrigation.gif"
,"feed/raindrop_with_cog.gif"
,"feed/feed_spreader.gif"
,"feed/raindrops.gif"
]
,["develop/measure_height.gif"
,"develop/seed_lab.gif"
,"develop/seed_time.gif"
,"develop/chemicals.gif"
,"develop/measure_height.gif"
,"develop/hay_bail.gif"
]
,["harvest/combine_harvester.gif"
,"harvest/bailing_tractor.gif"
,"harvest/grain_silo.gif"
,"harvest/thresher_trailer.gif"
,"harvest/mini_harvester.gif"
,"harvest/distillery.gif"
]
]
print ("templates_dir: {}".format(templates_dir))
print ("static_dir: {} ? {}".format(static_dir,os.path.exists(static_dir)))
app = Flask(__name__,template_folder=templates_dir, static_folder=static_dir)
app.config["CACHE_TYPE"] = 'null'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
hub_host = os.getenv('isharp_hub_host', 'localhost:5672')
isharp_dev_hostname = os.getenv('isharp_dev_hostname', 'isharpdev')
@app.route('/')
def static_content():
return render_template("index.html",table_images = tableContent, isharp_hostname=isharp_dev_hostname)
@app.route('/datahub')
def listing():
brokers = [["Demo Broker","Prod parallel demo broker"],["UAT","UAT broker"],["DEV","Dev broker"]]
listings = []
with BrokerConnectionPool() as broker:
for thisItem in broker.list(hub_host):
listings.append(thisItem)
return render_template('datahub_index.html',hostname="rabbit", my_list=listings,hub_host="Demo Data Broker", brokers=brokers)
@app.route('/datahub/view/<path:path>', methods=['GET'])
def view(path):
databroker = BrokerConnectionPool()
protocol = request.args.get('protocol')
url = "{}://{}/{}".format(protocol,hub_host,path)
mtx = databroker.view(url)
row_data = []
dict_array = mtx.content.to_dict(orient='records')
for idx, row in enumerate(dict_array):
row["date"]=mtx.content.index[idx]
row_data.append(row)
column_headers = list(mtx.content)
column_headers.insert(0,"date")
history = databroker.history(url)
return render_template('matrix.html',
column_headers=column_headers,
row_data = row_data,
revision_list = history)
```
#### File: isharp/evalengine/core.py
```python
import abc
import luigi
import time
from nameko.rpc import rpc, RpcProxy
from typing import List
import logging
from isharp.datahub.core import DatahubTarget
logger = logging.getLogger(__name__)
class EvalMethod(object):
pass
class Evaluator(abc.ABC):
@abc.abstractmethod
def eval(self, method:EvalMethod ,inputs: List[DatahubTarget])->List[DatahubTarget]:
pass
class EvalService(Evaluator):
name="evaluation_service"
@rpc
def eval(self, method: EvalMethod, inputs: List[DatahubTarget]) -> List[DatahubTarget]:
logger.info("START performing an eval....")
time.sleep(1)
logger.info("END performing an eval....")
return [DatahubTarget(url="url",t=0)]
```
#### File: isharp/evalengine/remote_proxy.py
```python
from typing import List
from nameko.standalone.rpc import ClusterRpcProxy
import logging
import time
from isharp.evalengine.core import Evaluator,EvalMethod
from isharp.datahub.core import DatahubTarget
logger = logging.getLogger(__name__)
def remote_config(net_location: str):
return {
'serializer': 'pickle',
'AMQP_URI': 'pyamqp://guest:guest@{}'.format(net_location),
'rpc_exchange': 'nameko-rpc',
'max_workers': 10,
'parent_calls_tracked': 10
}
class AsyncEvalServiceInvoker:
def __init__(self, conf):
self.rpc_proxy = ClusterRpcProxy(conf)
self.proxy = self.rpc_proxy.start()
def eval(self, method: EvalMethod, inputs: List[DatahubTarget]) -> List[DatahubTarget]:
result = self.proxy.evaluation_service.eval(method, inputs)
print (result)
def stop(self):
self.rpc_proxy.stop()
if __name__ == '__main__':
    invoker = AsyncEvalServiceInvoker(remote_config("localhost"))
    invoker.eval(None, [])
    invoker.stop()
```
#### File: flow/neo4jflow/py2neoflow.py
```python
from py2neo import Graph
from isharp.flow.core import CalculationTask, DatahubRequirement
import os
def calcTasks(data_hub_host_name, data_hub_port, graph_host):
url = "bolt://{}:7687".format(graph_host)
print ('................using graph host {}'.format(graph_host) )
print('................. using datahub host {}'.format(data_hub_host_name))
ret_val = []
graph = Graph(url)
strategies = graph.nodes.match('Strategy')
for strategy in strategies:
jobs = graph.match((strategy,), r_type='EVAL')
for j in jobs:
deps = []
job_node = j.end_node
dependencies = graph.match((job_node,), r_type="USES")
for dependency in dependencies:
datahub_url = "{}://{}:{}/{}".format("arctic",data_hub_host_name,data_hub_port,dependency.end_node['path'])
deps.append(DatahubRequirement(name=dependency.end_node['name'],t=0,url=datahub_url) )
ret_val.append(CalculationTask(dueBy=job_node['due_by'],requirements=deps,strategy=strategy['name'],eval_label=job_node['name']))
return ret_val
```
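A usage sketch for calcTasks; the host names are placeholders, the module path is assumed from the file header, and the graph is assumed to hold Strategy -[:EVAL]-> job -[:USES]-> data nodes as queried above.
```python
# Usage sketch; hosts and module path are assumptions.
from isharp.flow.neo4jflow.py2neoflow import calcTasks

for task in calcTasks('datahub.example.com', 5672, 'neo4j.example.com'):
    print(task.strategy, task.eval_label, task.dueBy)
    for req in task.requirements:
        print('  requires', req.name, req.url)
```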
#### File: test/data_broker/test_file_data_broker.py
```python
import os
import shutil
import tempfile
import unittest
import pandas as pd
import datetime
import test.testutil.file_utils as fu
import test.testutil.log_utils as lu
from isharp.datahub.core import StorageMethod, MemStyles
from isharp.datahub.csv_files.simple_file_broker import SimpleFileBroker
testpath = os.path.dirname(__file__)
class TestFileDataBroker(unittest.TestCase):
def setUp(self):
rows = ['a','b','c','d','e','f','g','h','i','j','k']
self.test_data_path = tempfile.mkdtemp(prefix="ipp_test_file_data_broker")
lu.logger.info("test file path: {}" + self.test_data_path)
fu.make_file_tree(self.test_data_path,2,3)
self.broker = SimpleFileBroker(self.test_data_path)
def tearDown(self):
shutil.rmtree(self.test_data_path)
def test_invalid_path(self):
with self.assertRaises(StorageMethod.ResourceException):
self.broker.checkout("file://broker.nomura.com/no_dir/no_file?format=CSV")
def test_get_compound_path(self):
testurl = "file:///subdir_1/file_name_1.csv?format=CSV"
m = self.broker.checkout(testurl)
self.assertEqual("file_name_1.csv",m.matrix_header.name)
self.assertEqual("subdir_1/file_name_1.csv", m.matrix_header.path)
def test_peek_with_existing_file(self):
testurl = "file:///subdir_1/file_name_1.csv?format=CSV"
preview = self.broker.peek(testurl)
todays_date = datetime.datetime.now().date()
expected_start_date = todays_date- datetime.timedelta(11)
expected_end_date = expected_start_date + datetime.timedelta(10)
self.assertEqual(expected_start_date.strftime("%Y-%m-%d"), preview.range_start)
self.assertEqual(expected_end_date.strftime("%Y-%m-%d"), preview.range_end)
def test_peek_non_existing_file(self):
testurl = "file:///subdir_1/file_name_xxx.csv?format=CSV"
preview = self.broker.peek(testurl)
self.assertIsNone(preview)
def test_get_simple_matrix(self):
testurl = "file:///file_name_1.csv?format=CSV"
m = self.broker.checkout(testurl)
self.assertEqual("file_name_1.csv",m.matrix_header.name)
self.assertEqual(None,m.matrix_header.revision_id)
self.assertEqual('file', m.matrix_header.storage_method)
self.assertEqual(m.matrix_header.path, "file_name_1.csv")
self.assertTrue(isinstance(m.content,pd.DataFrame))
self.assertEqual(MemStyles.DATA_FRAME, m.matrix_header.memory_style)
def test_list(self):
headers = self.broker.list()
self.assertEqual(14, len(headers))
header = headers[0]
self.assertIsNone(header.revision_id)
self.assertEqual("file",header.storage_method)
self.assertEqual("file_name_1.csv", header.path)
self.assertEqual("file_name_1.csv",header.name)
self.assertEqual("description of file_name_1.csv",header.description)
self.assertEqual(MemStyles.DATA_FRAME, header.memory_style)
```
#### File: test/data_broker/test_github_data_broker.py
```python
import os
import shutil
import tempfile
import unittest
import pandas as pd
import datetime
import random
import test.testutil.file_utils as fu
from github import Github, InputGitAuthor
import test.testutil.log_utils as lu
import test.testutil.pandas_utils as pu
import test.testutil.github_utils as gu
from isharp.datahub.core import StorageMethod, MemStyles
from isharp.datahub.github_broker.broker_impl.github_data_broker import GithubBroker
from isharp.datahub.github_broker.broker_impl.github_storage_method import GithubStorageMethod
rows = ['a','b','c','d','e','f','g','h','i','j','k']
author = InputGitAuthor(
"jeremycward",
"<EMAIL>"
)
class TestGithubDataBroker(unittest.TestCase):
def setUp(self):
self.branch = 'main'
self.repo_name = "jeremycward/datahubtest"
self.gitfile = ''.join(random.choice('abcdefghijklmnopqrstuvwxyz___') for i in range(16))
self.token = '<KEY>'
self.initialTag = ''.join(random.choice('1234567890') for i in range(10))
df = pu.create_simple_series(rows, 10)
self.repo = gu.GitTestRepo(self.token,self.repo_name)
self.repo.create_and_tag(author,self.gitfile,df,self.initialTag,self.branch)
self.broker = GithubBroker(self.token,self.repo_name)
def tearDown(self):
pass
# def test_invalid_path(self):
# with self.assertRaises(StorageMethod.ResourceException):
# self.broker.checkout("file://broker.nomura.com/no_dir/no_file?format=CSV")
#
# def test_get_compound_path(self):
# testurl = "file:///subdir_1/file_name_1.csv?format=CSV"
# m = self.broker.checkout(testurl)
# self.assertEqual("file_name_1.csv",m.matrix_header.name)
# self.assertEqual("subdir_1/file_name_1.csv", m.matrix_header.path)
#
#
#
def test_peek_with_existing_file(self):
testurl = "github:///{}?branch=main".format(self.gitfile)
preview = self.broker.peek(testurl)
print(preview.range_start)
print(preview.range_end)
self.assertEqual(0, preview.range_start)
self.assertEqual(9, preview.range_end)
self.assertEqual(self.initialTag,preview.header.revision_id)
#
# todays_date = datetime.datetime.now().date()
#
# expected_start_date = todays_date- datetime.timedelta(11)
# expected_end_date = expected_start_date + datetime.timedelta(10)
#
#
#
# def test_peek_non_existing_file(self):
# testurl = "file:///subdir_1/file_name_xxx.csv?format=CSV"
# preview = self.broker.peek(testurl)
# self.assertIsNone(preview)
#
#
# def test_get_simple_matrix(self):
# testurl = "file:///file_name_1.csv?format=CSV"
# m = self.broker.checkout(testurl)
# self.assertEqual("file_name_1.csv",m.matrix_header.name)
# self.assertEqual(None,m.matrix_header.revision_id)
# self.assertEqual('file', m.matrix_header.storage_method)
# self.assertEqual(m.matrix_header.path, "file_name_1.csv")
# self.assertTrue(isinstance(m.content,pd.DataFrame))
# self.assertEqual(MemStyles.DATA_FRAME, m.matrix_header.memory_style)
#
# def test_list(self):
# headers = self.broker.list()
# self.assertEquals(14,len(headers))
# header = headers[0]
# self.assertIsNone(header.revision_id)
# self.assertEqual("file",header.storage_method)
# self.assertEqual("file_name_1.csv", header.path)
# self.assertEqual("file_name_1.csv",header.name)
# self.assertEqual("description of file_name_1.csv",header.description)
# self.assertEqual(MemStyles.DATA_FRAME, header.memory_style)
```
#### File: test/testutil/file_utils.py
```python
import os
import test.testutil.pandas_utils as pu
import pandas as pd
import pathlib
rows = ['a','b','c','d','e','f','g','h','i','j','k']
def fill_directory_with_content(root_directory,directory_path, number_files):
index_data =[]
key_data = []
for i in range(1, number_files):
df = pu.create_simple_series(rows[:i + 2], i + 10)
file_name = "file_name_{}.csv".format(i)
filepath = os.path.join(root_directory,directory_path, file_name)
df.to_csv(filepath)
posix_path = pathlib.PureWindowsPath(os.path.join("/",directory_path,file_name))
key_data.append(posix_path.as_posix().strip("/"))
index_data.append((file_name,"description of {}".format(file_name)))
df = pd.DataFrame( index=pd.Index(key_data,name="path"),data=index_data, columns=["name", "description"])
index_file_path = os.path.join(root_directory,directory_path, "index.txt")
df.to_csv(index_file_path)
def make_file_tree(root_directory_path,tree_depth, files_per_directory,relative_path=''):
for i in range(1, files_per_directory):
fill_directory_with_content(root_directory_path,relative_path,files_per_directory)
if tree_depth > 0:
new_relative_path = os.path.join(relative_path,"subdir_{}".format(i))
actual_directory_path =os.path.join(root_directory_path,new_relative_path)
os.mkdir(actual_directory_path)
make_file_tree(root_directory_path, tree_depth-1, files_per_directory,new_relative_path)
```
|
{
"source": "jeremy-cx/juno-bork",
"score": 3
}
|
#### File: jeremy-cx/juno-bork/junobork.py
```python
import os
import re
import csv
import time
import click
import shutil
import requests
from bs4 import BeautifulSoup
from itertools import izip_longest
GENRES = ['house', 'deep-house', 'techno', 'minimal-tech-house']
WINDOWS = ['this-week', 'today', 'eight-weeks']
LIMITS = ['10', '20', '50', '100', '500']
def grouper(n, iterable, fillvalue=None):
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
@click.command()
@click.argument('genre', type=click.Choice(GENRES))
@click.argument('window', type=click.Choice(WINDOWS))
@click.argument('limit', type=click.Choice(LIMITS))
@click.option('--sleep', default=3)
def main(genre, window, limit, sleep):
"""
Fetch new releases from http://www.juno.co.uk/.
"""
base_url = 'http://www.juno.co.uk/'
url = '{}/{}/{}/?items_per_page={}'.format(base_url, genre, window, limit)
r = requests.get(url)
soup = BeautifulSoup(r.content)
table = soup.find(class_='product_list')
products = table.find_all(attrs={'ua_location': re.compile('product')})
if not products:
return
try:
os.makedirs(genre)
except OSError:
pass # already exists
with open('{}/{}.csv'.format(genre, genre), 'wb') as out_file:
for idx, (row1, row2, row3) in enumerate(grouper(3, products)):
if idx == 0:
csv_writer = csv.writer(out_file)
csv_writer.writerow([
'artist', 'title', 'label', 'catno', 'format', 'genre',
'style', 'tracks', 'images', 'audio',
])
try:
url = row1\
.find(class_='producttitle')\
.find('a', class_='jhighlight')['href']
r = requests.get(base_url + url)
soup = BeautifulSoup(r.content)
image_keys = []
images = soup.find('div', class_='product-images-large')
for image_idx, image in enumerate(images.find_all('img')):
r = requests.get(image['src'], stream=True)
key = '{}-{}-{}.jpeg'.format(genre, idx, image_idx)
with open('{}/{}'.format(genre, key), 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
image_keys.append(key)
audio_keys = []
anchors = soup.findAll('a', href=re.compile('http.*\.mp3'))
for url in [x['href'] for x in anchors]:
filename = url.rsplit('/', 1)[1]
file_path = os.path.join(genre, filename)
mp3 = requests.get(url, stream=True)
with open(file_path, 'wb') as fd:
for chunk in mp3.iter_content(1024):
fd.write(chunk)
audio_keys.append(filename)
title = row1.find(class_='producttitle').find('span').text
label = row1.find(class_='productlabel').find('span').text
catno = row2.find('span').text
artist = row1\
.find(class_='productartist')\
.find('span')\
.find('a')\
.text
format_ = row1\
.find(class_='producttitle')\
.text.strip('\t\n\r')\
.split('(', 1)[1]\
.split(')')[0]
tracks = filter(None, [
x.strip() for x in row3.find('span').text
.encode('utf8')
.replace('\t', '')
.replace('\n', '')
.replace('\xa0', '')
.replace('\xc2', '')
.splitlines()
])
csv_writer.writerow([
artist,
title,
label,
catno,
format_,
'Electronic',
genre.replace('-', ' ').title(),
', '.join(tracks),
', '.join(image_keys),
', '.join(audio_keys),
])
except (UnicodeEncodeError, UnicodeDecodeError):
continue
time.sleep(sleep)
if __name__ == "__main__":
main()
```
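The scraper walks the Juno product table three rows at a time (title row, catalogue-number row, tracklist row) using the classic `grouper` recipe. A small standalone sketch of that recipe follows; it is written for Python 3, whereas the script above targets Python 2 (hence `izip_longest` and the bytes-mode CSV file).

```python
from itertools import zip_longest  # Python 3 name for izip_longest


def grouper(n, iterable, fillvalue=None):
    # Collect data into fixed-length chunks: grouper(3, "ABCDEFG") -> ABC DEF G--
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)


rows = ["title-1", "catno-1", "tracks-1", "title-2", "catno-2", "tracks-2", "title-3"]
for row1, row2, row3 in grouper(3, rows):
    print(row1, row2, row3)  # the final group is padded with None
```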
|
{
"source": "jeremyd4500/senior-project-2020",
"score": 2
}
|
#### File: backend/report/views.py
```python
from .models import Report
from .serializers import ReportSerializer
from rest_framework import generics, mixins
from rest_framework.authentication import BasicAuthentication, TokenAuthentication
from rest_framework.permissions import IsAuthenticated
class ReportList(mixins.ListModelMixin,mixins.CreateModelMixin,generics.GenericAPIView):
queryset = Report.objects.all()
serializer_class = ReportSerializer
# authentication_classes = [TokenAuthentication,BasicAuthentication]
# permission_classes = [IsAuthenticated]
def get(self,request):
return self.list(request)
def post(self,request):
return self.create(request)
class ReportDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin,generics.GenericAPIView):
queryset = Report.objects.all()
serializer_class = ReportSerializer
# authentication_classes = [TokenAuthentication, BasicAuthentication]
# permission_classes = [IsAuthenticated]
def get(self, request, pk):
return self.retrieve(request, pk)
def put(self, request, pk):
return self.update(request, pk)
def delete(self, request, pk):
return self.destroy(request, pk)
```
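A hedged sketch of how these views might be wired into the project's URL configuration; the module path and route names below are assumptions, not taken from the repository.

```python
# Hypothetical backend/report/urls.py
from django.urls import path

from .views import ReportDetail, ReportList

urlpatterns = [
    path('reports/', ReportList.as_view(), name='report-list'),
    path('reports/<int:pk>/', ReportDetail.as_view(), name='report-detail'),
]
```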
|
{
"source": "JeremyDaug/ButterBot",
"score": 4
}
|
#### File: JeremyDaug/ButterBot/ledger.py
```python
from typing import Union, Mapping, Tuple, NewType
from item import Items
from account import Account
class Ledger:
"""Ledger class which contains and manages both currency and items.
All valid Commands to be accounted for.
-- handled in chatbot (creates class instance)
-+- Ledger [Ledger Name]
-- Handled by transaction
-+ [Account] gives [Account]: [Value], [Items...]
-+ [Account] buys [Items...]
-+ [Account] sells [Items...]
-+ [Account] takes from Pot: [Value], [Items...]
-+ Bank gives [Account]: [Value], [Items...]
-+ Bank takes from [Account]: [Value], [Items...]
-+- Set Value [Item]: [Value]
-- handled in chatbot and calls show_balance()
-+- [Account] Balance
-- handled in chatbot and calls add_user()
-+- Add Account [Account Name]
-- Handled in chatbot and calls show_rectify()
-+- Rectify
-- Handled in chatbot
-+- New Item [Item]: [Value]
-+- Delete Item [Item]
-+- Show History [Lines]
-+- Show Items
-+- Save
-+- Load
-+- Show Accounts
-+- Toggle User Lock
-+- Toggle Store Lock
-+- Toggle Bank Lock
-+- Toggle Transaction Lock
-+- Total Value
"""
def __init__(self, name: str, server: str, admin: str, key: str,
storekey: str) -> None:
"""
Init function for our Ledger class.
:param name: name of the ledger.
:param server: name of the server.
:param admin: Admin's Name.
:param key: Admin's Key
:param storekey: Key to the store (the bot's key).
"""
# The name of the ledger
self.name = name
# The server it belongs to, so more than one ledger can be made.
self.location = server
# A list of users who exist in the system.
self.users = []
# The pot is a non-user, it is a communal pool of value, users can
# add and remove from it at will, but the pot cannot change anything
# it is given.
self.pot = Account("", "Pot", "")
# Store is a system for users to turn items in for money and vice versa
# Currently has no safety besides the item must exist and have a value.
# Safety can be added, requiring admin to check off on transactions
# with the store but for version 1 this is not being added.
self.store = Account(admin, "Store", storekey)
self.store_key = storekey
# The bank, the account for the admin who can add and take into the
# system without worry.
self.bank = Account(admin, "Bank", key)
# The transaction ledger, what all was traded and in what order.
self.history = []
# The location it is saved to.
self.save_location = "save_%s_%s.sav" % (self.location, self.name)
# The Library of items as we know it.
self.library = Items()
# A number of locks to keep things under control.
self.user_lock = False
self.transaction_lock = False
self.store_lock = False
self.bank_lock = False
return
def new_item(self, name: str, value: float=-1) -> bool:
"""
Helper function, calls Items.new_item().
:param name: The Name of the new item.
:param value: The value of the item.
:return: True if successful (item does not exist already).
"""
return self.library.new_item(name=name, value=value)
def delete_item(self, name: str, key: str) -> bool:
"""
A helper function to remove an item from the library.
Calls self.library.delete_item()
:param name: The item to be deleted.
:param key: The user's key to ensure it is the admin.
:return: True if successful, false if anything went wrong.
"""
if key != self.bank.key:
return False
return self.library.delete_item(name)
def admin_new_item(self, key: str, name: str, value: float) -> bool:
"""
A helper function to check it is the admin adding the item. Only used
externally.
:param key: The key of the user.
:param name: Name of the item.
:param value: Value of the item, defaults to unvalued (-1)
:return: True if item was added, false otherwise.
"""
if value in [-1, -2] or value >= 0:
return self.new_item(name, value)
return False
def get_account(self, account: str) -> Union[Account, None]:
"""Get's account by name.
:param account: The account to retrieve
:return: The user we are looking for, None if it was not found.
:rtype: Account, None
"""
if account == "Pot":
return self.pot
if self.is_account_name(account):
for i in range(len(self.users)):
if account == self.users[i].name:
return self.users[i]
else:
return None
def __del__(self) -> None:
""" When this is deleted it should automatically save. We don't want to
lose our data do we?
"""
self.save()
return
def show_users(self) -> str:
""" Returns string of all current accounts.
:return: All current accounts.
"""
ret = ''
for user in self.users:
ret += user.name + '\n'
return ret
def add_user(self, owner: str, name: str, key: str, value: float=0,
items: Mapping[str, int]=None) -> Tuple[bool, str]:
"""Add user function adds the user to the system.
:param owner: Who owns it
:param name: the name of the account
:param key: the owner's key
:param value: how much they start with
:param items: A list of items they start with (if any), takes a string
:return:
"""
if self.user_lock:
return False, 'No new Users allowed.\n'
elif any(i.name == name for i in self.users):
return False, 'User already exists.\n'
if value < 0:
return False, 'Cannot start with negative value.\n'
if items is None:
items = dict()
for item, amount in items.items():
if amount < 1:
return (False, 'Cannot start with a negative number of %s.\n'
% item)
for item in items:
if item not in self.library.library: # if item doesn't exist add
self.library.new_item(name=item, value=-1) # Item at default
self.users.append(Account(owner, name, key, value, items))
self.history.append('{0.name} account added with {0.value}gp and '
'{1}.'.format(self.users[-1],
self.users[-1].item_list()))
return True, '%s account added.\n' % name
def is_account_name(self, name: str) -> bool:
"""Checks if account exists.
:param name: The name of the account to check.
:return: True if there, false otherwise.
"""
if name in ['Pot', 'Bank', 'Store']:
return True
return name in [user.name for user in self.users]
def item_list(self) -> str:
"""Gets a list of all items and their current value.
:return: All the current items and their values.
"""
ret = ''
for item, value in self.library.library.items():
ret += '{}: {}\n'.format(item, value)
return ret
def set_value(self, command: str, key: str) -> str:
""" Sets the value of an item that already exists.
:param command: The command given by the user.
:param key: The key of the user to check against.
:return: A string of anything that happened and if it's successful.
"""
# str.lstrip strips a set of characters, not a prefix, so slice the prefix off instead
data = command[len('Set Value '):]
item, value = data.split(':')
item = item.strip()
value = float(value.strip())
if item not in self.library.library:
return 'Item Not found.\n'
if key == self.bank.key:
if value in [-1, -2] or value >= 0:
self.library.change_value(item, value)
else:
return 'Value must be -1 for unvalued, -2 ' \
'for priceless, or non-negative.'
elif self.library.library[item] == -1:
if value < 0:
return "Value must be non-negative."
self.library.change_value(item, value)
else:
return "Only the admin can set the value of an item that is already valued.\n"
self.history.append(command)
return "Value properly set."
def transaction(self, command: str, key: str) -> str:
""" Transaction function.
This will handle almost all transactions on the basic level, acting as
a switch between the more appropriate functions.
:param command: The command to parse.
Valid commands:
[Account] gives [Account]: [Value], [Items...]
[Account] takes from Pot: [Value], [Items...]
[Account] sells [Items...]
[Account] buys [Items...]
Bank gives [Account]: [Value], [Items...]
Bank takes from [Account]: [Value], [Items...]
---- Special Operation ---
Set Value [Item]: [Value]
Item must be in [Name]: [Count] format.
:param key: The key we use for security.
:return: A string of success or failure, if failure
error will be given.
"""
if self.transaction_lock:
return 'Transactions locked.\n'
giver = ""
taker = ""
action = ""
value = ""
items = ""
ret = ''
# special op, while it doesn't move anything around, it is still
# recorded and so put here.
if command.startswith("Set Value"):
return self.set_value(command, key)
words = command.split(" ")
if len(words) < 3:
return 'Command not Recognized.\n'
# giver
giver = words[0]
if giver != 'Bank' and not self.is_account_name(giver):
return 'Account does not exist.\n'
# taker
if words[1] == 'gives':
taker = words[2]
action = 'give'
elif words[1] == 'takes' and words[2] == 'from':
taker = words[3]
action = 'take'
elif words[1] in ['buys', 'sells']:
taker = 'Store'
action = words[1][:-1]
else:
return 'Command not Recognized.\n'
if ':' in taker:
taker = taker.replace(':', '')
if taker not in ['Bank', 'Pot', 'Store'] and not self.is_account_name(taker):
return "Account does not exist.\n"
if giver == taker:
return "Cannot make a transaction with yourself.\n"
# inputs
try:
inputs = ""
if action in ['give', 'take']:
inputs = command.split(':', 1)[1]
elif action in ['buy', 'sell']:
inputs = command.split(" ", 2)[2]
inputs = inputs.split(',')
# value
if len(inputs) == 1:
if ':' in inputs[0]:
items = inputs
value = 0
else:
value = float(inputs[0])
items = {}
elif ':' not in inputs[0]:
value = float(inputs[0])
items = inputs[1:]
else:
value = 0
items = inputs
except IndexError:
return "Improper Syntax.\n"
# items
items_fin = {}
for item in items:
if not item: # if empty, ignore
continue
if ':' not in item:
return "Item must be in [Item]:[Amount] format.\n"
name, amount = item.split(':')
items_fin[name.strip()] = int(amount)
if giver == 'Bank': # Bank actions ---------
if self.bank_lock:
return 'Bank Locked.\n'
if self.bank.key != key:
return "Invalid Key.\n"
if action == 'give':
# bank can give items with no value and does not lose anything when
# it gives
for item in items_fin:
if item not in self.library.library:
self.library.new_item(item)
ret = self.get_account(taker).add(value=value, items=items_fin)
elif action == 'take':
# bank can take without reservation.
ret = self.get_account(taker).take(value=value, items=items_fin)
if not ret:
self.history.append(command)
return ret
elif taker == 'Store':
if self.store_lock:
return "Store Locked.\n"
dne = ''
unvalued = ''
priceless = ''
ret = ''
for item in items_fin:
if item not in self.library.library:
dne += "%s," % item
elif self.library.library[item] == -1:
unvalued += "%s," % item
elif self.library.library[item] == -2:
priceless += "%s," % item
if dne:
ret += "Items don't exist: %s.\n" % dne
if unvalued:
ret += "Items currently unvalued: %s.\n" % unvalued
if priceless:
ret += "Priceless Items: %s.\n" % priceless
if ret:
return ret
price = 0
if action == 'buy':
price = sum([amount*self.library.library[name] for name, amount in items_fin.items()])
ret = self.get_account(giver).remove(key=key, value=price)
if not ret:
ret = self.get_account(giver).add(items=items_fin)
if ret:
return ret
elif action == 'sell':
price = sum([amount*self.library.library[name] for name, amount in items_fin.items()])
ret = self.get_account(giver).remove(items=items_fin, key=key)
if not ret:
ret = self.get_account(giver).add(value=price)
if ret:
return ret
self.history.append(command + " for %d." % price)
elif taker == 'Pot':
if action == 'give':
ret = self.get_account(giver).remove(value=value, items=items_fin, key=key)
if not ret:
ret = self.pot.add(value=value, items=items_fin)
if ret:
return ret
self.history.append(command)
elif action == 'take':
ret = self.pot.remove(value=value, items=items_fin, key="")
if not ret:
ret = self.get_account(giver).add(value=value, items=items_fin)
if ret:
return ret
self.history.append(command)
elif taker == 'Bank':
ret = self.get_account(giver).remove(value=value, items=items_fin,
key=key)
if not ret:
ret = self.bank.add(value=value, items=items_fin)
if ret:
return ret
self.history.append(command)
else:
ret = self.get_account(giver).remove(value=value, items=items_fin,
key=key)
if not ret:
ret = self.get_account(taker).add(value=value, items=items_fin)
if ret:
return ret
self.history.append(command)
return ret
def show_balance(self, account: str) -> Tuple[float, Mapping[str, int], float, str]:
"""Shows the balance of an account.
Value and all items, and the total value including items.
:param account: The account to retrieve
:param key: The key of the account
:return: A string of the balance.
"""
if not self.is_account_name(account):
return 0, {}, 0, "Account does not exist.\n"
value = self.get_account(account=account).value
items = self.get_account(account=account).inventory
total_value = float(value) + sum([amount*self.library.library[item] if self.library.library[item] >= 0 else 0
for item, amount in items.items()])
items_str = '\n'
names = sorted([i for i in items.keys()])
for name in names:
items_str += '%s:%d, \n' % (name, items[name])
return value, items, total_value, "%s has %d and %s Total value of %d.\n" % (account, value, items_str, total_value)
def total_value(self) -> Tuple[float, Mapping[str, int], float, str]:
"""Retrieves the total value of the pot and all users combined.
:return: Returns the value, all items in a single dict, total value,
and a string form of this info.
"""
value = 0
coll_items = {}
for user in self.users:
value += user.value
for item, amount in user.inventory.items():
if item in coll_items:
coll_items[item] += amount
else:
coll_items[item] = amount
# do the same for the pot.
value += self.pot.value
for item, amount in self.pot.inventory.items():
if item in coll_items:
coll_items[item] += amount
else:
coll_items[item] = amount
total_value = value + sum([amount*self.library.library[item] if self.library.library[item] >= 0 else 0
for item, amount in coll_items.items()])
items_str = ''
for item, amount in coll_items.items():
items_str += '%s:%d, ' % (item, amount)
items_str = items_str[:-2]
ret = 'Everyone together holds %d and %s for a total value of %d.\n' % (value, items_str, total_value)
return value, coll_items, total_value, ret
def show_rectify(self) -> str:
"""Shows the current value difference between all users and the average.
This currently only works in value and makes no suggestions on items.
:return: Each user and the total value they need to reach the average.
"""
ret = ''
ave_value = self.total_value()[2]/len(self.users)
for user in self.users:
ret += '%s: %d\n' % (user.name, ave_value-self.show_balance(user.name)[2])
return ret
def toggle_user_lock(self) -> None:
"""
Lock user function.
"""
self.user_lock = not self.user_lock
return
def toggle_transaction_lock(self) -> None:
"""
Lock transactions.
"""
self.transaction_lock = not self.transaction_lock
return
def toggle_store_lock(self) -> None:
"""
Lock store function.
"""
self.store_lock = not self.store_lock
return
def toggle_bank_lock(self) -> None:
"""
Lock bank function.
"""
self.bank_lock = not self.bank_lock
return
def save(self) -> None:
""" Save function. Can be called as needed, guaranteed to be called on
close.
"""
with open(self.save_location, 'w') as file:
# first lines should have the current totals for each account
# All small lines are separated by \n, big separations by \n\n
file.write(self.bank.save_data()+"\n")
file.write(self.store.save_data()+'\n')
file.write(self.pot.save_data()+'\n')
for i in self.users:
file.write(i.save_data()+'\n')
file.write("\n\n")
# we save the items separately from our transactions and users.
# last we write the full transaction history.
file.write(self.transaction_log())
# save our smaller data to a config file.
with open("config_" + self.save_location, 'w') as file:
file.write(str(self.user_lock) + "\n" + str(self.transaction_lock)
+ "\n" + str(self.store_lock) + "\n"
+ str(self.bank_lock))
self.library.set_save_location(self.save_location)
self.library.save_data()
return
def load_config(self) -> None:
"""Config loading file"""
with open("config_"+self.save_location, 'r') as file:
lines = file.readlines()
# readlines() keeps the trailing newline, so strip before comparing
self.user_lock = lines[0].strip() == 'True'
self.transaction_lock = lines[1].strip() == 'True'
self.store_lock = lines[2].strip() == 'True'
self.bank_lock = lines[3].strip() == 'True'
return
def load_save(self) -> None:
"""Load save file."""
with open(self.save_location, 'r') as file:
data = file.read()
sections = data.split('\n\n')
lines = sections[0].splitlines()
self.bank.load_data(lines[0])
self.store.load_data(lines[1])
self.pot.load_data(lines[2])
for line in lines[3:]:  # bank, store, and pot occupy the first three lines
self.users.append(Account())
self.users[-1].load_data(line)
self.library.set_save_location(self.save_location)
self.library.load_data()
self.history = []
for line in sections[1].splitlines():
self.history.append(line)
def transaction_log(self, transactions: int=0) -> str:
"""Gets the history and returns it in readable format.
:param int transactions: number of transactions to show, 0 shows all.
:return: The history, delineated by newlines.
"""
ret = ''
if 0 < transactions < len(self.history):
for i in self.history[-transactions:]:
ret += i + '\n'
else:
for i in self.history:
ret += i + '\n'
return ret
```
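A minimal usage sketch for the command grammar documented in the class docstring. It assumes this module is importable as `ledger` and that `Account.add`/`Account.remove` behave as the calls above imply (their sources are not shown here); the names and keys are placeholders.

```python
from ledger import Ledger

book = Ledger(name="Party", server="example-server", admin="GM",
              key="gm-key", storekey="store-key")
print(book.add_user(owner="alice", name="Alice", key="alice-key", value=100))
print(book.add_user(owner="bob", name="Bob", key="bob-key", value=50))

# The admin prices an item, then Alice buys one copy from the store.
book.new_item("Sword", 10)
print(book.transaction("Alice buys Sword:1", key="alice-key"))
print(book.show_balance("Alice")[3])
print(book.transaction_log())
```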
|
{
"source": "jeremydavis-2/Jolanta-by-dvr",
"score": 3
}
|
#### File: Jolanta-by-dvr/Python_libs/jolanta.py
```python
import numpy as np
import scipy.special
"""
Collects functions defining and evaluating the Jolanta model potential
"""
def Jolanta_1D(x, a=0.2, b=0.0, c=0.14):
"""
default 1D potential; has a resonance just below 7 eV
use for DVRs
"""
return (a*x*x-b)*np.exp(-c*x*x)
def Jolanta_3D(r, a=0.1, b=1.2, c=0.1, l=1, as_parts=False):
"""
default 3D potential; has a resonance at 1.75 eV - 0.2i eV
use for DVRs
"""
if as_parts:
Va = a*r**2*np.exp(-c*r**2)
Vb = b*np.exp(-c*r**2)
Vc = 0.5*l*(l+1)/r**2
return (Va, Vb, Vc)
else:
return (a*r**2-b)*np.exp(-c*r**2) + 0.5*l*(l+1)/r**2
def eval_ugto(r, a, l):
"""
unnormalized GTO(r) = r^l * exp(-a*r^2)
"""
return r**l * np.exp(-a*r*r)
def eval_aos(Rs, alphas, Ns, l):
"""
evaluate all AOs (all GTOs) defined by exponents alpha and angular momentum l
Rs np.array with positions to evaluate the AOs at
alphas GTO exponents
Ns normalization factors
l angular momentum common to all GTOs
AO_i(r_k) = N_i * r_k**l * np.exp(-alpha_i*r_k*r_k)
"""
nAOs = len(alphas)
nRs = len(Rs)
AOs = np.zeros((nRs, nAOs))
for i in range(nAOs):
AOs[:,i] = Ns[i] * eval_ugto(Rs, alphas[i], l)
return AOs
def eval_mos(Rs, Cs, alphas, Ns, l):
"""
evaluate MOs defined by coefficients in Cs at all positions in Rs
each AO is defined by: AO(r) = r^l * exp(-a*r^2)
MO[R_k,j] = Sum AO[R_k,i]*Cs[i,j]
"""
#nRs = len(Rs)
#if len(Cs.shape) == 1:
# nMOs = 1
#else:
# nMOs = Cs.shape[1]
#MOs = np.zeros((nRs, nMOs))
AOs = eval_aos(Rs, alphas, Ns, l)
return np.dot(AOs, Cs)
def sp_gamma(a, z):
"""
unnormalized upper incomplete gamma function: integral(t^(a-1)*exp(-t),t,z,oo)
needed for evaluating CAP integrals analytically
"""
return scipy.special.gamma(a) * scipy.special.gammaincc(a,z)
def gto_integral_0_inf(a, n=0):
"""
integral(x^n * exp(-a*x^2), x, 0, oo)
where n is an integer >=0
"""
return 0.5 * a**(-0.5*(n+1)) * scipy.special.gamma(0.5*(n+1))
def gto_integral_d_inf(a, n=0, d=1.0):
"""
integral(x^n * exp(-a*x^2), x, d, oo)
can be directly used to evaluate CAP integrals
W12 = <gto_1 | (r-d)^2 |gto_2> = gto_integral_d_inf(a1+a2, n=l1+l2+2, d=d)
"""
ad2=a*d*d
if n%2 == 0:
m = n/2
return 0.5 * a**(-m-0.5) * sp_gamma(m+0.5,ad2)
else:
m = (n+1)/2
return 0.5 * a**(-m) * sp_gamma(m,ad2)
def S_12(a1, a2, l1, l2):
"""
overlap matrix
S12 = <gto_1 | gto_2>
"""
return gto_integral_0_inf(a1+a2, n=l1+l2)
def T_12(a1, a2, l1, l2, mass=1):
"""
kinetic energy
T12 = 1/(2*mass) <d/dr gto_1 | d/dr gto_2>
assume that both l1 and l2 >= 1
because psi(0) = 0
"""
T1 = gto_integral_0_inf(a1+a2, n=l1+l2-2)
T2 = gto_integral_0_inf(a1+a2, n=l1+l2)
T3 = gto_integral_0_inf(a1+a2, n=l1+l2+2)
return 0.5*(l1*l2*T1 - 2*(l1*a2+l2*a1)*T2 + 4*a1*a2*T3) / mass
def V_12(a1, a2, l1, l2, a=0.1, b=1.2, c=0.1, l=1):
"""
Jolanta_3D matrix element <gto_1| Jolanta_3D |gto_2>
a1, a2 are the GTO exponents, l1 and l2 the powers of r
the unnormalized integral is returned
(a*r**2-b)*np.exp(-c*r**2) + 0.5*l*(l+1)/r**2
"""
Va = gto_integral_0_inf(a1+c+a2, n=l1+l2+2)*a
Vb = gto_integral_0_inf(a1+c+a2, n=l1+l2 )*b
Vc = gto_integral_0_inf(a1 +a2, n=l1+l2-2)*0.5*l*(l+1)
return Va-Vb+Vc
```
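The matrix-element helpers above are enough to diagonalize the radial Jolanta_3D Hamiltonian in a small p-type GTO basis. The sketch below assumes those functions are in scope; the even-tempered exponents are illustrative, not taken from the repository. Because `S_12`, `T_12`, and `V_12` return unnormalized integrals, solving the generalized eigenvalue problem H c = E S c lets the normalization factors cancel.

```python
import numpy as np
from scipy.linalg import eigh

alphas = [0.025 * 2.0**k for k in range(10)]  # assumed even-tempered exponent set
l = 1
n = len(alphas)

S = np.zeros((n, n))
H = np.zeros((n, n))
for i, ai in enumerate(alphas):
    for j, aj in enumerate(alphas):
        S[i, j] = S_12(ai, aj, l, l)
        H[i, j] = T_12(ai, aj, l, l) + V_12(ai, aj, l, l)

energies, _ = eigh(H, S)  # generalized eigenvalue problem in the unnormalized basis
print(energies[:5])       # lowest pseudo-spectrum energies
```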
|
{
"source": "jeremydeanlakey/lakey-finicity-python",
"score": 3
}
|
#### File: lakey-finicity-python/lakey_finicity/api_http_client.py
```python
import json
import time
from typing import Optional
from requests import Response
import requests
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from lakey_finicity.utils import validate_secret
# https://docs.finicity.com/guide-to-partner-authentication-and-integration/
_FINICITY_URL_BASE = "https://api.finicity.com"
_TWO_HOURS_S = 2 * 60 * 60
def _retry_session(retries=3, backoff_factor=0.5) -> requests.Session:
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=(500, 502, 504),
)
adapter = HTTPAdapter(max_retries=retry)
session = requests.Session()
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
class ApiHttpClient(object):
def __init__(self, app_key: str, partner_id: str, partner_secret: str):
"""
:param app_key: Finicity-App-Key from Developer Portal
:param partner_id: Partner ID from Developer Portal
:param partner_secret: Current value of Partner Secret from Developer Portal
"""
self.__app_key = app_key
self.__partner_id = partner_id
self.__secret = partner_secret
self.__token = None
self.__token_expiration = 0
self.last_response = None
def get(self, path: str, params: Optional[dict] = None, extra_headers: Optional[dict] = None) -> Response:
url = _FINICITY_URL_BASE + path
token = self.__get_token()
headers = {
"Finicity-App-Key": self.__app_key,
"Accept": "application/json",
"Finicity-App-Token": token,
}
if extra_headers:
headers.update(extra_headers)
params = params or {}
self.last_response = _retry_session().get(url, headers=headers, params=params)
if self.last_response.ok:
return self.last_response
else:
raise Exception(str(self.last_response.content) + ", see https://community.finicity.com/s/article/201750879-Error-and-Aggregation-Status-Codes")
def post(self, path: str, data: Optional[dict], extra_headers: Optional[dict] = None) -> Response:
url = _FINICITY_URL_BASE + path
token = self.__get_token()
headers = {
"Finicity-App-Key": self.__app_key,
"Content-Type": "application/json",
"Accept": "application/json",
"Finicity-App-Token": token,
}
if extra_headers:
headers.update(extra_headers)
self.last_response = _retry_session().post(url, data=json.dumps(data), headers=headers)
if self.last_response.ok:
return self.last_response
else:
raise Exception(str(self.last_response.content) + ", see https://community.finicity.com/s/article/201750879-Error-and-Aggregation-Status-Codes")
def put(self, path: str, data: dict, extra_headers: Optional[dict] = None) -> Response:
url = _FINICITY_URL_BASE + path
token = self.__get_token()
headers = {
"Finicity-App-Key": self.__app_key,
"Content-Type": "application/json",
"Finicity-App-Token": token,
}
if extra_headers:
headers.update(extra_headers)
self.last_response = _retry_session().put(url, data=json.dumps(data), headers=headers)
if self.last_response.ok:
return self.last_response
else:
raise Exception(str(self.last_response.content) + ", see https://community.finicity.com/s/article/201750879-Error-and-Aggregation-Status-Codes")
def delete(self, path: str, extra_headers: Optional[dict] = None) -> Response:
url = _FINICITY_URL_BASE + path
token = self.__get_token()
headers = {
"Finicity-App-Key": self.__app_key,
"Content-Type": "application/json",
"Finicity-App-Token": token,
}
if extra_headers:
headers.update(extra_headers)
self.last_response = _retry_session().delete(url, headers=headers)
if self.last_response.ok:
return self.last_response
else:
raise Exception(str(self.last_response.content) + ", see https://community.finicity.com/s/article/201750879-Error-and-Aggregation-Status-Codes")
def __get_token(self) -> str:
if not self.__token or time.time() >= self.__token_expiration:
self.authenticate()
return self.__token
# https://community.finicity.com/s/article/Partner-Authentication
# POST /aggregation/v2/partners/authentication
def authenticate(self):
"""Validate the partner’s credentials (Finicity-App-Key, Partner ID, and Partner Secret) and return a temporary access token.
The token must be passed in the HTTP header Finicity-App-Token on all subsequent API requests.
The token is valid for two hours. You can have multiple active tokens at the same time.
Five unsuccessful authentication attempts will cause the partner’s account to be locked.
To unlock the account, send an email to <EMAIL>
:return: A temporary access token, which must be passed in the HTTP header Finicity-App-Token on all subsequent API requests (see Accessing the API).
"""
path = "/aggregation/v2/partners/authentication"
url = _FINICITY_URL_BASE + path
headers = {
"Finicity-App-Key": self.__app_key,
"Content-Type": "application/json",
"Accept": "application/json",
}
data = {
"partnerId": self.__partner_id,
"partnerSecret": self.__secret,
}
new_token_expiration = time.time() + (2 * 60 * 60) - (10 * 60) # two hour expiration less ten minute buffer
response = requests.post(url, headers=headers, data=json.dumps(data))
if response.status_code == 200:
self.__token = response.json()['token']
self.__token_expiration = new_token_expiration
return self.__token
else:
raise Exception(f"authentication issue {response.status_code}: {response.content}")
# https://community.finicity.com/s/article/Modify-Partner-Secret
# PUT /aggregation/v2/partners/authentication
def modify_secret(self, new_partner_secret: str):
"""Change the partner secret that is used to authenticate this partner.
The secret does not expire, but can be changed by calling Modify Partner Secret.
A valid partner secret may contain upper- and lowercase characters, numbers, and the characters !, @, #, $, %, &, *, _, -, +.
It must include at least one number and at least one letter, and its length should be between 12 and 255 characters.
:param new_partner_secret: The new value for Partner Secret
"""
path = "/aggregation/v2/partners/authentication"
validate_secret(new_partner_secret)
data = {
"partnerId": self.__partner_id,
"partnerSecret": self.__secret,
"newPartnerSecret": new_partner_secret,
}
response = self.put(path=path, data=data)
if response.status_code == 204:
self.__secret = new_partner_secret
else:
raise Exception(f"issue modifying secret: {response.status_code}: {response.reason}")
```
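A minimal sketch of using the client directly; the credentials are placeholders, and the endpoint and params shown are illustrative rather than taken from this file. Authentication happens lazily inside `get`/`post`/`put`/`delete` via the private token helper, so no explicit `authenticate()` call is needed.

```python
from lakey_finicity.api_http_client import ApiHttpClient

client = ApiHttpClient(
    app_key="YOUR-APP-KEY",
    partner_id="YOUR-PARTNER-ID",
    partner_secret="YOUR-PARTNER-SECRET",
)

# The token is fetched (and refreshed) automatically before the request goes out.
response = client.get("/aggregation/v1/customers", params={"limit": 5})
print(response.json())
```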
#### File: models/account/account_type.py
```python
import enum
# https://community.finicity.com/s/article/201750779-Account-Types
# Finicity can usually determine the correct type for each account, but in some
# rare cases the account type will be unknown. In these cases, the account
# number and name should be displayed to the customer, who must specify the
# correct account type by selecting a value from the following table.
# Calls to Activate Customer Accounts v2 (without Aggregation) will fail if the
# account's field contains unknown or any unrecognized type designation. The
# failed request will return HTTP 400 (Bad Request) with the error code 10106
# (Invalid Account Type).
class AccountType(enum.Enum):
unknown = "unknown" # Type cannot be determined (customer must specify the correct type from other supported types in this table)
checking = "checking" # Standard checking
savings = "savings" # Standard savings
cd = "cd" # Certificates of deposit
moneyMarket = "moneyMarket" # Money Market
creditCard = "creditCard" # Standard credit cards
lineOfCredit = "lineOfCredit" # Home equity,line of credit
investment = "investment" # Generic investment (no details)
investmentTaxDeferred = "investmentTaxDeferred" # Generic tax-advantaged investment (no details)
employeeStockPurchasePlan = "employeeStockPurchasePlan" # ESPP, Employee Stock Ownership Plans (ESOP), Stock Purchase Plans
ira = "ira" # Individual Retirement Account (not Rollover or Roth)
acct_401k = "401k" # 401K Plan
roth = "roth" # Roth IRA, Roth 401K
acct_403b = "403b" # 403B Plan
acct_529 = "529" # 529 Plan
rollover = "rollover" # Rollover IRA
ugma = "ugma" # Uniform Gifts to Minors Act
utma = "utma" # Uniform Transfers to Minors Act
keogh = "keogh" # Keogh Plan
acct_457 = "457" # 457 Plan
acct_401a = "401a" # 401A Plan
mortgage = "mortgage" # Standard Mortgages
loan = "loan" # Auto loans, equity loans, other loans
not_in_enum = "not_in_enum"
@staticmethod
def from_description(description: str):
try:
return AccountType(description)
except:
return None
DEPOSIT_ACCOUNT_TYPES = {
AccountType.checking,
AccountType.savings,
AccountType.cd,
AccountType.moneyMarket,
}
INVESTMENT_ACCOUNT_TYPES = {
AccountType.investment,
AccountType.investmentTaxDeferred,
AccountType.employeeStockPurchasePlan,
AccountType.ira,
AccountType.roth,
AccountType.rollover,
AccountType.ugma,
AccountType.utma,
AccountType.keogh,
AccountType.acct_401k,
AccountType.acct_403b,
AccountType.acct_529,
AccountType.acct_457,
AccountType.acct_401a,
}
CREDIT_LINE_ACCOUNT_TYPES = {
AccountType.creditCard,
AccountType.lineOfCredit,
}
LOAN_ACCOUNT_TYPES = {
AccountType.mortgage,
AccountType.loan,
}
```
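A quick sketch of how the enum and the grouping sets above might be used when classifying an account type string returned by an institution (the raw string here is just an example).

```python
raw_type = "moneyMarket"
acct_type = AccountType.from_description(raw_type) or AccountType.unknown

if acct_type in DEPOSIT_ACCOUNT_TYPES:
    print(f"{acct_type.value} is a deposit account")
elif acct_type in INVESTMENT_ACCOUNT_TYPES:
    print(f"{acct_type.value} is an investment account")
elif acct_type in CREDIT_LINE_ACCOUNT_TYPES or acct_type in LOAN_ACCOUNT_TYPES:
    print(f"{acct_type.value} is a liability account")
else:
    print("unknown type: the customer must choose one explicitly")
```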
#### File: models/connect/answered_mfa_question.py
```python
from dataclasses import dataclass
# https://community.finicity.com/s/article/207505363-Multi-Factor-Authentication-MFA
@dataclass
class AnsweredMfaQuestion(object):
text: str
answer: str # Added by the partner for calls to the "MFA Answers" services
_unused_fields: dict # this is for forward compatibility and should be empty
def to_dict(self) -> dict:
return {
'text': self.text,
'answer': self.answer,
}
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
text = data.pop('text')
answer = data.pop('answer')
return AnsweredMfaQuestion(
text=text,
answer=answer,
_unused_fields=data,
)
```
#### File: models/connect/mfa_question.py
```python
from dataclasses import dataclass
# https://community.finicity.com/s/article/207505363-Multi-Factor-Authentication-MFA
from .answered_mfa_question import AnsweredMfaQuestion
@dataclass
class MfaQuestion(object):
text: str
_unused_fields: dict # this is for forward compatibility and should be empty
def answer(self, answer: str) -> AnsweredMfaQuestion:
return AnsweredMfaQuestion(text=self.text, answer=answer, _unused_fields=self._unused_fields)
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
text = data.pop('text')
return MfaQuestion(
text=text,
_unused_fields=data,
)
```
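A short sketch of the MFA round trip these two dataclasses support: parse a challenge question, attach the user's answer, and serialize it for the follow-up "MFA Answers" call. The payload shape is illustrative.

```python
payload = {"text": "What is the name of your first pet?"}

question = MfaQuestion.from_dict(payload)
answered = question.answer("Rex")
print(answered.to_dict())  # {'text': 'What is the name of your first pet?', 'answer': 'Rex'}
```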
#### File: models/consumer/consumer.py
```python
from dataclasses import dataclass
from typing import Optional
from lakey_finicity.models.birth_date import BirthDate
# https://community.finicity.com/s/article/Report-Consumers
@dataclass
class Consumer(object):
id: str # ID of the consumer (UUID with max length 32 characters)
firstName: Optional[str] # The consumer's first name(s) / given name(s)
lastName: Optional[str] # The consumer's last name(s) / surname(s)
address: Optional[str] # The consumer's street address
city: Optional[str] # The consumer's city
state: Optional[str] # The consumer's state
zip: Optional[str] # The consumer's ZIP code
phone: Optional[str] # The consumer's phone number
ssn: Optional[str] # Last 4 digits of the consumer's Social Security number
birthday: Optional[BirthDate] # The consumer's birth date
email: Optional[str] # The consumer's email address
createdDate: Optional[int] # A timestamp of when the consumer was created
_unused_fields: dict # this is for forward compatibility and should be empty
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
id = data.pop('id')
firstName = data.pop('firstName', None)
lastName = data.pop('lastName', None)
address = data.pop('address', None)
city = data.pop('city', None)
state = data.pop('state', None)
zip = data.pop('zip', None)
phone = data.pop('phone', None)
ssn = data.pop('ssn', None)
birthday_dict = data.pop('birthday', None)
birthday = BirthDate.from_dict(birthday_dict) if birthday_dict else None
email = data.pop('email', None)
createdDate = data.pop('createdDate', None)
return Consumer(
id=id,
firstName=firstName,
lastName=lastName,
address=address,
city=city,
state=state,
zip=zip,
phone=phone,
ssn=ssn,
birthday=birthday,
email=email,
createdDate=createdDate,
_unused_fields=data,
)
```
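All of these model classes share the same forward-compatibility pattern: known keys are popped into typed fields, and anything the API adds later is kept in `_unused_fields`. A sketch with made-up data:

```python
sample = {
    "id": "0123456789abcdef",                 # made-up consumer ID
    "firstName": "John",
    "lastName": "Smith",
    "createdDate": 1588000000,
    "someFutureField": "kept, not dropped",   # illustrative unknown key
}

consumer = Consumer.from_dict(sample)
print(consumer.firstName, consumer.lastName)
print(consumer._unused_fields)  # {'someFutureField': 'kept, not dropped'}
```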
#### File: models/customer/customer.py
```python
from dataclasses import dataclass
from typing import Optional
# https://community.finicity.com/s/article/201703219-Customers#customer_record
@dataclass
class Customer(object):
id: int
username: Optional[str]
firstName: Optional[str]
lastName: Optional[str]
type: Optional[str]
createdDate: int
_unused_fields: dict # this is for forward compatibility and should be empty
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
id = data.pop('id')
username = data.pop('username', None)
firstName = data.pop('firstName', None)
lastName = data.pop('lastName', None)
type = data.pop('type', None)
createdDate = data.pop('createdDate')
return Customer(
id=id,
username=username,
firstName=firstName,
lastName=lastName,
type=type,
createdDate=createdDate,
_unused_fields=data,
)
```
#### File: models/report/report_constraints.py
```python
from dataclasses import dataclass
from typing import List, Optional
from lakey_finicity.models.report.report_custom_field import ReportCustomField
# https://community.finicity.com/s/article/VOA-Report
@dataclass
class ReportConstraints(object):
_unused_fields: dict # this is for forward compatibility and should be empty
accountIds: Optional[List[str]]
fromDate: Optional[int]
reportCustomFields: Optional[List[ReportCustomField]]
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
accountIds = data.pop("accountIds", None)
fromDate = data.pop("fromDate", None)
reportCustomFields_raw = data.pop("reportCustomFields", None)
reportCustomFields = [ReportCustomField.from_dict(d) for d in reportCustomFields_raw] if reportCustomFields_raw else None
return ReportConstraints(
accountIds=accountIds,
fromDate=fromDate,
reportCustomFields=reportCustomFields,
_unused_fields=data,
)
```
#### File: report/voa/voa_account_record.py
```python
from dataclasses import dataclass
from typing import List, Optional
from lakey_finicity.models.report.transaction_record import TransactionRecord
from lakey_finicity.models.report.voa.account_asset_record import AccountAssetRecord
from lakey_finicity.models.report.voa.details_record import DetailsRecord
@dataclass
class VoaAccountRecord(object):
id: int # Finicity account ID
number: str # The account number from the institution (obfuscated)
ownerName: Optional[str] # The name(s) of the account owner(s). This field is optional. If no owner information is available, this field will not appear in the report.
ownerAddress: Optional[str] # The mailing address of the account owner(s). This field is optional. If no owner information is available, this field will not appear in the report.
name: Optional[str] # The account name from the institution
type: Optional[str] # VOA: checking / savings / moneyMarket / cd / investment*
aggregationStatusCode: Optional[int] # Finicity aggregation status of the most recent aggregation attempt for this account (non-zero means the account was not accessed successfully for this report, and additional fields for this account may not be reliable)
# institutionLoginId: str # The institutionLoginId (represents one set of credentials at a particular institution, together with all accounts accessible using those credentials at that institution)
transactions: List[TransactionRecord] # A list of all transaction records for this account during the report period (VOI report includes deposit transactions only)
asset: Optional[AccountAssetRecord] # An asset record for the account
details: Optional[DetailsRecord] # A details record for the account
availableBalance: Optional[float] # The available balance for the account
balance: Optional[float] # The cleared balance of the account as-of balanceDate
balanceDate: Optional[int] # A timestamp showing when the balance was captured
averageMonthlyBalance: Optional[float] # The average monthly balance of this account
_unused_fields: dict # this is for forward compatibility and should be empty
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
id = data.pop('id')
number = data.pop('number')
ownerName = data.pop('ownerName', None)
ownerAddress = data.pop('ownerAddress', None)
name = data.pop('name', None)
type = data.pop('type', None)
aggregationStatusCode = data.pop('aggregationStatusCode', None)
# institutionLoginId = data.pop('institutionLoginId')
transactions_raw = data.pop('transactions', None)
transactions = [TransactionRecord.from_dict(d) for d in transactions_raw] if transactions_raw else []
asset_raw = data.pop('asset', None)
asset = AccountAssetRecord.from_dict(asset_raw) if asset_raw else None
details_raw = data.pop('details', None)
details = DetailsRecord.from_dict(details_raw) if details_raw else None
availableBalance = data.pop('availableBalance', None)
balance = data.pop('balance', None)
balanceDate = data.pop('balanceDate', None)
averageMonthlyBalance = data.pop('averageMonthlyBalance', None)
return VoaAccountRecord(
id=id,
number=number,
ownerName=ownerName,
ownerAddress=ownerAddress,
name=name,
type=type,
aggregationStatusCode=aggregationStatusCode,
# institutionLoginId=institutionLoginId,
transactions=transactions,
asset=asset,
details=details,
availableBalance=availableBalance,
balance=balance,
balanceDate=balanceDate,
averageMonthlyBalance=averageMonthlyBalance,
_unused_fields=data,
)
```
#### File: report/voa/voa_institution_record.py
```python
from dataclasses import dataclass
from typing import List
from lakey_finicity.models.report.voa.voa_account_record import VoaAccountRecord
@dataclass
class VoaInstitutionRecord(object):
_unused_fields: dict # this is for forward compatibility and should be empty
id: int
name: str
accounts: List[VoaAccountRecord]
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
id = data.pop('id')
name = data.pop('name')
accounts_raw = data.pop('accounts', None)
accounts = [VoaAccountRecord.from_dict(d) for d in accounts_raw] if accounts_raw else []
return VoaInstitutionRecord(
id=id,
name=name,
accounts=accounts,
_unused_fields=data,
)
```
#### File: report/voa/voa_report.py
```python
from dataclasses import dataclass
from typing import List, Optional
from lakey_finicity.models.report.report_constraints import ReportConstraints
from lakey_finicity.models.report.report_status import ReportStatus
from lakey_finicity.models.report.report_type import ReportType
from lakey_finicity.models.report.voa.asset_record import AssetRecord
from lakey_finicity.models.report.voa.voa_institution_record import VoaInstitutionRecord
@dataclass
class VoaReport(object):
id: str # ID of the report (UUID with max length 32 characters). VOI report ID will have “-voi” appended to the end of it to denote the report type.
portfolioId: Optional[str]
requestId: str # unique requestId for this specific call request
title: Optional[str]
consumerId: Optional[str] # ID of the consumer (UUID with max length 32 characters)
consumerSsn: Optional[str] # Last 4 digits of the report consumer's Social Security number
requesterName: Optional[str]
constraints: Optional[ReportConstraints]
type: Optional[ReportType] # voa or voi
status: Optional[ReportStatus] # inProgress or success or failure
createdDate: Optional[int]
startDate: Optional[int]
endDate: Optional[int]
days: Optional[int]
seasoned: Optional[bool]
institutions: Optional[List[VoaInstitutionRecord]]
assets: Optional[AssetRecord]
_unused_fields: dict # this is for forward compatibility and should be empty
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
id = data.pop('id')
requestId = data.pop('requestId')
portfolioId = data.pop('portfolioId', None)
title = data.pop('title', None)
consumerId = data.pop('consumerId', None)
consumerSsn = data.pop('consumerSsn', None)
type_str = data.pop('type', None)
type = ReportType(type_str) if type_str else None
constraints_raw = data.pop('constraints', None)
requesterName = data.pop('requesterName', None)
constraints = ReportConstraints.from_dict(constraints_raw) if constraints_raw else None
status_str = data.pop('status', None)
status = ReportStatus(status_str) if status_str else None
createdDate = data.pop('createdDate', None)
startDate = data.pop('startDate', None)
endDate = data.pop('endDate', None)
days = data.pop('days', None)
seasoned = data.pop('seasoned', None)
institutions_raw = data.pop('institutions', None)
institutions = [VoaInstitutionRecord.from_dict(d) for d in institutions_raw] if institutions_raw else None
assets_raw = data.pop('assets', None)
assets = AssetRecord.from_dict(assets_raw) if assets_raw else None
return VoaReport(
id=id,
portfolioId=portfolioId,
requestId=requestId,
title=title,
consumerId=consumerId,
consumerSsn=consumerSsn,
requesterName=requesterName,
constraints=constraints,
type=type,
status=status,
createdDate=createdDate,
startDate=startDate,
endDate=endDate,
days=days,
seasoned=seasoned,
institutions=institutions,
assets=assets,
_unused_fields=data,
)
```
#### File: report/voi/income_stream_status.py
```python
import enum
# https://community.finicity.com/s/article/Credit-Decisioning#generate_voi_report
class IncomeStreamStatus(enum.Enum):
active = "ACTIVE"
inactive = "INACTIVE"
@staticmethod
def from_description(description):
return IncomeStreamStatus(description)
```
#### File: report/voi/voi_institution_record.py
```python
from dataclasses import dataclass
from typing import List, Optional
from lakey_finicity.models.report.voi.voi_account_record import VoiAccountRecord
@dataclass
class VoiInstitutionRecord(object):
_unused_fields: dict # this is for forward compatibility and should be empty
id: int
name: Optional[str]
urlHomeApp: Optional[str] # voa only
accounts: List[VoiAccountRecord]
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
id = data.pop('id')
name = data.pop('name', None)
urlHomeApp = data.pop('urlHomeApp', None)
accounts_raw = data.pop('accounts', None)
accounts = [VoiAccountRecord.from_dict(d) for d in accounts_raw] if accounts_raw else []
return VoiInstitutionRecord(
id=id,
name=name,
urlHomeApp=urlHomeApp,
accounts=accounts,
_unused_fields=data,
)
```
#### File: lakey_finicity/resources/consumers.py
```python
from lakey_finicity.api_http_client import ApiHttpClient
from lakey_finicity.models import Consumer, BirthDate
from lakey_finicity.responses.create_consumer_response import CreateConsumerResponse
class Consumers(object):
def __init__(self, http_client: ApiHttpClient):
self.__http_client = http_client
# https://community.finicity.com/s/article/Create-Consumer
# POST /decisioning/v1/customers/{customerId}/consumer
def create(self, customer_id: int, first_name: str, last_name: str, address: str, city: str, state: str, zip: str, phone: str, ssn: str, birthday: BirthDate, email: str) -> str:
"""
Create a consumer record associated with the given customer. A consumer persists as the owner of any reports that are generated, even after the original customer is deleted from the system. A consumer must be created for the given customer before calling any of the Generate Report services.
If a consumer already exists for this customer, this service will return HTTP 409 (Conflict).
:param customer_id: ID of the customer
:param first_name: The consumer's first name(s) / given name(s)
:param last_name: The consumer's last name(s) / surname(s)
:param address: The consumer's street address
:param city: The consumer's city
:param state: The consumer's state
:param zip: The consumer's ZIP code
:param phone: The consumer's phone number
:param ssn: The consumer's 9-digit Social Security number (may include separators: nnn-nn-nnnn)
:param birthday: The consumer's birth date
:param email: The consumer's email address
:return:
"""
data = {
"firstName": first_name,
"lastName": last_name,
"address": address,
"city": city,
"state": state,
"zip": zip,
"phone": phone,
"ssn": ssn,
"birthday": birthday.to_padded_string_dict(),
"email": email,
}
path = f"/decisioning/v1/customers/{customer_id}/consumer"
response = self.__http_client.post(path, data)
response_dict = response.json()
return CreateConsumerResponse.from_dict(response_dict).id
# https://community.finicity.com/s/article/Report-Consumers#get_consumer_for_customer
# GET /decisioning/v1/customers/{customerId}/consumer
def get_for_customer(self, customer_id: int) -> Consumer:
"""
Get the details of a consumer record.
If a consumer has not been created for this customer, the service will return HTTP 404 (Not Found)
:param customer_id:
:return:
"""
path = f"/decisioning/v1/customers/{customer_id}/consumer"
response = self.__http_client.get(path)
response_dict = response.json()
return Consumer.from_dict(response_dict)
# https://community.finicity.com/s/article/Report-Consumers#get_consumer
# GET /decisioning/v1/consumers/{consumerId}
def get(self, consumer_id: str) -> Consumer:
"""
Get the details of a consumer record.
:param consumer_id: ID of the consumer (UUID with max length 32 characters)
:return:
"""
path = f"/decisioning/v1/consumers/{consumer_id}"
response = self.__http_client.get(path)
response_dict = response.json()
return Consumer.from_dict(response_dict)
# https://community.finicity.com/s/article/Report-Consumers#modify_consumer
# PUT /decisioning/v1/consumers/{consumerId}
def modify(self, consumer_id: str, first_name: str, last_name: str, address: str, city: str, state: str, zip: str, phone: str, ssn: str, birthday: BirthDate, email: str):
"""
Modify the details for an existing consumer. All fields are required for a consumer record, but individual fields for this call are optional because fields that are not specified will be left unchanged.
:param consumer_id: ID of the consumer (UUID with max length 32 characters)
:param first_name: The consumer's first name(s) / given name(s)
:param last_name: The consumer's last name(s) / surname(s)
:param address: The consumer's street address
:param city: The consumer's city
:param state: The consumer's state
:param zip: The consumer's ZIP code
:param phone: The consumer's phone number
:param ssn: The consumer's 9-digit Social Security number (may include separators: nnn-nn-nnnn)
:param birthday: The consumer's birth date
:param email: The consumer's email address
:return:
"""
path = f"/decisioning/v1/consumers/{consumer_id}"
data = {
"firstName": first_name,
"lastName": last_name,
"address": address,
"city": city,
"state": state,
"zip": zip,
"phone": phone,
"ssn": ssn,
"birthday": birthday.to_padded_string_dict(),
"email": email,
}
self.__http_client.put(path, data=data)
```
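A hedged sketch of using the resource class above; the credentials and customer ID are placeholders.

```python
from lakey_finicity.api_http_client import ApiHttpClient
from lakey_finicity.resources.consumers import Consumers

http = ApiHttpClient(app_key="KEY", partner_id="PARTNER", partner_secret="SECRET")
consumers = Consumers(http)

# Fetch the consumer attached to a customer; per the docstring this fails
# with HTTP 404 if no consumer has been created for that customer yet.
consumer = consumers.get_for_customer(customer_id=41442200)
print(consumer.id, consumer.firstName, consumer.lastName)
```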
#### File: lakey_finicity/resources/customers.py
```python
from typing import Optional
from lakey_finicity.api_http_client import ApiHttpClient
from lakey_finicity.models import Customer
from lakey_finicity.queries.customers_query import CustomersQuery
from lakey_finicity.responses.create_customer_response import CreateCustomerResponse
from lakey_finicity.utils import validate_username
class Customers(object):
def __init__(self, http_client: ApiHttpClient):
self.__http_client = http_client
def get(self, customer_id: int) -> Customer:
"""
:param customer_id: ID of the customer
:return:
"""
path = f"/aggregation/v1/customers/{customer_id}"
response = self.__http_client.get(path)
response_dict = response.json()
return Customer.from_dict(response_dict)
def get_by_username(self, username: str) -> Optional[Customer]:
qry = CustomersQuery(self.__http_client, username=username)
return qry.first_or_none()
def query(self, search_term: Optional[str] = None) -> CustomersQuery:
"""
Find all customers enrolled by the current partner, where the search text is found in the customer's username or any combination of firstName and lastName fields. If no search text is provided, return all customers.
:param search_term: Text to match, or leave empty to return all customers.
:return:
"""
return CustomersQuery(self.__http_client, search_term=search_term)
# https://community.finicity.com/s/article/Add-Customer
def add(self, username: str, first_name: str, last_name: str) -> int:
"""
Enroll an active customer (the actual owner of one or more real-world accounts). The customer's account transactions will be refreshed every night.
This service is not available from the Test Drive. Calls to this service before enrolling in a paid plan will return HTTP 429 (Too Many Requests).
:param username: The customer's username, assigned by the partner (a unique identifier), following these rules:
minimum 6 characters
maximum 255 characters
any mix of uppercase, lowercase, numeric, and non-alphabet special characters ! @ . # $ % & * _ - +
the use of email in this field is discouraged
it is recommended to use a unique non-email identifier
Use of special characters may result in an error (e.g. í, ü, etc.)
:param first_name: The customer's first name(s) / given name(s) (optional)
:param last_name: The customer's last name(s) / surname(s) (optional)
:return:
"""
validate_username(username)
data = {
'username': username,
'firstName': first_name,
'lastName': last_name,
}
path = f"/aggregation/v1/customers/active"
response = self.__http_client.post(path, data)
response_dict = response.json()
return CreateCustomerResponse.from_dict(response_dict).id
# https://community.finicity.com/s/article/Modify-Customer
def modify(self, customer_id: int, first_name: Optional[str], last_name: Optional[str]):
"""
Modify the details for an enrolled customer. You must specify either the first name, the last name, or both in the request.
If the service is successful, HTTP 204 (No Content) will be returned.
:param customer_id: ID of the customer to modify
:param first_name: The customer's first name(s) / given name(s) (optional)
:param last_name: The customer's last name(s) / surname(s) (optional)
:return:
"""
path = f"/aggregation/v1/customers/{customer_id}"
data = {}
if first_name:
data['firstName'] = first_name
if last_name:
data['lastName'] = last_name
self.__http_client.put(path, data=data)
# https://community.finicity.com/s/article/Delete-Customer
def delete(self, customer_id: int):
"""
Completely remove a customer from the system. This will remove the customer and all associated accounts and transactions.
(Note that the request and responses is the same for JSON or XML clients.)
Use this service carefully! It will not pause for confirmation before performing the operation!
:param customer_id: ID of the customer to delete
:return:
"""
path = f"/aggregation/v1/customers/{customer_id}"
self.__http_client.delete(path)
```
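A sketch of the customer lifecycle exposed above (add, fetch, modify, delete); the credentials are placeholders and the username is just an example that satisfies the documented 6-character minimum.

```python
from lakey_finicity.api_http_client import ApiHttpClient
from lakey_finicity.resources.customers import Customers

http = ApiHttpClient(app_key="KEY", partner_id="PARTNER", partner_secret="SECRET")
customers = Customers(http)

new_id = customers.add(username="customer_12345", first_name="Jane", last_name="Doe")
print(customers.get(new_id))

customers.modify(new_id, first_name="Janet", last_name=None)  # only the first name changes
customers.delete(new_id)  # removes the customer and all associated accounts
```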
#### File: lakey_finicity/resources/institutions.py
```python
from typing import Optional
from lakey_finicity.api_http_client import ApiHttpClient
from lakey_finicity.models import Institution
from lakey_finicity.queries.institutions_query import InstitutionsQuery
from lakey_finicity.responses.institution_detail_response import InstitutionDetailResponse
class Institutions(object):
def __init__(self, http_client: ApiHttpClient):
self.__http_client = http_client
def query(self, search_term: Optional[str] = None) -> InstitutionsQuery:
"""
:param search_term: search text to match against the name, urlHomeApp, or urlLogonApp
:return:
"""
return InstitutionsQuery(self.__http_client, search_term)
# https://community.finicity.com/s/article/Get-Institution
def get(self, institution_id: str) -> Institution:
"""Get details for the specified institution without the login form.
:param institution_id: ID of the institution to retrieve
:return:
"""
path = f"/institution/v2/institutions/{institution_id}"
response = self.__http_client.get(path)
response_dict = response.json()
return InstitutionDetailResponse.from_dict(response_dict).institution
```
#### File: lakey_finicity/resources/transactions.py
```python
from typing import Optional, List
from lakey_finicity.api_http_client import ApiHttpClient
from lakey_finicity.models import SortOrder, AnsweredMfaQuestion
from lakey_finicity.queries.transaction_query import TransactionsQuery
from lakey_finicity.responses.accounts_response import AccountsResponse
class Transactions(object):
def __init__(self, http_client: ApiHttpClient):
self.__http_client = http_client
def query(self, customer_id: int, from_date: int, to_date: int, sort: SortOrder = SortOrder.asc, include_pending: bool = True, account_id: Optional[str] = None) -> TransactionsQuery:
"""
Get all transactions available for this customer account within the given date range. This service supports paging and sorting by transactionDate (or postedDate if no transaction date is provided), with a maximum of 1000 transactions per request.
Standard consumer aggregation provides up to 180 days of transactions prior to the date each account was added to the Finicity system. To access older transactions, you must first call the Cash Flow Verification service Load Historic Transactions for Account.
There is no limit for the size of the window between fromDate and toDate; however, the maximum number of transactions returned in one page is 1000.
If the value of moreAvailable in the response is true, you can retrieve the next page of results by increasing the value of the start parameter in your next request:
...&start=6&limit=5
:param customer_id: The ID of the customer whose transactions are to be retrieved
:param account_id: The ID of the account whose transactions are to be retrieved
:param from_date: Starting timestamp for the date range (required) (see Handling Dates and Times)
:param to_date: Ending timestamp for the date range (required, must be greater than fromDate) (see Handling Dates and Times)
:param sort: Sort order: asc for ascending order (oldest transactions are on page 1), descfor descending order (newest transactions are on page 1).
:param include_pending: true to include pending transactions if available.
:return:
"""
return TransactionsQuery(self.__http_client, customer_id, from_date, to_date, sort, include_pending, account_id=account_id)
# TODO add categories to query?
# :param categories: Utilities, Mobile Phone, Television (this is an example of a comma delimited list)
# TXPush Services
# https://community.finicity.com/s/article/Enable-TxPUSH-Notifications
# POST /aggregation/v1/customers/{customerId}/accounts/{accountId}/txpush
def enable_push_notifications(self, customer_id: int, account_id: str, callback_url: str):
"""
TxPUSH services allow an app to register a TxPUSH Listener service to receive notifications whenever a new transaction appears on an account.
:param customer_id: The Finicity ID of the customer who owns the account
:param account_id: The Finicity ID of the account whose events will be sent to the TxPUSH Listener
:param callback_url: The TxPUSH Listener URL to receive TxPUSH notifications (must use https protocol for any real-world account)
:return:
"""
data = {
"callbackUrl": callback_url,
}
path = f"/aggregation/v1/customers/{customer_id}/accounts/{account_id}/txpush"
self.__http_client.post(path, data)
# 201 created
# https://community.finicity.com/s/article/Disable-TxPUSH-Notifications
# DELETE /aggregation/v1/customers/{customerId}/accounts/{accountId}/txpush
def disable_push_notifications(self, customer_id: int, account_id: str):
"""
Disable all TxPUSH notifications for the indicated account. No more notifications will be sent for account or transaction events.
:param customer_id: The ID of the customer who owns the account
:param account_id: The Finicity ID of the account whose events will be sent to the TxPUSH Listener
:return:
"""
path = f"/aggregation/v1/customers/{customer_id}/accounts/{account_id}/txpush"
self.__http_client.delete(path)
# success = 204 no content
# https://community.finicity.com/s/article/Delete-TxPUSH-Subscription
# DELETE /aggregation/v1/customers/{customerId}/subscriptions/{subscriptionId}
def delete_push_subscription(self, customer_id: int, subscription_id: str):
"""
Delete a specific subscription for a class of events (account or transaction events) related to an account. No more notifications will be sent for these events.
:param customer_id: The ID of the customer who owns the account
        :param subscription_id: The ID of the specific subscription to be deleted (returned from Enable TxPUSH Notifications)
:return:
"""
path = f"/aggregation/v1/customers/{customer_id}/subscriptions/{subscription_id}"
self.__http_client.delete(path)
# Account History Aggregation
# https://community.finicity.com/s/article/Load-Historic-Transactions-for-Account
# POST /aggregation/v1/customers/{customerId}/accounts/{accountId}/transactions/historic
def load_historic_transactions_for_account(self, customer_id: int, account_id: str):
"""
Connect to the account's financial institution and load up to twelve months of historic transactions for the account. For some institutions, up to two years of history may be available.
This is a premium service. The billing rate is the variable rate for Cash Flow Verification under the current subscription plan. The billable event is a call to this service specifying a customerId that has not been seen before by this service. (If this service is called multiple times with the same customerId, to load transactions from multiple accounts, only one billable event has occurred.)
HTTP status of 204 means historic transactions have been loaded successfully. The transactions are now available by calling Get Customer Account Transactions.
        HTTP status of 203 means the response contains an MFA challenge. Contact your Account Manager or Systems Engineers to determine the best route to handle this HTTP status code.
        The recommended timeout setting for this request is 180 seconds in order to receive a response. However, you can terminate the connection after making the call; the operation will still complete. You will have to pull the account records and check for an updated aggregation attempt date to know when the refresh is complete.
This service usually requires the HTTP header Content-Length: 0 because it is a POST request with no request body.
The date range sent to the institution is calculated from the account's createdDate. This means that calling this service a second time for the same account normally will not add any new transactions for the account. For this reason, a second call to this service for a known accountId will usually return immediately with HTTP 204.
In a few specific scenarios, it may be desirable to force a second connection to the institution for a known accountId. Some examples are:
The institution's policy has changed, making more transactions available.
Finicity has now added Cash Flow Verification support for the institution.
The first call encountered an error, and the resulting Aggregation Ticket has now been fixed by the Finicity Support Team.
In these cases, the POST request can contain the parameter force=true in the request body to force the second connection.
:param customer_id: The ID of the customer who owns the account
:param account_id: The Finicity ID of the account to be refreshed
:return:
"""
path = f"/aggregation/v1/customers/{customer_id}/accounts/{account_id}/transactions/historic"
self.__http_client.post(path, data={})
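        # Hypothetical sketch (not implemented here): per the note above, a forced second
        # connection could be requested by including force=true in the request body, e.g.
        #   self.__http_client.post(path, data={"force": True})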
# POST /aggregation/v1/customers/{customerId}/accounts/{accountId}/transactions/historic/mfa
def load_historic_transactions_for_account_with_mfa_answers(self, mfaSession: str, customerId: str, accountId: str, questions: List[AnsweredMfaQuestion]):
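        """Submit answers to an MFA challenge (identified by the mfaSession header) so the historic-transaction load can continue."""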
headers = {
'mfaSession': mfaSession
}
data = {
'questions': [q.to_dict() for q in questions],
}
path = f"/aggregation/v1/customers/{customerId}/accounts/{accountId}/transactions/historic"
self.__http_client.post(path, data=data, extra_headers=headers)
# https://community.finicity.com/s/article/Refresh-Customer-Accounts-non-interactive
# POST /aggregation/v1/customers/{customerId}/accounts
def refresh_customer_accounts(self, customer_id: int):
"""
Connect to all of the customer's financial institutions and refresh the transaction data for all of the customer's accounts. This is a non-interactive refresh, so any MFA challenge will cause the account to fail with an aggregationStatusCode value of 185 or 187.
        To recover an account that has state 185 or 187, call Refresh Institution Login Accounts during an interactive session with the customer, prompt the customer with the MFA challenge that is returned from that call, and then send that response to Refresh Institution Login Accounts (with MFA Answers).
This service retrieves account data from the institution. This usually returns quickly, but in some scenarios may take a few minutes to complete. See Asynchronous Aggregation.
        Client apps are not permitted to automate calls to the Refresh services. Active accounts are automatically refreshed by Finicity once per day. Apps may call Refresh services for a specific customer when the customer opens the app, or when the customer directly invokes a Refresh action from the app.
Because many financial institutions only post transactions once per day, calling Refresh repeatedly is usually a waste of resources and is not recommended.
This service requires the HTTP header Content-Length: 0 because it is a POST request with no request body.
The recommended timeout setting for this request is 120 seconds.
:param customer_id: The ID of the customer who owns the accounts
:return:
"""
headers = {
'Content-Length': '0',
}
path = f"/aggregation/v1/customers/{customer_id}/accounts"
response = self.__http_client.post(path, None, extra_headers=headers)
response_dict = response.json()
return AccountsResponse.from_dict(response_dict).accounts
# https://community.finicity.com/s/article/Refresh-Institution-Login-Accounts-Non-Interactive
# POST /aggregation/v1/customers/{customerId}/institutionLogins/{institutionLoginId}/accounts
def refresh_institution_login_accounts(self, customer_id: int, institution_login_id: str):
"""
Connect to a financial institution and refresh transaction data for all accounts associated with a given institutionLoginId.
        Client apps are not permitted to automate calls to the Refresh services. Active accounts are automatically refreshed by Finicity once per day. Apps may call Refresh services for a specific customer when the customer opens the app, or when the customer directly invokes a Refresh action from the app.
        Because many financial institutions only post transactions once per day, calling Refresh repeatedly is usually a waste of resources and is not recommended.
        The recommended timeout setting for this request is 120 seconds in order to receive a response. However, you can terminate the connection after making the call; the operation will still complete. You will have to pull the account records and check for an updated aggregation attempt date to know when the refresh is complete.
:param customer_id: The ID of the customer who owns the account
:param institution_login_id: The institution login ID (from the account record)
:return:
"""
headers = {
'Content-Length': '0',
}
path = f"/aggregation/v1/customers/{customer_id}/institutionLogins/{institution_login_id}/accounts"
response = self.__http_client.post(path, None, extra_headers=headers)
response_dict = response.json()
return AccountsResponse.from_dict(response_dict).accounts
```
#### File: lakey_finicity/responses/create_report_response.py
```python
from dataclasses import dataclass
@dataclass
class CreateReportResponse(object):
accountIds: str
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
accountIds = data.pop('accountIds')
return CreateReportResponse(
accountIds=accountIds,
)
```
#### File: lakey_finicity/responses/customers_list_response.py
```python
from dataclasses import dataclass
from typing import List
from lakey_finicity.models import Customer
# https://community.finicity.com/s/article/201703219-Customers#get_customers
@dataclass
class CustomersListResponse(object):
found: int # Total number of records matching search criteria
    displaying: int  # Number of records in this response
    moreAvailable: bool  # True if this response does not contain the last record in the result set
customers: List[Customer]
_unused_fields: dict # this is for forward compatibility and should be empty
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
found = data.pop('found')
displaying = data.pop('displaying')
moreAvailable = data.pop('moreAvailable')
customers_raw = data.pop('customers')
customers = [Customer.from_dict(d) for d in customers_raw]
return CustomersListResponse(
found=found,
displaying=displaying,
moreAvailable=moreAvailable,
customers=customers,
_unused_fields=data,
)
```
#### File: lakey_finicity/responses/transactions_list_response.py
```python
from dataclasses import dataclass
from typing import List
from lakey_finicity.models import SortOrder, Transaction
# https://community.finicity.com/s/article/202460245-Transactions#get_customer_transactions_v3
@dataclass
class TransactionsListResponse(object):
found: int # Total number of records matching search criteria
    displaying: int  # Number of records in this response
    moreAvailable: bool  # True if this response does not contain the last record in the result set
    fromDate: int  # Value of the fromDate request parameter that generated this response
    toDate: int  # Value of the toDate request parameter that generated this response
    sort: SortOrder  # Value of the sort request parameter that generated this response
transactions: List[Transaction]
_unused_fields: dict # this is for forward compatibility and should be empty
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
found = data.pop('found', 0)
displaying = data.pop('displaying', 0)
moreAvailable = data.pop('moreAvailable', False)
fromDate = data.pop('fromDate', 0)
toDate = data.pop('toDate', 0)
sort_str = data.pop('sort', 'asc')
sort = SortOrder(sort_str) if sort_str else SortOrder.asc
transactions_raw = data.pop('transactions')
transactions = [Transaction.from_dict(d) for d in transactions_raw]
return TransactionsListResponse(
found=found,
displaying=displaying,
moreAvailable=moreAvailable,
fromDate=fromDate,
toDate=toDate,
sort=sort,
transactions=transactions,
_unused_fields=data,
)
```
#### File: models/consumer/test_consumer.py
```python
import json
import unittest
from lakey_finicity.models import Consumer
# https://community.finicity.com/s/article/Report-Consumers
EXAMPLE_CONSUMER_PROPOSED = '''
{
"firstName": "FIRST_NAME",
"lastName": "LAST_NAME",
"address": "ADDRESS",
"city": "CITY",
"state": "STATE",
"zip": "ZIP",
"phone": "PHONE",
"ssn": "123-45-6789",
"birthday": {
"year": "1972",
"month": "07",
"dayOfMonth": "03"
},
"email": "EMAIL_ADDRESS"
}
'''
EXAMPLE_CONSUMER = '''
{
"id": "0h7h3r301md83",
"firstName": "FIRST_NAME",
"lastName": "LAST_NAME",
"address": "ADDRESS",
"city": "CITY",
"state": "STATE",
"zip": "ZIP",
"phone": "PHONE",
"ssn": "6789",
"birthday": {
"year": "1972",
"month": "07",
"dayOfMonth": "03"
},
"email": "EMAIL_ADDRESS",
"createdDate": 1507658822
}
'''
class TestConsumer(unittest.TestCase):
def test_account_detail_response(self):
response_dict = json.loads(EXAMPLE_CONSUMER)
response = Consumer.from_dict(response_dict)
self.assertEqual({}, response._unused_fields)
```
#### File: tests/response/test_new_report_response.py
```python
import json
import unittest
from lakey_finicity.responses.new_report_response import NewReportResponse
# https://community.finicity.com/s/article/Credit-Decisioning
EXAMPLE_START_VOI_RESPONSE = '''
{
"id": "bx28qwkdbw3u",
"requestId": "bmg7d3qrmr",
"consumerId": "3860718db6febd83c64d9d4c523f39f7",
"consumerSsn": "5555",
"constraints": {},
"type": "voi",
"status": "inProgress"
}
'''
# https://community.finicity.com/s/article/Credit-Decisioning
EXAMPLE_START_VOA_RESPONSE = '''
{
"id": "bx28qwkdbw3u",
"requestId": "bmg7d3qrmr",
"consumerId": "3860718db6febd83c64d9d4c523f39f7",
"consumerSsn": "5555",
"constraints": {},
"type": "voa",
"status": "inProgress"
}
'''
class TestNewReportResponse(unittest.TestCase):
def test_voi_short(self):
response_dict = json.loads(EXAMPLE_START_VOI_RESPONSE)
response = NewReportResponse.from_dict(response_dict)
self.assertEqual({}, response._unused_fields)
if response.institutions:
for institution in response.institutions:
self.assertEqual({}, institution._unused_fields)
self.assertEqual({}, institution.address._unused_fields)
def test_voa_short(self):
response_dict = json.loads(EXAMPLE_START_VOA_RESPONSE)
response = NewReportResponse.from_dict(response_dict)
self.assertEqual({}, response._unused_fields)
if response.institutions:
for institution in response.institutions:
self.assertEqual({}, institution._unused_fields)
self.assertEqual({}, institution.address._unused_fields)
```
|
{
"source": "jeremydhoon/nyremodel",
"score": 3
}
|
#### File: jeremydhoon/nyremodel/rentregress.py
```python
import sys
import json
from matplotlib import pyplot
import numpy as np
import pandas as pd
from sklearn.metrics import explained_variance_score, mean_squared_error, median_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from xgboost.sklearn import XGBRegressor
import compass
import dcf
MAX_PRICE_DOLLARS = 15000
#MAX_PRICE_DOLLARS = 3000000
FEATURE_COLUMNS = [
"neighborhood",
"sq_ft",
"beds",
"baths",
"year_opened",
"building_units",
"unit_type",
"parking_spaces",
"amenity",
]
COMMON_AMENITIES = (
'Elevator',
'Laundry in Building',
'Dishwasher',
'Full-Time Doorman',
'Gym',
'Concierge',
'Washer / Dryer in Unit',
'Common Roof Deck',
'Bike Room',
'High Ceilings',
'Garage',
'Voice Intercom',
'Hardwood Floors',
'Common Outdoor Space',
'Pet Friendly',
'Doorman',
'Walk Up',
'Roof Deck',
'Private Outdoor Space',
'Oversized Windows',
)
def label_encode(df, feature):
enc = LabelBinarizer()
enc.fit(df[feature])
out_df = pd.DataFrame(enc.transform(df[feature]))
col_names = [feature + "_" + cls for cls in enc.classes_]
return out_df.rename(columns=dict(enumerate(col_names)))
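# Illustrative example (hypothetical data): for a feature "unit_type" with classes
# ["condo", "coop", "rental"], LabelBinarizer one-hot encodes each row, so the frame
# returned above has columns unit_type_condo, unit_type_coop and unit_type_rental.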
def add_amenities(df):
amenities_set = df["amenities"].map(lambda x: set(json.loads(x)) if isinstance(x, str) else set())
return pd.DataFrame(
dict(
("amenity_" + amenity, amenities_set.map(lambda x: float(amenity in x)))
for amenity in COMMON_AMENITIES
)
)
def clean_features(df):
#categorical_cols = ["neighborhood", "unit_type"]
categorical_cols = []
drop_cols = ["neighborhood", "unit_type"]
cols = [
label_encode(df, col)
for col in categorical_cols
]
out = pd.concat([df, *cols, add_amenities(df)], axis=1).drop(columns=categorical_cols + drop_cols)
return out
def compute_model_metrics(targets, predicted_targets):
return {
"explained variance": explained_variance_score(targets, predicted_targets),
"RMS error": np.sqrt(mean_squared_error(targets, predicted_targets)),
"Median absolute error:": median_absolute_error(targets, predicted_targets),
}
def is_feature_col(col_name):
return any([col_name.startswith(feature_name) for feature_name in FEATURE_COLUMNS])
def count_sum(acc, lst):
for el in lst:
acc[el] = acc.get(el, 0) + 1
return acc
def select_feature_columns(df):
return [col for col in df.columns if is_feature_col(col)]
def train(raw_df):
#df = raw_df[raw_df["neighborhood"].isin(set([loc["name"] for loc in compass.BK_LOCATIONS]))]
df = raw_df
df = clean_features(df)
df = df[df["price_dollars"] < MAX_PRICE_DOLLARS]
df = df[df["address"] != "117 Underhill Avenue"]
features = df[select_feature_columns(df)]
targets = df["price_dollars"]
features_train, features_test, targets_train, targets_test = train_test_split(features, targets, test_size=0.10)
reg = XGBRegressor(
#eta=0.1,
max_depth=2,
colsample_bytree=0.25,
)
reg.fit(features_train, targets_train)
predicted_train_targets = reg.predict(features_train)
print("Training metrics:")
print(compute_model_metrics(targets_train, predicted_train_targets))
predicted_test_targets = reg.predict(features_test)
print("Test metrics:")
print(compute_model_metrics(targets_test, predicted_test_targets))
return reg
def zero_if_nan(f):
return 0 if np.isnan(f) else f
def get_irr(row):
# permalink,address,neighborhood,latitude,longitude,price_dollars,original_price_dollars,sq_ft,beds,baths,year_opened,building_id,building_units,monthly_sales_charges,monthly_sales_charges_incl_taxes,unit_type,first_listed,parking_spaces,amenities
# =if(K6014>=2010,200,if(K6014>=2000,300,400))*if(G6014>=2000000,1.5,1)*if(I6014<=2,1,2)
year_built = row["year_opened"]
capital_reserve = (
(200 if year_built >= 2010 else 300 if year_built >= 2000 else 400) *
(1.5 if row["price_dollars"] >= 2000000 else 1) *
(2 if row["beds"] >= 2 else 1)
)
try:
return dcf.get_unlevered_returns(
purchase_price_dollars=1575000,
sq_ft=row["sq_ft"],
closing_costs_pct=0.04,
initial_downtime_months=3,
interim_downtime_months=1,
lease_length_months=36,
annual_rent_growth_pct=0.02,
annual_expense_growth_pct=0.02,
monthly_rent_dollars=row["predicted_rent"],
monthly_utilities_rent_pct=0.025,
monthly_tax_dollars=zero_if_nan(row["monthly_sales_charges_incl_taxes"]) - zero_if_nan(row["monthly_sales_charges"]),
monthly_common_charges_dollars=zero_if_nan(row["monthly_sales_charges"]),
monthly_homeowners_insurance_dollars=100,
monthly_capital_reserve_dollars=capital_reserve,
hold_period_months=60,
exit_cap_pct=0.035,
exit_sq_ft_price_ceiling_dollars=3000,
exit_costs_pct=0.08,
).irr_pct
except Exception as error:
import pdb; pdb.set_trace()
raise
def regress(sales_df, reg):
clean_df = clean_features(sales_df)
df = clean_df[select_feature_columns(clean_df)]
sales_df["predicted_rent"] = reg.predict(df)
sales_df["irr"] = np.array([get_irr(r) for _, r in sales_df.iterrows()])
def main(argv):
rentals_df = pd.read_csv(argv[1])
sales_df = pd.read_csv(argv[2])
reg = train(rentals_df)
regress(sales_df, reg)
sales_df.to_csv(argv[3])
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
|
{
"source": "jeremydmoore/automating_scantailor",
"score": 3
}
|
#### File: scripts/python/applescript.py
```python
from subprocess import PIPE, run
# ============ functions ============ #
def convert_applescript_output_to_python_list(applescript_subprocess):
return [x.strip('\n').strip(' ') for x in applescript_subprocess.stdout.split(',')]
def display_dialog(message):
command = [f'Tell application \"System Events\" to display dialog \"{message}\"']
dialog = process(command)
return dialog
def display_notification(message):
command = [f'Tell application \"System Events\" to display notification \"{message}\"']
dialog = process(command)
return dialog
def get_user_input_int(value_name, default_value, attempts_left=5):
user_input = None
# only process if the default value is of the expected input type
if isinstance(default_value, int):
command = [f'set value_name to text returned of (display dialog "Enter {value_name} (type: integer)\nAttempts left: {attempts_left}" default answer "{default_value}")', 'return value_name']
user_input = command_to_python_list(command)
if user_input:
# returned value should be a single string in a list
user_input = user_input[0]
try:
user_input = int(user_input)
except ValueError:
if attempts_left > 0:
attempts_left = attempts_left - 1
user_input = get_user_input_int(value_name, default_value, attempts_left=attempts_left)
else:
user_input = None
return user_input
def get_user_input(value_name, default_value, attempts_left=5):
user_input = None
command = [f'set value_name to text returned of (display dialog "Enter {value_name}" default answer "{default_value}")', 'return value_name']
user_input = command_to_python_list(command)
if user_input:
# returned value should be a single string in a list
user_input = user_input[0]
return user_input
def command_to_python_list(command):
applescript_output = process(command)
python_list = convert_applescript_output_to_python_list(applescript_output)
return python_list
def process(command):
osascript_run_command = ['osascript']
if isinstance(command, list):
for line in command:
osascript_run_command.append('-e')
osascript_run_command.append(line)
else:
osascript_run_command.append('-e')
osascript_run_command.append(command)
applescript_subprocess = run(osascript_run_command, encoding='utf-8', stdout=PIPE, stderr=PIPE)
return applescript_subprocess
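# Usage sketch (illustrative command): each element of a list command becomes its own
# osascript -e argument, so
#   process(['set x to 1 + 1', 'return x'])
# runs `osascript -e 'set x to 1 + 1' -e 'return x'` and returns the CompletedProcess.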
def process_script(script_path, args=None):
shell_input = ['osascript', str(script_path)]
if args:
if isinstance(args, list):
for arg in args:
shell_input = shell_input + [arg]
else:
shell_input = shell_input + [args]
applescript_subprocess = run(shell_input, encoding='utf-8', stdout=PIPE, stderr=PIPE)
return applescript_subprocess
def script_to_python_list(script_path, args=None):
applescript_subprocess = process_script(script_path, args)
python_list = convert_applescript_output_to_python_list(applescript_subprocess)
return python_list
```
#### File: scripts/python/move_scantailored_volume_step1-done.py
```python
from pathlib import Path
from shutil import copy2
# 3rd party modules
from PIL import Image
# my modules (located next to this script)
import applescript
import config
# ============ lookup_dicts ============ #
compression_lookup_dict = {1: 'uncompressed',
4: 'group4',
5: 'LZW',
6: 'JPEG',
}
# ============ functions ============ #
def get_compression(image):
compression = image.tag_v2[259]
compression = compression_lookup_dict[compression]
return compression
def decompress_continuous_tone(image, image_path):
compression = get_compression(image)
if compression != 'group4': # save image uncompressed
image.save(image_path, tiffinfo=image.tag, compression=None, dpi=image.info['dpi'])
else:
image.save(image_path, tiffinfo=image.tag, compression=compression, dpi=image.info['dpi'])
def move_dir(to_dir_path):
new_dir_path = config.step2_dir_path.joinpath(to_dir_path.name)
# move directory with rename
to_dir_path.rename(new_dir_path)
if new_dir_path.is_dir():
volume_dir_path = new_dir_path
else:
raise ValueError(f'{new_dir_path} is not a directory')
# get list of images in out directory
tif_path_list = sorted(volume_dir_path.joinpath('out').glob('*.tif'))
new_dir_path = config.step3_dir_path.joinpath(volume_dir_path.name)
new_dir_path.mkdir()
# copy out tifs into new directory in step3_dir_path
for tif_path in tif_path_list:
new_tif_path = new_dir_path.joinpath(tif_path.name)
copy2(tif_path, new_tif_path)
cmd = f'{to_dir_path.name} \
---> \
{new_dir_path}'
applescript.display_notification(cmd)
def copy_scantailor_images(from_dir_path, to_dir_path, decompress=True):
# create new directory in to_dir_path
new_dir_path = to_dir_path.joinpath(from_dir_path.name)
new_dir_path.mkdir()
# get list of images in out directory and copy to new_dir_path
tif_path_list = list(from_dir_path.joinpath('out').glob('*.tif'))
for tif_path in tif_path_list:
new_tif_path = new_dir_path.joinpath(tif_path.name)
if decompress: # then we check for compression
            image = Image.open(tif_path)
decompress_continuous_tone(image, new_tif_path)
else:
copy2(tif_path, new_tif_path)
cmd = f'{to_dir_path.name} \
---> \
{new_dir_path}'
applescript.display_notification(cmd)
if __name__ == '__main__':
# verify config file
config.verify()
# get list of volumes in directory
volume_dir_path_list = sorted([x for x in config.step1_dir_path.iterdir() if x.is_dir()])
# grab 1 volume, which is the first 1 and move it
step1_dir_path = volume_dir_path_list[0]
step2_dir_path = config.step2_dir_path.joinpath(step1_dir_path.name)
step1_dir_path.rename(step2_dir_path)
step3_dir_path = config.step3_dir_path.joinpath(step2_dir_path.name)
    copy_scantailor_images(step2_dir_path, step3_dir_path)
# for batch processing
# for volume_dir_path in volume_dir_path_list:
# print(f'{volume_dir_path.name}')
# move_dir(volume_dir_path)
```
|
{
"source": "jeremydonahue/kittenrescue",
"score": 2
}
|
#### File: kittenrescue/controllers/admin.py
```python
@auth.requires_login()
def index():
user_info = db().select(db.auth_user.first_name,
db.auth_user.last_name,
db.auth_user.orientation_attended,
db.auth_user.phone,
db.auth_user.zip,
db.auth_user.skills,
db.auth_user.email)
return dict(message="hello from admin.py",
user_info=user_info)
```
|
{
"source": "jeremy-donson/magma",
"score": 2
}
|
#### File: magma/monitord/icmp_monitoring.py
```python
import asyncio
import logging
from typing import Dict, List, Optional
from magma.common.job import Job
from magma.magmad.check.network_check.ping import (PingCommandResult,
PingInterfaceCommandParams,
ping_interface_async)
NUM_PACKETS = 4
DEFAULT_POLLING_INTERVAL = 60
TIMEOUT_SECS = 10
CHECKIN_INTERVAL = 10
class ICMPMonitoring(Job):
"""
Class that handles main loop to send ICMP ping to valid subscribers.
"""
def __init__(self, monitoring_module, polling_interval: int, service_loop,
mtr_interface: str):
super().__init__(interval=CHECKIN_INTERVAL, loop=service_loop)
self._MTR_PORT = mtr_interface
logging.info("Running on interface %s..." % self._MTR_PORT)
# Matching response time output to get latency
self._polling_interval = max(polling_interval,
DEFAULT_POLLING_INTERVAL)
self._loop = service_loop
self._module = monitoring_module
async def _ping_targets(self, hosts: List[str],
targets: Optional[Dict] = None):
"""
Sends a count of ICMP pings to target IP address, returns response.
Args:
hosts: List of ip addresses to ping
targets: List of valid subscribers to ping to
Returns: (stdout, stderr)
"""
if targets:
ping_params = [
PingInterfaceCommandParams(host, NUM_PACKETS, self._MTR_PORT,
TIMEOUT_SECS) for host in hosts]
ping_results = await ping_interface_async(ping_params, self._loop)
ping_results_list = list(ping_results)
for host, sub, result in zip(hosts, targets, ping_results_list):
self._save_ping_response(sub, host, result)
def _save_ping_response(self, target_id: str, ip_addr: str,
ping_resp: PingCommandResult) -> None:
"""
Saves ping response to in-memory subscriber dict.
Args:
target_id: target ID to ping
ip_addr: IP Address to ping
ping_resp: response of ICMP ping command
"""
if ping_resp.error:
logging.debug('Failed to ping %s with error: %s',
target_id, ping_resp.error)
else:
self._module.save_ping_response(target_id, ip_addr, ping_resp)
async def _run(self) -> None:
targets, addresses = await self._module.get_ping_targets(self._loop)
if len(targets) > 0:
await self._ping_targets(addresses, targets)
else:
logging.warning('No subscribers/ping targets found')
        await asyncio.sleep(self._polling_interval)
```
#### File: scripts/cli/cleanup.py
```python
import os
import sys
import json
import click
from boto3 import Session
from .common import (
run_command,
run_playbook,
print_error_msg,
print_success_msg)
def setup_aws_creds():
session = Session()
creds = session.get_credentials()
if not creds:
print_error_msg('''
AWS credentials not configured.
configure through awscli or through orcl
orcl configure set -k aws_access_key_id -v <access_key_id>
orcl configure set -k aws_secret_access_key -v <aws_secret_access_key>
orcl configure set -k region -v <region>
''')
sys.exit(1)
frozen_creds = creds.get_frozen_credentials()
os.environ["AWS_ACCESS_KEY_ID"] = frozen_creds.access_key
os.environ["AWS_SECRET_ACCESS_KEY"] = frozen_creds.secret_key
@click.group(invoke_without_command=True)
@click.pass_context
def cleanup(ctx):
"""
Removes resources deployed for orc8r
"""
tf_destroy = [ "terraform", "destroy", "-auto-approve"]
if ctx.invoked_subcommand is None:
cmd = " ".join(tf_destroy)
click.echo(f"Following commands will be run during cleanup\n{cmd}")
click.confirm('Do you want to continue with cleanup?', abort=True)
click.echo(f"Running {cmd}")
rc = run_command(tf_destroy)
if rc != 0:
print_error_msg("Destroy Failed!!! Attempt cleaning up individual resources using 'orcl cleanup raw' subcommand")
return
@cleanup.command()
@click.pass_context
@click.option('--dryrun', default=False, is_flag=True, help='Show resources to be cleaned up during raw cleanup')
@click.option('--state', help='Provide state file containing resource information e.g. terraform.tfstate or terraform.tfstate.backup')
@click.option('--values', multiple=True, help='Key value pairs. for e.g. cluster_name,orc8r. Can be used multiple times')
def raw(ctx, dryrun, state, values):
"""
Individually cleans up resources deployed for orc8r
"""
if state:
ctx.obj['cleanup_state'] = state
# add additional items
for config_items in values:
k, v = config_items.split(",")
ctx.obj[k] = v
extra_vars = json.dumps(ctx.obj)
cleanup_playbook = "%s/cleanup.yml" % ctx.obj["playbooks"]
playbook_args = [ "ansible-playbook", "-v", "-e", extra_vars]
# Few boto dependent modules in ansible require these values to be
# setup as environment variables. Hence setting these up.
setup_aws_creds()
if dryrun:
tag_args = ["-t", "cleanup_dryrun"]
else:
tag_args = ["-t", "cleanup"]
rc = run_playbook(playbook_args + tag_args + [cleanup_playbook])
if rc != 0:
print_error_msg("Failed cleaning up resources!!!")
sys.exit(1)
print_success_msg("Successfully cleaned up underlying resources")
```
|
{
"source": "jeremydouglass/pathpattern",
"score": 3
}
|
#### File: pathpattern/pathpattern/core.py
```python
import logging
import tempfile
from collections import Counter
import regex as re
from bs4 import BeautifulSoup
from igraph import Graph
from pyx import canvas, color, path, style, trafo, unit
import pathpattern.utils as utils
logging.basicConfig(level=logging.WARNING)
class GlyphSet(object):
"""Glyph set for generating glyphs and signatures from edge lists."""
def __init__(self, **kwargs):
self.glist = []
self.id = 0
self.width = kwargs.get('width', 4)
self.outdir = kwargs.get('outdir', './')
self.prefix = kwargs.get('prefix', 'glyph_')
self.uscale = kwargs.get('uscale', 1)
self.mincount = 0
self.maxcount = 3
if 'graph' in kwargs and 'range' not in kwargs and 'list' not in kwargs:
self.graph = kwargs.get('graph')
degrees = set(zip(self.graph.indegree(), self.graph.outdegree())) # a set -- no dups, no counts
logging.debug('\n degree list: %s', str(degrees))
degrees_counter = Counter(zip(self.graph.indegree(), self.graph.outdegree())) # count dups
logging.debug('* degrees_counter: ')
logging.debug(degrees_counter)
self.mincount = degrees_counter[min(degrees_counter, key=degrees_counter.get)]
self.maxcount = degrees_counter[max(degrees_counter, key=degrees_counter.get)]
logging.debug('* self.mincount: ')
logging.debug(self.mincount)
logging.debug('* self.maxcount: ')
logging.debug(self.maxcount)
logging.debug(degrees_counter)
logging.debug('\n degree counter: %s', str(degrees_counter))
degrees_counter_list = list(degrees_counter.items())
logging.debug('\n degree counter list: ')
logging.debug(degrees_counter_list)
degrees_counter_sorted = []
for key in sorted(degrees_counter.iterkeys()):
degrees_counter_sorted.append((key, degrees_counter[key]))
logging.debug(degrees_counter_sorted)
for i in degrees_counter_sorted:
self.glist.append((i[0][0], i[0][1], i[1]))
elif 'list' in kwargs and 'range' not in kwargs:
self.glist = kwargs.get('list')
elif 'range' in kwargs and 'list' not in kwargs:
self.inrange = kwargs.get('range')[0]
self.outrange = kwargs.get('range')[1]
for i in range(self.inrange[0], self.inrange[1]):
for o in range(self.outrange[0], self.outrange[1]):
self.glist.append((int(i), int(o), 1))
else:
raise ValueError('invalid argument: provide one of the following:\n range = ((xmin, xmax), (ymin, ymax))\n list = [(ax, ay),(bx, by),(cx, cy)...]\n graph = <igraph canvas.canvas object>')
self.glist.sort(key=lambda x: (x[0], x[1])) # http://stackoverflow.com/questions/4233476/sort-a-list-by-multiple-attributes
# sorted -- any reason that custom list input order might matter? index lookup?
# id string -- this isn't unique or unambiguous, but perhaps use different separators and/or do a hash later.
flatlist = [str(element) for tupl in self.glist for element in tupl] # http://stackoverflow.com/questions/3204245/how-do-i-convert-a-tuple-of-tuples-to-a-one-dimensional-list-using-list-comprehe
self.id = ''.join(flatlist)
def __str__(self):
string = 'GlyphSet:\n'
widecount = 0
for i in self.glist:
if widecount >= self.width:
string += '\n'
widecount = 0
string += ' ' + str(i)
widecount += 1
return string
def __len__(self):
return len(self.glist)
def nocounts(self):
"""replace all glist counts with 1"""
newlist = []
for i in self.glist:
newlist.append((i[0], i[1], 1))
self.glist = newlist
def glyph(self, index):
""" For a degree pair (in, out), render a glyph. """
self.scale()
if len(index) > 2:
c = degree_glyph(index[0], index[1], index[2], (self.mincount, self.maxcount))
else:
c = degree_glyph(index[0], index[1], 1, (self.mincount, self.maxcount))
return c
def glyphs(self):
""" For a list of degree pairs, render all glyphs and append to a list of glyphs. """
clist = []
for i in self.glist:
clist.append(self.glyph(i))
return clist
def scale(self, val=0):
"""Interface to the global pyx scale: unit.set(uscale=val)
Either sets scale directly or uses the instance variable default.
0.25 scales to 25%, 3.0 scales to 300%.
- http://pyx.sourceforge.net/manual/unit.html
- http://nullege.com/codes/search/pyx.unit.set
- https://github.com/mjg/PyX/blob/master/test/unit/test_unit.py
"""
if val == 0:
val = self.uscale
unit.set(uscale=val, defaultunit="inch")
def write_glyph(self, index, overwrite=False):
""" For a degree pair (in, out), save a glyph as a PNG file. """
c = self.glyph(index)
index_str = '_'.join(str(x).zfill(3) for x in index)
imgfilepath = self.outdir + index_str + '.png'
if not overwrite and utils.path.exists(imgfilepath):
logging.debug(imgfilepath, " : exists (skip write)")
return ''
c.writeGSfile(filename=imgfilepath)
return imgfilepath
def write_glyphs(self):
""" For a list of degree pairs, save all glyphs as PNG files. """
for i in self.glist:
self.write_glyph(i)
return
def signature(self, deg_max=6, padded=False, has_border=False):
""" For a visualization of glyphs, lay out in a 2D grid PNG file. """
self.scale()
sig = canvas.canvas([trafo.rotate(90), trafo.mirror(0)])
scale = 1.5
if padded or has_border:
sig_margin = 0.2
x = (deg_max + 1) * scale + (1.5 * sig_margin)
border_path = path.path(path.moveto(0, 0),
path.lineto(0, x),
path.lineto(x, x),
path.lineto(x, 0),
path.closepath())
if padded:
border_color = color.cmyk.White
if has_border:
border_color = color.cmyk.Gray
sig.stroke(border_path, [border_color, trafo.translate(-sig_margin*2, -sig_margin*2), style.linewidth(.025)])
for index in self.glist:
if len(index) > 2:
c = degree_glyph(index[0], index[1], index[2], (self.mincount, self.maxcount))
else:
c = degree_glyph(index[0], index[1], 1, (self.mincount, self.maxcount))
sig.insert(c, [trafo.translate(index[0]*scale, (index[1])*scale)]) # text writing requires full latex
return sig
def write_signature(self, **kwargs):
"Write signature to image file."
c = self.signature(**kwargs)
imgfilename = self.outdir + self.prefix + '_signature' + '.png'
c.writeGSfile(filename=imgfilename)
return imgfilename
class tgfFile(object):
"""TGF file for writing edges and nodes in the TGF format."""
def __init__(self, filename, **kwargs):
self.nodelist = []
self.nodeset = set()
self.nodedict = {}
self.edgelist = []
self.filename = filename
if not filename:
raise ValueError('No tgf filename given.')
self.elfilename = kwargs.get('elfilename', '')
try:
with open(self.filename, 'r') as inputfile:
phase = 'nodes'
lines = (line.strip() for line in inputfile) # all lines including the blank ones http://stackoverflow.com/questions/4842057/python-easiest-way-to-ignore-blank-lines-when-reading-a-file
lines = (line for line in lines if line) # non-blank lines
for line in lines:
lt = line.strip().split('\t') # line tuple
if '#' in line:
phase = 'edges'
continue
if phase == 'nodes':
self.nodeset.add(lt[0])
# self.nodedict.update({lt[0], ''})
# ADDING NODES TO DICT IS BROKEN
# self.nodedict.update({lt[0], lt[1:]})
if phase == 'edges':
self.edgelist.append(lt)
# tgf may have nodes which are only listed in edges
self.nodeset.add(lt[0])
self.nodeset.add(lt[1])
# only add keys-without-values if the values don't already exist
if not self.nodedict.get(lt[0]): # no key or empty value
self.nodedict[lt[0]] = ''
if not self.nodedict.get(lt[1]):
self.nodedict[lt[1]] = ''
self.nodelist = list(self.nodeset)
except OSError:
print "File not read."
def __str__(self):
string = 'tgfFile:\n'
string += 'nodes: ' + str(self.nodelist) + '\n'
string += 'edges: ' + str(self.edgelist) + '\n'
return string
def __len__(self):
return len(self.edgelist)
def write_edgefile(self):
"""Write edge list to text file."""
if self.elfilename == '':
self.elfilename = self.filename + '.el'
try:
with open(self.elfilename, "w") as outputfile:
for line in self.edgelist:
outputfile.write('\t'.join(line[:2]) + '\n') # slice out edge labels to avoid igraph NCOL error, tab-delimit the tuples
return self.elfilename
except OSError:
print "File not written."
def to_graph(self):
""" TGF file to igraph graph. Writes an edgefile and passes the filename in for a graph object, as igraph's Read_Ncol can only load from a file."""
# results = edgelistfile_to_graph(elfilename)
return Graph.Read_Ncol(self.write_edgefile(), directed=True)
def tgffile_to_edgelist(tgffilename, elfilename=''):
""" TGF file to edgelist converter. """
results = []
if not tgffilename:
raise ValueError('No tgf filename given.')
if elfilename == '':
elfilename = tgffilename + '.el'
try:
with open(tgffilename, 'r') as inputfile:
with open(elfilename, "w") as outputfile:
phase = 'nodes'
for line in inputfile:
if '#' in line.strip():
phase = 'edges'
continue
if phase == 'nodes':
continue
if phase == 'edges':
outputfile.write(line)
except OSError:
print "File not copied."
results = elfilename
return results
def edgelistfile_to_graph(elfilename):
"""Stub passing an edgelist to igraph, returns an ncol graph object."""
return Graph.Read_Ncol(elfilename, directed=True)
def tgffile_to_graph(tgffilename, elfilename=''):
"""TGF file to igraph graph. Wrapper for intermediate edgelist."""
results = []
if not tgffilename:
raise ValueError('No tgf filename given.')
if elfilename == '':
elfilename = tgffilename + '.el'
try:
elfilename = tgffile_to_edgelist(tgffilename, elfilename)
results = edgelistfile_to_graph(elfilename)
except OSError:
print "File not copied."
return results
def degree_glyph(indegree, outdegree, degreecount=1, degreerange=(1, 3)):
"""Return an igraph canvas glyph image based on indegree, outdegree."""
canvas_ = canvas.canvas()
# box color - turn border off and on
# off, variable 1, solid 2, type colors 3
boxcolorflag = 3
# fill color - turn color background off and on
# background off 0, variable red 1, type colors 2
fillcolorflag = 2
cmin = max([degreerange[0], 1])
cmax = degreerange[1]
cnorm = float(0)
try:
# norm = x[i]−min(x) / (max(x)−min(x))
cnorm = float(degreecount - cmin) / float(cmax-cmin)
except ZeroDivisionError:
cnorm = float(0)
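    # e.g. degreecount=2 with degreerange=(1, 3) gives cnorm = (2 - 1) / (3 - 1) = 0.5,
    # mapping duplicate-degree counts onto [0, 1] for the WhiteRed gradient used below
    # when fillcolorflag == 1.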
if fillcolorflag == 1:
if cnorm > 0:
logging.debug('cmin/cmax: %s %s cnorm: %s', str(cmin), str(cmax), str(cnorm))
canvas_.fill(path.rect(0, 0, 1, 1), [color.gradient.WhiteRed.getcolor(cnorm)])
elif fillcolorflag == 2:
if (indegree == 0) and (outdegree == 0):
fillcolor = color.cmyk.White
elif indegree == 0:
fillcolor = color.cmyk.Green
elif indegree == 1 and outdegree == 1:
fillcolor = color.cmyk.Yellow
elif outdegree == 0:
fillcolor = color.cmyk.Red
elif indegree == 1 and outdegree > 1:
fillcolor = color.cmyk.ProcessBlue
elif indegree > 1 and outdegree == 1:
fillcolor = color.cmyk.Orange
elif indegree > 1 and outdegree > 1:
fillcolor = color.cmyk.Orchid
else:
fillcolor = color.cmyk.Black
canvas_.fill(path.rect(0, 0, 1, 1), [fillcolor])
dg_box = path.path(path.moveto(0, 0),
path.lineto(0, 1),
path.lineto(1, 1),
path.lineto(1, 0),
path.lineto(0, 0),
path.closepath())
if boxcolorflag == 1:
boxcolor = color.cmyk(0, 0, 0, 0.25)
if (indegree == 0) and (outdegree == 0):
boxcolor = color.cmyk.White
elif indegree == 0:
boxcolor = color.cmyk.YellowGreen
elif outdegree == 0:
boxcolor = color.cmyk.RedOrange
# draw manual bounding box
canvas_.stroke(dg_box, [boxcolor, style.linewidth(.1)])
elif boxcolorflag == 2:
boxcolor = color.cmyk.Gray # Black
elif boxcolorflag == 3:
if (indegree == 0) and (outdegree == 0):
boxcolor = color.cmyk.White
elif indegree == 0:
boxcolor = color.cmyk.Green
elif indegree == 1 and outdegree == 1:
boxcolor = color.cmyk.Yellow
elif outdegree == 0:
boxcolor = color.cmyk.Red
elif indegree == 1 and outdegree > 1:
boxcolor = color.cmyk.ProcessBlue
elif indegree > 1 and outdegree == 1:
boxcolor = color.cmyk.Orange
elif indegree > 1 and outdegree > 1:
boxcolor = color.cmyk.Orchid
else:
boxcolor = color.cmyk.Black
if boxcolorflag == 2 or boxcolorflag == 3:
# reset box wider for cutter
dg_box = path.path(path.moveto(-0.2, -0.2),
path.lineto(-0.2, 1.2),
path.lineto(1.2, 1.2),
path.lineto(1.2, -0.2),
path.lineto(-0.2, -0.2),
path.closepath())
# draw manual bounding box
canvas_.stroke(dg_box, [boxcolor, style.linewidth(.05)])
node_dot = path.circle(.5, .5, .15)
canvas_.fill(node_dot)
if indegree > 0:
gp = path.path(path.moveto(0.5, 0.5),
path.lineto(0.5, 0.75)) # stub
canvas_.stroke(gp, [style.linewidth(.1)])
if indegree == 1:
gp = path.path(path.moveto(0.5, 0.5),
path.lineto(0.5, 1.0)) # indegree 1
canvas_.stroke(gp, [style.linewidth(0.1)])
else:
gp = path.path(path.moveto(0.5, 0.75),
path.lineto(0.0, 0.75), # crossbar
path.lineto(1.0, 0.75))
canvas_.stroke(gp, [style.linewidth(.1)])
if indegree > 1:
logging.debug(range(0, indegree))
for line in range(0, indegree):
linef = float(line)
indegreef = float(indegree-1)
                    logging.debug('%s %s %s', linef, indegreef, linef/indegreef)
gp = path.path(path.moveto(linef/indegreef, 1.00),
path.lineto(linef/indegreef, 0.75), # line for each indegree
path.lineto(0.50, 0.75)) # round off the corner
canvas_.stroke(gp, [style.linewidth(.1)])
if outdegree > 0:
gp = path.path(path.moveto(0.50, 0.50),
path.lineto(0.50, 0.25)) # stub
canvas_.stroke(gp, [style.linewidth(.1)])
if outdegree == 1:
gp = path.path(path.moveto(0.50, 0.50),
path.lineto(0.50, 0.00)) # outdegree 1
canvas_.stroke(gp, [style.linewidth(.1)])
else:
gp = path.path(path.moveto(0.50, 0.25),
path.lineto(0.00, 0.25), # crossbar
path.lineto(1.00, 0.25))
canvas_.stroke(gp, [style.linewidth(0.10)])
if outdegree > 1:
logging.debug(range(0, outdegree))
for line in range(0, outdegree):
linef = float(line)
outdegreef = float(outdegree-1)
                    logging.debug('%s %s %s', linef, outdegreef, linef/outdegreef)
gp = path.path(path.moveto(linef/outdegreef, 0.00),
path.lineto(linef/outdegreef, 0.25), # line for each outdegree
path.lineto(0.50, 0.25)) # round off the corner
canvas_.stroke(gp, [style.linewidth(0.10)])
return canvas_
def pp_graph_stats(graph):
"""Log and return statistical discriptions of graph."""
logging.info('\ngraph stats:')
logging.info('\n degree: %s', str(graph.degree()))
logging.info('\n indegree: %s', str(graph.indegree()))
logging.info('\n outdegree: %s', str(graph.outdegree()))
degrees = zip(graph.indegree(), graph.outdegree())
logging.info('\n degree list: %s', str(degrees))
degrees_counter = Counter(degrees) # http://stackoverflow.com/questions/11055902/how-to-convert-a-counter-object-into-a-usable-list-of-pairs
logging.info('\n degree counter: %s', str(degrees_counter))
degrees_counter_list = list(degrees_counter.items())
logging.info('\n degree counter list: ')
logging.info(degrees_counter_list)
degrees_counter_sorted = []
for key in sorted(degrees_counter.iterkeys()):
degrees_counter_sorted.append((key, degrees_counter[key]))
logging.info('\n degree counter sorted: ')
logging.info(degrees_counter_sorted)
logging.info('\n distribution:\n\n%s', str(graph.degree_distribution(mode='ALL')) + '\n')
logging.info('\n in-dist: \n\n%s', str(graph.degree_distribution(mode='IN')) + '\n')
logging.info('\n out-dist:\n\n%s', str(graph.degree_distribution(mode='OUT')) + '\n')
return degrees_counter_list, degrees_counter_sorted
class twineFile(object):
"""Twine analysis."""
def __init__(self, filename, **kwargs):
self.filename = filename
if not filename:
raise ValueError('No tgf filename given.')
self.format = kwargs.get('format', '')
logging.debug('format: %s', self.format)
self.elfilename = kwargs.get('elfilename', '')
self.html_doc = ""
self.html_doc = utils.txtfile_to_string(filename)
self.html_doc = self.html_doc.replace('-', '')
# stripping all hyphens because of bs4 voodoo http://stackoverflow.com/questions/25375351/beautifulsoup-unable-to-find-classes-with-hyphens-in-their-name
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
logging.debug(self.html_doc)
soup = BeautifulSoup(self.html_doc, 'html.parser')
self.nodelist = []
self.edgelist = []
# fake data
# for i in [x for x in range(10)]:
# self.edgelist.append((i,i+1))
# for i in [x for x in range(10)]:
# self.edgelist.append((i,i+2))
# for i in [x for x in range(10)]:
# self.edgelist.append((i,i*2))
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
# <tw-passagedata pid="1" name="node 1" tags="" position="575.5,480">[[node 2]]</tw-passagedata>
# THIS WORKS on an archive file exported from Twine 2 webmin -- could be extended to work with multiple stories in a file using:
# passages = soup.twstorydata.twpassagedata
if self.format == 'published':
# THIS WORKS on porpentine's howling dogs
# view-source:view-source:http://slimedaughter.com/games/twine/howlingdogs/
# view-source:https://commondatastorage.googleapis.com/itchio/html/226733-2206/index.html
passages = soup.select('div[tiddler]')
for psg in passages:
pname = psg['tiddler']
pid = ''
self.nodelist.append((pname.replace(' ', '_'), pid))
# backwards because we are defining edges by names
# check passage contents for links
pat = re.compile(ur'\[\[.*?\]\]')
for match in re.findall(pat, psg.get_text()):
if "|" in match:
match = match.split('|')[1]
self.edgelist.append((pname.replace(' ', '_'), match.replace('[', '').replace(']', '').replace(' ', '_')))
# tuple: ( 'passage name' , matched '[[link 1]]' returned as 'link_1' )
# broken somehow -- tried to add utils cleaning to make The Temple of No work
# self.edgelist.append( ( utils.tlabel(pname) , utils.tlabel(match.replace('[','').replace(']','')) ) )
# tuple: ( 'passage name' , matched '[[link 1]]' returned as 'link_1' )
elif self.format == 'archive':
passages = soup.find_all('twpassagedata')
for psg in passages:
pname = psg['name']
pid = psg['pid']
self.nodelist.append((pname, pid))
# backwards because we are defining edges by names
# check passage contents for links
pat = re.compile(ur'\[\[.*?\]\]')
for match in re.findall(pat, psg.get_text()):
self.edgelist.append((pname.replace(' ', '_'), match.replace('[', '').replace(']', ' ').replace(' ', '_')))
# tuple: ( 'passage name' , matched '[[link 1]]' returned as 'link_1' )
logging.debug(self.nodelist)
logging.debug(self.edgelist)
def __str__(self):
return "twineFile()"
def __len__(self): # not working for some reason ?
return len(self.edgelist)
def clean_nodes_edges_bak(self):
"""Draft Twine edge cleaning code."""
self.nodelist.sort()
for idx, node in enumerate(self.nodelist):
tmpnode = '_' + str(idx) + '_' + re.sub(r'[\W_]', '_', node[0], flags=re.UNICODE)
if tmpnode != node[0]:
newnode = (tmpnode, node[1])
self.nodelist[idx] = newnode
for idx2, edge in enumerate(self.edgelist):
if edge[0] == node[0]:
self.edgelist[idx2] = (tmpnode, edge[1])
for idx2, edge in enumerate(self.edgelist):
if edge[1] == node[0]:
self.edgelist[idx2] = (edge[0], tmpnode)
for idx, edge in enumerate(self.edgelist):
self.edgelist[idx] = (
re.sub(r'[\W_]', '_', edge[0], flags=re.UNICODE),
re.sub(r'[\W_]', '_', edge[1], flags=re.UNICODE)
)
def clean_nodes_edges(self):
"""Clean Twine edge list and node list data after initial import.
Migrates name-based edges into a nodelist and creates unique ID numbers
for all nodes and edges.
Also reformats node names for compatability with further processing
and for visualization -- e.g. join node names with _; crop long names.
"""
self.nodelist.sort()
# rename any existing node names
# in either node or edge references
# to avoid id collisions
change = '_'
for idx, node in enumerate(self.nodelist):
# if node[0][0].isdigit():
self.nodelist[idx] = (change + node[0], node[1])
for idx, edge in enumerate(self.edgelist):
self.edgelist[idx] = (change + edge[0], edge[1])
for idx, edge in enumerate(self.edgelist):
self.edgelist[idx] = (edge[0], change + edge[1])
# give ids to nodes missing ids (all of them for TwineFile)
for idx, node in enumerate(self.nodelist):
self.nodelist[idx] = (node[0], idx)
for idx, node in enumerate(self.nodelist):
node_name = node[0]
node_id = node[1]
# replace matching name based edges with id based edges
for idx2, edge in enumerate(self.edgelist):
e0 = edge[0]
e1 = edge[1]
if e0 == node_name:
e0 = str(node_id)
if e1 == node_name:
e1 = str(node_id)
self.edgelist[idx2] = (e0, e1)
# extract edge names not found in node list
id_counter = len(self.nodelist)
for idx, edge in enumerate(self.edgelist):
e0 = edge[0]
e1 = edge[1]
if not e0.isdigit():
e0 = str(id_counter)
self.nodelist.append((edge[0], e0))
id_counter += 1
if not e1.isdigit():
e1 = str(id_counter)
self.nodelist.append((edge[1], e1))
id_counter += 1
self.edgelist[idx] = (e0, e1)
# clean node names -- no names in edge list anymore
for idx, node in enumerate(self.nodelist):
node_name = re.sub(r'[\W_]', '_', node[0], flags=re.UNICODE)
self.nodelist[idx] = (node_name, node[1])
# all strings
for idx, node in enumerate(self.nodelist):
self.nodelist[idx] = (str(node[0]), str(node[1]))
for idx, edge in enumerate(self.edgelist):
self.edgelist[idx] = (str(edge[0]), str(edge[1]))
# strip leading _ from node names
for idx, node in enumerate(self.nodelist):
self.nodelist[idx] = (str(node[0][1:]), node[1])
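        # Worked example (illustrative names): nodelist [('Start', '')] with
        # edgelist [('Start', 'The End')] comes out as
        # nodelist [('Start', '0'), ('The_End', '1')] and edgelist [('0', '1')].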
def edge_duplicate_max(self, dupemax=0):
"""Reduce duplicate edges if the duplicate count is greater than max.
This is useful for visualizations in which large duplicate edge counts
(e.g. 20, 50, 100) visually obscure other aspects of the graph.
"""
if dupemax == 0:
# remove all duplicate edges
self.edgelist = list(set(self.edgelist))
else:
countmax = dupemax + 1
# prune high duplicate edge counts to max
edgecounts = Counter(self.edgelist)
for key in edgecounts:
if edgecounts[key] > countmax:
edgecounts[key] = countmax
self.edgelist = list(edgecounts.elements())
def edge_remover(self, fun):
"""Remove edges based on a function.
Example: a lambda returning true if either edge name begins 'http'
"""
mylist = list(self.edgelist)
for edge in self.edgelist:
if fun(edge[0]) or fun(edge[1]):
mylist.remove(edge)
self.edgelist = mylist
def node_name_shorten(self, length):
"""Crop node names.
Long cropped names are marked with an ellipsis.
"""
for idx, node in enumerate(self.nodelist):
if len(node[0]) > length:
if length > 9:
self.nodelist[idx] = (node[0][:length-3] + '...', node[1])
else:
self.nodelist[idx] = (node[0][:length], node[1])
def node_orphans_remove(self):
"""Remove orphan nodes from node list.
These are nodes not appearing in the edge list.
In addition to cleaning up messy extracted data,
This is useful after edge pruning -- for example,
pruning http edges and then removing the orphaned
nodes.
"""
# remove orphan nodes
mylist = list(self.nodelist)
for node in self.nodelist:
used = False
for edge in self.edgelist:
if node[1] == edge[0]:
used = True
if node[1] == edge[1]:
used = True
if not used:
mylist.remove(node)
self.nodelist = mylist
def write_edgefile(self, temp=True):
"""Write simple edge list to file.
By default returns a temporary file.
This functionality is necessary because igraph's Graph.Read_Ncol
cannot accept an in-memory object or stream, only a filename
or file handle.
"""
if self.elfilename == '':
self.elfilename = self.filename + '.el'
if temp:
with tempfile.NamedTemporaryFile(delete=False) as fp:
for line in self.edgelist:
fp.write('\t'.join(unicode(v).encode('ascii', 'ignore') for v in line) + '\n')
return fp.name
else:
try:
with open(self.elfilename, "w") as outputfile:
for line in self.edgelist:
# print(line)
outputfile.write('\t'.join(unicode(v).encode('ascii', 'ignore') for v in line) + '\n') # tab-delimit the tuples
# UnicodeEncodeError: 'ascii' codec can't encode character u'\u2026' in position 7: ordinal not in range(128)
# http://stackoverflow.com/questions/9942594/unicodeencodeerror-ascii-codec-cant-encode-character-u-xa0-in-position-20
# .encode('utf-8').strip()
# http://stackoverflow.com/questions/10880813/typeerror-sequence-item-0-expected-string-int-found
# '\t'.join(str(v) for v in value_list)
return self.elfilename
except OSError:
print "File not written."
def write_sparse_edgefile(self):
"""Write edges and nodes to Edger-compatible sparse edge file."""
if self.elfilename == '':
self.elfilename = self.filename + '.el'
try:
with open(self.elfilename, "w") as outputfile:
outputfile.write('# Nodes\n')
for nline in self.nodelist:
outputfile.write(str(nline[1]) + '\t\t' + str(nline[0]) + '\n')
outputfile.write('\n# Edges\n')
for eline in self.edgelist:
outputfile.write('\t'.join(unicode(v).encode('ascii', 'ignore') for v in eline) + '\n')
return self.elfilename
except OSError:
print "File not written."
def to_graph(self, temp=False):
"""TGF file to igraph graph.
Writes an edgefile and passes the filename in for a graph object,
as igraph's Read_Ncol can only load from a file.
"""
# results = edgelistfile_to_graph(elfilename)
return Graph.Read_Ncol(self.write_edgefile(temp), directed=True)
# CURRENTLY UNUSED
# def batch_degree_glyph(indegree, outdegree):
# """"""
# return True
def build_graph(edgelist):
"""Builds a graph from an edgelist.
To use on a TGF:
g = build_graph(utils.tgf_to_list('edges.tgf'))
"""
g = Graph(directed=True)
e = [(a[0], a[1]) for a in edgelist[1]]
g.add_vertices(len(e))
g.add_edges(e)
return g
def edgelistfile_to_edgelist(filename):
"""DRAFT: Load an edgelist file -- possibly not working."""
results = []
if not filename:
raise ValueError('No filename given.')
try:
with open(filename, 'r') as inputfile:
for line in inputfile:
results.append(line)
except OSError:
print "File not copied."
# results = Graph.Read_Ncol(elfilename, directed=True)
return results
def my__tgffile_to_graph(filename):
"""DRAFT: Load a TGF file into a graph -- possibly not working."""
results = []
if not filename:
raise ValueError('No filename given.')
try:
with open(filename, 'r') as inputfile:
elfilename = filename + '.el'
with open(elfilename, "w") as outputfile:
phase = 'nodes'
for line in inputfile:
if '#' in line.strip():
phase = 'edges'
continue
if phase == 'nodes':
continue
if phase == 'edges':
outputfile.write(line)
except OSError:
print "File not copied."
results = Graph.Read_Ncol(elfilename, directed=True)
return results
```
|
{
"source": "JeremyDP2904/goodnightbot",
"score": 2
}
|
#### File: JeremyDP2904/goodnightbot/bot.py
```python
from fbchat import Client
from fbchat.models import *
import random
import config
import schedule
import time
client = Client(config.username, config.password)
def GoodNight():
for friend in config.friendlist:
wish = random.choice(config.wish).format(config.friendlist[friend])
client.sendRemoteImage(random.choice(config.images), message=wish, thread_id=friend, thread_type=ThreadType.USER)
time.sleep(1)
schedule.every().day.at("23:00").do(GoodNight)
while True:
schedule.run_pending()
time.sleep(1)
```
|
{
"source": "JeremyDTaylor/fractal-python",
"score": 2
}
|
#### File: fractal-python/tests/test_api_client.py
```python
import json
import arrow
import pytest
from fractal_python import api_client
from fractal_python.api_client import ApiClient
TOKEN_RESPONSE = {
"access_token": "access token e.g. <PASSWORD>",
"partner_id": "Partner ID e.g. 1juji12f",
"expires_in": 1800,
"token_type": "Bearer",
}
def make_sandbox(requests_mock) -> ApiClient:
requests_mock.register_uri("POST", "/token", text=json.dumps(TOKEN_RESPONSE))
return api_client.sandbox("sandbox-key", "sandbox-partner")
@pytest.fixture()
def sandbox(requests_mock) -> ApiClient:
return make_sandbox(requests_mock)
@pytest.fixture
def live(requests_mock) -> ApiClient:
requests_mock.register_uri("POST", "/token", text=json.dumps(TOKEN_RESPONSE))
return api_client.live("live-key", "live-partner")
def test_sandbox(sandbox):
assert "sandbox-key" in sandbox.headers.values()
assert "sandbox-partner" in sandbox.headers.values()
def test_live(live):
assert "live-key" in live.headers.values()
assert "live-partner" in live.headers.values()
def test_authorise(requests_mock, freezer, live): # skipcq: PYL-W0613
headers = {"X-Api-Key": "live-key", "X-Partner-Id": "live-partner"}
requests_mock.register_uri(
"POST",
"https://auth.askfractal.com/token",
text=json.dumps(TOKEN_RESPONSE),
request_headers=headers,
)
live.expires_at = arrow.now().shift(seconds=-1801)
live._authorise()
assert (
live.expires_at.int_timestamp == arrow.now().shift(seconds=1800).int_timestamp
)
```
#### File: fractal-python/tests/test_forecasting.py
```python
import pytest
from fractal_python.api_client import COMPANY_ID_HEADER, PARTNER_ID_HEADER, ApiClient
from fractal_python.forecasting import (
get_forecasted_balances,
get_forecasted_transactions,
get_forecasts,
)
from tests.test_api_client import make_sandbox
GET_FORECASTS = {
"results": [
{
"id": "forecastId1234",
"bankId": 16,
"accountId": "9aed0933-8e38-4571-93dd-8e775c8233e7",
"name": "Forecast-Mar1",
"date": "2021-03-01T09:34:00.284Z",
"source": "MANUALIMPORT",
},
{
"id": "forecastId3456",
"bankId": 16,
"accountId": "9aed0933-8e38-4571-93dd-8e775c8233e7",
"name": "model_forecast_21_03_01.09:32",
"date": "2021-03-01T09:32:39.831Z",
"source": "MODEL",
},
],
"links": {},
}
GET_FORECASTS_PAGED = {
"results": [
{
"id": "forecastId3457",
"bankId": 16,
"accountId": "9aed0933-8e38-4571-93dd-8e775c8233e7",
"name": "model_forecast_21_03_01.09:32",
"date": "2021-04-01T09:32:39.831Z",
"source": "MODEL",
}
],
"links": {"next": "mock://test/forecasting/v2/forecasts?pageId=2"},
}
@pytest.fixture()
def forecasts_client(requests_mock) -> ApiClient:
request_headers = {
COMPANY_ID_HEADER: "CompanyID1234",
PARTNER_ID_HEADER: "sandbox-partner",
}
requests_mock.register_uri(
"GET",
"/forecasting/v2/forecasts",
json=GET_FORECASTS_PAGED,
request_headers=request_headers,
)
requests_mock.register_uri(
"GET",
"/forecasting/v2/forecasts?pageId=2",
json=GET_FORECASTS,
request_headers=request_headers,
)
return make_sandbox(requests_mock)
def test_get_forecasts(forecasts_client: ApiClient):
forecasts = [
item
for sublist in get_forecasts(
client=forecasts_client, company_id="CompanyID1234"
)
for item in sublist
]
assert len(forecasts) == 3
GET_FORECASTED_TRANSACTIONS_PAGED = {
"results": [
{
"id": "transactionId12342",
"forecastId": "forecastId1234",
"bankId": 16,
"accountId": "9aed0933-8e38-4571-93dd-8e775c8233e7",
"valueDate": "2020-10-18T03:59Z",
"amount": "1000.00",
"currency": "USD",
"type": "CREDIT",
"merchant": "LLoyds",
"category": "Tax",
"reasons": "",
"source": "MANUALIMPORT",
},
],
"links": {"next": "mock://test/forecasting/v2/transactions?pageId=2"},
}
GET_FORECASTED_TRANSACTIONS = {
"results": [
{
"id": "transactionId1234",
"forecastId": "forecastId1234",
"bankId": 16,
"accountId": "9aed0933-8e38-4571-93dd-8e775c8233e7",
"valueDate": "2020-09-18T03:59Z",
"amount": "1000.00",
"currency": "USD",
"type": "CREDIT",
"merchant": "LLoyds",
"category": "Tax",
"reasons": "",
"source": "MANUALIMPORT",
},
{
"id": "transactionId2345",
"forecastId": "forecastId2345",
"bankId": 16,
"accountId": "9aed0933-8e38-4571-93dd-8e775c8233e7",
"valueDate": "2020-09-18T03:59Z",
"amount": "1000.00",
"currency": "USD",
"type": "CREDIT",
"merchant": "LLoyds",
"category": "Tax",
"reasons": "",
"source": "MANUALIMPORT",
},
{
"id": "transactionId3456",
"forecastId": "forecastId3456",
"bankId": 16,
"accountId": "9aed0933-8e38-4571-93dd-8e775c8233e7",
"valueDate": "2020-09-18T03:59Z",
"amount": "1000.00",
"currency": "USD",
"type": "CREDIT",
"merchant": "LLoyds",
"category": "Tax",
"reasons": "",
"source": "MANUALIMPORT",
},
],
"links": {},
}
@pytest.fixture()
def forecasted_transactions_client(requests_mock) -> ApiClient:
request_headers = {
COMPANY_ID_HEADER: "CompanyID1234",
PARTNER_ID_HEADER: "sandbox-partner",
}
requests_mock.register_uri(
"GET",
"/forecasting/v2/transactions",
json=GET_FORECASTED_TRANSACTIONS_PAGED,
request_headers=request_headers,
)
requests_mock.register_uri(
"GET",
"/forecasting/v2/transactions?pageId=2",
json=GET_FORECASTED_TRANSACTIONS,
request_headers=request_headers,
)
return make_sandbox(requests_mock)
def test_get_forecasted_transactions(forecasted_transactions_client: ApiClient):
forecasted_transactions = [
item
for sublist in get_forecasted_transactions(
client=forecasted_transactions_client, company_id="CompanyID1234"
)
for item in sublist
]
assert len(forecasted_transactions) == 4
GET_FORECASTED_BALANCES_PAGED = {
"results": [
{
"id": "90044386-1c09-40bf-b75f-622555da29f2",
"bankId": 16,
"accountId": "accountId1234",
"forecastId": "forecastId1234",
"date": "2020-12-30T03:59Z",
"amount": "100002.00",
"currency": "GBP",
"type": "CREDIT",
"source": "MANUALIMPORT",
},
],
"links": {"next": "mock://test/forecasting/v2/balances?pageId=2"},
}
GET_FORECASTED_BALANCES = {
"results": [
{
"id": "90044386-1c09-40bf-b75f-622555da29f5",
"bankId": 16,
"accountId": "accountId1234",
"forecastId": "forecastId1234",
"date": "2020-11-30T03:59Z",
"amount": "100000.00",
"currency": "GBP",
"type": "CREDIT",
"source": "MANUALIMPORT",
},
{
"id": "6cb6c2d1-5b8e-4efc-8e69-7e051a6c3820",
"bankId": 18,
"accountId": "accountId2345",
"forecastId": "forecastId2345",
"date": "2020-12-30T03:59Z",
"amount": "110000.00",
"currency": "GBP",
"type": "CREDIT",
"source": "MANUALIMPORT",
},
],
"links": {},
}
@pytest.fixture()
def forecasted_balances_client(requests_mock) -> ApiClient:
request_headers = {
COMPANY_ID_HEADER: "CompanyID1234",
PARTNER_ID_HEADER: "sandbox-partner",
}
requests_mock.register_uri(
"GET",
"/forecasting/v2/balances",
json=GET_FORECASTED_BALANCES_PAGED,
request_headers=request_headers,
)
requests_mock.register_uri(
"GET",
"/forecasting/v2/balances?pageId=2",
json=GET_FORECASTED_BALANCES,
request_headers=request_headers,
)
return make_sandbox(requests_mock)
def test_get_forecasted_balances(forecasted_balances_client: ApiClient):
forecasted_balances = [
item
for sublist in get_forecasted_balances(
client=forecasted_balances_client, company_id="CompanyID1234"
)
for item in sublist
]
assert len(forecasted_balances) == 3
```
#### File: fractal-python/tests/test_merchants.py
```python
import pytest
from fractal_python.api_client import PARTNER_ID_HEADER, ApiClient
from fractal_python.banking import retrieve_merchants
from tests.test_api_client import make_sandbox
GET_MERCHANTS = {
"results": [
{
"id": "categoryId1234",
"name": "Vitalityhealth",
"categoryCode": "",
"addressLine": "",
},
{
"id": "categoryId2345",
"name": "Google",
"categoryCode": "",
"addressLine": "",
},
{"id": "categoryId3456", "name": "Uber", "categoryCode": "", "addressLine": ""},
],
"links": {},
}
GET_MERCHANTS_PAGED = {
"results": [
{"id": "categoryId3456", "name": "Lime", "categoryCode": "", "addressLine": ""}
],
"links": {"next": "mock://test/banking/v2/merchants?pageId=2"},
}
@pytest.fixture()
def merchants_client(requests_mock) -> ApiClient:
request_headers = {
PARTNER_ID_HEADER: "sandbox-partner",
}
requests_mock.register_uri(
"GET",
"/banking/v2/merchants",
json=GET_MERCHANTS_PAGED,
request_headers=request_headers,
)
requests_mock.register_uri(
"GET",
"/banking/v2/merchants?pageId=2",
json=GET_MERCHANTS,
request_headers=request_headers,
)
return make_sandbox(requests_mock)
def test_retrieve_merchants(merchants_client: ApiClient):
merchants = [
item
for sublist in retrieve_merchants(client=merchants_client)
for item in sublist
]
assert len(merchants) == 4
```
|
{
"source": "jeremydumais/airodb",
"score": 3
}
|
#### File: airodb/airodb/dumpLoader.py
```python
from os import path
from dumpConverter import DumpConverter
from colorama import Fore, Style
class DumpLoader:
@staticmethod
def Load(filename, sessionName, debug=False):
if path.exists(filename):
fileHandler = open(filename, "r")
dumpConverter = DumpConverter(sessionName)
dumps = []
# Ignore the first line (empty) and the second one (header)
lineCount = 0
for line in fileHandler:
if lineCount > 1:
# Stop before the client section. Only keep APs
if (line.strip() == ""):
break
dumpObject = dumpConverter.convertToJSON(line)
if dumpObject is not None:
dumps.append(dumpObject)
else:
if (debug):
print(f"{Fore.YELLOW}The line {line} has been ignored due to bad format.{Style.RESET_ALL}")
lineCount += 1
return dumps
else:
print("The file " + filename + " doesn't exist.")
return None
```
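A minimal usage sketch for `DumpLoader.Load`, assuming an airodump-ng CSV dump on disk; the import path follows the flat module layout above, and the file name and session name are placeholders.

```python
# Hypothetical call; "dump-01.csv" and "office-scan" are placeholders.
from dumpLoader import DumpLoader

dumps = DumpLoader.Load("dump-01.csv", "office-scan", debug=True)
if dumps is not None:
    print("Loaded {} access point records".format(len(dumps)))
```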
#### File: airodb/airodb/macAddress.py
```python
class MACAddress:
@staticmethod
def isValid(macAddress):
sanitizedMACAddress = macAddress.strip().upper()
if sanitizedMACAddress == "":
return False
# Ensure that we have 6 sections
items = sanitizedMACAddress.split(":")
if len(items) != 6:
return False
# Ensure that every section of the MAC Address has 2 characters
for section in items:
if len(section) != 2:
return False
# Ensure that all characters is hexadecimal
HEXADECIMAL_CHARS = ['0', '1', '2', '3', '4',
'5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F']
for section in items:
for character in section:
if character not in HEXADECIMAL_CHARS:
return False
return True
```
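A few illustrative checks for `MACAddress.isValid`, exercising the rules enforced above (six colon-separated sections, two hexadecimal characters each, case- and whitespace-insensitive); the import path is assumed from the flat module layout.

```python
# Illustrative inputs only; they exercise the validation rules implemented above.
from macAddress import MACAddress

assert MACAddress.isValid("12:34:56:78:9A:BC")
assert MACAddress.isValid(" 12:34:56:78:9a:bc ")    # case and surrounding whitespace are normalized
assert not MACAddress.isValid("12:34:56:78:9A")     # only five sections
assert not MACAddress.isValid("12:34:56:78:9A:GZ")  # non-hexadecimal characters
```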
|
{
"source": "jeremydumais/airodb-analyzer",
"score": 3
}
|
#### File: airodb_analyzer/models/accessPoint.py
```python
from models.macAddress import MACAddress
class AccessPoint():
_macAddress = None
_name = None
def __init__(self, macAddress, name):
if (not isinstance(macAddress, MACAddress)):
raise TypeError("macAddress")
if (not isinstance(name, str)):
raise TypeError("name")
self._macAddress = macAddress
self._name = name
def getMACAddress(self):
return self._macAddress
def getName(self):
return self._name
def isHidden(self):
sanitizedName = self._name.strip().replace("\\x00", "")
return sanitizedName == ""
def isTrusted(self, trustedAPList):
return self._macAddress in trustedAPList
def setMACAddress(self, macAddress):
if (not isinstance(macAddress, MACAddress)):
raise TypeError("macAddress")
self._macAddress = macAddress
def setName(self, name):
if (not isinstance(name, str)):
raise TypeError("name")
self._name = name
def __eq__(self, other):
if isinstance(other, AccessPoint):
return (self._macAddress == other._macAddress and
self._name == other._name)
return False
def __hash__(self):
return hash((self._macAddress, self._name))
```
#### File: airodb_analyzer/models/session.py
```python
from datetime import datetime
class Session():
_name = ""
_firstEntryDateTime = None
_lastEntryDateTime = None
_nbOfRawLogs = 0
def __init__(self, name, firstEntryDateTime = None,
lastEntryDateTime = None, nbOfRawLogs = 0):
if (not isinstance(name, str)):
raise TypeError("name")
if (name.strip()):
self._name = name
else:
raise ValueError("name")
if (firstEntryDateTime != None):
if (not isinstance(firstEntryDateTime, datetime)):
raise TypeError("firstEntryDateTime")
else:
self._firstEntryDateTime = firstEntryDateTime
if (lastEntryDateTime != None):
if (not isinstance(lastEntryDateTime, datetime)):
raise TypeError("lastEntryDateTime")
else:
self._lastEntryDateTime = lastEntryDateTime
if (not isinstance(nbOfRawLogs, int)):
raise TypeError("nbOfRawLogs")
if (nbOfRawLogs < 0):
raise ValueError("nbOfRawLogs")
else:
self._nbOfRawLogs = nbOfRawLogs
def getName(self):
return self._name
def getFirstEntryDateTime(self):
return self._firstEntryDateTime
def getLastEntryDateTime(self):
return self._lastEntryDateTime
def getNBOfRawLogs(self):
return self._nbOfRawLogs
def setName(self, name):
if (not isinstance(name, str)):
raise TypeError("name")
if (name.strip()):
self._name = name
else:
raise ValueError("name")
def setFirstEntryDateTime(self, firstEntryDateTime):
if (firstEntryDateTime != None):
if (not isinstance(firstEntryDateTime, datetime)):
raise TypeError("firstEntryDateTime")
else:
self._firstEntryDateTime = firstEntryDateTime
else:
self._firstEntryDateTime = None
def setLastEntryDateTime(self, lastEntryDateTime):
if (lastEntryDateTime != None):
if (not isinstance(lastEntryDateTime, datetime)):
raise TypeError("lastEntryDateTime")
else:
self._lastEntryDateTime = lastEntryDateTime
else:
self._lastEntryDateTime = None
def setNBOfRawLogs(self, value):
if (not isinstance(value, int)):
raise TypeError("value")
if (value < 0):
raise ValueError("value")
else:
self._nbOfRawLogs = value
def __eq__(self, other):
if isinstance(other, Session):
return (self._name == other._name and
self._firstEntryDateTime == other._firstEntryDateTime and
self._lastEntryDateTime == other._lastEntryDateTime and
self._nbOfRawLogs == other._nbOfRawLogs)
return False
def __hash__(self):
return hash((self._name,
self._firstEntryDateTime,
self._lastEntryDateTime,
self._nbOfRawLogs))
```
#### File: airodb_analyzer/services/dbStorage.py
```python
import pymongo
import sys
from pymongo import MongoClient
from models.accessPoint import AccessPoint
from models.macAddress import MACAddress
from models.session import Session
from models.sessionAPStat import SessionAPStat
from datetime import datetime
from dateutil.parser import parse
class DBStorage():
def __init__(self, mongoClient=None):
if (mongoClient==None):
#try:
self._client = MongoClient()
self._db = self._client["airodb"]
self.dumps = self._db.airodb_dumps
self.trusted_aps = self._db.airodb_trustedAP
#Fix: start a count command to force the client to connect
self.dumps.count_documents({})
#except pymongo.errors.ServerSelectionTimeoutError:
# print("Unable to connect to the local MongoDB instance")
# sys.exit(2)
else:
self._client = mongoClient
self._db = self._client.airodb
self.dumps = self._db.airodb_dumps
self.trusted_aps = self._db.airodb_trustedAP
def __del__(self):
self._client.close()
def getSessionList(self):
retVal = []
sessions = self.dumps.aggregate([{"$match":{}},
{"$group": { "_id":"$SessionName",
"first": { "$first": "$FirstTimeSeen"},
"last": { "$last": "$LastTimeSeen"},
"count": { "$sum": 1}}},
{"$sort": {"name":1}}])
for session in sessions:
retVal.append(Session(session["_id"],
parse(session["first"]),
parse(session["last"]),
session["count"]))
return retVal
def getSessionAP(self, sessionName):
retVal = []
apList = self.dumps.aggregate([{"$match":{"SessionName":sessionName}},
{"$group": { "_id":"$BSSID",
"name": { "$last": "$ESSID" }}},
{"$sort": {"name":1}}])
for ap in apList:
retVal.append(AccessPoint(MACAddress(ap["_id"]), ap["name"]))
return retVal
def getSessionAPStats(self, sessionName, apMACAddress):
retVal = None
stat = list(self.dumps.aggregate([{"$match":{"SessionName":sessionName, "BSSID":apMACAddress.getValue()}}, {
"$group": { "_id":"$BSSID",
"name": { "$last": "$ESSID" },
"firstTimeSeen": { "$first": "$FirstTimeSeen"},
"lastTimeSeen": { "$last": "$LastTimeSeen"},
"encryption": { "$last": "$Privacy"},
"cipher": { "$last": "$Cipher"},
"authentification": { "$last": "$Authentification"},
"channel": { "$last": "$Channel"},
"speed": { "$last": "$Speed"},
"powerMin": { "$min": "$Power"},
"powerMax": { "$max": "$Power"},
"powerAvg": { "$avg": "$Power"}
}}]))
assert(len(stat) == 0 or len(stat) == 1)
if (len(stat) == 1):
retVal = SessionAPStat.createFromDict(stat[0])
return retVal
def getSessionAPRawLogs(self, sessionName, apMACAddress):
return self.dumps.find({"SessionName":sessionName, "BSSID":apMACAddress})
def getTrustedAPList(self):
retVal = []
trustedAPs = self.trusted_aps.find().sort("BSSID", 1)
for trustedAP in trustedAPs:
retVal.append(MACAddress(trustedAP["BSSID"]))
return retVal
def isTrustedAPExist(self, macAddress):
if (not isinstance(macAddress, MACAddress)):
raise TypeError("macAddress")
return (self.trusted_aps.count_documents({"BSSID": macAddress.getValue()}) > 0)
def insertTrustedAP(self, macAddress):
if (not isinstance(macAddress, MACAddress)):
raise TypeError("macAddress")
# Check if the MAC Address already exist
if (self.isTrustedAPExist(macAddress)):
raise RuntimeError(macAddress.getValue() + " already exist!")
self.trusted_aps.insert_one({ "BSSID": macAddress.getValue()})
def updateTrustedAP(self, macAddressOld, macAddressNew):
if (not isinstance(macAddressOld, MACAddress)):
raise TypeError("macAddressOld")
if (not isinstance(macAddressNew, MACAddress)):
raise TypeError("macAddressNew")
# Check if the MAC Address exist
if (not(self.isTrustedAPExist(macAddressOld))):
raise RuntimeError(macAddressOld.getValue() + " doesn't exist!")
# Check if the new MAC Address already exist and is not the old one
if (self.isTrustedAPExist(macAddressNew) and macAddressOld != macAddressNew):
raise RuntimeError(macAddressNew.getValue() + " already exist!")
self.trusted_aps.update_one({"BSSID": macAddressOld.getValue()},
{"$set": {"BSSID": macAddressNew.getValue()}})
def deleteTrustedAP(self, macAddress):
if (not isinstance(macAddress, MACAddress)):
raise TypeError("macAddress")
# Check if the MAC Address exist
if (not(self.isTrustedAPExist(macAddress))):
raise RuntimeError(macAddress.getValue() + " doesn't exist!")
self.trusted_aps.delete_one({"BSSID": macAddress.getValue()})
```
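A hedged usage sketch for `DBStorage`, assuming a local MongoDB instance already populated by airodb; the session name is a placeholder.

```python
# Hypothetical usage; "office-scan" is a placeholder session name.
from services.dbStorage import DBStorage

storage = DBStorage()  # connects to the local "airodb" database
for session in storage.getSessionList():
    print(session.getName(), session.getNBOfRawLogs())

aps = storage.getSessionAP("office-scan")
if aps:
    stats = storage.getSessionAPStats("office-scan", aps[0].getMACAddress())
```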
#### File: airodb_analyzer/ui/aboutBoxForm.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets, uic
import qdarkgraystyle
class Ui_AboutBoxForm(QtWidgets.QDialog):
def __init__(self):
super(Ui_AboutBoxForm, self).__init__()
uic.loadUi('airodb_analyzer/designer/aboutBoxForm.ui', self)
self.setStyleSheet(qdarkgraystyle.load_stylesheet())
#Signals
self.buttonClose.clicked.connect(self.buttonCloseClick)
def buttonCloseClick(self):
self.close()
```
#### File: airodb_analyzer/ui/mainForm.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from ui.openSessionForm import Ui_OpenSessionForm
from ui.aboutBoxForm import Ui_AboutBoxForm
from ui.manageTrustedAPsForm import Ui_ManageTrustedAPsForm
from services.dbStorage import DBStorage
from models.accessPoint import AccessPoint
from models.macAddress import MACAddress
from bson.json_util import dumps
class Ui_MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super(Ui_MainWindow, self).__init__()
uic.loadUi('airodb_analyzer/designer/mainForm.ui', self)
screenCenter = QtWidgets.QApplication.desktop().screen().rect().center()
self.move(screenCenter.x() - self.width() // 2, screenCenter.y() - self.height() // 2)
self.tabWidgetAPDetails.setVisible(False)
self._sessionName = ""
self.apListModel = QtGui.QStandardItemModel(self.listViewAP)
self.apListMACAddress = []
self.listViewAP.setModel(self.apListModel)
self.toggleControlsForSession(False)
self.lineEditFilterAPs.setVisible(False)
#Signals
self.tabWidgetAPDetails.currentChanged.connect(self.APDetailsTabChanged)
self.action_Quit.triggered.connect(self.menuQuitClick)
self.action_Open_session.triggered.connect(self.menuOpenSessionClick)
self.actionClose_session.triggered.connect(self.menuCloseSessionClick)
self.action_Open_session_toolbar.triggered.connect(self.menuOpenSessionClick)
self.action_Close_session_toolbar.triggered.connect(self.menuCloseSessionClick)
self.action_Show_all_APs.triggered.connect(self.menuShowAllClick)
self.action_Show_trusted_APs.triggered.connect(self.toggleMenusShowAPsClick)
self.action_Show_untrusted_APs.triggered.connect(self.toggleMenusShowAPsClick)
self.action_Show_hidden_APs.triggered.connect(self.toggleMenusShowAPsClick)
self.action_Manage_Trusted_access_points.triggered.connect(self.menuManageTrustedAPsClick)
self.action_ManageKnownAccessPoints_toolbar.triggered.connect(self.menuManageTrustedAPsClick)
self.actionAbout_airodb_analyzer.triggered.connect(self.menuAboutBoxClick)
self.listViewAP.selectionModel().selectionChanged.connect(self.listViewAPCurrentChange)
self.buttonAddToTrustedAP.clicked.connect(self.buttonAddToTrustedAPClick)
self.buttonRemoveFromTrustedAP.clicked.connect(self.buttonRemoveFromTrustedAPClick)
self.showMaximized()
def showEvent(self, event):
QtCore.QTimer.singleShot(200, lambda: self.lineEditFilterAPs.setStyleSheet("#lineEditFilterAPs { color: lightGray; }"))
def menuQuitClick(self):
self.close()
def menuAboutBoxClick(self):
formAboutBox = Ui_AboutBoxForm()
formAboutBox.exec_()
def loadSession(self, sessionName):
storage = DBStorage()
apList = storage.getSessionAP(sessionName)
self.loadTrustedAPs()
self._sessionName = sessionName
self.apListModel.clear()
self.apListMACAddress.clear()
for ap in apList:
apDisplayName = ap.getName()
if (ap.isHidden()):
apDisplayName = "<hidden>"
if ((not(self.action_Show_hidden_APs.isChecked()) and ap.isHidden())
or (not(self.action_Show_trusted_APs.isChecked()) and ap.isTrusted(self._trustedAPList))
or (not(self.action_Show_untrusted_APs.isChecked()) and not(ap.isTrusted(self._trustedAPList)))):
pass
else:
item = QtGui.QStandardItem(apDisplayName)
self.apListModel.appendRow(item)
self.apListMACAddress.append(ap.getMACAddress())
self.tabWidgetAPDetails.setVisible(False)
self.toggleControlsForSession(True)
def loadTrustedAPs(self):
storage = DBStorage()
self._trustedAPList = storage.getTrustedAPList()
def closeSession(self):
self._sessionName = ""
self.apListModel.clear()
self.apListMACAddress = []
self.tabWidgetAPDetails.setVisible(False)
self.toggleControlsForSession(False)
def toggleControlsForSession(self, isSessionOpen):
self.action_Close_session_toolbar.setEnabled(isSessionOpen)
self.actionClose_session.setEnabled(isSessionOpen)
self.action_Show_hidden_APs.setEnabled(isSessionOpen)
def loadAPRawLogs(self, sessionName, apMACAddress):
storage = DBStorage()
logs = storage.getSessionAPRawLogs(sessionName, apMACAddress)
rawLogs = ""
for log in logs:
rawLogs = rawLogs + dumps(log) + "\n"
self.labelRawLogs.setText(rawLogs)
def getSelectedAPMACAddress(self):
selectedRows = self.listViewAP.selectionModel().selectedRows()
if (len(selectedRows) > 0):
row = selectedRows[0].row()
return self.apListMACAddress[row]
else:
return None
def menuOpenSessionClick(self):
formOpenSession = Ui_OpenSessionForm()
result = formOpenSession.exec_()
if (result == QtWidgets.QDialog.Accepted):
self.loadSession(formOpenSession.selectedSession)
def menuCloseSessionClick(self):
self.closeSession()
def APDetailsTabChanged(self):
if (self.tabWidgetAPDetails.currentIndex() == 1):
apMACAddress = self.getSelectedAPMACAddress()
if (apMACAddress != None):
self.loadAPRawLogs(self._sessionName, apMACAddress.getValue())
def listViewAPCurrentChange(self):
apMACAddress = self.getSelectedAPMACAddress()
if (apMACAddress != None):
storage = DBStorage()
apStat = storage.getSessionAPStats(self._sessionName, apMACAddress)
if (apStat != None):
ap = AccessPoint(apStat.getMACAddress(), apStat.getName())
self.labelName.setText(apStat.getName())
self.labelMACAddress.setText(apStat.getMACAddress().getValue())
isTrustedAP = ap.isTrusted(self._trustedAPList)
self.buttonAddToTrustedAP.setVisible(not(isTrustedAP))
self.buttonRemoveFromTrustedAP.setVisible(isTrustedAP)
self.labelFirstTimeSeen.setText(str(apStat.getFirstTimeSeen()))
self.labelLastTimeSeen.setText(str(apStat.getLastTimeSeen()))
self.widgetProtectionDetails.setVisible(apStat.isProtected())
self.labelIsProtected.setText("True" if apStat.isProtected() else "False")
self.labelEncryption.setText(apStat.getEncryption())
self.labelCipher.setText(apStat.getCipher())
self.labelAuthentification.setText(apStat.getAuthentification())
self.labelChannel.setText(str(apStat.getChannel()))
self.labelSpeed.setText(str(apStat.getSpeed()))
self.labelPowerMin.setText(str(apStat.getPowerLevelMax())) #Max for best signal
self.labelPowerMax.setText(str(apStat.getPowerLevelMin())) #Min for worst signal
self.labelPowerAvg.setText(str(apStat.getPowerLevelAvg()))
self.tabWidgetAPDetails.setCurrentIndex(0)
self.tabWidgetAPDetails.setVisible(True)
def menuShowAllClick(self):
self.action_Show_trusted_APs.setChecked(True)
self.action_Show_untrusted_APs.setChecked(True)
self.action_Show_hidden_APs.setChecked(True)
self.loadSession(self._sessionName)
def toggleMenusShowAPsClick(self):
self.loadSession(self._sessionName)
def menuManageTrustedAPsClick(self):
formManageTrustedAPs = Ui_ManageTrustedAPsForm()
formManageTrustedAPs.exec_()
self.loadTrustedAPs()
def buttonAddToTrustedAPClick(self):
storage = DBStorage()
self.updateFromTrustedAPList(storage.insertTrustedAP)
def buttonRemoveFromTrustedAPClick(self):
storage = DBStorage()
self.updateFromTrustedAPList(storage.deleteTrustedAP)
def updateFromTrustedAPList(self, func):
apMACAddress = self.getSelectedAPMACAddress()
if (apMACAddress != None):
storage = DBStorage()
func(apMACAddress)
self.loadTrustedAPs()
ap = AccessPoint(apMACAddress, "")
isTrustedAP = ap.isTrusted(self._trustedAPList)
self.buttonAddToTrustedAP.setVisible(not(isTrustedAP))
self.buttonRemoveFromTrustedAP.setVisible(isTrustedAP)
```
#### File: airodb-analyzer/tests/accessPoint_test.py
```python
import unittest
from os import path
import sys
sys.path.append(path.join(path.dirname(path.dirname(path.abspath(__file__))), 'airodb_analyzer'))
from models.accessPoint import AccessPoint
from models.macAddress import MACAddress
class TestAccessPointMethods(unittest.TestCase):
def test_constructor_TestWithNoneMacAddress_ThrowTypeError(self):
with self.assertRaises(TypeError):
AccessPoint(None, None)
def test_constructor_TestWithStringMacAddress_ThrowTypeError(self):
with self.assertRaises(TypeError):
AccessPoint("12:34:56:78:89:FF", None)
def test_constructor_TestWithNoneName_ThrowTypeError(self):
with self.assertRaises(TypeError):
AccessPoint(MACAddress("12:34:56:78:89:FF"), None)
def test_constructor_TestWithEmptyName_ReturnValid(self):
AccessPoint(MACAddress("12:34:56:78:89:FF"), "")
def test_constructor_TestWithValidMACAndName_ReturnValid(self):
AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
def test_getMACAddress_TestWith1234567889FF_Return1234567889FF(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
expected = MACAddress("12:34:56:78:89:FF")
self.assertEqual(expected, ap.getMACAddress())
def test_getName_TestWithMyAP_ReturnMyAP(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
expected = "MyAP"
self.assertEqual(expected, ap.getName())
def test_setMACAddress_TestWithNoneMacAddress_ThrowTypeError(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
with self.assertRaises(TypeError):
ap.setMACAddress(None)
def test_setMACAddress_TestWithStringMacAddress_ThrowTypeError(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
with self.assertRaises(TypeError):
ap.setMACAddress("12:34:56:78:89:FF")
def test_setMACAddress_TestWithValidMAC_ReturnValid(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
ap.setMACAddress(MACAddress("12:34:56:77:89:FF"))
self.assertEqual(ap.getMACAddress(), MACAddress("12:34:56:77:89:FF"))
def test_setName_TestWithNoneName_ThrowTypeError(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
with self.assertRaises(TypeError):
ap.setName(None)
def test_setName_TestWithEmptyName_ReturnValid(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
ap.setName("")
self.assertEqual(ap.getName(), "")
def test_setName_TestWithNonEmptyName_ReturnValid(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
ap.setName("abc")
self.assertEqual(ap.getName(), "abc")
def test_equalityOperator_TestWithIdenticalValues_ReturnTrue(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
self.assertEqual(ap1, ap2)
def test_equalityOperator_TestWithDifferentMAC_ReturnFalse(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "MyAP")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
self.assertNotEqual(ap1, ap2)
def test_equalityOperator_TestWithDifferentName_ReturnFalse(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP1")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
self.assertNotEqual(ap1, ap2)
def test_equalityOperator_TestWithDifferentMACAndName_ReturnFalse(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "MyAP1")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
self.assertNotEqual(ap1, ap2)
def test_equalityOperator_TestWithAnotherType_ReturnFalse(self):
self.assertNotEqual(AccessPoint(MACAddress("12:34:56:78:89:FE"), "MyAP1"), 3)
def test_isHidden_TestWithEmptyName_ReturnTrue(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FE"), "")
self.assertTrue(ap.isHidden())
def test_isHidden_TestWithOneX00Name_ReturnTrue(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FE"), "\\x00")
self.assertTrue(ap.isHidden())
def test_isHidden_TestWithTwoX00Name_ReturnTrue(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FE"), "\\x00\\x00")
self.assertTrue(ap.isHidden())
def test_isHidden_TestWithThreeX00Name_ReturnTrue(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FE"), "\\x00\\x00\\x00")
self.assertTrue(ap.isHidden())
def test_isHidden_TestWithValidName_ReturnFalse(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FE"), "Home")
self.assertFalse(ap.isHidden())
def test_hashOperator_TestWithDifferentMAC_ReturnDifferentHash(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "Home")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "Home")
self.assertNotEqual(hash(ap1), hash(ap2))
def test_hashOperator_TestWithDifferentName_ReturnDifferentHash(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "Home")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "Home1")
self.assertNotEqual(hash(ap1), hash(ap2))
def test_hashOperator_TestWithIdenticalValues_ReturnSameHash(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "Home")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "Home")
self.assertEqual(hash(ap1), hash(ap2))
```
|
{
"source": "jeremydumais/airodb",
"score": 3
}
|
#### File: airodb/tests/optionParser_test.py
```python
import unittest
from os import path
import sys
sys.path.append(path.join(path.dirname(path.dirname(path.abspath(__file__))), 'airodb'))
from optionParser import OptionParser
class TestOptionParserMethods(unittest.TestCase):
def test_constructor_TestWithNoneOptions_ThrowTypeError(self):
try:
parser = OptionParser(None)
raise AssertionError("Constructor should have failed")
except TypeError:
pass
def test_constructor_TestWithStringOptions_ThrowTypeError(self):
try:
parser = OptionParser("test")
raise AssertionError("Constructor should have failed")
except TypeError:
pass
def test_constructor_TestWithEmptyListOptions_ReturnValid(self):
parser = OptionParser([])
self.assertEqual(0, parser.Count())
def test_constructor_TestWithOneShortOptListOptions_ReturnValid(self):
parser = OptionParser([("-s", "test")])
self.assertEqual(1, parser.Count())
def test_constructor_TestWithTwoShortOptListOptions_ReturnValid(self):
parser = OptionParser([("-s", "test"), ("-i", "eth0")])
self.assertEqual(2, parser.Count())
def test_constructor_TestWithOneNotTupleStringOptElement_ThrowTypeError(self):
try:
parser = OptionParser(["test"])
raise AssertionError("Constructor should have failed")
except TypeError:
pass
def test_constructor_TestWithOneNotTupleIntOptElement_ThrowTypeError(self):
try:
parser = OptionParser([3])
raise AssertionError("Constructor should have failed")
except TypeError:
pass
def test_Count_TestWithOneShortOptListOptions_Return1(self):
parser = OptionParser([("-s", "test")])
self.assertEqual(1, parser.Count())
def test_Count_TestWithTwoShortOptListOptions_Return2(self):
parser = OptionParser([("-s", "test"), ("-i", "eth0")])
self.assertEqual(2, parser.Count())
def test_IsOptionExist_TestWithDashS_ReturnTrue(self):
parser = OptionParser([("-s", "test")])
self.assertTrue(parser.IsOptionExist("-s"))
def test_IsOptionExist_TestWithDashW_ReturnFalse(self):
parser = OptionParser([("-s", "test")])
self.assertFalse(parser.IsOptionExist("-w"))
def test_IsOptionExist_TestWithSessionLongFormat_ReturnTrue(self):
parser = OptionParser([("--session", "test")])
self.assertTrue(parser.IsOptionExist("--session"))
def test_IsOptionExist_TestWithInterfaceLongFormat_ReturnFalse(self):
parser = OptionParser([("--session", "test")])
self.assertFalse(parser.IsOptionExist("--interface"))
def test_GetOptionValue_TestWithSessionLongFormat_ReturnTrue(self):
parser = OptionParser([("--session", "test")])
self.assertEqual("test", parser.GetOptionValue("--session"))
def test_GetOptionValue_TestWithSessionLongFormatTwoOpts_ReturnTrue(self):
parser = OptionParser([("--session", "test"), ("--interface", "eth0")])
self.assertEqual("test", parser.GetOptionValue("--session"))
def test_GetOptionValue_TestWithInterfaceLongFormatTwoOpts_ReturnTrue(self):
parser = OptionParser([("--session", "test"), ("--interface", "eth0")])
self.assertEqual("eth0", parser.GetOptionValue("--interface"))
def test_GetOptionValue_NotExistingOpt_ReturnNone(self):
parser = OptionParser([("--session", "test"), ("--interface", "eth0")])
self.assertEqual(None, parser.GetOptionValue("--bla"))
def test_IsOptionExistAndValueIsNotEmpty_TestWithDashSAndValue_ReturnTrue(self):
parser = OptionParser([("-s", "test")])
self.assertTrue(parser.IsOptionExistAndValueIsNotEmpty("-s"))
def test_IsOptionExistAndValueIsNotEmpty_TestWithDashSAndEmptyValue_ReturnFalse(self):
parser = OptionParser([("-s", "")])
self.assertFalse(parser.IsOptionExistAndValueIsNotEmpty("-s"))
def test_IsOptionExistAndValueIsNotEmpty_TestWithSessionAndValueLongFormat_ReturnTrue(self):
parser = OptionParser([("--session", "test")])
self.assertTrue(parser.IsOptionExistAndValueIsNotEmpty("--session"))
def test_IsOptionExistAndValueIsNotEmpty_TestWithSessionAndEmptyValueLongFormat_ReturnFalse(self):
parser = OptionParser([("--session", "")])
self.assertFalse(parser.IsOptionExistAndValueIsNotEmpty("--session"))
def test_IsOptionExistAndValueIsNotEmpty_TestWithInterfaceAndValueLongFormat_ReturnTrue(self):
parser = OptionParser([("--session", "test"), ("--interface", "eth0")])
self.assertTrue(parser.IsOptionExistAndValueIsNotEmpty("--interface"))
def test_IsOptionExistAndValueIsNotEmpty_TestWithInterfaceAndEmptyValueLongFormat_ReturnFalse(self):
parser = OptionParser([("--session", ""), ("--interface", "")])
self.assertFalse(parser.IsOptionExistAndValueIsNotEmpty("--interface"))
def test_GetOptionValueOverload_TestWithSessionLongFormatTwoOpts_ReturnTrue(self):
parser = OptionParser([("--session", "test"), ("--interface", "eth0")])
self.assertEqual("test", parser.GetOptionValue("-s", "--session"))
def test_GetOptionValueOverload_TestWithSessionShortFormatTwoOpts_ReturnTrue(self):
parser = OptionParser([("-s", "test"), ("--interface", "eth0")])
self.assertEqual("test", parser.GetOptionValue("-s", "--session"))
def test_GetOptionValueOverload_TestWithSeShortFormatTwoOpts_ReturnFalse(self):
parser = OptionParser([("-s", "test"), ("--interface", "eth0")])
self.assertEqual(None, parser.GetOptionValue("-se", "--session"))
```
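The tuples fed to `OptionParser` in these tests have the same shape as the pairs returned by `getopt.getopt`, so a plausible wiring in the airodb CLI might look like the sketch below; the option spec and flags are assumptions, not taken from the repository.

```python
# Hypothetical CLI wiring; the getopt option spec here is an assumption.
import getopt
import sys

from optionParser import OptionParser

opts, args = getopt.getopt(sys.argv[1:], "s:i:", ["session=", "interface="])
parser = OptionParser(opts)
if parser.IsOptionExistAndValueIsNotEmpty("-s") or parser.IsOptionExistAndValueIsNotEmpty("--session"):
    session = parser.GetOptionValue("-s", "--session")
```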
|
{
"source": "jeremydw/a4p",
"score": 2
}
|
#### File: backend/app/campaigns.py
```python
from . import base
from . import emails
from . import messages
from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop
class Campaign(base.Model):
end = ndb.DateTimeProperty()
goal = ndb.FloatProperty(default=1000)
num_orders = ndb.IntegerProperty()
raised = ndb.FloatProperty()
start = ndb.DateTimeProperty()
artist_name = ndb.StringProperty()
title = ndb.StringProperty()
story = ndb.TextProperty()
@classmethod
def get(cls, ident):
key = ndb.Key('Campaign', ident)
return key.get()
@classmethod
def get_or_create(cls, ident):
key = ndb.Key('Campaign', ident)
ent = key.get()
if ent is None:
ent = cls(key=key)
ent.put()
return ent
@property
def percent_raised(self):
if self.raised and self.goal:
val = self.raised / self.goal * 100
val = '{0:.2f}'.format(round(val, 2))
return float(val)
return 0.0
@property
def url(self):
return 'https://artforx.com/'
@property
def average(self):
if self.raised and self.goal:
val = self.raised / self.num_orders
val = '{0:.2f}'.format(round(val, 2))
return float(val)
return 0.0
def add_order(self, order):
self.num_orders = self.num_orders or 0
self.num_orders += 1
self.raised = self.raised or 0
self.raised += order.amount
self.put()
def to_message(self):
msg = messages.CampaignMessage()
msg.num_orders = self.num_orders
msg.raised = self.raised
msg.goal = self.goal
msg.ident = self.ident
msg.percent_raised = self.percent_raised
msg.end = self.end
msg.start = self.start
msg.average = self.average
return msg
def send_email(self, message):
emailer = emails.Emailer(sender_name=message.sender_name)
subject = 'I just donated to the ACLU by supporting an art fundraising campaign'
kwargs = {
'campaign': self,
'user_supplied_body': message.user_supplied_body,
}
emailer.send(
to=[message.recipient_email, message.sender_email],
subject=subject,
template='email.html',
kwargs=kwargs)
```
#### File: backend/app/emails.py
```python
from . import messages
from google.appengine.api import mail
from google.appengine.ext import ndb
import appengine_config
import jinja2
import os
import webapp2
import jinja2
@jinja2.evalcontextfilter
def nl2br(eval_ctx, value):
result = jinja2.escape(value).unescape().replace('\n', '<br>')
if eval_ctx.autoescape:
result = jinja2.Markup(result)
return result
class Emailer(object):
def __init__(self, sender_name):
self.sender = '{} via Art for X <{}>'.format(sender_name, appengine_config.EMAIL_SENDER)
def send(self, to, subject, template, kwargs):
html = self._render(template, kwargs)
self._send(subject, to, html)
def _render(self, template, kwargs):
jinja_template = self.env.get_template(template)
return jinja_template.render(kwargs)
def _send(self, subject, to, html):
message = mail.EmailMessage(sender=self.sender, subject=subject)
message.to = to
message.html = html
message.send()
@webapp2.cached_property
def env(self):
here = os.path.dirname(__file__)
path = os.path.join(os.path.dirname(__file__), 'templates')
loader = jinja2.FileSystemLoader([path])
env = jinja2.Environment(
loader=loader,
autoescape=True,
trim_blocks=True)
env.filters['nl2br'] = nl2br
return env
```
#### File: backend/app/handlers.py
```python
import logging
import os
from google.appengine.ext.webapp import mail_handlers
#import jinja2
#_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'build'))
#_loader = jinja2.FileSystemLoader(_path)
#_env = jinja2.Environment(loader=_loader, autoescape=True, trim_blocks=True)
#class EmailHandler(mail_handlers.InboundMailHandler):
#
# def receive(self, message):
# logging.info(message)
#
#
#class FrontendHandler(airlock.Handler):
#
# def get(self):
# if not self.me.registered:
# url = self.urls.sign_in()
# self.redirect(url)
# return
# template_name = 'index.html'
# template = _env.get_template(template_name)
# params = {}
# params.update({
# 'me': self.me,
# 'version': os.getenv('CURRENT_VERSION_ID', 'xxx'),
# })
# self.response.write(template.render(params))
```
|
{
"source": "jeremydw/google-apputils-python",
"score": 3
}
|
#### File: google-apputils-python/google_apputils/humanize.py
```python
import datetime
import math
import re
SIBILANT_ENDINGS = frozenset(['sh', 'ss', 'tch', 'ax', 'ix', 'ex'])
DIGIT_SPLITTER = re.compile(r'\d+|\D+').findall
# These are included because they are common technical terms.
SPECIAL_PLURALS = {
'index': 'indices',
'matrix': 'matrices',
'vertex': 'vertices',
}
VOWELS = frozenset('AEIOUaeiou')
# In Python 2.6, int(float('nan')) intentionally raises a ValueError. This code
# attempts to futureproof us against that eventual upgrade.
try:
_IsNan = math.isnan
except AttributeError:
def _IsNan(x):
return type(x) is float and x != x
def Commas(value):
"""Formats an integer with thousands-separating commas.
Args:
value: An integer.
Returns:
A string.
"""
if value < 0:
sign = '-'
value = -value
else:
sign = ''
result = []
while value >= 1000:
result.append('%03d' % (value % 1000))
value /= 1000
result.append('%d' % value)
return sign + ','.join(reversed(result))
def Plural(quantity, singular, plural=None):
"""Formats an integer and a string into a single pluralized string.
Args:
quantity: An integer.
singular: A string, the singular form of a noun.
plural: A string, the plural form. If not specified, then simple
English rules of regular pluralization will be used.
Returns:
A string.
"""
return '%d %s' % (quantity, PluralWord(quantity, singular, plural))
def PluralWord(quantity, singular, plural=None):
"""Builds the plural of an English word.
Args:
quantity: An integer.
singular: A string, the singular form of a noun.
plural: A string, the plural form. If not specified, then simple
English rules of regular pluralization will be used.
Returns:
the plural form of the word.
"""
if quantity == 1:
return singular
if plural:
return plural
if singular in SPECIAL_PLURALS:
return SPECIAL_PLURALS[singular]
# We need to guess what the English plural might be. Keep this
# function simple! It doesn't need to know about every possibility;
# only regular rules and the most common special cases.
#
# Reference: http://en.wikipedia.org/wiki/English_plural
for ending in SIBILANT_ENDINGS:
if singular.endswith(ending):
return '%ses' % singular
if singular.endswith('o') and singular[-2:-1] not in VOWELS:
return '%ses' % singular
if singular.endswith('y') and singular[-2:-1] not in VOWELS:
return '%sies' % singular[:-1]
return '%ss' % singular
def WordSeries(words, conjunction='and'):
"""Convert a list of words to an English-language word series.
Args:
words: A list of word strings.
conjunction: A coordinating conjunction.
Returns:
A single string containing all the words in the list separated by commas,
the coordinating conjunction, and a serial comma, as appropriate.
"""
num_words = len(words)
if num_words == 0:
return ''
elif num_words == 1:
return words[0]
elif num_words == 2:
return (' %s ' % conjunction).join(words)
else:
return '%s, %s %s' % (', '.join(words[:-1]), conjunction, words[-1])
def AddIndefiniteArticle(noun):
"""Formats a noun with an appropriate indefinite article.
Args:
noun: A string representing a noun.
Returns:
A string containing noun prefixed with an indefinite article, e.g.,
"a thing" or "an object".
"""
if not noun:
raise ValueError('argument must be a word: {!r}'.format(noun))
if noun[0] in VOWELS:
return 'an ' + noun
else:
return 'a ' + noun
def DecimalPrefix(quantity, unit, precision=1, min_scale=0, max_scale=None):
"""Formats an integer and a unit into a string, using decimal prefixes.
The unit will be prefixed with an appropriate multiplier such that
the formatted integer is less than 1,000 (as long as the raw integer
is less than 10**27). For example:
DecimalPrefix(576012, 'bps') -> '576 kbps'
DecimalPrefix(1574215, 'bps', 2) -> '1.6 Mbps'
Only the SI prefixes which are powers of 10**3 will be used, so
DecimalPrefix(100, 'thread') is '100 thread', not '1 hthread'.
See also:
BinaryPrefix()
Args:
quantity: A number.
unit: A string.
precision: An integer, the minimum number of digits to display.
min_scale: minimum power of 1000 to scale to, (None = unbounded).
max_scale: maximum power of 1000 to scale to, (None = unbounded).
Returns:
A string.
"""
return _Prefix(quantity, unit, precision, DecimalScale, min_scale=min_scale,
max_scale=max_scale)
def BinaryPrefix(quantity, unit, precision=1):
"""Formats an integer and a unit into a string, using binary prefixes.
The unit will be prefixed with an appropriate multiplier such that
the formatted integer is less than 1,024 (as long as the raw integer
is less than 2**90). For example:
BinaryPrefix(576012, 'B') -> '562 KiB'
See also:
DecimalPrefix()
Args:
quantity: A number.
unit: A string.
precision: An integer, the minimum number of digits to display.
Returns:
A string.
"""
return _Prefix(quantity, unit, precision, BinaryScale)
def _Prefix(quantity, unit, precision, scale_callable, **args):
"""Formats an integer and a unit into a string.
Args:
quantity: A number.
unit: A string.
precision: An integer, the minimum number of digits to display.
scale_callable: A callable, scales the number and units.
**args: named arguments passed to scale_callable.
Returns:
A string.
"""
if not quantity:
return '0 %s' % unit
if quantity in [float('inf'), float('-inf')] or _IsNan(quantity):
return '%f %s' % (quantity, unit)
scaled_quantity, scaled_unit = scale_callable(quantity, unit, **args)
print_pattern = '%%.%df %%s' % max(0, (precision - int(
math.log(abs(scaled_quantity), 10)) - 1))
return print_pattern % (scaled_quantity, scaled_unit)
def DecimalScale(quantity, unit, min_scale=0, max_scale=None):
"""Get the scaled value and decimal prefixed unit in a tupple.
DecimalScale(576012, 'bps') -> (576.012, 'kbps')
DecimalScale(1574215, 'bps') -> (1.574215, 'Mbps')
Args:
quantity: A number.
unit: A string.
min_scale: minimum power of 1000 to normalize to (None = unbounded)
max_scale: maximum power of 1000 to normalize to (None = unbounded)
Returns:
A tuple of the scaled quantity (float) and the decimal-prefixed unit
(string).
"""
if min_scale is None or min_scale < 0:
negative_powers = ('m', u'µ', 'n', 'p', 'f', 'a', 'z', 'y')
if min_scale is not None:
negative_powers = tuple(negative_powers[0:-min_scale])
else:
negative_powers = None
if max_scale is None or max_scale > 0:
powers = ('k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
if max_scale is not None:
powers = tuple(powers[0:max_scale])
return _Scale(quantity, unit, 1000, powers, negative_powers)
def BinaryScale(quantity, unit):
"""Get the scaled value and binary prefixed unit in a tupple.
BinaryPrefix(576012, 'B') -> (562.51171875, 'KiB')
Args:
quantity: A number.
unit: A string.
Returns:
A tuple of the scaled quantity (float) and the binary-prefixed unit
(string).
"""
return _Scale(quantity, unit, 1024,
('Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'))
def _Scale(quantity, unit, multiplier, prefixes, inverse_prefixes=None):
"""Returns the formatted quantity and unit into a tuple.
Args:
quantity: A number.
unit: A string
multiplier: An integer, the ratio between prefixes.
prefixes: A sequence of strings.
inverse_prefixes: Prefixes to use for negative powers,
If empty or None, no scaling is done for fractions.
Returns:
A tuple containing the raw scaled quantity and the prefixed unit.
"""
if not quantity:
return 0, unit
if quantity in [float('inf'), float('-inf')] or _IsNan(quantity):
return quantity, unit
power = int(math.log(abs(quantity), multiplier))
if abs(quantity) > 1 or not inverse_prefixes:
if power < 1:
return quantity, unit
power = min(power, len(prefixes))
prefix = prefixes[power - 1]
value = float(quantity) / multiplier ** power
else:
power = min(-power + 1, len(inverse_prefixes))
prefix = inverse_prefixes[power - 1]
value = quantity * multiplier ** power
return value, prefix + unit
# Contains the fractions where the full range [1/n ... (n - 1) / n]
# is defined in Unicode.
FRACTIONS = {
3: (None, u'⅓', u'⅔', None),
5: (None, u'⅕', u'⅖', u'⅗', u'⅘', None),
8: (None, u'⅛', u'¼', u'⅜', u'½', u'⅝', u'¾', u'⅞', None),
}
FRACTION_ROUND_DOWN = 1.0 / (max(FRACTIONS.keys()) * 2.0)
FRACTION_ROUND_UP = 1.0 - FRACTION_ROUND_DOWN
def PrettyFraction(number, spacer=''):
"""Convert a number into a string that might include a unicode fraction.
This method returns the integer representation followed by the closest
fraction of a denominator 2, 3, 4, 5 or 8.
For instance, 0.33 will be converted to 1/3.
The resulting representation should be less than 1/16 off.
Args:
number: a python number
spacer: an optional string to insert between the integer and the fraction
default is an empty string.
Returns:
a unicode string representing the number.
"""
# We do not want small negative numbers to display as -0.
if number < -FRACTION_ROUND_DOWN:
return u'-%s' % PrettyFraction(-number)
number = abs(number)
rounded = int(number)
fract = number - rounded
if fract >= FRACTION_ROUND_UP:
return str(rounded + 1)
errors_fractions = []
for denominator, fraction_elements in FRACTIONS.items():
numerator = int(round(denominator * fract))
error = abs(fract - (float(numerator) / float(denominator)))
errors_fractions.append((error, fraction_elements[numerator]))
unused_error, fraction_text = min(errors_fractions)
if rounded and fraction_text:
return u'%d%s%s' % (rounded, spacer, fraction_text)
if rounded:
return str(rounded)
if fraction_text:
return fraction_text
return u'0'
def Duration(duration, separator=' '):
"""Formats a nonnegative number of seconds into a human-readable string.
Args:
duration: A float duration in seconds.
separator: A string separator between days, hours, minutes and seconds.
Returns:
Formatted string like '5d 12h 30m 45s'.
"""
try:
delta = datetime.timedelta(seconds=duration)
except OverflowError:
return '>=' + TimeDelta(datetime.timedelta.max)
return TimeDelta(delta, separator=separator)
def TimeDelta(delta, separator=' '):
"""Format a datetime.timedelta into a human-readable string.
Args:
delta: The datetime.timedelta to format.
separator: A string separator between days, hours, minutes and seconds.
Returns:
Formatted string like '5d 12h 30m 45s'.
"""
parts = []
seconds = delta.seconds
if delta.days:
parts.append('%dd' % delta.days)
if seconds >= 3600:
parts.append('%dh' % (seconds // 3600))
seconds %= 3600
if seconds >= 60:
parts.append('%dm' % (seconds // 60))
seconds %= 60
seconds += delta.microseconds / 1e6
if seconds or not parts:
parts.append('%gs' % seconds)
return separator.join(parts)
def NaturalSortKey(data):
"""Key function for "natural sort" ordering.
This key function produces a "natural" ordering in which runs of digits
compare numerically rather than lexicographically. For example:
- ['1', '3', '20'] (not ['1', '20', '3']).
- ['Model 9', 'Model 70 SE', 'Model 70 SE2']
(not ['Model 70 SE', 'Model 70 SE2', 'Model 9']).
Usage:
new_list = sorted(old_list, key=humanize.NaturalSortKey)
or
list_sort_in_place.sort(key=humanize.NaturalSortKey)
Based on code by <NAME> <<EMAIL>>.
Args:
data: str, The key being compared in a sort.
Returns:
A list which is comparable to other lists for the purpose of sorting.
"""
segments = DIGIT_SPLITTER(data)
for i, value in enumerate(segments):
if value.isdigit():
segments[i] = int(value)
return segments
```
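A few illustrative calls, mostly mirroring the examples already given in the docstrings above; the import path follows the repository layout, and the expected outputs assume a Python 2 interpreter, which this library targets.

```python
# Expected outputs are taken from, or computed to match, the docstrings above.
from google_apputils import humanize

print(humanize.Commas(1234567))                  # '1,234,567'
print(humanize.Plural(3, 'index'))               # '3 indices'
print(humanize.DecimalPrefix(576012, 'bps'))     # '576 kbps'
print(humanize.DecimalPrefix(1574215, 'bps', 2)) # '1.6 Mbps'
print(humanize.Duration(45045))                  # '12h 30m 45s'
print(sorted(['Model 9', 'Model 70 SE', 'Model 70 SE2'], key=humanize.NaturalSortKey))
```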
|
{
"source": "jeremydw/image-processor",
"score": 2
}
|
#### File: jeremydw/image-processor/process.py
```python
import sys
sys.path.insert(0, 'lib')
from PIL import Image
from psd_tools import PSDImage
import os
import yaml
CONFIG_FILE = 'config.yaml.txt'
CONFIG = yaml.load(open(CONFIG_FILE))
INPUT_PATH = CONFIG['input']
OUT_DIR = CONFIG['out_dir']
def _get_image():
if INPUT_PATH.endswith('psd'):
psd = PSDImage.load(INPUT_PATH)
return psd.as_PIL()
else:
return Image.open(INPUT_PATH)
def _get_size(image, rule):
if rule.get('trim') not in (None, False):
return image.size
return rule['size']
def _get_offset(image, size):
return (max((size[0] - image.size[0]) / 2, 0),
max((size[1] - image.size[1]) / 2, 0))
def process(rule):
for fmt in rule['formats']:
size = rule['size']
path = os.path.join(OUT_DIR, rule['name'] + '.' + fmt.lower())
image = _get_image()
image.thumbnail(size, Image.ANTIALIAS)
size = _get_size(image, rule)
offset = _get_offset(image, size)
background_color = rule.get('background', (255, 255, 255, 1))
background = Image.new('RGBA', size, tuple(background_color))
image.load() # Required for split.
mask = image.split()[-1]
background.paste(image, offset, mask=mask)
background.save(
path,
fmt,
quality=rule.get('quality', 100),
optimize=True,
progressive=True)
print('Saved: {}'.format(path))
def main():
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
rules = CONFIG['rules']
for rule in rules:
process(rule)
if __name__ == '__main__':
main()
```
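For reference, a hypothetical `config.yaml.txt`, shown here as the parsed Python structure `yaml.load` would return; the key names match what `process.py` reads, while the file names and rule values are placeholders.

```python
# Hypothetical parsed config; keys match what process.py reads, values are illustrative.
CONFIG = {
    'input': 'artwork.psd',  # PSD or regular image input
    'out_dir': 'build',
    'rules': [
        {'name': 'thumbnail', 'size': [200, 200], 'formats': ['PNG'], 'quality': 90},
        {'name': 'banner', 'size': [1200, 400], 'trim': True,
         'background': [255, 255, 255, 255], 'formats': ['PNG']},
    ],
}
```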
|
{
"source": "jeremyelster/IRS990",
"score": 3
}
|
#### File: IRS990/irsparser/irs_db_utils.py
```python
import sqlite3
import pandas as pd
class DBConnect:
def __init__(self, db_path):
self.con = sqlite3.connect(db_path)
self.cur = self.con.cursor()
def initialize_db(self):
self.cur.execute("DROP TABLE IF EXISTS officer_payment")
self.cur.execute("DROP TABLE IF EXISTS schedule_j")
self.cur.execute("DROP TABLE IF EXISTS irs_dashboard")
self.cur.execute("DROP TABLE IF EXISTS grants")
self.cur.execute(officer_payment_sql)
self.cur.execute(schedule_j_sql)
self.cur.execute(irs_dashboard_sql)
self.cur.execute(grants_sql)
def saveDF(self, df_pandas, table, insert="replace", index=False):
df_pandas.to_sql(table, con=self.con, if_exists=insert, index=index)
def query(self, sqlStr):
df = pd.read_sql(sqlStr, con=self.con)
return df
irs_dashboard_sql = """
CREATE TABLE irs_dashboard (
EIN text,
URL text,
LastUpdated text,
OrganizationName text,
TaxPeriod text,
TaxPeriodBeginDt text,
TaxPeriodEndDt text,
TaxYr text,
StateAbbr text,
Mission text,
TotalEmployee text,
ObjectId text,
NTEECommonCode text,
Foundation text,
OfficerName text,
OfficerTitle text,
OfficerCompensationPart9 float,
GrantDesc text,
GrantMoneyTotal float,
ProgramExpenses float,
PYTotalRevenue float,
CYTotalRevenue float,
PYRevenuesLessExpenses float,
CYRevenuesLessExpenses float,
PYSalaryBenefitsPaid float,
CYSalaryBenefitsPaid float,
TotalAssetsBOY float,
TotalAssetsEOY float,
TotalLiabilitiesBOY float,
TotalLiabilitiesEOY float,
TotalExpenses float,
CYTotalExpenses float,
PYTotalExpenses float,
Part9_1GrantsGov float,
Part9_2GrantsIndv float,
Part9_3GrantsForGov float,
Part9_4Benefits float,
Part9_5OfficerComp float,
Part9_6DisqComp float,
Part9_7OtherSalary float,
Part9_8Pension float,
Part9_9OtherBenefits float,
WorkingCapital float,
LiabilitiesToAsset float,
SurplusMargin float,
ProgramExp float,
ScheduleA text,
ScheduleJ text,
ScheduleI text,
ScheduleO text)"""
officer_payment_sql = """
CREATE TABLE officer_payment (
EIN text PRIMARY KEY,
ObjectId text,
OrganizationName text,
TaxYr text,
StateAbbr text,
PersonNm text,
TitleTxt text,
AverageHoursPerWeekRt float,
ReportableCompFromOrgAmt float,
OtherCompensationAmt float,
ReportableCompFromRltdOrgAmt float,
AverageHoursPerWeekRltdOrgRt float,
IndividualTrusteeOrDirectorInd bool,
OfficerInd bool,
HighestCompensatedEmployeeInd bool,
FormerOfcrDirectorTrusteeInd bool,
KeyEmployeeInd bool,
InstitutionalTrusteeInd bool,
TotalCompFromOrgAmt float)"""
schedule_j_sql = """
CREATE TABLE schedule_j (
EIN text,
ObjectId text,
OrganizationName text,
StateAbbr text,
TaxYr text,
PersonNm text,
TitleTxt text,
TotalCompensationFilingOrgAmt float,
BaseCompensationFilingOrgAmt float,
BonusFilingOrganizationAmount float,
OtherCompensationFilingOrgAmt float,
DeferredCompensationFlngOrgAmt float,
NontaxableBenefitsFilingOrgAmt float,
TotalCompensationRltdOrgsAmt float,
OtherCompensationRltdOrgsAmt float,
BonusRelatedOrganizationsAmt float,
CompensationBasedOnRltdOrgsAmt float,
DeferredCompRltdOrgsAmt float,
NontaxableBenefitsRltdOrgsAmt float,
CompReportPrior990FilingOrgAmt float,
CompReportPrior990RltdOrgsAmt float,
SeverancePaymentInd bool,
TravelForCompanionsInd text)"""
grants_sql = """
CREATE TABLE grants (
EIN text,
ObjectId text,
OrganizationName text,
TaxYr text,
Address text,
City text,
StateAbbr text,
RecipientEIN text,
RecipientBusinessName_BusinessNameLine1Txt text,
PurposeOfGrantTxt text,
CashGrantAmt float,
NonCashAssistanceAmt float,
NonCashAssistanceDesc text,
IRCSectionDesc text,
USAddress_CityNm text,
USAddress_StateAbbreviationCd text,
ForeignAddress_AddressLine1Txt text,
ForeignAddress_CountryCd text)"""
```
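A minimal usage sketch for `DBConnect`; the database path, the `df_officers` data frame, and the query are placeholders, and the import path is assumed from the package layout above.

```python
# Hypothetical usage; "irs990.sqlite" and df_officers are placeholders.
from irsparser.irs_db_utils import DBConnect

db = DBConnect("irs990.sqlite")
db.initialize_db()                         # drops and recreates the four tables above
db.saveDF(df_officers, "officer_payment")  # df_officers: a parsed officer data frame
top_paid = db.query(
    "SELECT PersonNm, TotalCompFromOrgAmt FROM officer_payment "
    "ORDER BY TotalCompFromOrgAmt DESC LIMIT 10")
```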
#### File: Notebooks/Parsing/officerListParser.py
```python
import pandas as pd
import numpy as np
from pandas.io.json import json_normalize
import flatten_json
import sys
sys.path.append("../../")
import irsparser as irs
def parse_officer_list(df):
"""Parse the Officer List and build new dataframe officer_list
Takes the OfficerList column returned from IRS990 and builds it into a data frame
with each officer getting their own row"""
officers_cols = ["EIN", "ObjectId", "OrganizationName", "TaxYr", "StateAbbr", "OfficerList"]
df_tmp = df[officers_cols].copy()
officer_list = pd.DataFrame()
for i, row in enumerate(df_tmp.itertuples()):
if row[6] is not None:
tbl = row[6]
tmp = json_normalize(tbl)
tmp["EIN"] = row[1]
tmp["ObjectId"] = row[2]
tmp["OrganizationName"] = row[3]
tmp["TaxYr"] = row[4]
tmp["StateAbbr"] = row[5]
officer_list = officer_list.append(tmp, sort=False)
if i % 500 == 0:
print(f"Parsed {str(i)} of {str(len(df_tmp))}: {str(round(100. * i/len(df_tmp), 2))}%")
print(f"Number of officers with PersonNm: {officer_list['PersonNm'].notnull().sum()}")
print(f"Number of officers with PersonNm: {officer_list['BusinessName.BusinessNameLine1Txt'].notnull().sum()}")
print(f"Number of officers with PersonNm: {officer_list['BusinessName.BusinessNameLine1'].notnull().sum()}")
# Consolidate Parsing Quirks
names = np.where(
officer_list["PersonNm"].isnull(),
officer_list["BusinessName.BusinessNameLine1Txt"],
officer_list["PersonNm"])
names = np.where(pd.Series(names).isnull(), officer_list["BusinessName.BusinessNameLine1"], names)
officer_list["PersonNm"] = names
del officer_list['BusinessName.BusinessNameLine1Txt']
del officer_list['BusinessName.BusinessNameLine2Txt']
del officer_list['BusinessName.BusinessNameLine1']
del officer_list['BusinessName.BusinessNameLine2']
column_order = [
'EIN', 'ObjectId', 'OrganizationName', 'TaxYr', 'StateAbbr',
'PersonNm', 'TitleTxt', 'AverageHoursPerWeekRt',
'ReportableCompFromOrgAmt', 'OtherCompensationAmt',
# Other org
'ReportableCompFromRltdOrgAmt', 'AverageHoursPerWeekRltdOrgRt',
# Flags
'IndividualTrusteeOrDirectorInd', 'OfficerInd',
'HighestCompensatedEmployeeInd', 'FormerOfcrDirectorTrusteeInd',
'KeyEmployeeInd', 'InstitutionalTrusteeInd']
# Binarize the Position Type
type_cols = [
'IndividualTrusteeOrDirectorInd', 'HighestCompensatedEmployeeInd',
'FormerOfcrDirectorTrusteeInd', 'KeyEmployeeInd',
'InstitutionalTrusteeInd', 'OfficerInd']
for col in type_cols:
officer_list[col] = np.where(officer_list[col] == 'X', True, False)
# Convert Number Columns from String to Float
num_cols = [
"AverageHoursPerWeekRt", "ReportableCompFromOrgAmt", "OtherCompensationAmt",
"ReportableCompFromRltdOrgAmt", "AverageHoursPerWeekRltdOrgRt"]
for col in num_cols:
officer_list[col] = officer_list[col].fillna(0).astype(float)
# Caps Names
officer_list["PersonNm"] = officer_list["PersonNm"].apply(lambda x: x.upper())
# Deal with Titles
officer_list["TitleTxt"] = officer_list["TitleTxt"].apply(lambda x: str(x).upper())
df_officer = officer_list[column_order].copy()
df_officer["TotalCompFromOrgAmt"] = df_officer["ReportableCompFromOrgAmt"] + df_officer["OtherCompensationAmt"]
df_officer.reset_index(inplace=True, drop=True)
return df_officer
def get_bool(x):
if (x == "true") | (x == "1"):
return 1
elif (x == "false") | (x == "0") | (x is None):
return 0
else:
print(f"Error: {x}")
return x
def parse_schedule_j(df):
officers = []
df_tmp = df[["EIN", "ObjectId", "OrganizationName", "TaxYr", "StateAbbr", "ScheduleJ"]].copy()
for row in df_tmp.itertuples():
if row[6] is not None:
tmp = {}
tmp["EIN"] = row[1]
tmp["ObjectId"] = row[2]
tmp["OrganizationName"] = row[3]
tmp["TaxYr"] = row[4]
tmp["StateAbbr"] = row[5]
d = row[6]
tmp["SeverancePaymentInd"] = d.get("SeverancePaymentInd", None)
tmp["TravelForCompanionsInd"] = d.get("TravelForCompanionsInd", None)
tbl = d.get("RltdOrgOfficerTrstKeyEmplGrp", False)
if tbl:
if isinstance(tbl, dict):
                    # If it's the only element in the table, put it in a list to iterate over
tmp2 = []
tmp2.append(tbl)
tbl = tmp2
for officer in tbl:
tmp_officer = flatten_json.flatten(officer)
tmp_officer.update(tmp)
officers.append(tmp_officer)
else:
tmp = {}
df = pd.DataFrame(officers)
id_cols = [
'EIN', 'ObjectId', 'OrganizationName', 'StateAbbr', 'TaxYr',
'PersonNm', 'TitleTxt']
comp_cols = [
'TotalCompensationFilingOrgAmt',
'BaseCompensationFilingOrgAmt', 'BonusFilingOrganizationAmount',
'OtherCompensationFilingOrgAmt', 'DeferredCompensationFlngOrgAmt',
'NontaxableBenefitsFilingOrgAmt', 'TotalCompensationRltdOrgsAmt',
'OtherCompensationRltdOrgsAmt', 'BonusRelatedOrganizationsAmt',
'CompensationBasedOnRltdOrgsAmt', 'DeferredCompRltdOrgsAmt',
'NontaxableBenefitsRltdOrgsAmt', 'CompReportPrior990FilingOrgAmt',
'CompReportPrior990RltdOrgsAmt']
other_cols = ['BusinessName_BusinessNameLine1',
'BusinessName_BusinessNameLine1Txt', 'BusinessName_BusinessNameLine2',
'SeverancePaymentInd', 'TravelForCompanionsInd']
# Reorganize Columns
df = df[id_cols + comp_cols + other_cols].copy()
# Upper Case Name and Title
df["PersonNm"] = df["PersonNm"].apply(lambda x: str(x).upper())
df["TitleTxt"] = df["TitleTxt"].apply(lambda x: str(x).upper())
df["BusinessName_BusinessNameLine1"] = df["BusinessName_BusinessNameLine1"].apply(lambda x: str(x).upper())
df["BusinessName_BusinessNameLine1Txt"] = df["BusinessName_BusinessNameLine1Txt"].apply(lambda x: str(x).upper())
# Fill null values for compensation with 0
for col in comp_cols:
df[col] = df[col].fillna(0).astype(float)
# See if there is a severance payment
df["SeverancePaymentInd"] = df["SeverancePaymentInd"].apply(get_bool)
# Replace NA Names with Business Values where appropriate
df["PersonNm"] = np.where(
df["PersonNm"] == "NAN",
df["BusinessName_BusinessNameLine1"],
df["PersonNm"])
df["PersonNm"] = np.where(
df["PersonNm"] == "NAN",
df["BusinessName_BusinessNameLine1Txt"],
df["PersonNm"])
del df["BusinessName_BusinessNameLine1Txt"]
del df["BusinessName_BusinessNameLine1"]
del df["BusinessName_BusinessNameLine2"]
return df
if __name__ == "__main__":
# Runtime
client = irs.Client(
local_data_dir="../../data", ein_filename="eins",
index_years=[2016, 2017, 2018], save_xml=False,
parser="base")
client.parse_xmls(add_organization_info=True)
df = client.getFinalDF()
officers = df[[
"EIN", "ObjectId", "OrganizationName", "TaxYr", "StateAbbr", "Mission",
"OfficerName", "OfficerTitle", "OfficerCompensationPart9",
"CYTotalExpenses"]].copy()
# Caps Names
officers["OfficerName"] = officers["OfficerName"].apply(lambda x: x.upper())
# Deal with Titles
officers["OfficerTitle"] = officers["OfficerTitle"].apply(lambda x: str(x).upper())
# Add Officer Compensation Percentage of Total Expenses
tmp_val = officers["OfficerCompensationPart9"] / officers["CYTotalExpenses"]
officers.loc[:, "OfficerCompensationPct"] = tmp_val.copy()
# Parse Officer List Form990PartVIISectionAGrp
df_officers = parse_officer_list(officers)
# Parse Schedule J
df_schedulej = parse_schedule_j(df)
# Dump to SQL
con = irs.db_connect()
cur = con.cursor()
irs.initialize_db(con)
officers.to_sql("officer_org", con=con, if_exists="replace", index=False)
df_officers.to_sql("officer_payment", con=con, if_exists="replace", index=False)
    df_schedulej.to_sql("schedule_j", con=con, if_exists="replace", index=False)
```
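Once the parser has dumped its tables, the results can be inspected with a plain SQL query. A minimal sketch, assuming `irs.db_connect()` returns a DB-API connection as it does in the `__main__` block above, and using the table and column names written by `to_sql`:
```python
import pandas as pd
import irsparser as irs

con = irs.db_connect()
top_paid = pd.read_sql_query(
    """
    SELECT OrganizationName, PersonNm, TitleTxt, TotalCompFromOrgAmt
    FROM officer_payment
    ORDER BY TotalCompFromOrgAmt DESC
    LIMIT 10
    """,
    con,
)
print(top_paid)
```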
|
{
"source": "jeremyelster/PyStatsBomb",
"score": 3
}
|
#### File: PyStatsBomb/pystatsbomb/passgraph.py
```python
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import linear_kernel
from sklearn.preprocessing import StandardScaler
from scipy.sparse import csr_matrix
import networkx as nx
class graph():
def __init__(
self, df, team_name, match_id, pass_cols=None,
max_minute=70, until_first_sub=True
):
# Store some parameters
self.team_name = team_name
self.match_id = match_id
if pass_cols is None:
self.pass_cols = [
"player_name", "pass_recipient_name", "position_name"]
else:
self.pass_cols = pass_cols
df_completed_passes = df.loc[
(df["pass_outcome_name"] == "Complete") &
(df["match_id"] == self.match_id) &
(df["team_name"] == self.team_name), self.pass_cols]
df_completed_passes.loc[:, "weight"] = 1
df = df_completed_passes\
.groupby(["player_name", "pass_recipient_name"], as_index=False)\
.agg({"weight": "sum"})
df_pos = df_completed_passes\
.groupby(["player_name"], as_index=False)\
.agg({"position_name": "max"})
df = pd.merge(df, df_pos, on="player_name", how="left")
position_x_y = self.position_map()
df["pos_x_y"] = df["position_name"].map(position_x_y)
df["x"] = df["pos_x_y"].apply(lambda x: x[0])
df["y"] = df["pos_x_y"].apply(lambda x: x[1])
self.df = df
# Get list of unique players for nodes
self.unique_players = list(set(
df["player_name"]).union(df["pass_recipient_name"]))
# Check if the DF is null
if len(self.df) == 0:
print("No passes for team and match")
            raise ValueError("No passes for team and match")
# Build the Graph
g = nx.DiGraph()
for i, row in self.df.iterrows():
g.add_node(
node_for_adding=row["player_name"],
player_name=row["player_name"],
position=row["position_name"],
loc=(row["x"], row["y"]))
g.add_weighted_edges_from(
zip(
df["player_name"],
df["pass_recipient_name"],
df["weight"]))
self.g = g
def degree_centrality(self):
"""
The degree centrality is the number of neighbors divided by
all possible neighbors that it could have.
Will return a sorted list of players with their values
"""
        centrality = sorted(
            nx.degree_centrality(self.g).items(),
            key=lambda kv: kv[1], reverse=True)
return centrality
    def position_map(self):
position_x_y = {
# GK
'Goalkeeper': (10, 40),
# Defense
'Left Back': (30, 70),
'Left Center Back': (30, 50),
'Center Back': (30, 40),
'Right Back': (30, 10),
'Right Center Back': (30, 30),
'Left Wing Back': (40, 70),
'Right Wing Back': (40, 10),
# DM
'Left Defensive Midfield': (50, 50),
'Center Defensive Midfield': (50, 40),
'Right Defensive Midfield': (50, 30),
# CM
'Left Midfield': (60, 70),
'Left Center Midfield': (60, 50),
'Center Midfield': (60, 40),
'Right Center Midfield': (60, 30),
'Right Midfield': (60, 10),
# AMD
'Left Attacking Midfield': (70, 50),
'Center Attacking Midfield': (70, 40),
'Right Attacking Midfield': (70, 30),
# FWD
'Left Center Forward': (90, 50),
'Center Forward': (90, 40),
'Right Center Forward': (90, 30),
# Wing/SS
'Left Wing': (70, 70),
'Right Wing': (70, 10),
'Secondary Striker': (80, 35)}
return position_x_y
```
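A minimal usage sketch for the pass-network class above. The events dataframe, team name, and match id are placeholders; the dataframe must already contain the columns the constructor filters on (pass_outcome_name, match_id, team_name, player_name, pass_recipient_name, position_name).
```python
from pystatsbomb.passgraph import graph

# df_events: a StatsBomb events dataframe with completed-pass rows (placeholder).
pass_graph = graph(df_events, team_name="France", match_id=7580)

# Five most central passers by degree centrality.
print(pass_graph.degree_centrality()[:5])

# The underlying directed graph is a networkx DiGraph.
print(pass_graph.g.number_of_nodes(), pass_graph.g.number_of_edges())
```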
#### File: PyStatsBomb/pystatsbomb/passingmodel.py
```python
import pandas as pd
import numpy as np
import pickle
import datetime
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import MinMaxScaler
class PassingModel():
def __init__(self, data, lineups):
self.df_passes = self.getPassDF(data, lineups)
def runPassModel(
self, pass_model_type="knn", agg_level="player",
knn_model_file=None, expected_knn_file=None
):
# Build model on df_pass_model copy of df_passes
df_pass_model = self.df_passes.copy()
timestamp = datetime.datetime.strftime(
datetime.datetime.today(), "%Y_%m_%d_%H_%M_%S")
if pass_model_type == "knn":
simple_model_cols = [
"id", "duration", "pass_length", "pass_angle",
"location_origin_x", "location_origin_y",
"location_dest_x", "location_dest_y"]
df_knn = self.df_passes[simple_model_cols].set_index("id")
df_completions = self.df_passes[["id", "pass_outcome_name"]]\
.set_index("id")
completion_array = np.array(np.where(
df_completions["pass_outcome_name"] == "Complete", 1, 0))
# Scale Data and Build Model
min_max_scaler = MinMaxScaler()
df_knn_norm = min_max_scaler.fit_transform(df_knn)
n_neighbors = int(np.floor(len(df_knn) / 100)) + 1
if knn_model_file is not None:
nn_model = pickle.load(open("data/" + knn_model_file, 'rb'))
else:
nn_model = NearestNeighbors(
algorithm='ball_tree', n_neighbors=n_neighbors, p=2,
metric="euclidean", metric_params=None)
nn_model.fit(df_knn_norm)
pickle.dump(
nn_model, open("data/knn_model_file_" + timestamp, 'wb'))
if expected_knn_file is not None:
expected_pass_rate = pickle.load(
open("data/" + expected_knn_file, 'rb'))
else:
completion_array = np.array(
df_pass_model["pass_outcome_name_binary"])
expected_pass_rate = []
passes_per_ep = []
print(f"Total Number of Passes: {len(df_knn)}")
n = 0
for row in df_knn_norm:
sim_passes = self.get_similar_passes(
row.reshape(1, -1), df_knn_norm, nn_model, cutoff=.2)
passes_per_ep.append(len(sim_passes))
expected_value = completion_array[sim_passes].mean()
expected_pass_rate.append(expected_value)
n += 1
if n % 5000 == 0:
print(f"Progress: {n} of {len(df_knn_norm)}")
pickle.dump(
expected_pass_rate,
open('expected_knn_file_' + timestamp, 'wb'))
df_pass_model["xP"] = expected_pass_rate
elif pass_model_type == "box_grid":
origin_box, dest_box = [], []
for i, x in self.df_passes[[
"location_origin_x", "location_origin_y",
"location_dest_x", "location_dest_y"
]].iterrows():
x, y = self.make_pass_grid(
x[0], x[1], x[2], x[3],
nrows=np.linspace(0, 120, 13), ncols=np.linspace(0, 80, 9))
origin_box.append(x)
dest_box.append(y)
if i % 5000 == 0:
print(f"Pass {i} of {len(self.df_passes)}: {round(100*float(i)/len(self.df_passes),2)}% ")
df_pass_model.loc[:, "origin_box"] = origin_box
df_pass_model.loc[:, "dest_box"] = dest_box
df_pass_model["pass_desc"] = list(zip(
df_pass_model["origin_box"], df_pass_model["dest_box"]))
# Get expected value (average) for each grid combination
pass_grid_dict = df_pass_model\
.groupby("pass_desc")["pass_outcome_name_binary"]\
.mean().to_dict()
df_pass_model.loc[:, ("xP")] = df_pass_model["pass_desc"]\
.map(pass_grid_dict)
if agg_level == "player":
# df_pass_model['pass_direction'] = df_pass_model['pass_angle']\
# .apply(self.pass_direction)
df_pass_model["position_name_parsed"] = (
df_pass_model["position_name"].apply(
self.position_base_parser))
df_pass_model["position_detail_parsed"] = (
df_pass_model["position_name"].apply(
self.position_detail_parser))
passing_model = df_pass_model\
.groupby(["team_name", "player_name"], as_index=False)\
.agg({
"position_name_parsed": "max",
"position_detail_parsed": "max",
"pass_outcome_name_binary": ["count", "sum"],
"xP": ["sum", "mean"]})
passing_model.columns = [
"Team", "Player", "Position", "Position_Detail",
"Passes", "Completed", "xP", "xP_Mean"]
passing_model["xP_Rating"] = (
passing_model["Completed"] / passing_model["xP"])
passing_model["comp_pct"] = (
passing_model["Completed"] / passing_model["Passes"])
elif agg_level == "team":
passing_model = df_pass_model\
.groupby(["team_name"], as_index=False)\
.agg({
"pass_outcome_name_binary": ["count", "sum"],
"xP": ["sum", "mean"]})
passing_model.columns = [
"Team", "Passes", "Completed", "xP", "xP_Mean"]
passing_model["xP_Rating"] = (
passing_model["Completed"] / passing_model["xP"])
passing_model["comp_pct"] = (
passing_model["Completed"] / passing_model["Passes"])
else:
# self.passing_model = None
print("Choose player or team")
return None
self.df_pass_model = df_pass_model
self.passing_model = passing_model
return passing_model
def make_pass_grid(
self, origin_x, origin_y, dest_x, dest_y, nrows=None, ncols=None
):
if nrows is None:
nrows = [18, 60, 102, 120]
if ncols is None:
ncols = [18, 40, 62, 80]
o_x = np.searchsorted(nrows, origin_x, side="left")
o_y = np.searchsorted(ncols, origin_y, side="left")
d_x = np.searchsorted(nrows, dest_x, side="left")
d_y = np.searchsorted(ncols, dest_y, side="left")
origin_box = (o_x) * 4 + (o_y + 1)
dest_box = (d_x) * 4 + (d_y + 1)
return(origin_box, dest_box)
def pass_direction(self, x):
"""
According to statsbomb, pass_angle is between 0 (pass ahead)
and pi (pass behind). Clockwise
We divide the circle into 4 equal sections.
Directions are forward, right, left, behind"""
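        # Illustrative example (not from the source): pi/4 ~= 0.785 rad, so an
        # angle of 0.3 rad is "Forward"; 2.8 rad > 3*pi/4 ~= 2.356, so "Behind".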
pi_div = np.pi / 4
if (x <= pi_div) & (x >= -pi_div):
return "Forward"
elif (x > pi_div) & (x <= 3 * pi_div):
return "Right"
elif (x > 3 * pi_div) | (x < -3 * pi_div):
return "Behind"
else:
return "Left"
def position_base_parser(self, pos):
# Midfield
if "Center Midfield" in pos:
return "Midfield"
elif "Defensive Midfield" in pos:
return "Midfield"
elif "Attacking Midfield" in pos:
return "Midfield"
elif "Midfield" in pos:
return "Midfield"
# Defense
elif "Wing Back" in pos:
return "Defense"
elif "Center Back" in pos:
return "Defense"
elif "Back" in pos:
return "Defense"
# Forward
elif "Forward" in pos:
return "Forward"
elif "Striker" in pos:
return "Forward"
# Other
elif "Wing" in pos:
return "Forward"
elif "Goalkeeper" in pos:
return "Goalkeeper"
else:
return pos
def position_detail_parser(self, pos):
# Midfield
if "Center Midfield" in pos:
return "Midfield"
elif "Defensive Midfield" in pos:
return "Defensive Midfield"
elif "Attacking Midfield" in pos:
return "Attacking Midfield"
elif "Midfield" in pos:
return "Wide Midfield"
# Defense
elif "Wing Back" in pos:
return "Wing Back"
elif "Center Back" in pos:
return "Center Back"
elif "Back" in pos:
return "Fullback"
# Forward
elif "Forward" in pos:
return "Forward"
elif "Striker" in pos:
return "Forward"
# Other
elif "Wing" in pos:
return "Winger"
elif "Goalkeeper" in pos:
return "Goalkeeper"
else:
return pos
def get_similar_passes(self, p, df, model, cutoff=.2, n_top=5):
dist, passes = model.kneighbors(
p, n_neighbors=len(df), return_distance=True)
return passes[0][1:np.searchsorted(dist[0], cutoff)]
def getPassDF(self, df_events, lineups):
pass_values = [
'index', "match_id", 'duration', 'id', 'period', 'minute',
'second', 'type_name', 'player_name', 'position_name', "team_name",
'possession_team_name', 'possession', 'possession_team_id',
'related_events', 'under_pressure', 'location',
# Pass details
'pass_aerial_won', 'pass_angle', 'pass_assisted_shot_id',
'pass_backheel', 'pass_body_part_id', 'pass_body_part_name',
'pass_cross', 'pass_deflected', 'pass_end_location',
'pass_goal_assist', 'pass_height_id', 'pass_height_name',
'pass_length', 'pass_outcome_id', 'pass_outcome_name',
'pass_recipient_id', 'pass_recipient_name', 'pass_shot_assist',
'pass_switch', 'pass_through_ball', 'pass_type_id',
'pass_type_name']
df_passes = df_events.loc[
(df_events['type_name'].isin(['Pass'])) &
(~df_events["pass_type_name"].isin(
["Goal Kick", "Corner", "Throw-in", "Free Kick", "Kick Off"])),
pass_values]
df_passes.reset_index(inplace=True)
del df_passes["level_0"]
df_passes["under_pressure"] = df_passes["under_pressure"].fillna(False)
df_passes['pass_outcome_name'].fillna('Complete', inplace=True)
for col in [
"pass_backheel", "pass_cross", "pass_aerial_won",
"pass_deflected", "pass_goal_assist", "pass_shot_assist",
"pass_switch", "pass_through_ball"]:
df_passes[col].fillna(False, inplace=True)
# extract pass location data
p_origin = pd.DataFrame(df_passes["location"].values.tolist(), columns=["location_origin_x", "location_origin_y"])
p_dest = pd.DataFrame(df_passes["pass_end_location"].values.tolist(), columns=["location_dest_x", "location_dest_y"])
df_passes = pd.concat([df_passes, p_origin, p_dest], axis=1)
overall_positions = lineups[[
"team_name", "player.name", "overall_position"
]].drop_duplicates().rename({
"player.name": "player_name",
"overall_position": "position_name"}, axis=1)
        # "id" and "duration" are kept so the knn branch of runPassModel can
        # build its feature matrix from this frame.
        pass_grid_cols = [
            "id", "duration",
            "team_name", "player_name", "under_pressure", "pass_height_name",
            "pass_outcome_name", "pass_angle", "pass_length",
            "location_origin_x", "location_origin_y",
            "location_dest_x", "location_dest_y"]
df_pass_model = df_passes[pass_grid_cols]
df_pass_model = pd.merge(
df_pass_model, overall_positions,
on=["team_name", "player_name"], how="left")
df_pass_model["pass_outcome_name_binary"] = np.where(
df_pass_model["pass_outcome_name"] == "Complete", True, False)
return df_pass_model
```
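A minimal usage sketch for the model above. `df_events` and `lineups` are placeholders for the StatsBomb event and lineup dataframes the constructor expects; the "box_grid" variant is shown because it needs no pickled KNN artifacts.
```python
from pystatsbomb.passingmodel import PassingModel

# df_events / lineups: StatsBomb event and lineup dataframes (placeholders).
pm = PassingModel(df_events, lineups)
player_model = pm.runPassModel(pass_model_type="box_grid", agg_level="player")
print(player_model.sort_values("xP_Rating", ascending=False).head(10))
```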
#### File: PyStatsBomb/pystatsbomb/passmodelviz.py
```python
import numpy as np
from bokeh.layouts import widgetbox, row, column
from bokeh.models import (
ColumnDataSource, RangeSlider, Select, HoverTool, LabelSet)
from bokeh.models import Span, Label, Panel
from bokeh.plotting import figure
from bokeh.palettes import Spectral6, Viridis10
from bokeh.transform import factor_cmap
from bokeh.models.widgets import CheckboxGroup
def player_tab(passing_model):
positions = list(passing_model.Position.unique())
position_details = list(passing_model.Position_Detail.unique())
position_color = factor_cmap(
'Position_Detail', palette=Viridis10, factors=position_details)
select = Select(title="Position:", value="Midfield", options=positions)
max_passes = int(passing_model["Passes"].max())
pass_slider = RangeSlider(
start=0, end=max_passes, value=(70, max_passes),
step=5, title="Number of Passes")
def make_dataset(select_value, pass_slider_min, pass_slider_max):
source = ColumnDataSource(
data=passing_model.loc[
(passing_model["Position"] == select_value) &
(passing_model["Passes"] >= pass_slider_min) &
(passing_model["Passes"] <= pass_slider_max), :])
source.data["Pass_Size"] = source.data["Passes"] / 10
source.data["xP_Mean_mean"] = np.repeat(source.data["xP_Mean"].mean(), len(source.data["Passes"]))
source.data["xP_Rating_mean"] = np.repeat(source.data["xP_Rating"].mean(), len(source.data["Passes"]))
return source
def make_plot(source):
"""Need to return the span so we can update them in callback (I think)
"""
# Set up Plot Figure
plot_size_and_tools = {
'plot_height': 100, 'plot_width': 1000,
'x_range': (source.data["xP_Rating"].min() * .8, source.data["xP_Rating"].max() * 1.2),
'y_range': (source.data["xP_Mean"].min() * .8, source.data["xP_Mean"].max() * 1.2)}
plot = figure(tools=["tap", "pan", "wheel_zoom",'box_select', 'reset', 'help'], title="Expected Passes v. Pass Difficulty")
plot.y_range.flipped = True
# Get Means and Ranges and Top n% for Labels
xp_ms = source.data["xP_Mean_mean"][0]
xp_mean_span = Span(location=xp_ms,
dimension='width', line_color="black",
line_dash='solid', line_width=3, line_alpha=.2)
plot.add_layout(xp_mean_span)
xp_rs = source.data["xP_Rating_mean"][0]
xp_rating_span = Span(location=xp_rs,
dimension='height', line_color="black",
line_dash='solid', line_width=3, line_alpha=.2)
plot.add_layout(xp_rating_span)
renderer = plot.circle("xP_Rating", "xP_Mean", size="Pass_Size",
color=position_color,
legend="Position_Detail",
source=source,
# set visual properties for selected glyphs
selection_color=Spectral6[5],
#color="Position_Detail",
# set visual properties for non-selected glyphs
nonselection_fill_alpha=0.1,
nonselection_fill_color=Spectral6[0],
nonselection_line_color=Spectral6[5],
nonselection_line_alpha=1.0)
plot.legend.location = (10,50)
plot.legend.border_line_width = 3
plot.legend.border_line_color = "black"
plot.legend.border_line_alpha = 0.5
labels = LabelSet(x='xP_Rating', y='xP_Mean', text='Player', level='glyph',
text_font_size='10pt', x_offset=-2, y_offset=2, source=source, render_mode='canvas')
plot.add_layout(labels)
# Hover tool with vline mode
hover = HoverTool(tooltips=[('Team', '@Team'),
('Player', '@Player'),
('Position', '@Position_Detail'),
('Expected Pass Rating', '@xP_Rating'),
('Total Passes', '@Passes')],
mode='vline')
plot.add_tools(hover)
# Add Labels in the corners
citation1 = Label(x=10, y=10, x_units='screen', y_units='screen',
text='Easier Passes, Poorly Executed', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
# Add Labels in the corners
citation2 = Label(x=10, y=510, x_units='screen', y_units='screen',
text='Harder Passes, Poorly Executed', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
# Add Labels in the corners
citation3 = Label(x=625, y=10, x_units='screen', y_units='screen',
text='Easier Passes, Well Executed', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
# Add Labels in the corners
citation4 = Label(x=625, y=510, x_units='screen', y_units='screen',
                          text='Harder Passes, Well Executed', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
plot.add_layout(citation1)
plot.add_layout(citation2)
plot.add_layout(citation3)
plot.add_layout(citation4)
return plot, xp_mean_span, xp_rating_span
def callback(attr, old, new):
# Pass Slider
range_start = pass_slider.value[0]
range_end = pass_slider.value[1]
# Select
position_val = select.value
# Update Title
plot.title.text = select.value
# Update Dataset
new_df = make_dataset(position_val, range_start, range_end)
source.data.update(new_df.data)
# Update Averages
xp_ms = source.data["xP_Mean_mean"][0]
xp_mean_span.location = xp_ms
xp_rs = source.data["xP_Rating_mean"][0]
xp_rating_span.location = xp_rs
source = make_dataset(
select.value, pass_slider.value[0], pass_slider.value[1])
plot, xp_mean_span, xp_rating_span = make_plot(source)
inputs = widgetbox(select, pass_slider)
select.on_change('value', callback)
pass_slider.on_change('value', callback)
# Create a row layout
layout = column(inputs, plot)
# Make a tab with the layout
tab = Panel(child=layout, title='Player Passing Model')
return tab
def team_tab(passing_model):
result = [
"Group Stage", "Round of 16", "Quarter-finals", "Semi-finals", "Final"]
#position_details = list(passing_model.Position_Detail.unique())
result_color = factor_cmap(
'Round', palette=Spectral6, factors=result)
checkbox = CheckboxGroup(
labels=result, active=[0,1,2,3,4])
rounds = [checkbox.labels[i] for i in checkbox.active]
#max_passes = int(passing_model["Passes"].max())
#pass_slider = RangeSlider(
# start=0, end=max_passes, value=(70, max_passes),
# step=5, title="Number of Passes")
def make_dataset(rounds):
source = ColumnDataSource(
data=passing_model.loc[
passing_model["Round"].isin(rounds), :] ) #&
#(passing_model["Passes"] >= pass_slider_min) &
#(passing_model["Passes"] <= pass_slider_max), :])
source.data["Pass_Size"] = source.data["Passes"] / 50
source.data["xP_Mean_mean"] = np.repeat(source.data["xP_Mean"].mean(), len(source.data["Passes"]))
source.data["xP_Rating_mean"] = np.repeat(source.data["xP_Rating"].mean(), len(source.data["Passes"]))
return source
def make_plot(source):
"""Need to return the span so we can update them in callback (I think)"""
# Set up Plot Figure
plot_size_and_tools = {
'plot_height': 100, 'plot_width': 1000,
'x_range': (source.data["xP_Rating"].min() * .8, source.data["xP_Rating"].max() * 1.2),
'y_range': (source.data["xP_Mean"].min() * .8, source.data["xP_Mean"].max() * 1.2)}
plot = figure(tools=["tap", "pan", "wheel_zoom",'box_select', 'reset', 'help'], title="Expected Passes v. Pass Difficulty")
plot.y_range.flipped = True
# Get Means and Ranges and Top n% for Labels
xp_ms = source.data["xP_Mean_mean"][0]
xp_mean_span = Span(location=xp_ms,
dimension='width', line_color="black",
line_dash='solid', line_width=3, line_alpha=.2)
plot.add_layout(xp_mean_span)
xp_rs = source.data["xP_Rating_mean"][0]
xp_rating_span = Span(location=xp_rs,
dimension='height', line_color="black",
line_dash='solid', line_width=3, line_alpha=.2)
plot.add_layout(xp_rating_span)
renderer = plot.circle("xP_Rating", "xP_Mean", size="Pass_Size",
color=result_color,
legend="Round",
source=source,
# set visual properties for selected glyphs
selection_color=Spectral6[5],
#color="Position_Detail",
# set visual properties for non-selected glyphs
nonselection_fill_alpha=0.1,
nonselection_fill_color=Spectral6[0],
nonselection_line_color=Spectral6[5],
nonselection_line_alpha=1.0)
plot.legend.location = (10,50)
plot.legend.border_line_width = 3
plot.legend.border_line_color = "black"
plot.legend.border_line_alpha = 0.5
labels = LabelSet(x='xP_Rating', y='xP_Mean', text='Team', level='glyph',
text_font_size='10pt', x_offset=-2, y_offset=2, source=source, render_mode='canvas')
plot.add_layout(labels)
# Hover tool with vline mode
hover = HoverTool(tooltips=[('Team', '@Team'),
('Result', '@Round'),
#('Position', '@Position_Detail'),
('Expected Pass Rating', '@xP_Rating'),
('Total Passes', '@Passes')],
mode='vline')
plot.add_tools(hover)
# Add Labels in the corners
citation1 = Label(x=10, y=10, x_units='screen', y_units='screen',
text='Easier Passes, Poorly Executed', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
# Add Labels in the corners
citation2 = Label(x=10, y=510, x_units='screen', y_units='screen',
text='Harder Passes, Poorly Executed', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
# Add Labels in the corners
citation3 = Label(x=625, y=10, x_units='screen', y_units='screen',
text='Easier Passes, Well Executed', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
# Add Labels in the corners
citation4 = Label(x=625, y=510, x_units='screen', y_units='screen',
                          text='Harder Passes, Well Executed', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
plot.add_layout(citation1)
plot.add_layout(citation2)
plot.add_layout(citation3)
plot.add_layout(citation4)
return plot, xp_mean_span, xp_rating_span
def callback(attr, old, new):
new_rounds = [checkbox.labels[i] for i in checkbox.active]
# Update Dataset
new_df = make_dataset(new_rounds)
source.data.update(new_df.data)
# Update Averages
xp_ms = source.data["xP_Mean_mean"][0]
xp_mean_span.location = xp_ms
xp_rs = source.data["xP_Rating_mean"][0]
xp_rating_span.location = xp_rs
source = make_dataset(rounds)
plot, xp_mean_span, xp_rating_span = make_plot(source)
inputs = widgetbox(checkbox)
checkbox.on_change('active', callback)
#pass_slider.on_change('value', callback)
# Create a row layout
layout = column(inputs, plot)
#layout = row(plot)
# Make a tab with the layout
tab = Panel(child=layout, title='Team Passing Model')
return tab
```
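The two tab builders above are meant to be composed into a Bokeh server document. A minimal sketch using the same Bokeh 1.x API as the module (run with `bokeh serve --show app.py`); the two aggregated dataframes are placeholders, and `team_tab` additionally expects a `Round` column.
```python
from bokeh.io import curdoc
from bokeh.models.widgets import Tabs

# passing_model_players / passing_model_teams: outputs of
# PassingModel.runPassModel(agg_level="player") and (agg_level="team"),
# the team frame augmented with a "Round" column (placeholders).
tabs = Tabs(tabs=[
    player_tab(passing_model_players),
    team_tab(passing_model_teams),
])
curdoc().add_root(tabs)
```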
#### File: PyStatsBomb/pystatsbomb/plotting.py
```python
import matplotlib.pyplot as plt
from matplotlib.patches import Arc
import matplotlib.cm as cm
import seaborn as sns
import numpy as np
import pandas as pd
from bokeh.plotting import figure
from bokeh.io import show
# import seaborn as sns
def plotpitch(display=False, size=(7,5)):
# Create figure
fig = plt.figure()
fig.set_size_inches(size)
ax = fig.add_subplot(1, 1, 1)
# Pitch Outline & Centre Line
ax.plot([0, 0], [0, 80], color="black")
ax.plot([0, 120], [80, 80], color="black")
ax.plot([120, 120], [80, 0], color="black")
ax.plot([120, 0], [0, 0], color="black")
ax.plot([60, 60], [0, 80], color="black")
# Left Penalty Area
ax.plot([18, 18], [62, 18], color="black")
ax.plot([0, 18], [62, 62], color="black")
ax.plot([18, 0], [18, 18], color="black")
# Right Penalty Area
ax.plot([120, 102], [62, 62], color="black")
ax.plot([102, 102], [62, 18], color="black")
ax.plot([102, 120], [18, 18], color="black")
# Left 6-yard Box
ax.plot([0, 6], [50, 50], color="black")
ax.plot([6, 6], [50, 30], color="black")
ax.plot([6, 0], [30, 30], color="black")
# Right 6-yard Box
ax.plot([120, 114], [50, 50], color="black")
ax.plot([114, 114], [50, 30], color="black")
ax.plot([114, 120], [30, 30], color="black")
# Left Goal
ax.plot([0, -2], [44, 44], color="black")
ax.plot([-2, -2], [44, 36], color="black")
ax.plot([-2, 0], [36, 36], color="black")
# Right Goal
ax.plot([120, 122], [44, 44], color="black")
ax.plot([122, 122], [44, 36], color="black")
ax.plot([122, 120], [36, 36], color="black")
# Prepare Circles
centreCircle = plt.Circle((60, 40), 10, color="black", fill=False, lw=2)
centreSpot = plt.Circle((60, 40), 0.8, color="black")
leftPenSpot = plt.Circle((12, 40), 0.8, color="black")
rightPenSpot = plt.Circle((108, 40), 0.8, color="black")
# Draw Circles
ax.add_patch(centreCircle)
ax.add_patch(centreSpot)
ax.add_patch(leftPenSpot)
ax.add_patch(rightPenSpot)
# Prepare Arcs
leftArc = Arc((12, 40), height=20, width=20, angle=0,
theta1=310, theta2=50, color="black", lw=2)
rightArc = Arc((108, 40), height=20, width=20, angle=0,
theta1=130, theta2=230, color="black", lw=2)
# Draw Arcs
ax.add_patch(leftArc)
ax.add_patch(rightArc)
# Tidy Axes
ax.axis('off')
# sns.regplot(df_shot["x"],df_shot["y"], fit_reg=False)#,
# shade=True,n_levels=50)
# team1 = df_shot[df_shot.team == 'Chelsea LFC']
# team2 = df_shot[df_shot.team != 'Chelsea LFC']
# sns.kdeplot(team1["x"], team1["y"], shade=False,
# shade_lowest=False, n_levels=50, cmap="Reds", ax=ax)
# sns.kdeplot(team2["x"], team2["y"], shade=False,
# shade_lowest=False, n_levels=50, cmap="Blues", ax=ax)
# sns.regplot(team1["x"], team1["y"], fit_reg=False, color="red", ax=ax)
# sns.regplot(team2["x"], team2["y"], fit_reg=False, color="blue", ax=ax)
plt.ylim(-5, 85)
plt.xlim(-5, 125)
# Display Pitch
if display:
plt.show()
else:
return fig, ax
def plotpitch_bokeh(display=False, line_color="black", background="green"):
plot = figure(title="Pitch Baseplot", x_range=(-5,125), y_range=(-5,85), plot_width=600, plot_height=400)
# Styling
color = line_color
plot.background_fill_color = background
plot.background_fill_alpha = 0.1
plot.grid.grid_line_color = None
plot.xaxis.axis_line_color = None
plot.yaxis.axis_line_color = None
plot.xaxis.major_tick_line_color = None
plot.xaxis.minor_tick_line_color = None
plot.yaxis.major_tick_line_color = None
plot.yaxis.minor_tick_line_color = None
plot.xaxis.major_label_text_font_size = '0pt'
plot.yaxis.major_label_text_font_size = '0pt'
# Outline and Midline
plot.multi_line(
[[0, 0], [0, 120], [120, 120], [120, 0], [60, 60]],
[[0, 80], [80, 80], [80, 0], [0, 0], [0, 80]], color=color, name="outline")
# Left Penalty Area
plot.multi_line(
[[18, 18], [0, 18], [18, 0]],
[[62, 18], [62, 62], [18, 18]], color=color, name="left_penalty_area")
# Right Penalty Area
plot.multi_line(
[[120, 102], [102, 102], [102, 120]],
[[62, 62], [62, 18], [18, 18]], color=color, name="right_penalty_area")
# Left 6-yard Box
plot.multi_line(
[[0, 6], [6, 6], [6, 0]],
[[50, 50], [50, 30], [30, 30]], color=color, name="left_6")
# Right 6-yard Box
plot.multi_line(
[[120, 114], [114, 114], [114, 120]],
[[50, 50], [50, 30], [30, 30]], color=color, name="right_6")
# Left Goal
plot.multi_line(
[[0, -2], [-2, -2], [-2, 0]],
[[44, 44], [44, 36], [36, 36]], color=color, name="left_goal")
# Right Goal
plot.multi_line(
[[120, 122], [122, 122], [122, 120]],
[[44, 44], [44, 36], [36, 36]], color=color, name="right_goal")
# Circles and Arcs
plot.circle(60, 40, radius=10, fill_alpha=0, color=color, name="center_cirle")
plot.circle(60, 40, size=5, color=color, name="center_dot")
plot.circle(12, 40, size=5, color=color, name="left_pen_dot")
    plot.circle(108, 40, size=5, color=color, name="right_pen_dot")
plot.arc(12, 40, radius=10, start_angle=307, end_angle=53, start_angle_units='deg', end_angle_units="deg", color=color, name="left_pen_circle")
plot.arc(108, 40, radius=10, start_angle=127, end_angle=233, start_angle_units='deg', end_angle_units="deg", color=color, name="right_pen_circle")
# Display Pitch
if display:
show(plot)
else:
return plot
def pass_rose(df_passes, palette=None):
"""Based from https://gist.github.com/phobson/41b41bdd157a2bcf6e14"""
if "pass_angle_deg" in df_passes.columns:
pass
else:
print("Adding Pass Angle Degrees")
df_passes['pass_angle_deg'] = (
df_passes['pass_angle'].apply(pass_angle_deg))
total_count = df_passes.shape[0]
print('{} total observations'.format(total_count))
dir_bins = np.arange(-7.5, 370, 15)
dir_labels = (dir_bins[:-1] + dir_bins[1:]) / 2
rosedata = (
df_passes
.assign(PassAngle_bins=lambda df: (
pd.cut(
df['pass_angle_deg'],
bins=dir_bins,
labels=dir_labels,
right=False))))
rosedata.loc[rosedata["PassAngle_bins"] == 360., "PassAngle_bins"] = 0.
rosedata["PassAngle_bins"].cat.remove_categories([360], inplace=True)
rosedata = (
rosedata
.groupby(by=['PassAngle_bins'])
.agg({"pass_length": "size"})
# .unstack(level='PassAngle_bins')
.fillna(0)
.sort_index(axis=0)
#.applymap(lambda x: x / total_count * 100)
)
pass_dirs = np.arange(0, 360, 15)
if palette is None:
palette = sns.color_palette('inferno', n_colors=rosedata.shape[1])
bar_dir, bar_width = _convert_dir(pass_dirs)
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw=dict(polar=True))
ax.set_theta_direction('clockwise')
ax.set_theta_zero_location('N')
c1 = "pass_length"
colors = cm.viridis(rosedata[c1].values / float(max(rosedata[c1].values)))
print(len(bar_dir))
print(len(rosedata[c1].values))
# first column only
ax.bar(
bar_dir,
rosedata[c1].values,
width=bar_width,
color=colors,
edgecolor='none',
label=c1,
linewidth=0)
leg = ax.legend(loc=(0.75, 0.95), ncol=2)
#xtl = ax.set_xticklabels(['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW'])
# return fig
def _convert_dir(directions, N=None):
if N is None:
N = directions.shape[0]
barDir = directions * np.pi/180. # np.pi/N
barWidth = 2 * np.pi / N
return barDir, barWidth
def dist_labels(bins, units):
labels = []
for left, right in zip(bins[:-1], bins[1:]):
if np.isinf(right):
labels.append('>{} {}'.format(left, units))
else:
labels.append('{} - {} {}'.format(left, right, units))
return list(labels)
def pass_angle_deg(x):
"""Convert negative angles to positive radians from 0 to 2pi clockwise"""
if x >= 0:
return x * 180. / np.pi
else:
return (2 * np.pi + x) * 180. / np.pi
def getArrow(start, end, color, qualifier=None, viz="mpl"):
x = start[0]
y = start[1]
if viz == "mpl":
dx = end[0] - start[0]
dy = end[1] - start[1]
elif viz == "bokeh":
dx = end[0]
dy = end[1]
else:
print("please choose mpl or bokeh")
if color == qualifier:
color = 'blue'
else:
color = 'red'
return x, y, dx, dy, color
```
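A minimal sketch tying the helpers above together: draw a pitch and overlay a single pass as an arrow. The pass coordinates are placeholders in StatsBomb's 120x80 pitch space.
```python
import matplotlib.pyplot as plt

from pystatsbomb.plotting import plotpitch, getArrow

fig, ax = plotpitch(display=False)
x, y, dx, dy, color = getArrow(
    start=(40, 30), end=(75, 44), color="Complete",
    qualifier="Complete", viz="mpl")
ax.arrow(x, y, dx, dy, color=color, head_width=1.5, length_includes_head=True)
plt.show()
```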
|
{
"source": "jeremyephron/explore-courses-api",
"score": 4
}
|
#### File: explore-courses-api/explorecourses/classes.py
```python
from typing import Tuple
from xml.etree.ElementTree import Element
class Department(object):
"""
This class represents a department within a school.
Attributes:
name (str): The department name.
code (str): The department code used for searching courses by
department.
"""
def __init__(self, elem: Element):
"""
Constructs a new Department from an XML element.
Args:
elem (Element): The department's XML element.
"""
self.name = elem.get("longname")
self.code = elem.get("name")
def __str__(self):
"""
Returns a string representation of the Department that includes both
department name and code.
"""
return f"{self.name} ({self.code})"
class School(object):
"""
This class represents a school within the university.
Attributes:
name (str): The name of the school.
departments (Tuple[Department]): A list of departments within the
school.
"""
def __init__(self, elem: Element):
"""
Constructs a new School from an XML element.
Args:
elem (Element): The school's XML element.
"""
self.name = elem.get("name")
depts = elem.findall("department")
self.departments = tuple(Department(dept) for dept in depts)
def get_department(self, idf: str) -> Department:
"""
Gets a department within the school identified by name or code.
Args:
idf (str): An identifier of the department; either the name or code.
Returns:
Department: The department matched by the given identifier if a
match was found, None otherwise.
"""
idf = idf.lower()
find_code = lambda dept, code: dept.code.lower() == code
find_name = lambda dept, name: dept.name.lower() == name
find_dept = lambda dept, idf: find_name(dept, idf) or find_code(dept,
idf)
idx = [idx for idx, dept in enumerate(self.departments)
if find_dept(dept, idf)]
return self.departments[idx[0]] if idx else None
def __str__(self):
"""
Returns a string representation of the School that is the School's name.
"""
return self.name
class Instructor(object):
"""
This class represents an instructor for a section.
Attributes:
name (str): The instructor's name in "LastName, FirstInitial." form.
first_name (str): The instructor's first name.
middle_name (str): The instructor's middle name.
last_name (str): The instructor's last name.
sunet_id (str): The instructor's SUNet ID (as in <EMAIL>).
is_primary_instructor (bool): True if the instructor is the primary
instructor for the course, False otherwise.
"""
def __init__(self, elem: Element):
"""
Constructs a new Instructor from an XML element.
Args:
elem (Element): The instructor's XML element.
"""
self.name = elem.findtext("name")
self.first_name = elem.findtext("firstName")
self.middle_name = elem.findtext("middleName")
self.last_name = elem.findtext("lastName")
self.sunet_id = elem.findtext("sunet")
self.is_primary_instructor = elem.findtext("role") == "PI"
def __str__(self):
"""
Returns a string representation of the Instructor that includes the
instructor's first and last name and SUNet ID.
"""
return f"{self.first_name} {self.last_name} ({self.sunet_id})"
class Attribute(object):
"""
This class represents an attribute of a course.
Attributes:
name (str): The name of the attribute.
value (str): The abbreviation value of the attribute.
description (str): A description of the value of the attribute.
catalog_print (bool): True if the attribute has the catalog print flag,
False otherwise.
schedule_print (bool): True if the attribute has the schedule print
flag, False otherwise.
"""
def __init__(self, elem: Element):
"""
Constructs a new Attribute from an XML element.
Args:
elem (Element): The attribute's XML element.
"""
self.name = elem.findtext("name")
self.value = elem.findtext("value")
self.description = elem.findtext("description")
self.catalog_print = elem.findtext("catalogPrint") == "true"
self.schedule_print = elem.findtext("schedulePrint") == "true"
def __str__(self):
"""
Returns a string representation of the Attribute that includes the
attribute's name and value.
"""
return f"{self.name}::{self.value}"
class Schedule(object):
"""
This class represents the schedule of a section, including instructors.
Attributes:
start_date (str): The start date of the section's schedule.
end_date (str): The end date of the section's schedule.
start_time (str): The start time of each section.
end_time (str): The end time of each section.
location (str): The location of each section.
days (Tuple[str]): The days of the week that the section meets.
instructors (Tuple[Instructor]): The section's instructors.
"""
def __init__(self, elem: Element):
"""
Constructs a new Schedule from an XML element.
Args:
elem (Element): The schedule's XML element.
"""
self.start_date = elem.findtext("startDate")
self.end_date = elem.findtext("endDate")
self.start_time = elem.findtext("startTime")
self.end_time = elem.findtext("endTime")
self.location = elem.findtext("location")
self.days = tuple(elem.findtext("days").split())
self.instructors = tuple(Instructor(instr) for instr
in elem.find("instructors"))
def __str__(self):
"""
Returns a string representation of the Schedule that includes the
        days of the week the section meets and its time and location.
"""
return (f"{', '.join(self.days)}, {self.start_time} - {self.end_time} "
f"at {self.location}")
class Section(object):
"""
This class represents a section of a course.
Attributes:
class_id (int): The unique ID of the section.
term (str): The year and quarter during which the section is offered.
units (str): The number of units the section is offered for
section_num (str): The section number which distinguishes between
different sections of the same type.
component (str): The type of section (e.g., LEC)
curr_class_size (int): The current number of students enrolled in the
section.
max_class_size (int): The maximum number of students allowed in the
section.
curr_waitlist_size (int): The current number of students on the
waitlist to enroll in the section.
max_waitlist_size (int): The maximum number of students allowed on the
waitlist for the section.
notes (str): Any notes about the section.
schedules (Tuple[Schedule]): The different schedules of the section.
attributes (Tuple[Attribute]): The section's attributes.
"""
def __init__(self, elem: Element):
"""
Constructs a new Section from an XML element.
Args:
elem (Element): The section's XML element.
"""
self.class_id = int(elem.findtext("classId"))
self.term = elem.findtext("term")
self.units = elem.findtext("units")
self.section_num = elem.findtext("sectionNumber")
self.component = elem.findtext("component")
self.max_class_size = int(elem.findtext("maxClassSize"))
self.curr_class_size = int(elem.findtext("currentClassSize"))
self.curr_waitlist_size = int(elem.findtext("currentWaitlistSize"))
self.max_waitlist_size = int(elem.findtext("maxWaitlistSize"))
self.notes = elem.findtext("notes")
self.schedules = tuple(Schedule(sched) for sched
in elem.find("schedules"))
self.attributes = tuple(Attribute(attr) for attr
in elem.find("attributes"))
def __str__(self):
"""
Returns a string representation of the Section that includes the
section's component and number, and section's ID.
"""
return f"{self.component} {self.section_num} (id: {self.class_id})"
class Tag(object):
"""
This class represents a tag for a course.
Attributes:
organization (str): The organization within the school responsible for
the tag.
name (str): The name of the tag.
"""
def __init__(self, elem: Element):
"""
Constructs a new Tag from an XML element.
Args:
elem (Element): The tag's XML element.
"""
self.organization = elem.findtext("organization")
self.name = elem.findtext("name")
def __str__(self):
"""
Returns a string representation of the Tag that includes the
tag's organization and name.
"""
return f"{self.organization}::{self.name}"
class LearningObjective(object):
"""
This class represents a learning objective for a course.
Attributes:
code (str): The GER that the learning objective is for.
description (str): A description of the learning objective.
"""
def __init__(self, elem: Element):
"""
Constructs a new LearningObjective from an XML element.
Args:
elem (Element): The learning objective's XML element.
"""
self.code = elem.findtext(".//requirementCode")
self.description = elem.findtext(".//description")
def __str__(self):
"""
Returns a string representation of the LearningObjective that includes
the learning objective's code and description.
"""
return f"Learning Objective ({self.code}: {self.description})"
class Course(object):
"""
This class represents a course listed at the university.
Attributes:
year (str): The Academic year that the course is offered.
subject (str): The academic subject of the course (e.g., 'MATH').
code (str): The code listing of the course (e.g., '51').
title (str): The full title of the course.
description (str): A description of the course.
gers (Tuple[str]): The General Education Requirements satisfied
by the course.
repeatable (bool): True if the course is repeatable for credit,
False otherwise.
grading_basis (str): The grading basis options for the course.
units_min (int): The minimum number of units the course can be
taken for.
units_max (int): The maximum number of units the course can be
taken for.
objectives (Tuple[LearningObjective]): The learning objectives of
the course.
final_exam (bool): True if the course has a final exam, False otherwise.
sections (Tuple[Section]): The sections associated with the course.
tags (Tuple[Tag]): The tags associated with the course.
attributes (Tuple[Attributes]): The attributes associated with
the course.
course_id (int): The unique ID of the course.
active (bool): True if the course is currently being taught,
False otherwise.
offer_num (str): The offer number of the course.
academic_group (str): The academic group that the course is a part of.
academic_org (str): The academic organization that the course
is a part of.
academic_career (str): The academic career associated with the course.
max_units_repeat (int): The number of units that the course
can be repeated for.
max_times_repeat (int): The number of times that the course
can be repeated.
"""
def __init__(self, elem: Element):
"""
Constructs a new Course from an XML element.
Args:
elem (Element): The course's XML element.
"""
self.year = elem.findtext("year")
self.subject = elem.findtext("subject")
self.code = elem.findtext("code")
self.title = elem.findtext("title")
self.description = elem.findtext("description")
self.gers = tuple(elem.findtext("gers").split(", "))
self.repeatable = (True if elem.findtext("repeatable") == "true"
else False)
self.grading_basis = elem.findtext("grading")
self.units_min = int(elem.findtext("unitsMin"))
self.units_max = int(elem.findtext("unitsMax"))
self.objectives = tuple(LearningObjective(obj) for obj
in elem.find("learningObjectives"))
self.final_exam = (
True if elem.findtext(".//finalExamFlag") == "Y"
else False if elem.findtext(".//finalExamFlag") == "N"
else None
)
self.sections = tuple(Section(section) for section
in elem.find("sections"))
self.tags = tuple(Tag(tag) for tag in elem.find("tags"))
self.attributes = tuple(Attribute(attr) for attr
in elem.find("attributes"))
self.course_id = int(elem.findtext(".//courseId"))
self.active = (True if elem.findtext(".//effectiveStatus") == "A"
else False if elem.findtext(".//effectiveStatus") == "I"
else None)
self.offer_num = elem.findtext(".//offerNumber")
self.academic_group = elem.findtext(".//academicGroup")
self.academic_org = elem.findtext(".//academicOrganization")
self.academic_career = elem.findtext(".//academicCareer")
self.max_units_repeat = int(elem.findtext(".//maxUnitsRepeat"))
self.max_times_repeat = int(elem.findtext(".//maxTimesRepeat"))
def __str__(self):
"""
Returns a string representation of the Course that includes the
course's subject, code, and full title.
"""
return f"{self.subject}{self.code} {self.title}"
def __eq__(self, other):
"""
Overloads the equality (==) operator for the Course class.
A Course can only be compared to another Course. Course equality is
determined by course ID.
Args:
other: The right operand of the equality operator.
Returns:
bool: True if the object being compared is equal to the Course,
False otherwise.
"""
if type(other) != Course: return False
return self.course_id == other.course_id
def __lt__(self, other):
"""
Overloads the less than (<) operator for Course.
A Course can only be compared to another Course. Courses are compared
first by subject, then by code, and last by year.
Args:
other: The right operand of the less than operator.
Returns:
bool: True if the object being compared is less than the Course,
False otherwise.
"""
if type(other) != Course:
raise TypeError(f"'<' not supported between instances of "
f"'{type(self)}' and '{type(other)}'")
if self.subject != other.subject:
return self.subject < other.subject
if self.code != other.code:
return self.code < other.code
if self.year != other.year:
return self.year < other.year
return False
def __gt__(self, other):
"""
Overloads the greater than (>) operator for Course.
A Course can only be compared to another Course. Courses are compared
first by subject, then by code, and last by year.
Args:
other: The right operand of the greater than operator.
Returns:
bool: True if the object being compared is greater than the Course,
False otherwise.
"""
if type(other) != Course:
raise TypeError(f"'>' not supported between instances of "
f"'{type(self)}' and '{type(other)}'")
return not self.__lt__(other) and not self.__eq__(other)
def __le__(self, other):
"""
Overloads the less than or equal to operator (<=) for Course.
A Course can only be compared to another Course. Courses are compared
first by subject, then by code, and last by year.
Args:
other: The right operand of the less than or equal to operator.
Returns:
bool: True if the object being compared is less than or equal to
the Course, False otherwise.
"""
if type(other) != Course:
raise TypeError(f"'<=' not supported between instances of "
f"'{type(self)}' and '{type(other)}'")
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
"""
Overloads the greater than or equal to operator (>=) for Course.
A Course can only be compared to another Course. Courses are compared
first by subject, then by code, and last by year.
Args:
other: The right operand of the greater than or equal to operator.
Returns:
bool: True if the object being compared is greater than or equal to
the Course, False otherwise.
"""
if type(other) != Course:
raise TypeError(f"'>=' not supported between instances of "
f"'{type(self)}' and '{type(other)}'")
return self.__gt__(other) or self.__eq__(other)
```
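A small, self-contained sketch of how these classes wrap ExploreCourses XML. The XML snippet here is fabricated for illustration; in practice the elements come from the API client in this package.
```python
from xml.etree.ElementTree import fromstring

from explorecourses.classes import School

school_xml = """
<school name="School of Engineering">
  <department longname="Computer Science" name="CS"/>
  <department longname="Electrical Engineering" name="EE"/>
</school>
"""

school = School(fromstring(school_xml))
print(school)                        # School of Engineering
print(school.get_department("cs"))   # Computer Science (CS)
```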
|
{
"source": "jeremyephron/forager",
"score": 2
}
|
#### File: forager/add_dataset/clip_inference.py
```python
import concurrent.futures
from typing import List
import numpy as np
import torch
import clip
from PIL import Image
from tqdm import tqdm
BATCH_SIZE = 256
EMBEDDING_DIM = 512
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
load_image = lambda path: preprocess(Image.open(path)).unsqueeze(0).to(device)
def run(image_paths: List[str], embeddings_output_filename: str):
embeddings = np.memmap(
embeddings_output_filename,
dtype="float32",
mode="w+",
shape=(len(image_paths), EMBEDDING_DIM),
)
with concurrent.futures.ThreadPoolExecutor() as executor:
for i in tqdm(range(0, len(image_paths), BATCH_SIZE)):
# Load batch of images
batch_paths = image_paths[i : i + BATCH_SIZE]
images = torch.cat(list(executor.map(load_image, batch_paths)))
with torch.no_grad():
image_features = model.encode_image(images)
image_features /= image_features.norm(dim=-1, keepdim=True)
embeddings[i : i + BATCH_SIZE] = image_features.cpu().numpy()
embeddings.flush()
```
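A minimal usage sketch: embed every JPEG in a directory and read the resulting memmap back. The directory and output filename are placeholders, and it assumes `run()` and `EMBEDDING_DIM` from the module above are in scope.
```python
from glob import glob

import numpy as np

paths = sorted(glob("images/*.jpg"))     # placeholder image directory
run(paths, "clip_embeddings.bin")

# Read the embeddings back with the same dtype/shape used by run().
embeddings = np.memmap(
    "clip_embeddings.bin", dtype="float32", mode="r",
    shape=(len(paths), EMBEDDING_DIM))
```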
#### File: forager/add_dataset/utils.py
```python
import asyncio
import functools
import os
def parse_gcs_path(path):
assert path.startswith("gs://")
path = path[len("gs://") :]
bucket_end = path.find("/")
bucket = path[:bucket_end]
relative_path = path[bucket_end:].strip("/")
return bucket, relative_path
def make_identifier(path):
return os.path.splitext(os.path.basename(path))[0]
def unasync(coro):
@functools.wraps(coro)
def wrapper(*args, **kwargs):
return asyncio.run(coro(*args, **kwargs))
return wrapper
```
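Illustrative usage of the three helpers above; the GCS path is a placeholder.
```python
bucket, rel = parse_gcs_path("gs://my-bucket/datasets/train/img_001.jpg")
assert bucket == "my-bucket" and rel == "datasets/train/img_001.jpg"
print(make_identifier(rel))  # -> "img_001"

@unasync
async def fetch_answer():
    return 42

print(fetch_answer())  # the coroutine is driven to completion via asyncio.run
```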
#### File: forager_server/forager_server_api/views.py
```python
from collections import defaultdict, namedtuple
from dataclasses import dataclass
import distutils.util
import functools
import itertools
import json
import math
import operator
import os
import random
import uuid
import shutil
import logging
import time
from typing import List, Dict, NamedTuple, Optional
from django.db.models import Q
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404, get_list_or_404
from django.conf import settings
from google.cloud import storage
from rest_framework.decorators import api_view
import requests
from expiringdict import ExpiringDict
from .models import (
Dataset,
DatasetItem,
Category,
Mode,
User,
Annotation,
DNNModel,
CategoryCount,
)
BUILTIN_MODES = ["POSITIVE", "NEGATIVE", "HARD_NEGATIVE", "UNSURE"]
logger = logging.getLogger(__name__)
@api_view(["POST"])
@csrf_exempt
def start_cluster(request):
# TODO(mihirg): Remove this setting from Django; it's now managed by Terraform
# (or figure out how to set it from the frontend if we need that)
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/start_cluster",
)
response_data = r.json()
return JsonResponse(
{
"status": "success",
"cluster_id": response_data["cluster_id"],
}
)
@api_view(["GET"])
@csrf_exempt
def get_cluster_status(request, cluster_id):
params = {"cluster_id": cluster_id}
r = requests.get(
settings.EMBEDDING_SERVER_ADDRESS + "/cluster_status", params=params
)
response_data = r.json()
return JsonResponse(response_data)
@api_view(["POST"])
@csrf_exempt
def stop_cluster(request, cluster_id):
params = {"cluster_id": cluster_id}
requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/stop_cluster",
json=params,
)
return JsonResponse(
{
"status": "success",
}
)
@api_view(["POST"])
@csrf_exempt
def create_model(request, dataset_name, dataset=None):
payload = json.loads(request.body)
model_name = payload["model_name"]
cluster_id = payload["cluster_id"]
bucket_name = payload["bucket"]
index_id = payload["index_id"]
pos_tags = parse_tag_set_from_query_v2(payload["pos_tags"])
neg_tags = parse_tag_set_from_query_v2(payload["neg_tags"])
val_pos_tags = parse_tag_set_from_query_v2(payload["val_pos_tags"])
val_neg_tags = parse_tag_set_from_query_v2(payload["val_neg_tags"])
augment_negs = bool(payload["augment_negs"])
model_kwargs = payload["model_kwargs"]
resume_model_id = payload.get("resume", None)
dataset = get_object_or_404(Dataset, name=dataset_name)
eligible_images = DatasetItem.objects.filter(dataset=dataset, is_val=False)
categories = Category.objects.filter(
tag_sets_to_query(pos_tags, neg_tags, val_pos_tags, val_neg_tags)
)
annotations = Annotation.objects.filter(
dataset_item__in=eligible_images,
category__in=categories,
)
tags_by_pk = get_tags_from_annotations_v2(annotations)
pos_dataset_item_pks = []
neg_dataset_item_pks = []
val_pos_dataset_item_pks = []
val_neg_dataset_item_pks = []
for pk, tags in tags_by_pk.items():
if any(t in pos_tags for t in tags):
pos_dataset_item_pks.append(pk)
elif any(t in neg_tags for t in tags):
neg_dataset_item_pks.append(pk)
elif any(t in val_pos_tags for t in tags):
val_pos_dataset_item_pks.append(pk)
elif any(t in val_neg_tags for t in tags):
val_neg_dataset_item_pks.append(pk)
# Augment with randomly sampled negatives if requested
num_extra_negs = settings.BGSPLIT_NUM_NEGS_MULTIPLIER * len(
pos_dataset_item_pks
) - len(neg_dataset_item_pks)
if augment_negs and num_extra_negs > 0:
# Uses "include" and "exclude" category sets from request
all_eligible_pks = filtered_images_v2(
request,
dataset,
exclude_pks=(
pos_dataset_item_pks
+ neg_dataset_item_pks
+ val_pos_dataset_item_pks
+ val_neg_dataset_item_pks
),
)
sampled_pks = random.sample(
all_eligible_pks, min(len(all_eligible_pks), num_extra_negs)
)
neg_dataset_item_pks.extend(sampled_pks)
pos_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=pos_dataset_item_pks).values_list(
"identifier", flat=True
)
)
neg_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=neg_dataset_item_pks).values_list(
"identifier", flat=True
)
)
val_pos_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=val_pos_dataset_item_pks).values_list(
"identifier", flat=True
)
)
val_neg_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=val_neg_dataset_item_pks).values_list(
"identifier", flat=True
)
)
if resume_model_id:
resume_model = get_object_or_404(DNNModel, model_id=resume_model_id)
resume_model_path = resume_model.checkpoint_path
else:
resume_model = None
resume_model_path = None
params = {
"pos_identifiers": pos_dataset_item_internal_identifiers,
"neg_identifiers": neg_dataset_item_internal_identifiers,
"val_pos_identifiers": val_pos_dataset_item_internal_identifiers,
"val_neg_identifiers": val_neg_dataset_item_internal_identifiers,
"augment_negs": augment_negs,
"model_kwargs": model_kwargs,
"model_name": model_name,
"bucket": bucket_name,
"cluster_id": cluster_id,
"index_id": index_id,
"resume_from": resume_model_path,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/start_bgsplit_job",
json=params,
)
response_data = r.json()
if r.status_code != 200:
return JsonResponse(
{"status": "failure", "reason": response_data.get("reason", "")},
status=r.status_code,
)
m = DNNModel(
dataset=dataset,
name=model_name,
model_id=response_data["model_id"],
category_spec={
"augment_negs": augment_negs,
"pos_tags": payload["pos_tags"],
"neg_tags": payload["neg_tags"],
"augment_negs_include": payload.get("include", []) if augment_negs else [],
"augment_negs_exclude": payload.get("exclude", []) if augment_negs else [],
},
)
model_epoch = -1 + model_kwargs.get("epochs_to_run", 1)
if resume_model_id:
m.resume_model_id = resume_model_id
if model_kwargs.get("resume_training", False):
model_epoch += resume_model.epoch + 1
m.epoch = model_epoch
m.save()
return JsonResponse(
{
"status": "success",
"model_id": response_data["model_id"],
}
)
@api_view(["GET"])
@csrf_exempt
def get_model_status(request, model_id):
params = {"model_id": model_id}
r = requests.get(
settings.EMBEDDING_SERVER_ADDRESS + "/bgsplit_job_status", params=params
)
response_data = r.json()
if response_data["has_model"]:
        # Model checkpoint has been successfully created & uploaded -> persist its path
m = get_object_or_404(DNNModel, model_id=model_id)
m.checkpoint_path = response_data["checkpoint_path"]
m.save()
return JsonResponse(response_data)
@api_view(["POST"])
@csrf_exempt
def update_model_v2(request):
payload = json.loads(request.body)
# user = payload["user"]
old_model_name = payload["old_model_name"]
new_model_name = payload["new_model_name"]
models = get_list_or_404(DNNModel, name=old_model_name)
for m in models:
m.name = new_model_name
m.save()
return JsonResponse({"success": True})
@api_view(["POST"])
@csrf_exempt
def delete_model_v2(request):
payload = json.loads(request.body)
model_name = payload["model_name"]
# cluster_id = payload['cluster_id']
models = get_list_or_404(DNNModel, name=model_name)
for m in models:
# TODO(fpoms): delete model data stored on NFS?
# shutil.rmtree(os.path.join(m.checkpoint_path, '..'))
shutil.rmtree(m.output_directory, ignore_errors=True)
m.delete()
return JsonResponse({"success": True})
@api_view(["POST"])
@csrf_exempt
def run_model_inference(request, dataset_name, dataset=None):
payload = json.loads(request.body)
model_id = payload["model_id"]
cluster_id = payload["cluster_id"]
bucket_name = payload["bucket"]
index_id = payload["index_id"]
dataset = get_object_or_404(Dataset, name=dataset_name)
model_checkpoint_path = get_object_or_404(
DNNModel, model_id=model_id
).checkpoint_path
if model_checkpoint_path is None or len(model_checkpoint_path) == 0:
return JsonResponse(
{
"status": "failure",
"reason": f"Model {model_id} does not have a model checkpoint.",
},
status=400,
)
params = {
"bucket": bucket_name,
"model_id": model_id,
"checkpoint_path": model_checkpoint_path,
"cluster_id": cluster_id,
"index_id": index_id,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/start_bgsplit_inference_job",
json=params,
)
response_data = r.json()
return JsonResponse(
{
"status": "success",
"job_id": response_data["job_id"],
}
)
@api_view(["GET"])
@csrf_exempt
def get_model_inference_status(request, job_id):
params = {"job_id": job_id}
r = requests.get(
settings.EMBEDDING_SERVER_ADDRESS + "/bgsplit_inference_job_status",
params=params,
)
response_data = r.json()
if response_data["has_output"]:
model_id = response_data["model_id"]
        # Inference output has been successfully created & uploaded -> persist its directory
m = get_object_or_404(DNNModel, model_id=model_id)
m.output_directory = response_data["output_dir"]
m.save()
return JsonResponse(response_data)
@api_view(["POST"])
@csrf_exempt
def stop_model_inference(request, job_id):
params = {"job_id": job_id}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/stop_bgsplit_inference_job", json=params
)
response_data = r.json()
return JsonResponse(response_data, status=r.status_code)
#
# V2 ENDPOINTS
# TODO(mihirg): Make these faster
#
Tag = namedtuple("Tag", "category value") # type: NamedTuple[str, str]
Box = namedtuple(
"Box", "category value x1 y1 x2 y2"
) # type: NamedTuple[str, str, float, float, float, float]
PkType = int
@dataclass
class ResultSet:
type: str
ranking: List[PkType]
distances: List[float]
model: Optional[str]
# TODO(fpoms): this needs to be wrapped in a lock so that
# updates are atomic across concurrent requests
current_result_sets = ExpiringDict(
max_age_seconds=30 * 60,
max_len=50,
) # type: Dict[str, ResultSet]
def parse_tag_set_from_query_v2(s):
if isinstance(s, list):
parts = s
elif isinstance(s, str) and s:
parts = s.split(",")
else:
parts = []
ts = set()
for part in parts:
if not part:
continue
category, value_str = part.split(":")
ts.add(Tag(category, value_str))
return ts
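# Illustrative sketch (not from the original source; values hypothetical): the
# comma-separated query form is "category:value" pairs, so a call such as
#   parse_tag_set_from_query_v2("person:POSITIVE,car:NEGATIVE")
# would yield {Tag("person", "POSITIVE"), Tag("car", "NEGATIVE")}; the list form
# carries the same "category:value" strings, one per element.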
def tag_sets_to_query(*tagsets):
merged = set().union(*tagsets)
if not merged:
return Q()
return Q(
annotation__in=Annotation.objects.filter(
functools.reduce(
operator.or_,
[Q(category__name=t.category, mode__name=t.value) for t in merged],
)
)
)
def serialize_tag_set_for_client_v2(ts):
return [{"category": t.category, "value": t.value} for t in sorted(list(ts))]
def serialize_boxes_for_client_v2(bs):
return [
{
"category": b.category,
"value": b.value,
"x1": b.x1,
"y1": b.y1,
"x2": b.x2,
"y2": b.y2,
}
for b in sorted(list(bs))
]
def get_tags_from_annotations_v2(annotations):
tags_by_pk = defaultdict(list)
annotations = annotations.filter(is_box=False)
ann_dicts = annotations.values("dataset_item__pk", "category__name", "mode__name")
for ann in ann_dicts:
pk = ann["dataset_item__pk"]
category = ann["category__name"]
mode = ann["mode__name"]
tags_by_pk[pk].append(Tag(category, mode))
return tags_by_pk
def get_boxes_from_annotations_v2(annotations):
boxes_by_pk = defaultdict(list)
annotations = annotations.filter(is_box=True)
ann_dicts = annotations.values(
"dataset_item__pk",
"category__name",
"mode__name",
"bbox_x1",
"bbox_y1",
"bbox_x2",
"bbox_y2",
)
for ann in ann_dicts:
pk = ann["dataset_item__pk"]
category = ann["category__name"]
mode = ann["mode__name"]
box = (ann["bbox_x1"], ann["bbox_y1"], ann["bbox_x2"], ann["bbox_y2"])
boxes_by_pk[pk].append(Box(category, mode, *box))
return boxes_by_pk
def filtered_images_v2(request, dataset, exclude_pks=None) -> List[PkType]:
filt_start = time.time()
if request.method == "POST":
payload = json.loads(request.body)
include_tags = parse_tag_set_from_query_v2(payload.get("include"))
exclude_tags = parse_tag_set_from_query_v2(payload.get("exclude"))
pks = [i for i in payload.get("subset", []) if i]
split = payload.get("split", "train")
offset_to_return = int(payload.get("offset", 0))
num_to_return = int(payload.get("num", -1))
else:
include_tags = parse_tag_set_from_query_v2(request.GET.get("include"))
exclude_tags = parse_tag_set_from_query_v2(request.GET.get("exclude"))
pks = [i for i in request.GET.get("subset", "").split(",") if i]
split = request.GET.get("split", "train")
offset_to_return = int(request.GET.get("offset", 0))
num_to_return = int(request.GET.get("num", -1))
end_to_return = None if num_to_return == -1 else offset_to_return + num_to_return
dataset_items = None
is_val = split == "val"
db_start = time.time()
# Get pks for dataset items of interest
if pks and exclude_pks:
# Get specific pks - excluded pks if requested
exclude_pks = set(exclude_pks)
pks = [pk for pk in pks if pk not in exclude_pks]
elif not pks:
# Otherwise get all dataset items - exclude pks
dataset_items = DatasetItem.objects.filter(dataset=dataset, is_val=is_val)
if exclude_pks:
dataset_items = dataset_items.exclude(pk__in=exclude_pks)
pks = dataset_items.values_list("pk", flat=True)
db_end = time.time()
result = None
db_tag_start = time.time()
if not include_tags and not exclude_tags:
# If no tags specified, just return retrieved pks
result = pks
else:
# Otherwise, filter using include and exclude tags
if dataset_items is None:
dataset_items = DatasetItem.objects.filter(pk__in=pks)
if include_tags:
dataset_items = dataset_items.filter(tag_sets_to_query(include_tags))
if exclude_tags:
dataset_items = dataset_items.exclude(tag_sets_to_query(exclude_tags))
result = dataset_items.values_list("pk", flat=True)
db_tag_end = time.time()
result = list(result[offset_to_return:end_to_return])
filt_end = time.time()
print(
f"filtered_images_v2: tot: {filt_end-filt_start}, "
f"db ({len(result)} items): {db_end-db_start}, db tag: {db_tag_end-db_tag_start}"
)
return result
def process_image_query_results_v2(request, dataset, query_response):
filtered_pks = filtered_images_v2(request, dataset)
# TODO(mihirg): Eliminate this database call by directly returning pks from backend
dataset_items = DatasetItem.objects.filter(pk__in=filtered_pks)
dataset_items_by_path = {di.path: di for di in dataset_items}
distances = []
ordered_pks = []
for r in query_response["results"]:
if r["label"] in dataset_items_by_path:
ordered_pks.append(dataset_items_by_path[r["label"]].pk)
distances.append(r["dist"])
return dict(
pks=ordered_pks,
distances=distances,
)
def create_result_set_v2(results, type, model=None):
pks = results["pks"]
distances = results["distances"]
result_set_id = str(uuid.uuid4())
current_result_sets[result_set_id] = ResultSet(
type=type, ranking=pks, distances=distances, model=model
)
return {
"id": result_set_id,
"num_results": len(pks),
"type": type,
}
@api_view(["GET"])
@csrf_exempt
def get_results_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
index_id = request.GET["index_id"]
result_set_id = request.GET["result_set_id"]
offset_to_return = int(request.GET.get("offset", 0))
num_to_return = int(request.GET.get("num", 500))
clustering_model = request.GET.get("clustering_model", None)
result_set = current_result_sets[result_set_id]
pks = result_set.ranking[offset_to_return : offset_to_return + num_to_return]
distances = result_set.distances[
offset_to_return : offset_to_return + num_to_return
]
dataset_items_by_pk = DatasetItem.objects.in_bulk(pks)
dataset_items = [dataset_items_by_pk[pk] for pk in pks] # preserve order
bucket_name = dataset.train_directory[len("gs://") :].split("/")[0]
path_template = "https://storage.googleapis.com/{:s}/".format(bucket_name) + "{:s}"
internal_identifiers = [di.identifier for di in dataset_items]
params = {
"index_id": index_id,
"identifiers": internal_identifiers,
}
if clustering_model:
params["model"] = clustering_model
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/perform_clustering",
json=params,
)
clustering_data = r.json()
dataset_item_paths = [
(di.path if di.path.find("http") != -1 else path_template.format(di.path))
for di in dataset_items
]
dataset_item_identifiers = [di.pk for di in dataset_items]
return JsonResponse(
{
"paths": dataset_item_paths,
"identifiers": dataset_item_identifiers,
"distances": distances,
"clustering": clustering_data["clustering"],
}
)
@api_view(["POST"])
@csrf_exempt
def keep_alive_v2(request):
requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/keep_alive",
)
return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def generate_embedding_v2(request):
payload = json.loads(request.body)
image_id = payload.get("image_id")
if image_id:
payload["identifier"] = DatasetItem.objects.get(pk=image_id).identifier
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/generate_embedding",
json=payload,
)
return JsonResponse(r.json())
@api_view(["POST"])
@csrf_exempt
def generate_text_embedding_v2(request):
payload = json.loads(request.body)
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/generate_text_embedding",
json=payload,
)
return JsonResponse(r.json())
@api_view(["POST"])
@csrf_exempt
def query_knn_v2(request, dataset_name):
payload = json.loads(request.body)
index_id = payload["index_id"]
embeddings = payload["embeddings"]
use_full_image = bool(payload.get("use_full_image", True))
use_dot_product = bool(payload.get("use_dot_product", False))
model = payload.get("model", "imagenet")
dataset = get_object_or_404(Dataset, name=dataset_name)
query_knn_start = time.time()
params = {
"index_id": index_id,
"embeddings": embeddings,
"use_full_image": use_full_image,
"use_dot_product": use_dot_product,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_knn_v2",
json=params,
)
response_data = r.json()
query_knn_end = time.time()
logger.debug("query_knn_v2 time: {:f}".format(query_knn_end - query_knn_start))
results = process_image_query_results_v2(
request,
dataset,
response_data,
)
return JsonResponse(create_result_set_v2(results, "knn", model=model))
@api_view(["GET"])
@csrf_exempt
def train_svm_v2(request, dataset_name):
index_id = request.GET["index_id"]
model = request.GET.get("model", "imagenet")
pos_tags = parse_tag_set_from_query_v2(request.GET["pos_tags"])
neg_tags = parse_tag_set_from_query_v2(request.GET.get("neg_tags"))
augment_negs = bool(
distutils.util.strtobool(request.GET.get("augment_negs", "false"))
)
dataset = get_object_or_404(Dataset, name=dataset_name)
pos_dataset_items = DatasetItem.objects.filter(
tag_sets_to_query(pos_tags),
dataset=dataset,
is_val=False,
)
pos_dataset_item_pks = list(pos_dataset_items.values_list("pk", flat=True))
if neg_tags:
neg_dataset_items = DatasetItem.objects.filter(
tag_sets_to_query(neg_tags),
dataset=dataset,
is_val=False,
).difference(pos_dataset_items)
neg_dataset_item_pks = list(neg_dataset_items.values_list("pk", flat=True))
else:
neg_dataset_item_pks = []
# Augment with randomly sampled negatives if requested
num_extra_negs = settings.SVM_NUM_NEGS_MULTIPLIER * len(pos_dataset_item_pks) - len(
neg_dataset_item_pks
)
if augment_negs and num_extra_negs > 0:
# Uses "include" and "exclude" category sets from GET request
all_eligible_pks = filtered_images_v2(
request, dataset, exclude_pks=pos_dataset_item_pks + neg_dataset_item_pks
)
sampled_pks = random.sample(
all_eligible_pks, min(len(all_eligible_pks), num_extra_negs)
)
neg_dataset_item_pks.extend(sampled_pks)
pos_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=pos_dataset_item_pks).values_list(
"identifier", flat=True
)
)
neg_dataset_item_internal_identifiers = list(
DatasetItem.objects.filter(pk__in=neg_dataset_item_pks).values_list(
"identifier", flat=True
)
)
params = {
"index_id": index_id,
"pos_identifiers": pos_dataset_item_internal_identifiers,
"neg_identifiers": neg_dataset_item_internal_identifiers,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/train_svm_v2",
json=params,
)
return JsonResponse(r.json()) # {"svm_vector": base64-encoded string}
@api_view(["POST"])
@csrf_exempt
def query_svm_v2(request, dataset_name):
payload = json.loads(request.body)
index_id = payload["index_id"]
svm_vector = payload["svm_vector"]
score_min = float(payload.get("score_min", 0.0))
score_max = float(payload.get("score_max", 1.0))
model = payload.get("model", "imagenet")
dataset = get_object_or_404(Dataset, name=dataset_name)
params = {
"index_id": index_id,
"svm_vector": svm_vector,
"score_min": score_min,
"score_max": score_max,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_svm_v2",
json=params,
)
response_data = r.json()
# TODO(mihirg, jeremye): Consider some smarter pagination/filtering scheme to avoid
# running a separate query over the index every single time the user adjusts score
# thresholds
results = process_image_query_results_v2(
request,
dataset,
response_data,
)
return JsonResponse(create_result_set_v2(results, "svm"))
@api_view(["POST"])
@csrf_exempt
def query_ranking_v2(request, dataset_name):
payload = json.loads(request.body)
index_id = payload["index_id"]
score_min = float(payload.get("score_min", 0.0))
score_max = float(payload.get("score_max", 1.0))
model = payload["model"]
dataset = get_object_or_404(Dataset, name=dataset_name)
params = {
"index_id": index_id,
"score_min": score_min,
"score_max": score_max,
"model": model,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_ranking_v2",
json=params,
)
response_data = r.json()
# TODO(mihirg, jeremye): Consider some smarter pagination/filtering scheme to avoid
# running a separate query over the index every single time the user adjusts score
# thresholds
results = process_image_query_results_v2(
request,
dataset,
response_data,
)
return JsonResponse(create_result_set_v2(results, "ranking", model=model))
@api_view(["POST"])
@csrf_exempt
def query_images_v2(request, dataset_name):
query_start = time.time()
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
order = payload.get("order", "id")
filter_start = time.time()
result_pks = filtered_images_v2(request, dataset)
filter_end = time.time()
if order == "random":
random.shuffle(result_pks)
elif order == "id":
result_pks.sort()
results = {"pks": result_pks, "distances": [-1 for _ in result_pks]}
resp = JsonResponse(create_result_set_v2(results, "query"))
query_end = time.time()
print(
f"query_images_v2: tot: {query_end-query_start}, "
f"filter: {filter_end-filter_start}"
)
return resp
#
# ACTIVE VALIDATION
#
VAL_NEGATIVE_TYPE = "model_val_negative"
def get_val_examples_v2(dataset, model_id):
# Get positive and negative categories
model = get_object_or_404(DNNModel, model_id=model_id)
pos_tags = parse_tag_set_from_query_v2(model.category_spec["pos_tags"])
neg_tags = parse_tag_set_from_query_v2(model.category_spec["neg_tags"])
augment_negs = model.category_spec.get("augment_negs", False)
augment_negs_include = (
parse_tag_set_from_query_v2(model.category_spec.get("augment_negs_include", []))
if augment_negs
else set()
)
# Limit to validation set
eligible_dataset_items = DatasetItem.objects.filter(
dataset=dataset,
is_val=True,
)
# Get positives and negatives matching these categories
categories = Category.objects.filter(
tag_sets_to_query(pos_tags, neg_tags, augment_negs_include)
)
annotations = Annotation.objects.filter(
dataset_item__in=eligible_dataset_items,
category__in=categories,
)
tags_by_pk = get_tags_from_annotations_v2(annotations)
pos_dataset_item_pks = []
neg_dataset_item_pks = []
for pk, tags in tags_by_pk.items():
if any(t in pos_tags for t in tags):
pos_dataset_item_pks.append(pk)
elif any(t in neg_tags or t in augment_negs_include for t in tags):
neg_dataset_item_pks.append(pk)
# Get extra negatives
if augment_negs:
annotations = Annotation.objects.filter(
dataset_item__in=eligible_dataset_items,
label_category=model_id,
label_type=VAL_NEGATIVE_TYPE,
)
neg_dataset_item_pks.extend(ann.dataset_item.pk for ann in annotations)
return pos_dataset_item_pks, neg_dataset_item_pks
@api_view(["POST"])
def query_metrics_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
model_id = payload["model"]
index_id = payload["index_id"]
internal_identifiers_to_weights = payload["weights"] # type: Dict[str, int]
pos_dataset_item_pks, neg_dataset_item_pks = get_val_examples_v2(dataset, model_id)
# Construct identifiers, labels, and weights
dataset_items_by_pk = DatasetItem.objects.in_bulk(
pos_dataset_item_pks + neg_dataset_item_pks
)
identifiers = []
labels = []
weights = []
for pk, label in itertools.chain(
((pk, True) for pk in pos_dataset_item_pks),
((pk, False) for pk in neg_dataset_item_pks),
):
di = dataset_items_by_pk[pk]
identifier = di.identifier
weight = internal_identifiers_to_weights.get(identifier)
if weight is None:
continue
identifiers.append(identifier)
labels.append(label)
weights.append(weight)
# TODO(mihirg): Parse false positives and false negatives
params = {
"index_id": index_id,
"model": model_id,
"identifiers": identifiers,
"labels": labels,
"weights": weights,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_metrics",
json=params,
)
response_data = r.json()
return JsonResponse(response_data)
@api_view(["POST"])
def query_active_validation_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
model_id = payload["model"]
index_id = payload["index_id"]
current_f1 = payload.get("current_f1")
if current_f1 is None:
current_f1 = 0.5
pos_dataset_item_pks, neg_dataset_item_pks = get_val_examples_v2(dataset, model_id)
# Construct paths, identifiers, and labels
dataset_items_by_pk = DatasetItem.objects.in_bulk(
pos_dataset_item_pks + neg_dataset_item_pks
)
identifiers = []
labels = []
for pk, label in itertools.chain(
((pk, True) for pk in pos_dataset_item_pks),
((pk, False) for pk in neg_dataset_item_pks),
):
di = dataset_items_by_pk[pk]
identifiers.append(di.identifier)
labels.append(label)
params = {
"index_id": index_id,
"model": model_id,
"identifiers": identifiers,
"labels": labels,
"current_f1": current_f1,
}
r = requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/query_active_validation",
json=params,
)
response_data = r.json()
if response_data["identifiers"]:
pks_and_paths = list(
DatasetItem.objects.filter(
dataset=dataset,
identifier__in=response_data["identifiers"],
is_val=True,
).values_list("pk", "path")
)
random.shuffle(pks_and_paths)
pks, paths = zip(*pks_and_paths)
else:
pks, paths = [], []
bucket_name = dataset.val_directory[len("gs://") :].split("/")[0]
path_template = "https://storage.googleapis.com/{:s}/".format(bucket_name) + "{:s}"
paths = [path_template.format(p) for p in paths]
return JsonResponse(
{
"paths": paths,
"identifiers": pks,
"weights": response_data["weights"],
}
)
@api_view(["POST"])
def add_val_annotations_v2(request):
payload = json.loads(request.body)
annotations = payload["annotations"]
user_email = payload["user"]
model = payload["model"]
anns = []
cat_modes = defaultdict(int)
dataset = None
for ann_payload in annotations:
image_pk = ann_payload["identifier"]
is_other_negative = ann_payload.get("is_other_negative", False)
mode_str = "NEGATIVE" if is_other_negative else ann_payload["mode"]
category_name = (
"active:" + model if is_other_negative else ann_payload["category"]
)
user, _ = User.objects.get_or_create(email=user_email)
category, _ = Category.objects.get_or_create(name=category_name)
mode, _ = Mode.objects.get_or_create(name=mode_str)
di = DatasetItem.objects.get(pk=image_pk)
dataset = di.dataset
assert di.is_val
ann = Annotation(
dataset_item=di,
user=user,
category=category,
mode=mode,
misc_data={"created_by": "active_val"},
)
cat_modes[(category, mode)] += 1
anns.append(ann)
Annotation.objects.bulk_create(anns)
for (cat, mode), c in cat_modes.items():
category_count, _ = CategoryCount.objects.get_or_create(
dataset=dataset, category=cat, mode=mode
)
category_count.count += c
category_count.save()
return JsonResponse({"created": len(anns)})
# DATASET INFO
@api_view(["GET"])
@csrf_exempt
def get_datasets_v2(request):
datasets = Dataset.objects.filter(hidden=False)
dataset_names = list(datasets.values_list("name", flat=True))
return JsonResponse({"dataset_names": dataset_names})
@api_view(["GET"])
@csrf_exempt
def get_dataset_info_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
num_train = dataset.datasetitem_set.filter(is_val=False).count()
num_val = dataset.datasetitem_set.filter(is_val=True).count()
return JsonResponse(
{
"index_id": dataset.index_id,
"num_train": num_train,
"num_val": num_val,
}
)
@api_view(["GET"])
@csrf_exempt
def get_models_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
model_objs = DNNModel.objects.filter(
dataset=dataset,
checkpoint_path__isnull=False,
).order_by("-last_updated")
model_names = set()
latest = {}
with_output = {}
for model in model_objs:
model_names.add(model.name)
if model.name not in latest:
latest[model.name] = model
if model.output_directory and model.name not in with_output:
with_output[model.name] = model
models = [
{
"name": model_name,
"latest": model_info(latest[model_name]),
"with_output": model_info(with_output.get(model_name)),
}
for model_name in model_names
]
return JsonResponse({"models": models})
def model_info(model):
if model is None:
return None
pos_tags = parse_tag_set_from_query_v2(model.category_spec.get("pos_tags", []))
neg_tags = parse_tag_set_from_query_v2(model.category_spec.get("neg_tags", []))
augment_negs_include = parse_tag_set_from_query_v2(
model.category_spec.get("augment_negs_include", [])
)
return {
"model_id": model.model_id,
"timestamp": model.last_updated,
"has_checkpoint": model.checkpoint_path is not None,
"has_output": model.output_directory is not None,
"pos_tags": serialize_tag_set_for_client_v2(pos_tags),
"neg_tags": serialize_tag_set_for_client_v2(neg_tags | augment_negs_include),
"augment_negs": model.category_spec.get("augment_negs", False),
"epoch": model.epoch,
}
@api_view(["POST"])
@csrf_exempt
def create_dataset_v2(request):
payload = json.loads(request.body)
name = payload["dataset"]
train_directory = payload["train_path"]
val_directory = payload["val_path"]
index_id = payload["index_id"]
assert all(d.startswith("gs://") for d in (train_directory, val_directory))
# Download index on index server
params = {"index_id": index_id}
requests.post(
settings.EMBEDDING_SERVER_ADDRESS + "/download_index",
json=params,
)
client = storage.Client()
all_blobs = []
for d, is_val in ((train_directory, False), (val_directory, True)):
split_dir = d[len("gs://") :].split("/")
bucket_name = split_dir[0]
bucket_path = "/".join(split_dir[1:])
all_blobs.extend(
(blob, is_val)
for blob in client.list_blobs(bucket_name, prefix=bucket_path)
)
dataset = Dataset(
name=name,
train_directory=train_directory,
val_directory=val_directory,
index_id=index_id,
)
dataset.save()
# Create all the DatasetItems for this dataset
items = [
DatasetItem(
dataset=dataset,
identifier=os.path.splitext(os.path.basename(blob.name))[0],
path=blob.name,
is_val=is_val,
)
for blob, is_val in all_blobs
if (
blob.name.endswith(".jpg")
or blob.name.endswith(".jpeg")
or blob.name.endswith(".png")
)
]
DatasetItem.objects.bulk_create(items, batch_size=10000)
return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def get_annotations_v2(request):
payload = json.loads(request.body)
image_pks = [i for i in payload["identifiers"] if i]
if not image_pks:
return JsonResponse({})
annotations = Annotation.objects.filter(
dataset_item__in=DatasetItem.objects.filter(pk__in=image_pks),
)
tags_by_pk = get_tags_from_annotations_v2(annotations)
boxes_by_pk = get_boxes_from_annotations_v2(annotations)
annotations_by_pk = defaultdict(lambda: {"tags": [], "boxes": []})
for pk, tags in tags_by_pk.items():
annotations_by_pk[pk]["tags"] = serialize_tag_set_for_client_v2(tags)
for pk, boxes in boxes_by_pk.items():
annotations_by_pk[pk]["boxes"] = serialize_boxes_for_client_v2(boxes)
return JsonResponse(annotations_by_pk)
@api_view(["POST"])
@csrf_exempt
def add_annotations_v2(request):
payload = json.loads(request.body)
image_pks = payload["identifiers"]
images = DatasetItem.objects.filter(pk__in=image_pks)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_multi_v2(request):
payload = json.loads(request.body)
num_created = bulk_add_multi_annotations_v2(payload)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_by_internal_identifiers_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
payload = json.loads(request.body)
image_identifiers = payload["identifiers"]
images = DatasetItem.objects.filter(
dataset=dataset, identifier__in=image_identifiers
)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
@api_view(["POST"])
@csrf_exempt
def add_annotations_to_result_set_v2(request):
payload = json.loads(request.body)
result_set_id = payload["result_set_id"]
lower_bound = float(payload["from"])
upper_bound = float(payload["to"])
result_set = current_result_sets[result_set_id]
result_ranking = result_set.ranking
# e.g., lower_bound=0.0, upper_bound=0.5 -> second half of the result set
start_index = math.ceil(len(result_ranking) * (1.0 - upper_bound))
end_index = math.floor(len(result_ranking) * (1.0 - lower_bound))
image_pks = result_ranking[start_index:end_index]
images = DatasetItem.objects.filter(pk__in=image_pks)
num_created = bulk_add_single_tag_annotations_v2(payload, images)
return JsonResponse({"created": num_created})
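# Worked example (illustrative) for the slice arithmetic above: with
# len(result_ranking) == 100, lower_bound=0.0 and upper_bound=0.5 give
# start_index=50 and end_index=100, i.e. the lower-ranked half of the results.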
def bulk_add_single_tag_annotations_v2(payload, images):
'''Adds annotations for a single tag to many dataset items'''
if not images:
return 0
user_email = payload["user"]
category_name = payload["category"]
mode_name = payload["mode"]
created_by = payload.get("created_by",
"tag" if len(images) == 1 else "tag-bulk")
dataset = None
if len(images) > 0:
dataset = images[0].dataset
user, _ = User.objects.get_or_create(email=user_email)
category, _ = Category.objects.get_or_create(name=category_name)
mode, _ = Mode.objects.get_or_create(name=mode_name)
Annotation.objects.filter(
dataset_item__in=images, category=category, is_box=False).delete()
# TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely
# on this hacky "TOMBSTONE" string
annotations = [
Annotation(
dataset_item=di,
user=user,
category=category,
mode=mode,
is_box=False,
misc_data={"created_by": created_by},
)
for di in images
]
bulk_add_annotations_v2(dataset, annotations)
return len(annotations)
def bulk_add_multi_annotations_v2(payload : Dict):
'''Adds multiple annotations for the same dataset and user to the database
at once'''
dataset_name = payload["dataset"]
dataset = get_object_or_404(Dataset, name=dataset_name)
user_email = payload["user"]
user, _ = User.objects.get_or_create(email=user_email)
created_by = payload.get("created_by",
"tag" if len(payload["annotations"]) == 1 else
"tag-bulk")
# Get pks
idents = [ann['identifier'] for ann in payload["annotations"]
if 'identifier' in ann]
di_pks = list(DatasetItem.objects.filter(
dataset=dataset, identifier__in=idents
).values_list("pk", "identifier"))
ident_to_pk = {ident: pk for pk, ident in di_pks}
cats = {}
modes = {}
to_delete = defaultdict(set)
annotations = []
for ann in payload["annotations"]:
db_ann = Annotation()
category_name = ann["category"]
mode_name = ann["mode"]
if category_name not in cats:
cats[category_name] = Category.objects.get_or_create(
name=category_name)[0]
if mode_name not in modes:
modes[mode_name] = Mode.objects.get_or_create(
name=mode_name)[0]
if "identifier" in ann:
pk = ident_to_pk[ann["identifier"]]
else:
pk = ann["pk"]
db_ann.dataset_item_id = pk
db_ann.user = user
db_ann.category = cats[category_name]
db_ann.mode = modes[mode_name]
db_ann.is_box = ann.get("is_box", False)
if db_ann.is_box:
db_ann.bbox_x1 = ann["x1"]
db_ann.bbox_y1 = ann["y1"]
db_ann.bbox_x2 = ann["x2"]
db_ann.bbox_y2 = ann["y2"]
else:
to_delete[db_ann.category].add(pk)
        db_ann.misc_data = {"created_by": created_by}
annotations.append(db_ann)
for cat, pks in to_delete.items():
# Delete per-frame annotations for the category if they exist since
        # we should only have one mode per image
Annotation.objects.filter(
category=cat, dataset_item_id__in=pks, is_box=False).delete()
# TODO: Add an actual endpoint to delete annotations (probably by pk); don't rely
# on this hacky "TOMBSTONE" string
bulk_add_annotations_v2(dataset, annotations)
return len(annotations)
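# Illustrative payload sketch for bulk_add_multi_annotations_v2 (field names
# taken from the reads above; all values hypothetical):
#   {"dataset": "birds", "user": "a@b.com", "created_by": "tag-bulk",
#    "annotations": [{"identifier": "img_0001", "category": "sparrow",
#                     "mode": "POSITIVE", "is_box": True,
#                     "x1": 0.1, "y1": 0.2, "x2": 0.6, "y2": 0.7}]}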
def bulk_add_annotations_v2(dataset, annotations):
'''Handles book keeping for adding many annotations at once'''
Annotation.objects.bulk_create(annotations)
counts = defaultdict(int)
for ann in annotations:
counts[(ann.category, ann.mode)] += 1
for (cat, mode), count in counts.items():
category_count, _ = CategoryCount.objects.get_or_create(
dataset=dataset,
category=cat,
mode=mode
)
category_count.count += count
category_count.save()
@api_view(["POST"])
@csrf_exempt
def delete_category_v2(request):
payload = json.loads(request.body)
category = payload["category"]
category = Category.objects.get(name=category)
category.delete()
return JsonResponse({"status": "success"})
@api_view(["POST"])
@csrf_exempt
def update_category_v2(request):
payload = json.loads(request.body)
old_category_name = payload["oldCategory"]
new_category_name = payload["newCategory"]
category = Category.objects.get(name=old_category_name)
category.name = new_category_name
category.save()
return JsonResponse({"status": "success"})
@api_view(["GET"])
@csrf_exempt
def get_category_counts_v2(request, dataset_name):
dataset = get_object_or_404(Dataset, name=dataset_name)
counts = CategoryCount.objects.filter(dataset=dataset).values(
"category__name", "mode__name", "count"
)
n_labeled = defaultdict(dict)
for c in counts:
category = c["category__name"]
mode = c["mode__name"]
count = c["count"]
n_labeled[category][mode] = count
return JsonResponse(n_labeled)
```
#### File: containers/bgsplit_mapper/handler.py
```python
import numpy as np
import aiohttp
import asyncio
import os.path
from pathlib import Path
import torch
import torch.nn.functional as F
from torchvision import transforms, utils, io
from typing import Dict, List, Optional, Tuple, Union, Any
from enum import Enum
from knn import utils
from knn.mappers import Mapper
from knn.utils import JSONType
import config
from model import Model
class BGSplittingMapper(Mapper):
class ReturnType(Enum):
SAVE = 0
SERIALIZE = 1
def initialize_container(self):
# Create connection pool
self.session = aiohttp.ClientSession()
self.use_cuda = False
async def initialize_job(self, job_args):
return_type = job_args.get("return_type", "serialize")
if return_type == "save":
job_args["return_type"] = BGSplittingMapper.ReturnType.SAVE
elif return_type == "serialize":
job_args["return_type"] = BGSplittingMapper.ReturnType.SERIALIZE
else:
raise ValueError(f"Unknown return type: {return_type}")
# Get checkpoint data
if job_args["checkpoint_path"] == 'TEST':
model = Model(num_main_classes=2, num_aux_classes=1)
else:
map_location = torch.device('cuda') if self.use_cuda else torch.device('cpu')
checkpoint_state = torch.load(job_args["checkpoint_path"],
map_location=map_location)
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in checkpoint_state['state_dict'].items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
if 'model_kwargs' in checkpoint_state:
kwargs = checkpoint_state['model_kwargs']
num_aux_classes = kwargs['num_aux_classes']
else:
num_aux_classes = 1
# Create model
model = Model(num_main_classes=2, num_aux_classes=num_aux_classes)
# Load model weights
model.load_state_dict(new_state_dict)
model.eval()
if self.use_cuda:
model = model.cuda()
job_args["model"] = model
job_args["transform"] = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ConvertImageDtype(torch.float32),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])
job_args["n_chunks_saved"] = 0
return job_args
@utils.log_exception_from_coro_but_return_none
async def process_chunk(
self, chunk: List[JSONType], job_id: str, job_args: Any, request_id: str
) -> Tuple[np.ndarray, np.ndarray]:
image_paths = [c["path"] for c in chunk]
# Download images
if "http" not in image_paths[0]:
image_bucket = job_args["input_bucket"]
image_paths = [
os.path.join(config.GCS_URL_PREFIX, image_bucket, image_path)
for image_path in image_paths]
transform = job_args["transform"]
async def download_transform(image_path):
return await self.transform_image(
await self.download_image(image_path),
transform=transform)
with self.profiler(request_id, "download_time"):
input_images = await asyncio.gather(
*[
download_transform(image_path)
for image_path in image_paths
])
# Run inference
model = job_args["model"]
with self.profiler(request_id, "inference_time"):
image_batch = torch.stack(input_images)
if self.use_cuda:
image_batch = image_batch.cuda()
embeddings = model.forward_backbone(image_batch)
scores = F.softmax(model.main_head(embeddings), dim=1)[:, 1]
return (embeddings.detach().cpu().numpy(),
scores.detach().cpu().numpy())
async def download_image(
self, image_path: str, num_retries: int = config.DOWNLOAD_NUM_RETRIES
) -> bytes:
for i in range(num_retries + 1):
try:
async with self.session.get(image_path) as response:
assert response.status == 200
return await response.read()
except Exception:
if i < num_retries:
await asyncio.sleep(2 ** i)
else:
raise
assert False # unreachable
async def transform_image(
self, image_bytes: bytes, transform,
) -> torch.Tensor:
data = torch.tensor(
list(image_bytes),
dtype=torch.uint8)
image = io.decode_image(data, mode=io.image.ImageReadMode.RGB)
return transform(image)
async def postprocess_chunk(
self,
inputs,
outputs: Tuple[np.ndarray, np.ndarray],
job_id,
job_args,
request_id,
) -> Union[Tuple[str, List[Optional[int]]],
Tuple[None, List[Optional[str]]]]:
if job_args["return_type"] == BGSplittingMapper.ReturnType.SAVE:
with self.profiler(request_id, "save_time"):
data_path_tmpl = config.DATA_FILE_TMPL.format(
job_id, self.worker_id, job_args["n_chunks_saved"]
)
job_args["n_chunks_saved"] += 1
Path(data_path_tmpl).parent.mkdir(parents=True, exist_ok=True)
                data = {'ids': np.array([inp['id'] for inp in inputs], dtype=np.int64),
'embeddings': outputs[0],
'scores': outputs[1]}
np.save(data_path_tmpl.format(None), data)
return data_path_tmpl.format(None), [
len(output) if output is not None else None for output in outputs[0]
]
else:
with self.profiler(request_id, "reduce_time"):
reduce_fn = config.REDUCTIONS[job_args.get("reduction")]
reduced_outputs = [
reduce_fn(output) if output is not None else None
for output in outputs
]
with self.profiler(request_id, "serialize_time"):
serialized_outputs = [
utils.numpy_to_base64(output) if output is not None else None
for output in reduced_outputs
]
return None, serialized_outputs
async def process_element(
self,
input: JSONType,
job_id: str,
job_args: Any,
request_id: str,
element_index: int,
) -> Any:
pass
app = BGSplittingMapper().server
```
#### File: containers/bgsplit_trainer/util.py
```python
import requests
import time
import config
class EMA():
    '''Running (cumulative) mean: despite the class name, each += update folds the new sample into the simple mean of all samples pushed so far'''
def __init__(self, init: float):
self.value = init
self.n_samples = 0
def __iadd__(self, other: float):
self.n_samples += 1
self.value = (
self.value
- (self.value / self.n_samples)
+ other / self.n_samples)
return self
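# Usage sketch (illustrative): the += update tracks the mean of the samples
# pushed so far, e.g.
#   ema = EMA(0.0); ema += 1.0; ema += 3.0   # ema.value == 2.0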
def download(
url: str,
num_retries: int = config.DOWNLOAD_NUM_RETRIES) -> bytes:
for i in range(num_retries + 1):
try:
response = requests.get(url)
if response.status_code != 200:
print('Download failed:',
url, response.status_code, response.reason)
assert response.status_code == 200
return response.content
except requests.exceptions.RequestException as e:
print(f'Download excepted, retrying ({i}):',
url)
if i < num_retries:
time.sleep(2 ** i)
else:
raise e
assert False # unreachable
```
#### File: containers/clip-text-inference/handler.py
```python
import torch
import clip
from typing import List
from knn import utils
from knn.mappers import Mapper
import config
torch.set_grad_enabled(False)
torch.set_num_threads(1)
class TextEmbeddingMapper(Mapper):
def initialize_container(self):
self.model, _ = clip.load(config.CLIP_MODEL, device="cpu")
@utils.log_exception_from_coro_but_return_none
async def process_chunk(
self, chunk: List[str], job_id, job_args, request_id
) -> List[str]:
with torch.no_grad():
text = clip.tokenize(chunk)
text_features = self.model.encode_text(text)
text_features /= text_features.norm(dim=-1, keepdim=True)
return list(map(utils.numpy_to_base64, text_features.numpy()))
async def process_element(self, *args, **kwargs):
raise NotImplementedError()
app = TextEmbeddingMapper().server
```
#### File: containers/mapper/inference.py
```python
import io
import numpy as np
from PIL import Image, ImageEnhance
import torch
from typing import Any, Dict, List
torch.set_grad_enabled(False)
torch.set_num_threads(1)
# AUGMENTATIONS
def brightness(image, factor):
br_enhancer = ImageEnhance.Brightness(image)
return br_enhancer.enhance(factor)
def contrast(image, factor):
cn_enhancer = ImageEnhance.Contrast(image)
return cn_enhancer.enhance(factor)
def flip(image):
return image.transpose(Image.FLIP_LEFT_RIGHT)
def grayscale(image):
return image.convert("L")
def resize(image, params):
    return image.resize((int(params * image.size[0]), int(params * image.size[1])))
def rotate(image, angle):
return image.rotate(angle)
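# Illustrative only: the `augmentations` dict consumed by run() below selects
# which of the helpers above are applied; a hypothetical value such as
#   {"flip": True, "brightness": 1.2, "contrast": 0.8, "resize": 0.5, "rotate": 90}
# flips, adjusts brightness/contrast, downscales by half, then rotates 90 degrees.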
# INFERENCE
def run(
image_bytes: bytes,
image_patch: List[float],
augmentations: Dict[str, Any],
input_format: str,
pixel_mean: torch.Tensor,
pixel_std: torch.Tensor,
model: torch.nn.Module,
) -> Dict[str, torch.Tensor]:
with io.BytesIO(image_bytes) as image_buffer:
image = Image.open(image_buffer)
# Preprocess
image = image.convert("RGB")
# Crop
if image_patch:
x1f, y1f, x2f, y2f = image_patch
w, h = image.size
image = image.crop(
                (int(x1f * w), int(y1f * h), int(x2f * w), int(y2f * h))
)
# Apply transformations (augmentations is a dict)
if "flip" in augmentations:
image = flip(image)
if "gray" in augmentations:
image = grayscale(image)
if "brightness" in augmentations:
image = brightness(image, augmentations["brightness"])
if "contrast" in augmentations:
image = contrast(image, augmentations["contrast"])
if "resize" in augmentations:
image = resize(image, augmentations["resize"])
if "rotate" in augmentations:
image = rotate(image, augmentations["rotate"])
image = torch.as_tensor(np.asarray(image), dtype=torch.float32) # -> tensor
image = image.permute(2, 0, 1) # HWC -> CHW
if input_format == "BGR":
image = torch.flip(image, dims=(0,)) # RGB -> BGR
image = image.contiguous()
image = (image - pixel_mean) / pixel_std
# Input: NCHW
# Output: {'res4': NCHW, 'res5': NCHW} where N = 1
output_dict = model(image.unsqueeze(dim=0))
return {k: v.detach() for k, v in output_dict.items()}
```
#### File: containers/trainer/main.py
```python
import concurrent.futures
from dataclasses import dataclass, field
import functools
import signal
import threading
import time
import traceback
import backoff
import numpy as np
import requests
from flask import Flask, request, abort
from typing import Any, Dict, List, Iterable, Optional, Tuple
from interactive_index import InteractiveIndex
import config
# Step 1: Load saved embeddings into memory
def load(paths: Iterable[str], sample_rate: float) -> Tuple[np.ndarray, int]:
all_embeddings = []
num_paths_read = 0
load_func = functools.partial(load_one, sample_rate=sample_rate)
with concurrent.futures.ThreadPoolExecutor() as pool:
for loaded_embeddings in pool.map(load_func, paths):
if loaded_embeddings:
all_embeddings.extend(loaded_embeddings)
num_paths_read += 1
return np.concatenate(all_embeddings), num_paths_read
def load_one(path: str, sample_rate: float) -> Optional[List[np.ndarray]]:
try:
# Each file is a np.save'd Dict[int, np.ndarray] where each value is N x D
embedding_dict = np.load(
path, allow_pickle=True
).item() # type: Dict[int, np.ndarray]
except Exception as e:
print(f"Error in load (path = {path}), but ignoring. {type(e)}: {e}")
traceback.print_exc()
return None
loaded_embeddings = []
for embeddings in embedding_dict.values():
if sample_rate:
n = embeddings.shape[0]
n_sample = np.random.binomial(n, sample_rate)
if n_sample == 0:
continue
elif n_sample < n:
sample_inds = np.random.choice(n, n_sample)
embeddings = embeddings[sample_inds]
loaded_embeddings.append(embeddings)
return loaded_embeddings
# Step 2: Train index
def train(embeddings: np.ndarray, index_kwargs: Dict[str, Any], index_dir: str):
index_kwargs.update(
tempdir=index_dir,
use_gpu=config.INDEX_USE_GPU,
train_on_gpu=config.INDEX_TRAIN_ON_GPU,
)
index = InteractiveIndex(**index_kwargs)
index.train(embeddings)
# Step 3: Call webhook to indicate completion
@backoff.on_exception(backoff.expo, requests.exceptions.RequestException)
def notify(url: str, payload: Dict[str, str]):
r = requests.put(url, json=payload)
r.raise_for_status()
@dataclass
class TrainingJob:
path_tmpls: List[
str
] # Format strings (param: reduction) for paths to saved embedding dictionaries
index_kwargs: Dict[str, Any] # Index configuration
index_id: str # Index build job identifier
index_name: str # Unique index identifier within job
url: str # Webhook to PUT to after completion
sample_rate: float # Fraction of saved embeddings to randomly sample for training
reduction: Optional[str] # Type of embeddings we should use (e.g., average pooled)
_done: bool = False
_done_lock: threading.Lock = field(default_factory=threading.Lock)
def start(self):
thread = threading.Thread(target=self.run, daemon=True)
thread.start()
def finish(self, success: bool, **kwargs):
with self._done_lock:
if self._done:
return
self._done = True
notify(
self.url,
{
"index_id": self.index_id,
"index_name": self.index_name,
"success": success,
**kwargs,
},
)
@property
def done(self):
with self._done_lock:
return self._done
def run(self):
# TODO(mihirg): Figure out how to handle errors like OOMs and CUDA
        # errors; maybe start a subprocess?
try:
start_time = time.perf_counter()
paths = (path_tmpl.format(self.reduction)
for path_tmpl in self.path_tmpls)
embeddings, num_paths_read = load(paths, self.sample_rate)
train_start_time = time.perf_counter()
index_dir = config.INDEX_DIR_TMPL.format(
self.index_id, self.index_name)
train(embeddings, self.index_kwargs, index_dir)
end_time = time.perf_counter()
except Exception as e:
traceback.print_exc()
self.finish(False, reason=str(e))
else:
self.finish(
True,
index_dir=index_dir,
num_paths_read=num_paths_read,
profiling=dict(
load_time=train_start_time - start_time,
train_time=end_time - train_start_time,
),
)
current_job: Optional[TrainingJob] = None
app = Flask(__name__)
@app.route("/", methods=["POST"])
def start():
global current_job
if current_job and not current_job.done:
abort(503, description="Busy")
payload = request.json or {}
current_job = TrainingJob(**payload)
current_job.start()
return "Started"
def gracefully_shutdown(signum, frame):
if current_job:
current_job.finish(False, reason="Preempted")
signal.signal(signal.SIGTERM, gracefully_shutdown)
```
#### File: interactive_index/interactive_index/config.py
```python
import copy
import math
import yaml
from interactive_index.utils import *
CONFIG_DEFAULTS = {
'd': 1_024,
'n_centroids': 32, # only used with IVF
'n_probes': 16, # only used with IVF
'vectors_per_index': 10_000,
'tempdir': '/tmp/',
'use_gpu': False,
'train_on_gpu': False,
'use_float16': False,
'use_float16_quantizer': False,
'use_precomputed_codes': False,
'metric': 'L2',
# Transformation
'transform': None, # {'PCA', 'PCAR', 'ITQ', 'OPQ'}
'transform_args': None,
# Search
'search': None, # {'HNSW'}
'search_args': None,
# Encoding
'encoding': 'Flat', # {'SQ', 'PQ', 'LSH'}
'encoding_args': None,
# Misc
'multi_id': False,
'direct_map': 'NoMap', # {'NoMap', 'Array', 'Hashtable'}
}
def read_config(config_fpath: str) -> dict:
"""
Loads a config dictionary from the specified yaml file.
Args:
config_fpath: The file path of the config yaml file.
Returns:
The dictionary of the configuration options, with default options if
any were missing.
"""
cfg = yaml.load(open(config_fpath, 'r'), Loader=yaml.FullLoader)
# Set defaults if missing
    for key, val in CONFIG_DEFAULTS.items():
if key not in cfg:
cfg[key] = val
return cfg
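# Usage sketch (illustrative; file name hypothetical): with the defaults loop
# above, a YAML file containing only "d: 512" and "encoding: SQ" comes back as
# a full config dict with those two overrides and every other key filled in
# from CONFIG_DEFAULTS, e.g.
#   cfg = read_config("index_config.yaml")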
def _set_config_for_mem_usage(d: int, n_vecs: int, max_mem: int, config: dict):
"""
Args:
d: The vector dimension.
n_vecs: The expected number of vectors.
max_mem: The maximum amount of memory you want the
index to take up in bytes.
config: The configuration dictionary to set.
"""
FLOAT32_SZ = 4 * BYTES
MAX_SUBINDEX_SZ = 1 * GIGABYTE
# Flat
if n_vecs * d * FLOAT32_SZ <= max_mem:
config['encoding'] = 'Flat'
config['vectors_per_index'] = MAX_SUBINDEX_SZ // (d * FLOAT32_SZ)
# Scalar Quantization to 1 byte
elif n_vecs * d <= max_mem:
config['encoding'] = 'SQ'
config['encoding_args'] = [8]
config['vectors_per_index'] = MAX_SUBINDEX_SZ // d
else:
# PCA target dim or PQ subvectors
x = round_down_to_mult(max_mem // n_vecs, 4)
# PCA with rotation to at least 64 dimensions and
# Scalar Quantization to 1 byte
if x > 64:
config['transform'] = 'PCAR'
config['transform_args'] = [x]
config['encoding'] = 'SQ'
config['encoding_args'] = [8]
config['vectors_per_index'] = MAX_SUBINDEX_SZ // x
# OPQ with PQ
else:
y = max(filter(
                lambda k: k <= 4 * x and k <= d,
[x, 2 * x, 3 * x, 4 * x, d]
))
config['transform'] = 'OPQ'
config['transform_args'] = [x, y]
config['encoding'] = 'PQ'
config['encoding_args'] = [x]
config['vectors_per_index'] = MAX_SUBINDEX_SZ // x
def auto_config(d: int, n_vecs: int, max_ram: int, pca_d: int = None, sq: int = None):
"""
"""
FLOAT32_SZ = 4 * BYTES
config = copy.copy(CONFIG_DEFAULTS)
config['d'] = d
if pca_d:
config['transform'] = 'PCAR'
config['transform_args'] = [pca_d]
if sq:
config['encoding'] = 'SQ'
config['encoding_args'] = [sq]
# _set_config_for_mem_usage(d, n_vecs, max_mem, config)
# if n_vectors < 10_000:
# "HNSW32"
# memory_usage = d * 4 + 32 * 2 * 4
if n_vecs < 1_000_000:
# IVFx
n_centroids = round_up_to_pow2(4 * int(math.sqrt(n_vecs)))
if n_centroids > 16 * math.sqrt(n_vecs):
n_centroids = round_up_to_mult(4 * int(math.sqrt(n_vecs)), 4)
# train needs to be [30*n_centroids, 256*n_centroids]
config['n_centroids'] = n_centroids
config['recommended_n_train'] = n_centroids * 39
config['n_probes'] = n_centroids // 16
return config
config['use_gpu'] = False
#config['search'] = 'HNSW'
#config['search_args'] = [32]
if n_vecs < 10_000_000:
# IVF65536_HNSW32
# not supported on GPU, if need GPU use IVFx
# can train on GPU though
# Want 2**16, but RAM problem
for i in range(12, 3, -1):
n_centroids = 2**i
if max_ram / (FLOAT32_SZ * d) > n_centroids * 39:
config['n_centroids'] = n_centroids
config['recommended_n_train'] = n_centroids * 39
config['n_probes'] = max(n_centroids // 16, 1)
return config
assert False, 'Too little RAM'
elif n_vecs < 100_000_000:
# IVF262144_HNSW32
# not supported on GPU, if need GPU use IVFx
# can train on GPU though
# Want 2**18, but RAM problem
for i in range(12, 5, -1):
n_centroids = 2**i
if max_ram / (FLOAT32_SZ * d) > n_centroids * 39:
config['n_centroids'] = n_centroids
config['recommended_n_train'] = n_centroids * 39
config['n_probes'] = max(n_centroids // 16, 1)
return config
assert False, 'Too little RAM'
else: # good for n_vectors < 1_000_000_000
# IVF1048576_HNSW32
# not supported on GPU, if need GPU use IVFx
# can train on GPU though
# Want 2**20, but RAM problem
for i in range(13, 5, -1):
n_centroids = 2**i
if max_ram / (FLOAT32_SZ * d) > n_centroids * 39:
config['n_centroids'] = n_centroids
config['recommended_n_train'] = n_centroids * 39
config['n_probes'] = max(n_centroids // 8, 1)
return config
assert False, 'Too little RAM'
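# Usage sketch (illustrative numbers): for 5 million 512-d vectors with a
# 32 GiB budget, auto_config(512, 5_000_000, 32 * 2**30) falls into the
# n_vecs < 10_000_000 branch and, scanning i from 12 down, keeps the largest
# n_centroids = 2**i whose recommended training set of 39 * n_centroids
# float32 d-vectors fits in max_ram.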
```
#### File: knn/jobs/jobs.py
```python
import asyncio
import collections
import resource
import time
import textwrap
import traceback
import uuid
import aiohttp
from aiostream import stream
from runstats import Statistics
from knn import utils
from knn.clusters import GKECluster
from knn.utils import JSONType
from knn.reducers import Reducer
from . import defaults
from typing import (
Any,
AsyncIterable,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Union,
)
# Increase maximum number of open sockets if necessary
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
new_soft = max(min(defaults.DESIRED_ULIMIT, hard), soft)
resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
InputSequenceType = Union[Iterable[JSONType], AsyncIterable[JSONType]]
class MapperSpec:
def __init__(
self,
*,
url: Optional[str] = None,
container: Optional[str] = None,
cluster: Optional[GKECluster] = None,
n_mappers: Optional[int] = None,
):
assert (all((url, n_mappers)) and not any((container, cluster))) or (
not url and all((container, cluster))
)
n_mappers = n_mappers or defaults.N_MAPPERS
assert n_mappers < new_soft
self.url = url
self.container = container
self.cluster = cluster
self.n_mappers = n_mappers
# Will be initialized on enter
self._deployment_id: Optional[str] = None
async def __aenter__(self) -> str: # returns endpoint
if self.url:
return self.url
else:
# Start Kubernetes service
assert self.cluster and self.container
self._deployment_id, url = await self.cluster.create_deployment(
self.container, self.n_mappers
)
return url
async def __aexit__(self, type, value, traceback):
if not self.url:
# Stop Kubernetes service
await self.cluster.delete_deployment(self._deployment_id)
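# Illustrative construction (values hypothetical): point at an already-running
# mapper with MapperSpec(url="http://mapper:5000", n_mappers=16), or let the
# spec manage a deployment with MapperSpec(container="gcr.io/proj/mapper",
# cluster=gke_cluster, n_mappers=16); the assertion in __init__ allows exactly
# one of the two forms.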
class MapReduceJob:
def __init__(
self,
mapper: MapperSpec,
reducer: Reducer,
mapper_args: JSONType = {},
*,
session: Optional[aiohttp.ClientSession] = None,
n_retries: int = defaults.N_RETRIES,
chunk_size: int = defaults.CHUNK_SIZE,
request_timeout: int = defaults.REQUEST_TIMEOUT,
) -> None:
self.job_id = str(uuid.uuid4())
self.mapper = mapper
self.mapper_args = mapper_args
self.reducer = reducer
self.n_retries = n_retries
self.chunk_size = chunk_size
self.request_timeout = aiohttp.ClientTimeout(total=request_timeout)
self.owns_session = session is None
self.session = session or utils.create_unlimited_aiohttp_session()
# Performance stats
self._n_requests = 0
self._n_successful = 0
self._n_failed = 0
self._n_chunks_per_mapper: Dict[str, int] = collections.defaultdict(int)
self._profiling: Dict[str, Statistics] = collections.defaultdict(Statistics)
# Will be initialized later
self._n_total: Optional[int] = None
self._start_time: Optional[float] = None
self._end_time: Optional[float] = None
self._task: Optional[asyncio.Task] = None
# REQUEST LIFECYCLE
async def start(
self,
iterable: InputSequenceType,
callback: Optional[Callable[[Any], None]] = None,
n_total: Optional[int] = None,
) -> None:
async def task():
try:
result = await self.run_until_complete(iterable, n_total)
except asyncio.CancelledError:
pass
else:
if callback:
callback(result)
self._task = asyncio.create_task(task())
async def run_until_complete(
self,
iterable: InputSequenceType,
n_total: Optional[int] = None,
) -> Any:
try:
self._n_total = n_total or len(iterable) # type: ignore
except Exception:
pass
assert self._start_time is None # can't reuse Job instances
self._start_time = time.time()
try:
chunk_stream = stream.chunks(stream.iterate(iterable), self.chunk_size)
async with self.mapper as mapper_url, chunk_stream.stream() as chunk_gen:
async for response in utils.limited_as_completed_from_async_coro_gen(
(self._request(mapper_url, chunk) async for chunk in chunk_gen),
self.mapper.n_mappers,
):
response_tuple = await response
self._reduce_chunk(*response_tuple)
if self._n_total is None:
self._n_total = self._n_successful + self._n_failed
else:
assert self._n_total == self._n_successful + self._n_failed
self.reducer.finish()
return self.result
finally:
self._end_time = time.time()
if self.owns_session:
await self.session.close()
async def stop(self) -> None:
if self._task and not self._task.done():
self._task.cancel()
await self._task
# RESULT GETTERS
@property
def result(self) -> Any:
return self.reducer.result
@property
def progress(self) -> Any:
progress = {
"cost": self.cost,
"finished": self.finished,
"n_processed": self._n_successful,
"n_skipped": self._n_failed,
"elapsed_time": self.elapsed_time,
}
if self._n_total is not None:
progress["n_total"] = self._n_total
return progress
@property
def elapsed_time(self):
end_time = self._end_time or time.time()
start_time = self._start_time or end_time
return end_time - start_time
@property
def performance(self):
return {
"profiling": {
k: {
"mean": v.mean(),
"std": v.stddev() if len(v) > 1 else 0,
"n": len(v),
}
for k, v in self._profiling.items()
},
"mapper_utilization": dict(enumerate(
self._n_chunks_per_mapper.values())),
}
@property
def status(self) -> Dict[str, Any]:
return {"performance": self.performance, "progress": self.progress}
@property
def finished(self) -> bool:
return self._n_total == self._n_successful + self._n_failed
@property
def cost(self) -> float:
return 0 # not implemented for GKE
# INTERNAL
def _construct_request(self, chunk: List[JSONType]) -> JSONType:
return {
"job_id": self.job_id,
"job_args": self.mapper_args,
"inputs": chunk,
}
async def _request(
self, mapper_url: str, chunk: List[JSONType]
) -> Tuple[JSONType, Optional[JSONType], float]:
result = None
start_time = 0.0
end_time = 0.0
request = self._construct_request(chunk)
for i in range(self.n_retries):
start_time = time.time()
end_time = start_time
try:
async with self.session.post(
mapper_url, json=request, timeout=self.request_timeout
) as response:
end_time = time.time()
if response.status == 200:
result = await response.json()
break
except asyncio.CancelledError:
raise
except asyncio.TimeoutError:
print("Request timed out")
except Exception:
action = "skipping" if i == self.n_retries - 1 else "ignoring"
print(f"Error from _request ({action})")
print(textwrap.indent(traceback.format_exc(), " "))
return chunk, result, end_time - start_time
def _reduce_chunk(
self, chunk: List[JSONType], result: Optional[JSONType], elapsed_time: float
):
self._n_requests += 1
if not result:
self._n_failed += len(chunk)
return
# Validate
assert len(result["outputs"]) == len(chunk)
assert "billed_time" in result["profiling"]
n_successful = sum(1 for r in result["outputs"] if r)
self._n_successful += n_successful
self._n_failed += len(chunk) - n_successful
self._n_chunks_per_mapper[result["worker_id"]] += 1
self._profiling["total_time"].push(elapsed_time)
for k, vs in result["profiling"].items():
for v in vs:
self._profiling[k].push(v)
if result["chunk_output"]:
self.reducer.handle_chunk_result(chunk, result["chunk_output"])
for input, output in zip(chunk, result["outputs"]):
if output:
self.reducer.handle_result(input, output)
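# Usage sketch (illustrative; reducer, inputs, and URL hypothetical):
#   job = MapReduceJob(MapperSpec(url="http://mapper:5000", n_mappers=8),
#                      my_reducer, {"model": "resnet"})
#   result = await job.run_until_complete(inputs, n_total=len(inputs))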
```
#### File: forager/scripts/generate_distance_matrix.py
```python
from scipy.spatial.distance import pdist, squareform
import numpy as np
def run(
embeddings_filename: str,
num_embeddings: int,
embedding_dim: int,
distance_matrix_filename: str,
):
embeddings = np.memmap(
embeddings_filename,
dtype="float32",
mode="r",
shape=(num_embeddings, embedding_dim),
)
embeddings_in_memory = np.copy(embeddings)
distances_in_memory = squareform(
pdist(embeddings_in_memory).astype(embeddings_in_memory.dtype)
)
distances = np.memmap(
distance_matrix_filename,
dtype=distances_in_memory.dtype,
mode="w+",
shape=distances_in_memory.shape,
)
distances[:] = distances_in_memory[:]
distances.flush()
```
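A minimal invocation sketch for the `run` helper above; the file names and shapes below are placeholders, not values taken from the original repo:
```python
# Hypothetical example: embeddings.bin is assumed to hold
# num_embeddings * embedding_dim float32 values in row-major order.
run(
    embeddings_filename="embeddings.bin",
    num_embeddings=1000,
    embedding_dim=512,
    distance_matrix_filename="distances.bin",
)
```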
|
{
"source": "jeremy-evert/PyInventGames",
"score": 4
}
|
#### File: PyInventGames/ch05/dragon.py
```python
import random
import time
def displayIntro():
print('''You are in a land full of dragons. In front of you,
you see two caves. In one cave, the dragon is friendly
and will share their treasure with you. The other dragon
is greedy and hungry, and will eat you on sight.''')
print()
def chooseCave():
cave = ''
while cave != '1' and cave !='2':
print('Which cave will you go into? (1 or 2)')
cave = input()
return cave
def checkCave(chosenCave):
print('You approach the cave...')
time.sleep(2)
print('It is dark and spooky...')
time.sleep(2)
print('A large dragon jumps out in front of you! They open their jaws and...')
print()
time.sleep(2)
friendlyCave = random.randint(1,2)
if chosenCave == str(friendlyCave):
print('Gives you their treasure!')
else:
print('That was not a dragon. It was a baboon!')
print('Program begins.')
playAgain = 'yes'
while 'yes' == playAgain or 'y' == playAgain:
displayIntro()
caveNumber = chooseCave()
checkCave(caveNumber)
print('Do you want to play again? (yes or no)')
playAgain = input()
print('Program complete. Bye.')
```
|
{
"source": "jeremyfix/easylabwork",
"score": 4
}
|
#### File: examples/subdir/example2.py
```python
import sys
def syr(stem: int):
'''
Compute the serie of Syracuse up to the limit cycle
'''
value = stem
while(value != 1):
#@TEMPL
#if None:
# value = None
#else:
# value = None
#TEMPL@
#@SOL
if value % 2 == 0:
value = value // 2
else:
value = 3 * value + 1
#SOL@
sys.stdout.write(f"{value} ")
sys.stdout.flush()
print()
if __name__ == '__main__':
if len(sys.argv) != 2:
print(f"Usage: {sys.argv[0]} value")
sys.exit(-1)
syr(int(sys.argv[1]))
```
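For reference, a quick sanity check of the solution branch above (not part of the original exercise); `syr` prints the sequence to stdout:
```python
# Calling the function directly instead of via the command line.
syr(6)  # expected output: 3 10 5 16 8 4 2 1
```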
|
{
"source": "jeremyfix/gan_experiments",
"score": 2
}
|
#### File: jeremyfix/gan_experiments/data.py
```python
from typing import Union
from pathlib import Path
import functools
# External imports
import tqdm
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
_DEFAULT_DATASET_ROOT = "/opt/Datasets"
_DEFAULT_MNIST_DIGIT = 6
_IMG_MEAN = 0.5
_IMG_STD = 0.5
def get_dataloaders(dataset_root: Union[str, Path],
cuda: bool,
batch_size: int = 64,
n_threads: int = 4,
dataset: str = "MNIST",
val_size: float = 0.2,
small_experiment: bool = False):
"""
Build and return the pytorch dataloaders
Args:
dataset_root (str, Path) : the root path of the datasets
cuda (bool): whether or not to use cuda
batch_size (int) : the size of the minibatches
n_threads (int): the number of threads to use for dataloading
dataset (str): the dataset to load
val_size (float): the proportion of data for the validation set
small_experiment (bool): whether or not to use a small
dataset (useful for debugging)
"""
datasets = ["MNIST", "FashionMNIST", "EMNIST", "SVHN", "CelebA"]
if dataset not in datasets:
raise NotImplementedError(f"Cannot import the dataset {dataset}."
f" Available datasets are {datasets}")
dataset_loader = getattr(torchvision.datasets, f"{dataset}")
train_kwargs = {}
test_kwargs = {}
if dataset in ["MNIST", "FashionMNIST", "EMNIST"]:
train_kwargs['train'] = True
test_kwargs['train'] = False
if dataset == "EMNIST":
train_kwargs['split'] = 'balanced'
elif dataset in ["SVHN", 'CelebA']:
train_kwargs['split'] = 'train'
test_kwargs['split'] = 'test'
# Get the two datasets, make them tensors in [0, 1]
transform= transforms.Compose([
transforms.ToTensor(),
transforms.Normalize( (_IMG_MEAN,), (_IMG_STD,))
]
)
if dataset == 'CelebA':
transform = transforms.Compose([
transforms.Resize(64),
transforms.CenterCrop(64),
transform
])
train_dataset = dataset_loader(root=dataset_root,
**train_kwargs,
download=True,
transform=transform
)
test_dataset = dataset_loader(root=dataset_root,
**test_kwargs,
download=True,
transform=transform
)
dataset = torch.utils.data.ConcatDataset([train_dataset,
test_dataset])
# Compute the channel-wise normalization coefficients
# mean = std = 0
# img, _ = dataset[0]
# print(img.shape)
# N = len(dataset) * img.shape[1] * img.shape[2]
# for img, _ in tqdm.tqdm(dataset):
# mean += img.sum()/N
# for img, _ in tqdm.tqdm(dataset):
# std += ((img - mean)**2).sum()/N
# std = np.sqrt(std)
# print(mean, std)
if small_experiment:
dataset = torch.utils.data.Subset(dataset, range(batch_size))
# Split the dataset in train/valid
indices = np.arange(len(dataset))
np.random.shuffle(indices)
split_idx = int(val_size * len(dataset))
valid_indices, train_indices = indices[:split_idx], indices[split_idx:]
train_dataset = torch.utils.data.Subset(dataset, train_indices)
valid_dataset = torch.utils.data.Subset(dataset, valid_indices)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=n_threads)
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=n_threads)
img_shape = dataset[0][0].shape # C, H, W
return train_loader, valid_loader, img_shape
def test_mnist():
import matplotlib.pyplot as plt
train_loader, valid_loader, img_shape = get_dataloaders(dataset_root=_DEFAULT_DATASET_ROOT,
batch_size=16,
cuda=False,
dataset="MNIST")
print(f"I loaded {len(train_loader)} train minibatches. The images"
f" are of shape {img_shape}")
X, y = next(iter(train_loader))
grid = torchvision.utils.make_grid(X, nrow=4)
print(grid.min(), grid.max())
print(grid.shape)
plt.figure()
plt.imshow(np.transpose(grid.numpy(), (1, 2, 0)), cmap='gray_r')
plt.show()
def test_celeba():
import matplotlib.pyplot as plt
train_loader, valid_loader, img_shape = get_dataloaders(dataset_root=_DEFAULT_DATASET_ROOT,
batch_size=16,
cuda=False,
dataset="CelebA")
print(f"I loaded {len(train_loader)} train minibatches. The images"
f" are of shape {img_shape}")
X, y = next(iter(train_loader))
grid = torchvision.utils.make_grid(X, nrow=4)
print(grid.min(), grid.max())
print(grid.shape)
plt.figure()
plt.imshow(np.transpose(grid.numpy(), (1, 2, 0)) * _IMG_STD + _IMG_MEAN)
plt.show()
if __name__ == '__main__':
# test_mnist()
test_celeba()
```
#### File: jeremyfix/gan_experiments/models.py
```python
from typing import Optional, Tuple
from functools import reduce
import operator
# External imports
import torch
import torch.nn as nn
def conv_bn_leakyrelu(in_channels, out_channels):
"""
Conv(3x3, same) - BN - LeakyRelu(0.2)
"""
ks = 3
return [
nn.Conv2d(in_channels, out_channels,
kernel_size=ks,
stride=1,
padding=int((ks-1)/2),
bias=False),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(negative_slope=0.2)
]
def conv_downsampling(channels):
"""
Conv(3x3, s2) - LeakyRelu(0.2)
"""
ks = 3
return [
nn.Conv2d(channels, channels,
kernel_size=ks,
stride=2,
padding=int((ks-1)/2),
bias=True),
nn.LeakyReLU(negative_slope=0.2)
]
class Discriminator(nn.Module):
"""
The discriminator network tells if the input image is real or not
The output logit is supposed to be high(-ly positive) for real images
and low (highly negative) for fake images
"""
def __init__(self,
img_shape: Tuple[int, int, int],
dropout: float,
base_c: int) -> None:
"""
Args:
img_shape : (C, H, W) image shapes
dropout (float): the probability of zeroing before the FC layer
base_c (int): The base number of channels for the discriminator
"""
super(Discriminator, self).__init__()
self.img_shape = img_shape
in_C = img_shape[0]
######################
# START CODING HERE ##
######################
# Definition of the convolutional part of the classifier
# Hint : conv_bn_leakyrelu() and conv_downsampling() can
# be useful
#@[email protected] = None
#@SOL
# Note: the output receptive field size is 36 x 36
# the output representation size is 3 x 3
self.cnn = nn.Sequential(
*conv_bn_leakyrelu(in_C, base_c),
*conv_bn_leakyrelu(base_c, base_c),
*conv_downsampling(base_c),
nn.Dropout2d(dropout),
*conv_bn_leakyrelu(base_c, base_c*2),
*conv_bn_leakyrelu(base_c*2, base_c*2),
*conv_downsampling(base_c*2),
nn.Dropout2d(dropout),
*conv_bn_leakyrelu(base_c*2, base_c*3),
*conv_bn_leakyrelu(base_c*3, base_c*3),
*conv_downsampling(base_c*3),
nn.Dropout2d(dropout)
)
#SOL@
####################
# END CODING HERE ##
####################
# Compute the size of the representation by forward propagating
# a fake tensor; This can be cpu tensor as the model is not yet
# built and therefore not yet transfered to the GPU
fake_input = torch.zeros((1, *img_shape))
out_cnn = self.cnn(fake_input)
print(f"The output shape of the convolutional part of the "
f"discriminator is {out_cnn.shape}")
num_features = reduce(operator.mul, out_cnn.shape[1:])
######################
# START CODING HERE ##
######################
# The fully connected part of the classifier
#@[email protected] = None
#@SOL
self.classif = nn.Sequential(
nn.Linear(num_features, 1)
)
#SOL@
####################
# END CODING HERE ##
####################
# Run the initialization script
self.apply(self.init_weights)
def init_weights(self, m):
"""
Initialize the weights of the convolutional layers
"""
with torch.no_grad():
if isinstance(m, nn.Conv2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
if m.bias is not None:
m.bias.fill_(0.)
def forward(self,
X: torch.Tensor) -> torch.Tensor:
"""
Forward pass of the discriminator
Args:
X(torch.Tensor (B, C, H, W)) : The images to classify
Returns:
Logits (torch.Tensor (B, )) : The logits
"""
######################
# START CODING HERE ##
######################
# Step 1 - Forward pass through the CNN part
#@TEMPL@out_cnn = None
out_cnn = self.cnn(X) #@SOL@
# Step 2 - "Reshape" the 4D tensor to a 2D tensor
# Hint : Tensor.view can be of help
#@TEMPL@input_classif = None
input_classif = out_cnn.view((out_cnn.shape[0], -1)) #@SOL@
# Step 3 - Forward pass through the fully connected layers
#@TEMPL@out_classif = None
out_classif = self.classif(input_classif) #@SOL@
####################
# END CODING HERE ##
####################
return out_classif.squeeze()
def up_conv_bn_relu(in_channels, out_channels):
"""
Upsampling with Upsample - Conv
UpSample(x2) - Conv(3x3) - BN - Relu - Conv(3x3) - BN - Relu
"""
ks = 3
return [
nn.Upsample(scale_factor=2),
nn.Conv2d(in_channels,
out_channels,
kernel_size=ks,
stride=1,
padding=int((ks-1)/2),
bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Conv2d(out_channels,
out_channels,
kernel_size=ks,
stride=1,
padding=int((ks-1)/2),
bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU()
]
def tconv_bn_relu(in_channels, out_channels, ksize, stride, pad, opad):
"""
Upsampling with transposed convolutions
TConv2D - BN - LeakyRelu(0.2)
"""
return [
nn.ConvTranspose2d(in_channels, out_channels,
kernel_size=ksize,
stride=stride,
padding=pad,
output_padding=opad),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(negative_slope=0.2)
]
class Generator(nn.Module):
"""
The generator network generates images from random inputs
"""
def __init__(self,
img_shape: Tuple[int, int, int],
latent_size: int,
base_c: int) -> None:
"""
Args:
img_shape : (C, H, W) image shapes
latent_size (int) : The dimension of the latent space
base_c (int) : The base number of channels
"""
super(Generator, self).__init__()
self.img_shape = img_shape
self.latent_size = latent_size
self.base_c = base_c
H, W = img_shape[1:]
######################
# START CODING HERE ##
######################
# Step 1 - Build the feedforward upscaling network
#@[email protected] = nn.Sequential()
#@SOL
self.upscale = nn.Sequential(
nn.Linear(self.latent_size, H//4*W//4*self.base_c*4, bias=False),
nn.BatchNorm1d(H//4*W//4*self.base_c*4),
nn.ReLU()
)
#SOL@
# Step 2 - Build the convolutional upscaling network
# Hint : up_conv_bn_relu() might be useful
#@[email protected] = nn.Sequential()
#@SOL
self.model = nn.Sequential(
*up_conv_bn_relu(self.base_c*4, self.base_c*2),
*up_conv_bn_relu(self.base_c*2, self.base_c),
nn.Conv2d(self.base_c, self.img_shape[0],
kernel_size=1,stride=1, padding=0, bias=True),
nn.Tanh()
)
#SOL@
####################
# END CODING HERE ##
####################
#@SOL
# Note : size, stride, pad, opad
# self.model = nn.Sequential(
# *tconv_bn_relu2(base_c*4, base_c*2, 5, 1, 2, 0),
# # nn.Dropout2d(0.3),
# *tconv_bn_relu2(base_c*2, base_c, 5, 2, 2, 1),
# # nn.Dropout2d(0.3),
# nn.ConvTranspose2d(base_c, 1, 5, 2, 2, 1),
# nn.Tanh() # as suggested by [Radford, 2016]
# )
#SOL@
# Initialize the convolutional layers
self.apply(self.init_weights)
def init_weights(self, m):
with torch.no_grad():
if isinstance(m, nn.Conv2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
if m.bias is not None:
m.bias.fill_(0.)
def forward(self,
X: Optional[torch.Tensor] = None,
batch_size: Optional[float] = None) -> torch.Tensor:
"""
Forward pass of the generator. You can either provide a noise
input vector or specify the batch_size to let it generate the input
Args:
X (torch.Tensor, optional): The input noise batch
batch_size (int, optional): The number of samples to generate
"""
# X is expected to be a 2D tensor (B, L)
if X is None:
assert(batch_size is not None)
device = next(self.parameters()).device
X = torch.randn(batch_size, self.latent_size).to(device)
else:
if len(X.shape) != 2:
raise RuntimeError("Expected a 2D tensor as input to the "
f" generator got a {len(X.shape)}D tensor.")
######################
# START CODING HERE ##
######################
# Step 1 - Forward pass through the first linear layers
# to generate the seed image
#@TEMPL@upscaled = None
upscaled = self.upscale(X) #@SOL@
# Step 2 - "Reshape" the upscaled image as a 4D tensor
# Hint : use the view method
#@TEMPL@reshaped = None
reshaped = upscaled.view(-1, self.base_c*4, self.img_shape[1]//4, self.img_shape[2]//4) #@SOL@
# Step 3 : Forward pass through the last convolutional part
# to generate the image
#@TEMPL@out = None
out = self.model(reshaped) #@SOL@
####################
# END CODING HERE ##
####################
return out
class GAN(nn.Module):
def __init__(self,
img_shape: Tuple[int, int, int],
dropout: float,
discriminator_base_c: int,
latent_size: int,
generator_base_c: int) -> None:
"""
Args:
img_shape : (C, H, W) image shapes
dropout (float): The probability of zeroing before the FC layers
discriminator_base_c (int) : The base number of channels for
the discriminator
latent_size (int) : The size of the latent space for the generator
generator_base_c (int) : The base number of channels for the
generator
"""
super(GAN, self).__init__()
self.img_shape = img_shape
self.discriminator = Discriminator(img_shape,
dropout,
discriminator_base_c)
self.generator = Generator(img_shape,
latent_size,
generator_base_c)
def forward(self,
X: Optional[torch.Tensor],
batch_size: Optional[float]):
"""
Given true images, returns the generated tensors
and the logits of the discriminator for both the generated tensors
and the true tensors
Args:
X (torch.Tensor) : a real image or None if we just
want the logits for the generated images
batch_size (int) : the batch to consider when generating
fake images
"""
if X is None and batch_size is None:
raise RuntimeError("X and batch_size cannot both be None")
if X is not None and batch_size is not None:
raise RuntimeError("X and batch_size cannot both be provided")
if X is not None:
######################
# START CODING HERE ##
######################
# An input tensor of real images is provided
# we compute its logits
# 1 line
#@TEMPL@real_logits = None
real_logits = self.discriminator(X) #@SOL@
####################
# END CODING HERE ##
####################
return real_logits, X
else:
######################
# START CODING HERE ##
######################
# No input tensor is provided. We generate batch_size fake images
# and evaluate its logits
# 2 lines
#@TEMPL@fake_images = None
#@TEMPL@fake_logits = None
#@SOL
fake_images = self.generator(X=None, batch_size=batch_size)
fake_logits = self.discriminator(fake_images)
#SOL@
####################
# END CODING HERE ##
####################
return fake_logits, fake_images
#@SOL
def test_tconv():
layers = nn.Sequential(
nn.Conv2d(20, 10, kernel_size=3, stride=1, padding=2)
)
print(layers)
inputs = torch.zeros((1, 20, 2, 2))
outputs = layers(inputs)
print(outputs.shape)
imagify = nn.Linear(100, 7*7*10)
conv1 = nn.ConvTranspose2d(10, 10,
kernel_size=5,
stride=1,
padding=2)
conv2 = nn.ConvTranspose2d(10, 10,
kernel_size=5,
stride=2,
padding=2,
output_padding=1)
conv3 = nn.ConvTranspose2d(10, 1,
kernel_size=5,
stride=2,
padding=2, output_padding=1)
X = torch.randn(64, 100)
X = imagify(X).view(-1, 10, 7, 7)
print('--')
print(X.shape)
X = conv1(X)
print(X.shape)
X = conv2(X)
print(X.shape)
X = conv3(X)
print(X.shape)
#SOL@
def test_discriminator():
critic = Discriminator((1, 28, 28), 0.3, 32)
X = torch.randn(64, 1, 28, 28)
out = critic(X)
assert(out.shape == torch.Size([64]))
def test_generator():
generator = Generator((1, 28, 28), 100, 64)
X = torch.randn(64, 100)
out = generator(X, None)
assert(out.shape == torch.Size([64, 1, 28, 28]))
out = generator(None, 64)
assert(out.shape == torch.Size([64, 1, 28, 28]))
if __name__ == '__main__':
test_tconv() #@SOL@
test_discriminator()
test_generator()
```
|
{
"source": "jeremyfix/listen-attend-and-spell",
"score": 3
}
|
#### File: listen-attend-and-spell/scripts/plot_stats.py
```python
import argparse
import json
# External import
import matplotlib.pyplot as plt
def plot_stats(args):
with open(args.statfile, 'r') as f:
stats = json.load(f)
rate = stats['waveform']['sample_rates'][0]
if len(stats['waveform']['sample_rates']) > 1:
print("Multiple sampling rates, I take the first one of {}".format(stats['waveform']['sample_rates']))
num_samples = list(stats['waveform']['num_samples'].items())
num_samples = sorted(num_samples, key=lambda tu: tu[0], reverse=True)
# Convert num_samples to second
x_num_samples = [int(s)/rate for s, _ in num_samples]
weights_num_samples = [num for _, num in num_samples]
plt.figure()
plt.hist(x_num_samples, weights=weights_num_samples)
plt.xlabel(r"Time ($s$)")
plt.title(f"Sample length from {args.statfile}")
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('statfile',
help="The stat file to process",
type=str)
args = parser.parse_args()
plot_stats(args)
```
|
{
"source": "jeremyforest/annolid",
"score": 3
}
|
#### File: annolid/annotation/coco2yolo.py
```python
import os
import json
from pathlib import Path
import shutil
def xywh2cxcywh(box, img_size):
dw = 1. / img_size[0]
dh = 1. / img_size[1]
x = box[0] + box[2] / 2.0
y = box[1] + box[3] / 2.0
w = box[2]
h = box[3]
x = x * dw
w = w * dw
y = y * dh
h = h * dh
return (x, y, w, h)
def create_dataset(json_file='annotation.json',
results_dir='yolov5_dataset',
dataset_type='train',
class_id=None
):
categories = []
images_path = Path(f"{results_dir}/images/{dataset_type}")
images_path.mkdir(parents=True, exist_ok=True)
labels_path = Path(f"{results_dir}/labels/{dataset_type}")
labels_path.mkdir(parents=True, exist_ok=True)
with open(json_file, 'r') as jf:
data = json.load(jf)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
img_file_path = Path(json_file).parent
for img in data['images']:
file_name = img["file_name"]
img_width = img["width"]
img_height = img["height"]
img_id = img["id"]
file_name = file_name.replace("\\", "/")
shutil.copy(img_file_path / file_name, images_path)
anno_txt_name = os.path.basename(file_name).split(".")[0] + ".txt"
anno_txt_file = labels_path / anno_txt_name
with open(anno_txt_file, 'w') as atf:
for ann in data['annotations']:
if ann["image_id"] == img_id:
box = xywh2cxcywh(ann["bbox"], (img_width, img_height))
if class_id is not None:
atf.write("%s %s %s %s %s %s\n" % (class_id, ann["category_id"], box[0],
box[1], box[2], box[3]))
else:
atf.write("%s %s %s %s %s\n" % (ann["category_id"], box[0],
box[1], box[2], box[3]))
for c in data["categories"]:
# exclude background with id 0
if not c['id'] == 0:
categories.append(c['name'])
data_yaml = Path(f"{results_dir}/data.yaml")
names = list(categories)
# dataset folder is in same dir as the yolov5 folder
with open(data_yaml, 'w') as dy:
dy.write(f"train: ../{results_dir}/images/train\n")
dy.write(f"val: ../{results_dir}/images/val\n")
dy.write(f"nc: {len(names)}\n")
dy.write(f"names: {names}")
return names
```
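A hedged usage sketch for `create_dataset`; the COCO annotation path and output directory below are placeholders:
```python
# Convert a COCO-style annotation file into YOLOv5 layout (images/, labels/, data.yaml).
names = create_dataset(
    json_file="annotation.json",
    results_dir="yolov5_dataset",
    dataset_type="train",
)
print(f"classes written to data.yaml: {names}")
```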
#### File: annolid/annotation/masks.py
```python
import cv2
import ast
import numpy as np
import pycocotools.mask as mask_util
from simplification.cutil import simplify_coords_vwp
def mask_to_polygons(mask,
use_convex_hull=False):
"""
convert predicted mask to polygons
"""
# for cv2 versions that do not support incontiguous array
mask = np.ascontiguousarray(mask)
mask = mask.astype(np.uint8)
# the cv2.RETR_CCOMP flag retrieves all of the contours
# and arranges them into a 2-level hierarchy.
res = cv2.findContours(mask,
cv2.RETR_CCOMP,
cv2.CHAIN_APPROX_SIMPLE)
hierarchy = res[-1]
# mask is empty
if hierarchy is None:
return [], False
has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
res = res[-2]
try:
res_simp = simplify_coords_vwp(res[0].squeeze(), 30.0)
res_simp = np.array(res_simp)
res = [np.expand_dims(res_simp, axis=1)]
except ValueError:
print('Failed to simplify the points.')
if use_convex_hull:
hull = []
for i in range(len(res)):
hull.append(cv2.convexHull(res[i], False))
res = [x.flatten() for x in hull]
else:
res = [x.flatten() for x in res]
# convert OpenCV int coordinates [0, H -1 or W-1] to
# real value coordinate spaces
res = [x + 0.5 for x in res if len(x) >= 6]
return res, has_holes
def mask_perimeter(mask):
"""calculate perimeter for a given binary mask
"""
try:
mask = mask_util.decode(mask)
except TypeError:
mask = ast.literal_eval(mask)
rle = [mask]
mask = mask_util.decode(rle)
contours, hierarchy = cv2.findContours(mask, cv2.RETR_CCOMP,
cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
perimeter = cv2.arcLength(cnt, True)
return perimeter
def mask_area(mask):
"""Calulate the area of a RLE mask.
"""
try:
area = mask_util.area(mask)
except TypeError:
mask = ast.literal_eval(mask)
area = mask_util.area(mask)
return area
def mask_iou(this_mask, other_mask):
"""
Calculate intersection over union between two RLE masks.
"""
try:
_iou = mask_util.iou([this_mask],
[other_mask],
[False, False])
except Exception:
this_mask = ast.literal_eval(this_mask)
other_mask = ast.literal_eval(other_mask)
_iou = mask_util.iou([this_mask],
[other_mask],
[False, False])
return _iou.flatten()[0]
```
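A small illustrative sketch (assumed, not from the repo) of `mask_to_polygons` on a synthetic binary mask:
```python
import numpy as np

# A filled square should yield a single polygon and no holes.
mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:48, 16:48] = 1
polygons, has_holes = mask_to_polygons(mask)
print(len(polygons), has_holes)
```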
#### File: gui/widgets/extract_frame_dialog.py
```python
from pathlib import Path
from qtpy import QtCore
from qtpy import QtWidgets
class ExtractFrameDialog(QtWidgets.QDialog):
def __init__(self, video_file=None, *args, **kwargs):
super(ExtractFrameDialog, self).__init__(*args, **kwargs)
self.setWindowTitle("Extract Frames from a video")
self.radioButtons()
self.slider()
self.num_frames = 100
self.algo = 'random'
self.video_file = video_file
self.out_dir = None
self.start_sconds = None
self.end_seconds = None
self.sub_clip = False
qbtn = QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
self.buttonbox = QtWidgets.QDialogButtonBox(qbtn)
self.buttonbox.accepted.connect(self.accept)
self.buttonbox.rejected.connect(self.reject)
self.label1 = QtWidgets.QLabel(
f"Please type or select number of frames default={self.num_frames} or use -1 for all frames")
self.inputFileLineEdit = QtWidgets.QLineEdit(self)
if self.video_file is not None:
self.inputFileLineEdit.setText(self.video_file)
self.framesLineEdit = QtWidgets.QLineEdit(self)
self.framesLineEdit.setText(str(self.num_frames))
self.inputFileButton = QtWidgets.QPushButton('Open', self)
self.inputFileButton.clicked.connect(self.onInputFileButtonClicked)
self.framesLineEdit.textChanged.connect(self.onSliderChange)
hboxLayOut = QtWidgets.QHBoxLayout()
self.groupBoxFiles = QtWidgets.QGroupBox("Please choose a video file")
hboxLayOut.addWidget(self.inputFileLineEdit)
hboxLayOut.addWidget(self.inputFileButton)
self.groupBoxFiles.setLayout(hboxLayOut)
self.groupBoxSubClip = QtWidgets.QGroupBox(
"Please type the start seconds and end seconds for the video clip (Optional)")
self.startSecondsLabel = QtWidgets.QLabel(self)
self.startSecondsLabel.setText("Start seconds:")
self.startSecondsLineEdit = QtWidgets.QLineEdit(self)
self.startSecondsLineEdit.textChanged.connect(
self.onCutClipStartTimeChanged)
self.endSecondsLabel = QtWidgets.QLabel(self)
self.endSecondsLabel.setText("End seconds:")
self.endSecondsLineEdit = QtWidgets.QLineEdit(self)
self.endSecondsLineEdit.textChanged.connect(
self.onCutClipEndTimeChanged)
hboxLayOutSubClip = QtWidgets.QHBoxLayout()
hboxLayOutSubClip.addWidget(self.startSecondsLabel)
hboxLayOutSubClip.addWidget(self.startSecondsLineEdit)
hboxLayOutSubClip.addWidget(self.endSecondsLabel)
hboxLayOutSubClip.addWidget(self.endSecondsLineEdit)
self.groupBoxSubClip.setLayout(hboxLayOutSubClip)
self.groupBoxOutDir = QtWidgets.QGroupBox(
"Please choose output directory (Optional)")
self.outFileDirEdit = QtWidgets.QLineEdit(self)
self.outDirButton = QtWidgets.QPushButton('Select', self)
self.outDirButton.clicked.connect(self.onOutDirButtonClicked)
hboxLayOutDir = QtWidgets.QHBoxLayout()
hboxLayOutDir.addWidget(self.outFileDirEdit)
hboxLayOutDir.addWidget(self.outDirButton)
self.groupBoxOutDir.setLayout(hboxLayOutDir)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.groupBox)
vbox.addWidget(self.label1)
vbox.addWidget(self.framesLineEdit)
vbox.addWidget(self.slider)
vbox.addWidget(self.groupBoxFiles)
vbox.addWidget(self.groupBoxSubClip)
vbox.addWidget(self.groupBoxOutDir)
vbox.addWidget(self.buttonbox)
self.setLayout(vbox)
self.show()
def onInputFileButtonClicked(self):
self.video_file, filter = QtWidgets.QFileDialog.getOpenFileName(
parent=self,
caption="Open video file",
directory=str(Path()),
filter='*'
)
if self.video_file is not None:
self.inputFileLineEdit.setText(self.video_file)
def onOutDirButtonClicked(self):
self.out_dir = QtWidgets.QFileDialog.getExistingDirectory(self,
"Select Directory")
if self.out_dir is not None:
self.outFileDirEdit.setText(self.out_dir)
def radioButtons(self):
self.groupBox = QtWidgets.QGroupBox("Please choose an algorithm")
hboxLayOut = QtWidgets.QHBoxLayout()
self.radio_btn1 = QtWidgets.QRadioButton("random")
self.radio_btn1.setChecked(True)
self.radio_btn1.toggled.connect(self.onRadioButtonChecked)
hboxLayOut.addWidget(self.radio_btn1)
self.radio_btn2 = QtWidgets.QRadioButton("keyframes")
self.radio_btn2.toggled.connect(self.onRadioButtonChecked)
hboxLayOut.addWidget(self.radio_btn2)
self.radio_btn3 = QtWidgets.QRadioButton("flow")
self.radio_btn3.toggled.connect(self.onRadioButtonChecked)
hboxLayOut.addWidget(self.radio_btn3)
self.groupBox.setLayout(hboxLayOut)
def slider(self):
self.slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.slider.setMinimum(-1)
self.slider.setMaximum(1000)
self.slider.setValue(10)
self.slider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.slider.setTickInterval(1)
self.slider.valueChanged.connect(self.onSliderChange)
def onRadioButtonChecked(self):
radio_btn = self.sender()
if radio_btn.isChecked():
self.algo = radio_btn.text()
if self.algo == 'keyframes':
self.slider.setDisabled(True)
self.slider.hide()
self.label1.setVisible(False)
self.framesLineEdit.hide()
self.framesLineEdit.setVisible(False)
else:
self.slider.setEnabled(True)
if self.slider.isHidden():
self.slider.setVisible(True)
if self.label1.isHidden():
self.label1.setVisible(True)
if self.framesLineEdit.isHidden():
self.framesLineEdit.setVisible(True)
def onSliderChange(self, position):
self.num_frames = int(position) if position and str(
position).isdigit() else -1
self.framesLineEdit.setText(str(position))
if self.num_frames == -1:
self.label1.setText(
f"You have selected to extract all the frames."
)
else:
self.label1.setText(
f"You have selected {str(self.num_frames)} frames.")
def onCutClipStartTimeChanged(self):
self.start_sconds = self.startSecondsLineEdit.text()
if not self.start_sconds.isdigit():
QtWidgets.QMessageBox.about(self,
"invalid start seconds",
"Please enter a vaild int number for start seconds")
if self.start_sconds.isdigit():
self.start_sconds = int(self.start_sconds)
def onCutClipEndTimeChanged(self):
self.end_seconds = self.endSecondsLineEdit.text()
if not self.end_seconds.isdigit():
QtWidgets.QMessageBox.about(self,
"invalid end seconds",
"Please enter a vaild int number for end seconds")
if self.end_seconds.isdigit():
self.end_seconds = int(self.end_seconds)
```
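The dialog is normally opened from the annolid GUI; a minimal standalone launch sketch (an assumption, not code from the repo) could look like this:
```python
import sys
from qtpy import QtWidgets

app = QtWidgets.QApplication(sys.argv)
dialog = ExtractFrameDialog(video_file=None)
if dialog.exec_() == QtWidgets.QDialog.Accepted:
    print(dialog.video_file, dialog.num_frames, dialog.algo, dialog.out_dir)
```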
#### File: gui/widgets/quality_control_dialog.py
```python
from pathlib import Path
from qtpy import QtCore
from qtpy import QtWidgets
class QualityControlDialog(QtWidgets.QDialog):
def __init__(self, *args, **kwargs):
super(QualityControlDialog, self).__init__(*args, **kwargs)
self.setWindowTitle("Convert tracking results to labelme format")
self.video_file = None
self.tracking_results = None
qbtn = QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
self.buttonbox = QtWidgets.QDialogButtonBox(qbtn)
self.buttonbox.accepted.connect(self.accept)
self.buttonbox.rejected.connect(self.reject)
self.groupBoxVideoFiles = QtWidgets.QGroupBox(
"Please choose a video file")
self.inputVideoFileLineEdit = QtWidgets.QLineEdit(self)
self.inputVideoFileButton = QtWidgets.QPushButton('Open', self)
self.inputVideoFileButton.clicked.connect(
self.onInputVideoFileButtonClicked)
video_hboxLayOut = QtWidgets.QHBoxLayout()
video_hboxLayOut.addWidget(self.inputVideoFileLineEdit)
video_hboxLayOut.addWidget(self.inputVideoFileButton)
self.groupBoxVideoFiles.setLayout(video_hboxLayOut)
hboxLayOut = QtWidgets.QHBoxLayout()
self.groupBoxFiles = QtWidgets.QGroupBox(
"Please choose tracking results CSV file")
self.inputFileLineEdit = QtWidgets.QLineEdit(self)
self.inputFileButton = QtWidgets.QPushButton('Open', self)
self.inputFileButton.clicked.connect(
self.onInputFileButtonClicked)
hboxLayOut.addWidget(self.inputFileLineEdit)
hboxLayOut.addWidget(self.inputFileButton)
self.groupBoxFiles.setLayout(hboxLayOut)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.groupBoxVideoFiles)
vbox.addWidget(self.groupBoxFiles)
vbox.addWidget(self.buttonbox)
self.setLayout(vbox)
self.show()
def onInputVideoFileButtonClicked(self):
self.video_file, filter = QtWidgets.QFileDialog.getOpenFileName(
parent=self,
caption="Open video file",
directory=str(Path()),
filter='*'
)
if self.video_file is not None:
self.inputVideoFileLineEdit.setText(self.video_file)
def onInputFileButtonClicked(self):
self.tracking_results, filter = QtWidgets.QFileDialog.getOpenFileName(
parent=self,
caption="Open tracking results CSV file",
directory=str(Path()),
filter='*'
)
if self.tracking_results is not None:
self.inputFileLineEdit.setText(self.tracking_results)
```
#### File: gui/widgets/train_model_dialog.py
```python
from pathlib import Path
from qtpy import QtCore
from qtpy import QtWidgets
class TrainModelDialog(QtWidgets.QDialog):
def __init__(self, *args, **kwargs):
super(TrainModelDialog, self).__init__(*args, **kwargs)
self.setWindowTitle("Train models")
self.radioButtons()
self.slider()
self.batch_size = 8
self.algo = 'MaskRCNN'
self.config_file = None
self.out_dir = None
self.max_iterations = 2000
qbtn = QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
self.buttonbox = QtWidgets.QDialogButtonBox(qbtn)
self.buttonbox.accepted.connect(self.accept)
self.buttonbox.rejected.connect(self.reject)
self.label1 = QtWidgets.QLabel("Please select batch size default=8")
self.inputFileLineEdit = QtWidgets.QLineEdit(self)
self.inputFileButton = QtWidgets.QPushButton('Open', self)
self.inputFileButton.clicked.connect(self.onInputFileButtonClicked)
self.label2 = QtWidgets.QLabel(
f"Please select training max iterations default 2000 (Optional)")
hboxLayOut = QtWidgets.QHBoxLayout()
self.groupBoxFiles = QtWidgets.QGroupBox("Please choose a config file")
hboxLayOut.addWidget(self.inputFileLineEdit)
hboxLayOut.addWidget(self.inputFileButton)
self.groupBoxFiles.setLayout(hboxLayOut)
self.groupBoxOutDir = QtWidgets.QGroupBox(
"Please choose output directory (Optional)")
self.outFileDirEdit = QtWidgets.QLineEdit(self)
self.outDirButton = QtWidgets.QPushButton('Select', self)
self.outDirButton.clicked.connect(self.onOutDirButtonClicked)
hboxLayOutDir = QtWidgets.QHBoxLayout()
hboxLayOutDir.addWidget(self.outFileDirEdit)
hboxLayOutDir.addWidget(self.outDirButton)
self.groupBoxOutDir.setLayout(hboxLayOutDir)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.groupBox)
vbox.addWidget(self.label1)
vbox.addWidget(self.slider)
vbox.addWidget(self.groupBoxFiles)
if self.algo == 'MaskRCNN':
self.max_iter_slider()
# self.label1.hide()
# self.slider.hide()
vbox.addWidget(self.label2)
vbox.addWidget(self.max_iter_slider)
vbox.addWidget(self.groupBoxOutDir)
vbox.addWidget(self.buttonbox)
self.setLayout(vbox)
self.show()
def max_iter_slider(self):
self.max_iter_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.max_iter_slider.setMinimum(100)
self.max_iter_slider.setMaximum(20000)
self.max_iter_slider.setValue(2000)
self.max_iter_slider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.max_iter_slider.setTickInterval(100)
self.max_iter_slider.setSingleStep(100)
self.max_iter_slider.valueChanged.connect(self.onMaxIterSliderChange)
def onSliderChange(self):
self.batch_size = self.slider.value()
self.label1.setText(
f"You selected {str(self.batch_size)} as batch size")
def onInputFileButtonClicked(self):
self.config_file, filter = QtWidgets.QFileDialog.getOpenFileName(
parent=self,
caption="Open config file",
directory=str(Path()),
filter='*'
)
if self.config_file is not None:
self.inputFileLineEdit.setText(self.config_file)
def onOutDirButtonClicked(self):
self.out_dir = QtWidgets.QFileDialog.getExistingDirectory(self,
"Select Directory")
if self.out_dir is not None:
self.outFileDirEdit.setText(self.out_dir)
def radioButtons(self):
self.groupBox = QtWidgets.QGroupBox("Please choose a model")
hboxLayOut = QtWidgets.QHBoxLayout()
self.radio_btn1 = QtWidgets.QRadioButton("MaskRCNN")
self.radio_btn1.setChecked(True)
self.radio_btn1.toggled.connect(self.onRadioButtonChecked)
hboxLayOut.addWidget(self.radio_btn1)
self.radio_btn2 = QtWidgets.QRadioButton("YOLACT")
self.radio_btn2.toggled.connect(self.onRadioButtonChecked)
self.radio_btn2.setEnabled(True)
hboxLayOut.addWidget(self.radio_btn2)
self.radio_btn3 = QtWidgets.QRadioButton("YOLOv5")
self.radio_btn3.toggled.connect(self.onRadioButtonChecked)
self.radio_btn3.setEnabled(False)
hboxLayOut.addWidget(self.radio_btn3)
self.groupBox.setLayout(hboxLayOut)
def slider(self):
self.slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.slider.setMinimum(1)
self.slider.setMaximum(128)
self.slider.setValue(8)
self.slider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.slider.setTickInterval(1)
self.slider.valueChanged.connect(self.onSliderChange)
def onRadioButtonChecked(self):
radio_btn = self.sender()
if radio_btn.isChecked():
self.algo = radio_btn.text()
if self.algo == 'YOLACT':
self.label1.show()
self.slider.show()
self.label2.hide()
self.max_iter_slider.hide()
elif self.algo == 'MaskRCNN':
# self.label1.hide()
# self.slider.hide()
self.label2.show()
self.max_iter_slider.show()
def onMaxIterSliderChange(self):
self.max_iterations = self.max_iter_slider.value()
self.label2.setText(
f"You selected to {str(self.max_iterations)} iterations")
```
#### File: annolid/postprocessing/visualizer.py
```python
import torch
import decord
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter
from annolid.features import Embedding
# Temp fix of the no attribute 'get_filesytem' error
#import tensorflow as tf
#import tensorboard as tb
#tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
def tensorboard_writer(logdir=None):
if logdir is None:
here = Path(__file__).parent.resolve()
logdir = here.parent / 'runs' / 'logs'
writer = SummaryWriter(log_dir=str(logdir))
return writer
def frame_embeddings(frame):
embed_vector = Embedding()(frame)
return embed_vector
def main(video_url=None):
decord.bridge.set_bridge('torch')
if torch.cuda.is_available():
ctx = decord.gpu(0)
else:
ctx = decord.cpu(0)
vr = decord.VideoReader(
video_url,
ctx=ctx
)
writer = tensorboard_writer()
frame_number = 0
for frame in vr:
frame_numpy = frame.numpy()
embed_vector = frame_embeddings([frame_numpy])
writer.add_histogram('Frame Embeddings', embed_vector)
writer.add_embedding(embed_vector,
metadata=[1],
label_img=frame.permute(2, 0, 1).unsqueeze(0),
global_step=frame_number
)
frame_number += 1
writer.close()
if __name__ == "__main__":
main()
```
#### File: annolid/utils/box.py
```python
import os
import json
from boxsdk import OAuth2, Client
def get_box_client(config_file='../config/box_config.json'):
"""authenticate with box.com return app client
to access and mananage box folder and files
Args:
config_file (str, optional): this file can be downloaded
from https://your_org_name.app.box.com/developers/console/app/xxxxxxx/configuration
Defaults to '../config/box_config.json'.
Please copy the developer token from the above url and add a row `"developer_token":"<PASSWORD>",`
to the box_config.json file.
Returns:
Client: box client object
"""
with open(config_file, 'r') as cfg:
box_cfg = json.load(cfg)
client_id = box_cfg['boxAppSettings']['clientID']
client_secret = box_cfg['boxAppSettings']['clientSecret']
token = box_cfg['developer_token']
oauth = OAuth2(
client_id=client_id,
client_secret=client_secret,
access_token=token,
)
client = Client(oauth)
return client
def get_box_folder_items(client, folder_id='0'):
"""get box folder and items in side the folder with provide folder id
Args:
client (Client): box API client
folder_id (str, optional): folder
ID, e.g. from app.box.com/folder/FOLDER_ID
. Defaults to '0'.
Returns:
Folder, File: box folder and file objects
"""
box_folder = client.folder(folder_id=folder_id)
return box_folder, box_folder.get_items()
def upload_file(box_folder, local_file_path):
"""upload a local file to the box folder
Args:
box_folder (folder object)
local_file_path (str): local file absolute path e.g. /data/video.mp4
"""
box_folder.upload(local_file_path)
def download_file(box_file, local_file_path):
"""Download a file in box to local disk
Args:
box_file (File): box file object
local_file_path (str): local file path e.g. /data/video.mp4
"""
with open(local_file_path, 'wb') as lf:
box_file.download_to(lf)
def is_results_complete(box_folder,
result_file_pattern='_motion.csv',
num_expected_results=0
):
"""Check if a box folder contains all the expected result files
Args:
box_folder (BoxFolder): box folder object
result_file_pattern (str, optional): pattern in the file. Defaults to '_motion.csv'.
Returns:
bool: True if the folder contails the expected number of result files else False
"""
num_of_results = 0
for bf in box_folder.get_items():
if result_file_pattern in bf.name:
num_of_results += 1
return num_of_results == num_expected_results
def upload_results(box_folder,
tracking_results,
csv_pattern='motion.csv'):
"""upload the annolid output results to a box folder
Args:
box_folder (BoxFolder object): box folder that contains the results files
tracking_results (dict): a dict mapping the folder name to the local file path
csv_pattern (str, optional): results csv file pattern. Defaults to 'motion.csv'.
Returns:
bool: True if a new results file was uploaded
"""
has_results = False
for bf in box_folder.get_items():
if csv_pattern in bf.name:
has_results = True
break
if not has_results:
local_file = tracking_results[box_folder.name]
if os.path.exists(local_file):
upload_file(box_folder, local_file)
print('Upload file: ', local_file)
return True
```
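A hypothetical end-to-end sketch of the helpers above; the folder id is a placeholder and the call assumes a valid box_config.json with a developer token:
```python
client = get_box_client("../config/box_config.json")
box_folder, items = get_box_folder_items(client, folder_id="0")
for item in items:
    print(item.name)
# upload_file(box_folder, "/data/video.mp4")  # uncomment to push a local file
```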
|
{
"source": "jeremyforest/Hopfield_model",
"score": 2
}
|
#### File: jeremyforest/Hopfield_model/hopfield_model.py
```python
import numpy as np
import matplotlib.pyplot as plt
#################################
##### Simulation parameters #####
#################################
DT = 1.
TSIM = 30.
epoch = int(TSIM/DT)
N = 16
pattern = [[-1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1]]
pattern_height = 4
pattern_width = 4
degraded_input = [[1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1]]
#########################
######### Code ##########
#########################
class HopfieldNetwork():
def __init__(self, N):
self.N = N
assert N == len(pattern[0])
assert len(pattern[0]) == len(degraded_input[0])
def W(self):
#weight matrix
return np.zeros([self.N, self.N])
def X(self):
# neurons state random initialization
return [np.random.choice([-1,1]) for n in range(self.N)]
def energy(self, X, W):
return -0.5*np.dot(np.dot(np.transpose(X), W), X)
def update_state_async(W, X, theta=0):
# update state asynchronously
neurons_order = np.random.permutation(N)
i = np.random.choice(neurons_order) #choose random neuron
## would need to rewrite this so there is no redraw from the sampling until every number has been drawn once (maybe this is why it does not update every epoch?)
X[i] = np.dot(W[i], X) - theta
if X[i] > 0:
X[i] = 1
else:
X[i] = -1
return X
def learn_pattern(pattern):
for pattern_nb in range(len(pattern)):
for i in range(N):
for j in range(N):
if i == j:
W[i,j] = 0
else:
W[i,j] = pattern[pattern_nb][i] * pattern[pattern_nb][j] ### to modify when storing multiple patterns (add the 1/nb_pattern factor and the sum over patterns)
W[i,j] = W[i,j]/len(pattern)
W[j,i] = W[i,j]
return W
def input_representation(X, epoch, show=False):
fig, ax = plt.subplots(1, len(pattern), squeeze=False)
for i in range(len(pattern)):
X_prime = np.reshape(X, (4, 4))
ax[0,i].matshow(X_prime, cmap='gray')
ax[0,i].set_xticks([])
ax[0,i].set_yticks([])
if show:
plt.show()
else:
plt.savefig('neuron activation at time ' + str(epoch))
def energy_representation(energy, show=False):
plt.figure()
plt.plot(range(epoch), energy)
# plt.axis([0, epoch, 0, np.amax(energy)])
if show:
plt.show()
else:
plt.savefig('energy.png')
plt.close()
###############################
### Instantiate the network ###
###############################
net = HopfieldNetwork(N)
W = net.W()
X = net.X()
energy = []
########################
### Run computations ###
########################
learn_pattern(pattern)
# print('learned input') ; input_representation(pattern)
# print('degraded input') ; input_representation(degraded_input[0])
e=0
X = degraded_input[0]
while e < epoch:
X_new = update_state_async(W, X, theta=0)
energy.append(net.energy(X_new, W))
# import pdb; pdb.set_trace()
input_representation(X_new, e, show=False)
e+=1
X = X_new
energy_representation(energy, show=False)
```
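One way to quantify recall quality after the loop above (an added sketch, not part of the original script) is the overlap (1/N) * sum_i X_i * pattern_i between the final state and the stored pattern, which is 1 for perfect recall and -1 for the inverted attractor:
```python
# Overlap between the relaxed state X and the stored pattern.
overlap = np.dot(X, pattern[0]) / N
print(f"overlap with stored pattern: {overlap:.2f}")
```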
|
{
"source": "jeremyforest/nmc-videos",
"score": 3
}
|
#### File: jeremyforest/nmc-videos/get_yt_links_from_airtable.py
```python
import dotenv
import os
from pyairtable import Table
import pandas as pd
def load_airtable(key, base_id, table_name):
at = Table(key, base_id, table_name)
return at
def get_info(airtable_tab):
yt_links, emails = [], []
for record in range(len(airtable_tab.all())):
talk = airtable_tab.all()[record]
yt_link = talk.get('fields').get('youtube_url')
email = talk.get('fields').get('email')
yt_links.append(yt_link)
emails.append(email)
return yt_links, emails
def save_to_df(yt_links, emails):
df = pd.DataFrame({'emails': emails,
'youtube_url': yt_links})
# people can submit multiple times and with different videos - keep the
# last one only
df.drop_duplicates(keep='last', inplace=True)
df.to_csv('videos/data.csv', index=False)
def update_df(yt_links, emails):
file_path = 'videos/data.csv'
df = pd.read_csv(file_path)
df_new = pd.DataFrame({'emails': emails,
'youtube_url': yt_links})
df_new.drop_duplicates(keep='last', inplace=True)
df = pd.merge(df, df_new, how='right')
df.to_csv('videos/data.csv', index=False)
if __name__ == '__main__':
dotenv.load_dotenv()
AT_BASE_ID = os.getenv('AT_BASE_ID')
AT_API_KEY = os.getenv('AT_API_KEY')
TABLE_NAME = 'uploads'
upload_tab = load_airtable(AT_API_KEY, AT_BASE_ID, TABLE_NAME)
yt_links, emails = get_info(upload_tab)
if os.path.exists('videos/data.csv'):
update_df(yt_links, emails)
else:
save_to_df(yt_links, emails)
```
|
{
"source": "jeremyforest/whole_optic_analysis_pipeline",
"score": 3
}
|
#### File: whole_optic_analysis_pipeline/OPTIMAS/classical_ephy.py
```python
import json
import numpy as np
import matplotlib.pyplot as plt
def import_ephy_data(main_folder_path, experiment):
data = np.load(f'{main_folder_path}experiment_{experiment}/voltage.npy')
data.shape
new_data = np.zeros((8,5000))
new_data[0].shape
for channel in range(data.shape[1]):
for recording in range(data.shape[0]):
if recording == 0:
df = data[recording][channel]
else:
df = np.hstack((data[recording][channel], df))
new_data[channel] = df
return new_data
def plot_ephy_data(data):
x = list(range(data.shape[1]))
fig, axs = plt.subplots(2,4, sharex=True, sharey=True)
axs = axs.ravel()
for channel in range(data.shape[0]):
# plt.plot(x, new_data[channel])
axs[channel].plot(x, data[channel])
axs[channel].set_title(f'channel {channel+1}')
def load_json_timing_data(path_output, experiment):
"""
This function allows to load the json file where the timings of the laser, dlp, camera
and ephys are stored.
Input: path of the json file and the experiment folder number as input.
Output: timings, each in their own variable
"""
with open(f'{path_output}experiment_{experiment}_timings.json') as file:
timings_data = dict(json.load(file))
timings_dlp_on = timings_data['dlp']['on']
timings_dlp_off = timings_data['dlp']['off']
timings_laser_on = timings_data['laser']['on']
timings_laser_off = timings_data['laser']['off']
timings_ephy_on = timings_data['ephy']['on']
timings_ephy_off = timings_data['ephy']['off']
timings_ephy_stim_on = timings_data['ephy_stim']['on']
timings_ephy_stim_off = timings_data['ephy_stim']['off']
timings_camera_images = [] ## timings of the images as per the dcam api
for images_timings in timings_data['camera']:
for image_timing in images_timings:
timings_camera_images.append(image_timing)
timings_camera_images_bis = timings_data['camera_bis'] ## timing of the first frame as per manual clock
print(f'timing difference between first image metadata and manual log is \
{(timings_camera_images[0] - timings_camera_images_bis[0]) * 1000}') ## ms difference
return timings_dlp_on, timings_dlp_off, timings_camera_images, timings_laser_on, timings_laser_off, timings_camera_images_bis
if __name__ == "__main__":
main_folder_path = '/home/jeremy/Documents/Postdoc/Projects/Memory/Computational_Principles_of_Memory/optopatch/data/2020_05_25/'
experiment = 4
data = import_ephy_data(main_folder_path=main_folder_path, experiment=experiment)
plot_ephy_data(data = data)
```
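A hedged usage sketch mirroring the `__main__` block above; the data folder path is a placeholder:
```python
import matplotlib.pyplot as plt

data = import_ephy_data("/path/to/data/2020_05_25/", experiment=4)
plot_ephy_data(data)
plt.show()
```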
#### File: whole_optic_analysis_pipeline/OPTIMAS/time_serie_pixel_analysis.py
```python
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import cv2
import os
import pickle
import json
from roipoly.roipoly import RoiPoly, MultiRoi
import argparse
import datetime
import time
from threading import Timer, Thread
from OPTIMAS.utils.files_handling import images_list, read_fps, \
load_timing_data, read_image_size
def input_path():
user_input = input('input neurons png file or ROI mask file:')
user_input_ok = True
return user_input
def time_serie(input_data_folder, experiment, data_type='raw',
timing=True, draw_laser_timings=True, draw_dlp_timings=True,
time_start=0, time_stop=int(-1)):
data_export = [] # placeholder for saving data at the end
### PATHS ###
if data_type == 'raw':
path_input_images = f"{input_data_folder}/{experiment}/images/"
elif data_type == 'denoised':
path_input_images = f"{input_data_folder}/experiment_{experiment}/denoised_images/"
path_output = f"{input_data_folder}/{experiment}/"
# if roi comes from manual countours
roi_file_path = f'{path_output}/roi_masks.txt'
images = images_list(path_input_images)
#############
### ROIS ####
#############
## TODO: use the image from the dlp to do the ROIs ? what if not all rois
## on it ? Use a global Roi file ? How to define it ?
if os.path.isfile(roi_file_path):
print('ROI file exists')
with open(roi_file_path, "rb") as file:
rois = pickle.load(file)
else:
print('ROI file doesnt exists')
w,h = read_image_size(f'{input_data_folder}/{experiment}/{experiment}_info.json')
# TODO: ref image for defining rois, need to think about what it can be. The best would be a DIC image ?
# use an automatic segmentation algorithm if possible with a DIC image ?
neurons_png = f'{input_data_folder}/{experiment}/neurons.png'
if os.path.exists(neurons_png):
print('neurons file for delimiting ROI exists')
image = cv2.imread(f'{input_data_folder}/{experiment}/neurons.png',
cv2.IMREAD_GRAYSCALE)
# from scipy.ndimage import convolve
# image_downsampled = convolve(image,
# np.array([[0.25,0.25],[0.25,0.25]]))[:image.shape[0]:2,:image.shape[1]:2]
# image = image_downsampled
#####################################################
##### TO CHANGE IN UPDATED PIPELINE VERSION #########
# else:
# print('no neuron image file')
# pass
# # print('''need user input for ROI file path: it needs to be an image
# from the image folder where you can see the neurons''')
# user_input = [None]
# global user_input_ok
# user_input_ok = False
# thread = Thread(target=input_path, daemon=False)
# thread.start()
# time.sleep(15)
# if user_input_ok:
# thread.join()
# print(user_input)
# else:
# thread._stop()
# if ROI_path.endswith('.txt'):
# with open(ROI_path, "rb") as file:
# rois = pickle.load(file)
# elif ROI_path.endswith('.png'):
# image = cv2.imread(ROI_path, cv2.IMREAD_GRAYSCALE)
# cv2.imwrite(f'{input_data_folder}/{experiment}/neurons.png', image)
# if image.size == 0:
# print('error with neuron image, cannot define ROIs')
# else:
# image = cv2.resize(image, (w,h))
######################################################################
# Show the image
fig = plt.figure()
plt.imshow(image, interpolation='none', cmap='gray')
plt.title("Click on the button to add a new ROI")
# Draw multiple ROI
multiroi_named = MultiRoi(roi_names=['Background', 'ROI 1', 'ROI 2', 'ROI 3', 'ROI 4', 'ROI 5',
'ROI 6', 'ROI 7', 'ROI 8', 'ROI 9', 'ROI 10', 'ROI 11',
'ROI 12', 'ROI 13', 'ROI 14', 'ROI 15', 'ROI 16', 'ROI 17'])
# Draw all ROIs
plt.imshow(image, interpolation='none', cmap='gray')
rois = []
for name, roi in multiroi_named.rois.items():
roi.display_roi()
# roi.display_mean(image)
mask = roi.get_mask(image)
rois.append([name, mask])
plt.legend()
plt.savefig(f'{path_output}/rois.png')
plt.close()
## writing rois to disk
with open(roi_file_path, "wb") as file:
pickle.dump(rois, file)
rois_signal = []
## not the most optimized, would be better to log every roi in each image than load every image multiple times
for roi in rois:
tmp_time_serie_roi = []
for image in tqdm(images):
img = cv2.imread(f'{path_input_images}/{image}',cv2.IMREAD_GRAYSCALE)
mask = roi[1]
#####################################################
##### TO CHANGE IN UPDATED PIPELINE VERSION #########
# roi_average = np.mean(img[mask.T])
roi_average = np.mean(img[mask])
###################################################################
tmp_time_serie_roi.append(roi_average)
rois_signal.append(tmp_time_serie_roi)
print ('generating data plots')
### TIMING DATA ###
json_timings_file = f'{input_data_folder}/{experiment}/{experiment}_timings.json'
json_info_file = f'{input_data_folder}/{experiment}/{experiment}_info.json'
if timing:
timings_dlp_on, \
timings_dlp_off, \
timings_camera_images, \
timings_laser_on, \
timings_laser_off, \
timings_camera_images_bis = load_timing_data(json_timings_file)
# timings perf_counter equivalent to unix timestamp
# timings_camera_images_bis.append(660)
# TODO: handle multiple dlp on/off within each experiment
if len(timings_dlp_on)>1:
print('more than 1 timing from DLP ON')
timings_dlp_on = [timings_dlp_on[0]]
## for diagnostic plot for the times of the camera dcam api metadata
# plt.plot(np.array(timings_camera_images), range(0,len(timings_camera_images)))
## use the timings metadata of the dcap api ## for now broken, replaced with manual incrementation
#timings_camera_images_new = timings_camera_images[time_init : time_end] ## back to this when solved problem of metadata from the dcam api
timings_camera_images_new = []
timings_camera_images_new.append(timings_camera_images[0])
for nb_of_times in range(1,len(timings_camera_images)):
fps = read_fps(json_info_file) ## to get for each expe
timings_camera_images_new.append(timings_camera_images[0] + (1/fps * nb_of_times))
## diagnostic
# plt.plot(np.array(timings_camera_images_new), range(0,len(timings_camera_images_new)))
timings_camera_images = timings_camera_images_new
print(f'number of camera images: {len(timings_camera_images)}')
print(f'number of points in the each roi signal: {len(rois_signal[0])}')
assert len(images) == len(timings_camera_images), 'not the same number of images and images timing metadata' ## will not work when working on subset of the data
## to put both dlp, laser and camera timings in the same format
## putting dlp and laser time refs back into camera ref
#timings_camera_images = [timings_camera_images[i]*10**9 for i in range(len(timings_camera_images))] ##for dcam api meta
_timings_dlp_on = timings_camera_images[0] + (timings_camera_images[0] - timings_camera_images_bis[0]) + (timings_dlp_on[0] - timings_camera_images_bis[1])/1000
_timings_dlp_off = timings_camera_images[0] + (timings_camera_images[0] - timings_camera_images_bis[0]) + (timings_dlp_off[0] - timings_camera_images_bis[1])/1000
##########################################################################
#################### TO UPDATE #################### ####################
_timings_laser_on = timings_camera_images[0] + (timings_camera_images[0] - timings_camera_images_bis[0]) + (timings_laser_on[0] - timings_camera_images_bis[1])/1000
# _timings_laser_on = 0
_timings_laser_off = timings_camera_images[0] + (timings_camera_images[0] - timings_camera_images_bis[0]) + (timings_laser_off[0] - timings_camera_images_bis[1])/1000
# _timings_laser_off = 0
################################################################################
################################################################################
timings_dlp_on = []
timings_dlp_off = []
timings_laser_on = []
timings_laser_off = []
timings_dlp_on.append(_timings_dlp_on)
timings_dlp_off.append(_timings_dlp_off)
timings_laser_on.append(_timings_laser_on)
timings_laser_off.append(_timings_laser_off)
### if different length between timings and images
# cropped_rois_signal = []
# for roi_signal in rois_signal:
# cropped_rois_signal.append(roi_signal[0:len(timings_camera_images)])
# len(cropped_rois_signal[0])
# rois_signal = cropped_rois_signal
time_sorted_rois_signal = []
x_axis_sorted_values = []
for i in range(len(rois_signal)):
data = np.vstack((timings_camera_images, rois_signal[i]))
data.shape
time_sorted_rois_signal.append(data[1][data[0,:].argsort()])
x_axis_sorted_values = np.array(data[0][data[0,:].argsort()])
x_axis = np.array([(x_axis_sorted_values[frame] - x_axis_sorted_values[0]) for frame in range(len(x_axis_sorted_values))])
## diagnostic plot: time between 2 images
times_between_two_images = []
for frame in range(len(x_axis)-1):
times_between_two_images.append((x_axis[frame+1] - x_axis[frame]))
times_between_two_images.append(times_between_two_images[-1])
nb_images = np.arange(0,len(data[1]), 1)
#plt.plot(nb_images, np.array(times_between_two_images))
rois_signal = time_sorted_rois_signal
## for baseline calculation:
if timing:
# find laser_on index on x_axis
takeClosest = lambda num,collection:min(collection,key=lambda x:abs(x-num))
closest_index_laser_on_on_x = takeClosest(timings_laser_on[0]/10**9, x_axis)
index_laser_on_for_baseline_calc = np.where(x_axis == closest_index_laser_on_on_x)
# find dlp_on index on x_axis
closest_index_dlp_on_on_x = takeClosest(timings_dlp_on[0]/10**9, x_axis)
index_dlp_on_for_baseline_calc = np.where(x_axis == closest_index_dlp_on_on_x)
## baseline starting and ending
## need to be timed on the frames after laser activation I think
baseline_starting_frame = index_laser_on_for_baseline_calc[0][0] + 2
#TODO: need to be adjusted to be up to the frame-1 of dlp activation ?
baseline_frame_number = 10
else :
baseline_starting_frame = 1000
baseline_frame_number = 10
### GRAPHS ###
# calculation of F(t)
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
# if timings is False no x_axis has been defined
if timing == False:
x_axis = np.arange(0,len(images), 1)
for i in range(len(rois_signal)):
plt.plot(x_axis, np.array(rois_signal[i]),
color = colors[i], label = rois[i][0], alpha=0.7)
if timing:
for i in range(len(timings_dlp_on)):
if draw_dlp_timings:
plt.axvspan(timings_dlp_on[i] - x_axis_sorted_values[0],
timings_dlp_off[i] - x_axis_sorted_values[0],
color='blue', alpha=0.05)
if draw_laser_timings:
plt.axvspan(timings_laser_on[i] - x_axis_sorted_values[0],
timings_laser_off[i] - x_axis_sorted_values[0],
color='red', alpha=0.05)
plt.legend()
plt.title('Pixel value evolution with frames')
plt.ylabel('Value')
if timing == False:
plt.savefig(f'{path_output}pixel_time_serie_whole_data.svg')
#plt.savefig(path_output+'pixel_time_serie_whole_data.png')
elif timing == True:
plt.savefig(f'{path_output}pixel_time_serie_whole_data_{time_start}_{time_stop}.svg')
#plt.savefig(f'{path_output}pixel_time_serie_whole_data_{args.time[0]}_{args.time[1]}.png')
plt.close()
## calculation of F(t) - background(t)
for i in np.arange(1, len(rois_signal), 1):
plt.plot(x_axis, np.array(rois_signal[0])-np.array(rois_signal[i]), color = colors[i], label = rois[i][0], alpha=0.7)
if timing:
for i in range(len(timings_dlp_on)):
if draw_dlp_timings:
plt.axvspan(timings_dlp_on[i] - x_axis_sorted_values[0],timings_dlp_off[i] - x_axis_sorted_values[0], color='blue', alpha=0.05)
if draw_laser_timings:
plt.axvspan(timings_laser_on[i] - x_axis_sorted_values[0],timings_laser_off[i] - x_axis_sorted_values[0], color='red', alpha=0.05)
    plt.title('Fluorescence with background fluorescence subtracted (per frame)')
plt.ylabel('Value')
plt.legend()
if timing == False:
plt.savefig(f'{path_output}pixel_time_serie_with_backg_substraction_whole_data.svg')
#plt.savefig(f'{path_output}pixel_time_serie_with_backg_substraction_whole_data.png')
elif timing == True:
plt.savefig(f'{path_output}pixel_time_serie_with_backg_substraction_{time_start}_{time_stop}.svg')
#plt.savefig(f'{path_output}pixel_time_serie_with_backg_substraction_{args.time[0]}_{args.time[1]}.png')
plt.close()
## calculation of percent delta F/F0
times = []
baseline_background = np.mean(np.array(rois_signal[0][baseline_starting_frame:baseline_starting_frame+baseline_frame_number])) ## temporal average
if baseline_background == 0.0:
baseline_background = 1.0
dF_over_F0_background = ((np.array(rois_signal[0]) - baseline_background) / baseline_background)
percent_dF_over_F0_background = dF_over_F0_background*100
# plt.plot(x_axis, percent_dF_over_F0_background, color= 'b', label = rois[0][0], alpha=0.7)
if timing:
for i in range(len(timings_dlp_on)):
if draw_dlp_timings:
plt.axvspan(timings_dlp_on[i] - x_axis_sorted_values[0],timings_dlp_off[i] - x_axis_sorted_values[0], color='blue', alpha=0.05)
if draw_laser_timings:
plt.axvspan(timings_laser_on[i] - x_axis_sorted_values[0],timings_laser_off[i] - x_axis_sorted_values[0], color='red', alpha=0.05)
for i in np.arange(1, len(rois_signal), 1):
_times = []
baseline_soma = np.mean(np.array(rois_signal[i][baseline_starting_frame:baseline_starting_frame + baseline_frame_number]))
if baseline_soma == 0.0:
baseline_soma = 1.0
dF_over_F0_soma = ((np.array(rois_signal[i]) - baseline_soma) / baseline_soma) - dF_over_F0_background
percent_dF_over_F0_soma = dF_over_F0_soma * 100
# plt.ylim([-5,35])
plt.plot(x_axis, percent_dF_over_F0_soma, color = colors[i], label = rois[i][0], alpha=0.7)
data_export.append(percent_dF_over_F0_soma.tolist())
if timing:
dlp_on_value_on_x = 0
dlp_off_value_on_x = 0
laser_off_value_on_x = 0
laser_on_value_on_x = 0
for i in range(len(timings_dlp_on)):
if draw_dlp_timings:
dlp_on_value_on_x = timings_dlp_on[i] - x_axis_sorted_values[0]
dlp_off_value_on_x = timings_dlp_off[i] - x_axis_sorted_values[0]
plt.axvspan(dlp_on_value_on_x, dlp_off_value_on_x, color='blue', alpha=0.05)
if draw_laser_timings:
laser_on_value_on_x = timings_laser_on[i] - x_axis_sorted_values[0]
laser_off_value_on_x = timings_laser_off[i] - x_axis_sorted_values[0]
plt.axvspan(laser_on_value_on_x, laser_off_value_on_x, color='red', alpha=0.05)
_times = dlp_on_value_on_x, dlp_off_value_on_x , laser_on_value_on_x, laser_off_value_on_x
times.append(_times)
plt.ylabel(r'$\%$ $\Delta$ F/F0')
plt.legend()
if timing == False:
plt.savefig(f'{path_output}delta_F_over_F0_whole_data.svg')
#plt.savefig(f'{path_output}delta_F_over_F0__whole_data.png')
elif timing == True:
plt.savefig(f'{path_output}delta_F_over_F0_{time_start}_{time_stop}.svg')
#plt.savefig(f'{path_output}delta_F_over_F0_{args.time[0]}_{args.time[1]}.png')
# saving data
data_export.append(x_axis.tolist())
data_export.append(times)
data_export = np.array(data_export)
    ## data has format [[ROI1], [ROI2], ..., [ROIn], [X_axis], [[timings_dlp_on(ROI1), timings_dlp_off(ROI1), timings_laser_on(ROI1), timings_laser_off(ROI1)], [...]]]
np.save(f'{path_output}dF_over_F0_backcorrect.npy', data_export)
from scipy.io import savemat
np.save(f'{path_output}dF_over_F0_backcorrect_ROIs_only.npy', data_export[0])
matlab_dict = {'ROI': data_export[0], 'frames': data_export[1]}
savemat(f'{path_output}dF_over_F0_backcorrect_ROIs_only.mat', matlab_dict)
plt.close()
## ephys-type graph for percent delta F/F0
# from matplotlib_scalebar.scalebar import ScaleBar
fig = plt.figure(frameon=False)
fig, axs = plt.subplots(len(rois_signal), 1)
fig.subplots_adjust(hspace=0)
baseline_roi_background = np.mean(np.array(rois_signal[0][baseline_starting_frame:baseline_starting_frame + baseline_frame_number]))
if baseline_roi_background == 0.0:
baseline_roi_background = 1.0
# axs[0].set_ylim([-5,150])
axs[0].plot(x_axis, percent_dF_over_F0_background, color = 'b', label = rois[0][0], alpha=0.7)
axs[0].set_axis_off()
if timing:
for j in range(len(timings_dlp_on)):
if draw_dlp_timings:
axs[0].axvspan(timings_dlp_on[j] - x_axis_sorted_values[0],timings_dlp_off[j] - x_axis_sorted_values[0], color='blue', alpha=0.05)
if draw_laser_timings:
axs[0].axvspan(timings_laser_on[j] - x_axis_sorted_values[0],timings_laser_off[j] - x_axis_sorted_values[0], color='red', alpha=0.05)
# equivalent_10ms = 0.1
# scalebar = ScaleBar(0.1, 'ms', frameon=False, location='lower left', length_fraction = equivalent_10ms)
# plt.gca().add_artist(scalebar)
# axs[0].annotate('', xy=(0, 0), xycoords='axes fraction', xytext=(0, .2), textcoords='axes fraction',
# ha='center', va='center', arrowprops=dict(arrowstyle="-", color='black'))
# axs[0].annotate('', xy=(0, 0), xycoords='axes fraction', xytext=(longueur_5ms, 0), textcoords='axes fraction',
# ha='center', va='center', arrowprops=dict(arrowstyle="-", color='black'))
for i in np.arange(1, len(rois_signal), 1):
dF_over_F0_roi = ((np.array(rois_signal[i]) - baseline_roi_background) / baseline_roi_background) - dF_over_F0_background
percent_dF_over_F0_roi = dF_over_F0_roi * 100
axs[i].set_ylim([-5,150])
axs[i].plot(x_axis, percent_dF_over_F0_roi, color = colors[i], label = rois[i][0], alpha=0.7)
axs[i].set_axis_off()
if timing:
for j in range(len(timings_dlp_on)):
if draw_dlp_timings:
axs[i].axvspan(timings_dlp_on[j] - x_axis_sorted_values[0],timings_dlp_off[j] - x_axis_sorted_values[0], color='blue', alpha=0.05)
if draw_laser_timings:
axs[i].axvspan(timings_laser_on[j] - x_axis_sorted_values[0],timings_laser_off[j] - x_axis_sorted_values[0], color='red', alpha=0.05)
if timing == False:
plt.savefig(f'{path_output}delta_F_over_F0_ephys_style_whole_data.svg')
#plt.savefig(f'{path_output}delta_F_over_F0_ephys_style_whole_data.png')
elif timing == True:
plt.savefig(f'{path_output}delta_F_over_F0_ephys_style_{time_start}_{time_stop}.svg')
#plt.savefig(f'{path_output}delta_F_over_F0_ephys_style_{args.time[0]}_{args.time[1]}.png')
plt.close()
if __name__ == "__main__":
# experiment = 'experiment_132'
# input_data_folder = f'/mnt/home_nas/jeremy/Recherches/Postdoc/Projects/Memory/Computational_Principles_of_Memory/optopatch/data/2020_03_02'
experiment = 'experiment_41'
input_data_folder = f'/home/jeremy/Desktop/2020_11_23'
# experiment = 'experiment_95'
# input_data_folder = f'/media/jeremy/Seagate Portable Drive/data/2020_11_05'
time_serie(input_data_folder, experiment, data_type='raw',
timing=True, draw_laser_timings=True, draw_dlp_timings=True,
time_start=0, time_stop=int(-1))
# time_serie(input_data_folder, experiment, data_type='raw',
# timing=False, draw_laser_timings=False, draw_dlp_timings=False,
# time_start=0, time_stop=int(-1))
```
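The ΔF/F0 computation above (temporal baseline average F0, background correction, scaling to percent) can be condensed into a few lines. This is a minimal standalone sketch of the same kind of arithmetic with made-up array names, not code taken from the pipeline itself:

```python
import numpy as np

def percent_dF_over_F0(roi_trace, background_trace, baseline_start, baseline_len):
    """Background-corrected %dF/F0 (illustrative sketch)."""
    # F0 is the temporal average over the baseline window, guarded against zero
    f0_bg = np.mean(background_trace[baseline_start:baseline_start + baseline_len]) or 1.0
    f0_roi = np.mean(roi_trace[baseline_start:baseline_start + baseline_len]) or 1.0
    dff_bg = (np.asarray(background_trace) - f0_bg) / f0_bg
    dff_roi = (np.asarray(roi_trace) - f0_roi) / f0_roi
    # subtract the background dF/F0, then express as a percentage
    return (dff_roi - dff_bg) * 100.0

roi = np.array([10., 10., 12., 15., 11.])
background = np.array([2., 2., 2., 2., 2.])
print(percent_dF_over_F0(roi, background, baseline_start=0, baseline_len=2))
# -> [ 0.  0. 20. 50. 10.]
```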
#### File: whole_optic_analysis_pipeline/tests/test_merge.py
```python
import os
import numpy as np
from OPTIMAS.merge_npy import merge_npy
from OPTIMAS.utils.files_handling import images_list, read_image_size
def test_merge_npy():
input_data_folder = 'tests/test_data/2020_09_03'
experiment = 'experiment_1'
output_path = f'{input_data_folder}/{experiment}'
merge_npy(input_data_folder, output_path, experiment)
merged_npy_path = f"{output_path}/raw_data.npy"
merged_npy = np.load(merged_npy_path)
json_file_path = f"{input_data_folder}/{experiment}/{experiment}_info.json"
image_x, image_y = read_image_size(json_file_path)
os.remove(merged_npy_path)
assert merged_npy.shape[0] == len(os.listdir(
f"{input_data_folder}/{experiment}/raw_data"))
assert merged_npy.shape[1] == image_x
assert merged_npy.shape[2] == image_y
```
|
{
"source": "jeremyforest/whole_optic_gui",
"score": 3
}
|
#### File: controller/controller/device.py
```python
from numpy import array
import time
__all__ = ['Device']
class Device(object):
def __init__(self):
pass
def position(self, axis):
'''
Current position along an axis.
Parameters
----------
axis : axis number
Returns
-------
The current position of the device axis in um.
'''
return 0. # fake
def absolute_move(self, x, axis):
'''
Moves the device axis to position x.
Parameters
----------
axis: axis number
x : target position in um.
'''
pass
def relative_move(self, x, axis):
'''
Moves the device axis by relative amount x in um.
Parameters
----------
axis: axis number
x : position shift in um.
'''
self.absolute_move(self.position(axis)+x, axis)
def position_group(self, axes):
'''
Current position along a group of axes.
Parameters
----------
axes : list of axis numbers
Returns
-------
The current position of the device axis in um (vector).
'''
return array([[self.position(axis) for axis in axes]])
def absolute_move_group(self, x, axes):
'''
Moves the device group of axes to position x.
Parameters
----------
axes : list of axis numbers
x : target position in um (vector or list).
'''
for xi,axis in zip(x,axes):
self.absolute_move(xi, axis)
def relative_move_group(self, x, axes):
'''
Moves the device group of axes by relative amount x in um.
Parameters
----------
axes : list of axis numbers
x : position shift in um (vector or list).
'''
self.absolute_move_group(array(self.position_group(axes))+array(x), axes)
def stop(self, axis):
"""
Stops current movements.
"""
pass
def wait_until_still(self, axis = None):
"""
Waits until motors have stopped.
Parameters
----------
axes : list of axis numbers
"""
previous_position = self.position(axis)
new_position = None
while array(previous_position != new_position).any():
previous_position = new_position
new_position = self.position(axis)
            time.sleep(0.1)  # 100 ms
```
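Device is a base class: a concrete hardware driver only needs to override position() and absolute_move(); the relative moves, group moves, and wait_until_still() are inherited. A minimal sketch of such a subclass (the FakeStage name and the import path are assumptions for illustration, not part of the project):

```python
from controller.device import Device  # import path is an assumption based on the repo layout

class FakeStage(Device):
    """In-memory stand-in for a motorized stage, for illustration only."""
    def __init__(self):
        self._positions = {}              # axis number -> position in um

    def position(self, axis):
        # last commanded position, 0.0 if the axis was never moved
        return self._positions.get(axis, 0.0)

    def absolute_move(self, x, axis):
        # a real driver would send the motion command to the controller here
        self._positions[axis] = float(x)

stage = FakeStage()
stage.relative_move(10.0, axis=1)      # inherited: absolute_move(position + x)
stage.wait_until_still(axis=1)         # inherited: polls position() until it stops changing
print(stage.position_group([1, 2]))    # -> [[10.  0.]]
```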
#### File: OPTIMAQS/utils/custom_sleep_function.py
```python
from PyQt5.QtWidgets import QApplication
import time
def custom_sleep_function(ms):
c = time.perf_counter()
while (time.perf_counter() - c)*1000 < ms:
QApplication.processEvents() #### this should probably be put in its own thread to avoid forcing gui update
return (time.perf_counter() - c)*1000
```
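The point of this helper is that, unlike time.sleep(), it keeps pumping the Qt event loop while waiting, so the GUI stays responsive during timed hardware sequences. A usage sketch (the devices are stand-in lambdas, and the import path follows this repository's layout):

```python
import sys
from PyQt5.QtWidgets import QApplication
from OPTIMAQS.utils.custom_sleep_function import custom_sleep_function

app = QApplication(sys.argv)  # an application instance must exist for processEvents()

def timed_pulse(turn_on, turn_off, duration_ms=500):
    """Switch something on, busy-wait duration_ms while the GUI stays live, switch off."""
    turn_on()
    elapsed = custom_sleep_function(duration_ms)  # processes Qt events while waiting
    turn_off()
    return elapsed

print(timed_pulse(lambda: print("laser on"), lambda: print("laser off")))
```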
#### File: view/electrophysiology/electrophysiology_gui.py
```python
from PyQt5 import uic
from PyQt5.QtCore import Qt, QRunnable, pyqtSlot, pyqtSignal, QThreadPool, QObject, QTimer, QEventLoop
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QVBoxLayout, QWidget, QSlider, QFileDialog, \
QMessageBox, QProgressBar, QGraphicsScene, QInputDialog, QDialog
from PyQt5.QtGui import QImage, QPixmap, QPen, QPainter
from PyQt5.QtTest import QTest
import pyqtgraph as pg
## general imports
import sys
import PyDAQmx
import numpy as np
import time
## Custom imports
from OPTIMAQS.utils.json_functions import jsonFunctions
from OPTIMAQS.utils.signals import Signals
from OPTIMAQS.utils.custom_sleep_function import custom_sleep_function  # used by ephy_stim()
class ElectrophysiologyGui(QWidget):
def __init__(self):
super(ElectrophysiologyGui, self).__init__()
uic.loadUi('OPTIMAQS/view/electrophysiology/electrophysiology.ui', self)
self.show()
self.import_electrophysiology_model()
self.initialize_electrophysiology_parameters()
self.actions()
## paths and timings
self.path = jsonFunctions.open_json('OPTIMAQS/config_files/last_experiment.json')
self.timings_logfile_path = self.path + '/experiment_' + self.path[-1] + '_timings.json'
self.timings_logfile_dict = {}
self.timings_logfile_dict['ephy'] = {}
self.timings_logfile_dict['ephy']['on'] = []
self.timings_logfile_dict['ephy']['off'] = []
self.timings_logfile_dict['ephy_stim'] = {}
self.timings_logfile_dict['ephy_stim']['on'] = []
self.timings_logfile_dict['ephy_stim']['off'] = []
## ephys data
self.ephy_data = []
self.end_expe = False
self.channel_number = [2,6,10,14,16,18,20,22]
self.sampling_rate = 1000
## timings
self.perf_counter_init = jsonFunctions.open_json('OPTIMAQS/config_files/perf_counter_init.json')
def import_electrophysiology_model(self):
"""
import laser model-type script
"""
from OPTIMAQS.model.electrophysiology.electrophysiology import StimVoltage, ReadingVoltage
def initialize_electrophysiology_parameters(self):
"""
Initialize all the laser variables
"""
## custom signals
self.ephy_signal = Signals()
self.ephy_signal.start.connect(self.start_recording)
self.ephy_signal.finished.connect(self.stop_recording)
self.recording = False
self.ephy_graph = False
def actions(self):
"""
Define actions for buttons and items.
"""
self.record_trace_button.clicked.connect(self.init_voltage)
self.display_trace_button.clicked.connect(self.graph_voltage)
self.stimulation_button.clicked.connect(self.ephy_stim)
self.close_graph_button.clicked.connect(self.close_graph_windows)
def init_voltage(self):
## reading voltage
self.analog_input = PyDAQmx.Task()
self.read = PyDAQmx.int32()
self.data_ephy = np.zeros((len(self.channel_number), self.sampling_rate), dtype=np.float64)
for channel in self.channel_number:
self.analog_input.CreateAIVoltageChan(f'Dev1/ai{channel}',
"",
PyDAQmx.DAQmx_Val_Cfg_Default,
-10.0,
10.0,
PyDAQmx.DAQmx_Val_Volts,
None)
self.analog_input.CfgSampClkTiming("",
self.sampling_rate, ## sampling rate
PyDAQmx.DAQmx_Val_Rising, ## active edge
PyDAQmx.DAQmx_Val_FiniteSamps, ## sample mode
1000) ## nb of sample to acquire
self.analog_input.StartTask()
## stimulating
self.analog_output = PyDAQmx.Task()
self.analog_output.CreateAOVoltageChan("Dev1/ao0",
"",
-10.0,
10.0,
PyDAQmx.DAQmx_Val_Volts,
None)
self.analog_output.CfgSampClkTiming("",
self.sampling_rate, ## sampling rate
PyDAQmx.DAQmx_Val_Rising, ## active edge
PyDAQmx.DAQmx_Val_ContSamps, ## sample mode
1000) ## nb of sample to acquire
# self.analog_output.StartTask()
# self.pulse = np.zeros(1, dtype=np.uint8)
# self.write_digital_lines = PyDAQmx.Task()
# self.write_digital_lines.CreateDOChan("/Dev1/port0/line3","",PyDAQmx.DAQmx_Val_ChanForAllLines)
# self.write_digital_lines.StartTask()
def start_recording(self):
self.analog_input.ReadAnalogF64(self.sampling_rate, ## number of sample per channel
10.0, ## timeout in s
                                        PyDAQmx.DAQmx_Val_GroupByChannel, ## fillMode (interleave data acquisition or not?)
self.data_ephy, #The array to store read data into
self.data_ephy.shape[0]*self.data_ephy.shape[1], ## length of the data array
PyDAQmx.byref(self.read),None) ## total number of data points read per channel
print(f"Acquired {self.read.value} points")
self.analog_input.StopTask()
print(self.data_ephy.shape)
self.timings_logfile_dict['ephy']['on'].append((time.perf_counter() - self.perf_counter_init)*1000)
return self.data_ephy
def graph_voltage(self):
self.x = range(self.sampling_rate)
self.start_recording()
# colors = ['g', 'r', 'c', 'm', 'y', 'k', 'w']
# self.voltage_plot = pg.plot(self.x, self.data_ephy[0], pen='b')
# if self.data_ephy.shape[0] > 1:
# for channel, color in zip(range(self.data_ephy.shape[0]-1), colors):
# self.voltage_plot.plot(self.x, self.data_ephy[channel]+1, pen = color)
self.plot = pg.GraphicsWindow(title="Electrophysiology")
self.voltage_plot = self.plot.addPlot(title='Voltage')
self.curve0 = self.voltage_plot.plot(self.x, self.data_ephy[0], pen='b')
# self.curve1 = self.voltage_plot.plot(self.x, self.data_ephy[1], pen='g')
# self.curve2 = self.voltage_plot.plot(self.x, self.data_ephy[2], pen='r')
# self.curve3 = self.voltage_plot.plot(self.x, self.data_ephy[3], pen='c')
#
# self.curve4 = self.voltage_plot.plot(self.x, self.data_ephy[4], pen='m')
# self.curve5 = self.voltage_plot.plot(self.x, self.data_ephy[5], pen='y')
# self.curve6 = self.voltage_plot.plot(self.x, self.data_ephy[6], pen='k')
# self.curve7 = self.voltage_plot.plot(self.x, self.data_ephy[7], pen='w')
self.voltage_plot.addLegend()
self.voltage_plot.showGrid(x=True, y=True)
self.plot.setBackground('w')
pg.setConfigOptions(antialias=True)
self.ephy_graph = True
self.timer_ephy = QTimer()
self.timer_ephy.timeout.connect(self.update_plot_ephy)
self.timer_ephy.start(1000)
def update_plot_ephy(self):
if self.ephy_graph==False:
self.timer_ephy.stop()
self.start_recording()
# self.voltage_plot.enableAutoRange('xy', False)
# colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
# debug_trace()
# for channel, color in zip(range(self.data_ephy.shape[0]), colors):
# for channel in range(self.data_ephy.shape[0]):
# self.voltage_plot.setData(1000, self.data_ephy[channel])
# self.voltage_plot.plot(self.x, self.data_ephy[channel], clear=True, pen=color)
self.curve0.setData(self.x, self.data_ephy[0])
# self.curve1.setData(self.x, self.data_ephy[1])
# self.curve2.setData(self.x, self.data_ephy[2])
# self.curve3.setData(self.x, self.data_ephy[3])
#
# self.curve4.setData(self.x, self.data_ephy[4])
# self.curve5.setData(self.x, self.data_ephy[5])
# self.curve6.setData(self.x, self.data_ephy[6])
# self.curve7.setData(self.x, self.data_ephy[7])
def close_graph_windows(self):
print('closing ephy plot and stopping recording')
self.ephy_graph = False
self.timer_ephy.stop()
self.stop_recording()
self.plot.close()
def stop_recording(self):
print('ephy end signal received')
self.recording = False
self.analog_input.StopTask()
self.timings_logfile_dict['ephy']['off'].append((time.perf_counter() - self.perf_counter_init)*1000)
def save_ephy_data(self, data):
# np.save(file = f"{self.path}/voltage.npy", arr = data)
np.savetxt(f"{self.path}/voltage.txt", self.data_ephy)
def ephy_recording_thread(self):
if self.record_electrophysiological_trace_radioButton.isChecked():
while self.recording:
data = self.start_recording()
self.graph_voltage()
self.ephy_data.append(data)
self.save_ephy_data(self.ephy_data)
def start_stimulation(self):
print('stimulation start')
# set_voltage_value = 5.
self.stim_data = np.array([5]*1000)
self.stim_data[1:100] = 0
self.stim_data[900:1000] = 0
# self.stim_data = np.array([5])
n=1000
sampsPerChanWritten=PyDAQmx.int32()
# self.analog_output.WriteAnalogScalarF64(1, 10.0, set_voltage_value, None)
# self.analog_output.WriteAnalogF64(1000, 0, 10.0, PyDAQmx.DAQmx_Val_GroupByChannel, self.stim_data, PyDAQmx.byref(sampsPerChanWritten), None)
self.analog_output.WriteAnalogF64(n, 0, 10.0, PyDAQmx.DAQmx_Val_GroupByChannel, self.stim_data, PyDAQmx.byref(sampsPerChanWritten), None)
self.analog_output.StartTask()
self.timings_logfile_dict['ephy_stim']['on'].append((time.perf_counter() - self.perf_counter_init)*1000)
# self.pulse[0]=1
# self.write_digital_lines.WriteDigitalLines(1, 1, 5.0, PyDAQmx.DAQmx_Val_GroupByChannel, self.pulse, None, None)
def end_stimulation(self):
self.analog_output.StopTask()
self.timings_logfile_dict['ephy_stim']['off'].append((time.perf_counter() - self.perf_counter_init)*1000)
print('stimulation end')
# self.pulse[0]=0
# self.write_digital_lines.WriteDigitalLines(1, 1, 5.0, PyDAQmx.DAQmx_Val_GroupByChannel, self.pulse , None, None)
def ephy_stim(self):
self.start_stimulation()
custom_sleep_function(2500)
self.end_stimulation()
if __name__ == "__main__":
app = QApplication(sys.argv)
win = ElectrophysiologyGui()
win.showMaximized()
app.exit(app.exec_())
```
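The DAQ calls above follow the usual PyDAQmx flow: create a task, add an analog-input channel, configure the sample clock, start, then read. Stripped of the GUI, that flow looks roughly like the sketch below (the device name "Dev1/ai0" is an assumption, and actually running it requires an NI DAQ board):

```python
import numpy as np
import PyDAQmx

n_samples = 1000
data = np.zeros((1, n_samples), dtype=np.float64)
read = PyDAQmx.int32()

task = PyDAQmx.Task()
task.CreateAIVoltageChan("Dev1/ai0", "", PyDAQmx.DAQmx_Val_Cfg_Default,
                         -10.0, 10.0, PyDAQmx.DAQmx_Val_Volts, None)
task.CfgSampClkTiming("", 1000, PyDAQmx.DAQmx_Val_Rising,
                      PyDAQmx.DAQmx_Val_FiniteSamps, n_samples)
task.StartTask()
task.ReadAnalogF64(n_samples, 10.0, PyDAQmx.DAQmx_Val_GroupByChannel,
                   data, data.size, PyDAQmx.byref(read), None)
task.StopTask()
print(f"acquired {read.value} samples")
```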
|
{
"source": "jeremy-frank/advent-of-code",
"score": 4
}
|
#### File: 2020/day16/day16ab.py
```python
def load_ticket():
ticket = []
#datafile = 'input-day16-ticket-example'
datafile = 'input-day16-ticket'
with open(datafile, 'r') as input:
for line in input:
bits = line.strip().split(",")
for x in bits:
ticket.append(int(x))
return ticket
def load_rules():
# part1 - single list of all possible values
rule_full_range = []
# part2 - dictionary holding the individual rules
rules = {}
#datafile = 'input-day16-rules-example'
datafile = 'input-day16-rules'
with open(datafile, 'r') as input:
for line in input:
line = line.strip().replace(":", ",").replace(" or", ",")
items = line.split(",")
rule_range = []
for numrun in [items[1], items[2]]:
nums = numrun.split("-")
for x in range(int(nums[0]), int(nums[1]) + 1):
rule_range.append(x)
rule_full_range.append(x)
rules[items[0]] = rule_range
rule_full_range.sort()
return rules, set(rule_full_range)
def load_nearby_tickets():
nearby_tickets = []
#datafile = 'input-day16-nearby-tickets-example'
datafile = 'input-day16-nearby-tickets'
with open(datafile, 'r') as input:
for line in input:
bits = line.strip().split(",")
nearby_tickets.append([int(x) for x in bits])
return nearby_tickets
def part1(rule_range, nearby_tickets):
invalid_values = []
for ticket in nearby_tickets:
for val in ticket:
if val not in rule_range:
invalid_values.append(val)
return sum(invalid_values)
def validate_tickets(rule_range, nearby_tickets):
valid_tickets = []
for ticket in nearby_tickets:
valid = True
for val in ticket:
if val not in rule_range:
valid = False
if valid:
valid_tickets.append(ticket)
#else:
# print(f"Invalid ticket: {ticket}")
return valid_tickets
def process_tickets(rules, tickets, my_ticket):
# for each position, find all rules that could match it
pos_matches = {}
for pos in range(len(tickets[0])):
pos_matches[pos] = []
for rule in rules:
rule_range = rules[rule]
rule_match = True
for ticket in tickets:
if ticket[pos] not in rule_range:
rule_match = False
break
if rule_match:
print(f"{pos} {rule}")
pos_matches[pos].append(rule)
print(f"\n\npos_matches: {pos_matches}\n\n")
# narrow it down - figure out which position maps to what rule
pos_solution = {}
solved_rule = []
while len(pos_solution) < len(rules):
new_pos_matches = {}
for pos in pos_matches:
if len(pos_matches[pos]) == 1:
# found a solution! (add to pos_solution and not to new_pos_matches)
pos_solution[pos] = pos_matches[pos][0]
solved_rule.append(pos_matches[pos][0])
print(f"updated pos_solution: {pos_solution}")
elif len(pos_matches[pos]) == 0:
# shouldn't ever happen
print("ERROR")
else:
# no solution yet, so add anything that isn't yet solved to new_pos_matches
new_pos_matches[pos] = []
for item in pos_matches[pos]:
if item not in solved_rule:
new_pos_matches[pos].append(item)
pos_matches = new_pos_matches
# print out the full position:rule mapping
print("\n")
for x in range(len(pos_solution)):
print(f"{x}: {pos_solution[x]}")
# calculate the solution
print("\n")
answer = 1
for pos in pos_solution:
if "departure" in pos_solution[pos]:
print(f"{pos} - {pos_solution[pos]}, ticket value {my_ticket[pos]}")
answer *= my_ticket[pos]
return answer
if __name__ == '__main__':
my_ticket = load_ticket()
print(f"my_ticket: {my_ticket} \n")
rules, rule_range = load_rules()
print(f"rules: {rules} \n")
print(f"rule_range: {rule_range} \n")
nearby_tickets = load_nearby_tickets()
print(f"nearby_tickets: {nearby_tickets} \n")
results1 = part1(rule_range, nearby_tickets)
valid_tickets = validate_tickets(rule_range, nearby_tickets)
results2 = process_tickets(rules, valid_tickets, my_ticket)
print(f"\nPart 1 - {results1}")
print(f"Part 2 - {results2}\n")
```
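The narrowing loop in process_tickets() is a simple constraint elimination: lock in any position that has exactly one candidate rule, remove that rule from every other position, and repeat. A toy, self-contained illustration of the same idea (the field names are made up):

```python
# Toy version of the elimination step in process_tickets()
pos_matches = {
    0: ["row", "class"],
    1: ["class"],
    2: ["row", "class", "seat"],
}

solution = {}
while pos_matches:
    # find a position with a single remaining candidate rule
    pos, (rule,) = next((p, c) for p, c in pos_matches.items() if len(c) == 1)
    solution[pos] = rule
    del pos_matches[pos]
    # remove that rule from every other position's candidate list
    for candidates in pos_matches.values():
        if rule in candidates:
            candidates.remove(rule)

print(solution)  # {1: 'class', 0: 'row', 2: 'seat'}
```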
#### File: 2020/day21/day21ab.py
```python
def load_data():
"""
Spent a long time coming up with this probably bad data structure:
food {
1: {
ingredients: [],
allergens: [],
}
}
"""
all_ingredients = []
all_allergens = []
food = {}
food_id = 0
#datafile = 'input-day21-example'
datafile = 'input-day21'
with open(datafile, 'r') as input:
for line in input:
food_id += 1
bits = line.strip().replace(")", "").split(" (contains ")
ingredients = bits[0].split()
allergens = bits[1].split(", ")
food[food_id] = {}
food[food_id]["ing"] = ingredients
food[food_id]["all"] = allergens
for ing in ingredients:
all_ingredients.append(ing)
for allergen in allergens:
all_allergens.append(allergen)
all_allergens.sort()
all_ingredients.sort()
uniq_allergens = set(all_allergens)
uniq_ingredients = set(all_ingredients)
return food, uniq_allergens, uniq_ingredients, all_ingredients
def part1(food, uniq_allergens, uniq_ingredients, all_ingredients):
# generate a list of suspects for each allergen
suspects = {}
bad_ingredients = []
for allergen in uniq_allergens:
suspects[allergen] = []
for ingredient in uniq_ingredients:
if check_food(food, ingredient, allergen):
suspects[allergen].append(ingredient)
bad_ingredients.append(ingredient)
print(f"suspects: {suspects}")
#bad_ingredients = ['bsqh', 'cxk', 'cpbzbx', 'kqprv', 'lmzg', 'drbm', 'cfnt', 'bdvmx']
good_ingredient_count = 0
for ingredient in all_ingredients:
if ingredient not in bad_ingredients:
good_ingredient_count += 1
return good_ingredient_count
def check_food(food, ingredient, allergen):
"""Need to find at least one allergen <-> ingredient match, and zero failed matches"""
found_at_least_one_match = False
for food_id in food:
if allergen in food[food_id]["all"]:
if ingredient not in food[food_id]["ing"]:
return False
else:
found_at_least_one_match = True
return found_at_least_one_match
if __name__ == '__main__':
food, uniq_allergens, uniq_ingredients, all_ingredients = load_data()
print(f"food: {food} \n")
results1 = part1(food, uniq_allergens, uniq_ingredients, all_ingredients)
print(f"\nPart 1 - {results1}")
```
#### File: 2020/day2/day2a.py
```python
def compute():
validpasswords = 0
datafile = 'input-day2'
with open(datafile, 'r') as input:
for line in input:
cleanline = line.strip()
minmax, letter, password = cleanline.split()
min, max = minmax.split('-')
letter = letter[0]
if evaluate_password(min, max, letter, password):
validpasswords += 1
print(f"found a valid password: {cleanline}")
return validpasswords
def evaluate_password(min, max, letter, password):
lettercount = 0
for char in password:
if char == letter:
lettercount += 1
if lettercount >= int(min) and lettercount <= int(max):
return True
return False
if __name__ == '__main__':
results = compute()
print(results)
```
#### File: 2020/day8/day8b.py
```python
import copy
def load_data():
code = []
datafile = 'input-day8'
with open(datafile, 'r') as input:
for line in input:
parts = line.strip().split()
code.append([parts[0], int(parts[1])])
return code
def process_data(code):
for num in range(len(code)):
line = code[num]
print(f"investigating {num} - {line}")
if line[0] == "acc":
pass
elif line[0] in ["jmp", "nop"]:
tempcode = copy.deepcopy(code)
if line[0] == "jmp":
tempcode[num] = ["nop", line[1]]
elif line[0] == "nop":
tempcode[num] = ["jmp", line[1]]
print(f" - testing line {num} - changed jmp to {tempcode[num]}")
accumulator, successful_test = run_code(tempcode)
if successful_test:
print(f"Success! changed line {num} - {line}")
return accumulator
def run_code(code):
accumulator = 0
location = 0
visited_locations = []
while location < len(code):
# failure! (infinite loop)
if location in visited_locations:
return accumulator, False
line = code[location]
visited_locations.append(location)
if line[0] == "acc":
accumulator += line[1]
location += 1
elif line[0] == "jmp":
location += line[1]
elif line[0] == "nop":
location += 1
# success!
return accumulator, True
if __name__ == '__main__':
data = load_data()
print(data)
results = process_data(data)
print(results)
```
#### File: 2021/day1/day1.py
```python
def load_data():
data = []
datafile = 'input-day1'
with open(datafile, 'r') as input:
for line in input:
num = line.strip()
data.append(int(num))
return data
def part1(depths):
"""
Count the number of times a depth measurement increases from the previous measurement
How many measurements are larger than the previous measurement?
"""
depth_increases = 0
previous_depth = depths[0]
for depth in depths:
if depth > previous_depth:
depth_increases += 1
previous_depth = depth
return depth_increases
def part2(depths):
"""
Use the sum of 3-value windows to determine if the depth has increased or not
If there are not at least 3 values left, stop
"""
depth_increases = 0
previous_depth_sum = depths[0] + depths[1] + depths[2]
for i in range(len(depths)):
if i+2 >= len(depths):
return depth_increases
current_depth_sum = depths[i] + depths[i+1] + depths[i+2]
if current_depth_sum > previous_depth_sum:
depth_increases += 1
previous_depth_sum = current_depth_sum
if __name__ == '__main__':
data = load_data()
print(f"{data}\n")
results1 = part1(data)
print(f"Part 1 - {results1}")
results2 = part2(data)
print(f"Part 2 - {results2}\n")
```
#### File: 2021/day3/day3.py
```python
from copy import deepcopy
def load_data():
#datafile = 'input-day3-example'
datafile = 'input-day3'
data = []
with open(datafile, 'r') as input:
for line in input:
data.append(line.strip())
return data
def part1(report):
"""
Use the binary numbers in the diagnostic report to generate two new binary numbers (called the gamma rate and the epsilon rate)
The power consumption can then be found by multiplying the gamma rate by the epsilon rate.
gamma rate: Find the most common bit in the corresponding position of all numbers in the diagnostic report
epsilon rate: Find the least common bit in each position
"""
gamma = ""
epsilon = ""
for pos in range(len(report[0])):
# count the zeros and ones in the current position
zero_count = 0
one_count = 0
for line in report:
if line[pos] == "0":
zero_count += 1
else:
one_count += 1
# evaluate the counts and determine what next value should be
if zero_count > one_count:
gamma += "0"
epsilon += "1"
elif one_count > zero_count:
gamma += "1"
epsilon += "0"
else:
print("ERROR: Same amount of zeros and ones!")
# convert binary to decimal
gamma_dec = int(gamma, 2)
epsilon_dec = int(epsilon, 2)
return gamma_dec * epsilon_dec
def part2(report):
"""
Verify the life support rating, which can be determined by multiplying the oxygen generator rating by the CO2 scrubber rating
Start with the full list of binary numbers from your diagnostic report and consider just the first bit of those numbers
Keep only numbers selected by the bit criteria (see below)
If you only have one number left, stop; this is the rating value for which you are searching.
Otherwise, repeat the process, considering the next bit to the right
Bit Criteria:
Oxygen generator rating: determine the most common value (0 or 1) in the current bit position, and keep only numbers with that bit in that position.
If 0 and 1 are equally common, keep values with a 1 in the position being considered
CO2 scrubber rating: determine the least common value (0 or 1) in the current bit position, and keep only numbers with that bit in that position.
If 0 and 1 are equally common, keep values with a 0 in the position being considered
"""
oxygen_rating = find_rating("oxygen", report)
co2_rating = find_rating("co2", report)
return oxygen_rating * co2_rating
def find_rating(rating_type, original_report):
report = deepcopy(original_report)
for pos in range(len(report[0])):
# count the zeros and ones in the current position
zero_count = 0
one_count = 0
for line in report:
if line[pos] == "0":
zero_count += 1
else:
one_count += 1
# evaluate the counts and determine what the bad (unwanted) value is
if (rating_type == "oxygen" and zero_count > one_count) or (rating_type == "co2" and zero_count <= one_count):
bad_val = "1"
else:
bad_val = "0"
# remove unwanted items in the report
for line in deepcopy(report):
if line[pos] == bad_val:
report.remove(line)
# if there's only one item left, we've found it
if len(report) == 1:
# convert binary to decimal
return int(report[0], 2)
if __name__ == '__main__':
data = load_data()
print(f"Data: {data}\n")
print(f"Part 1: {part1(data)}")
print(f"Part 2: {part2(data)}\n")
```
#### File: 2021/day5/day5.py
```python
from copy import deepcopy
from pprint import pprint
def load_data():
#datafile = 'input-day5-example'
datafile = 'input-day5'
data = []
with open(datafile, 'r') as input:
for line in input:
str_points = line.strip().replace(" -> ",",").split(",")
points = [int(point) for point in str_points]
data.append(points)
return data
def part1_and_2(lines, draw_diagonal=False):
"""
Part1: Consider only horizontal and vertical lines.
Part2: Consider horizontal, vertical, *and* diagonal lines.
All diagonal lines will be exactly 45 degrees
At how many points do at least two lines overlap?
"""
# create the empty graph
graph = dict()
for y in range(0,1000):
graph[y] = [0 for x in range(1000)]
# draw lines:
for line in lines:
x1, y1, x2, y2 = line[0], line[1], line[2], line[3]
# vertical line:
if x1 == x2:
for i in range(min(y1, y2), max(y1, y2)+1):
graph[i][x1] += 1
# horizontal line:
elif y1 == y2:
for i in range(min(x1, x2), max(x1, x2)+1):
graph[y1][i] += 1
# everything else must be a diagonal line:
elif draw_diagonal:
if x1 > x2:
# ensure x increases from x1 to x2
x1, y1, x2, y2 = line[2], line[3], line[0], line[1]
while x1 <= x2:
graph[y1][x1] += 1
x1 += 1
if y1 < y2: # downhill slope
y1 += 1
else: # uphill slope
y1 -= 1
# count the number of crossing lines
crossing_lines = 0
for y in graph:
for spot in graph[y]:
if spot > 1:
crossing_lines += 1
return crossing_lines
def alternate_solution(lines, draw_diagonal=False):
"""
Inspired by a few solutions I saw - instead of a graph, just use a dict with coordinates as keys
Also, splice in the crossed line counting to avoid a final sweep through the dict at the end
This solution should be faster, but harder to troubleshoot, as you cannot just print out the graph
"""
from collections import defaultdict
graph = defaultdict(int)
crossing_lines = 0
# add coordinates to the "graph":
for line in lines:
x1, y1, x2, y2 = line[0], line[1], line[2], line[3]
# vertical line:
if x1 == x2:
for i in range(min(y1, y2), max(y1, y2)+1):
graph[(i, x1)] += 1
if graph[(i, x1)] == 2:
crossing_lines += 1
# horizontal line:
elif y1 == y2:
for i in range(min(x1, x2), max(x1, x2)+1):
graph[(y1, i)] += 1
if graph[(y1, i)] == 2:
crossing_lines += 1
# everything else must be a diagonal line:
elif draw_diagonal:
if x1 > x2:
# ensure x increases from x1 to x2
x1, y1, x2, y2 = line[2], line[3], line[0], line[1]
while x1 <= x2:
graph[(y1, x1)] += 1
if graph[(y1, x1)] == 2:
crossing_lines += 1
x1 += 1
if y1 < y2:
y1 += 1
else:
y1 -= 1
return crossing_lines
if __name__ == '__main__':
data = load_data()
print(f"Data: {data}\n")
print(f"Part 1: {part1_and_2(data)}")
print(f"Part 2: {part1_and_2(data, draw_diagonal=True)}\n")
print(f"Alernate Solution: {alternate_solution(data, draw_diagonal=True)}\n")
```
#### File: 2021/day6/day6.py
```python
from copy import deepcopy
from pprint import pprint
def load_data():
#datafile = 'input-day6-example'
datafile = 'input-day6'
with open(datafile, 'r') as input:
for line in input:
str_data = line.strip().split(",")
data = [int(x) for x in str_data]
return data
def part1(fish):
"""
Each lanternfish creates a new lanternfish once every 7 days
New lanternfish need an extra 2 days for their first cycle
7-day timer is 0-6
How many lanternfish would there be after 80 days?
"""
for day in range(80):
for i in range(len(fish)):
if fish[i] > 0:
fish[i] -= 1
else:
fish[i] = 6
fish.append(8)
return len(fish)
def part2(fish_list):
"""
How many lanternfish would there be after 256 days?
"""
fish = { i:0 for i in range(9) }
for timer in fish_list:
fish[timer] += 1
for day in range(256):
new_fish = { i:0 for i in range(9) }
for timer in fish:
if timer > 0:
new_fish[timer-1] += fish[timer]
else:
new_fish[6] += fish[timer]
new_fish[8] += fish[timer]
fish = new_fish
# pprint(fish)
return sum(fish.values())
if __name__ == '__main__':
data = load_data()
print(f"Data: {data}\n")
print(f"Part 1: {part1(deepcopy(data))}")
print(f"Part 2: {part2(deepcopy(data))}\n")
```
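Part 2 above works because only the count of fish at each timer value matters, not the individual fish, so each simulated day costs constant work. The same bookkeeping can be written with a deque (a sketch; the expected outputs are the well-known puzzle example values):

```python
from collections import deque

def simulate(timers, days):
    # counts[t] = number of fish whose internal timer equals t (0..8)
    counts = deque([timers.count(t) for t in range(9)])
    for _ in range(days):
        spawning = counts.popleft()   # all fish at timer 0 reproduce; everyone else shifts down
        counts[6] += spawning         # parents reset to timer 6
        counts.append(spawning)       # offspring start at timer 8
    return sum(counts)

print(simulate([3, 4, 3, 1, 2], 18))   # 26
print(simulate([3, 4, 3, 1, 2], 80))   # 5934
```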
#### File: 2021/day8/day8.py
```python
from copy import deepcopy
def load_data():
#datafile = 'input-day8-example'
datafile = 'input-day8'
data = []
with open(datafile, 'r') as input:
for line in input:
patterns_and_values = line.strip().split(" | ")
signal_patterns = patterns_and_values[0].split()
output_values = patterns_and_values[1].split()
data.append([signal_patterns, output_values])
return data
def part1(data):
"""
Each digit of a seven-segment display is rendered by turning on or off any of seven segments named a through g
There are ten unique signal patterns:
0: 1: 2: 3: 4:
aaaa .... aaaa aaaa ....
b c . c . c . c b c
b c . c . c . c b c
.... .... dddd dddd dddd
e f . f e . . f . f
e f . f e . . f . f
gggg .... gggg gggg ....
5: 6: 7: 8: 9:
aaaa aaaa aaaa aaaa aaaa
b . b . . c b c b c
b . b . . c b c b c
dddd dddd .... dddd dddd
. f e f . f e f . f
. f e f . f e f . f
gggg gggg .... gggg gggg
Each entry consists of ten unique signal patterns, a | delimiter, and finally the four digit output value
Within an entry, the same wire/segment connections are used (but you don't know what the connections actually are)
In the output values, how many times do digits 1, 4, 7, or 8 appear?
"""
count_1478 = 0
for item in data:
for output_value in item[1]:
if len(output_value) in [2, 3, 4, 7]:
count_1478 += 1
return count_1478
def part2(data):
"""
Solve for all 10 digits (0 - 9), then use the map of signals to numbers to figure out the output values
What do you get if you add up all of the output values?
Hmm I should try itertools.permutations
"""
total_output_value = 0
for item in data:
signal_map = map_signal_patterns(item[0])
total_output_value += compute_output_value(item[1], signal_map)
return total_output_value
def map_signal_patterns(signal_patterns):
number_map = {}
# First, solve for 1/4/7/8
for pattern in signal_patterns:
p = "".join(sorted(pattern))
if len(p) == 2:
number_map["1"] = p
elif len(p) == 4:
number_map["4"] = p
elif len(p) == 3:
number_map["7"] = p
elif len(p) == 7:
number_map["8"] = p
# Now that we know 1 and 4, we can use that knowledge to solve for the remaining numbers
for pattern in signal_patterns:
p = "".join(sorted(pattern))
# length 6 can be a 0, 6, or 9
if len(p) == 6:
# only 6 is not a superset of 1
if number_map["1"][0] not in p or number_map["1"][1] not in p:
number_map["6"] = p
# only 9 is a superset of 4
else:
is_nine = True
for char in number_map["4"]:
if char not in p:
is_nine = False
if is_nine:
number_map["9"] = p
else:
number_map["0"] = p
# length 5 can be a 2, 3, or 5
elif len(p) == 5:
# only 3 is a superset of 1
if number_map["1"][0] in p and number_map["1"][1] in p:
number_map["3"] = p
else:
four_overlap_count = 0
for char in number_map["4"]:
if char in p:
four_overlap_count += 1
# 4 shares exactly three characters with a 5
if four_overlap_count == 3:
number_map["5"] = p
# 4 shares exactly two characters with a 5
elif four_overlap_count == 2:
number_map["2"] = p
else:
print("ERROR in 2/3/5 logic")
# flip the keys and values in the number map to create the signal map
signal_map = { number_map[x]: x for x in number_map}
return signal_map
def compute_output_value(output_values, signal_map):
final_value = ""
for value in output_values:
sorted_value = "".join(sorted(value))
final_value += signal_map[sorted_value]
return int(final_value)
if __name__ == '__main__':
data = load_data()
print(f"Data1: {data}\n")
print(f"Part 1: {part1(deepcopy(data))}\n")
print(f"Part 2: {part2(deepcopy(data))}\n")
```
|
{
"source": "jeremygatineau/Emergent-Communication-in-MARL",
"score": 2
}
|
#### File: PredictionGame/agents/agent_AriaAC_ER.py
```python
import torch
import torchvision
import torch.nn as nn
import numpy as np
from torch.distributions import Categorical
import torch.nn.utils as utils
from torch.autograd import Variable
class AriaAC_ER(nn.Module):
def __init__(self,opt_params, with_memory=False, aidi=None):
super(AriaAC_ER, self).__init__()
self.aidi = aidi
self.batch_size = opt_params["batch_size"]
self.gamma = opt_params["gamma"]
self.vocabulary_size = opt_params["vocab_size"]
self.epsilon = opt_params["eps"]
self.replay_size = opt_params["replay_size"]
self.training_loops = opt_params["training_loops"]
self.eps = np.finfo(np.float32).eps.item()
self.hiddenDim = 8
self.memory_size = 8
self.with_memory = with_memory
self.obs_Mod = lin_Mod([2, self.hiddenDim//2])
self.msg_Enc = lin_Mod([4, self.hiddenDim//2, self.hiddenDim//2], sftmx = False)
self.rep_Mod = lin_Mod([self.hiddenDim, self.hiddenDim])
if self.with_memory:
self.memory = nn.LSTMCell(self.hiddenDim, self.memory_size)
self.memories = [torch.zeros([1, 2*self.memory_size], dtype=torch.float32) for _ in range(self.replay_size+1)]
self.action_Mod = nn.Sequential(lin_Mod([self.memory_size, 1], sftmx = False), nn.Sigmoid())
self.msg_Dec = lin_Mod([self.memory_size, self.memory_size, self.vocabulary_size], sftmx=True)
self.value_head = lin_Mod([self.memory_size, self.memory_size, 1])
self.hidden_statesTable = np.zeros(self.replay_size)
else :
self.memory = None
self.memories = None
self.action_Mod = lin_Mod([self.hiddenDim, 1], sftmx = True)
self.value_head = lin_Mod([self.hiddenDim, 1])
self.optimizer = torch.optim.Adam(self.parameters(), lr=opt_params["lr"])
self.saved_act_Logp = []
self.saved_values = []
self.saved_entropies = []
self.saved_msg_Logp = []
self.saved_hid_states = []
self.saved_rewards = []
self.saved_obs = []
self.saved_downlink_msgs = []
self.batch_counter = 0
def forward(self, obs, msg, memory):
o = self.obs_Mod(obs)
m = self.msg_Enc(msg)
z = self.rep_Mod(torch.cat([o, m], -1))
if self.with_memory:
hz, cz = self.memory(z, (memory[:, :self.memory_size], memory[:, self.memory_size:]))
out_memory = torch.cat([hz, cz], dim=1)
else:
hz = z
out_memory = None
action = self.action_Mod(hz)
message = self.msg_Dec(hz)
value = self.value_head(hz)
return action, message, out_memory, value
def select_action(self, obs, msg):
obs_t = torch.tensor([obs], dtype=torch.float32)
msg_t = torch.tensor([msg], dtype=torch.float32)
if self.with_memory:
action, message, hid_state, value = self.forward(obs_t.float(), msg_t.float(), self.memories[self.hidden_statesTable[-1]])
self.memories[self.replay_counter+1] = hid_state
else:
action, message, _, value = self.forward(obs_t.float(), msg_t.float(), None)
a_distrib = Categorical(torch.cat([action, 1-action], -1))
m_distrib = Categorical(message)
a = a_distrib.sample()
m = m_distrib.sample()
a_entropy = a_distrib.entropy()
m_entropy = m_distrib.entropy()
self.pushBufferSelect(a_distrib.log_prob(a), m_distrib.log_prob(m), value, a_entropy + m_entropy)
return a, m
def train_on_batch(self, state, reward):
#self.popBuffer()
self.pushBufferEpisode(state[0], state[1], reward)
self.batch_counter += 1
if self.batch_counter >= self.batch_size:
self.optimizer.zero_grad()
returns = self.getReturns(normalize=True)
returns = torch.tensor(returns)
rewards = torch.tensor(self.saved_rewards)
policy_loss = 0
value_loss = 0
for i in range(self.batch_size-1):
advantage = rewards[i]+self.saved_values[i+1].item()-self.saved_values[i].item()
policy_loss += -(self.saved_act_Logp[i] + self.saved_msg_Logp[i])*advantage.detach()
value_loss += advantage.pow(2)
"""for a_logp, m_logp, ret, val, entro in zip(self.saved_act_Logp, self.saved_msg_Logp, returns, self.saved_values, self.saved_entropies):
advantage = ret - val.item()
policy_loss += -(a_logp + m_logp)*advantage.detach()
value_loss += advantage.pow(2)"""
policy_loss /= self.batch_size
value_loss /= self.batch_size
entropy_loss = -self.epsilon*torch.cat(self.saved_entropies).sum()
loss = policy_loss+ value_loss + entropy_loss
loss.backward(retain_graph=True)
self.optimizer.step()
mean_policy = torch.cat(self.saved_act_Logp, 0).exp().mean(dim=0)
rewards = np.copy(self.saved_rewards)
self.reset_Buffer()
return np.round([policy_loss.item(), value_loss.item(), entropy_loss.item()], 2), rewards, mean_policy
return None, None, None
def getReturns(self, normalize=False):
_R = 0
Gs = []
for r in self.saved_rewards[:self.batch_size]:
_R = r + self.gamma * _R
Gs.insert(0, _R)
if normalize==True:
return (Gs-np.mean(Gs))/np.std(Gs)
return Gs
def popBuffer(self):
self.saved_act_Logp[:-1] = self.saved_act_Logp[1:]
self.saved_values[:-1] = self.saved_values[1:]
self.saved_entropies[:-1] = self.saved_entropies[1:]
self.saved_msg_Logp[:-1] = self.saved_msg_Logp[1:]
self.saved_hid_states[:-1] = self.saved_hid_states[1:]
self.saved_rewards[:-1] = self.saved_rewards[1:]
self.saved_obs[:-1] = self.saved_obs[1:]
self.saved_downlink_msgs[:-1] = self.saved_downlink_msgs[1:]
self.memories[self.hidden_statesTable[0]] = torch.zeros([1, 2*self.memory_size], dtype=torch.float32)
self.memories[self.hidden_statesTable[1]] = self.hidden_statesTable[1].detach()
self.hidden_statesTable[:-1] = self.hidden_statesTable[1:]
def sampleBatch(self):
indices = np.random.randint(0, self.replay_size, self.batch_size)
return indices
def pushBufferEpisode(self, obs, msg, reward):
self.saved_obs.append(obs)
self.saved_downlink_msgs.append(msg)
self.saved_rewards.append(reward)
def pushBufferSelect(self, a_lp, m_lp, val, ent):
self.saved_act_Logp.append(a_lp)
self.saved_msg_Logp.append(m_lp)
self.saved_values.append(val)
self.saved_entropies.append(ent)
def reset_Buffer(self):
self.memories[0] = self.memories[-1].detach()
self.memories[1:] = [torch.zeros([1, 2*self.memory_size], dtype=torch.float32) for _ in range(self.batch_size)]
self.saved_act_Logp = []
self.saved_values = []
self.saved_entropies = []
self.saved_msg_Logp = []
self.saved_hid_states = []
self.saved_rewards = []
self.saved_obs = []
self.saved_downlink_msgs = []
self.batch_counter = 0
class lin_Mod(nn.Module):
def __init__(self, sizes, sftmx = False):
super(lin_Mod, self).__init__()
self.sizes = sizes
L = []
for i, s in enumerate(sizes[:-1]):
L.append(nn.Linear(sizes[i], sizes[i+1]))
if i==len(sizes)-2 and sftmx==True:
L.append(nn.Softmax(dim=-1))
else :
L.append(nn.ReLU())
self.mod = nn.ModuleList(L)
def forward(self, x):
x_ = x
for m in self.mod:
x_ = m(x_)
return x_
class ariaActor(nn.Module):
def __init__(self, hidden_dim=10):
super(ariaActor, self).__init__()
self.hiddenDim = hidden_dim
self.obs_Mod = lin_Mod([2+self.hiddenDim, 5])
self.action_Mod = lin_Mod([self.hiddenDim, 2], sftmx = True)
self.msg_Enc = lin_Mod([4, 5], sftmx = False)
self.msg_Dec = lin_Mod([self.hiddenDim, 4], sftmx=True)
self.rep_Mod = lin_Mod([self.hiddenDim, self.hiddenDim])
def forward(self, obs, msg, last_state):
inO = torch.cat([obs, last_state[self.batch_counter]], -1)
o = self.obs_Mod(inO)
m = self.msg_Enc(msg)
new_state = self.rep_Mod(torch.cat([o, m], -1))
action = self.action_Mod(new_state)
message = self.msg_Dec(new_state)
return action, message, new_state
```
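getReturns() above is the standard discounted-return recursion G_t = r_t + γ·G_{t+1} with optional normalization. A minimal standalone version that walks the episode back-to-front, using toy rewards rather than project data:

```python
import numpy as np

def discounted_returns(rewards, gamma=0.99, normalize=False):
    # G_t = r_t + gamma * G_{t+1}, accumulated from the last step backwards
    G, returns = 0.0, []
    for r in reversed(rewards):
        G = r + gamma * G
        returns.insert(0, G)
    if normalize:
        returns = (np.array(returns) - np.mean(returns)) / (np.std(returns) + 1e-8)
    return returns

print(discounted_returns([1.0, 0.0, 1.0], gamma=0.9))  # [1.81, 0.9, 1.0]
```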
#### File: PredictionGame/agents/agent_AriaAC.py
```python
import torch
import torchvision
import torch.nn as nn
import numpy as np
from torch.distributions import Categorical
import torch.nn.utils as utils
from torch.autograd import Variable
class AriaAC:
def __init__(self,opt_params, with_memory=True, split= False, aidi=None):
self.aidi = aidi
self.batch_size = opt_params["batch_size"]
self.gamma = opt_params["gamma"]
self.vocabulary_size = opt_params["vocab_size"]
self.memory_size = opt_params["memory_size"]
self.hidden_dim = opt_params["hidden_size"]
self.gc = opt_params["grad_clamp"]
self.eps = np.finfo(np.float32).eps.item()
self.with_memory = with_memory
self.split = split
if split:
self.modT = ariaModelSplit(batch_size=self.batch_size, vocabulary_size=self.vocabulary_size, memory_size=self.memory_size, hidden_dim=self.hidden_dim, with_memory=self.with_memory).float().train()
self.modI = ariaModelSplit(batch_size=self.batch_size, vocabulary_size=self.vocabulary_size, memory_size=self.memory_size, hidden_dim=self.hidden_dim, with_memory=self.with_memory).float().eval()
else:
self.modT = ariaModel(batch_size=self.batch_size, vocabulary_size=self.vocabulary_size, memory_size=self.memory_size, hidden_dim=self.hidden_dim, with_memory=self.with_memory).float().train()
self.modI = ariaModel(batch_size=self.batch_size, vocabulary_size=self.vocabulary_size, memory_size=self.memory_size, hidden_dim=self.hidden_dim, with_memory=self.with_memory).float().eval()
if self.with_memory:
if split:
self.hid_states = [[torch.zeros(1, 2*self.memory_size).detach(), torch.zeros(1, 2*self.memory_size).detach()]]
else:
self.hid_states = [torch.zeros(1, 2*self.memory_size).detach()]
else:
self.hid_states = None
self.optimizer = torch.optim.Adam(self.modT.parameters(), lr=opt_params["lr"])
self.saved_a_lp = torch.zeros(self.batch_size)
self.saved_m_lp = torch.zeros(self.batch_size)
self.saved_rewards = torch.zeros(self.batch_size)
self.saved_values = torch.zeros(self.batch_size)
self.saved_entropies = torch.zeros(self.batch_size)
def select_action(self, obs, msg, hiden_state):
obs_t = torch.tensor([obs], dtype=torch.float32)
msg_t = torch.tensor([msg], dtype=torch.float32)
if self.with_memory:
action, message, hid_state = self.modIActor.forward(obs_t.float(), msg_t.float(), self.memoriesActor[-1])
#hid_state_critic, _ = self.modTCritic.forward(obs_t.float(), msg_t.float(), self.memoriesCritic[-1], action, message)
if len(self.saved_obs) <self.replay_size:
self.memoriesActor.append(hid_state.detach())
#self.memoriesCritic.append(hid_state_critic.detach())
else:
self.memoriesActor[-1] = hid_state.detach()
#self.memoriesCritic[-1] = hid_state_critic.detach()
else:
action, message, _, = self.modIActor.forward(obs_t.float(), msg_t.float(), None)
a_distrib = Categorical(torch.cat([action, 1-action], -1))
m_distrib = Categorical(message)
a = torch.argmax(a_distrib.probs, axis=1)
m = torch.argmax(m_distrib.probs, axis=1)
return a, m
def select_actionTraing(self, obs, msg, bt):
obs_t = torch.tensor([obs], dtype=torch.float32)
msg_t = torch.tensor([msg], dtype=torch.float32)
if self.with_memory:
action, message, hid_state, value = self.modT.forward(obs_t.float(), msg_t.float(), self.hid_states[-1])
else:
action, message, _, value = self.modT.forward(obs_t.float(), msg_t.float(), None)
a_distrib = Categorical(torch.cat([action, 1-action], -1))
m_distrib = Categorical(message)
a = a_distrib.sample()
m = m_distrib.sample()
a_entropy = a_distrib.entropy()
m_entropy = m_distrib.entropy()
self.saved_a_lp[bt] = a_distrib.log_prob(a)
self.saved_m_lp[bt] = m_distrib.log_prob(m)
self.saved_entropies[bt] = a_entropy+m_entropy
if self.with_memory:
self.hid_states.append(hid_state)
self.saved_values[bt] = value
return a, m, torch.cat([action, 1-action], -1), message
def train_online(self, rewards):
returns = torch.tensor(self.getReturns(rewards))
adv = rewards[:-1]-self.saved_values[:-1]+self.gamma*self.saved_values[1:] # TD error
policy_loss = -(self.saved_a_lp[:-1] + self.saved_m_lp[:-1])*adv.detach()
value_loss = nn.functional.smooth_l1_loss(rewards[:-1]+self.gamma*self.saved_values[1:], self.saved_values[:-1], reduction='none')# adv.pow(2)
#adv = returns-self.saved_values # TD error with returns, should be better cause no bias
#policy_loss = -(self.saved_a_lp + self.saved_m_lp)*adv.detach()
#value_loss = nn.functional.smooth_l1_loss(returns, self.saved_values, reduction='none')# adv.pow(2)
entropy_loss = -self.saved_entropies.mean()
loss = policy_loss.mean() + value_loss.mean() + self.eps*entropy_loss
loss.backward(retain_graph=True)
if self.gc is not None:
self.grad_clamp(self.modT.parameters(), self.gc)
self.optimizer.step()
# Reset buffers after training on batch
if self.with_memory:
if self.split:
self.hid_states = [[self.hid_states[-1][0].detach(), self.hid_states[-1][1].detach()]]
else:
self.hid_states = [self.hid_states[-1].detach()]
self.saved_a_lp = torch.zeros(self.batch_size, dtype=torch.float32)
self.saved_m_lp = torch.zeros(self.batch_size, dtype=torch.float32)
self.saved_rewards = torch.zeros(self.batch_size, dtype=torch.float32)
self.saved_values = torch.zeros(self.batch_size, dtype=torch.float32)
self.saved_entropies = torch.zeros(self.batch_size, dtype=torch.float32)
return policy_loss, value_loss, entropy_loss
def train_offline(self, trajectory):
obs_t = torch.tensor([trajectory.obs], dtype=torch.float32)
msg_t = torch.tensor([trajectory.msg], dtype=torch.float32)
def getReturns(self, rewards, normalize=False):
_R = 0
Gs = []
for r in rewards:
_R = r + self.gamma * _R
Gs.insert(0, _R)
if normalize==True:
return (Gs-np.mean(Gs))/(np.std(Gs)+self.eps)
return Gs
def grad_clamp(self, parameters, clip=0.1):
for p in parameters:
if p.grad is not None:
p.grad.clamp_(min=-clip)
p.grad.clamp_(max=clip)
class ariaModel(nn.Module):
def __init__(self, batch_size, vocabulary_size, hidden_dim=10, memory_size=8, with_memory=True):
super(ariaModel, self).__init__()
self.hiddenDim = hidden_dim
self.memory_size = memory_size
self.with_memory = with_memory
self.batch_size = batch_size
self.vocabulary_size = vocabulary_size
self.obs_Mod = nn.Sequential(nn.Linear(2, self.hiddenDim//2), nn.ReLU())
self.msg_Enc = nn.Sequential(nn.Linear(self.vocabulary_size, self.hiddenDim//2), nn.ReLU(), nn.Linear(self.hiddenDim//2, self.hiddenDim//2), nn.ReLU())
self.rep_Mod = nn.Sequential(nn.Linear(self.hiddenDim, self.hiddenDim), nn.ReLU())
if self.with_memory:
self.memory = nn.LSTMCell(self.hiddenDim, self.memory_size)
self.action_Mod = nn.Sequential(nn.Linear(self.memory_size, 1), nn.Sigmoid())
self.msg_Dec = nn.Sequential(nn.Linear(self.memory_size, self.memory_size), nn.Linear(self.memory_size, self.vocabulary_size), nn.Softmax(dim=-1))
self.value_head = nn.Sequential(nn.Linear(self.memory_size, self.memory_size), nn.Linear(self.memory_size, 1))
else :
self.memory = None
self.action_Mod = nn.Sequential(nn.Linear(self.hiddenDim, 1), nn.Sigmoid())
self.msg_Dec = nn.Sequential(nn.Linear(self.hiddenDim, self.hiddenDim), nn.Linear(self.hiddenDim, self.vocabulary_size), nn.Softmax(dim=-1))
self.value_head = nn.Sequential(nn.Linear(self.hiddenDim, self.hiddenDim), nn.Linear(self.hiddenDim, 1))
def forward(self, obs, msg, memory):
o = self.obs_Mod(obs)
m = self.msg_Enc(msg)
z = self.rep_Mod(torch.cat([o, m], -1))
if self.with_memory:
hz, cz = self.memory(z, (memory[:, :self.memory_size], memory[:, self.memory_size:]))
out_memory = torch.cat([hz, cz], dim=1)
else:
hz = z
out_memory = None
action = self.action_Mod(hz)
message = self.msg_Dec(hz)
value = self.value_head(hz)
return action, message, out_memory, value
class ariaModelSplit(nn.Module):
def __init__(self, batch_size, vocabulary_size, hidden_dim=10, memory_size=8, with_memory=True):
super(ariaModelSplit, self).__init__()
self.hiddenDim = hidden_dim
self.memory_size = memory_size
self.with_memory = with_memory
self.batch_size = batch_size
self.vocabulary_size = vocabulary_size
self.obs_Mod_A = nn.Sequential(nn.Linear(2, self.hiddenDim//2), nn.ReLU())
self.msg_Enc_A = nn.Sequential(nn.Linear(self.vocabulary_size, self.hiddenDim//2), nn.ReLU(), nn.Linear(self.hiddenDim//2, self.hiddenDim//2), nn.ReLU())
self.rep_Mod_A = nn.Sequential(nn.Linear(self.hiddenDim, self.hiddenDim), nn.ReLU())
self.obs_Mod_M = nn.Sequential(nn.Linear(2, self.hiddenDim//2), nn.ReLU())
self.msg_Enc_M = nn.Sequential(nn.Linear(self.vocabulary_size, self.hiddenDim//2), nn.ReLU(), nn.Linear(self.hiddenDim//2, self.hiddenDim//2), nn.ReLU())
self.rep_Mod_M = nn.Sequential(nn.Linear(self.hiddenDim, self.hiddenDim), nn.ReLU(), nn.Linear(self.hiddenDim, self.hiddenDim), nn.ReLU(), nn.Linear(self.hiddenDim, self.hiddenDim), nn.ReLU(), nn.Linear(self.hiddenDim, self.hiddenDim), nn.ReLU())
if self.with_memory:
self.memory_A = nn.LSTMCell(self.hiddenDim, self.memory_size)
self.memory_M = nn.LSTMCell(self.hiddenDim, self.memory_size)
self.action_Mod = nn.Sequential(nn.Linear(self.memory_size, 1), nn.Sigmoid())
self.msg_Dec = nn.Sequential(nn.Linear(self.memory_size, self.memory_size), nn.Linear(self.memory_size, self.vocabulary_size), nn.Softmax(dim=-1))
self.value_head = nn.Sequential(nn.Linear(self.memory_size, self.memory_size), nn.Linear(self.memory_size, 1))
else :
self.memory_A = None
self.memory_M = None
self.action_Mod = nn.Sequential(nn.Linear(self.hiddenDim, 1), nn.Sigmoid())
self.msg_Dec = nn.Sequential(nn.Linear(self.hiddenDim, self.hiddenDim), nn.Linear(self.hiddenDim, self.vocabulary_size), nn.Softmax(dim=-1))
self.value_head = nn.Sequential(nn.Linear(self.hiddenDim, self.hiddenDim), nn.Linear(self.hiddenDim, 1))
def forward(self, obs, msg, memory, value_only=False, action_only=False):
memoryA, memoryM = memory
action = None
message = None
out_memory = [None, None]
value = None
if not value_only:
# action encoding
oA = self.obs_Mod_A(obs)
mA = self.msg_Enc_A(msg)
zA = self.rep_Mod_A(torch.cat([oA, mA], -1))
if self.with_memory:
hzA, czA = self.memory_A(zA, (memoryA[:, :self.memory_size], memoryA[:, self.memory_size:]))
out_memory[0] = torch.cat([hzA, czA], dim=1)
else:
hzA = zA
action = self.action_Mod(hzA)
message = self.msg_Dec(hzA)
if not action_only:
# value encoding
oM = self.obs_Mod_M(obs)
mM = self.msg_Enc_M(msg)
zM = self.rep_Mod_M(torch.cat([oM, mM], -1))
if self.with_memory:
hzM, czM = self.memory_M(zM, (memoryM[:, :self.memory_size], memoryM[:, self.memory_size:]))
out_memory[1] = torch.cat([hzM, czM], dim=1)
else:
hzM = zM
value = self.value_head(hzM)
return action, message, out_memory, value
```
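For orientation, below is a minimal, hypothetical usage sketch of `ariaModelSplit` from the block above. The batch size, vocabulary size and zero-valued inputs are illustrative assumptions, and the `[h | c]` memory layout simply mirrors the concatenation performed in `forward`.

```python
import torch

# assumes ariaModelSplit (defined above) is already in scope; sizes are illustrative only
batch, vocab, hid, mem = 4, 4, 10, 8
model = ariaModelSplit(batch_size=batch, vocabulary_size=vocab,
                       hidden_dim=hid, memory_size=mem, with_memory=True)

obs = torch.zeros(batch, 2)              # 2-dim observation, as expected by obs_Mod_A / obs_Mod_M
msg = torch.zeros(batch, vocab)          # message over the vocabulary
memory = [torch.zeros(batch, 2 * mem),   # actor LSTM state, concatenated as [h | c]
          torch.zeros(batch, 2 * mem)]   # critic LSTM state, concatenated as [h | c]

action, message, out_memory, value = model(obs, msg, memory)
print(action.shape, message.shape, value.shape)  # (batch, 1), (batch, vocab), (batch, 1)
```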
#### File: PredictionGame/agents/agent_AriaOnlineAC.py
```python
import torch
import torch.nn as nn
import numpy as np
from torch.distributions import Categorical

class AriaOnlineAC(nn.Module):
    def __init__(self, opt_params, with_memory=False):
super(AriaOnlineAC, self).__init__()
self.batch_size = opt_params["batch_size"]
self.gamma = opt_params["gamma"]
self.eps = np.finfo(np.float32).eps.item()
self.hiddenDim = 10
self.memory_size = 10
self.with_memory = with_memory
self.obs_Mod = lin_Mod([2, self.hiddenDim//2])
self.action_Mod = lin_Mod([self.hiddenDim, 2], sftmx = True)
self.msg_Enc = lin_Mod([4, self.hiddenDim//2], sftmx = False)
self.msg_Dec = lin_Mod([self.hiddenDim, 4], sftmx=True)
self.rep_Mod = lin_Mod([self.hiddenDim, self.hiddenDim])
if self.with_memory:
self.memory = nn.LSTMCell(self.hiddenDim, self.memory_size)
self.memories = [torch.zeros([1, 2*self.memory_size], dtype=torch.float32), torch.zeros([1, 2*self.memory_size], dtype=torch.float32)]
        else:
self.memory = None
self.memories = None
self.value_head = lin_Mod([self.hiddenDim, 1])
self.optimizer = torch.optim.Adam(self.parameters(), lr=opt_params["lr"])
self.saved_act_Logp = []
self.saved_values = []
self.saved_entropies = []
self.saved_msg_Logp = []
self.saved_hid_states = []
self.saved_rewards = []
self.saved_obs = []
self.saved_downlink_msgs = []
self.batch_counter = 0
"""def embed_Obs(self, obs):
self.zObs = self.obs_Mod(obs) """
def forward(self, obs, msg, memory):
o = self.obs_Mod(obs)
m = self.msg_Enc(msg)
z = self.rep_Mod(torch.cat([o, m], -1))
if self.with_memory:
hz, cz = self.memory(z, (memory[:, :self.memory_size], memory[:, self.memory_size:]))
out_memory = torch.cat([hz, cz], dim=1)
else:
hz = z
out_memory = None
action = self.action_Mod(hz)
message = self.msg_Dec(hz)
value = self.value_head(hz)
return action, message, out_memory, value
def select_action(self, obs, msg):
obs_t = torch.tensor([obs], dtype=torch.float32)
msg_t = torch.tensor([msg], dtype=torch.float32)
if self.with_memory:
action, message, hid_state, value = self.forward(obs_t.float(), msg_t.float(), self.memories[0])
self.memories[1] = hid_state
else:
action, message, _, value = self.forward(obs_t.float(), msg_t.float(), None)
a_distrib = Categorical(action)
m_distrib = Categorical(message)
a = a_distrib.sample()
m = m_distrib.sample()
a_entropy = a_distrib.entropy().sum()
m_entropy = m_distrib.entropy().sum()
self.saved_act_Logp.append(a_distrib.log_prob(a))
self.saved_msg_Logp.append(m_distrib.log_prob(m))
self.saved_entropies.append(a_entropy + m_entropy)
self.saved_values.append(value)
return a, m
    def train_on_batch(self, state, reward, state_):
        """Perform one online actor-critic update per environment step."""
        self.saved_obs.append(state[0])
        self.saved_downlink_msgs.append(state[1])
        self.saved_rewards.append(reward)
        # batch accumulation is disabled; the counter below is a leftover hook
        #self.batch_counter += 1
        #if self.batch_counter >= self.batch_size:
        self.optimizer.zero_grad()
policy_loss = 0
value_loss = 0
        if self.with_memory:
            # bootstrap V(s') using the hidden state produced by the last select_action call
            _, _, _, value_ = self.forward(torch.tensor([state_[0]]).float(), torch.tensor([state_[1]]).float(), self.memories[1])
        else:
            _, _, _, value_ = self.forward(torch.tensor([state_[0]]).float(), torch.tensor([state_[1]]).float(), None)
        # one-step TD advantage: r + gamma * V(s') - V(s), with the bootstrap target detached
        advantage = reward + self.gamma * value_.detach() - self.saved_values[0]
        policy_loss = -(self.saved_act_Logp[0] + self.saved_msg_Logp[0]) * advantage.detach()
        value_loss = advantage.pow(2)
loss = policy_loss+value_loss
loss.backward(retain_graph=True)
self.optimizer.step()
rewards = np.copy(self.saved_rewards)
self.reset_batch()
return (policy_loss.item(), value_loss.item()), rewards
#return None, None
    def getReturns(self, normalize=False):
        """Compute discounted returns-to-go for the rewards saved in the current batch."""
        _R = 0
        Gs = []
        # iterate backwards so that G_t = r_t + gamma * G_{t+1}
        for r in reversed(self.saved_rewards[:self.batch_size]):
            _R = r + self.gamma * _R
            Gs.insert(0, _R)
        if normalize:
            Gs = np.array(Gs)
            return (Gs - Gs.mean()) / (Gs.std() + self.eps)
        return Gs
def reset_batch(self):
if self.with_memory: self.memories = [self.memories[1], torch.zeros([1, 2*self.memory_size], dtype=torch.float32)]
self.saved_act_Logp = []
self.saved_values = []
self.saved_entropies = []
self.saved_msg_Logp = []
self.saved_hid_states = []
self.saved_rewards = []
self.saved_obs = []
self.saved_downlink_msgs = []
self.batch_counter = 0
class lin_Mod(nn.Module):
    """Small MLP helper: Linear layers with Tanh activations and an optional final Softmax."""
    def __init__(self, sizes=[2, 5, 6, 10, 10], sftmx=False):
        super(lin_Mod, self).__init__()
        self.sizes = sizes
        L = []
        for i in range(len(sizes) - 1):
            L.append(nn.Linear(sizes[i], sizes[i+1]))
            if i == len(sizes) - 2 and sftmx:
                L.append(nn.Softmax(dim=-1))
            else:
                L.append(nn.Tanh())
        self.mod = nn.ModuleList(L)
def forward(self, x):
x_ = x
for m in self.mod:
x_ = m(x_)
return x_
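# Example (illustrative): lin_Mod([10, 4], sftmx=True) builds Linear(10, 4) followed by
# Softmax(dim=-1), which is exactly the shape of the msg_Dec head used above.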
class ariaActor(nn.Module):
def __init__(self, hidden_dim=10):
super(ariaActor, self).__init__()
self.hiddenDim = hidden_dim
self.obs_Mod = lin_Mod([2+self.hiddenDim, 5])
self.action_Mod = lin_Mod([self.hiddenDim, 2], sftmx = True)
self.msg_Enc = lin_Mod([4, 5], sftmx = False)
self.msg_Dec = lin_Mod([self.hiddenDim, 4], sftmx=True)
self.rep_Mod = lin_Mod([self.hiddenDim, self.hiddenDim])
    def forward(self, obs, msg, last_state):
        # NOTE: self.batch_counter is never defined in this class; the caller is
        # expected to set it so that last_state can be indexed by the current step.
        inO = torch.cat([obs, last_state[self.batch_counter]], -1)
o = self.obs_Mod(inO)
m = self.msg_Enc(msg)
new_state = self.rep_Mod(torch.cat([o, m], -1))
action = self.action_Mod(new_state)
message = self.msg_Dec(new_state)
return action, message, new_state
class ariaCritic(nn.Module):
def __init__(self, hidden_dim=10):
super(ariaCritic, self).__init__()
self.hiddenDim = hidden_dim
self.obs_Mod = lin_Mod([2+self.hiddenDim, 5])
self.action_Mod = lin_Mod([self.hiddenDim, 2], sftmx = True)
self.msg_Enc = lin_Mod([4, 5], sftmx = False)
self.msg_Dec = lin_Mod([self.hiddenDim, 4], sftmx=True)
self.rep_Mod = lin_Mod([self.hiddenDim, self.hiddenDim])
    def forward(self, obs, msg, last_state):
        # Same caveat as ariaActor.forward: self.batch_counter must be set externally.
        inO = torch.cat([obs, last_state[self.batch_counter]], -1)
o = self.obs_Mod(inO)
m = self.msg_Enc(msg)
new_state = self.rep_Mod(torch.cat([o, m], -1))
action = self.action_Mod(new_state)
message = self.msg_Dec(new_state)
return action, message, new_state
```
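A minimal, hypothetical single-step interaction with `AriaOnlineAC` might look as follows. The hyper-parameters, observation, downlink message and reward are placeholder assumptions, the environment itself is elided, and the import path is only a guess at the package layout.

```python
# from agents.agent_AriaOnlineAC import AriaOnlineAC  # hypothetical import path

opt_params = {"batch_size": 1, "gamma": 0.99, "lr": 1e-3}   # assumed hyper-parameters
agent = AriaOnlineAC(opt_params, with_memory=True)

obs, msg = [0.0, 1.0], [1.0, 0.0, 0.0, 0.0]   # 2-dim observation, 4-dim one-hot message
a, m = agent.select_action(obs, msg)           # sampled action and message indices

# ... step the (elided) environment with (a, m) to obtain a reward and the next state ...
reward, next_state = 1.0, ([0.5, 0.5], [0.0, 1.0, 0.0, 0.0])
losses, rewards = agent.train_on_batch((obs, msg), reward, next_state)
print(losses)   # (policy_loss, value_loss) for this online update
```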