metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "jmpargana/StackOverflowDataset",
"score": 2
} |
#### File: StackOverflowDataset/plotter/main.py
```python
from plotter import Plotter
import os
def main():
plotter = Plotter(
os.getenv("MONGO_URI") or "mongodb://mongo_app:27017/",
os.getenv("MAX_TAGS") or 35,
)
plotter.create_graph()
if __name__ == "__main__":
main()
``` |
{
"source": "jmp-aws/aws-support-tools",
"score": 2
} |
#### File: Functions/TagEC2Dependencies/tag_ec2_dependencies.py
```python
from __future__ import print_function
print('Loading function')
import json, boto3, re
def lambda_handler(event, context):
# print("Received event: \n" + json.dumps(event))
# If CreateTags failed nothing to do
if 'errorCode' in event['detail']:
print('CreateTags failed with error code {} and error message "{}", nothing to do.'
.format(event['detail']['errorCode'], event['detail']['errorMessage']))
return
region = event['detail']['awsRegion']
ec2 = boto3.client('ec2', region_name=region)
instance_ids = []
is_instance = re.compile('i-[0-9a-f]+')
    # RunInstances may create several instances, in which case the event will
    # contain multiple resource IDs
for item in event['detail']['requestParameters']['resourcesSet']['items']:
if is_instance.match(item['resourceId']):
instance_ids.append(item['resourceId'])
# check if we were tagging any instances
if len(instance_ids) == 0:
return
tags = []
for tag in event['detail']['requestParameters']['tagSet']['items']:
tags.append({
'Key': tag['key'],
'Value': tag['value']
})
    # If many instances were created, the DescribeInstances results may be paginated
paginator = ec2.get_paginator('describe_instances')
instances_iterator = paginator.paginate(
DryRun=False,
InstanceIds=instance_ids
)
for page in instances_iterator:
resources = []
for reservation in page['Reservations']:
for instance in reservation['Instances']:
for eni in instance['NetworkInterfaces']:
resources.append(eni['NetworkInterfaceId'])
for volume in instance['BlockDeviceMappings']:
if 'Ebs' in volume:
resources.append(volume['Ebs']['VolumeId'])
print("Tagging resorces for instance ids:\n[{}]".format(', '.join(instance_ids)))
print("Resources to be tagged:\n[{}]".format(', '.join(resources)))
ec2.create_tags(
DryRun=False,
Resources=resources,
Tags=tags
)
return
```
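For reference, a minimal sketch of the event shape the handler above reads. Only the keys accessed in the code are shown; the instance ID and tag values are illustrative, and a real CloudTrail-driven event carries many more fields.
```python
sample_event = {
    "detail": {
        # "errorCode" / "errorMessage" appear only when the CreateTags call failed
        "awsRegion": "us-east-1",
        "requestParameters": {
            "resourcesSet": {
                "items": [{"resourceId": "i-0123456789abcdef0"}],
            },
            "tagSet": {
                "items": [{"key": "Team", "value": "Platform"}],
            },
        },
    },
}
```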
#### File: MWAA/tests/test_verify_env.py
```python
import argparse
import pytest
from verify_env import verify_env
def test_verify_boto3():
'''
    test that versions greater than or equal to 1.16.25 pass
    and that various versions below it fail
'''
assert verify_env.verify_boto3('1.17.4')
assert verify_env.verify_boto3('1.17.33')
assert verify_env.verify_boto3('1.16.27')
assert verify_env.verify_boto3('1.16.26')
assert verify_env.verify_boto3('1.16.25')
assert not verify_env.verify_boto3('1.16.24')
assert not verify_env.verify_boto3('1.16.23')
assert not verify_env.verify_boto3('1.16.22')
assert not verify_env.verify_boto3('1.16.21')
assert not verify_env.verify_boto3('1.7.65')
assert not verify_env.verify_boto3('1.9.105')
assert not verify_env.verify_boto3('1.10.33')
def test_validation_region():
'''
test various inputs for regions and all valid MWAA regions
https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/
'''
regions = [
'us-east-2',
'us-east-1',
'us-west-2',
'ap-southeast-1',
'ap-southeast-2',
'ap-northeast-1',
'eu-central-1',
'eu-west-1',
'eu-north-1'
]
for region in regions:
assert verify_env.validation_region(region) == region
unsupport_regions = [
'us-west-1',
'af-south-1',
'ap-east-1',
'ap-south-1',
'ap-northeast-3',
'ap-northeast-2',
'ca-central-1',
'eu-west-2',
'eu-south-1',
'eu-west-3',
        'me-south-1',
'sa-east-1'
]
for unsupport_region in unsupport_regions:
with pytest.raises(argparse.ArgumentTypeError) as excinfo:
verify_env.validation_region(unsupport_region)
assert ("%s is an invalid REGION value" % unsupport_region) in str(excinfo.value)
bad_regions = [
'us-east-11',
'us-west-3',
'eu-wheat-3'
]
for region in bad_regions:
with pytest.raises(argparse.ArgumentTypeError) as excinfo:
verify_env.validation_region(region)
assert ("%s is an invalid REGION value" % region) in str(excinfo.value)
def test_validate_envname():
'''
test invalid and valid names for MWAA environment
'''
with pytest.raises(argparse.ArgumentTypeError) as excinfo:
env_name = '42'
verify_env.validate_envname(env_name)
assert ("%s is an invalid environment name value" % env_name) in str(excinfo.value)
env_name = 'test'
result = verify_env.validate_envname(env_name)
assert result == env_name
def test_validate_profile():
'''
    test invalid and valid names for an AWS profile
'''
with pytest.raises(argparse.ArgumentTypeError) as excinfo:
profile_name = 'test space'
verify_env.validation_profile(profile_name)
assert ("%s is an invalid profile name value" % profile_name) in str(excinfo.value)
profile_name = 'test'
result = verify_env.validation_profile(profile_name)
assert result == profile_name
profile_name = '42'
result = verify_env.validation_profile(profile_name)
assert result == profile_name
profile_name = '4HelloWorld2'
result = verify_env.validation_profile(profile_name)
assert result == profile_name
profile_name = 'HelloWorld'
result = verify_env.validation_profile(profile_name)
assert result == profile_name
def test_check_ingress_acls():
''' goes through the following scenarios
* if no acls are passed
* if there is an allow
* if there is a deny but no allow
'''
acls = []
src_port_from = 5432
src_port_to = 5432
result = verify_env.check_ingress_acls(acls, src_port_from, src_port_to)
assert result == ''
acls = [
{
'CidrBlock': '0.0.0.0/0',
'Egress': False,
'Protocol': '-1',
'RuleAction': 'allow',
'RuleNumber': 1
},
{
'CidrBlock': '0.0.0.0/0',
'Egress': False,
'Protocol': '-1',
'RuleAction': 'deny',
'RuleNumber': 32767
}
]
result = verify_env.check_ingress_acls(acls, src_port_from, src_port_to)
assert result
acls = [
{
'CidrBlock': '0.0.0.0/0',
'Egress': False,
'Protocol': '-1',
'RuleAction': 'deny',
'RuleNumber': 32767
}
]
result = verify_env.check_ingress_acls(acls, src_port_from, src_port_to)
assert not result
def test_check_egress_acls():
''' goes through the following scenarios
* if no acls are passed
* if there is an allow
* if there is a deny but no allow
'''
acls = []
dest_port = 5432
result = verify_env.check_egress_acls(acls, dest_port)
assert result == ''
acls = [
{
'CidrBlock': '0.0.0.0/0',
'Egress': False,
'Protocol': '-1',
'RuleAction': 'allow',
'RuleNumber': 1
},
{
'CidrBlock': '0.0.0.0/0',
'Egress': False,
'Protocol': '-1',
'RuleAction': 'deny',
'RuleNumber': 32767
}
]
result = verify_env.check_egress_acls(acls, dest_port)
assert result
acls = [
{
'CidrBlock': '0.0.0.0/0',
'Egress': False,
'Protocol': '-1',
'RuleAction': 'deny',
'RuleNumber': 32767
}
]
result = verify_env.check_egress_acls(acls, dest_port)
assert not result
``` |
{
"source": "jmp/backmarker",
"score": 2
} |
#### File: backmarker/models/circuit.py
```python
from django.db import models
class Circuit(models.Model):
reference = models.CharField(max_length=255, unique=True, blank=False)
name = models.CharField(max_length=255, blank=False)
location = models.CharField(max_length=255, blank=False)
country = models.CharField(max_length=255, blank=False)
latitude = models.FloatField()
longitude = models.FloatField()
altitude = models.IntegerField()
wiki_url = models.URLField(db_column="url", unique=True)
def __str__(self):
return self.name
``` |
{
"source": "jmp/bead",
"score": 4
} |
#### File: jmp/bead/bead.py
```python
BEAD = 1
EMPTY = 0
def sort(items):
"""
Sort the given items using bead sort.
The items should be non-negative integers.
"""
if not all(isinstance(item, int) for item in items) or any(item < 0 for item in items):
raise ValueError("All items must be non-negative integers")
beads = create_beads(items)
beads = drop_beads(beads)
return count_beads(beads)
def create_beads(items):
"""
Create the beads from the given list of items.
"""
num_cols = max(items) if items else 0
rows = []
for item in items:
row = [BEAD] * item + [EMPTY] * (num_cols - item)
rows.append(row)
return rows
def drop_beads(beads):
"""
Lets all the beads "drop".
"""
num_rows = len(beads)
num_cols = len(beads[0]) if beads else 0
for row in range(num_rows - 1, 0, -1):
for col in range(num_cols):
_drop_from_above(beads, row, col)
return beads
def count_beads(beads):
"""
Returns the number of beads on each row.
"""
return [row.count(BEAD) for row in beads]
def _drop_from_above(beads, row, col):
# If there is a bead here, can't do anything
if beads[row][col] == BEAD:
return
# Otherwise find the next bead above,
# and drop it to the current position.
for row_above in range(row - 1, -1, -1):
if beads[row_above][col] == BEAD:
beads[row][col] = beads[row_above][col]
beads[row_above][col] = EMPTY
break
```
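A short worked example of the three stages above, with the intermediate matrices written out (computed by hand from the functions as defined):
```python
import bead

beads = bead.create_beads([3, 1, 2])  # [[1, 1, 1], [1, 0, 0], [1, 1, 0]]
beads = bead.drop_beads(beads)        # [[1, 0, 0], [1, 1, 0], [1, 1, 1]]
print(bead.count_beads(beads))        # [1, 2, 3]
print(bead.sort([3, 1, 2]))           # [1, 2, 3]
```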
#### File: jmp/bead/test_bead.py
```python
import unittest
import bead
class TestSort(unittest.TestCase):
def test_smoke(self):
self.assertTrue(callable(bead.sort))
def test_sort_empty_list(self):
self.assertEqual([], bead.sort([]))
def test_sort_single_item(self):
self.assertEqual([1], bead.sort([1]))
def test_sort_invalid_items(self):
self.assertRaises(ValueError, bead.sort, ['a'])
self.assertRaises(ValueError, bead.sort, [1, 'a'])
def test_sort_sorted(self):
self.assertEqual([1, 2, 3], bead.sort([1, 2, 3]))
def test_sort_reversed(self):
self.assertEqual([1, 2, 3, 4, 5], bead.sort([5, 4, 3, 2, 1]))
def test_sort_repeating(self):
self.assertEqual([1, 2, 2, 2, 5, 5, 6], bead.sort([6, 5, 2, 5, 2, 2, 1]))
def test_sort_with_zero(self):
self.assertEqual([0, 1, 2, 3, 4, 5], bead.sort([1, 4, 0, 5, 2, 3]))
class TestCreateBeads(unittest.TestCase):
def test_create_beads_simple(self):
self.assertEqual([
[1, 0, 0],
[1, 1, 0],
[1, 1, 1],
], bead.create_beads([1, 2, 3]))
def test_create_beads_shuffled(self):
self.assertEqual([
[1, 1, 1, 1, 1],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
], bead.create_beads([5, 2, 1, 4, 4]))
class TestDropBeads(unittest.TestCase):
def test_drop_beads(self):
input_beads = [
[1, 1, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 0],
[1, 0, 0, 0, 0],
]
expected_output = [
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
]
self.assertListEqual(expected_output, bead.drop_beads(input_beads))
``` |
{
"source": "jmp/csvloc",
"score": 3
} |
#### File: csvloc/csvloc/__main__.py
```python
from sys import argv, stdout
from .args import parse_args
from .reader import read_csv
from .writer import write_csv
def main() -> None:
args = parse_args(argv[1:])
values = {}
for in_file in args.files:
with in_file.open() as f:
read_csv(f, values)
write_csv(stdout, values)
if __name__ == "__main__":
main()
``` |
{
"source": "jmpews/torweb",
"score": 2
} |
#### File: torweb/app/cache.py
```python
from custor.logger import logger
from db.mysql_model.post import PostCategory, PostTopic, Post
import psutil, datetime, time
# Trick cache
topic_category_cache = {'categorys': [], 'topics': []}
hot_post_cache = {'reply': [], 'visit': []}
system_status_cache = [0, 0, 0, 0]
def update_topic_category_cache():
"""
update topic
:return:
"""
topic_category_cache['categorys'] = []
topic_category_cache['topics'] = []
categorys = PostCategory.select()
for t in range(len(categorys)):
topic_category_cache['categorys'].append(categorys[t])
tmp = []
topics = PostTopic.select().where(PostTopic.category == categorys[t])
for i in range(len(topics)):
tmp.append(topics[i])
topic_category_cache['topics'].append(tmp)
topics = PostTopic.select().where(PostTopic.category == None)
tmp = []
for i in range(len(topics)):
tmp.append(topics[i])
topic_category_cache['topics'].append(tmp)
def update_hot_post_cache():
"""
ignore...
:return:
"""
hot_post_cache['reply'] = []
hot_post_cache['visit'] = []
posts = Post.select().where(Post.is_delete == False).order_by(Post.reply_count.desc()).limit(4)
for post in posts:
hot_post_cache['reply'].append(post)
def update_system_status_cache():
"""
ignore...
:return:
"""
from threading import Thread
class MonitorWorker(Thread):
def __init__(self, name, system_status_cache):
Thread.__init__(self)
self.name = name
self.systatus = system_status_cache
def run(self):
logger.debug("start monitor system status...")
while True:
try:
s1 = psutil.cpu_percent()
s2 = psutil.virtual_memory()[2]
try:
s3 = len(psutil.net_connections())
except:
                        s3 = 'unknown'
s4 = datetime.datetime.fromtimestamp(psutil.boot_time()).strftime("%Y-%m-%d")
self.systatus[0] = s1
self.systatus[1] = s2
self.systatus[2] = s3
self.systatus[3] = s4
from app.api.api import SystemStatusWebsocketHandler
SystemStatusWebsocketHandler.write2all(self.systatus)
time.sleep(30)
except KeyboardInterrupt:
break
monitor = MonitorWorker('system', system_status_cache)
monitor.start()
def update_cache():
logger.debug('start update cache...')
update_topic_category_cache()
update_hot_post_cache()
update_system_status_cache()
```
#### File: torweb/custor/decorators.py
```python
import functools
import urllib.parse
import time
from tornado.concurrent import Future
from tornado.httpclient import HTTPError
from custor.utils import ColorPrint, get_cleaned_post_data, json_result, ThreadWorker
from custor.errors import RequestMissArgumentError, PageNotFoundError
from db.mysql_model import db_mysql
def run_with_thread_future(*args, **kwargs):
"""
future with thread
http://jmpews.github.io/posts/tornado-future-ioloop-yield.html
:param args:
:param kwargs:
:return:
"""
def wraps_func(func):
@functools.wraps(func)
def wraps_args(*args, **kwargs):
future = Future()
work = ThreadWorker(future, func, *args, **kwargs)
work.start()
return future
return wraps_args
return wraps_func
def exception_deal(exceptions):
"""
    catch exceptions raised by `get` and `post` handler methods
:param exceptions:
:return:
"""
def wrapper_func(func):
@functools.wraps(func)
def wrapper_args(handler, *args, **kwargs):
try:
func(handler, *args, **kwargs)
except Exception as ex:
if isinstance(ex, PageNotFoundError):
handler.redirect(ex.redirect_url)
elif isinstance(ex, RequestMissArgumentError):
handler.write(ex.msg)
else:
raise ex
# for e in exceptions:
# if isinstance(ex, e):
# handler.write('oh, catch exp in the args list...\n')
return wrapper_args
return wrapper_func
def timeit(func):
"""
profile function cost
:param func:
:return:
"""
    def wrapper(*args, **kwargs):
        start = time.perf_counter()  # time.clock() was removed in Python 3.8
        result = func(*args, **kwargs)
        end = time.perf_counter()
        ColorPrint.print("> Profiler: [" + func.__qualname__ + "] used: " + str((end - start) * 1e6) + "us")
        return result
    return wrapper
def check_captcha(errorcode, result):
"""
check captcha. (attention @decorator order!)
:param errorcode:
:param result:
:return:
"""
def wrap_func(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
captcha_cookie = self.get_cookie('captcha', '')
captcha = get_cleaned_post_data(self, ['captcha'], blank=True)['captcha']
if not captcha or captcha != captcha_cookie:
self.write(json_result(errorcode, result))
return
return method(self, *args, **kwargs)
return wrapper
return wrap_func
def login_required(method):
"""
from "tornado.web.authenticated"
`self.current_user`是一个@property
:param method:
:return:
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urllib.parse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url += "?" + urllib.parse.urlencode(dict(next=next_url))
self.redirect(url)
return
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
def login_required_json(errorcode, result):
"""
    same as `login_required`, but returns JSON
:param errorcode:
:param result:
:return:
"""
def wrap_func(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
self.write(json_result(errorcode, result))
return
return method(self, *args, **kwargs)
return wrapper
return wrap_func
def ppeewwee(method):
"""
    peewee hook: connect before the request, release after it is done.
:param method:
:return:
"""
@functools.wraps(method)
def wrapper(*args, **kwargs):
db_mysql.connect()
method(*args, **kwargs)
if not db_mysql.is_closed():
db_mysql.close()
return wrapper
```
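A hedged sketch of how these decorators might be stacked on a Tornado handler method. The handler class, error codes, and messages below are illustrative rather than taken from the project, and the real handlers may order the decorators differently (note the warning about decorator order in `check_captcha`).
```python
from custor.decorators import check_captcha, exception_deal, login_required_json
from custor.handlers.basehandler import BaseRequestHandler
from custor.utils import json_result


class ExampleHandler(BaseRequestHandler):
    @login_required_json(-1, 'please login first')
    @check_captcha(-2, 'captcha mismatch')
    @exception_deal([])
    def post(self):
        # reached only when the user is logged in and the captcha matches
        self.write(json_result(0, 'ok'))
```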
#### File: custor/handlers/otherhandler.py
```python
from db.mongo_db.session import session, MongoSessionManager
from db.mongo_db.server_status import ServerStatus
from .basehandler import BaseRequestHandler
"""
Mainly contains some more advanced handlers.
"""
class Adv_BaseRequestHandler(BaseRequestHandler):
"""
    mongo: records URLs and sessions.
    Adds URL visit tracking.
    Adds a session, exposed as a `session` property, which is updated in on_finish.
"""
def __init__(self, application, request, **kwargs):
BaseRequestHandler.__init__(self, application, request, **kwargs)
self._session = None
def prepare(self):
        # record the visit to this URL
self.url_count = ServerStatus.visitor_url_up(self.request.path)
@property
def session(self):
if not self._session:
self._session = MongoSessionManager.load_session_from_request(self)
return self._session
def on_finish(self):
if self._session:
MongoSessionManager.update_session(self._session.get_session_id(), self._session)
```
#### File: torweb/custor/uimethods.py
```python
from settings.config import config
from custor.utils import TimeUtil
__all__ = ['datetime_delta', 'is_default_avatar']
def datetime_delta(handler, t):
"""
display friendly time
:param handler:
:param t:
:return:
"""
if not t:
return '???'
return TimeUtil.datetime_delta(t)
def is_default_avatar(handler, avatar):
"""
    whether the avatar is the default avatar
:param handler:
:param avatar:
:return:
"""
if avatar == config.default_avatar:
return True
if avatar is None:
return True
return False
```
#### File: db/mongo_db/session.py
```python
from settings.config import config
from db.mongo_db import DB_mongo
from custor.utils import random_str
import functools
import asyncio
import tornado.web
import greenado
from tornado.web import gen
class BaseSession(dict):
"""
session store in dict
"""
def __init__(self, session_id, data):
self._session_id = session_id
self.data = data
super(BaseSession, self).__init__()
def get_session_id(self):
return self._session_id
    def __missing__(self, key):
return None
class MongoSessionManager():
"""
session manager
"""
_collection = DB_mongo['session']
def __init__(self, collection_name='sessions'):
self._collection = DB_mongo[collection_name]
@staticmethod
def generate_session_id():
return random_str(16)
@classmethod
def new_session(cls, session_id=None, data=None):
"""
new session
:param session_id:
:param data:
:return:
"""
if not data:
data = {}
if not session_id:
session_id = cls.generate_session_id()
greenado.gyield(cls._collection.save({'_id': session_id, 'data': data}))
# import pdb;pdb.set_trace()
return BaseSession(session_id, {})
@classmethod
def load_session(cls, session_id=None):
"""
load session
:param session_id:
:return:
"""
data = {}
if session_id:
session_data = greenado.gyield(cls._collection.find_one({'_id': session_id}))
if session_data:
data = session_data['data']
return BaseSession(session_id, data)
future = tornado.web.Future()
future.set_result(None)
result = greenado.gyield(future)
return result
@classmethod
def update_session(cls, session_id, data):
greenado.gyield(cls._collection.update({'_id': session_id}, {'$set': {'data': data}}))
@classmethod
def load_session_from_request(cls, handler):
session_id = handler.get_secure_cookie('session_id', None)
if session_id:
session_id = session_id.decode()
s = MongoSessionManager.load_session(session_id)
if s is not None:
return s
else:
s = MongoSessionManager.new_session()
handler.set_secure_cookie('session_id', s.get_session_id())
return s
def session(request):
@functools.wraps(request)
def _func(handler, *args, **kwargs):
s = MongoSessionManager.load_session_from_request(handler)
setattr(handler, 'session', s.data)
return_val = request(handler, *args, **kwargs)
MongoSessionManager.update_session(s.get_session_id(), s.data)
return return_val
return _func
```
#### File: torweb/tests/test_blog_load_from_md.py
```python
import sys, os
import os.path
sys.path.append(os.path.dirname(sys.path[0]))
from settings.config import config
from peewee import Model, MySQLDatabase
mysqldb = MySQLDatabase('',
user=config.BACKEND_MYSQL['user'],
password=config.BACKEND_MYSQL['password'],
host=config.BACKEND_MYSQL['host'],
port=config.BACKEND_MYSQL['port'])
from db.mysql_model.blog import BlogPostCategory, BlogPostLabel, BlogPost
md_path = './docs/articles'
def check_md_format(file_path):
fd = open(file_path)
md_info = {}
while True:
line = fd.readline().strip()
if len(line) == 0:
break
try:
i = line.index(':')
k = line[:i]
v = line[i+1:]
except:
fd.close()
return None
md_info[k.strip().lower()] = v.strip()
    # check that the required fields exist
# Necessary Args: title, tags
# Optional Args: date, category, auth, slug
keys = md_info.keys()
if 'title' in keys and 'tags' in keys and 'slug' in keys:
md_info['content'] = fd.read(-1)
fd.close()
return md_info
else:
fd.close()
return None
def convert_md_2_post(md_info):
category = md_info.get('category')
if not category:
category = 'UnClassified'
cate = BlogPostCategory.get_by_name(category)
post = BlogPost.create(title=md_info['title'],
category=cate,
slug=md_info['slug'],
content=md_info['content'])
BlogPostLabel.add_post_label(md_info['tags'], post)
def get_files(root_path):
files = os.listdir(root_path)
print(files)
for file_name in files:
_, suffix = os.path.splitext(file_name)
if suffix == '.md':
md_file_path = os.path.join(root_path, file_name)
md_info = check_md_format(md_file_path)
if md_info:
print(md_info['title'])
convert_md_2_post(md_info)
if __name__ == '__main__':
mysqldb.create_tables([BlogPostLabel, BlogPost, BlogPostCategory], safe=True)
t = BlogPostLabel.delete()
t.execute()
t = BlogPost.delete()
t.execute()
t = BlogPostCategory.delete()
t.execute()
get_files(md_path)
```
#### File: torweb/tests/test_greenlet.py
```python
from greenlet import greenlet
g3 = None
def test1():
global g3
print(12)
gr2.switch()
print(34)
print(90)
def test2():
g3 = greenlet()
gr1.parent = g3.parent
print(56)
gr1.switch()
print(78)
gr1 = greenlet(test1)
gr2 = greenlet(test2)
gr1.switch()
```
#### File: torweb/tests/test_thread_future.py
```python
import sys, os
import tornado.ioloop
from tornado import gen
sys.path.append(os.path.dirname(sys.path[0]))
import time
from custor.decorators import run_with_thread_future
@run_with_thread_future(None)
def thread_sleep(self, args):
time.sleep(5)
@gen.coroutine
def sleep_coroutine():
yield thread_sleep(None, None)
print('sleep finish.')
sleep_coroutine()
print('continue other work.')
tornado.ioloop.IOLoop.instance().start()
``` |
{
"source": "jmpf2018/ShipAI",
"score": 2
} |
#### File: jmpf2018/ShipAI/simulator.py
```python
import numpy as np
from scipy.integrate import RK45
class Simulator:
def __init__(self):
self.last_global_state = None
self.last_local_state = None
self.current_action = None
self.steps = 0
        self.time_span = 10  # seconds simulated in each step
self.number_iterations = 100 # 100 iterations for each step
self.integrator = None
self.rk_mode = 'scipy_rk'
##Vessel Constants
self.M = 115000 *10**3
self.Iz = 414000000 * 10 ** 3
self.M11 = 14840.4 * 10**3
self.M22 = 174050 * 10**3
self.M26 = 38369.6 * 10**3
self.M66 = 364540000 * 10**3
self.M62 = 36103 * 10**3
self.D11 = 0.35370 * 10**3
self.D22 = 1.74129 * 10**3
self.D26 = 1.95949 * 10**3
self.D62 = 1.85586 * 10**3
self.D66 = 3.23266 * 10**3
self.L = 244.74 #length
self.Draft = 15.3
        self.x_g = 2.2230  # center of mass
        self.x_prop = -112  # propulsor position
        self.force_prop_max = 1.6 * 10**6  # max propulsor force
self.x_rudder = -115 # rudder position
self.rudder_area = 68
        self.Cy = 0.06  # lateral drag coefficient
self.lp = 7.65 # cross-flow center
self.Cb = 0.85 # block coefficient
self.B = 42 # Beam
self.S = 27342 # wet surface
## Water constants
self.pho = 1.025 * 10**3# water density
self.mi = 10**-3 # water viscosity
## Rudder Constants
        self.A_rud = 68  # rudder area
self.delta_x = self.x_prop - self.x_rudder # distance between rudder and propulsor
        self.r_aspect = 2  # aspect ratio
## Propulsor constants:
self.D_prop = 7.2 # Diameter
self.n_prop = 1.6 # rotation
# some modes of simulator
self.system_dynamics = 'complex'
self.prop_dynamics = 'complex'
def reset_start_pos(self, global_vector):
x0, y0, theta0, vx0, vy0, theta_dot0 = global_vector[0], global_vector[1], global_vector[2], global_vector[3], global_vector[4], global_vector[5]
self.last_global_state = np.array([x0, y0, theta0, vx0, vy0, theta_dot0])
self.last_local_state = self._global_to_local(self.last_global_state)
if self.rk_mode == 'scipy_rk':
self.current_action = np.zeros(2)
self.integrator = self.scipy_runge_kutta(self.simulate_scipy, self.get_state(), t_bound=self.time_span)
def step(self, angle_level, rot_level):
self.current_action = np.array([angle_level, rot_level])
if self.rk_mode == 'ours_rk':
for i in range(self.number_iterations):
self.last_global_state = self.runge_kutta(self.get_state(), self.simulate_in_global, 6, self.time_span/self.number_iterations)
return self.last_global_state
if self.rk_mode == 'scipy_rk':
while not (self.integrator.status == 'finished'):
self.integrator.step()
self.last_global_state = self.integrator.y
self.last_local_state = self._global_to_local(self.last_global_state)
self.integrator = self.scipy_runge_kutta(self.simulate_scipy, self.get_state(), t0=self.integrator.t, t_bound=self.integrator.t+self.time_span)
return self.last_global_state
def simulate_scipy(self, t, global_states):
local_states = self._global_to_local(global_states)
return self._local_ds_global_ds(global_states[2], self.simulate(local_states))
def simulate_in_global(self, global_states):
local_states = self._global_to_local(global_states)
return self._local_ds_global_ds(global_states[2], self.simulate(local_states))
def simulate(self, local_states):
"""
:param local_states: Space state
:return df_local_states
"""
x1 = local_states[0] #u
x2 = local_states[1] #v
x3 = local_states[2] #theta (not used)
x4 = local_states[3] #du
x5 = local_states[4] #dv
x6 = local_states[5] #dtheta
        beta = self.current_action[0]*np.pi/6  # rudder angle (-30 to 30 degrees)
alpha = self.current_action[1] #propulsor
vc = np.sqrt(x4 ** 2 + x5 ** 2)
gamma = np.pi+np.arctan2(x5, x4)
# Composing resistivity forces
Re = self.pho * vc * self.L / self.mi
if Re == 0:
C0=0
else:
C0 = 0.0094 * self.S / (self.Draft * self.L) / (np.log10(Re) - 2) ** 2
C1 = C0 * np.cos(gamma) + (-np.cos(3 * gamma) + np.cos(gamma)) * np.pi * self.Draft / (8 * self.L)
F1u = 0.5 * self.pho * vc ** 2 * self.L * self.Draft * C1
C2 = (self.Cy - 0.5 * np.pi * self.Draft / self.L) * np.sin(gamma) * np.abs(np.sin(gamma)) + 0.5 * np.pi * self.Draft / self.L * (
np.sin(gamma) ** 3) + np.pi * self.Draft / self.L * (1 + 0.4 * self.Cb * self.B / self.Draft) * np.sin(gamma) * np.abs(np.cos(gamma))
F1v = 0.5 * self.pho * vc ** 2 * self.L * self.Draft * C2
C6 = -self.lp / self.L * self.Cy * np.sin(gamma) * np.abs(np.sin(gamma))
C6 = C6 - np.pi * self.Draft / self.L * np.sin(gamma) * np.cos(gamma)
C6 = C6 - (0.5 + 0.5 * np.abs(np.cos(gamma))) ** 2 * np.pi * self.Draft / self.L * (0.5 - 2.4 * self.Draft / self.L) * np.sin(gamma) * np.abs(np.cos(gamma))
F1z = 0.5 * self.pho * vc ** 2 * self.L**2 * self.Draft * C6
# Propulsion model
if self.prop_dynamics == 'simple':
Fpx = np.cos(beta) * self.force_prop_max * alpha * np.abs(2/(1+x1))
Fpy = -np.sin(beta) * self.force_prop_max * alpha * np.abs(2/(1+x1))
Fpz = Fpy * self.x_rudder
else:
#Propulsion model complex -- > the best one:
J = x4*0.6/(1.6*7.2)
kt = 0.5 - 0.5*J
n_prop_ctrl = self.n_prop*alpha
Fpx = kt*self.pho*n_prop_ctrl**2*self.D_prop**4
kr = 0.5 + 0.5 / (1 + 0.15 * self.delta_x/self.D_prop)
ur = np.sqrt(x4 ** 2 + kr * 4 * kt * n_prop_ctrl ** 2 * self.D_prop ** 2 / np.pi)
vr = -0.8 * x5
Ur = np.sqrt(ur ** 2 + vr ** 2)
fa = 6.13 * self.r_aspect / (self.r_aspect + 2.25)
ar = beta
FN = 0.5*self.pho*self.A_rud*fa*Ur**2*np.sin(ar)
Fpy = -FN * np.cos(beta)
Fpz = -FN * np.cos(beta) * self.x_rudder
        # without resistance
#F1u, F1v, F1z = 0, 0, 0
# Derivative function
fx1 = x4
fx2 = x5
fx3 = x6
# simple model
if self.system_dynamics == 'complex':
Mrb = np.array([[self.M, 0, 0], [0, self.M, self.M*self.x_g], [0, self.M*self.x_g, self.Iz]])
Crb = np.array([[0, 0, -self.M*(self.x_g*x6+x5)], [0, 0, self.M*x4], [self.M*(self.x_g*x6+x5), -self.M*x4, 0]])
Ma = np.array([[self.M11, 0, 0], [0, self.M22, self.M26], [0, self.M62, self.M66]])
ca13 = -(self.M22*x5 + self.M26*x6)
ca23 = self.M11*x4
Ca = np.array([[0, 0, ca13], [0, 0, ca23], [-ca13, -ca23, 0]])
Dl = np.array([[self.D11, 0, 0], [0, self.D22, self.D26], [0, self.D62, self.D66]])
vv = np.array([x4, x5, x6])
MM = Mrb+Ma
CC = Crb+Dl
Fext = np.array([[F1u + Fpx], [F1v + Fpy], [0.21*F1z + 0.5*Fpz]])
A = MM
B = np.dot(CC, vv.transpose()) + Fext.transpose()
ff = np.linalg.solve(A, B.transpose())
fx4 = ff[0]
fx5 = ff[1]
fx6 = ff[2]
elif self.system_dynamics == 'linearized':
a11 = self.M + self.M11
b1 = -(self.M + self.M22) * x5 * x6 - (self.M * self.x_g + 0.5 * (self.M26 + self.M62)) * x6 ** 2
fx4 = (b1+F1u + Fpx)/ a11
A = np.array([[self.M + self.M26, self.M * self.x_g + self.M22], [self.M * self.x_g + self.M62, self.Iz + self.M66]])
B1 = [[self.D26, self.M * x4 + self.D22], [self.M62, self.x_g * x4 + self.D66]]
vv = np.array([x5, x6])
Fext = np.array([[F1v + Fpy], [F1z + Fpz]])
B = np.dot(B1, vv.transpose()) + Fext.transpose()
ff = np.linalg.solve(A, B.transpose())
fx5 = ff[0]
fx6 = ff[1]
else:
# main model simple -- > the best one:
fx4 = (F1u + Fpx)/(self.M + self.M11)
fx5 = (F1v + Fpy)/(self.M + self.M22)
fx6 = (F1z + Fpz)/(self.Iz + self.M66)
fx = np.array([fx1, fx2, fx3, fx4, fx5, fx6])
return fx
def scipy_runge_kutta(self, fun, y0, t0=0, t_bound=10):
return RK45(fun, t0, y0, t_bound, rtol=self.time_span/self.number_iterations, atol=1e-4)
def runge_kutta(self, x, fx, n, hs):
k1 = []
k2 = []
k3 = []
k4 = []
xk = []
ret = np.zeros([n])
for i in range(n):
k1.append(fx(x)[i]*hs)
for i in range(n):
xk.append(x[i] + k1[i]*0.5)
for i in range(n):
k2.append(fx(xk)[i]*hs)
for i in range(n):
xk[i] = x[i] + k2[i]*0.5
for i in range(n):
k3.append(fx(xk)[i]*hs)
for i in range(n):
xk[i] = x[i] + k3[i]
for i in range(n):
k4.append(fx(xk)[i]*hs)
for i in range(n):
ret[i] = x[i] + (k1[i] + 2*(k2[i] + k3[i]) + k4[i])/6
return ret
def get_state(self):
return self.last_global_state
def get_local_state(self):
return self.last_local_state
def _local_to_global(self, local_state):
# local_state: [ux, uy, theta, uxdot, uydot, thetadot]
theta = local_state[2]
c, s = np.cos(theta), np.sin(theta)
A = np.array([[c, -s], [s, c]])
B_l_pos = np.array([local_state[0], local_state[1]])
B_l_vel = np.array([local_state[3], local_state[4]])
B_g_pos = np.dot(A, B_l_pos.transpose())
B_g_vel = np.dot(A, B_l_vel.transpose())
return np.array([B_g_pos[0], B_g_pos[1], local_state[2], B_g_vel[0], B_g_vel[1], local_state[5]])
def _global_to_local(self, global_state):
# global_states: [x, y, theta, vx, vy, thetadot]
theta = global_state[2]
c, s = np.cos(theta), np.sin(theta)
A = np.array([[c, s], [-s, c]])
B_g_pos = np.array([global_state[0], global_state[1]])
B_g_vel = np.array([global_state[3], global_state[4]])
B_l_pos = np.dot(A, B_g_pos.transpose())
B_l_vel = np.dot(A, B_g_vel.transpose())
return np.array([B_l_pos[0], B_l_pos[1], global_state[2], B_l_vel[0], B_l_vel[1], global_state[5]])
def _local_ds_global_ds(self, theta, local_states):
"""
        Receives the heading angle and a local state derivative, and rotates it
        into the global frame.
        :param theta: heading angle used for the rotation
        :param local_states: local state derivative to be converted
        :return: global state derivative
"""
c, s = np.cos(theta), np.sin(theta)
A = np.array([[c, -s], [s, c]])
B_l_pos = np.array([local_states[0], local_states[1]])
B_l_vel = np.array([local_states[3], local_states[4]])
B_g_pos = np.dot(A, B_l_pos.transpose())
B_g_vel = np.dot(A, B_l_vel.transpose())
return np.array([B_g_pos[0], B_g_pos[1], local_states[2], B_g_vel[0], B_g_vel[1], local_states[5]])
```
#### File: jmpf2018/ShipAI/tests.py
```python
import unittest
from simulator import *
import numpy as np
from scipy.integrate import RK45
class TestSimulator(unittest.TestCase):
def test_global_to_local(self):
xg = np.array([1, 1, np.pi / 4, -1, -1, 0])
sim1 = Simulator()
x1 = sim1._global_to_local(xg)
xref = np.array([np.sqrt(2), 0, np.pi/4, -np.sqrt(2), 0, 0])
self.assertTrue((x1 == xref).all())
def test_local_to_global(self):
xg = np.array([np.sqrt(2), 0, np.pi / 4, -np.sqrt(2), 0, 1.5])
sim1 = Simulator()
x1 = sim1._local_to_global(xg)
xref = np.array([1, 1, np.pi/4, -1, -1, 1.5])
self.assertTrue(np.allclose(x1, xref))
def test_test_reset_pos(self):
x0 = np.array([1, 1, np.pi / 4, -1, -1, 0])
sim = Simulator()
sim.reset_start_pos(x0)
x0_set = sim.get_state()
self.assertTrue(np.allclose(x0, x0_set))
x0_local_local = sim.get_local_state()
x0_local_ref = np.array([np.sqrt(2), 0, np.pi/4, -np.sqrt(2), 0, 0])
self.assertTrue(np.allclose(x0_local_local, x0_local_ref))
def test_runge_kutta(self):
"""
        Test the dynamics of two separate mass-spring systems
"""
states = np.array([1, 0, 1, 0])
t0 = 0
tmax1 = np.pi*2 # period/2 ==> opposite position
tmax2 = np.pi*5 # period/2 ==> opposite position
h = 0.01
N1 = int(np.round(tmax1/h))
N2 = int(np.round(tmax2/h))
sim = Simulator()
# (x, fx, n, hs)
for i in range(N1):
states = sim.runge_kutta(states, _mass_spring, 4, h)
self.assertAlmostEqual(states[0], -1, places=4)
for i in range(N2-N1):
states = sim.runge_kutta(states, _mass_spring, 4, h)
self.assertAlmostEqual(states[2], -1, places=4)
def test_simulation(self):
sim = Simulator()
# first case: Vessel with no velocity, no action and nothing should happen
actions = np.zeros(2)
sim.current_action = actions
states = np.array([10, 0, 0, 0, 0, 0])
df = sim.simulate(states)
self.assertTrue(np.allclose(df, np.zeros(6)))
# if the vessel has only velocity on x, only df[0] and df[3] should be not-null
states = np.array([10, 0, 0, 1, 0, 0])
df = sim.simulate(states)
self.assertTrue(df[0] > 0 and df[3] < 0)
        # acceleration test: propulsion only, expect positive surge acceleration
states = np.array([10, 0, 0, 0, 0, 0])
sim.current_action = np.array([0, 1])
df = sim.simulate(states)
self.assertTrue(df[1] == 0)
self.assertTrue(df[3] > 0)
self.assertTrue(df[4] == 0)
self.assertTrue(df[5] == 0)
def test_local_ds_global_ds(self):
sim = Simulator()
        # first case: zero heading, so local and global derivatives coincide
local_s_0 = np.array([0, 0, 0, 0, 0, 0])
local_s_1 = np.array([1, 1, 0.1*np.pi/4, 1, 1, 0.1])
theta = local_s_0[2]
global_s = sim._local_ds_global_ds(theta, local_s_1)
self.assertTrue(np.allclose(global_s, np.array([1, 1, 0.1*np.pi/4, 1, 1, 0.1])))
local_s_0 = np.array([np.sqrt(2), 0, np.pi/4, np.sqrt(2), 0, 0.1*np.pi/4])
local_s_1 = np.array([1, 1, np.pi/2, 1, 1, 0.2*np.pi/4])
theta = local_s_0[2]
global_s = sim._local_ds_global_ds(theta, local_s_1)
self.assertTrue(np.allclose(global_s, np.array([0, np.sqrt(2), np.pi/2, 0, np.sqrt(2), 0.2*np.pi/4])))
local_s_0 = np.array([np.sqrt(2), 0, -np.pi / 4, np.sqrt(2), 0, -0.1 * np.pi / 4])
local_s_1 = np.array([1, -1, -np.pi / 2, 1, -1, -0.2 * np.pi / 4])
theta = local_s_0[2]
global_s = sim._local_ds_global_ds(theta, local_s_1)
self.assertTrue(np.allclose(global_s, np.array([0, -np.sqrt(2), -np.pi / 2, 0, -np.sqrt(2), -0.2 * np.pi / 4])))
def test_step(self):
sim = Simulator()
states = np.array([10, 0, 0, 0, 0, 0])
actions = np.array([0, 1])
sim.reset_start_pos(states)
sim.step(actions[0], actions[1])
new_states = sim.get_state()
self.assertTrue(new_states[0]> states[0])
def test_rotation(self):
sim = Simulator()
states = np.array([10, 10, np.pi/4, 0, 0, 0])
actions = np.array([0, 1])
sim.reset_start_pos(states)
sim.step(actions[0], actions[1])
new_states = sim.get_state()
self.assertTrue(new_states[0]> states[0])
def test_episode(self):
sim = Simulator()
states = np.array([0, 100, -np.pi/4, 1, -1, 0])
actions = np.array([0, 1])
sim.reset_start_pos(states)
for i in range(10):
sim.step(actions[0], actions[1])
new_states = sim.get_state()
self.assertTrue(new_states[0]>0)
def test_scipy_RK45(self):
t0 = 0
y0 = np.array([1, 0, 1, 0])
tmax1 = np.pi * 2 # period/2 ==> opposite position
tmax2 = np.pi * 5 # period/2 ==> opposite position
h = 0.01
integrator = RK45(_mass_spring_sp, t0, y0, rtol=h, atol=10**-6, t_bound=tmax1)
while not (integrator.status == 'finished'):
integrator.step()
Y1 = integrator.y
T1 = integrator.t
integrator = RK45(_mass_spring_sp, T1, Y1, rtol=h, atol=10 ** -6, t_bound=tmax2)
while not (integrator.status == 'finished'):
integrator.step()
Y2 = integrator.y
T2 = integrator.t
self.assertAlmostEqual(Y1[0], -1, places=2)
self.assertAlmostEqual(Y2[2], -1, places=2)
self.assertAlmostEqual(T1, tmax1, places=4)
self.assertAlmostEqual(T2, tmax2, places=4)
def _mass_spring_sp(t, x):
return _mass_spring(x)
def _mass_spring(x):
"""
    Dynamics of two separate mass-spring systems
"""
x1 = x[0]
dx1 = x[1]
x2 = x[2]
dx2 = x[3]
m1 = 16
k1 = 4
m2 = 25
k2 = 1
# m ddx1 + k1*x1 = 0
# T = 2*pi sqrt(m1/k1) = 8*pi
# m ddx2 + k2*x2 = 0
# T = 2*pi sqrt(m2/k2) = 10*pi
fx1 = dx1
fx2 = -k1/m1 * x1
fx3 = dx2
fx4 = -k2/m2 * x2
fx = np.array([fx1, fx2, fx3, fx4])
return fx
if __name__ == '__main__':
unittest.main()
```
#### File: jmpf2018/ShipAI/viewer.py
```python
import turtle
import math
import tkinter
class Viewer:
def __init__(self):
        self.l_vessel = 50  # half the vessel length
#first we initialize the turtle settings
turtle.speed(0)
turtle.mode('logo')
turtle.setworldcoordinates(0, -500, 2000, 500)
turtle.setup()
turtle.screensize(4000, 1000, 'white')
        w_vessel = 5  # half the vessel width
turtle.register_shape('vessel', (
(0, self.l_vessel), (w_vessel, self.l_vessel / 2), (w_vessel, -self.l_vessel), (-w_vessel, -self.l_vessel),
(-w_vessel, self.l_vessel / 2)))
turtle.register_shape('rudder', ((-1, 0), (1, 0), (1, -10), (-1, -10)))
turtle.degrees()
#
self.vessel = turtle.Turtle()
self.vessel.shape('vessel')
self.vessel.fillcolor('red')
self.vessel.penup()
self.rudder = turtle.Turtle()
self.rudder.shape('rudder')
self.rudder.fillcolor('green')
self.rudder.penup()
self.step_count = 0
self.steps_for_stamp = 30
def plot_position(self, x, y, theta, rud_angle):
        converted_angle = theta*180/math.pi  # convert heading from radians to degrees
# turtle.fillcolor('green')
self.vessel.setpos(x, y)
self.vessel.setheading(converted_angle)
self.rudder.setpos(x - self.l_vessel * math.cos(math.pi * converted_angle / 180),
y - self.l_vessel * math.sin(math.pi * converted_angle / 180))
self.rudder.setheading(converted_angle - rud_angle)
self.vessel.pendown()
def plot_guidance_line(self, point_a, point_b):
self.vessel.setpos(point_a[0], point_a[1])
self.vessel.pendown()
self.vessel.setpos(point_b[0], point_b[1])
self.vessel.penup()
def plot_goal(self, point, factor):
turtle.speed(0)
turtle.setpos(point[0] - factor, point[1] - factor)
turtle.pendown()
turtle.fillcolor('red')
turtle.begin_fill()
turtle.setpos(point[0] - factor, point[1] + factor)
turtle.setpos(point[0] + factor, point[1] + factor)
turtle.setpos(point[0] + factor, point[1] - factor)
turtle.end_fill()
turtle.penup()
def plot_boundary(self, points_list):
turtle.speed(0)
turtle.setpos(points_list[0][0], points_list[0][1])
turtle.pendown()
turtle.fillcolor('blue')
turtle.begin_fill()
for point in points_list:
turtle.setpos(point[0], point[1])
turtle.end_fill()
turtle.penup()
def freeze_scream(self, ):
turtle.mainloop()
def end_episode(self, ):
self.vessel.penup()
self.rudder.penup()
def restart_plot(self):
self.vessel.pendown()
if __name__ == '__main__':
viewer = Viewer()
viewer.plot_guidance_line((0, 0), (500, 0))
viewer.plot_position(100, 20, 20, 10)
viewer.freeze_scream()
``` |
{
"source": "jmp/farbfeld",
"score": 2
} |
#### File: jmp/farbfeld/test_farbfeld.py
```python
import io
import unittest
import farbfeld
class ReadTest(unittest.TestCase):
def test_read_empty_data(self):
self.assertRaises(
farbfeld.InvalidFormat,
farbfeld.read,
io.BytesIO(b''),
)
def test_read_header_only(self):
self.assertRaises(
farbfeld.InvalidFormat,
farbfeld.read,
io.BytesIO(b'farbfeld'),
)
def test_read_wrong_header_no_data(self):
self.assertRaises(
farbfeld.InvalidFormat,
farbfeld.read,
io.BytesIO(b'dlefbraf'),
)
def test_read_correct_data_wrong_header(self):
self.assertRaises(farbfeld.InvalidFormat, farbfeld.read, io.BytesIO(
b'dlefbraf' # magic
b'\x00\x00\x00\x01' # width
b'\x00\x00\x00\x01' # height
b'\x01\x02\x03\x04\x05\x06\x07\x08' # RGBA
))
def test_read_valid_but_no_pixels(self):
pixels = farbfeld.read(io.BytesIO(
b'farbfeld' # magic
b'\x00\x00\x00\x00' # width
b'\x00\x00\x00\x00' # height
))
self.assertListEqual([], pixels)
def test_read_valid_but_too_few_pixels(self):
self.assertRaises(
farbfeld.InvalidFormat,
farbfeld.read,
io.BytesIO(
b'farbfeld' # magic
b'\x00\x00\x00\x01' # width
b'\x00\x00\x00\x02' # height
b'\xff\xff\xff\xff\xff\xff\xff\xff' # RGBA
),
)
def test_read_valid_but_too_many_pixels(self):
self.assertRaises(
farbfeld.InvalidFormat,
farbfeld.read,
io.BytesIO(
b'farbfeld' # magic
b'\x00\x00\x00\x01' # width
b'\x00\x00\x00\x01' # height
b'\xff\xff\xff\xff\xff\xff\xff\xff' # RGBA
b'\xff\xff\xff\xff\xff\xff\xff\xff' # RGBA
),
)
def test_read_zero_width(self):
pixels = farbfeld.read(io.BytesIO(
b'farbfeld' # magic
b'\x00\x00\x00\x00' # width
b'\x00\x00\x00\x01' # height
))
self.assertListEqual([], pixels)
def test_read_zero_height(self):
pixels = farbfeld.read(io.BytesIO(
b'farbfeld' # magic
b'\x00\x00\x00\x01' # width
b'\x00\x00\x00\x00' # height
))
self.assertListEqual([], pixels)
def test_read_incomplete_pixel(self):
self.assertRaises(
farbfeld.InvalidFormat,
farbfeld.read,
io.BytesIO(
b'farbfeld' # magic
b'\x00\x00\x00\x01' # width
b'\x00\x00\x00\x01' # height
b'\x00\x20\x00\x40\x00\x80\x00' # RGBA
),
)
def test_read_single_pixel(self):
pixels = farbfeld.read(io.BytesIO(
b'farbfeld' # magic
b'\x00\x00\x00\x01' # width
b'\x00\x00\x00\x01' # height
b'\x00\x20\x00\x40\x00\x80\x00\xff' # RGBA
))
self.assertListEqual([[[32, 64, 128, 255]]], pixels)
def test_read_two_by_two(self):
pixels = farbfeld.read(io.BytesIO(
b'farbfeld' # magic
b'\x00\x00\x00\x02' # width
b'\x00\x00\x00\x02' # height
b'\x00\x01\x00\x02\x00\x03\x00\x04' # RGBA
b'\x00\x05\x00\x06\x00\x07\x00\x08' # RGBA
b'\x00\x09\x00\x0a\x00\x0b\x00\x0c' # RGBA
b'\x00\x0d\x00\x0e\x00\x0f\x00\x10' # RGBA
))
self.assertListEqual([
[[1, 2, 3, 4], [5, 6, 7, 8]],
[[9, 10, 11, 12], [13, 14, 15, 16]],
], pixels)
class WriteTest(unittest.TestCase):
def test_write_invalid_data(self):
self.assertRaises(ValueError, farbfeld.write, io.BytesIO(), None)
def test_write_zero_height(self):
file = io.BytesIO()
farbfeld.write(file, [])
file.seek(0)
self.assertEqual(
file.read(),
b'farbfeld' # magic
b'\x00\x00\x00\x00' # width
b'\x00\x00\x00\x00' # height
)
def test_write_zero_width(self):
file = io.BytesIO()
farbfeld.write(file, [[]])
file.seek(0)
self.assertEqual(
file.read(),
b'farbfeld' # magic
b'\x00\x00\x00\x00' # width
b'\x00\x00\x00\x01' # height
)
def test_write_incomplete_pixels(self):
self.assertRaises(ValueError, farbfeld.write, io.BytesIO(), [[[]]])
def test_write_too_few_components(self):
self.assertRaises(
ValueError,
farbfeld.write,
io.BytesIO(),
[[[1, 2, 3]]],
)
def test_write_too_many_components(self):
self.assertRaises(
ValueError,
farbfeld.write,
io.BytesIO(),
[[[1, 2, 3, 4, 5]]],
)
def test_write_component_out_of_range(self):
self.assertRaises(
ValueError,
farbfeld.write,
io.BytesIO(),
[[[0, 0, 0, -1]]],
)
self.assertRaises(
ValueError,
farbfeld.write,
io.BytesIO(),
[[[0, 0, 0, 65536]]],
)
def test_write_component_within_range(self):
try:
farbfeld.write(io.BytesIO(), [[[0, 0, 0, 0]]])
farbfeld.write(io.BytesIO(), [[[32767, 32767, 32767, 32767]]])
farbfeld.write(io.BytesIO(), [[[65535, 65535, 65535, 65535]]])
except ValueError:
self.fail('ValueError raised unexpectedly')
def test_write_invalid_component(self):
self.assertRaises(
ValueError,
farbfeld.write,
io.BytesIO(),
[[[0, 0, 0, 0.5]]],
)
self.assertRaises(
ValueError,
farbfeld.write,
io.BytesIO(),
[[[0, 0, 0, '1']]],
)
self.assertRaises(
ValueError,
farbfeld.write,
io.BytesIO(),
[[[0, 0, 0, None]]],
)
def test_write_inconsistent_width(self):
self.assertRaises(ValueError, farbfeld.write, io.BytesIO(), [[
[0, 0, 0, 0], [0, 0, 0, 0], # first row, two pixels
], [
[0, 0, 0, 0], # second row, only one pixel
]])
def test_write_single_pixel(self):
file = io.BytesIO()
farbfeld.write(file, [[[32, 64, 128, 255]]])
file.seek(0)
self.assertEqual(
file.read(),
b'farbfeld' # magic
b'\x00\x00\x00\x01' # width
b'\x00\x00\x00\x01' # height
b'\x00\x20\x00\x40\x00\x80\x00\xff' # RGBA
)
def test_write_two_by_two(self):
file = io.BytesIO()
farbfeld.write(file, [
[[1, 2, 3, 4], [5, 6, 7, 8]],
[[9, 10, 11, 12], [13, 14, 15, 16]],
])
file.seek(0)
self.assertEqual(
file.read(),
b'farbfeld' # magic
b'\x00\x00\x00\x02' # width
b'\x00\x00\x00\x02' # height
b'\x00\x01\x00\x02\x00\x03\x00\x04' # RGBA
b'\x00\x05\x00\x06\x00\x07\x00\x08' # RGBA
b'\x00\x09\x00\x0a\x00\x0b\x00\x0c' # RGBA
b'\x00\x0d\x00\x0e\x00\x0f\x00\x10' # RGBA
)
``` |
{
"source": "jmp/fast1",
"score": 2
} |
#### File: app/ports/api.py
```python
from typing import Optional, Protocol
from app.domain.circuit import Circuit
class GetCircuitUseCase(Protocol):
def get_circuit(self, ref: str) -> Optional[Circuit]:
raise NotImplementedError
```
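Because `GetCircuitUseCase` is a `typing.Protocol`, any class with a matching `get_circuit` method satisfies it structurally, with no inheritance required. A minimal sketch (the `FixedCircuitService` name and its stub behavior are illustrative only):
```python
from typing import Optional

from app.domain.circuit import Circuit
from app.ports.api import GetCircuitUseCase


class FixedCircuitService:
    """Toy implementation that never finds a circuit."""

    def get_circuit(self, ref: str) -> Optional[Circuit]:
        return None


use_case: GetCircuitUseCase = FixedCircuitService()  # accepted by static type checkers
```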
#### File: tests/acceptance/test_circuits.py
```python
from typing import Any
from fastapi.testclient import TestClient
from pytest import mark
from pytest_bdd import given, scenario, then, when
from app.main import app
from tests.acceptance.conftest import no_database
from tests.fixtures.circuits import monza
@mark.skipif(
no_database(),
reason="Acceptance tests cannot be run without a database.",
)
@mark.acceptance
@scenario(
"features/circuits.feature",
"Getting the details of a single circuit",
) # type: ignore
def test_getting_single_circuit_details() -> None:
pass
@given("I'm an API client", target_fixture="client") # type: ignore
def client() -> TestClient:
return TestClient(app)
@given("I have a reference to a circuit", target_fixture="ref") # type: ignore
def ref() -> str:
return monza.ref
@when("I submit a request for the circuit", target_fixture="response") # type: ignore
def submit_request(client: TestClient, ref: str) -> Any:
return client.get(f"/circuits/{ref}")
@then("I should receive the circuit details") # type: ignore
def circuit_details_received(response: Any) -> None:
assert "ref" in response.json()
```
#### File: adapters/api/conftest.py
```python
from typing import Iterable, Iterator, Optional
from _pytest.fixtures import fixture
from starlette.testclient import TestClient
from app.adapters.api.dependencies import Dependencies, get_dependencies
from app.adapters.spi.session import SessionLocal
from app.domain.circuit import Circuit
from app.main import app
from app.ports.spi import GetCircuitPort
from app.services.circuit_service import CircuitService
from tests.fixtures.circuits import monza
def _get_fake_dependencies() -> Iterator[Dependencies]:
class InMemoryCircuitRepository(GetCircuitPort):
def get_circuit(self, ref: str) -> Optional[Circuit]:
return monza if ref == monza.ref else None
circuit_service = CircuitService(InMemoryCircuitRepository())
session = SessionLocal()
try:
yield Dependencies(session=session, get_circuit_use_case=circuit_service)
finally:
session.close()
@fixture
def client() -> Iterable[TestClient]:
original_dependencies = get_dependencies
app.dependency_overrides[original_dependencies] = _get_fake_dependencies
yield TestClient(app)
app.dependency_overrides[original_dependencies] = original_dependencies
```
#### File: adapters/api/test_circuits.py
```python
from fastapi.testclient import TestClient
from pytest import mark
@mark.integration
def test_get_circuit(client: TestClient) -> None:
response = client.get("/circuits/monza")
assert response.status_code == 200
assert response.json() == {
"ref": "monza",
"name": "<NAME>",
"location": "Monza",
"country": "Italy",
"latitude": 45.6156,
"longitude": 9.28111,
"altitude": 162,
"url": "http://en.wikipedia.org/wiki/Autodromo_Nazionale_Monza",
}
@mark.integration
def test_get_circuit_returns_404_if_not_found(client: TestClient) -> None:
response = client.get("/circuits/does_not_exist")
assert response.status_code == 404
```
#### File: adapters/spi/conftest.py
```python
from decimal import Decimal
from typing import Iterator
from _pytest.fixtures import fixture
from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker
from sqlalchemy.pool import StaticPool
from app.adapters.spi.entities.circuit_entity import CircuitEntity
from app.adapters.spi.entities.entity import Entity
from app.adapters.spi.session import db_session
_engine = create_engine(
"sqlite:///:memory:",
connect_args={"check_same_thread": False},
poolclass=StaticPool,
)
_SessionLocal = sessionmaker(autoflush=False, bind=_engine)
@fixture(scope="session")
def session() -> Iterator[Session]:
Entity.metadata.create_all(bind=_engine)
session = _SessionLocal()
entity = CircuitEntity(
circuit_id=14,
circuit_ref="monza",
name="Autodromo Nazionale di Monza",
location="Monza",
country="Italy",
lat=Decimal("45.6156"),
lng=Decimal("9.28111"),
alt=162,
url="http://en.wikipedia.org/wiki/Autodromo_Nazionale_Monza",
)
session.add(entity)
session.commit()
token = db_session.set(session)
yield session
db_session.reset(token)
session.close()
Entity.metadata.drop_all(bind=_engine)
``` |
{
"source": "jmphil09/mario_rl",
"score": 3
} |
#### File: jmphil09/mario_rl/GameRunner_v3.py
```python
import glob
import retro
import numpy as np
import cv2
import neat
import pickle
import time
from multiprocessing import Pool
from pathlib import Path
from nes_py.wrappers import JoypadSpace
import gym_super_mario_bros
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
from pprint import saferepr
class GameRunner:
"""
This version of the GameRunner is for running multiple levels simultaneously.
General GameRunner class to have a NN generate a model to beat a game.
Args:
num_threads (int): Number of cpu threads to use
show_game (bool): Render the frames in real-time while training
show_nn_view (bool): Show what the Neural Network "sees" after the frame is processed
level_end_score (int): The maximum fitness score to cause the training to end
convolution_weight (int): Factor used to scale the image down before feeding it to the Neural Network
config_file_name (str): The prefix to use for the config file
worker_start_num (int): The cpu core number to start with
"""
def __init__(
self,
num_threads=1,
show_game=False,
show_nn_view=False,
level_end_score=3186,
convolution_weight=8,
config_file_name='config',
worker_start_num=0,
max_generation=200,
data_folder='data'
):
self.num_threads = num_threads
self.show_game = show_game
self.show_nn_view = show_nn_view
self.level_end_score = level_end_score
self.convolution_weight = convolution_weight
self.config_file_name = config_file_name
self.worker_start_num = worker_start_num
self.max_generation = max_generation
self.data_folder = data_folder
self.fitness_scores_for_generation = []
self.fitness_dict = {}
self.generation = 0
def run_all_threads(self):
p = Pool(processes=self.num_threads)
worker_range = range(self.worker_start_num, self.worker_start_num + self.num_threads)
worker_levels = ['SuperMarioBros-' + str(world) + '-' + str(level) + '-v0' for world in range(1, 9) for level in range(1, 5)]
#print('=========')
#print(tuple(zip(worker_range, worker_levels)))
p.map(self.run, tuple(zip(worker_range, worker_levels)))
def run_one_worker(self, worker_num):
self.run(worker_num)
def one_hot_encode(self, ls):
return ls.index(max(ls))
'''
Outline:
- Load the "main" NN model - can use a config file or base model for this
- Create a "random" set of genomes/species of the model
Until finished:
- Use the set of genomes/species
- Create a tuple of the form (all_levels, all_NN_variants)
- In parallel - run 1 episode on each worker (worker=core) over one of the tuples
- An episode gives a reward
- After all episodes are complete, gather results of the form dict: {NN_variant:(average of rewards on all levels)}
- Run the "breeding" algorithm on the results to create a new set of genomes/species
'''
'''
TODO:
- Implement a function to show current progress. Do this by loading the most current model and running the highest
score genome/species for each level.
- Implement automated hyperparameter search (similar to ConfigGenerator class)
'''
def run_episode():
pass
def run(self, map_args):
worker_num = map_args[0]
level = map_args[1]
env = gym_super_mario_bros.make(level)
env = JoypadSpace(env, COMPLEX_MOVEMENT)
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
obs = env.reset()
#print(len(obs))
env.action_space.sample()
input_x, input_y, input_colors = env.observation_space.shape
#print('Original (x,y): ({},{})'.format(input_x, input_y))
input_x = 28#int(input_x/self.convolution_weight)
input_y = 30#int(input_y/self.convolution_weight)
#print('New (x,y): ({},{})'.format(input_x, input_y))
net = neat.nn.recurrent.RecurrentNetwork.create(genome, config)
current_max_fitness = 0
fitness_current = 0
frame = 0
frame_counter = 0
done = False
while not done:
if self.show_game:
env.render()
frame += 1
obs = cv2.resize(obs, (input_x, input_y))
obs = cv2.cvtColor(obs, cv2.COLOR_BGR2GRAY)
#cv2.imshow('image', obs)
#cv2.waitKey(0)
obs = np.reshape(obs, (input_x, input_y))
#cv2.imshow('image', obs)
#cv2.waitKey(0)
#Reshape input to a 1-d list.
imgarray = [num for row in obs for num in row]
#print(imgarray)
#There may be an issue with imgarray, the nn_output is always 0
nn_output = net.activate(imgarray)
#print('=================================================')
#print('HELLO')
#print('=================================================')
#print(nn_output)
#if nn_output != [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]:
# print(nn_output)
nn_output = self.one_hot_encode(nn_output)
#print(env.step(nn_output))
obs, reward, done, info = env.step(nn_output)
#print(reward)
#This reward function gives 1 point every time xscrollLo increases
#if reward > 0:
fitness_current += reward
#print('fitness_current, current_max_fitness: ({}, {})'.format(fitness_current, current_max_fitness))
#Replace the RHS with the xscrollLo value at the end of the level
#or end of the game
if fitness_current > self.level_end_score:
fitness_current += 100000
done = True
if fitness_current > current_max_fitness:
current_max_fitness = fitness_current
frame_counter = 0
else:
frame_counter += 1
if done or frame_counter == 250:
done = True
#TODO: try genome.fitness = float(fitness_current)
genome.fitness = float(fitness_current)
#genome.fitness = float(max(fitness_current, 0))
assert isinstance(genome.fitness, (int, float)), "Genome.fitness ({0!s}): type {1!s}, not int/float".format(saferepr(genome.fitness), type(genome.fitness))
#print('genome.fitness: {}'.format(genome.fitness))
self.fitness_scores_for_generation.append(fitness_current)
fitness_list_filename = Path('{}/{}/worker-{}-fitness_list.pkl'.format(self.data_folder, self.config_file_name, worker_num))
try:
with open(fitness_list_filename, 'rb') as input_file:
self.fitness_dict = pickle.load(input_file)
except:
self.fitness_dict = {}
with open(fitness_list_filename, 'wb') as output:
self.fitness_dict[self.generation] = self.fitness_scores_for_generation
pickle.dump(self.fitness_dict, output, 1)
self.fitness_dict = {}
self.fitness_scores_for_generation = []
self.generation += 1
```
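The Outline docstring in GameRunner_v3.py above describes pairing every level with every network variant, running the episodes in parallel, and gathering a dict of the form {NN_variant: average reward over all levels} before breeding. A minimal sketch of that evaluate-and-average step, with a hypothetical `run_episode` helper standing in for the emulator- and NEAT-specific parts:
```python
from itertools import product
from multiprocessing import Pool
from statistics import mean


def run_episode(args):
    level, genome_id = args
    # Hypothetical stand-in: run one episode of the given genome on `level`
    # and return its reward; the real reward would come from the emulator.
    return genome_id, 0.0


def evaluate_generation(levels, genome_ids, num_workers):
    # Run every (level, genome) pair in parallel, one episode each.
    with Pool(processes=num_workers) as pool:
        results = pool.map(run_episode, product(levels, genome_ids))
    rewards = {}
    for genome_id, reward in results:
        rewards.setdefault(genome_id, []).append(reward)
    # {NN_variant: average of rewards on all levels}, as in the Outline
    return {genome_id: mean(scores) for genome_id, scores in rewards.items()}
```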
#### File: jmphil09/mario_rl/new_test.py
```python
from ConfigGenerator import ConfigGenerator
from GameRunner_v2 import GameRunner
from FitnessPlot_v2 import FitnessPlot
import glob
from pathlib import Path
import time
# DONE - TODO: get the "best" config. For now just randomize
# DONE - Task1: use existing code to plot best config file results. Test the top ~5 and pick the fastest one.
#Task2: refactor GameRunner, consider writing server class similar to carm server
'''
Using "server style" and 16 raspberry pi 4's
1) When a pi comes online, it makes a request to the server
2) The server has 32 mario levels, it "assigns" a level to the pi (more accurately, to a pi cpu core)
"assigns", means it has the pi run that level
3) The server stores the Neural Network in memory. The pi downloads the network when it is assigned a level.
4) After N runs on the level, the pi syncs its Neural Network with the main network (need to come up with strategy for weighting the networks)
'''
#config = ConfigGenerator(filename='config_test')
#config.randomize()
#config.write_all_configs(0,1)
runner = GameRunner(
num_threads=1,
show_game=False,
level_end_score=3186,
convolution_weight=8,
config_file_name='config_test',
worker_start_num=0,
max_generation=3,
data_folder='data_test'
)
#runner.run_all_threads()
def get_top_results(min_result=500):
folders = glob.glob(str(Path('hyperparam_data/*')))
result = {}
for folder in folders:
#print(folder.split('/')[1])
prefix = 'data'
prefix = folder + '/' #'hyperparam_data/1565237431/'
plotter = FitnessPlot(config_file_name='config', worker_start_num=0, num_workers=16, folder_prefix=prefix)
#worker_dict = plotter.create_worker_dict()
worker_dict = plotter.return_avg_worker_dict()
#print(worker_dict[9])
if worker_dict[9] > min_result:
result[folder.split('/')[1]] = worker_dict[9]
#plotter.plot_workers_as_average()
return result
def get_top_times(top_results):
result = {}
for ts_path in top_results.keys():
prefix = str(Path('hyperparam_data/' + ts_path))
#print(prefix)
start_time = time.time()
runner = GameRunner(
num_threads=1,
show_game=False,
level_end_score=3186,
convolution_weight=8,
config_file_name=prefix + '/config',
worker_start_num=0,
max_generation=10,
data_folder='test_' + ts_path
)
runner.run_all_threads()
end_time = time.time()
print('Runtime: {0:01f} seconds'.format(end_time - start_time))
result[ts_path] = ('{0:01f} seconds'.format(end_time - start_time), top_results[ts_path])
return result
top_results = get_top_results(min_result=800)
#print(top_results)
times = get_top_times(top_results)
print('RUNTIMES, SCORES')
print(times)
#print('SCORES')
#print(top_results)
'''
RUNTIMES, SCORES
{
'1566017738': ('353.255976 seconds', 809.4375),
'1565252442': ('4239.686366 seconds', 808.0625),
'1565778890': ('3603.602892 seconds', 814.25),
'1565920874': ('6430.729112 seconds', 924.6875),
'1566010103': ('5932.586372 seconds', 981.75),
'1566080683': ('377.955044 seconds', 819.875),
'1566019329': ('403.268193 seconds', 854.125),
'1565775270': ('4813.206977 seconds', 1043.875),
'1565683053': ('4678.902288 seconds', 862.0625)}
'1566017738': ('353.255976 seconds', 809.4375),
'1566080683': ('377.955044 seconds', 819.875),
'1566019329': ('403.268193 seconds', 854.125),
'''
```
#### File: ongoing_projects/config_neatevolve/HyperparamRunner.py
```python
import glob
import retro
import numpy as np
import cv2
import neat
import pickle
from multiprocessing import Pool
from pathlib import Path
def run(self, worker_num):
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
net = neat.nn.recurrent.RecurrentNetwork.create(genome, config)
current_max_fitness = 0
fitness_current = 0
frame_counter = 0
done = False
while not done:
#Reshape input to a 1-d list.
#TODO: use config values here
imgarray = [num for row in obs for num in row]
nn_output = net.activate(imgarray)
obs, reward, done, info = env.step(nn_output)
#TODO: generate output by running NN
#reward = output from NN's
#This reward function gives 1 point every time xscrollLo increases
fitness_current += reward
if fitness_current > current_max_fitness:
current_max_fitness = fitness_current
frame_counter = 0
else:
frame_counter += 1
if done or frame_counter == 250:
done = True
genome.fitness = fitness_current
#generate random config
config = neat.Config(
neat.DefaultGenome,
neat.DefaultReproduction,
neat.DefaultSpeciesSet,
neat.DefaultStagnation,
self.config_file_name
)
p = neat.Population(config)
print('No population checkpoint found, creating new population.')
#Show reporting statistics
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
winner = p.run(eval_genomes, n=self.max_generation)
#Save the winner
pickle_name = Path('data/{}/complete_models/winner{}.pkl'.format(self.config_file_name, worker_num))
pickle_dir = pickle_name.parent
pickle_dir.mkdir(parents=True, exist_ok=True)
with open(pickle_name, 'wb') as output:
pickle.dump(winner, output, 1)
``` |
{
"source": "jmphil09/starcraft2_ai",
"score": 3
} |
#### File: jmphil09/starcraft2_ai/convolutional_agent.py
```python
import numpy as np
from pysc2.lib import actions
from keras.layers import Input, Flatten, Dense, Dropout, Conv2D, concatenate, LSTM, Softmax
from keras.models import Model, model_from_json
import sc2_agent as sc2Agent
class ConvAgent ( sc2Agent.Agent ):
def __init__(self, envParams ):
self.welcomeStr = 'CONV-AGENT'
self.learningStrategyStr = 'backprop'
self.architectureStr = 'im a simple conv agent'
self.envParams = envParams
self.policyInds = {}
self.bringup()
return
def build_model ( self ):
# screen/visual features
spatialInput = Input( shape = ( self.envParams['screenChannelsRetained'] \
* self.envParams['nStackedFrames'],
self.envParams['screenResX'],
self.envParams['screenResY'] ), dtype=np.float32 )
# non-visual features (e.g., supply, current reward, cumulative score )
nonSpatialInput = Input ( shape = ( self.envParams['nonSpatialInputDimensions'] \
* self.envParams['nStackedFrames'],)
, dtype=np.float32)
''' feature building convolutional layers
these will hold a shared representation used for both action and value selection'''
firstConv = Conv2D ( filters = 32,
kernel_size = 4,
activation = 'relu',
padding = 'same',
data_format = 'channels_first',
dilation_rate = 1, strides = 1 ) ( spatialInput )
secondConv = Conv2D ( filters = 32,
kernel_size = 4,
activation = 'relu',
padding = 'same',
data_format = 'channels_first',
dilation_rate = 1, strides = 1 ) ( firstConv )
''' spatial action judgments will be made with two convolutional layers
this pair of filters (1 per coordinate) will be softmax transformed so as to contain...
...a probabilistic representation of the choice for the coordinate arguments
the first point in the action argument ( x1, y1 ) will be sampled from firstCoordinateConv
the second point in the action argument ( x2, y2 ) will be sampled from secondCoordinateConv
'''
firstCoordinateConv = Conv2D( 1, 3, activation = 'relu',
padding = 'same',
data_format = 'channels_first') (secondConv)
secondCoordinateConv = Conv2D(1, 3, activation = 'relu',
padding = 'same',
data_format = 'channels_first') (secondConv)
# flatten and softmax
flattenedFirstCoordinateConv = Flatten() (firstCoordinateConv)
softmaxFlattenedFirstCoordinateConv = Softmax()(flattenedFirstCoordinateConv)
flattenedSecondCoordinateConv = Flatten() (secondCoordinateConv)
softmaxFlattenedSecondCoordinateConv = Softmax()(flattenedSecondCoordinateConv)
''' linear and non-linear controllers -- used to make value and non-spatial action judgements '''
flattenedVisuaFeatures = Flatten() ( secondConv )
mergeSpatialNonSpatial = concatenate( [ nonSpatialInput, flattenedVisuaFeatures ], axis = 1)
linearControllerLayer1 = Dense ( 512, activation = 'linear' ) ( mergeSpatialNonSpatial )
linearControllerLayer2 = Dense ( 512, activation = 'linear' ) ( linearControllerLayer1 )
linearControllerLayer3 = Dense ( 512, activation = 'linear' ) ( linearControllerLayer2 )
nonlinearControllerLayer1 = Dense ( 512, activation = 'tanh' ) ( mergeSpatialNonSpatial )
nonlinearControllerLayer2 = Dense ( 512, activation = 'tanh' ) ( nonlinearControllerLayer1 )
nonlinearControllerLayer3 = Dense ( 512, activation = 'tanh' ) ( nonlinearControllerLayer2 )
linearNonlinearConcat = concatenate( [ linearControllerLayer3, nonlinearControllerLayer3 ] )
# outputs
value = Dense ( 1, activation = 'linear' ) ( linearNonlinearConcat )
actionID = Dense ( self.envParams['prunedActionSpaceSize'],
activation = 'softmax' ) ( linearNonlinearConcat )
actionModifierQueue = Dense ( 1, activation = 'sigmoid' ) ( linearNonlinearConcat )
actionModifierSelect = Dense ( 1, activation = 'sigmoid' ) ( linearNonlinearConcat )
finalLayerConcat = concatenate( [ value,
actionID,
actionModifierQueue,
actionModifierSelect,
softmaxFlattenedFirstCoordinateConv,
softmaxFlattenedSecondCoordinateConv], axis = 1)
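# The single concatenated output vector is laid out as:
# [ value | action-id distribution | queue modifier | select modifier |
#   flattened coordinate-1 map | flattened coordinate-2 map ]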
self.model = Model ( inputs = [ spatialInput, nonSpatialInput ], outputs = [ finalLayerConcat ] )
self.policySize = self.model.layers[-1].output_shape[1]
self.policyInds = {}
# book-keeping for subsequent parsing of model output
self.policyInds['value'] = 0
self.policyInds['actionDistStart'] = 1
self.policyInds['actionDistEnd'] = self.policyInds['actionDistStart'] \
+ len(self.envParams['allowedActionIDs'])
self.policyInds['actionModifierQueue'] = self.policyInds['actionDistEnd']
self.policyInds['actionModifierSelect'] = self.policyInds['actionModifierQueue'] + 1
self.policyInds['actionCoord1Start'] = self.policyInds['actionModifierSelect'] + 1
self.policyInds['actionCoord1End'] = self.policyInds['actionCoord1Start'] \
+ self.envParams['screenResX'] * self.envParams['screenResY']
self.policyInds['actionCoord2Start'] = self.policyInds['actionCoord1End']
self.policyInds['actionCoord2End'] = self.policyInds['actionCoord2Start'] \
+ self.envParams['screenResX'] * self.envParams['screenResY']
self.model.compile ( optimizer = 'adam', loss = sc2Agent.compute_trajectory_loss)
return self.model
```
#### File: jmphil09/starcraft2_ai/sc2_agent.py
```python
import numpy as np
from pysc2.lib import actions
import tensorflow as tf
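# Custom Keras loss: the trajectory loss is computed outside the model and fed in
# as y_true, so this just returns its mean; y_pred is added with a zero weight,
# presumably only to keep the prediction tensor referenced in the graph.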
def compute_trajectory_loss ( y_true, y_pred ):
combinedLoss = tf.reduce_mean(y_true) - 0 * tf.reduce_mean(y_pred[-1])
return combinedLoss
class Agent():
def __init__(self, envParams ):
self.welcomeStr = 'PLACEHOLDER-AGENT'
self.learningStrategyStr = 'none'
self.architectureStr = 'none'
self.envParams = envParams
self.bringup()
def bringup ( self ):
self.hello_world()
self.model = self.build_model()
self.initialize_placeholders()
return
def initialize_placeholders ( self ):
nEnvs = self.envParams['simultaneousEnvironments']
nSteps = self.envParams['nTrajectorySteps']
nChannels = self.envParams['screenChannelsRetained'] \
* self.envParams['nStackedFrames']
nNonSpatialInputs = self.envParams['nonSpatialInputDimensions'] \
* self.envParams['nStackedFrames']
xRes = self.envParams['screenResX']
yRes = self.envParams['screenResY']
self.rewards = np.zeros( (nEnvs, nSteps+1), dtype=np.float32)
self.valuePredictions = np.zeros( (nEnvs, nSteps+1), dtype=np.float32)
self.nStepReturns = np.zeros((nEnvs, nSteps), dtype=np.float32)
self.advantages = np.zeros((nEnvs, nSteps), dtype=np.float32)
self.logProbs = np.zeros((nEnvs, nSteps), dtype=np.float32)
self.entropy = np.zeros((nEnvs, nSteps), dtype=np.float32)
self.loss = np.zeros((nEnvs, nSteps), dtype=np.float32)
# policy mask needs to keep track of which action arguments are active
self.policyMask = np.zeros( ( nEnvs, self.policySize ), dtype = np.float32)
# Initialize placeholders for spatial and non-spatial [ stacked] trajectory observations
self.nEnvTrajectoryBatch = np.zeros( ( nEnvs, nSteps, nChannels, xRes, yRes ), dtype=np.float32 )
self.nEnvOneStepBatch = np.zeros( ( nEnvs, 1, nChannels, xRes, yRes ), dtype=np.float32 )
# reward, cumulative score, player supply, enemy supply, action chosen, actionArgs
self.nEnvTrajectoryBatchNonSpatial = np.zeros( ( nEnvs, nSteps, nNonSpatialInputs, ), dtype=np.float32 )
self.nEnvOneStepBatchNonSpatial = np.zeros( ( nEnvs, 1, nNonSpatialInputs, ), dtype=np.float32 )
# say hello & share high level architecture & learning strategy
def hello_world( self ):
print('hi I\'m the %s\n | architecture: %s\n | learning strategy: %s'
% (self.welcomeStr, self.architectureStr, self.learningStrategyStr))
# define model architecture
def build_model( self ):
return None
def model_summary( self ):
if self.model is not None:
return self.model.summary()
else:
return 'i have no model, i go where the randomness takes me'
def choose_action ( self, actionProb, eGreedy = .9 ):
if np.random.random() > eGreedy:
if self.envParams['debugFlag']:
print('!venturing out in action selection')
actionID = np.random.choice( np.array( self.envParams['allowedActionIDs'], dtype=np.int ),
size=1, p=np.array(actionProb) )
actionID = actionID[0]
else:
if self.envParams['debugFlag']:
print('staying greedy in action selection')
actionID = self.envParams['allowedActionIDs'][ np.argmax( actionProb ) ]
return actionID
def normalize_array( self, arrayInput ):
return (arrayInput - arrayInput.min()) / (arrayInput - arrayInput.min()).sum()
def mask_unusable_actions ( self, availableActions, actionProbabilityDistribution ) :
for iAction in range( len(actionProbabilityDistribution) ):
if self.envParams['allowedActionIDs'][iAction] not in availableActions:
actionProbabilityDistribution[iAction] = 0
if not np.isclose( actionProbabilityDistribution.sum(), 1.00000 ):
actionProbabilityDistribution = self.normalize_array( actionProbabilityDistribution )
return actionProbabilityDistribution
def choose_coordinate ( self, coordinateArray, eGreedy = .9 ):
if np.random.random() > eGreedy:
if self.envParams['debugFlag']:
print('!venturing out in coordinate selection')
availableCoordinates = list( range( self.envParams['screenResX'] * self.envParams['screenResY'] ))
chosenIndex = np.random.choice( np.array( availableCoordinates, dtype=np.int ),
size=1, p = np.array(coordinateArray) )[0]
else:
if self.envParams['debugFlag']:
print('staying greedy in coordinate selection')
chosenIndex = np.argmax( coordinateArray )
maxCoord = np.unravel_index( chosenIndex, (self.envParams['screenResX'], self.envParams['screenResY']))
return maxCoord[0], maxCoord[1]
def sample_and_mask (self, obs, batchedOutputs ):
batchSelectedActionFunctionCalls = []
batchSelectedActionIDs = []
batchSelectedActionIndexes = []
batchSelectedActionArguments = []
batchSelectedActionModifiers = []
batchPredictedValues = []
batchActionArguments = []
for iEnv in range ( self.envParams['simultaneousEnvironments'] ):
policyIStart = self.policyInds['actionDistStart']
policyIEnd = self.policyInds['actionDistEnd']
point1IStart = self.policyInds['actionCoord1Start']
point1IEnd = self.policyInds['actionCoord1End']
point2IStart = self.policyInds['actionCoord2Start']
point2IEnd = self.policyInds['actionCoord2End']
# reset policy mask
self.policyMask[ iEnv, : ] = 0
actionProbabilityDistribution = self.mask_unusable_actions ( \
obs[iEnv].observation['available_actions'], \
batchedOutputs[iEnv][ policyIStart:policyIEnd ] )
actionId = self.choose_action ( actionProbabilityDistribution )
batchSelectedActionIDs += [ actionId ] # actionID
actionIndex = self.envParams['allowedActionIDs'].index( actionId )
self.policyMask[ iEnv, policyIStart:policyIEnd ] = 1
actionArguments = []
probabilisticPointMap1 = batchedOutputs[iEnv][point1IStart:point1IEnd]
probabilisticPointMap2 = batchedOutputs[iEnv][point2IStart:point2IEnd]
x1, y1 = self.choose_coordinate ( probabilisticPointMap1 )
x2, y2 = self.choose_coordinate ( probabilisticPointMap2 )
if self.envParams['allowedActionIDRequiresLocation'][actionIndex] == 1:
actionArguments = [ [ x1, y1 ]]
self.policyMask [ iEnv, point1IStart:point1IEnd ] = 1
elif self.envParams['allowedActionIDRequiresLocation'][actionIndex] == 2:
actionArguments = [[ x1, y1 ], [ x2, y2 ]]
self.policyMask [ iEnv, point1IStart:point1IEnd ] = 1
self.policyMask [ iEnv, point2IStart:point2IEnd ] = 1
# queued
if self.envParams['allowedActionIDRequiresModifier'][actionIndex] == 1:
queuedActionModifier = int( round( batchedOutputs[iEnv][ self.policyInds['actionModifierQueue']] ) ) # int
self.policyMask[ iEnv, self.policyInds['actionModifierQueue'] ] = 1
actionArguments.insert( 0, [ queuedActionModifier ] )
# select add
if self.envParams['allowedActionIDRequiresModifier'][actionIndex] == 2:
selectActionModifier = int( round( batchedOutputs[iEnv][ self.policyInds['actionModifierSelect']] ) ) # int
self.policyMask[ iEnv, self.policyInds['actionModifierSelect'] ] = 1
actionArguments.insert( 0, [ selectActionModifier ] )
batchSelectedActionFunctionCalls += [ actions.FunctionCall( actionId, actionArguments ) ]
batchActionArguments += [ actionArguments ]
if self.envParams['debugFlag']:
print('choosing action ' + str(actionId) + ', ' + str(actionArguments) )
return batchSelectedActionFunctionCalls, batchSelectedActionIDs, batchActionArguments
def batch_predict ( self, nEnvOneStepBatch, nEnvOneStepBatchNonSpatial ):
return self.model.predict( x = [ nEnvOneStepBatch, nEnvOneStepBatchNonSpatial ],
batch_size = self.envParams['simultaneousEnvironments'] )
def step_in_envs ( self, obs, localPipeEnds, batchSelectedActionFunctionCalls, batchSelectedActionIDs ):
for iEnv in range ( self.envParams['simultaneousEnvironments'] ):
selectedActionFunctionCall = batchSelectedActionFunctionCalls[iEnv]
selectedActionID = batchSelectedActionIDs[iEnv]
# ensure the agent action is possible
''' issue call '''
if selectedActionID in obs[iEnv].observation['available_actions']:
localPipeEnds[iEnv].send ( ( 'step', selectedActionFunctionCall ) )
obs[iEnv] = localPipeEnds[iEnv].recv()
# take no-op action and advance to game state where we can act again
else:
localPipeEnds[iEnv].send ( ('step', actions.FunctionCall( 0, [])) )
obs[iEnv] = localPipeEnds[iEnv].recv()
return obs, 0
def parse_rewards(self, obs):
return [ obs[iEnv].reward for iEnv in list(obs.keys()) ]
def inplace_update_trajectory_observations ( self, iStep, obs ): #, actionID, actionArguments ):
for iEnv in range( self.envParams['simultaneousEnvironments'] ):
newObs = obs[iEnv]
# spatial data
self.nEnvOneStepBatch[iEnv, 0, self.envParams['screenChannelsRetained']:, :, :] = \
self.nEnvOneStepBatch[iEnv, 0, 0:-self.envParams['screenChannelsRetained'], :, :]
self.nEnvOneStepBatch[iEnv, 0, 0:self.envParams['screenChannelsRetained'], :, :] = \
newObs.observation['screen'][self.envParams['screenChannelsToKeep'],:,:]
self.nEnvTrajectoryBatch[iEnv, iStep, :, :, : ] = self.nEnvOneStepBatch[iEnv, 0, :, :, :]
# non-spatial data
self.nEnvOneStepBatchNonSpatial[iEnv, 0, self.envParams['nonSpatialInputDimensions']:,] = \
self.nEnvOneStepBatchNonSpatial[iEnv, 0, 0:-self.envParams['nonSpatialInputDimensions'],]
self.nEnvOneStepBatchNonSpatial[iEnv, 0, 0:self.envParams['nonSpatialInputDimensions'],] = \
[ newObs.observation['game_loop'][0], # game time
newObs.observation['score_cumulative'][0], # cumulative score
newObs.reward, # prev reward
newObs.observation['player'][3], # used supply
np.sum(newObs.observation['multi_select'][:,2]), # total multi selected unit health
np.sum(newObs.observation['single_select'][:,2]), # total single selected unit health
0, # action
0, # action modifier
0, # action coordinate x1
0, # action coordinate y1
0, # action coordinate x2
0 ] # action coordinate y2
self.nEnvTrajectoryBatchNonSpatial[ iEnv, iStep, :,] = self.nEnvOneStepBatchNonSpatial[ iEnv, 0, :,]
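# Discounted n-step returns are computed backwards through the trajectory:
#   R_t = r_{t+1} + gamma * R_{t+1}, bootstrapped from the final value estimate,
# and advantages are A_t = R_t - V(s_t).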
def compute_returns_advantages ( self ):
nextRewards = self.rewards[:, 1:]
nextValues = self.valuePredictions[:, 1:]
for iEnv in range ( self.envParams['simultaneousEnvironments']):
# compute n-Step returns
for iStep in reversed( range ( self.envParams['nTrajectorySteps'] ) ) :
if iStep == ( self.envParams['nTrajectorySteps'] - 1 ) :
self.nStepReturns[ iEnv, iStep ] = nextValues[ iEnv, -1 ] # final return bootstrap
else:
self.nStepReturns[ iEnv, iStep ] = nextRewards[ iEnv, iStep ] + \
self.envParams['futureDiscountRate'] \
* self.nStepReturns[ iEnv, iStep + 1 ]
# prepare for training loop
self.advantages[iEnv, :] = self.nStepReturns[iEnv, :] - self.valuePredictions[iEnv, 0:-1]
def inplace_update_logProbs_and_entropy ( self, iStep, concatModelOutputNESS ) :
for iEnv in range ( self.envParams['simultaneousEnvironments'] ):
activePolicy = concatModelOutputNESS[iEnv] * self.policyMask[iEnv]
self.logProbs[iEnv, iStep] = np.sum( -1 * np.ma.log( activePolicy ).filled(0) )
self.entropy[iEnv, iStep] = -1 * np.sum( np.ma.log( activePolicy ).filled(0) * activePolicy )
def compute_loss (self):
self.compute_returns_advantages ( )
for iEnv in range ( self.envParams['simultaneousEnvironments'] ):
for iStep in range ( self.envParams['nTrajectorySteps'] ):
policyLoss = self.advantages[iEnv, iStep] * self.logProbs[iEnv, iStep]
valueLoss = np.square( self.nStepReturns[iEnv, iStep] - self.valuePredictions[iEnv, iStep] )/2.0
self.loss[ iEnv, iStep] = \
self.envParams['policyWeight'] * policyLoss \
+ self.envParams['valueWeight'] * valueLoss \
+ self.envParams['entropyWeight'] * self.entropy[iEnv, iStep]
if self.envParams['debugFlag']:
print( 'iEnv: ' + str(iEnv) + ' ; iStep: ' + str(iStep) )
print( '\t policyLossTerm: ' + str( policyLoss ))
print( '\t valueLossTerm: ' + str( valueLoss ))
print( '\t entropyLossTerm: ' + str( self.entropy[iEnv, iStep] ))
print( '\t totalLoss: ' + str(self.loss[ iEnv, iStep]))
if not np.isfinite( self.loss[ iEnv, iStep] ):
print( 'policyLossTerm: ' + str( policyLoss ))
print( 'valueLossTerm: ' + str( valueLoss ))
print( 'entropyLossTerm: ' + str( self.entropy[iEnv, iStep] ))
raise ValueError('non-finite loss encountered')
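# Collapse the leading (nEnvs, nSteps) dimensions into one batch dimension so
# whole trajectories from every environment can be passed to model.fit at once.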
def flatten_first_dimensions ( self, inputData ):
inputDataShape = inputData.shape
outputShape = tuple( [inputDataShape[0]*inputDataShape[1] ] + [ i for i in inputDataShape[2:] ] )
output = np.reshape( inputData, outputShape )
return output
def train ( self ):
spatialInputs = self.flatten_first_dimensions( self.nEnvTrajectoryBatch )
nonSpatialInputs = self.flatten_first_dimensions( self.nEnvTrajectoryBatchNonSpatial )
loss = self.flatten_first_dimensions( self.loss )
verbosityLevel = 0
if self.envParams['debugFlag']:
verbosityLevel = 1
self.model.fit( x = [ spatialInputs, nonSpatialInputs ], y = loss, verbose = verbosityLevel)
def model_checkpoint( self ):
# serialize model to JSON
model_json = self.model.to_json()
filePath = self.envParams['experimentDirectory'] + self.welcomeStr
with open(filePath + '_model.json', 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
self.model.save_weights(filePath + '_model.h5')
print(' saved model to disk ')
``` |
{
"source": "jmphilli/clover-api-python",
"score": 3
} |
#### File: cloverapi/services/app_service.py
```python
import requests
class AppService(object):
def __init__(self, api_authorization, api_url, merchant_id):
self.url = api_url.rstrip('/')
self.merchant_id = merchant_id
self.auth = api_authorization
# Apps
# BillingInfo
def get_merchant_billing_info(self, app_id):
# Define Payload
payload = {}
# Send Request
r = requests.get(
self.url + '/v3/apps/' + app_id + '/merchants/' + self.merchant_id + '/billing_info',
auth=self.auth,
timeout=30,
params=payload)
return r.json()
# Metereds
def create_app_billing_metered_event(self, app_id, metered_id):
# Define Payload
payload = {}
# Send Request
r = requests.post(
self.url + '/v3/apps/' + app_id + '/merchants/' + self.merchant_id + '/metereds/' + metered_id,
auth=self.auth,
timeout=30,
params=payload)
return r.json()
def get_app_metered_billing_event(self, app_id, metered_id):
# Define Payload
payload = {}
# Send Request
r = requests.get(
self.url + '/v3/apps/' + app_id + '/merchants/' + self.merchant_id + '/metereds/' + metered_id,
auth=self.auth,
timeout=30,
params=payload)
return r.json()
# Events
def get_app_billing_metered_event(self, app_id, metered_id, event_id):
# Define Payload
payload = {}
# Send Request
r = requests.get(
self.url + '/v3/apps/' + app_id + '/merchants/' + self.merchant_id + '/metereds/'
+ metered_id + '/events/' + event_id,
auth=self.auth,
timeout=30,
params=payload)
return r.json()
def delete_app_billing_metered_event(self, app_id, metered_id, event_id):
# Define Payload
payload = {}
# Send Request
r = requests.delete(
self.url + '/v3/apps/' + app_id + '/merchants/' + self.merchant_id + '/metereds/'
+ metered_id + '/events/' + event_id,
auth=self.auth,
timeout=30,
params=payload)
return r.json()
``` |
{
"source": "jmpichar/2020_Arduino_Pi_IOT_Project",
"score": 3
} |
#### File: 2020_Arduino_Pi_IOT_Project/flask_webapp/app.py
```python
from flask import Flask, jsonify, request, redirect, render_template, Response
from camera import VideoCamera
import cv2
import sys
if sys.platform == 'win32':
print("Running on Windows OS. This is not supported yet.")
exit()
from src.device_list import BtDevContainer
Container = BtDevContainer()
app = Flask(__name__)
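# Generator yielding camera frames as an MJPEG stream: each JPEG is wrapped in a
# multipart/x-mixed-replace part so the browser continuously replaces the image.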
def gen_frames(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route("/")
def home():
"""
Brief: Render the main page of the web app.
Param(s): None.
Return: The rendered index_new.html template.
"""
return render_template("index_new.html")
@app.route("/scan")
def scan():
"""
Brief: Scan for nearby Bluetooth devices using the BtDevContainer.
Param(s): None.
Return: JSON with key "scan_devs" holding the list of discovered device names.
"""
retDict = {}
try:
devices = Container.scan()
retDict["scan_devs"] = devices
#retDict["scan_devs"] = ['test1', 'test2', 'test3', 'test4']
except Exception as e:
print(f"Runtime error has occurred. {e}")
return jsonify(retDict)
@app.route("/connect", methods=['GET', 'POST'])
def connect():
"""
Brief: Connect to each device selected in the posted JSON.
Param(s): JSON body with a "selectedDevices" list of device names.
Return: JSON mapping each device name to True/False connection status.
"""
devices = request.get_json()
retValue = {}
for device in devices["selectedDevices"]:
try:
retValue[device] = Container.get_device(device).connect()
except Exception:
retValue[device] = False
return jsonify(retValue)
#print(devices)
#return jsonify({"test2": True, "test3": True}) #uncommented line
@app.route("/disconnect", methods=['GET', 'POST'])
def disconnect():
"""
Brief: Disconnect and remove each device selected in the posted JSON.
Param(s): JSON body with a "selectedDevices" list of device names.
Return: JSON mapping each device name to True/False removal status.
"""
devices = request.get_json()
retValue = {}
for device in devices["selectedDevices"]:
try:
Container.remove_device(device)
retValue[device] = True
except Exception:
retValue[device] = False
return jsonify(retValue)
@app.route("/send", methods=['GET', 'POST'])
def send():
"""
Brief: Forward method calls to the selected devices.
Request JSON (POST):
{"selectedDevices": [device_name, ...], device_name: {method_name: {param_name: param_value, ...}, ...}, ...}
Response JSON:
{device_name: {method_name: method_result, ...}, ...}
"""
devices = request.get_json()
retValue = {}
for device_name in devices["selectedDevices"]:
retValue[device_name] = {}
for msg_name in devices[device_name]:
print(f"Sending command: {msg_name} params: {devices[device_name][msg_name]}")
try:
retValue[device_name][msg_name] = Container.get_device(device_name).send_message(msg_name, **devices[device_name][msg_name])
except Exception as e:
print(f"Unexpected error occurred upon sending command: {msg_name}. Returning False.\n{e}")
retValue[device_name][msg_name] = False
return jsonify(retValue)
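# Hedged usage sketch for the /send route above; the device name "arduino_nano",
# the method "set_led", and its parameter are made-up placeholders; only the
# payload shape mirrors what send() actually parses.
def example_send_request(base_url='http://localhost:5000'):
    import requests  # assumed to be installed; it is not used elsewhere in this app
    payload = {
        'selectedDevices': ['arduino_nano'],
        'arduino_nano': {'set_led': {'state': 1}},
    }
    # Expected response shape: {"arduino_nano": {"set_led": <result or False>}}
    return requests.post(base_url + '/send', json=payload).json()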
@app.route('/video_feed')
def video_feed():
print('Turn on Webcam')
# return the response generated along with the specific media
# type (mime type)
return Response(gen_frames(VideoCamera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
# setting the host to 0.0.0.0 makes the pi act as a server,
# this allows users to get to the site by typing in the pi's
# local ip address.
# NOTE : When running the webapp you must use "sudo" for super user
# rights to run as a server.
app.run(host="0.0.0.0", port=5000, debug=True)
``` |
{
"source": "jmpinit/github_stars_to_notion",
"score": 3
} |
#### File: github_stars_to_notion/github_stars_to_notion/__init__.py
```python
import sys
import os.path
import requests
import yaml
import json
from notion.client import NotionClient
# For testing use star cache to avoid GitHub rate limit
DEBUG_USE_CACHE = False
def gh_query(query, token):
"""Run a GraphQL query against the GitHub API using the given access token"""
headers = {'Authorization': f'Bearer {token}'}
request = requests.post('https://api.github.com/graphql', json={'query': query}, headers=headers)
if request.status_code == 200:
return request.json()
else:
raise Exception('Query failed to run by returning code of {}. {}'.format(request.status_code, query))
def get_stars(user, token):
"""Return a list of the name, description, and URL for each star of the given user"""
stars = []
end_cursor = None
while True:
after_clause = ''
if end_cursor:
after_clause = f', after: "{end_cursor}"'
result = gh_query(f"""
{{
user(login: "{user}") {{
starredRepositories(first: 100{after_clause}) {{
pageInfo {{
hasNextPage
endCursor
}}
edges {{
node {{
name
url
description
}}
}}
}}
}}
}}
""", token)
edges = result['data']['user']['starredRepositories']['edges']
for node in edges:
stars.append(node['node'])
page_info = result['data']['user']['starredRepositories']['pageInfo']
if not page_info['hasNextPage']:
# No more data to retrieve
break
end_cursor = page_info['endCursor']
return stars
def sync_star_table(url, token, stars, delete=False, name_col_name='Name', url_col_name='URL', description_col_name='Description'):
"""Sync the list of stars with name, description, and URL to the given Notion table
- If delete is True then rows in the Notion table which do not correspond to a star will be removed
- The names of the columns can optionally be specified
"""
client = NotionClient(token_v2=token)
cv = client.get_collection_view(url)
# Index the stars by the URL, which is the unique ID we care about
stars_by_url = {}
for star in stars:
stars_by_url[star['url']] = star
# Index the rows by the URL
rows_by_url = {}
for row in cv.collection.get_rows():
if len(getattr(row, url_col_name)) == 0:
print('Warning: skipping row with empty URL')
continue
if getattr(row, url_col_name) in rows_by_url:
print(f'Warning: found duplicate row for {getattr(row, url_col_name)}')
continue
rows_by_url[getattr(row, url_col_name)] = row
# Add any GH stars that are not in the table rows
for star in stars:
if star['url'] not in rows_by_url:
new_row = cv.collection.add_row()
setattr(new_row, name_col_name, star['name'])
setattr(new_row, description_col_name, star['description'])
setattr(new_row, url_col_name, star['url'])
print(f'Added new row for {star["name"]}')
# Add any missing descriptions
for url, row in rows_by_url.items():
if len(getattr(row, description_col_name)) == 0:
if url not in stars_by_url:
# This star was deleted in the user's account but is still
# in the Notion table, so we'll skip it
continue
star = stars_by_url[url]
setattr(row, description_col_name, star['description'])
print(f'Filled missing description for {star["name"]}')
# Optionally delete rows for stars that are in the table but no longer
# listed in the user's account (deleted on GH)
if delete:
for url, row in rows_by_url.items():
if url not in stars_by_url:
row.remove()
print(f'Deleted row for {getattr(row, name_col_name)}')
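# Hedged example of a call with non-default column names; the URL, tokens, and
# column names below are placeholders, not values used by this project:
# sync_star_table(
#     'https://www.notion.so/<workspace>/<collection-view-id>',
#     '<token_v2 cookie value>',
#     get_stars('octocat', '<github token>'),
#     delete=True,
#     name_col_name='Repo',
#     url_col_name='Link',
#     description_col_name='About',
# )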
def load_config(config_file_path):
"""Load and validate a configuration file containing user information"""
if not os.path.isfile(config_file_path):
raise Exception('config.yml file missing')
config = yaml.safe_load(open(config_file_path, 'r'))
# Validate configuration
if 'github' not in config:
raise Exception('Missing GitHub section in config')
if 'username' not in config['github']:
raise Exception('Missing GitHub username in config')
if 'token' not in config['github']:
raise Exception('Missing GitHub token in config')
if 'notion' not in config:
raise Exception('Missing Notion section in config')
if 'token_v2' not in config['notion']:
raise Exception('Missing Notion token_v2 in config')
if 'table_url' not in config['notion']:
raise Exception('Missing Notion table_url in config')
return config
def main():
if len(sys.argv) != 2:
print(f'Usage: {sys.argv[0]} config.yml')
sys.exit(1)
config_path = sys.argv[1]
config = load_config(config_path)
gh_username = config['github']['username']
gh_token = config['github']['token']
print('Retrieving stars for GitHub user {}'.format(gh_username))
if not DEBUG_USE_CACHE:
stars = get_stars(gh_username, gh_token)
else:
# Load stars from cache file if available,
# otherwise retrieve from GitHub and write to cache
if os.path.isfile('stars.json'):
with open('stars.json', 'r') as stars_file:
stars = json.load(stars_file)
print('Loaded stars from cache')
else:
stars = get_stars(gh_username, gh_token)
with open('stars.json', 'w') as stars_file:
json.dump(stars, stars_file)
print('Cached stars')
print('Syncing stars to Notion table')
n_table = config['notion']['table_url']
n_token = config['notion']['token_v2']
sync_star_table(n_table, n_token, stars)
if __name__ == '__main__':
main()
``` |
{
"source": "jmplonka/Importer3D",
"score": 2
} |
#### File: jmplonka/Importer3D/importGSM.py
```python
__title__ = "FreeCAD Maya file importer"
__author__ = "<NAME>"
import sys, shlex, struct, FreeCAD, numpy
from importUtils import newObject, getShort, getInt
class GsmHeader():
def __init__(self):
self.length = 0x28
self.blockCount = 0
class GsmMySg():
def __init__(self):
self.length = 0x08
class GsmDaeH():
def __init__(self):
self.length = 0x50
class GsmBlockHeader():
def __init__(self):
self.length = 0x10
self.blockPos = 0
self.blockLength = 0
self.key = None
class GsmBlock():
def __init__(self, data, bHdr):
self.pos = bHdr.blockPos + 4
self.key = data[bHdr.blockPos:self.pos]
self.subName = data[self.pos:self.pos + 2]
self.subKey, self.pos = getShort(data, self.pos + 2)
self.size, self.pos = getInt(data, self.pos)
self.version, self.pos = getInt(data, self.pos)
class GsmCsd1(GsmBlock):
def __init__(self, data, bHdr): GsmBlock.__init__(self, data, bHdr)
class GsmCsd2(GsmBlock): # Lines
def __init__(self, data, bHdr): GsmBlock.__init__(self, data, bHdr)
class GsmCsd3(GsmBlock):
def __init__(self, data, bHdr):
GsmBlock.__init__(self, data, bHdr)
end = bHdr.blockPos + bHdr.blockLength
if (data[bHdr.blockPos + 0x10:bHdr.blockPos + 0x13] == b'\xEF\xBB\xBF'):
self.text = data[bHdr.blockPos + 0x13:end].decode('utf8')
else:
self.text = data[bHdr.blockPos + 0x10:end].decode('utf8')
class GsmCsiu(GsmBlock):
def __init__(self, data, bHdr): GsmBlock.__init__(self, data, bHdr)
class GsmCslv(GsmBlock):
def __init__(self, data, bHdr): GsmBlock.__init__(self, data, bHdr)
class GsmCsrp(GsmBlock):
def __init__(self, data, bHdr): GsmBlock.__init__(self, data, bHdr)
class GsmDrapInfo():
def __init__(self, data, pos):
self.pos = bHdr.blockPos + 4
self.key = data[bHdr.blockPos:self.pos]
self.varType, self.pos = readShort(data, self.pos)
if (self.varType == 0x02):
self.bytes = data[self.pos:self.pos + 0x12]
self.pos += 0x12
self.value, self.pos = readFloat(data, self.pos)
elif (self.varType == 0x04):
self.bytes = data[self.pos:self.pos + 0x0A]
self.pos += 0x0A
r, self.pos = readFloat(data, self.pos)
g, self.pos = readFloat(data, self.pos)
b, self.pos = readFloat(data, self.pos)
self.value = (r, g, b)
elif (self.varType == 0x0d):
self.bytes = readBytes(0x12);
self.bytes = data[self.pos:self.pos + 0x12]
self.pos += 0x12
self.value, self.pos = readInt(data, self.pos)
self.value = (self.value != 0)
elif (self.varType == 0x0f):
self.bytes = data[self.pos:self.pos + 0x12]
self.pos += 0x12
self.value, self.pos = readInt(data, self.pos)
else:
self.bytes = data[self.pos:self.pos + 0x16]
self.pos += 0x16
self.value = None
self.name = data[self.pos:self.pos + 0x20].decode('utf8')
self.pos = self.pos + 0x20
i = self.name.find('\0')
if (i != -1): self.name = self.name[0:i]
self.flag1, self.pos = readInt(data, self.pos);
self.flag2, self.pos = readInt(data, self.pos)
class GsmDrap(GsmBlock):
def __init__(self, data, bHdr):
GsmBlock.__init__(self, data, bHdr)
self.count, self.pos = getShort(data, bHdr.blockPos + 0x32)
self.infos = {}
self.pos = bHdr.blockPos + 0x80
i = self.count
while (i > 0):
info = GsmDrapInfo(data, self.pos)
self.pos += 0x40
self.infos[info.key] = info
i -= 1
txt = data[self.pos : bHdr.blockPos + bHdr.blockLength].decode('UTF-16LE')
self.strings = txt.split('\0')
class GsmFfig(GsmBlock):
def __init__(self, data, bHdr): GsmBlock.__init__(self, data, bHdr)
class GsmScna(GsmBlock):
def __init__(self, data, bHdr): GsmBlock.__init__(self, data, bHdr)
class GsmSrcm(GsmBlock):
def __init__(self, data, bHdr): GsmBlock.__init__(self, data, bHdr)
class GsmTxtc(GsmBlock):
def __init__(self, data, bHdr): GsmBlock.__init__(self, data, bHdr)
def readGsmHeader(data):
header = GsmHeader()
header.blockCount, pos = getInt(data, 0x24)
return header, header.length
def readGsmMySg(data, pos):
mySg = GsmMySg()
return mySg, pos + mySg.length
def readGsmDaeH(data, pos):
daeH = GsmDaeH()
return daeH, pos + daeH.length
def readGsmBlockHeader(data, pos):
bHdr = GsmBlockHeader()
ofs = pos + 4
bHdr.key = data[pos:ofs].decode('utf8')
bHdr.blockPos, ofs = getInt(data, ofs)
bHdr.blockLength, ofs = getInt(data, ofs)
bHdr.flags, ofs = getInt(data, ofs)
return bHdr, pos + bHdr.length
def readGsmBlock(data, bHdr):
#if (bHdr.key == 'CSD1'): return GsmCsd1(data, bHdr)
#if (bHdr.key == 'CSD2'): return GsmCsd2(data, bHdr) # Lines
if (bHdr.key == 'CSD3'): return GsmCsd3(data, bHdr)
#if (bHdr.key == 'CSIU'): return GsmCsiu(data, bHdr)
#if (bHdr.key == 'CSLV'): return GsmCslv(data, bHdr)
#if (bHdr.key == 'CSRP'): return GsmCsrp(data, bHdr)
#if (bHdr.key == 'DRAP'): return GsmDrap(data, bHdr)
#if (bHdr.key == 'FFIG'): return GsmFfig(data, bHdr) #Thumbnail
#if (bHdr.key == 'SCNA'): return GsmScna(data, bHdr)
#if (bHdr.key == 'SRCM'): return GsmSrcm(data, bHdr)
#if (bHdr.key == 'TXTC'): return GsmTxtc(data, bHdr)
return None
def getFloat(st):
f = st.get_token()
try:
return float(f)
except:
raise Exception("Can't convert '%s' to float!" %(f))
def getInteger(st): return int(st.get_token())
def getColor(st):
r = getFloat(st)
g = getFloat(st)
b = getFloat(st)
return (r, g, b)
class GsmMaterial():
def __init__(self):
self.name = None
self.number = -1
class GsmReader():
def __init__(self):
self.currentName = None
self.materials = None
self.vertexList = None
self.textureList = None
self.edgeList = None
self.faceList = None
self.model = None
self.material = None
self.mat_block = 0
self.currentMaterial = None
def resetMaterial(self):
self.mat_block = 0
if (self.material is not None):
self.materials[self.material.name] = self.material
self.material = None
def readBase(self, st):
self.resetMaterial()
self.textureList = []
self.vertexList = []
self.edgeList = []
self.faceList = []
def adjustMaterial(self, mesh):
if (self.currentMaterial is not None):
amb = self.currentMaterial.ambient
dif = self.currentMaterial.diffuse
mesh.ViewObject.ShapeMaterial.AmbientColor = (amb, amb, amb)
mesh.ViewObject.ShapeMaterial.DiffuseColor = (dif, dif, dif)
mesh.ViewObject.ShapeMaterial.EmissiveColor = self.currentMaterial.emissionRGB
mesh.ViewObject.ShapeMaterial.SpecularColor = self.currentMaterial.specularRGB
mesh.ViewObject.ShapeMaterial.Shininess = self.currentMaterial.shining
mesh.ViewObject.ShapeMaterial.Transparency = self.currentMaterial.transparent
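# Faces reference edges by 1-based index; a negative index means the edge is
# traversed in reverse, so its two endpoints are swapped when building the triangle.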
def getFaces(self):
faces = []
for i in self.faceList:
i0 = i[0]
i1 = i[1]
i2 = i[2]
if (i0 < 0):
e0 = self.edgeList[-i0 - 1]
idx0 = e0[1] - 1
idx1 = e0[0] - 1
else:
e0 = self.edgeList[i0 - 1]
idx0 = e0[0] - 1
idx1 = e0[1] - 1
if (i1 < 0):
e1 = self.edgeList[-i1 - 1]
idx2 = e1[0] - 1
else:
e1 = self.edgeList[i1 - 1]
idx2 = e1[1] - 1
faces.append([idx0, idx1, idx2])
return faces
def readBody(self, st, doc):
self.resetMaterial()
number = getInteger(st)
if (len(self.vertexList) > 0):
data = []
faces = self.getFaces()
for f in faces:
p0 = self.vertexList[f[0]]
p1 = self.vertexList[f[1]]
p2 = self.vertexList[f[2]]
data.append([p0, p1, p2])
mesh = newObject(doc, self.currentName, data)
self.adjustMaterial(mesh)
def readDefine(self, st):
# DEFINE MATERIAL "name" 0,
# 0.713726, 0.482353, 0.403922, !surface RGB [0.0..1.0]x3
# 0.650000, 0.800000, 0.900000, 0.000000, !ambient, diffuse, specular, transparent coefficients [0.0..1.0]x4
# 2.000000, !shining [0.0..100.0]
# 1.000000, !transparency attenuation [0.0..4.0]
# 0.086275, 0.086275, 0.086275, !specular RGB [0.0..1.0]x3
# 0.000000, 0.000000, 0.000000, !emission RGB [0.0..1.0]x3
# 0.000000
#
if (self.mat_block == 0):
tok = st.get_token()
if (tok == 'MATERIAL'):
self.material = GsmMaterial()
self.material.name = st.get_token()
self.material.number = getInteger(st)
self.mat_block = 1
elif (tok == 'TEXTURE'):
self.mat_block = -1
else:
raise Exception("Unknown Token '%s' in DEFINE!" %(tok))
elif (self.mat_block == 1):
self.material.ambient = getFloat(st)
self.material.diffuse = getFloat(st)
self.material.specular = getFloat(st)
self.mat_block = 2
return
elif (self.mat_block == 2):
self.material.transparent = getFloat(st)
self.mat_block = 3
return
elif (self.mat_block == 3):
self.material.shining = getFloat(st)
self.mat_block = 4
return
elif (self.mat_block == 4):
self.material.transparencyAttentuation = getFloat(st)
self.mat_block = 5
return
elif (self.mat_block == 5):
self.material.specularRGB = getColor(st)
self.mat_block = 6
return
elif (self.mat_block == 6):
self.material.emissionRGB = getColor(st)
self.mat_block = 7
return
elif (self.mat_block == 7):
self.material.emissionAttentuation = getFloat(st)
self.resetMaterial()
return
elif (self.mat_block != -1):
raise Exception("Unrecognized token in DEFINE: '%s'!" %(tok))
def readEdge(self, st):
# EDGE 1, 2, 1, 17, 2 !#1
self.resetMaterial()
p1 = getInteger(st)
p2 = getInteger(st)
a = getInteger(st)
b = getInteger(st)
c = getInteger(st)
self.edgeList.append((p1, p2, a, b, c))
def readMaterial(self, st):
self.resetMaterial()
name = st.get_token()
mat = self.materials.get(name)
if (mat is not None):
self.currentMaterial = mat
def readModel(self, st):
# MODEL SURFACE
self.resetMaterial()
self.model = st.get_token()
def readPolygon(self, st):
# PGON 3, 0, 2, 1, 2, 3
self.resetMaterial()
a = getInteger(st)
b = getInteger(st)
c = getInteger(st)
d = getInteger(st)
e = getInteger(st)
f = getInteger(st)
self.faceList.append([d, e, f])
def readTextureVertex(self, st):
self.readVertex(st)
x = getFloat(st)
y = getFloat(st)
self.textureList.append([x, y])
def readVertex(self, st):
self.resetMaterial()
x = getFloat(st)
y = getFloat(st)
z = getFloat(st)
self.vertexList.append([x, y, z])
def readComment(self, st):
tok = st.get_token()
if (len(tok) > 0):
if (tok == 'Mesh'):
tok = st.get_token()
if (tok == 'name'):
tok = st.get_token() # skip ':'
self.currentName = st.get_token()
def read(self, doc, csd3):
self.materials = {}
self.vertexList = []
self.textureList = []
self.edgeList = []
self.faceList = []
lines = csd3.text.splitlines()
progressbar = FreeCAD.Base.ProgressIndicator()
progressbar.start(" reading ...", len(lines))
try:
for line in lines:
progressbar.next()
st = shlex.shlex(line.strip())
st.wordchars += '.-'
st.whitespace += ','
tok = st.get_token()
if (tok == ''): pass
elif (tok == 'BASE'): self.readBase(st)
elif (tok == 'BODY'): self.readBody(st, doc)
elif (tok == 'COOR'): self.resetMaterial()
elif (tok == 'DEFINE'): self.readDefine(st)
elif (tok == 'EDGE'): self.readEdge(st)
elif (tok == 'ELSE'): self.resetMaterial()
elif (tok == 'ENDIF'): self.resetMaterial()
elif (tok == 'hotspot'): self.resetMaterial()
elif (tok == 'IF'): pass
elif (tok == 'material'): self.readMaterial(st)
elif (tok == 'min'): pass
elif (tok == 'MODEL'): self.readModel(st)
elif (tok == 'MUL'): self.resetMaterial()
elif (tok == 'PEN'): self.resetMaterial()
elif (tok == 'PGON'): self.readPolygon(st)
elif (tok == 'TEVE'): self.readTextureVertex(st)
elif (tok == 'VERT'): self.readVertex(st)
elif (tok == '!'): self.readComment(st)
else:
if (self.mat_block != 0):
st.push_token(tok)
self.readDefine(st)
else:
raise Exception("Unrecognized token '%s' (mat_block=%d)" %(tok, self.mat_block))
except Exception as e:
print('GDL parsing stopped: %s' %(e))
progressbar.stop()
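# Container layout as parsed here: a fixed global header (holding the block count),
# the MYSG and DAEH sections, then one 16-byte header per block; of the blocks only
# the CSD3 text block, which holds the GDL script, is interpreted further.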
def read(doc, fileName):
with open(fileName, 'rb') as file:
data = file.read()
hdr, pos = readGsmHeader(data)
mySg, pos = readGsmMySg(data, pos)
daeH, pos = readGsmDaeH(data, pos)
blocks = {}
i = hdr.blockCount
while (i > 0):
bHdr, pos = readGsmBlockHeader(data, pos)
block = readGsmBlock(data, bHdr)
blocks[bHdr.key] = block
i -= 1
reader = GsmReader()
reader.read(doc, blocks.get('CSD3'))
if __name__ == '__main__':
if (len(sys.argv) > 1):
print(sys.argv[1])
read(FreeCAD.ActiveDocument, sys.argv[1])
else:
read(FreeCAD.ActiveDocument, u"D:/documents/NOTE/3d/ArmChair/Armchair fotel b6500.gsm")
#read(FreeCAD.ActiveDocument, u"D:/documents/NOTE/3d/motorristja.gsm")
```
#### File: jmplonka/Importer3D/triangulate.py
```python
from itertools import chain
from math import fabs
import numpy as np
# Ported from https://github.com/bjorkegeek/polytri
def loopedPairs(iterable):
'''
list(loopedPairs([1,2,3])) => [(1, 2), (2, 3), (3, 1)]
'''
iterable = iter(iterable)
first = last = next(iterable)
for x in iterable:
yield last, x
last = x
yield (last, first)
def nearlyZero(v, rTol = 1e-6, aTol = 1e-9):
'''
nearlyZero(0) => True
nearlyZero(.1) => False
nearlyZero(1E-10) => True
nearlyZero(np.array([0, 0, 0])) => True
nearlyZero(np.array([1E-10, 1E-10, 1E-10])) => True
nearlyZero(np.array([1E-10, 1E-10, 7])) => False
'''
if isinstance(v, (float, int)):
return fabs(v) <= aTol
return np.allclose(v, np.zeros(np.shape(v), np.float32), rTol, aTol)
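# Newell-style accumulation over the polygon edges; the result is an unnormalized
# normal whose sign depends on the winding order of the vertices.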
def calculateNormal(polygon):
'''
Returns polygon normal vector for 3d polygon
'''
n = np.array([0, 0, 0], np.float32)
for p1, p2 in loopedPairs(polygon):
m = np.subtract(p2, p1)
p = np.add(p2, p1)
n[0] += m[1] * p[2]
n[1] += m[2] * p[0]
n[2] += m[0] * p[1]
if nearlyZero(n):
raise ValueError("No normal found")
else:
return n
def loppedSlice(seq, start, count):
'''
list(loppedSlice([1,2,3],0,3)) => [1, 2, 3]
list(loppedSlice([1,2,3],1,2)) => [3, 1]
'''
l = len(seq)
for i in range(start, start + count):
yield seq[i % l]
def loppedSliceInv(seq, start, count):
'''
list(loppedSliceInv([1,2,3,4],0,3)) => [4]
list(loppedSliceInv([1,2,3,4],1,3)) => [1]
list(loppedSliceInv([1,2,3,4],2,3)) => [2]
list(loppedSliceInv([1,2,3,4],3,3)) => [3]
'''
if start + count > len(seq):
return seq[start + count - len(seq): start]
else:
return chain(seq[:start], seq[start + count:])
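# Returns True if any of the given points lies inside the triangle: each point is
# expressed in the triangle's edge basis (s, t) and tested for s >= 0, t >= 0, s + t <= 1.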
def anyPointInTriangle(triangle, points):
a, b, c = triangle
s = b - a
t = c - a
stk = [s, t, np.cross(s, t)]
mtx = np.linalg.inv(np.vstack(stk).transpose())[:2]
for p in points:
ps, pt = np.dot(mtx, p - a)
if ps >= 0 and pt >= 0 and ps + pt <= 1:
return True
return False
def getTriangles(ngon):
'''
Converts a polygon to a set of triangles that cover the same area.
* Convex and non-convex polygons are supported.
* Clockwise and counter-clockwise winding supported.
* Polygon vertices must all be within a single plane
* Inverted polygons are NOT supported
* Polygons with holes (multi-wires) are NOT supported.
Args:
ngon: A sequence of vertices making up the singe wire polygon, with each vertex
described as a 3D point.
The ngon is implicitly closed: a polygon with N sides should have N vertices.
Returns:
a generator of triangles, each specified in the same format as the input polygon
'''
polygon = [np.array(x, np.float32) for x in ngon]
normal = calculateNormal(polygon)
i = 0
while len(polygon) > 2:
if i >= len(polygon):
raise ValueError("Triangulation failed")
(a, b, c) = loppedSlice(polygon, i, 3)
if ((a == b).all() or (b == c).all()):
# Duplicate vertex, just skip
del polygon[(i + 1) % len(polygon)]
else:
x = np.cross(c - b, b - a)
dot = np.dot(normal, x)
yld = False
if dot > 1E-12:
triangle = (a, b, c)
if not anyPointInTriangle(triangle, loppedSliceInv(polygon, i, 3)):
del polygon[(i + 1) % len(polygon)]
yield triangle
i = 0
yld = True
if not yld:
i += 1
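# Hedged usage sketch: triangulating a unit square, which should yield two
# triangles; the square and the print-out below are illustrative only.
if __name__ == '__main__':
    square = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
    for tri in getTriangles(square):
        print([tuple(float(c) for c in corner) for corner in tri])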
``` |
{
"source": "jmpmcmanus/adcirc2mbtiles",
"score": 2
} |
#### File: jmpmcmanus/adcirc2mbtiles/mesh2tiff.py
```python
import os, sys, json, warnings
from functools import wraps
import numpy as np
from PyQt5.QtGui import QColor
from qgis.core import (
Qgis,
QgsApplication,
QgsMeshLayer,
QgsMeshDatasetIndex,
QgsMeshUtils,
QgsProject,
QgsRasterLayer,
QgsRasterFileWriter,
QgsRasterPipe,
QgsCoordinateReferenceSystem,
QgsColorRampShader,
QgsRasterShader,
QgsSingleBandPseudoColorRenderer,
QgsRasterHistogram,
QgsErrorMessage
)
# Decorator that suppresses warnings raised by the wrapped function
def ignore_warnings(f):
@wraps(f)
def inner(*args, **kwargs):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore")
response = f(*args, **kwargs)
return response
return inner
# Initialize application
def initialize_qgis_application():
sys.path.append('/opt/conda/envs/mbtiles/share/qgis')
sys.path.append('/opt/conda/envs/mbtiles/share/qgis/python/plugins')
app = QgsApplication([], False)
return (app)
# Add the path to processing so we can import it next
@ignore_warnings # Ignored because we want the output of this script to be a single value, and "import processing" is noisy
def initialize_processing(app):
# import processing module
import processing
from processing.core.Processing import Processing
# Initialize Processing
Processing.initialize()
return (app, processing)
# Convert mesh layer as raster and save as a GeoTiff
def exportRaster(parameters):
# Open layer from infile
infile = parameters['INPUT_LAYER']
meshfile = infile.strip().split('/')[-1]
meshlayer = meshfile.split('.')[0]
layer = QgsMeshLayer(infile, meshlayer, 'mdal')
# Check if layer is valid
if layer.isValid() is True:
# Get parameters for processing
dataset = parameters['INPUT_GROUP']
timestep = parameters['INPUT_TIMESTEP']
mupp = parameters['MAP_UNITS_PER_PIXEL']
extent = layer.extent()
output_layer = parameters['OUTPUT_RASTER']
width = extent.width()/mupp
height = extent.height()/mupp
crs = layer.crs()
crs.createFromSrid(4326)
transform_context = QgsProject.instance().transformContext()
output_format = QgsRasterFileWriter.driverForExtension(os.path.splitext(output_layer)[1])
# Open output file for writing
rfw = QgsRasterFileWriter(output_layer)
rfw.setOutputProviderKey('gdal')
rfw.setOutputFormat(output_format)
# Create one band raster
rdp = rfw.createOneBandRaster( Qgis.Float64, width, height, extent, crs)
# Get dataset index
dataset_index = QgsMeshDatasetIndex(dataset, timestep)
# Regrid mesh layer to raster
block = QgsMeshUtils.exportRasterBlock( layer, dataset_index, crs,
transform_context, mupp, extent)
# Write raster to GeoTiff file
rdp.writeBlock(block, 1)
rdp.setNoDataValue(1, block.noDataValue())
rdp.setEditable(False)
return(output_layer)
if layer.isValid() is False:
raise Exception('Invalid mesh')
# Add color and set transparency to GeoTiff
def styleRaster(filename):
# Create outfile name
outfile = "".join(filename.strip().split('.raw'))
# Open layer from filename
rasterfile = filename.strip().split('/')[-1]
rasterlayer = rasterfile.split('.')[0]
rlayer = QgsRasterLayer(filename, rasterlayer, 'gdal')
# Check if layer is valid
if rlayer.isValid() is True:
# Get layer data provider
provider = rlayer.dataProvider()
# Calculate histogram
provider.initHistogram(QgsRasterHistogram(),1,100)
hist = provider.histogram(1)
# Get histogram stats
nbins = hist.binCount
minv = hist.minimum
maxv = hist.maximum
# Create histogram array, bin array, and histogram index
hista = np.array(hist.histogramVector)
bins = np.arange(minv, maxv, (maxv - minv)/nbins)
index = np.where(hista > 5)
# Get bottom and top color values from bin values
bottomcolor = bins[index[0][0]]
topcolor = bins[index[0][-1]]
# Calculate range value between the bottom and top color values
if bottomcolor < 0:
vrange = topcolor + bottomcolor
else:
vrange = topcolor - bottomcolor
# Calculate values for bottom middle, and top middle color values
if rasterlayer == 'maxele':
bottommiddle = vrange * 0.3333
topmiddle = vrange * 0.6667
else:
bottommiddle = vrange * 0.375
topmiddle = vrange * 0.75
# Create list of color values
valueList =[bottomcolor, bottommiddle, topmiddle, topcolor]
# Create color dictionary
if rasterlayer == 'maxele':
colDic = {'bottomcolor':'#0000ff', 'bottommiddle':'#00ffff', 'topmiddle':'#ffff00', 'topcolor':'#ff0000'}
else:
colDic = {'bottomcolor':'#000000', 'bottommiddle':'#ff0000', 'topmiddle':'#ffff00', 'topcolor':'#ffffff'}
# Create color ramp function and add colors
fnc = QgsColorRampShader()
fnc.setColorRampType(QgsColorRampShader.Interpolated)
lst = [QgsColorRampShader.ColorRampItem(valueList[0], QColor(colDic['bottomcolor'])),\
QgsColorRampShader.ColorRampItem(valueList[1], QColor(colDic['bottommiddle'])), \
QgsColorRampShader.ColorRampItem(valueList[2], QColor(colDic['topmiddle'])), \
QgsColorRampShader.ColorRampItem(valueList[3], QColor(colDic['topcolor']))]
fnc.setColorRampItemList(lst)
# Create raster shader and add color ramp function
shader = QgsRasterShader()
shader.setRasterShaderFunction(fnc)
# Create color render and set opacity
renderer = QgsSingleBandPseudoColorRenderer(provider, 1, shader)
renderer.setOpacity(0.75)
# Get output format
output_format = QgsRasterFileWriter.driverForExtension(os.path.splitext(outfile)[1])
# Open output file for writing
rfw = QgsRasterFileWriter(outfile)
rfw.setOutputProviderKey('gdal')
rfw.setOutputFormat(output_format)
# Add EPSG 4326 to layer crs
crs = QgsCoordinateReferenceSystem()
crs.createFromSrid(4326)
# Create Raster pipe and set provider and renderer
pipe = QgsRasterPipe()
pipe.set(provider.clone())
pipe.set(renderer.clone())
# Get transform context
transform_context = QgsProject.instance().transformContext()
# Write to file
rfw.writeRaster(
pipe,
provider.xSize(),
provider.ySize(),
provider.extent(),
crs,
transform_context
)
if not rlayer.isValid():
raise Exception('Invalid raster')
app = initialize_qgis_application()
app.initQgis()
app, processing = initialize_processing(app)
parameters = json.loads(sys.argv[1])
filename = exportRaster(parameters)
styleRaster(filename)
app.exitQgis()
``` |
{
"source": "jmpmcmanus/ingestGauges",
"score": 3
} |
#### File: jmpmcmanus/ingestGauges/createIngestData.py
```python
import argparse, os, glob, psycopg2
import pandas as pd
import numpy as np
from psycopg2.extensions import AsIs
from loguru import logger
# This function takes a dataset name as input, and uses it to query the drf_harvest_data_file_meta table, creating a list
# of filenames. The list is converted to a DataFrame and returned.
def getInputFiles(inputDataset):
try:
# Create connection to database and get cursor
conn = psycopg2.connect("dbname='apsviz_gauges' user='apsviz_gauges' host='localhost' port='5432' password='<PASSWORD>'")
cur = conn.cursor()
# Set enviromnent
cur.execute("""SET CLIENT_ENCODING TO UTF8""")
cur.execute("""SET STANDARD_CONFORMING_STRINGS TO ON""")
cur.execute("""BEGIN""")
# Run query
cur.execute("""SELECT dir_path, file_name
FROM drf_harvest_data_file_meta
WHERE source = %(source)s AND ingested = False
ORDER BY data_date_time""",
{'source': inputDataset})
# convert query output to Pandas DataFrame
df = pd.DataFrame(cur.fetchall(), columns=['dir_path','file_name'])
# Close cursor and database connection
cur.close()
conn.close()
# Return Pandas dataframe
if inputDataset == 'adcirc':
return(df.head(40))
else:
return(df.head(20))
# If exception print error
except (Exception, psycopg2.DatabaseError) as error:
print(error)
# This function takes as input the source_archive (noaa, contrails), and a list of station_id(s), and returns source_id(s) for
# observation data from the drf_gauge_source table in the apsviz_gauges database. This function specifically gets source_id(s) for
# observation data, such as from NOAA and NCEM.
def getObsSourceID(source_archive,station_tuples):
try:
# Create connection to database and get cursor
conn = psycopg2.connect("dbname='apsviz_gauges' user='apsviz_gauges' host='localhost' port='5432' password='<PASSWORD>'")
cur = conn.cursor()
# Set enviromnent
cur.execute("""SET CLIENT_ENCODING TO UTF8""")
cur.execute("""SET STANDARD_CONFORMING_STRINGS TO ON""")
cur.execute("""BEGIN""")
# Run query
cur.execute("""SELECT s.source_id AS source_id, g.station_id AS station_id, g.station_name AS station_name,
s.data_source AS data_source, s.source_name AS source_name
FROM drf_gauge_station g INNER JOIN drf_gauge_source s ON s.station_id=g.station_id
WHERE source_archive = %(sourcearchive)s AND station_name IN %(stationtuples)s
ORDER BY station_name""",
{'sourcearchive': source_archive,'stationtuples': AsIs(station_tuples)})
# convert query output to Pandas dataframe
dfstations = pd.DataFrame(cur.fetchall(), columns=['source_id','station_id', 'station_name',
'data_source','source_name'])
# Close cursor and database connection
cur.close()
conn.close()
# Return Pandas dataframe
return(dfstations)
# If exception print error
except (Exception, psycopg2.DatabaseError) as error:
print(error)
# This function takes as input the data_source (hsofs...), and a list of station_id(s), and returns source_id(s) for
# model data from the drf_gauge_source table in the apsviz_gauges database. This function specifically gets source_id(s) for
# model data, such as from ADCIRC. The data_source, such as hsofs, is the grid that is used in the ADCIRC run.
def getModelSourceID(data_source,station_tuples):
try:
# Create connection to database and get cursor
conn = psycopg2.connect("dbname='apsviz_gauges' user='apsviz_gauges' host='localhost' port='5432' password='<PASSWORD>'")
cur = conn.cursor()
# Set enviromnent
cur.execute("""SET CLIENT_ENCODING TO UTF8""")
cur.execute("""SET STANDARD_CONFORMING_STRINGS TO ON""")
cur.execute("""BEGIN""")
# Run query
cur.execute("""SELECT s.source_id AS source_id, g.station_id AS station_id, g.station_name AS station_name,
s.data_source AS data_source, s.source_name AS source_name
FROM drf_gauge_station g INNER JOIN drf_gauge_source s ON s.station_id=g.station_id
WHERE data_source = %(datasource)s AND station_name IN %(stationtuples)s
ORDER BY station_name""",
{'datasource': data_source, 'stationtuples': AsIs(station_tuples)})
# convert query output to Pandas dataframe
dfstations = pd.DataFrame(cur.fetchall(), columns=['source_id','station_id','station_name','data_source',
'source_name'])
# Close cursor and database connection
cur.close()
conn.close()
# Return Pandas dataframe
return(dfstations)
# If exception print error
except (Exception, psycopg2.DatabaseError) as error:
print(error)
# This function takes as input a directory input path, directory output path and a filename, and returns a csv
# file that contains gauge data. The function uses the getObsSourceID and getModelSourceID functions above to get
# a list of existing source ids that it includes in the gauge data to enable joining the gauge data table with
# gauge source table. The function adds a timemark, that it gets from the input file name. The timemark values can
# be used to uniquely query an ADCIRC model run.
def addMeta(inputDir, outputDir, inputFile):
    # Read the input file, convert column names to lower case, rename the station column to station_name, convert its data
    # type to string, and add timemark and source_id columns
df = pd.read_csv(inputDir+inputFile)
df.columns= df.columns.str.lower()
df = df.rename(columns={'station': 'station_name'})
df = df.astype({"station_name": str})
df.insert(0,'timemark', '')
df.insert(0,'source_id', '')
# Extract list of stations from dataframe for querying the database, and get source_archive name from filename.
station_tuples = tuple(sorted([str(x) for x in df['station_name'].unique().tolist()]))
source_archive = inputFile.split('_')[0].lower().strip()
# check if source archive name is ADCIRC
if source_archive == 'adcirc':
        # Get source_name and data_source from the filename, and use them along with the list of stations to run
        # the getModelSourceID function to get the source_id(s)
data_source = inputFile.split('_')[2].lower().strip()+'_'+inputFile.split('_')[3].lower().strip()
dfstations = getModelSourceID(data_source,station_tuples)
        # Get the timemark for the forecast and nowcast data
df['timemark'] = inputFile.split('_')[-1].split('.')[0].lower().strip()
else:
# Use source_archive and list of stations to get source_id(s) for the observation gauge data
dfstations = getObsSourceID(source_archive,station_tuples)
df['timemark'] = inputFile.split('_')[-1].split('.')[0].lower().strip()
# Add source id(s) to dataframe
for index, row in dfstations.iterrows():
df.loc[df['station_name'] == row['station_name'], 'source_id'] = row['source_id']
    # Drop the station_name column from the dataframe
df.drop(columns=['station_name'], inplace=True)
# Write dataframe to csv file
df.to_csv(outputDir+'data_copy_'+inputFile, index=False)
# This function takes as input a directory output path and a dataset variable. It
# generates a list of input file paths and names, and uses them to run the addMeta function above.
def processData(outputDir, inputDataset):
dfDirFiles = getInputFiles(inputDataset)
for index, row in dfDirFiles.iterrows():
inputDir = row[0]
inputFile = row[1]
addMeta(inputDir, outputDir, inputFile)
# Main program function takes args as input, which contains the outputDir and inputDataset values.
@logger.catch
def main(args):
# Add logger
logger.remove()
log_path = os.getenv('LOG_PATH', os.path.join(os.path.dirname(__file__), 'logs'))
logger.add(log_path+'/createIngestData.log', level='DEBUG')
# Extract args variables
outputDir = args.outputDir
inputDataset = args.inputDataset
logger.info('Start processing data for dataset '+inputDataset+'.')
processData(outputDir, inputDataset)
logger.info('Finished processing data for dataset '+inputDataset+'.')
# Run the main function, which takes outputDir and inputDataset as input.
if __name__ == "__main__":
""" This is executed when run from the command line """
parser = argparse.ArgumentParser()
# Optional argument which requires a parameter (eg. -d test)
parser.add_argument("--outputDir", action="store", dest="outputDir")
parser.add_argument("--inputDataset", action="store", dest="inputDataset")
args = parser.parse_args()
main(args)
```
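For context on the filename convention the ingest script relies on, here is a small self-contained sketch; the example filename is hypothetical, not taken from the repository, and only illustrates the split('_') rules used in addMeta above.
```python
# Hypothetical filename illustrating the parsing rules assumed by addMeta above.
input_file = "adcirc_stationdata_namforecast_hsofs_2022-01-01T00:00:00.csv"
parts = input_file.split('_')
source_archive = parts[0].lower().strip()                                 # 'adcirc'
data_source = parts[2].lower().strip() + '_' + parts[3].lower().strip()   # 'namforecast_hsofs'
timemark = parts[-1].split('.')[0].lower().strip()                        # '2022-01-01t00:00:00'
print(source_archive, data_source, timemark)
```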
#### File: jmpmcmanus/ingestGauges/createIngestSourceMeta.py
```python
import argparse, psycopg2, sys, os
import pandas as pd
from psycopg2.extensions import AsIs
from loguru import logger
# This function takes a gauge location type (COASTAL, TIDAL or RIVERS), and uses it to query the drf_gauge_station table,
# returning a list of station id(s) and station names.
def getStationID(locationType):
try:
# Create connection to database and get cursor
conn = psycopg2.connect("dbname='apsviz_gauges' user='apsviz_gauges' host='localhost' port='5432' password='<PASSWORD>'")
cur = conn.cursor()
        # Set environment
cur.execute("""SET CLIENT_ENCODING TO UTF8""")
cur.execute("""SET STANDARD_CONFORMING_STRINGS TO ON""")
cur.execute("""BEGIN""")
# Run query
cur.execute("""SELECT station_id, station_name FROM drf_gauge_station
WHERE location_type = %(location_type)s
ORDER BY station_name""",
{'location_type': locationType})
# convert query output to Pandas dataframe
df = pd.DataFrame(cur.fetchall(), columns=['station_id', 'station_name'])
# Close cursor and database connection
cur.close()
conn.close()
# Return Pandas dataframe
return(df)
# If exception print error
except (Exception, psycopg2.DatabaseError) as error:
print(error)
# This function takes as input a directory path and an outputFile name, and uses them to write an output file containing
# station_id(s) that are extracted from the drf_gauge_station table in the apsviz_gauges database.
def addMeta(outputDir, outputFile):
    # Get the location type from the output file name, and use it to query station id(s) with the getStationID function
locationType = outputFile.split('_')[2]
df = getStationID(locationType)
    # Get source name from outputFile
source = outputFile.split('_')[0]
# Check if source is ADCIRC, contrails or noaa, and make appropriate additions to DataFrame
if source == 'adcirc':
# Get source_name and data_source from outputFile, and add them to the dataframe along
# with the source_archive value
df['data_source'] = outputFile.split('_')[3].lower()+'_'+outputFile.split('_')[4].lower()
df['source_name'] = source
df['source_archive'] = 'renci'
elif source == 'contrails':
# Add data_source, source_name, and source_archive to dataframe
gtype = outputFile.split('_')[2].lower()
df['data_source'] = gtype+'_gauge'
df['source_name'] = 'ncem'
df['source_archive'] = source
elif source == 'noaa':
# Add data_source, source_name, and source_archive to dataframe
df['data_source'] = 'tidal_gauge'
df['source_name'] = source
df['source_archive'] = source
else:
# If source in incorrect print message and exit
sys.exit('Incorrect source')
# Drop station_name from DataFrame
df.drop(columns=['station_name'], inplace=True)
    # Reorder column names and update indices
newColsOrder = ['station_id','data_source','source_name','source_archive']
df=df.reindex(columns=newColsOrder)
# Write dataframe to csv file
df.to_csv(outputDir+'source_'+outputFile, index=False)
# Main program function takes args as input, which contains the outputDir, and outputFile values.
@logger.catch
def main(args):
# Add logger
logger.remove()
log_path = os.getenv('LOG_PATH', os.path.join(os.path.dirname(__file__), 'logs'))
logger.add(log_path+'/createIngestSourceMeta.log', level='DEBUG')
# Extract args variables
outputDir = args.outputDir
outputFile = args.outputFile
logger.info('Start processing source data for file '+outputFile+'.')
# Run addMeta function
addMeta(outputDir, outputFile)
logger.info('Finished processing source data for file '+outputFile+'.')
# Run the main function, which takes outputDir and outputFile as input.
if __name__ == "__main__":
""" This is executed when run from the command line """
parser = argparse.ArgumentParser()
# Optional argument which requires a parameter (eg. -d test)
parser.add_argument("--outputDir", action="store", dest="outputDir")
parser.add_argument("--outputFile", action="store", dest="outputFile")
# Parse input arguments
args = parser.parse_args()
# Run main
main(args)
``` |
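As a quick check of the output file naming convention assumed above, a hypothetical example (the filename itself is an assumption, not taken from the repository):
```python
# Hypothetical output file name illustrating the fields addMeta above expects.
output_file = "adcirc_stationdata_COASTAL_namforecast_hsofs_meta.csv"
parts = output_file.split('_')
location_type = parts[2]                                  # 'COASTAL'
source = parts[0]                                         # 'adcirc'
data_source = parts[3].lower() + '_' + parts[4].lower()   # 'namforecast_hsofs'
print(location_type, source, data_source)
```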
{
"source": "jmpmulter/coolon-pipeline",
"score": 3
} |
#### File: jmpmulter/coolon-pipeline/generate_target.py
```python
import os
import sys
def main():
in_path = sys.argv[1]
out_path = sys.argv[2]
infile = open(in_path, "r")
outfile = open(out_path, "x")
for l0 in infile:
l0s = l0.split(",")
for item in l0s:
if "Dsec" in item:
outfile.write(item.split("\\")[1]+"\n")
if __name__ == "__main__":
main()
``` |
{
"source": "jmpolom/pyinfra",
"score": 2
} |
#### File: tests/test_connectors/test_util.py
```python
from __future__ import unicode_literals
from unittest import TestCase
from mock import patch
from pyinfra.api import Config, State
from pyinfra.api.connectors.util import (
make_unix_command,
make_unix_command_for_host,
split_combined_output,
)
from ..util import make_inventory
class TestConnectorUtil(TestCase):
def test_split_combined_output_works(self):
results = split_combined_output([
('stdout', 'stdout1'),
('stdout', 'stdout2'),
('stderr', 'stderr1'),
('stdout', 'stdout3'),
])
assert results == (['stdout1', 'stdout2', 'stdout3'], ['stderr1'])
def test_split_combined_output_raises(self):
with self.assertRaises(ValueError):
split_combined_output(['nope', ''])
class TestMakeUnixCommandConnectorUtil(TestCase):
def test_command(self):
command = make_unix_command('echo Šablony')
assert command.get_raw_value() == "sh -c 'echo Šablony'"
def test_doas_command(self):
command = make_unix_command('uptime', doas=True)
assert command.get_raw_value() == 'doas -n sh -c uptime'
def test_doas_user_command(self):
command = make_unix_command('uptime', doas=True, doas_user='pyinfra')
assert command.get_raw_value() == 'doas -n -u pyinfra sh -c uptime'
def test_sudo_command(self):
command = make_unix_command('uptime', sudo=True)
assert command.get_raw_value() == 'sudo -H -n sh -c uptime'
def test_sudo_multi_arg_command(self):
command = make_unix_command('echo hi', sudo=True, preserve_sudo_env=True)
assert command.get_raw_value() == "sudo -H -n -E sh -c 'echo hi'"
def test_sudo_preserve_env_command(self):
command = make_unix_command('uptime', sudo=True, preserve_sudo_env=True)
assert command.get_raw_value() == 'sudo -H -n -E sh -c uptime'
def test_use_sudo_login_command(self):
command = make_unix_command('uptime', sudo=True, use_sudo_login=True)
assert command.get_raw_value() == 'sudo -H -n -i sh -c uptime'
def test_sudo_user_command(self):
command = make_unix_command('uptime', sudo=True, sudo_user='pyinfra')
assert command.get_raw_value() == 'sudo -H -n -u pyinfra sh -c uptime'
def test_su_command(self):
command = make_unix_command('uptime', su_user='pyinfra')
assert command.get_raw_value() == "su pyinfra -c 'sh -c uptime'"
def test_su_multi_arg_command(self):
command = make_unix_command('echo hi', su_user='pyinfra')
assert command.get_raw_value() == "su pyinfra -c 'sh -c '\"'\"'echo hi'\"'\"''"
def test_use_su_login_command(self):
command = make_unix_command('uptime', su_user='pyinfra', use_su_login=True)
assert command.get_raw_value() == "su -l pyinfra -c 'sh -c uptime'"
def test_preserve_su_env_command(self):
command = make_unix_command('uptime', su_user='pyinfra', preserve_su_env=True)
assert command.get_raw_value() == "su -m pyinfra -c 'sh -c uptime'"
def test_su_shell_command(self):
command = make_unix_command('uptime', su_user='pyinfra', su_shell='bash')
assert command.get_raw_value() == "su -s `which bash` pyinfra -c 'sh -c uptime'"
def test_command_env(self):
command = make_unix_command('uptime', env={
'key': 'value',
'anotherkey': 'anothervalue',
})
assert command.get_raw_value() in [
"sh -c 'export \"key=value\" \"anotherkey=anothervalue\" && uptime'",
"sh -c 'export \"anotherkey=anothervalue\" \"key=value\" && uptime'",
]
def test_command_chdir(self):
command = make_unix_command('uptime', chdir='/opt/somedir')
assert command.get_raw_value() == "sh -c 'cd /opt/somedir && uptime'"
def test_custom_shell_command(self):
command = make_unix_command('uptime', shell_executable='bash')
assert command.get_raw_value() == 'bash -c uptime'
def test_mixed_command(self):
command = make_unix_command(
'echo hi',
chdir='/opt/somedir',
env={'key': 'value'},
sudo=True,
sudo_user='root',
preserve_sudo_env=True,
su_user='pyinfra',
shell_executable='bash',
)
assert command.get_raw_value() == (
'sudo -H -n -E -u root ' # sudo bit
'su pyinfra -c ' # su bit
"'bash -c '\"'\"'cd /opt/somedir && export \"key=value\" " # shell and export bit
"&& echo hi'\"'\"''" # command bit
)
def test_command_exists_su_config_only(self):
'''
        This test covers a bug that appeared when `make_unix_command` is called
with `su_user=False` (default) but `SU_USER` set on the config object,
resulting in an empty command output.
'''
state = State(make_inventory(), Config(SU_USER=True))
host = state.inventory.get_host('somehost')
command = make_unix_command_for_host(state, host, 'echo Šablony')
assert command.get_raw_value() == "sh -c 'echo Šablony'"
class TestMakeUnixCommandConnectorUtilWarnings(TestCase):
def test_doas_warnings(self):
state = State(make_inventory(), Config(SU_USER=True, SUDO=True))
host = state.inventory.get_host('somehost')
with patch('pyinfra.api.connectors.util._warn_invalid_auth_args') as fake_auth_args:
command = make_unix_command_for_host(state, host, 'echo Šablony')
assert command.get_raw_value() == "sh -c 'echo Šablony'"
fake_auth_args.assert_called_once()
_, args, kwargs = fake_auth_args.mock_calls[0]
assert args[1] == 'doas'
assert args[2] == ('doas_user',)
def test_sudo_warnings(self):
state = State(make_inventory(), Config(SU_USER=True, DOAS=True))
host = state.inventory.get_host('somehost')
with patch('pyinfra.api.connectors.util._warn_invalid_auth_args') as fake_auth_args:
command = make_unix_command_for_host(state, host, 'echo Šablony')
assert command.get_raw_value() == "sh -c 'echo Šablony'"
fake_auth_args.assert_called_once()
_, args, kwargs = fake_auth_args.mock_calls[0]
assert args[1] == 'sudo'
assert args[2] == ('use_sudo_password', 'use_sudo_login', 'preserve_sudo_env', 'sudo_user')
def test_su_warnings(self):
state = State(make_inventory(), Config(DOAS=True, SUDO=True))
host = state.inventory.get_host('somehost')
with patch('pyinfra.api.connectors.util._warn_invalid_auth_args') as fake_auth_args:
command = make_unix_command_for_host(state, host, 'echo Šablony')
assert command.get_raw_value() == "sh -c 'echo Šablony'"
fake_auth_args.assert_called_once()
_, args, kwargs = fake_auth_args.mock_calls[0]
assert args[1] == 'su_user'
assert args[2] == ('use_su_login', 'preserve_su_env', 'su_shell')
``` |
{
"source": "JmPotato/Bookshelf",
"score": 2
} |
#### File: JmPotato/Bookshelf/main.py
```python
import sys
import os.path
import tornado.web
import tornado.ioloop
import tornado.httpserver
import urls
from init_db import db, async_db
from settings import *
from tornado.options import define, options
major = sys.version_info[0]
if major < 3:
reload(sys)
sys.setdefaultencoding('utf-8')
define('port', default=8080, help='run on the given port', type=int)
class Application(tornado.web.Application):
def __init__(self):
settings = dict(
static_path = os.path.join(os.path.dirname(__file__), "static"),
template_path = os.path.join(os.path.dirname(__file__), "templates"),
autoescape = None,
google_analytics = google_analytics.lstrip(),
cookie_secret = cookie_secret,
api_key = api_key,
api_secret = api_secret,
callback = callback,
xsrf_cookies = True,
login_url = "/login",
debug = Debug,
)
tornado.web.Application.__init__(self, urls.handlers, **settings)
self.db = db
self.async_db = async_db
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application(), xheaders=True)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main()
``` |
{
"source": "JmPotato/DNS_Relay_Server",
"score": 3
} |
#### File: DNS_Relay_Server/Python/server.py
```python
import sys
import getopt # module for parsing command-line arguments
import socketserver # threaded socket server
from dns_resolver import DNSResolver # import the DNSResolver class from dns_resolver, used to parse and resolve DNS messages
# Socket server handler
class DNSHandler(socketserver.BaseRequestHandler):
def handle(self):
        output_level = 1 # default output level is 1
        local_file = 'dnsrelay.txt' # default local lookup table filename
        remote_server = '192.168.3.11' # default remote forwarding DNS server address (Alibaba Cloud DNS server)
try:
            opts, args = getopt.getopt(sys.argv[1:], 'ho:f:s:', ['help', 'output=', 'filename=', 'server=']) # parse command-line arguments, skipping the first one (the script filename)
            for opt, arg in opts: # opts is a list of 2-tuples; each element is (option string, argument)
                # -o or --output sets the output level, either 1 or 2
if opt in ("-o", "--output"):
output_level = int(arg)
                # -f or --filename specifies the local lookup table file
elif opt in ("-f", "--filename"):
local_file = arg
                # -s or --server specifies the remote DNS server address for forwarded queries
elif opt in ("-s", "--server"):
remote_server = arg
except getopt.GetoptError:
print("Usage:\n -o [1|2] -f [filename] -s [dns_server_upaddr]") #打印使用方法,并退出
sys.exit(1)
        request_data = self.request[0] # binary DNS query message data
        request_socket = self.request[1] # keep this socket connection for sending the response message back
        # Parse the DNS request and perform the lookup
        dns_server = DNSResolver(request_data, local_file, remote_server)
        # Print the message information to the screen in real time
        if output_level == 1: # debug output level 1
out = "QNAME: %s\nQTYPE: %-5s %-5s\tRCODE: %s\n" % (dns_server.request['question']['QNAME'],
dns_server.request['question']['QTYPE'],
dns_server.transFlag('TYPE', dns_server.request['question']['QTYPE']),
dns_server.transFlag('RCODE', dns_server.response['flags']['RCODE']))
out += "RESULT: %s\n" % dns_server.response['answer']['ARDATA']
out += "====================================================================\n"
        else: # debug output level 2
out = "Client: %s:%s\n" % (self.client_address[0], self.client_address[1])
out += "#REQUEST#\n"
out += "Header:\n"
out += "ID: %-5s\tFlags: %-5s\nQDCOUNT: %-2s\tANCOUNT: %-2s\tNSCOUNT: %-2s\tARCOUNT: %-2s\n" % (
dns_server.request['header']['ID'], dns_server.request['header']['FLAGS'],
dns_server.request['header']['QDCOUNT'], dns_server.request['header']['ANCOUNT'],
dns_server.request['header']['NSCOUNT'], dns_server.request['header']['ARCOUNT'])
out += "Question:\n"
out += "QNAME: %s\nQTYPE: %-5s %-5s\tQCLASS: %s\tRCODE: %s\n" % (
dns_server.request['question']['QNAME'], dns_server.request['question']['QTYPE'],
dns_server.transFlag('TYPE', dns_server.request['question']['QTYPE']),
dns_server.transFlag('CLASS', dns_server.request['question']['QCLASS']),
dns_server.transFlag('RCODE', dns_server.request['flags']['RCODE']))
out += '\n#RESPONSE#\n'
out += "Header:\n"
out += "ID: %-5s\tFlags: %-5s\nQDCOUNT: %-2s\tANCOUNT: %-2s\tNSCOUNT: %-2s\tARCOUNT: %-2s\n" % (
dns_server.response['header']['ID'], dns_server.response['header']['FLAGS'],
dns_server.response['header']['QDCOUNT'], dns_server.response['header']['ANCOUNT'],
dns_server.response['header']['NSCOUNT'], dns_server.response['header']['ARCOUNT'])
out += "Question:\n"
out += "QNAME: %s\nQTYPE: %-5s %-5s\tQCLASS: %s\tRCODE: %s\n" % (
dns_server.response['question']['QNAME'], dns_server.response['question']['QTYPE'],
dns_server.transFlag('TYPE', dns_server.response['question']['QTYPE']),
dns_server.transFlag('CLASS', dns_server.response['question']['QCLASS']),
dns_server.transFlag('RCODE', dns_server.response['flags']['RCODE']))
if dns_server.response['header']['ANCOUNT']:
out += "Answer:\n"
out += "ANAME: %s\nATYPE: %-5s %-5s\tACLASS: %-2s\tATTL: %-5s\tARDLENGTH: %-2s\n" % (
dns_server.response['answer']['ANAME'], dns_server.response['answer']['ATYPE'],
dns_server.transFlag('TYPE', dns_server.response['answer']['ATYPE']),
dns_server.response['answer']['ACLASS'], dns_server.response['answer']['ATTL'],
dns_server.response['answer']['ARDLENGTH'])
out += "ARDATA: %s\n" % dns_server.response['answer']['ARDATA']
out += "====================================================================\n"
        sys.stdout.write(out) # print the message information to the console
        # Send the response message back, completing the DNS query relay
request_socket.sendto(dns_server.response['data'], self.client_address)
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], 'ho:f:s:', ['help', 'output=', 'filename=', 'server='])
for opt, arg in opts:
            # -h or --help prints command-line usage help
if opt in ("-h", "--help"):
print("Usage:\n -o [1|2] -f [filename] -s [dns_server_upaddr]")
sys.exit(1)
except getopt.GetoptError:
print("Usage:\n -o [1|2] -f [filename] -s [dns_server_upaddr]") #打印使用方法,并退出
sys.exit(1)
HOST, PORT = "127.0.0.1", 53
    server = socketserver.ThreadingUDPServer((HOST, PORT), DNSHandler) # start a threaded UDP server; a new thread is created to handle each connecting client's requests
    server.serve_forever() # loop forever, continuously listening on the port
``` |
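A minimal test client, not part of the original project, can exercise the relay by sending a hand-built DNS A-record query over UDP; the query name and transaction id below are arbitrary choices.
```python
# Minimal UDP client sketch for exercising the relay above (not from the original repo).
import socket
import struct
def build_query(name, txid=0x1234, qtype=1, qclass=1):
    header = struct.pack(">HHHHHH", txid, 0x0100, 1, 0, 0, 0)  # standard query, RD=1, one question
    qname = b"".join(bytes([len(p)]) + p.encode() for p in name.split(".")) + b"\x00"
    return header + qname + struct.pack(">HH", qtype, qclass)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5)
sock.sendto(build_query("example.com"), ("127.0.0.1", 53))
response, _ = sock.recvfrom(512)
print("received %d bytes" % len(response))
```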
{
"source": "JmPotato/Dopamine",
"score": 2
} |
#### File: Dopamine/examples/hello.py
```python
from dopamine import Dopamine
app = Dopamine(listener=('127.0.0.1', 5299))
@app.route('/', ['GET'])
def hello(request, response):
html = 'Hello, dopamine!<br>'
html += 'Your host is {0}:{1}<br>'.format(
request.remote_addr, request.remote_port)
html += 'Your user agent is: {0}'.format(
request.headers['User-Agent'])
return html
app.run()
```
#### File: src/dopamine/app.py
```python
from gevent import monkey, pywsgi
from .request import Request
from .response import Response
monkey.patch_all()
class Dopamine(pywsgi.WSGIServer):
"""A simple and fast Python web framework which aims to help you build an
app agilely.
"""
def __init__(self, listener=None):
self._router = {}
self.listener = ('127.0.0.1', 2995) if listener is None else listener
pywsgi.WSGIServer.__init__(self, listener=self.listener,
application=self.application)
def __str__(self):
return "<class 'DopamineObeject'>"
def __repr__(self):
return 'DopamineObeject'
def route(self, url, method_list):
def decorator(f):
# Check the url
if isinstance(url, str) and len(url) > 0 and url[0] == '/':
if len(url) > 1 and url[-1] == '/':
new_url = url[:-1]
else:
new_url = url
else:
from .exceptions import RouterException
raise RouterException(
"Illeage URL '{0}' for handler '{1}'"
.format(url, f.__name__))
# Check the method list
if isinstance(method_list, list):
new_method_list = []
unknown_method_list = []
for method in method_list:
if isinstance(method, str) and \
method.upper() in {'GET', 'HEAD', 'POST', 'PUT',
'DELETE', 'CONNECT', 'OPTIONS',
'TRACE', 'PATCH'}:
new_method_list.append(method.upper())
else:
unknown_method_list.append(method)
if not new_method_list:
if not unknown_method_list:
new_method_list = ['GET']
else:
from .exceptions import RouterException
raise RouterException(
"Unsupported HTTP method '{0}'"
.format(unknown_method_list[0]))
self._router[new_url] = (new_method_list, f)
return f
return decorator
def application(self, env, start_response):
request = Request(env)
response = Response(content_type='text/html')
if request.path in self._router:
if request.method in self._router[request.path][0]:
response.body = self._router[request.path][1](
request, response)
start_response(response.status, response.headers)
else:
from .exceptions import MethodNotAllowed
response.status = MethodNotAllowed.status
start_response(response.status, response.headers)
response.body = MethodNotAllowed.description
else:
from .exceptions import NotFound
response.status = NotFound.status
response.body = NotFound.description
start_response(response.status, response.headers)
return [response.body]
def run(self):
self.serve_forever()
```
#### File: src/dopamine/exceptions.py
```python
class DopamineException(Exception):
pass
class RouterException(DopamineException):
pass
class HTTPException(Exception):
"""For HTTP exceptions.
"""
code = None
status = None
description = None
def __init__(self, description=None, response=None):
super(HTTPException, self).__init__()
if description is not None:
self.description = description
self.response = response
class NotFound(HTTPException):
"""*404* `Not Found`
Raise if a resource does not exist and never existed.
"""
code = 404
status = '404 Not Found'
description = (
"The requested URL was not found on the server. If you entered"
" the URL manually please check your spelling and try again."
)
class MethodNotAllowed(HTTPException):
"""*405* `Method Not Allowed`
Raise if the server used a method the resource does not handle. For
example `POST` if the resource is view only. Especially useful for REST.
The first argument for this exception should be a list of allowed methods.
Strictly speaking the response would be invalid if you don't provide valid
methods in the header which you can do with that list.
"""
code = 405
status = '405 Method Not Allowed'
description = "The method is not allowed for the requested URL."
``` |
{
"source": "JmPotato/Pobox",
"score": 2
} |
#### File: Pobox/backend/run.py
```python
from flask import Flask
from flask_cors import CORS
from models import init_all_tables
from routes import init_all_routes
app = Flask(__name__)
# Cross-origin
CORS(app)
@app.before_first_request
def init_db():
init_all_tables()
init_all_routes(app)
if __name__ == "__main__":
app.run(port=5000, debug=True)
``` |
{
"source": "JmPotato/Pomash",
"score": 2
} |
#### File: Pomash/libs/handler.py
```python
import re
import traceback
import mistune
import tornado.web
from .models import *
from .markdown import *
from urllib.parse import unquote, quote
from tornado.escape import to_unicode, xhtml_escape
class BaseHandler(tornado.web.RequestHandler):
def get_pure_title(self, title):
return re.sub('''<("[^"]*"|'[^']*'|[^'">])*>''', "", title).strip()
def escape_string(self, s):
return xhtml_escape(s)
def description(self, text):
if len(text) <= 200:
return re.sub('(<.*?>)', '', text).replace('\n', ' ')[:int(len(text)/2-4)] + '...'
elif len(text) > 200:
return re.sub('(<.*?>)', '', text).replace('\n', ' ')[:195] + '...'
def md_to_html(self, text):
text = to_unicode(text)
renderer = MyRenderer()
md = mistune.create_markdown(
renderer=renderer,
plugins=['strikethrough']
)
return md(text)
def urlencode(self, text):
return quote(text.encode('utf8'))
def urldecode(self, text):
return unquote(text.encode('utf8'))
def get_custom_page(self):
return get_all_pages()
def get_current_user(self):
username = self.get_secure_cookie("username")
if not username:
return None
return username
def get_error_html(self, status_code, **kwargs):
if status_code == 404:
self.render("404.html",
title="404 Page Not Found",
)
else:
try:
exception = "%s\n\n%s" % (
kwargs["exception"],
traceback.format_exc()
)
if self.settings.get("debug"):
self.set_header('Content-Type', 'text/plain')
for line in exception:
self.write(line)
else:
self.write("oOps...! I made a mistake... ")
except Exception:
return super(BaseHandler, self).get_error_html(
status_code,
**kwargs
)
```
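No concrete handler appears in this file, so here is a hedged sketch of how one might build on BaseHandler above; ArticleHandler, article.html and get_article_by_id are illustrative assumptions rather than actual Pomash code.
```python
# Hypothetical handler built on BaseHandler above; the route class, template name and
# get_article_by_id helper are assumptions for illustration only.
class ArticleHandler(BaseHandler):
    def get(self, article_id):
        article = get_article_by_id(int(article_id))  # assumed helper from .models
        if not article:
            raise tornado.web.HTTPError(404)
        self.render("article.html",
                    title=self.get_pure_title(article.title),
                    content=self.md_to_html(article.content))
```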
#### File: Pomash/libs/utils.py
```python
import os
import time
import string
import random
import dropbox
import hashlib
import pygments
import datetime
def to_md5(word):
return hashlib.md5(word.encode('utf-8')).hexdigest()
def make_token():
key = ''.join(random.sample(string.ascii_letters+string.digits, 20))
return key
def get_datetime():
return str(datetime.datetime.now()).split('.')[0]
def backup(dbx, file_path, upload_path):
path = '/' + upload_path
mode = dropbox.files.WriteMode.overwrite
mtime = os.path.getmtime(file_path)
with open(file_path, 'rb') as f:
data = f.read()
try:
dbx.files_upload(
data, path, mode,
client_modified=datetime.datetime(*time.gmtime(mtime)[:6]),
mute=True
)
except dropbox.exceptions.ApiError as err:
print('%s API error' % err)
return False
return True
def restore(dbx, file_name, download_path):
path = '/' + file_name
try:
dbx.files_download_to_file(download_path, path)
except dropbox.exceptions.HttpError as err:
print('%s API error' % err)
return False
return True
def trim(string):
return string.strip().lstrip()
``` |
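For reference, a hedged sketch of how the Dropbox helpers above might be driven; the access token and file paths are placeholders, not values from the project.
```python
# Hypothetical driver for the backup/restore helpers above (token and paths are placeholders).
import dropbox
dbx = dropbox.Dropbox("YOUR_DROPBOX_ACCESS_TOKEN")
backup(dbx, file_path="blog.db", upload_path="blog.db.bak")
restore(dbx, file_name="blog.db.bak", download_path="restored_blog.db")
```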
{
"source": "JmPotato/Quantitative_Trading",
"score": 2
} |
#### File: JmPotato/Quantitative_Trading/strategy.py
```python
import sys
import json
import time
import getopt
import datetime
import traceback
import okex.spot_api as spot
import okex.swap_api as swap
import okex.futures_api as future
import okex.account_api as account
class Strategy(object):
def __init__(self, config_filename):
        # Load the configuration file
self._config_filename = config_filename
self.equitySum = 0
self.currencyList = [
{
"currency": "BTC",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0,
"grid_long": 0,
"grid_short": 0,
"baseline": 0,
"changed": 0,
"best": 0,
"grid_order": None
},
{
"currency": "LTC",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0,
"grid_long": 0,
"grid_short": 0,
"baseline": 0,
"changed": 0,
"best": 0,
"grid_order": None
},
{
"currency": "ETH",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0,
"grid_long": 0,
"grid_short": 0,
"baseline": 0,
"changed": 0,
"best": 0,
"grid_order": None
},
{
"currency": "EOS",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0,
"grid_long": 0,
"grid_short": 0,
"baseline": 0,
"changed": 0,
"best": 0,
"grid_order": None
},
{
"currency": "BCH",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0,
"grid_long": 0,
"grid_short": 0,
"baseline": 0,
"changed": 0,
"best": 0,
"grid_order": None
},
{
"currency": "XRP",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0,
"grid_long": 0,
"grid_short": 0,
"baseline": 0,
"changed": 0,
"best": 0,
"grid_order": None
}]
self.currentLong = [{
"currency": "",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0
},
{
"currency": "",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0
}]
self.currentShort = [{
"currency": "",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0
},
{
"currency": "",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0
}]
def get_config(self):
with open("./" + self._config_filename, 'r') as load_f:
return json.load(load_f)
def update_config(self):
config_json = self.get_config()
        # Initialize the API clients
self._account_api = account.AccountAPI(
config_json["auth"]["api_key"], config_json["auth"]["seceret_key"], config_json["auth"]["passphrase"], True)
self._spot_api = spot.SpotAPI(
config_json["auth"]["api_key"], config_json["auth"]["seceret_key"], config_json["auth"]["passphrase"], True)
self._future_api = future.FutureAPI(
config_json["auth"]["api_key"], config_json["auth"]["seceret_key"], config_json["auth"]["passphrase"], True)
self._swap_api = swap.SwapAPI(
config_json["auth"]["api_key"], config_json["auth"]["seceret_key"], config_json["auth"]["passphrase"], True)
        # Initialize parameters
self._strategy_id = config_json["strategy_id"]
self._k_line_period = config_json["k_line_period"]
self._sampling_num = config_json["sampling_num"]
self._leverage = config_json["leverage"]
self._coin_usdt = config_json["coin_usdt"]
self._coin_usdt_overflow = config_json["coin_usdt_overflow"]
self._insurance = config_json["insurance"]
self._long = config_json["long"]
self._short = config_json["short"]
self._grid = config_json["grid"]
        # Derived parameters
self._sampling_sum = (self._sampling_num *
(1 + self._sampling_num)) / 2
def get_bar_time(self):
timestamp = self._future_api.get_kline(
self.currencyList[0]["instrument_id"], 14400)[0][0]
return timestamp
def get_all_instuments_id(self):
all_instuments_id = self._future_api.get_products()
for currency in self.currencyList:
for instument_id in all_instuments_id:
if(instument_id["alias"] == "quarter" and instument_id["underlying_index"] == currency["currency"]):
currency["instrument_id"] = instument_id["instrument_id"]
break
def get_all_position(self):
long_index = 0
for currency in self.currencyList:
position = self._future_api.get_specific_position(
currency["instrument_id"])
if(position["holding"]):
if(currency["grid_long"] > 0):
currency["long"] = int(
position["holding"][0]["long_qty"]) - currency["grid_long"]
else:
currency["long"] = int(
position["holding"][0]["long_qty"]) + currency["grid_long"]
if(currency["long"] > 0):
if(long_index < 2):
self.currentLong[long_index] = currency
long_index += 1
short_index = 0
for currency in self.currencyList:
position = self._future_api.get_specific_position(
currency["instrument_id"])
if(position["holding"]):
if(currency["grid_short"] > 0):
currency["short"] = int(
position["holding"][0]["short_qty"]) - currency["grid_short"]
else:
currency["short"] = int(
position["holding"][0]["short_qty"]) + currency["grid_short"]
if(currency["currency"] == "BTC"):
if(currency["short"] > self._short["btc_instrument_amount"]):
currency["insurance"] = self._insurance["btc_insurance_amount"]
currency["short"] = self._short["btc_instrument_amount"]
if(short_index < 2):
self.currentShort[short_index] = currency
short_index += 1
elif(currency["short"] == self._short["btc_instrument_amount"]):
currency["insurance"] = 0
currency["short"] = self._short["btc_instrument_amount"]
if(short_index < 2):
self.currentShort[short_index] = currency
short_index += 1
elif(currency["short"] == self._insurance["btc_insurance_amount"]):
currency["insurance"] = self._insurance["btc_insurance_amount"]
currency["short"] = 0
elif(currency["short"] > 0):
currency["insurance"] = 0
if(short_index < 2):
self.currentShort[short_index] = currency
short_index += 1
else:
currency["insurance"] = 0
currency["short"] = 0
else:
if(currency["short"] > self._short["other_instrument_amount"]):
currency["insurance"] = self._insurance["other_insurance_amount"]
currency["short"] = self._short["other_instrument_amount"]
if(short_index < 2):
self.currentShort[short_index] = currency
short_index += 1
elif(currency["short"] == self._short["other_instrument_amount"]):
currency["insurance"] = 0
currency["short"] = self._short["other_instrument_amount"]
if(short_index < 2):
self.currentShort[short_index] = currency
short_index += 1
elif(currency["short"] == self._insurance["other_insurance_amount"]):
currency["insurance"] = self._insurance["other_insurance_amount"]
currency["short"] = 0
elif(currency["short"] > 0):
currency["insurance"] = 0
if(short_index < 2):
self.currentShort[short_index] = currency
short_index += 1
else:
currency["insurance"] = 0
currency["short"] = 0
self.currentLong = sorted(
self.currentLong, key=lambda e: e.__getitem__("gain"), reverse=True)
self.currentShort = sorted(
self.currentShort, key=lambda e: e.__getitem__("gain"))
def get_all_equity(self):
self.equitySum = 0
for currency in self.currencyList:
close = self._spot_api.get_kline(
currency["currency"] + "-USDT", "", "", self._k_line_period)[0][4]
equity = self._future_api.get_coin_account(
currency["currency"])["equity"]
currency["equity"] = float(close) * float(equity)
self.equitySum += currency["equity"]
spotAccountInfo = self._spot_api.get_account_info()
for currency in spotAccountInfo:
if(currency["currency"] == "USDT"):
self.equitySum += float(currency["balance"])
break
def get_all_gain(self):
for currency in self.currencyList:
_k_line_datas = self._future_api.get_kline(
currency["instrument_id"], self._k_line_period)
grandGain = [0] * 7
weightedGain = 0
for index, data in enumerate(_k_line_datas[0:self._sampling_num]):
grandGain[index] = (float(data[4]) / float(data[1]) - 1) * 100
if(index > 0):
grandGain[index] = grandGain[index] + grandGain[index - 1]
weightedGain += grandGain[index] * (index + 1)
currency["gain"] = weightedGain / self._sampling_sum
def init_insurance(self):
for currency in self.currencyList:
if(currency["insurance"] == 0):
currency_to_insurance_result = {}
if(currency["currency"] == "BTC"):
currency_to_insurance_result = self._future_api.take_order(
"", currency["instrument_id"], 2, 0, self._insurance["btc_insurance_amount"], 1, self._leverage)
if(currency_to_insurance_result["result"]):
currency["insurance"] = self._insurance["btc_insurance_amount"]
else:
currency_to_insurance_result = self._future_api.take_order(
"", currency["instrument_id"], 2, 0, self._insurance["other_insurance_amount"], 1, self._leverage)
if(currency_to_insurance_result["result"]):
currency["insurance"] = self._insurance["other_insurance_amount"]
def update_insurance(self):
for currency in self.currencyList:
if(currency["insurance"] != 0 and currency["gain"] > 0):
self._future_api.take_order(
"", currency["instrument_id"], 4, 0, currency["insurance"], 1, self._leverage)
currency["changed"] = 1
currency["insurance"] = 0
if(currency["insurance"] == 0 and currency["gain"] < 0):
if(currency["currency"] == "BTC"):
self._future_api.take_order(
"", currency["instrument_id"], 2, 0, self._insurance["btc_insurance_amount"], 1, self._leverage)
currency["insurance"] = self._insurance["btc_insurance_amount"]
currency["changed"] = 1
else:
self._future_api.take_order(
"", currency["instrument_id"], 2, 0, self._insurance["other_insurance_amount"], 1, self._leverage)
currency["insurance"] = self._insurance["other_insurance_amount"]
currency["changed"] = 1
currency_future_amount = self._future_api.get_coin_account(
currency["currency"])
overflow_amount = float(
currency_future_amount["equity"]) - self._insurance["usdt_insurance_amount"] / float(self._spot_api.get_kline(
currency["currency"] + "-USDT", "", "", self._k_line_period)[0][4])
print(currency["currency"] + " " + str(abs(overflow_amount)) +
" " + str(currency_future_amount["total_avail_balance"]))
if(overflow_amount > 0 and abs(overflow_amount) > float(currency_future_amount["total_avail_balance"])):
print("Skip")
continue
if(overflow_amount > 0):
transfer_result = self._account_api.coin_transfer(
currency["currency"], overflow_amount, 3, 1)
if(transfer_result["result"]):
time.sleep(10)
self._spot_api.take_order(
"market", "sell", currency["currency"] + "-USDT", overflow_amount, 0)
elif(overflow_amount < 0):
original_amount = float(self._spot_api.get_coin_account_info(
currency["currency"])["balance"])
usdt_amount = (-overflow_amount) * float(self._spot_api.get_kline(
currency["currency"] + "-USDT", "", "", self._k_line_period)[0][4])
usdt_to_currency_result = self._spot_api.take_order(
"market", "buy", currency["currency"] + "-USDT", 0, usdt_amount)
if(usdt_to_currency_result["result"]):
time.sleep(10)
transfer_amount = float(self._spot_api.get_coin_account_info(
currency["currency"])["balance"]) - original_amount
transfer_result = self._account_api.coin_transfer(
currency["currency"], transfer_amount, 1, 3)
def open_long_order(self):
self.currencyList = sorted(
self.currencyList, key=lambda e: e.__getitem__("gain"), reverse=True)
for i in range(0, 2):
if(self.currencyList[i]["gain"] > 0 and (self.currencyList[i]["currency"] != self.currentLong[0]["currency"] and self.currencyList[i]["currency"] != self.currentLong[1]["currency"])):
print("当前操作于 " + self.currencyList[i]["currency"])
if(self.currentLong[i]["currency"]):
self._future_api.take_order(
"", self.currentLong[i]["instrument_id"], 3, 0, self.currentLong[i]["long"], 1, self._leverage)
self.set_changed(self.currentLong[i]["currency"])
self.set_best(self.currentLong[i]["currency"], 0)
order_result = {}
if(self.currencyList[i]["currency"] == "BTC"):
order_result = self._future_api.take_order(
"", self.currencyList[i]["instrument_id"], 1, 0, self._long["btc_instrument_amount"], 1, self._leverage)
self.set_changed(self.currencyList[i]["currency"])
self.set_best(self.currencyList[i]["currency"], 1)
else:
order_result = self._future_api.take_order(
"", self.currencyList[i]["instrument_id"], 1, 0, self._long["other_instrument_amount"], 1, self._leverage)
self.set_changed(self.currencyList[i]["currency"])
self.set_best(self.currencyList[i]["currency"], 1)
if(order_result["result"]):
self.currentLong[i] = self.currencyList[i]
if(self.currencyList[i]["gain"] < 0 and (self.currencyList[i]["currency"] == self.currentLong[0]["currency"] or self.currencyList[i]["currency"] == self.currentLong[1]["currency"])):
print("当前操作于 " + self.currencyList[i]["currency"])
close_result = self._future_api.take_order(
"", self.currencyList[i]["instrument_id"], 3, 0, self.currentLong[i]["long"], 1, self._leverage)
self.set_changed(self.currencyList[i]["currency"])
self.set_best(self.currencyList[i]["currency"], 0)
if(close_result["result"]):
self.currentLong[self.currentLong.index(self.currencyList[i])] = {
"currency": "",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0
}
def open_short_order(self):
self.currencyList = sorted(
self.currencyList, key=lambda e: e.__getitem__("gain"))
for i in range(0, 2):
if(self.currencyList[i]["gain"] < 0 and (self.currencyList[i]["currency"] != self.currentShort[0]["currency"] and self.currencyList[i]["currency"] != self.currentShort[1]["currency"])):
print("当前操作于 " + self.currencyList[i]["currency"])
if(self.currentShort[i]["currency"]):
self._future_api.take_order(
"", self.currentShort[i]["instrument_id"], 4, 0, self.currentShort[i]["short"], 1, self._leverage)
self.set_changed(self.currentShort[i]["currency"])
self.set_best(self.currentShort[i]["currency"], 0)
order_result = {}
if(self.currencyList[i]["currency"] == "BTC"):
order_result = self._future_api.take_order(
"", self.currencyList[i]["instrument_id"], 2, 0, self._short["btc_instrument_amount"], 1, self._leverage)
self.set_changed(self.currencyList[i]["currency"])
self.set_best(self.currencyList[i]["currency"], 1)
else:
order_result = self._future_api.take_order(
"", self.currencyList[i]["instrument_id"], 2, 0, self._short["other_instrument_amount"], 1, self._leverage)
self.set_changed(self.currencyList[i]["currency"])
self.set_best(self.currencyList[i]["currency"], 1)
if(order_result["result"]):
self.currentShort[i] = self.currencyList[i]
if(self.currencyList[i]["gain"] > 0 and (self.currencyList[i]["currency"] == self.currentShort[0]["currency"] or self.currencyList[i]["currency"] == self.currentShort[1]["currency"])):
close_result = self._future_api.take_order(
"", self.currencyList[i]["instrument_id"], 4, 0, self.currentShort[i]["short"], 1, self._leverage)
self.set_changed(self.currencyList[i]["currency"])
self.set_best(self.currencyList[i]["currency"], 0)
if(close_result["result"]):
self.currentShort[self.currentShort.index(self.currencyList[i])] = {
"currency": "",
"instrument_id": "",
"equity": 0,
"gain": 0,
"insurance": 0,
"long": 0,
"short": 0
}
def dynamicEquilibrium(self):
try:
print("[动态平衡中]")
self.update_config()
for currency in self.currencyList:
currency_account = self._spot_api.get_coin_account_info(
currency["currency"])
k_line_data = float(self._spot_api.get_kline(
currency["currency"] + "-USDT", "", "", self._k_line_period)[0][4])
overflow_amount = float(
currency_account["balance"]) * k_line_data - self._coin_usdt
if(overflow_amount * 100 / self._coin_usdt > self._coin_usdt_overflow):
result = self._spot_api.take_order(
"market", "sell", currency["currency"] + "-USDT", overflow_amount/k_line_data, 0)
elif(overflow_amount * 100 / self._coin_usdt < -self._coin_usdt_overflow):
result = self._spot_api.take_order(
"market", "buy", currency["currency"] + "-USDT", "", -overflow_amount)
except Exception as e:
print("[动态平衡错误信息]")
traceback.print_exc()
    # Grid trading algorithm
def set_changed(self, name):
for currency in self.currencyList:
if(currency["currency"] == name):
currency["changed"] = 1
break
def set_best(self, name, whether):
for currency in self.currencyList:
if(currency["currency"] == name):
currency["best"] = whether
break
def get_baseline(self, instrument_id):
_k_line_datas = self._future_api.get_kline(
instrument_id, self._k_line_period)
return float(_k_line_datas[0][4])
def get_all_baseline(self):
for currency in self.currencyList:
if(currency["currency"] == "BTC"):
continue
_k_line_datas = self._future_api.get_kline(
currency["instrument_id"], self._k_line_period)
currency["baseline"] = float(_k_line_datas[0][4])
def reset_grid(self):
for currency in self.currencyList:
if(currency["currency"] == "BTC"):
continue
if(currency["changed"] == 1):
currency["baseline"] = self.get_baseline(
currency["instrument_id"])
all_orders = self._future_api.get_order_list(
"6", currency["instrument_id"], "", "", "")
for order in all_orders["order_info"]:
if(order["client_oid"]):
self._future_api.revoke_order(
currency["instrument_id"], "", order["client_oid"])
if(currency["grid_long"] > 0):
self._future_api.take_order(
"", currency["instrument_id"], 3, 0, currency["grid_long"], 1, self._leverage)
if(currency["grid_short"] > 0):
self._future_api.take_order(
"", currency["instrument_id"], 4, 0, currency["grid_short"], 1, self._leverage)
currency["grid_long"] = 0
currency["grid_short"] = 0
currency["grid_order"] = None
if(not currency["best"]):
continue
if(not currency["grid_order"]):
currency["grid_order"] = [None for n in range(
self._grid["max_grid_distence"] * 2)]
for order_num, order in enumerate(currency["grid_order"]):
if(not order):
if(currency["gain"] > 0):
if(order_num < 5):
order_id = "OL" + \
str(order_num+1) + currency["currency"]
order_price = currency["baseline"] * (
1 - self._grid["grid_distence"] * (order_num + 1) * 0.01)
order_result = self._future_api.take_order(
order_id, currency["instrument_id"], 1, order_price, self._grid["instrument_amount"], 0, self._leverage)
print(order_result)
if(order_result["result"]):
currency["grid_order"][order_num] = order_id
else:
order_id = "CL" + \
str(order_num-5+1) + currency["currency"]
order_price = currency["baseline"] * (
1 + self._grid["grid_distence"] * (order_num-5+1) * 0.01)
order_result = self._future_api.take_order(
order_id, currency["instrument_id"], 3, order_price, self._grid["instrument_amount"], 0, self._leverage)
print(order_result)
if(order_result["result"]):
currency["grid_order"][order_num] = order_id
else:
if(order_num < 5):
order_id = "OS" + \
str(order_num+1) + currency["currency"]
order_price = currency["baseline"] * (
1 + self._grid["grid_distence"] * (order_num + 1) * 0.01)
order_result = self._future_api.take_order(
order_id, currency["instrument_id"], 2, order_price, self._grid["instrument_amount"], 0, self._leverage)
print(order_result)
if(order_result["result"]):
currency["grid_order"][order_num] = order_id
else:
order_id = "CS" + \
str(order_num - 5 + 1) + \
currency["currency"]
order_price = currency["baseline"] * (
1 - self._grid["grid_distence"] * (order_num-5+1) * 0.01)
order_result = self._future_api.take_order(
order_id, currency["instrument_id"], 4, order_price, self._grid["instrument_amount"], 0, self._leverage)
print(order_result)
if(order_result["result"]):
currency["grid_order"][order_num] = order_id
currency["changed"] = 0
def check_orders(self):
for currency in self.currencyList:
if(currency["currency"] == "BTC"):
continue
if(not currency["best"]):
continue
for order_num, order in enumerate(currency["grid_order"]):
if(order):
order_info = self._future_api.get_order_info(
currency["instrument_id"], "", order)
if(order[:2] == "OL" and order_info["order_type"] == "2"):
currency["grid_long"] += self._grid["instrument_amount"]
order_id = "CL" + \
order[2] + currency["currency"]
order_price = order_info["price"] * \
(1 + self._grid["grid_distence"] * 0.01)
order_result = self._future_api.take_order(
order_id, currency["instrument_id"], 3, order_price, self._grid["instrument_amount"], 0, self._leverage)
print(order_result)
if(order_result["result"]):
currency["grid_order"][order_num] = order_id
elif(order[:2] == "OS" and order_info["order_type"] == "2"):
currency["grid_short"] += self._grid["instrument_amount"]
order_id = "CS" + \
order[2] + currency["currency"]
order_price = order_info["price"] * \
(1 - self._grid["grid_distence"] * 0.01)
order_result = self._future_api.take_order(
order_id, currency["instrument_id"], 4, order_price, self._grid["instrument_amount"], 0, self._leverage)
print(order_result)
if(order_result["result"]):
currency["grid_order"][order_num] = order_id
elif(order[:2] == "CL" and order_info["order_type"] == "2"):
currency["grid_long"] -= self._grid["instrument_amount"]
order_id = "OL" + \
order[2] + currency["currency"]
order_price = order_info["price"] * \
(1 - self._grid["grid_distence"] * 0.01)
order_result = self._future_api.take_order(
order_id, currency["instrument_id"], 1, order_price, self._grid["instrument_amount"], 0, self._leverage)
print(order_result)
if(order_result["result"]):
currency["grid_order"][order_num] = order_id
elif(order[:2] == "CS" and order_info["order_type"] == "2"):
currency["grid_short"] -= self._grid["instrument_amount"]
order_id = "OS" + \
order[2] + currency["currency"]
order_price = order_info["price"] * \
(1 + self._grid["grid_distence"] * 0.01)
order_result = self._future_api.take_order(
order_id, currency["instrument_id"], 2, order_price, self._grid["instrument_amount"], 0, self._leverage)
print(order_result)
if(order_result["result"]):
currency["grid_order"][order_num] = order_id
def init(self):
self.update_config()
self.get_all_instuments_id()
        # Print current parameter information
        # print("BTC long: %d  other long: %d" % (
        #     self._long["btc_instrument_amount"], self._long["other_instrument_amount"]))
        # print("BTC short: %d  other short: %d" % (
        #     self._short["btc_instrument_amount"], self._short["other_instrument_amount"]))
        # print("period: %d  samples: %d  leverage: %d" % (
        #     self._k_line_period, self._sampling_num, self._leverage))
def start_grid(self):
try:
print("[动态网格模块检测]")
self.reset_grid()
self.check_orders()
print("[当前网格信息]")
self.get_all_position()
for currency in self.currencyList:
print(currency)
print("\n")
except Exception as e:
print("[错误信息]")
traceback.print_exc()
print("[当前网格信息]")
self.get_all_position()
for currency in self.currencyList:
print(currency)
print("\n")
def start(self):
try:
print(datetime.datetime.now())
self.get_all_gain()
print("[更新套保中]")
self.get_all_position()
time.sleep(3)
self.update_insurance()
# self.init_insurance()
print("[更新持仓信息中]")
self.get_all_position()
print("[开始运行策略 %d]" % self._strategy_id)
time.sleep(10)
self.open_long_order()
time.sleep(10)
self.open_short_order()
time.sleep(10)
print("[当前开多]")
print(self.currentLong)
print("[当前开空]")
print(self.currentShort)
# print("[拉取收益信息]")
# self.get_all_equity()
# print("当前净值: " + str(self.equitySum))
print("[当前数据信息]")
self.get_all_position()
for currency in self.currencyList:
print(currency)
print("\n")
except Exception as e:
print("[错误信息]")
traceback.print_exc()
try:
print("[尝试重新运行策略 %d]" % self._strategy_id)
self.get_all_position()
time.sleep(10)
self.open_long_order()
time.sleep(10)
self.open_short_order()
time.sleep(10)
print("\n[当前开多]")
print(self.currentLong)
print("[当前开空]")
print(self.currentShort)
# print("[拉取收益信息]")
# self.get_all_equity()
# print("当前净值: " + str(self.equitySum))
print("[当前数据信息]")
self.get_all_position()
for currency in self.currencyList:
print(currency)
print("\n")
except Exception as e_again:
print("[错误信息x2]")
traceback.print_exc()
def clear(self):
for currency in self.currencyList:
all_orders = self._future_api.get_order_list(
"6", currency["instrument_id"], "", "", "")
for order in all_orders["order_info"]:
if(order["client_oid"]):
self._future_api.revoke_order(
currency["instrument_id"], "", order["client_oid"])
position = self._future_api.get_specific_position(
currency["instrument_id"])
if(int(position["holding"][0]["long_qty"])):
self._future_api.take_order(
"", currency["instrument_id"], 3, 0, int(position["holding"][0]["long_qty"]), 1, self._leverage)
if(int(position["holding"][0]["short_qty"])):
self._future_api.take_order(
"", currency["instrument_id"], 4, 0, int(position["holding"][0]["short_qty"]), 1, self._leverage)
if __name__ == '__main__':
config_filename = "config.json"
try:
opts, args = getopt.getopt(sys.argv[1:], 'c:', ['config='])
for opt, arg in opts:
if opt in ("-c", "--config"):
f = open("./" + arg, "r")
config_filename = arg
except (getopt.GetoptError, FileNotFoundError):
print("命令行参数错误: 获取配置文件失败")
sys.exit(1)
strategy = Strategy(config_filename)
# strategy.dynamicEquilibrium()
strategy.init()
strategy.clear()
strategy.start()
# strategy.start_grid()
last_bar_time = strategy.get_bar_time()
now_bar_time = last_bar_time
while(True):
strategy.init()
# strategy.dynamicEquilibrium()
# strategy.start_grid()
now_bar_time = strategy.get_bar_time()
if(now_bar_time != last_bar_time):
strategy.start()
last_bar_time = now_bar_time
time.sleep(60)
``` |
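To make the momentum signal in get_all_gain easier to follow, here is a standalone sketch with synthetic candles; the prices are made up, and the weighting mirrors the code above (the i-th most recent candle contributes its cumulative return with weight i + 1, normalised by sum(1..sampling_num)).
```python
# Standalone illustration of the weighted cumulative-gain signal computed in get_all_gain above.
# Candle format follows the kline API used there: [timestamp, open, high, low, close, ...]; values are synthetic.
sampling_num = 7
sampling_sum = (sampling_num * (1 + sampling_num)) / 2
opens_closes = [("100", "103"), ("103", "101"), ("101", "104"), ("104", "105"),
                ("105", "102"), ("102", "106"), ("106", "107")]
candles = [["t%d" % i, o, "", "", c] for i, (o, c) in enumerate(opens_closes)]
grand_gain = [0.0] * sampling_num
weighted_gain = 0.0
for index, data in enumerate(candles[:sampling_num]):
    grand_gain[index] = (float(data[4]) / float(data[1]) - 1) * 100
    if index > 0:
        grand_gain[index] += grand_gain[index - 1]
    weighted_gain += grand_gain[index] * (index + 1)
print(round(weighted_gain / sampling_sum, 4))
```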
{
"source": "jmpounders/radtrans",
"score": 3
} |
#### File: models/utils/pincell.py
```python
import numpy as np
from scipy.spatial import Delaunay
# NOTE: simpleMesh (used in getPinCellMesh below) is assumed to be provided elsewhere in this package.
def annulusPoints(nRad,nAzim,R,Ri,square=False) :
Atot = np.pi*(R**2-Ri**2)
Aann = Atot/nRad
# select the radii to conserve annular volume
rs = np.zeros(nRad)
rprev = Ri
for i in range(nRad) :
rs[i] = np.sqrt(Aann/np.pi + rprev**2)
rprev = rs[i]
# select the radii to be equally spaced
#dr = (R-Ri)/nRad
#rs = np.linspace(Ri+dr,R,nRad)
if Ri < 1.0e-9 :
nRad += 1
rs = np.insert(rs,0,(rs[0]+Ri)/2.0)
# decide whether to create a center point
# and create the coordinate arrays
thetai = np.pi/2.0/nAzim
if Ri < 1.0e-9 :
x = np.zeros(nRad*(nAzim+1)+1)
y = np.zeros(nRad*(nAzim+1)+1)
x[0] = 0.0
y[0] = 0.0
cntr = 1
else :
x = np.zeros(nRad*(nAzim+1))
y = np.zeros(nRad*(nAzim+1))
cntr = 0
# mesh the annulus
for ri in rs :
for j in range(nAzim+1) :
x[cntr] = ri*np.cos(j*thetai)
y[cntr] = ri*np.sin(j*thetai)
cntr += 1
# fill in the square part
if square :
if nRad == 1 :
div = R - Ri
else :
div = rs[nRad-1]-rs[nRad-2]
for j in range(nAzim+1) :
if j*thetai < np.pi/4.0 :
dr = R/np.cos(j*thetai) - R
else :
dr = R/np.sin(j*thetai) - R
if dr < 0.6*div :
nPnts = 0
x[(nRad-1)*(nAzim+1)+j] = (R+dr)*np.cos(j*thetai)
y[(nRad-1)*(nAzim+1)+j] = (R+dr)*np.sin(j*thetai)
else :
nPnts = int(np.floor(dr/div))
for i in range(nPnts) :
x = np.append(x, (R+(i+1)*dr/nPnts)*np.cos(j*thetai))
y = np.append(y, (R+(i+1)*dr/nPnts)*np.sin(j*thetai))
if nAzim%2 == 1 :
x = np.append(x,R)
y = np.append(y,R)
return x,y
def getPinCellMesh(Rf,Rc,qpitch,fuelDisc,cladDisc,modDisc) :
(x1,y1) = annulusPoints(fuelDisc[0],fuelDisc[1],Rf,0.0)
(x2,y2) = annulusPoints(cladDisc[0],cladDisc[1],Rc,Rf)
(x3,y3) = annulusPoints(modDisc[0],modDisc[1],qpitch,Rc, True)
x = np.concatenate((x1,x2,x3))
y = np.concatenate((y1,y2,y3))
    tri = Delaunay(list(zip(x, y)))
convex_hull = [[tri.simplices[s,i] for i in range(3) if tri.neighbors[s,i] > -1]
for s in range(len(tri.simplices)) if -1 in tri.neighbors[s,:]]
convex_hull = np.array(convex_hull)
materials = np.zeros(tri.simplices.shape[0], dtype=np.int32)
for i,s in enumerate(tri.simplices) :
xm = (tri.points[s[0],0] + tri.points[s[1],0] + tri.points[s[2],0])/3.0
ym = (tri.points[s[0],1] + tri.points[s[1],1] + tri.points[s[2],1])/3.0
r = np.sqrt(xm**2 + ym**2)
if r < Rf :
materials[i] = 1
elif r < Rc :
materials[i] = 2
else :
materials[i] = 3
return simpleMesh(tri.points,
tri.simplices,
materials,
tri.neighbors,
convex_hull)
``` |
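A quick visual sanity check of the annulus point generator above can be done with matplotlib; the radii and discretization counts below are illustrative guesses, not values taken from the models.
```python
# Illustrative visual check of annulusPoints above (radii and counts are assumptions).
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay
x, y = annulusPoints(nRad=3, nAzim=8, R=0.475, Ri=0.41)
tri = Delaunay(np.column_stack((x, y)))
plt.triplot(x, y, tri.simplices)
plt.gca().set_aspect('equal')
plt.show()
```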
{
"source": "jmp/picopic-backend-optimization-service",
"score": 2
} |
#### File: functions/create_download_url/index.py
```python
from datetime import datetime, timedelta, timezone
from json import dumps
from logging import INFO, getLogger
from os import environ
from typing import Optional
from boto3 import client
from botocore.client import Config
from botocore.exceptions import ClientError
from zopfli import ZopfliPNG
logger = getLogger()
logger.setLevel(INFO)
s3_client = client("s3", config=Config(s3={"addressing_style": "path"}))
PNG_HEADER = bytes([0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A])
ALLOWED_FILE_TYPES = {"image/png": "png"}
def handler(event, context):
bucket = environ["BUCKET"]
key = event["pathParameters"]["key"]
# Fetch the object from the bucket
body = _get_object_by_key(key, bucket)
if body is None:
return _error("Image does not exist.", status=404)
# Detect the file type
mime = _guess_mime_type(body)
if mime not in ALLOWED_FILE_TYPES.keys():
s3_client.delete_object(Bucket=bucket, Key=key)
return _error("File type not allowed.")
# Optimize the image
optimized_body = _optimize(body)
if optimized_body is None:
s3_client.delete_object(Bucket=bucket, Key=key)
return _error("Image could not be optimized.")
# Overwrite the original image with the optimized one
s3_client.put_object(
Bucket=bucket,
Key=key,
Body=optimized_body,
Metadata={"optimized": "true"},
)
# Generate presigned URL for downloading the file from S3
url = _create_download_url(key, bucket, mime)
return {
"statusCode": 200,
"body": dumps({"url": url}),
}
def _get_object_by_key(key: str, bucket: str) -> Optional[bytes]:
try:
obj = s3_client.get_object(Bucket=bucket, Key=key)
        # Don't allow downloading already downloaded or too old images
is_optimized = "optimized" in obj["Metadata"]
if is_optimized or obj["LastModified"] < _five_seconds_ago():
s3_client.delete_object(Bucket=bucket, Key=key)
return None
except ClientError as e:
if e.response["Error"]["Code"] == "NoSuchKey":
return None
raise
body = obj["Body"].read()
return body
def _five_seconds_ago() -> datetime:
return datetime.now(timezone.utc) - timedelta(seconds=5)
def _create_download_url(key: str, bucket: str, mime: str) -> str:
filename = f"optimized.{ALLOWED_FILE_TYPES[mime]}"
url = s3_client.generate_presigned_url(
"get_object",
Params={
"Bucket": bucket,
"Key": key,
"ResponseContentDisposition": f"attachment; filename={filename}",
},
ExpiresIn=10,
)
return url
def _error(message: str, status: int = 400) -> dict:
return {
"statusCode": status,
"body": dumps({"message": message}),
}
def _guess_mime_type(data: bytes) -> str:
"""Returns the MIME type based on magic bytes."""
if data.startswith(PNG_HEADER):
return "image/png"
return "application/octet-stream"
def _optimize(data: bytes) -> Optional[bytes]:
"""Optimizes the given image."""
try:
return ZopfliPNG().optimize(data)
except ValueError:
logger.exception("Failed to optimize image.")
return None
```
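As a small illustration of the magic-byte check in _guess_mime_type above (the first eight bytes are the standard PNG signature; the second sample is just arbitrary non-PNG data):
```python
# Quick illustration of _guess_mime_type above; the sample byte strings are illustrative.
png_sample = bytes([0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A]) + b"...rest of file..."
assert _guess_mime_type(png_sample) == "image/png"
assert _guess_mime_type(b"GIF89a...") == "application/octet-stream"
```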
#### File: functions/tests/test_create_upload_url.py
```python
from json import loads
from os import environ
from unittest.mock import patch
from moto import mock_s3
from pytest import fixture
@fixture(scope="function")
def mock_aws():
environ["AWS_ACCESS_KEY_ID"] = "testing"
environ["AWS_SECRET_ACCESS_KEY"] = "testing"
environ["AWS_SECURITY_TOKEN"] = "testing"
environ["AWS_SESSION_TOKEN"] = "testing"
environ["AWS_DEFAULT_REGION"] = "us-east-1"
@mock_s3
@patch.dict(environ, {"BUCKET": "test_bucket", "AWS_REGION": "us-east-1"})
def test_handler(mock_aws):
from ..create_upload_url.index import handler
response = handler({}, {})
body = loads(response["body"])
assert response["statusCode"] == 200
assert "fields" in body
assert "url" in body
assert "key" in body["fields"]
assert "policy" in body["fields"]
``` |
{
"source": "jmppmj/sentimentanalysis",
"score": 3
} |
#### File: sentimentanalysis/RedditbotSpidernews/vis.py
```python
import sqlite3
import pandas as pd
import plotly
import plotly.graph_objs
from plotly.graph_objs import Bar, Scatter, Marker, Layout, Margin
#Generates a bar chart from SQLite data (sentiment analysis results and post titles)
def graphRes():
conn = sqlite3.connect("test.db")
sq = "SELECT title as Post_Title, sent as Sentiment_Analysis FROM merge;"
df = pd.read_sql(sq, conn)
plotly.offline.plot({
"data": [Bar(x = df.Post_Title, y = df.Sentiment_Analysis)],
"layout": Layout(title = "Comment Sentiment Analysis on Top 25 /r/news Posts (from Last 24 Hrs)", margin=Margin(b=200))
})
#close connection
conn.close()
```
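Once the `sent` column has been populated, rendering the chart could be as simple as the sketch below; the import path is assumed from the file name and `test.db` must already contain the merged rows:
```python
# Hypothetical driver, assuming vis.py is importable and test.db holds
# merged titles/comments with a populated `sent` column.
from vis import graphRes

if __name__ == "__main__":
    graphRes()
```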
#### File: sentimentanalysis/RedditbotSpiderworldnews/sen.py
```python
import sqlite3
#Analyzes the sentiment of each title's corresponding group of comments.
#Certain words are defined as indicating positive and negative emotion.
#If a pre-defined positive word is detected, 1 is added to the overall finalCount.
#If a pre-defined negative word is detected, 1 is subtracted from the overall finalCount.
#Populates column for corresponding database row with final sentiment number.
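#A compact, parameterized sketch of the same scoring logic (assumes the same
#test.db / merge schema used throughout this module): one connection per post
#instead of twenty, with the word lists defined once.  senti1()-senti9() below
#spell out the identical checks for ids 1 through 9.
POSITIVE_WORDS = ["good", "great", "happy", "win", "love",
                  "nice", "authentic", "like", "fun", "appreciate"]
NEGATIVE_WORDS = ["fuck", "corrupt", "stupid", "irrelevant", "colluding",
                  "horrible", "unfair", "guilty", "foolish", "hateful"]
def _score_post(post_id):
    #Computes the sentiment score for one row of the merge table and writes it
    #back to the sent column, mirroring the per-id functions below.
    finalCount = 0
    conn = sqlite3.connect("test.db")
    cur = conn.cursor()
    for word in POSITIVE_WORDS + NEGATIVE_WORDS:
        cur.execute(
            "SELECT EXISTS (SELECT * FROM merge WHERE id = ? AND comments LIKE ?);",
            (post_id, "% {} %".format(word)),
        )
        if int(cur.fetchone()[0]) == 1:
            finalCount = finalCount + 1 if word in POSITIVE_WORDS else finalCount - 1
    cur.execute("UPDATE merge SET sent = ? WHERE id = ?;", (finalCount, post_id))
    conn.commit()
    cur.close()
    conn.close()
    return finalCount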
def senti1():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 1 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 1;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti2():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 2 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 2;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti3():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 3 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 3;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti4():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 4 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 4;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti5():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 5 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 5;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti6():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 6 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 6;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti7():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 7 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 7;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti8():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 8 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 8;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti9():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 9 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 9;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti10():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 10 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 10;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti11():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 11 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 11;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti12():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 12 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 12;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti13():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 13 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 13;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti14():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 14 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 14;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti15():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 15 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 15;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti16():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 16 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 16;", [finalCount])
conn.commit()
cur.close()
conn.close()
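# A minimal parameterized sketch of the per-row scoring done by the sentiN()
# helpers in this file, assuming the same test.db file and the same
# merge(id, comments, sent) table. The function name senti_generic is a new,
# hypothetical helper, not part of the original code, and it is not a drop-in
# replacement: it opens one connection per row instead of one per word check,
# and it keeps the original space-delimited '% word %' matching, so words at
# the very start or end of a comment are still missed.
import sqlite3  # assumed to be imported earlier in this file; repeated so the sketch stands alone
POSITIVE_WORDS = ["good", "great", "happy", "win", "love",
                  "nice", "authentic", "like", "fun", "appreciate"]
NEGATIVE_WORDS = ["fuck", "corrupt", "stupid", "irrelevant", "colluding",
                  "horrible", "unfair", "guilty", "foolish", "hateful"]
def senti_generic(row_id):
    finalCount = 0
    conn = sqlite3.connect("test.db")
    cur = conn.cursor()
    # +1 for each positive word present in the row's comments, -1 for each negative word
    for word, delta in [(w, 1) for w in POSITIVE_WORDS] + [(w, -1) for w in NEGATIVE_WORDS]:
        cur.execute(
            "SELECT EXISTS (SELECT * FROM merge WHERE id = ? AND comments LIKE ?);",
            (row_id, "% " + word + " %"),
        )
        if int(cur.fetchone()[0]) == 1:
            finalCount = finalCount + delta
    # write the final sentiment score back to the row, as the originals do
    cur.execute("UPDATE merge SET sent = ? WHERE id = ?;", (finalCount, row_id))
    conn.commit()
    cur.close()
    conn.close()
# Example usage (hypothetical): score rows 1 through 17 in one loop.
# for i in range(1, 18):
#     senti_generic(i)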
def senti17():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 17 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 17;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti18():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 18 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 18;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti19():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 19 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 19;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti20():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 20 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 20;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti21():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 21 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 21;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti22():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 22 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 22;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti23():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 23 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 23;", [finalCount])
conn.commit()
cur.close()
conn.close()
def senti24():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 24 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 24;", [finalCount])
conn.commit()
cur.close()
conn.close()
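# --- Hedged note on keyword matching (editor addition, not part of the original script) ---
# The LIKE '% word %' pattern used above only matches a keyword when it is surrounded
# by spaces, so words at the very start or end of a comment, or words followed by
# punctuation ("good.", "good,"), are missed. One alternative, sketched here under the
# same schema assumptions (table "merge", columns "id" and "comments"), is to fetch the
# text once and count whole-word matches in Python with a word-boundary regex. The
# caller could then write the returned score to the "sent" column exactly as the
# senti<N>() functions do.
import re
import sqlite3

def _contains_word(text, word):
    """True if `word` appears as a whole word in `text`, case-insensitively."""
    return re.search(r"\b{}\b".format(re.escape(word)), text, re.IGNORECASE) is not None

def score_comment_text(row_id):
    """Fetch one row's comments and score it without relying on LIKE spacing."""
    conn = sqlite3.connect("test.db")
    try:
        cur = conn.cursor()
        cur.execute("SELECT comments FROM merge WHERE id = ?;", (row_id,))
        row = cur.fetchone()
        text = row[0] if row and row[0] is not None else ""
        positives = ("good", "great", "happy", "win", "love",
                     "nice", "authentic", "like", "fun", "appreciate")
        negatives = ("fuck", "corrupt", "stupid", "irrelevant", "colluding",
                     "horrible", "unfair", "guilty", "foolish", "hateful")
        score = sum(1 for w in positives if _contains_word(text, w))
        score -= sum(1 for w in negatives if _contains_word(text, w))
        cur.close()
        return score
    finally:
        conn.close()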
def senti25():
finalCount = 0
#is the word 'good' present? == +1
conn = sqlite3.connect("test.db")
cur = conn.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% good %');"
cur.execute(sq)
resulta = cur.fetchone()
resultFinala = int(resulta[0])
cur.close()
conn.close()
if resultFinala == 1:
finalCount = finalCount + 1
#is the word 'great' present? == +1
conn2 = sqlite3.connect("test.db")
cur2 = conn2.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% great %');"
cur2.execute(sq)
resultb = cur2.fetchone()
resultFinalb = int(resultb[0])
cur2.close()
conn2.close()
if resultFinalb == 1:
finalCount = finalCount + 1
#is the word 'happy' present? == +1
conn3 = sqlite3.connect("test.db")
cur3 = conn3.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% happy %');"
cur3.execute(sq)
resultc = cur3.fetchone()
resultFinalc = int(resultc[0])
cur3.close()
conn3.close()
if resultFinalc == 1:
finalCount = finalCount + 1
#is the word 'win' present? == +1
conn4 = sqlite3.connect("test.db")
cur4 = conn4.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% win %');"
cur4.execute(sq)
resultd = cur4.fetchone()
resultFinald = int(resultd[0])
cur4.close()
conn4.close()
if resultFinald == 1:
finalCount = finalCount + 1
#is the word 'love' present? == +1
conn5 = sqlite3.connect("test.db")
cur5 = conn5.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% love %');"
cur5.execute(sq)
resulte = cur5.fetchone()
resultFinale = int(resulte[0])
cur5.close()
conn5.close()
if resultFinale == 1:
finalCount = finalCount + 1
#is the word 'nice' present? == +1
conn6 = sqlite3.connect("test.db")
cur6 = conn6.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% nice %');"
cur6.execute(sq)
resultf = cur6.fetchone()
resultFinalf = int(resultf[0])
cur6.close()
conn6.close()
if resultFinalf == 1:
finalCount = finalCount + 1
#is the word 'authentic' present? == +1
conn7 = sqlite3.connect("test.db")
cur7 = conn7.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% authentic %');"
cur7.execute(sq)
resultg = cur7.fetchone()
resultFinalg = int(resultg[0])
cur7.close()
conn7.close()
if resultFinalg == 1:
finalCount = finalCount + 1
#is the word 'like' present? == +1
conn8 = sqlite3.connect("test.db")
cur8 = conn8.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% like %');"
cur8.execute(sq)
resulth = cur8.fetchone()
resultFinalh = int(resulth[0])
cur8.close()
conn8.close()
if resultFinalh == 1:
finalCount = finalCount + 1
#is the word 'fun' present? == +1
conn9 = sqlite3.connect("test.db")
cur9 = conn9.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% fun %');"
cur9.execute(sq)
resulti = cur9.fetchone()
resultFinali = int(resulti[0])
cur9.close()
conn9.close()
if resultFinali == 1:
finalCount = finalCount + 1
#is the word 'appreciate' present? == +1
conn10 = sqlite3.connect("test.db")
cur10 = conn10.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% appreciate %');"
cur10.execute(sq)
resultj = cur10.fetchone()
resultFinalj = int(resultj[0])
cur10.close()
conn10.close()
if resultFinalj == 1:
finalCount = finalCount + 1
#is the word 'fuck' present? == -1
conn11 = sqlite3.connect("test.db")
cur11 = conn11.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% good %');"
cur11.execute(sq)
resultk = cur11.fetchone()
resultFinalk = int(resultk[0])
cur11.close()
conn11.close()
if resultFinalk == 1:
finalCount = finalCount - 1
#is the word 'corrupt' present? == -1
conn12 = sqlite3.connect("test.db")
cur12 = conn12.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% corrupt %');"
cur12.execute(sq)
resultm = cur12.fetchone()
resultFinalm = int(resultm[0])
cur12.close()
conn12.close()
if resultFinalm == 1:
finalCount = finalCount - 1
#is the word 'stupid' present? == -1
conn13 = sqlite3.connect("test.db")
cur13 = conn13.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% stupid %');"
cur13.execute(sq)
resultn = cur13.fetchone()
resultFinaln = int(resultn[0])
cur13.close()
conn13.close()
if resultFinaln == 1:
finalCount = finalCount - 1
#is the word 'irrelevant' present? == -1
conn14 = sqlite3.connect("test.db")
cur14 = conn14.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% irrelevant %');"
cur14.execute(sq)
resulto = cur14.fetchone()
resultFinalo = int(resulto[0])
cur14.close()
conn14.close()
if resultFinalo == 1:
finalCount = finalCount - 1
#is the word 'colluding' present? == -1
conn15 = sqlite3.connect("test.db")
cur15 = conn15.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% colluding %');"
cur15.execute(sq)
resultp = cur15.fetchone()
resultFinalp = int(resultp[0])
cur15.close()
conn15.close()
if resultFinalp == 1:
finalCount = finalCount - 1
#is the word 'horrible' present? == -1
conn16 = sqlite3.connect("test.db")
cur16 = conn16.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% horrible %');"
cur16.execute(sq)
resultq = cur16.fetchone()
resultFinalq = int(resultq[0])
cur16.close()
conn16.close()
if resultFinalq == 1:
finalCount = finalCount - 1
#is the word 'unfair' present? == -1
conn17 = sqlite3.connect("test.db")
cur17 = conn17.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% unfair %');"
cur17.execute(sq)
resultr = cur17.fetchone()
resultFinalr = int(resultr[0])
cur17.close()
conn17.close()
if resultFinalr == 1:
finalCount = finalCount - 1
#is the word 'guilty' present? == -1
conn18 = sqlite3.connect("test.db")
cur18 = conn18.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% guilty %');"
cur18.execute(sq)
resultz = cur18.fetchone()
resultFinalz = int(resultz[0])
cur18.close()
conn18.close()
if resultFinalz == 1:
finalCount = finalCount - 1
#is the word 'foolish' present? == -1
conn19 = sqlite3.connect("test.db")
cur19 = conn19.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% foolish %');"
cur19.execute(sq)
resultx = cur19.fetchone()
resultFinalx = int(resultx[0])
cur19.close()
conn19.close()
if resultFinalx == 1:
finalCount = finalCount - 1
#is the word 'hateful' present? == -1
conn20 = sqlite3.connect("test.db")
cur20 = conn20.cursor()
sq = "SELECT EXISTS (SELECT * FROM merge WHERE id = 25 AND comments LIKE '% hateful %');"
cur20.execute(sq)
resulty = cur20.fetchone()
resultFinaly = int(resulty[0])
cur20.close()
conn20.close()
if resultFinaly == 1:
finalCount = finalCount - 1
#adds final sentiment number to corresponding database row
conn = sqlite3.connect("test.db")
cur = conn.cursor()
cur.execute("UPDATE merge SET sent = ? WHERE id = 25;", [finalCount])
conn.commit()
cur.close()
conn.close()
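# ---------------------------------------------------------------------
# Hedged sketch (not part of the original source): the senti1..senti25
# functions above repeat the same per-word lookup for every row id.
# Assuming sqlite3 is imported at the top of this file and the 'merge'
# table has the 'id', 'comments' and 'sent' columns used above, the same
# scoring can be expressed once. The word lists mirror the checks above;
# the helper name 'senti_generic' is illustrative.
POSITIVE_WORDS = ["good", "great", "happy", "win", "love",
                  "nice", "authentic", "like", "fun", "appreciate"]
NEGATIVE_WORDS = ["fuck", "corrupt", "stupid", "irrelevant", "colluding",
                  "horrible", "unfair", "guilty", "foolish", "hateful"]
def senti_generic(row_id):
    """Score one row with a single connection and store the result."""
    conn = sqlite3.connect("test.db")
    cur = conn.cursor()
    final_count = 0
    for word, delta in ([(w, 1) for w in POSITIVE_WORDS] +
                        [(w, -1) for w in NEGATIVE_WORDS]):
        cur.execute("SELECT EXISTS (SELECT * FROM merge WHERE id = ? "
                    "AND comments LIKE ?);", (row_id, "% {} %".format(word)))
        if int(cur.fetchone()[0]) == 1:
            final_count += delta
    cur.execute("UPDATE merge SET sent = ? WHERE id = ?;", (final_count, row_id))
    conn.commit()
    cur.close()
    conn.close()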
``` |
{
"source": "jmppmj/thesis_recurrent_neural_nets",
"score": 3
} |
#### File: Thesis/RNNs/to_create_2012.py
```python
import pandas as pd
import drms #https://pypi.org/project/drms/
#compile :::2012::: feature dataframe
#multiple queries needed due to record return limit
def get_2012_Features():
h = drms.Client()
k = h.query('hmi.sharp_720s[][2012.01.01_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = k
k = h.query('hmi.sharp_720s[][2012.01.08_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.01.15_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.01.22_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.01.29_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.02.05_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.02.12_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.02.19_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.02.26_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.03.04_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.03.11_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.03.18_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.03.25_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.04.01_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.04.08_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.04.15_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.04.22_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.04.29_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.05.06_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.05.13_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.05.20_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.05.27_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.06.03_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.06.10_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.06.17_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.06.24_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.07.01_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.07.08_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.07.15_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.07.22_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.07.29_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.08.05_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.08.12_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.08.19_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.08.26_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.09.02_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.09.09_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.09.16_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.09.23_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.09.30_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.10.07_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.10.14_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.10.21_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.10.28_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.11.04_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.11.11_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.11.18_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.11.25_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.12.02_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.12.09_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.12.16_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.12.23_TAI/7d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
k = h.query('hmi.sharp_720s[][2012.12.30_TAI/2d]', key='T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')
f_dataframe = f_dataframe.append(k)
f_dataframe.to_csv('create_2012_features.csv')
return()
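# ---------------------------------------------------------------------
# Hedged sketch (not in the original file): the block of near-identical
# queries above walks 2012 in 7-day windows. Assuming the same drms
# series and keyword list, a loop over weekly start dates collects the
# same records; pd.concat is used because DataFrame.append was removed
# in pandas 2.0. The constant and function names below are illustrative.
KEYS_2012 = ('T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, '
             'ABSNJZH, TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, '
             'SHRGT45, MEANSHR, MEANJZD, MEANALP, MEANGBT, MEANGAM, '
             'MEANGBZ, MEANGBH, NPIX')
def get_2012_features_looped():
    h = drms.Client()
    frames = []
    for start in pd.date_range('2012-01-01', '2012-12-30', freq='7D'):
        # The last window (2012.12.30) only needs 2 days to close out the year.
        span = '2d' if (start.month, start.day) == (12, 30) else '7d'
        record_set = 'hmi.sharp_720s[][{}_TAI/{}]'.format(
            start.strftime('%Y.%m.%d'), span)
        frames.append(h.query(record_set, key=KEYS_2012))
    features = pd.concat(frames, ignore_index=True)
    features.to_csv('create_2012_features.csv')
    return features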
``` |
{
"source": "jmprdi/binja-division-deoptimization",
"score": 2
} |
#### File: jmprdi/binja-division-deoptimization/__init__.py
```python
from binaryninja.plugin import PluginCommand
from binaryninja.interaction import get_choice_input, show_message_box
from binaryninja import MessageBoxButtonSet, log
def register_commands():
from .deoptimization import (
annotate_operations_ending_at_address,
annotate_operations_in_function,
)
PluginCommand.register_for_address(
"Deoptimize Operations - Line",
"Uses z3 to deoptimize divisions and modulos ending at the specified line.",
action=annotate_operations_ending_at_address,
)
PluginCommand.register_for_function(
"Deoptimize Operations - Function",
"Uses z3 to deoptimize divisions and modulos through the current function.",
action=annotate_operations_in_function,
)
try:
import z3
register_commands()
except ImportError:
choice = show_message_box("Binja Deoptimizer - Error", "z3-solver is not installed in your current environment and is required to run the deoptimization plugin. Please install z3-solver and restart binaryninja.", MessageBoxButtonSet.OKButtonSet)
log.log_error("Binja Deoptimizer - z3-solver not installed, unable to run.")
```
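The `__init__.py` above only registers the commands and checks that z3 is available; the actual work lives in `deoptimization.py`. As a rough standalone illustration of the underlying idea (not taken from the plugin; the magic constant below is the standard unsigned divide-by-9 lowering for 32-bit values), z3 can be asked for a divisor `d` that reproduces a compiler's multiply-and-shift sequence on a few sample inputs:
```python
from z3 import (BitVec, BitVecVal, Extract, LShR, Solver, UDiv, ZeroExt, sat)

d = BitVec("d", 32)  # unknown divisor to recover
solver = Solver()
for sample in (1, 7, 100, 12345, 0xFFFFFFFF):
    x = BitVecVal(sample, 32)
    magic = BitVecVal(0x38E38E39, 64)
    # (x * magic) >> 33, truncated to 32 bits: a common lowering of x / 9
    optimized = Extract(31, 0, LShR(ZeroExt(32, x) * magic, 33))
    solver.add(optimized == UDiv(x, d))

if solver.check() == sat:
    print("recovered divisor:", solver.model()[d])  # prints 9
```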
#### File: jmprdi/binja-division-deoptimization/state.py
```python
from binaryninja import SSAVariable, BinaryView, Function
from z3 import BitVecRef, BitVec
from copy import copy
from .instructions import MLILInstructionExecutor
class State:
"""
State of the current execution
"""
def __init__(self, bv: BinaryView, function: Function):
self.bv = bv
self.function = function
def get_ssa_variable(self, variable: SSAVariable):
raise NotImplementedError
def set_ssa_variable(self, variable: SSAVariable, value: BitVecRef):
raise NotImplementedError
def get_ssa_memory_at(self, location: BitVecRef, ssa_index: BitVecRef):
raise NotImplementedError
class BacktrackingState(State):
"""
Backtracking state that can look up requested variables via the SSA variable definitions.
"""
def __init__(self, bv: BinaryView, function: Function, depth: int):
super().__init__(bv, function)
# TODO: Make variables an object that errors upon assigning the same value twice?
# NOTE: This variables object is shared by all states that are copies of this one.
self.variables = {}
self.depth = depth
# This might not be needed. The variables object may be useable... (oldest variable in it...?)
# NOTE: This potential_inputs object is shared by all states that are copies of this one.
self.potential_inputs = []
def get_ssa_variable(self, variable: SSAVariable):
"""
Look up SSA variable by executing the instruction in which it was defined
:variable: SSAVariable to look up.
"""
definition_instruction = self.function.mlil.ssa_form.get_ssa_var_definition(
variable
)
result = None
if definition_instruction and self.depth > 0:
MLILInstructionExecutor(self.bv, definition_instruction).execute(
self.next_state()
)
result = self.variables[variable]
else:
name = repr(variable)
size = variable.var.type.width * 8
result = BitVec(name, size)
self.potential_inputs.append(result)
self.variables[variable] = result
return result
def get_unconstrained_variable(self, name: str, size_bytes: int):
"""
Return an unconstrained BitVec
:name: Name of the bitvector
:size_bytes: Size of the bitvector in bytes
"""
size = size_bytes * 8
result = BitVec(name, size)
self.potential_inputs.append(result)
return result
def set_ssa_variable(self, variable: SSAVariable, value: BitVecRef):
"""
Set a SSA variable to a value
:variable: Variable to set
:value: Value to set the variable to
"""
self.variables[variable] = value
def get_ssa_memory_at(self, location: BitVecRef, ssa_index: BitVecRef):
"""
Read ssa memory. Currently only returns a bitvec.
:location: Location to read memory from
:ssa_index: SSA memory index
"""
# TODO: This can be much more better.
name = repr(location)
size = self.bv.arch.address_size * 8
result = BitVec(name, size)
self.potential_inputs.append(result)
return result
def next_state(self):
"""
Get the next state for a newly executed instruction
"""
state = BacktrackingState(self.bv, self.function, self.depth - 1)
state.variables = self.variables
state.potential_inputs = self.potential_inputs
return state
``` |
{
"source": "jmprdi/binja-shared-object-symbol-resolution",
"score": 2
} |
#### File: jmprdi/binja-shared-object-symbol-resolution/__init__.py
```python
from binaryninja.plugin import PluginCommand
from binaryninja.mainthread import execute_on_main_thread_and_wait
from binaryninja import BinaryViewType, SymbolType, interaction, log_error
from binaryninjaui import UIContext, DockHandler
import binaryninjaui
import subprocess
import os
# TODO: Custom list of libraries to look through
def display_block(bv, addr):
view = bv.view
# Navigate to location in view
result = bv.file.navigate(view, addr)
if result is False:
view = "Linear:" + view.split(":")[1]
result = bv.file.navigate(view, addr)
# Switch displayed view
UIContext.activeContext().navigateForBinaryView(bv, addr)
def get_all_binaryviews():
all_binaryviews = []
dock = DockHandler.getActiveDockHandler()
if not dock:
log_error("No dock handler. This should not happen.")
return
viewFrame = dock.getViewFrame()
if not viewFrame:
log_error("No open binary")
return
stackedViewFrames = viewFrame.parent() # QStackedWidget
for i in range(stackedViewFrames.count()):
viewFrame = stackedViewFrames.widget(i)
if isinstance(viewFrame, binaryninjaui.ViewFrame):
# New tab is not a ViewFrame
viewInterface = viewFrame.getCurrentViewInterface()
binaryview = viewInterface.getData()
all_binaryviews.append(binaryview)
return all_binaryviews
def open_file_tab(filename: str):
# TODO: Save libc analysis? (Renaming symbols might cause some issues...)
execute_on_main_thread_and_wait(
lambda: UIContext.allContexts()[0].openFilename(filename)
)
def get_linked_libraries(bv):
stdout = subprocess.check_output("ldd {}".format(bv.file.filename), shell=True)
a = stdout.split(b" ")
libraries = []
for line in a:
if line.startswith(b"/"):
libraries.append(os.path.realpath(str(line, "utf8")))
return libraries
def resolve_imports(bv, address):
library_bvs = []
libraries = get_linked_libraries(bv)
needed_libraries = libraries.copy()
all_bvs = get_all_binaryviews()
for potential_bv in all_bvs:
if potential_bv.file.filename in libraries:
try:
needed_libraries.remove(potential_bv.file.filename)
except ValueError:
pass
for library in needed_libraries:
open_file_tab(library)
# TODO: Wait for symbols to be resolved, or does this already happen
all_bvs = get_all_binaryviews()
for potential_bv in all_bvs:
if potential_bv.file.filename in libraries:
library_bvs.append(potential_bv)
# TODO: Get currently selected symbol instead of checking the address
symbols = bv.get_symbols_of_type(SymbolType.ExternalSymbol)
external_symbol = False
for symbol in symbols:
if symbol.address == address:
for library_bv in library_bvs:
for library_symbol in library_bv.get_symbols_by_name(symbol.name):
if (
library_symbol.auto
): # Ensure that renamed symbols are not counted
display_block(library_bv, library_symbol.address)
return
interaction.show_message_box(
"Shared Object Symbol Resolution",
"Selected symbol not found in shared libraries: {}".format(library_bvs),
)
return
interaction.show_message_box(
"Shared Object Symbol Resolution", "Address not an external symbol."
)
def is_valid(bv, address):
return bv.view_type == "ELF"
def register_commands():
"""
Register commands
"""
PluginCommand.register_for_address(
"Resolve Shared Library Import",
"Resolves an import from a shared library, and jumps to its definition.",
action=resolve_imports,
is_valid=is_valid,
)
register_commands()
``` |
{
"source": "jmp/react-native-bundle-extractor",
"score": 3
} |
#### File: react-native-bundle-extractor/extractor/apk.py
```python
import zipfile
from .decorators import log
from .exceptions import FileNotFoundInAPKError
MANIFEST_FILENAME = "AndroidManifest.xml"
CLASSES_FILENAME = "classes.dex"
@log("Extracting file")
def extract_file(zip_path, in_path, out_path):
try:
with zipfile.ZipFile(zip_path) as z:
data = z.read(in_path)
with open(out_path, "wb") as f:
f.write(data)
except KeyError:
raise FileNotFoundInAPKError(f'Bundle file "{in_path}" not found')
@log("Checking if APK exists")
def is_apk(filename):
def zip_contains(zip_file, name):
return any(x == name for x in zip_file.namelist())
if not zipfile.is_zipfile(filename):
return False
with zipfile.ZipFile(filename, "r") as z:
return zip_contains(z, CLASSES_FILENAME) and zip_contains(z, MANIFEST_FILENAME)
```
#### File: react-native-bundle-extractor/extractor/decorators.py
```python
import functools
def log(message):
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
print(f"{message}...", end=" ")
result = function(*args, **kwargs)
print("OK")
return result
except Exception:
print("FAIL")
raise
return wrapper
return decorator
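# ---------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): wrapping
# a function prints "<message>... OK" when it returns normally and
# "<message>... FAIL" before re-raising when it throws.
if __name__ == "__main__":
    @log("Adding numbers")
    def add(a, b):
        return a + b

    add(1, 2)  # prints: Adding numbers... OK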
```
#### File: react-native-bundle-extractor/tests/test_apk.py
```python
import io
import zipfile
import pytest
from extractor.apk import CLASSES_FILENAME, MANIFEST_FILENAME, extract_file, is_apk
from extractor.exceptions import FileNotFoundInAPKError
def create_zip(filenames):
zip_io = io.BytesIO()
with zipfile.ZipFile(zip_io, mode="w") as zf:
for filename in filenames:
zf.writestr(filename, f"this is {filename}")
return zip_io.getvalue()
def test_extract_file_path_exists(tmp_path):
txt_file_path = "some/directory/test.txt"
zip_path = tmp_path / "test_extract.zip"
zip_path.write_bytes(create_zip([txt_file_path]))
out_path = tmp_path / "test.txt"
extract_file(zip_path, txt_file_path, out_path)
with out_path.open("rt") as f:
assert f.read() == f"this is {txt_file_path}"
def test_extract_file_path_does_not_exist(tmp_path):
zip_path = tmp_path / "test_extract.zip"
zip_path.write_bytes(create_zip([]))
with pytest.raises(FileNotFoundInAPKError):
extract_file(zip_path, "this/does/not/exist", tmp_path / "tmp.txt")
def test_is_apk_succeeds_with_manifest_and_classes(tmp_path):
apk_path = tmp_path / "with_manifest_and_classes.apk"
apk_path.write_bytes(create_zip([MANIFEST_FILENAME, CLASSES_FILENAME]))
assert is_apk(apk_path)
def test_is_apk_fails_with_manifest_only(tmp_path):
apk_path = tmp_path / "with_manifest.apk"
apk_path.write_bytes(create_zip([MANIFEST_FILENAME]))
assert not is_apk(apk_path)
def test_is_apk_fails_with_classes_only(tmp_path):
apk_path = tmp_path / "with_classes.apk"
apk_path.write_bytes(create_zip([CLASSES_FILENAME]))
assert not is_apk(apk_path)
def test_is_apk_fails_without_manifest_or_classes(tmp_path):
apk_path = tmp_path / "without_manifest_or_classes.apk"
apk_path.write_bytes(create_zip([]))
assert not is_apk(apk_path)
def test_is_apk_fails_with_non_zip_file(tmp_path):
non_zip_path = tmp_path / "invalid_zip.zip"
non_zip_path.write_text("This is a test.")
assert not is_apk(non_zip_path)
def test_is_apk_fails_with_non_existent_file(tmp_path):
assert not is_apk(tmp_path / "this_file_does_not_exist")
``` |
{
"source": "jmprkables/webapp",
"score": 3
} |
#### File: jmprkables/webapp/server.py
```python
from flask import Flask, request
import rethinkdb as r
import time
from datetime import datetime
import json
app = Flask(__name__)
conn = r.connect("192.168.6.26", 28015)
conn.use('hackiiitd')
@app.route('/')
def hello_world():
return "Welcome to jmprkableserver"
@app.route('/fall', methods=['GET'])
def fall():
timezone = time.strftime("%z")
reql_tz = r.make_timezone(timezone[:3] + ":" + timezone[3:])
the_date = datetime.now(reql_tz)
timestamp = time.mktime(the_date.timetuple())
json_date = the_date.isoformat()
r.table('fall').run(conn) # refers to the 'fall' table in the 'hackiiitd' database selected above
data = request.args.get('fallen')
'''
dataDict = json.loads(data)
try:
fallen = dataDict["fallen"]
except:
return("Invalid data")
'''
r.table("fall").insert({
"fallen": data,
'from_object': the_date,
'from_epoch': r.epoch_time(timestamp),
'from_iso': r.iso8601(json_date)
}).run(conn)
return "insertion successful"
@app.route('/medicine', methods=['GET'])
def medicine():
timezone = time.strftime("%z")
reql_tz = r.make_timezone(timezone[:3] + ":" + timezone[3:])
the_date = datetime.now(reql_tz)
timestamp = time.mktime(the_date.timetuple())
json_date = the_date.isoformat()
conn = r.connect("192.168.6.26", 28015)
data = request.data
dataDict = json.loads(data)
try:
status = dataDict["status"]
except:
return("Invalid data")
r.table("fall").insert({
"status": status,
'from_object': the_date,
'from_epoch': r.epoch_time(timestamp),
'from_iso': r.iso8601(json_date)
}).run(conn)
return "insertion successful"
@app.route('/door', methods=['GET'])
def door():
r.table('door').run(conn)
status = request.args.get('status')
r.table("door").insert({
"door_id": 1,
"status": status
}, conflict="replace");
return "insertion successful"
if __name__ == "__main__":
app.run(port=8085, debug=False, host="0.0.0.0")
``` |
{
"source": "jm-projects/AdventOfCode2021",
"score": 3
} |
#### File: jm-projects/AdventOfCode2021/day5.py
```python
import pandas as pd
import re
import numpy as np
data = pd.read_csv("data/day5.csv", header = None, sep='\n', engine='python')[0]
class Line():
def __init__(self, start_coords, end_coords):
self.start = start_coords
self.end = end_coords
if self.start[0] == self.end[0]:
self.straight = True
self.orientation = 'vert'
elif self.start[1] == self.end[1]:
self.straight = True
self.orientation = 'hori'
elif self.start[1] - self.end[1] == self.start[0] - self.end[0]:
self.orientation = 'd_up'
else:
self.orientation = 'd_down'
def return_points(l:Line, hmap):
if l.orientation == 'hori':
m = np.sort([l.start[0], l.end[0]])
for coord in [[i,l.start[1]] for i in range(m[0], m[1]+1)]:
hmap[coord[0], coord[1]] += 1
elif l.orientation =='vert':
m = np.sort([l.start[1], l.end[1]])
for coord in [[l.start[0], i] for i in range(m[0], m[1]+1)]:
hmap[coord[0], coord[1]] += 1
elif l.orientation == 'd_up':
m = np.sort([l.start[0], l.end[0]])
n = np.sort([l.start[1], l.end[1]])
for coord in [[m[0]+i,n[0]+i] for i in range(0, n[1]-n[0]+1)]:
hmap[coord[0], coord[1]] += 1
else:
m = np.sort([l.start[0], l.end[0]])
n = np.sort([l.start[1], l.end[1]])
for coord in [[m[0]+i,n[1]-i] for i in range(0, n[1]-n[0]+1)]:
hmap[coord[0], coord[1]] += 1
return hmap
inst = [re.split("\s\S\S\s", word) for word in data]
inst = [[re.split(",", word[0]), re.split(",", word[1])] for word in inst]
inst = [[[int(word[0][0]), int(word[0][1])], [int(word[1][0]), int(word[1][1])]] for word in inst]
lines = [Line(l[0], l[1]) for l in inst]
# Challenge 1
s_lines = [l for l in lines if (l.orientation == 'hori' or l.orientation == 'vert')]
hashmap = np.tile(0, (1000,1000))
for l in s_lines:
hashmap = return_points(l, hashmap)
print(np.sum(hashmap > 1))
# Challenge 2
hashmap = np.tile(0, (1000,1000))
for l in lines:
hashmap = return_points(l, hashmap)
print(np.sum(hashmap > 1))
``` |
{
"source": "JmpsWork/easiest-game",
"score": 4
} |
#### File: JmpsWork/easiest-game/shapes.py
```python
from base import Base
import pygame
class Circle(Base):
def __init__(self, coords: tuple, radius: int, color: tuple):
super().__init__()
self.x, self.y = coords
self.radius = radius
self.color = color
def draw(self, screen, offsets):
ox, oy = offsets
pygame.draw.circle(screen, self.color, (self.x - ox, self.y - oy), self.radius)
def set_color(self, color: tuple):
self.color = color
class Rectangle(Base):
def __init__(self, dimensions: tuple, color: tuple):
super().__init__()
self.x, self.width, self.y, self.height = dimensions[0], dimensions[1], dimensions[2], dimensions[3]
self.color = color
self.surf = pygame.Surface((self.width, self.height))
self.surf.fill(self.color)
self.collide = True
def draw(self, screen, offsets: tuple):
"""Gets called every frame to draw the image."""
ox, oy = offsets
screen.blit(self.surf, (self.x - ox, self.y - oy))
def set_color(self, color: tuple):
self.color = color
self.surf.fill(self.color)
def within(self, other):
within_x = self.x + self.width >= other.x and other.x + other.width >= self.x
within_y = self.y + self.height >= other.y and other.y + other.height >= self.y
return within_x and within_y
class Line(Base):
def __init__(self, first_point: tuple, destination_point: tuple, color: tuple=(0, 0, 0), width: int=5):
super().__init__()
self.start = first_point
self.dest = destination_point
self.color = color
self.width = width
self.hide = False
def draw(self, screen, offsets: tuple):
if not self.hide:
start = self.start[0] - offsets[0], self.start[1] - offsets[1]
end = self.dest[0] - offsets[0], self.dest[1] - offsets[1]
pygame.draw.line(screen, self.color, start, end, self.width)
def set_color(self, color: tuple):
self.color = color
def reset_points(self, p1: tuple, p2: tuple):
self.start = p1
self.dest = p2
class Text(Base):
"""Text. Used with UIElement."""
def __init__(self, coords: tuple, color: tuple, text: str, font, *, centered: bool=True, update=False):
super().__init__()
self.x, self.y = coords
self.color = color
self.text = text
self.centered = centered
self.font = font
self.update = update
self.hide = False
def __repr__(self):
return f'{self.text}'
def draw(self, screen, offsets):
if self.hide is False:
text = self.font.render(self.text, True, self.color)
x, y = self.x + offsets[0], self.y + offsets[1]
if self.centered:
center = text.get_rect(center=(x, y))
screen.blit(text, center)
else:
screen.blit(text, (x, y))
def set_text(self, text: str):
self.text = text
```
#### File: JmpsWork/easiest-game/sprite_loader.py
```python
import os
allowed_formats = ['jpg', 'png', 'bmp', 'gif']
os.chdir('.')
def get_images() -> dict:
all_images = {}
for root, dirs, files in os.walk('images'):
for file_name in files:
file_path = f'{root}/{file_name}'.replace('\\', '/')
key = '/'.join(file_path.split('/')[1:])
key, file_format = key.split('.')
if file_format in allowed_formats:
all_images[key] = file_path
print(f'Loaded {file_path} as {key}')
return all_images
``` |
{
"source": "jmptable/get-thingiverse-things",
"score": 3
} |
#### File: jmptable/get-thingiverse-things/get_thing.py
```python
import sys
import os
import json
from urlparse import urlparse
from bs4 import BeautifulSoup
import requests
def die_with_usage():
""" Print usage of program and then exit with error """
print 'Usage: {} <thing URL>'.format(sys.argv[0] or '')
sys.exit(1)
def download_thing_page(url):
""" GET the HTML content of a page or raise an Exception """
req = requests.get(url)
if req.status_code == 200:
return req.text
else:
raise Exception('Request for Thing page failed with status code {}'.format(req.status_code))
def find_thing_models(url):
""" Retrieve the links to 3D models on a Thing page """
thing_view_attr = 'data-thingiview-url'
thing_page = download_thing_page(url)
soup = BeautifulSoup(thing_page, 'html.parser')
thing_view_tags = soup.findAll(lambda tag: thing_view_attr in tag.attrs)
thing_view_urls = []
for thing_view_tag in thing_view_tags:
thing_view_urls += [thing_view_tag.attrs[thing_view_attr]]
unique_urls = list(set(thing_view_urls))
return filter(None, unique_urls)
def model_name(url):
""" Takes the URL of a Thingiverse threejs_json file and returns the name """
unique_id_len = 8
ext_len = 3 # Length of '.js'
parsed_url = urlparse(url)
filename = os.path.basename(parsed_url.path)
if len(filename) > (unique_id_len + ext_len):
return filename[8:-3]
else:
raise Exception('Model has unexpectedly short name "{}"'.format(filename))
def download_things(url, directory='./'):
""" Retrieve the 3D models linked from a Thing page """
if not os.path.isdir(directory):
raise Exception('Cannot download to nonexistent directory "{}"'.format(directory))
model_urls = find_thing_models(url)
for model_url in model_urls:
name = model_name(model_url)
req = requests.get(model_url)
if not req.status_code == 200:
raise Exception('Download of {} failed with status code {}'.format(name, req.status_code))
try:
model_json = req.json()
except ValueError as err:
print 'Failed to download {} because {}'.format(name, str(err))
model_path = os.path.join(directory, '{}.json'.format(name))
# TODO handle case of model with same name already existing
with open(model_path, 'w') as model_file:
model_file.write(json.dumps(model_json, indent=4))
def main():
""" Entrypoint """
if len(sys.argv) != 2:
die_with_usage()
url = sys.argv[1]
download_things(url, './models')
main()
``` |
{
"source": "jmptable/vc_mini_valve_controller_py",
"score": 2
} |
#### File: tests/hardware/test_fire.py
```python
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from vc_mini_valve_controller import Microvalve
def test_fire():
"""Connects to VC Mini Valve Controller, sets some parameters, and then fires a test shot"""
valve_port = os.getenv('VALVE_PORT')
if valve_port is None:
raise Exception('Must set VALVE_PORT environment variable to the port path or name before running the test')
microvalve = Microvalve(valve_port)
microvalve.init()
# Setup test parameters
microvalve.load_parameters(0, 0)
microvalve.set_peak_time(400)
microvalve.set_open_time(1000)
microvalve.set_cycle_time(60000)
microvalve.set_peak_current(13)
microvalve.set_shot_count(100)
# Fire once
microvalve.single_shot(True, False)
if __name__ == '__main__':
test_fire()
```
#### File: vc_mini_valve_controller_py/vc_mini_valve_controller/__init__.py
```python
import serial
import time
ADDRESS_VALVE = 0
ADDRESS_MASTER = 8
class Microvalve:
def __init__(self, port_name, baud_rate=38400):
self.port = serial.Serial(port_name, baud_rate, timeout=1)
self.buffer = ''
self.current_address = 0
def read_line(self):
"""Blocking read of the next line received on the serial port"""
while True:
self.buffer += self.port.read().decode('utf8')
if '\n' in self.buffer:
lines = self.buffer.split('\n')
line = lines[0]
self.buffer = '\n'.join(lines[1:])
return line
def disconnect(self):
self.port.close()
def reset(self):
# Send ^R to reset and then escape to enter terminal mode
self.port.write(bytes([0x12, 0x1b]))
# Wait for welcome/mode message to be printed
while True:
if self.read_line().strip() == 'TERMINAL-Mode':
break
def init(self):
self.reset()
self.command('0*')
self.command('0n')
def command(self, cmd):
self.port.write(cmd.encode('utf8'))
# print('tx:', cmd)
reply = self.read_line()
# print('rx:', reply)
# TODO: validate reply
# if not reply.startswith('>'):
# raise Exception('Unexpected reply to command')
return reply
def set_address(self, address):
if address < 0 or address > 8:
raise Exception('Address out of range')
self.command(f'{address}*')
self.current_address = address
def get_address(self):
return int(self.command('='))
def set_plc_standard_mode(self):
if not self.current_address == ADDRESS_MASTER:
self.set_address(ADDRESS_MASTER)
self.command('00F')
def set_plc_last_state_restore_mode(self):
if not self.current_address == ADDRESS_MASTER:
self.set_address(ADDRESS_MASTER)
self.command('01F')
def set_param_selection_type(self, sel_type):
# TODO
raise Exception('Unimplemented')
def get_param_selection_type(self):
# TODO
raise Exception('Unimplemented')
def set_baud_rate(self, baud_rate):
if not self.current_address == ADDRESS_MASTER:
self.set_address(ADDRESS_MASTER)
if baud_rate == 9600:
self.command('0%')
elif baud_rate == 19200:
self.command('1%')
elif baud_rate == 38400:
self.command('2%')
elif baud_rate == 57600:
self.command('3%')
elif baud_rate == 115200:
self.command('4%')
elif baud_rate == 230400:
self.command('5%')
else:
raise Exception('Cannot set specified baud rate')
def set_shot_trigger_mode(self):
"""Sets single shot trigger mode.
The valve is opened according to the shot settings at a positive edge
of the external hardware input"""
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
self.command('X')
def set_continuous_trigger_mode(self):
"""Sets continuous trigger mode.
The valve is opened as long as the hardware input is high"""
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
self.command('T')
def set_series_trigger_mode(self):
"""Sets series trigger mode.
The valve is opened according to the shot settings, including the number
of shots configured via the G parameter, at a positive edge on the
external hardware input"""
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
self.command('P')
def set_endless_trigger_mode(self):
"""Sets series trigger mode.
Valve shots are fired according to the configured shot settings as long
as the external hardware input is high"""
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
self.command('L')
def stop_triggering(self):
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
self.command('S')
def single_shot(self, v1, v2):
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
if v1 and v2:
self.command('V')
else:
if v1:
self.command('Y')
else:
self.command('Z')
def series_shot(self, v1, v2):
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
if v1 and v2:
self.command('U')
else:
if v1:
self.command('Q')
else:
self.command('R')
def series_shot_stop(self):
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
self.command('S')
def load_parameters(self, valve, set_index):
assert valve == 0 or valve == 1
assert 0 <= set_index <= 3
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
if valve == 0:
self.command(f'{set_index}n')
else:
self.command(f'{set_index + 4}n')
def store_parameters(self, valve, set_index):
assert valve == 0 or valve == 1
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
if valve == 0:
self.command(f'{set_index}N')
else:
self.command(f'{set_index + 4}N')
def set_peak_time(self, value):
assert 10 <= value <= 65535
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
self.command(f'{int(value)}A')
def set_open_time(self, value):
assert 10 <= value <= 9999999
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
self.command(f'{int(value)}B')
def set_cycle_time(self, value):
assert 10 <= value <= 9999999
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
self.command(f'{int(value)}C')
def set_peak_current(self, value):
assert 0 <= value <= 15
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
# TODO: input current instead of index
# Ip = 450mA + (D * 50mA)
self.command(f'{int(value)}D')
def set_shot_count(self, value):
assert 0 <= value <= 65535
if not self.current_address == ADDRESS_VALVE:
self.set_address(ADDRESS_VALVE)
self.command(f'{int(value)}G')
def zero_shot_counter(self, valve):
raise Exception('Unimplemented')
# TODO: implement parameter reading
``` |
{
"source": "jmptbl/puka",
"score": 3
} |
#### File: puka/tests/base.py
```python
import functools
import logging
import os
import puka
import random
import unittest
class TestCase(unittest.TestCase):
def setUp(self):
self.name = 'test%s' % (random.random(),)
self.name1 = 'test%s' % (random.random(),)
self.name2 = 'test%s' % (random.random(),)
self.msg = '%s' % (random.random(),)
self.msg1 = '%s' % (random.random(),)
self.msg2 = '%s' % (random.random(),)
self.amqp_url = os.getenv('AMQP_URL', 'amqp:///')
self.client = None
self._promise_cleanup = []
def tearDown(self):
self.run_cleanup_promises()
def cleanup_promise(self, fn, *args, **kwargs):
self._promise_cleanup.append((fn, args, kwargs))
def run_cleanup_promises(self, client=None):
if not client:
client = self.client
if not client:
return
promises, self._promise_cleanup = self._promise_cleanup, []
for cb, args, kwargs in reversed(promises):
try:
client.wait(cb(*args, **kwargs))
except Exception as e:
logging.error("failed to run client cleanup callback %r: %s", cb, e)
def connect(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
self.client = client = puka.Client(self.amqp_url)
promise = client.connect()
client.wait(promise)
r = None
try:
r = method(self, client, *args, **kwargs)
finally:
self.run_cleanup_promises()
promise = client.close()
client.wait(promise)
self.client = None
return r
return wrapper
```
#### File: puka/tests/tests.py
```python
from __future__ import print_function
import sys
import glob
import time
import os, os.path
import doctest
import unittest
try:
import coverage
except ImportError:
print("No 'coverage' module found. Try:")
print(" sudo apt-get install python-coverage")
sys.exit(1)
import logging
FORMAT_CONS = '%(asctime)s %(name)-12s %(levelname)8s\t%(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT_CONS)
VERBOSE=False
def my_import(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def main_coverage(TESTS):
TEST_NAMES = [f.rpartition('.')[0] for f in glob.glob("test_*.py")]
TEST_NAMES.sort()
pwd=os.getcwd()
os.chdir(sys.argv[1])
BAD_MODULES=(sys.argv[3] if len(sys.argv) >= 4 else '').split(',')
MODULE_NAMES=[sys.argv[2] + '.' +f[0:-3] for f in glob.glob("*.py") if f not in BAD_MODULES]
MODULE_NAMES.sort()
os.chdir(pwd)
modulenames = MODULE_NAMES
try:
cov = coverage.coverage(branch=True)
except TypeError:
cov = coverage
cov.erase()
cov.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
cov.start()
modules = []
for modulename in modulenames:
mod = my_import(modulename)
modules.append(mod)
if 'unittest' in TESTS:
print("***** Unittest *****")
test_args = {'verbosity': 1}
suite = unittest.TestLoader().loadTestsFromNames(TEST_NAMES)
unittest.TextTestRunner(**test_args).run(suite)
if 'doctest' in TESTS:
t0 = time.time()
print("\n***** Doctest *****")
for mod in modules:
doctest.testmod(mod, verbose=VERBOSE)
td = time.time() - t0
print(" Tests took %.3f seconds" % (td, ))
print("\n***** Coverage Python *****")
cov.stop()
cov.report(modules, ignore_errors=1, show_missing=1)
#cov.html_report(morfs=modules, directory='/tmp')
cov.erase()
if __name__ == '__main__':
main_coverage(['unittest', 'doctest'])
def run_unittests(g):
test_args = {'verbosity': 1}
for t in [t for t in g.keys()
if (t.startswith('Test') and issubclass(g[t], unittest.TestCase)) ]:
suite = unittest.TestLoader().loadTestsFromTestCase(g[t])
unittest.TextTestRunner(**test_args).run(suite)
``` |
{
"source": "jmpu/NoiseScope",
"score": 2
} |
#### File: NoiseScope/CSD-SVM/util_CSD_svm.py
```python
import matlab.engine
import numpy as np
from sklearn.svm import OneClassSVM
import os
import glob
from sklearn.model_selection import GridSearchCV
import joblib
from sklearn import metrics
import argparse
import logging
import random
random.seed(6666)
eng = matlab.engine.start_matlab()
def get_color_feat(img_paths):
'''
Extract color feature for all the images
:param img_paths: image paths
:return: a list of feature
'''
all_feats = []
for path in img_paths:
if os.path.exists(path):
feat = eng.gan_img_detection_fea(path)
all_feats.append(feat)
else:
logging.warning('File does not exist: %s', path)
logging.info("The length of all the feat", len(all_feats))
return all_feats
def train_CSD_SVM(args):
'''
Train a SVM outlier detector using real images
:param args.train_img_dir: A directory containing real training images
:param args.svm_model_path: A path where the trained model will be saved
:return:
'''
train_paths = list(map(lambda x: args.train_img_dir + x, os.listdir(args.train_img_dir)))
logging.info("Training file paths: {}".format(len(train_paths)))
train_feat = get_color_feat(train_paths)
train_feat = np.squeeze(train_feat, axis=1)
y_true = [1] * np.shape(train_feat)[0]
# train SVM
parameters = {'gamma': [0.001, 0.0001, 1 / 588, 0.01, 0.1]}
svm_model = OneClassSVM(nu=0.1, kernel="rbf")
clf = GridSearchCV(svm_model, parameters, cv=5, scoring='accuracy')
clf.fit(train_feat, y_true)
logging.info(clf.best_estimator_.get_params())
# save the model
joblib.dump(clf.best_estimator_, args.svm_model_path)
logging.info('model saved')
def test_CSD_SVM(args):
'''
Test the trained CSD-SVM model
:param real_img_dir: Directory of real images
:param fake_img_dir: Directory of fake images
:param svm_model_path: Trained model
:return: Detection performance
'''
real_paths = list(map(lambda x: args.real_img_dir + x, random.sample(os.listdir(args.real_img_dir), args.num_real)))
real_feat = get_color_feat(real_paths)
fake_paths = list(map(lambda x: args.fake_img_dir + x, random.sample(os.listdir(args.fake_img_dir), args.num_fake)))
fake_feat = get_color_feat(fake_paths)
test_feat = real_feat + fake_feat
test_label = [1] * len(real_feat) + [-1] * len(fake_feat)
test_feat = np.squeeze(test_feat, axis=1)
svm_model = joblib.load(args.svm_model_path)
pred_labels = svm_model.predict(test_feat)
metric_scores = {
"accuracy": metrics.accuracy_score(test_label, pred_labels),
"precision": metrics.precision_score(test_label, pred_labels),
"recall": metrics.recall_score(test_label, pred_labels),
"f1_score": metrics.f1_score(test_label, pred_labels)
}
logging.info("F1 score", metric_scores['f1_score'])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train_img_dir', default='/rdata/jiameng/DeepLens/alldata/original_imgs/flickr_winter/refer/', help='path to training image dir, which includes real images only')
parser.add_argument('--real_img_dir', default='/rdata/jiameng/DeepLens/alldata/original_imgs/flickr_winter/real/', help='path to real image dir for testing')
parser.add_argument('--fake_img_dir', default='/rdata/jiameng/DeepLens/alldata/original_imgs/CycleGAN_winter/fake/', help='path to fake image dir for testing')
parser.add_argument('--num_real', default=500, help='The number of real images in the test set')
parser.add_argument('--num_fake', default=500, help='The number of fake images in the test set')
parser.add_argument('--svm_model_path', default='./winter_example.pkl', help='The path that trained SVM model will be saved')
args = parser.parse_args()
logging.basicConfig(filename='./csd_svm.log', filemode='w', level=logging.INFO, format='%(levelname)s:%(message)s')
train_CSD_SVM(args)
test_CSD_SVM(args)
```
#### File: jmpu/NoiseScope/noisescope_clustering.py
```python
import os
import numpy as np
import random
from sklearn.utils import shuffle
import scipy.io
import matlab.engine
import time
import glob
import argparse
from utils_noisescope import *
import logging
import joblib
random.seed(6666)
eng = matlab.engine.start_matlab()
def extract_fingerpint_via_clustering(all_res_paths, ground_truth_label,
thre_pce,
cluster_list_with_image_idx,
iter_round, img_dim, outlier_model_path, result_dir,
reduce_matrix=None, merged_cluster=None):
'''
Fingerprint Step 2 + 3.
:param all_res_paths: noise residuals of the test set.
:param ground_truth_label: ground truth labels for the test set.
:param thre_pce: T merge calibrated using function 'correlation_between_real_fps' in pipeline.py
:param cluster_list_with_image_idx: A list of residual clusters. Each cluster is a tuple, which includes residual indexes.
:param iter_round: clustering/merging iteration round
:param img_dim: image/residual dimension
:param outlier_model_path: fingerprint outlier detector
:param result_dir: save log, middle products like .mat files
:param reduce_matrix: previous pair-wise correlation reused for this round of merging iteration
:param merged_cluster: Newly merged clusters from the last merging step
:return: ret_fake_cluster_list: A list of fake (model) clusters flagged; ret_cluster_list_with_image_idx: residual indexs in the flagged clusters
'''
logging.info("++++++++++PERFORM THE NEXT MERGING ITERATION++++++++++++\n")
logging.info('Currently, there are {} clusters\n'.format(len(
cluster_list_with_image_idx))) # cluster_list_with_image_idx show the latest cluster distribution and clusters
for cluster_with_img_idx in cluster_list_with_image_idx:
if len(cluster_with_img_idx) > 10:
fake_purity = compute_cluster_fake_purity(cluster_with_img_idx, ground_truth_label)
logging.info(
'This cluster has {} images with a fake purity: {} \n'.format(len(cluster_with_img_idx), fake_purity))
num_cluster = len(cluster_list_with_image_idx)
### calculate PCE matrix ###
if iter_round > 0:
pce_matrix = np.full((num_cluster, num_cluster), 0, dtype=float)
pce_matrix[0:num_cluster - len(merged_cluster), 0: num_cluster - len(merged_cluster)] = reduce_matrix # 98, 98
eng.get_pce_matrix_iterate(all_res_paths, cluster_list_with_image_idx, len(merged_cluster), img_dim,
result_dir,
iter_round)
new_pce_matrix = scipy.io.loadmat(result_dir + '{}_partial.mat'.format(iter_round))
pce_matrix[num_cluster - len(merged_cluster):, :] = np.array(new_pce_matrix['matrix'])
else:
t1 = time.time()
eng.get_pce_matrix_noise_average(all_res_paths, cluster_list_with_image_idx, result_dir, iter_round,
img_dim)
t2 = time.time()
logging.info('The first iteration takes {} seconds. \n'.format(t2 - t1))
pce_matrix = scipy.io.loadmat(result_dir + '{}.mat'.format(iter_round))
pce_matrix = np.array(pce_matrix['matrix'])
large_pce_pos_array = np.where(pce_matrix > thre_pce)
x_axis_idx = large_pce_pos_array[0]
y_axis_idx = large_pce_pos_array[1]
logging.info("{} pairs in the matrix is larger than the threshold. \n".format(len(list(x_axis_idx))))
# return cases for early stopping
sorted_cluster_list_with_image_idx = sorted(cluster_list_with_image_idx, key=len, reverse=True)
# if len(sorted_cluster_list_with_image_idx[0]) > 200: # if we have a big cluster >200, we test it
if len(sorted_cluster_list_with_image_idx[0]) > 150:  # if we have a big cluster (> 150), we start the early stopping strategy
feed_list = []
for idx_tuple in sorted_cluster_list_with_image_idx:
if len(idx_tuple) > 50: # pick cluster size [50, X)
feed_list.append(idx_tuple)
else:
break
# return feed_list, tuple_tree_dict, cluster_list_with_image_idx # for skipping
fake_cluster_list, fake_flagged = fingerprint_classifier(feed_list, all_res_paths,
outlier_model_path, img_dim)
if fake_flagged:
logging.info(
"We detected suspicious fake clusters, NoiseScope will perform fingerprint classifier next.")
return fake_cluster_list, cluster_list_with_image_idx
else:
logging.info(
"Available candidate clusters are not recognized outliers, NoiseScope continues to do clustering.")
# another return case, when there is no more high correlated pairs
if len(list(x_axis_idx)) == 0:
fake_cluster_list, fake_flagged = fingerprint_classifier(sorted_cluster_list_with_image_idx, all_res_paths,
outlier_model_path, img_dim)
if fake_flagged:
return fake_cluster_list, cluster_list_with_image_idx
else:
logging.info("No fake clusters are flagged, NoiseScope will stop the detection.")
return fake_cluster_list, cluster_list_with_image_idx
# confirm how many pairs can be merged
idx_pairs = list(zip(x_axis_idx, y_axis_idx)) # idx_pairs includes all pair positions
idx_pairs_with_pce = list(map(lambda x: x + (pce_matrix[x[0], x[1]],), idx_pairs))
sorted_idx_pairs_with_pce = sorted(idx_pairs_with_pce, key=lambda x: x[2], reverse=True)
idx_pair_for_merge = []
delete_idxs = []
while len(sorted_idx_pairs_with_pce) > 0: # which means still having pairs to merge
x_idx_max_pce = sorted_idx_pairs_with_pce[0][0]
y_idx_max_pce = sorted_idx_pairs_with_pce[0][1]
assert pce_matrix[x_idx_max_pce][y_idx_max_pce] == sorted_idx_pairs_with_pce[0][2]
idx_pair_for_merge.append((x_idx_max_pce, y_idx_max_pce))
logging.info(
'Maximum pce value from current idx pairs is: {}\n'.format(pce_matrix[x_idx_max_pce][y_idx_max_pce]))
delete_idxs.append(x_idx_max_pce)
delete_idxs.append(y_idx_max_pce)
sorted_idx_pairs_with_pce[:] = [idx_pair for idx_pair in sorted_idx_pairs_with_pce if
(x_idx_max_pce not in idx_pair) and (y_idx_max_pce not in idx_pair)]
### merging rules ###
merge_clusters_set = set([]) # contain merged tuples that should be added
delete_clusters_set = set([]) # contain tuples that need to be deleted
for idx_pair in idx_pair_for_merge:
# record all the clusters need to be deleted from cluster_list_with_image_idx
delete_clusters_set.add(cluster_list_with_image_idx[idx_pair[0]])
delete_clusters_set.add(cluster_list_with_image_idx[idx_pair[1]])
# record all the merged cluster need to be added into cluster_list_with_image_idx
merge_tuple = cluster_list_with_image_idx[idx_pair[0]] + cluster_list_with_image_idx[idx_pair[1]]
merge_clusters_set.add(merge_tuple)
# here we remove clusters in delete_clusters_set
for delete_tuple in delete_clusters_set:
cluster_list_with_image_idx.remove(delete_tuple)
# here we add merged clusters in all_merge_set
for merge_tuple in merge_clusters_set:
cluster_list_with_image_idx.append(merge_tuple)
pce_values_for_next_iter = []
for i in range(0, num_cluster):
if i in delete_idxs:
continue
for j in range(0, num_cluster):
if j in delete_idxs:
continue
pce_values_for_next_iter.append(pce_matrix[i, j])
pce_matrix = np.reshape(pce_values_for_next_iter, (num_cluster - len(delete_idxs), num_cluster - len(delete_idxs)))
ret_fake_cluster_list, ret_cluster_list_with_image_idx = extract_fingerpint_via_clustering(all_res_paths,
ground_truth_label,
thre_pce,
cluster_list_with_image_idx,
iter_round + 1,
img_dim,
outlier_model_path,
result_dir,
pce_matrix,
merge_clusters_set)
return ret_fake_cluster_list, ret_cluster_list_with_image_idx
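# Worked example (illustrative, not from the original code) of the greedy pair selection above:
# if the thresholded PCE matrix leaves candidate pairs (0, 3)=80, (1, 3)=70 and (1, 2)=65, the
# pair (0, 3) with the largest PCE is merged first; every remaining pair touching cluster 0 or 3
# is dropped, so (1, 3) is discarded and (1, 2) is merged next within the same iteration.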
def fake_image_detector(fake_cluster_list, test_res_paths, ground_truth, img_dim, refer_dir):
'''
NoiseScope step 4.
:param fake_cluster_list: A list of fake clusters. Each cluster includes all the residual indexes.
:param test_res_paths: noise residual paths for test set.
:param ground_truth: Ground truth label for the test residuals.
:param img_dim: image/residual size
:param refer_dir: directory containing the reference (real) noise residuals.
:return: detection F1 score
'''
if len(fake_cluster_list) == 0:
logging.info('No model fingerprint found! The detection will stop here! \n')
return
refer_res_paths = glob.glob(refer_dir + '*.mat')
test_max_pce = []
refer_max_pce = []
all_test_pce = []
all_refer_pce = []
cluster_stat = []
single_cluster_f1_scores = []
for i, fake_cluster in enumerate(fake_cluster_list):
logging.info('This fake cluster includes residual id: {}. \n'.format(fake_cluster))
# adjust the index, because in matlab, index starts from 1.
fake_cluster_idx_minus = list(map(lambda x: x - 1, fake_cluster))
fake_pos = np.where(np.array(ground_truth) == 1)
fake_purity = len(set(fake_pos[0]).intersection(set(fake_cluster_idx_minus))) / len(fake_cluster)
cluster_stat.append((len(fake_cluster), fake_purity))
logging.info('This cluster has a fake purity of {}. \n'.format(fake_purity))
logging.info('This cluster has {} image samples. \n'.format(len(fake_cluster)))
model_fingerprint = compute_fp_from_cluster(fake_cluster, test_res_paths, img_dim)
logging.info('The shape of fake fingerprint: {}. \n'.format(np.shape(model_fingerprint)))
test_pce_corr = compute_pce_with_fingerprint(test_res_paths, model_fingerprint)
refer_pce_corr = compute_pce_with_fingerprint(refer_res_paths, model_fingerprint)
all_test_pce.append(test_pce_corr[0])
all_refer_pce.append(refer_pce_corr[0])
if i == 0:
test_max_pce = test_pce_corr[0]
refer_max_pce = refer_pce_corr[0]
else:
test_max_pce = list(map(lambda x, y: max(x, y), test_max_pce, test_pce_corr[0]))
refer_max_pce = list(map(lambda x, y: max(x, y), refer_max_pce, refer_pce_corr[0]))
calibrate_thres = np.percentile(refer_max_pce, 99.5)
logging.info('Calibrated PCE threshold for fake image detector, {} \n'.format(calibrate_thres))
label = list(map(lambda x: 1 if x > calibrate_thres else 0, test_max_pce))
conf_matrix, metric_scores = compute_confusion_matrix(ground_truth, label)
logging.info("Clustered with PCE threshold: {}. \n".format(calibrate_thres))
logging.info("TN, FP, FN, TP: {} \n".format(conf_matrix))
logging.info("+++++++++++++++++++++++++++++++ \n")
logging.info("Accuracy: {0:.2f}% \n".format(metric_scores["accuracy"] * 100))
logging.info("Precision: {0:.2f}% \n".format(metric_scores["precision"] * 100))
logging.info("Recall: {0:.2f}% \n".format(metric_scores["recall"] * 100))
logging.info("F1 score: {0:.2f}% \n".format(metric_scores["f1_score"] * 100))
final_f1 = metric_scores["f1_score"]
for test_pce in all_test_pce:
label = list(map(lambda x: 1 if x > calibrate_thres else 0, test_pce))
conf_matrix, metric_scores = compute_confusion_matrix(ground_truth, label)
logging.info("========Single cluster performance=========\n")
logging.info("TN, FP, FN, TP: {} \n".format(conf_matrix))
logging.info("+++++++++++++++++++++++++++++++ \n")
logging.info("Accuracy: {0:.2f}% \n".format(metric_scores["accuracy"] * 100))
logging.info("Precision: {0:.2f}% \n".format(metric_scores["precision"] * 100))
logging.info("Recall: {0:.2f}% \n".format(metric_scores["recall"] * 100))
logging.info("F1 score: {0:.2f}% \n".format(metric_scores["f1_score"] * 100))
single_cluster_f1_scores.append(metric_scores["f1_score"])
return final_f1
def fingerprint_classifier(cluster_list_with_image_idx, res_list, outlier_model_path, img_dim):
'''
NoiseScope Step 3: fingerprint classifier
:param cluster_list_with_image_idx: A list of residual clusters. Each cluster is a tuple, which includes residual indexes.
:param res_list: Noise residuals of test set.
:param outlier_model_path: Fingerprint outlier detector, which will flag model fingerprints as outliers
:param img_dim: image/residual size
:return: a list of fake (model) clusters
'''
fake_cluster_list = []
fake_flagged = False
detection_model = joblib.load(outlier_model_path)
# cluster_list_with_image_idx = sorted(cluster_list_with_image_idx, key=len, reverse=True)
for cluster_with_img_idx in cluster_list_with_image_idx:
if len(cluster_with_img_idx) > 50: # find the fake set whose size is larger than 50
sampled_idx = random.sample(cluster_with_img_idx, 50) # sample cluster_list_with_image_idx
cluster_fp = compute_fp_from_cluster(sampled_idx, res_list, img_dim)
clipped_fp = clip_fp(cluster_fp)
haralick_feat = extract_haralick_features(clipped_fp)
pred_label = detection_model.predict(np.array(haralick_feat).reshape(1, -1))
if pred_label == -1:
fake_cluster_list.append(cluster_with_img_idx)
logging.info("One fake cluster is flagged, with {} images.\n".format(len(cluster_with_img_idx)))
else:
break
logging.info("{} fake clusters have been flagged.".format(len(fake_cluster_list)))
if len(fake_cluster_list) > 0: fake_flagged = True
return fake_cluster_list, fake_flagged
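# Illustrative sketch (not part of the original module): scoring a single residual cluster with
# the fingerprint outlier detector in isolation. The detector type is an assumption -- any
# scikit-learn style model whose predict() returns -1 for outliers fits this interface.
def _example_score_single_cluster(cluster_with_img_idx, res_list, outlier_model_path, img_dim):
    detector = joblib.load(outlier_model_path)
    cluster_fp = compute_fp_from_cluster(list(cluster_with_img_idx), res_list, img_dim)
    feat = extract_haralick_features(clip_fp(cluster_fp))
    return detector.predict(np.array(feat).reshape(1, -1))[0] == -1  # True -> flagged as a model fingerprint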
def detection_NoiseScope(args):
if args.result_dir[-1] != '/': args.result_dir = args.result_dir + '/'
if not os.path.exists(args.result_dir): os.mkdir(args.result_dir)
logging.basicConfig(filename='{}detection.log'.format(args.result_dir), filemode='w', level=logging.DEBUG, format='%(levelname)s:%(message)s')
real_res_list = random.sample(glob.glob(args.real_res_dir + '/*.mat'), args.num_real)
fake_res_list = random.sample(glob.glob(args.fake_res_dir + '/*.mat'), args.num_fake)
all_res_paths = real_res_list + fake_res_list
ground_truth_label = [0] * len(real_res_list) + [1] * len(fake_res_list)
shuffle_data = shuffle(list(zip(ground_truth_label, all_res_paths)))
[ground_truth_label_, all_res_paths_] = zip(*shuffle_data)
# logfile = open("{}logfile.txt".format(args.result_dir), "w")
all_res_paths = list(all_res_paths_)
ground_truth_label = ground_truth_label_
cluster_list_with_image_idx = [tuple([i]) for i in range(1, len(all_res_paths) + 1)]
############ find fake indexes and compute the fake fingerprint ################
logging.info('Merging threshold: {}\n'.format(args.pce_thre))
fake_cluster_list, cluster_list_with_image_idx = extract_fingerpint_via_clustering(all_res_paths,
ground_truth_label,
args.pce_thre,
cluster_list_with_image_idx,
0,
args.img_dim,
args.outlier_model_path,
args.result_dir)
f1_score = fake_image_detector(fake_cluster_list, all_res_paths, ground_truth_label, args.img_dim,
args.refer_res_dir)
return f1_score
if __name__ == '__main__':
'''
We grab 'num_real' samples from 'real_res_dir' and 'num_fake' samples from 'fake_res_dir'
specify the 'outlier_model_path' trained from prep_steps.py
specify 'pce_thre' calibrated from prep_steps.py
'''
parser = argparse.ArgumentParser()
parser.add_argument('--real_res_dir', default='', help='the path to REAL noise residual dir')
parser.add_argument('--fake_res_dir', default='', help='the path to FAKE noise residual dir')
parser.add_argument('--refer_res_dir', default='', help='the path to REFERENCE noise residual dir')
parser.add_argument('--num_real', type=int, help='The number of real images in the test set', default=500)
parser.add_argument('--num_fake', type=int, help='The number of fake images in the test set', default=500)
parser.add_argument('--img_dim', type=int, default=256, help='images should be in square shape.')
parser.add_argument('--outlier_model_path', default='', help='the path to pre-trained fingerprint outlier detector')
parser.add_argument('--result_dir', default='',
help='Specify the folder which saves log file and some matrix files produced in the middle')
parser.add_argument('--pce_thre', type=float, help='the merging threshold (T merge) calibrated by prep_steps.py')
args = parser.parse_args()
detection_NoiseScope(args)
```
#### File: jmpu/NoiseScope/utils_noisescope.py
```python
import os
import scipy
import numpy as np
import random
from sklearn import metrics
import math
import scipy.io
from sklearn.metrics import mean_squared_error
from sklearn.metrics import confusion_matrix
import matlab.engine
import glob
import mahotas as mt
import argparse
random.seed(6666)
eng = matlab.engine.start_matlab()
def clip_fp(fp):
'''
Clip fingerprint values to [0, 1] and rescale them to integers in [0, 255].
:param fp: A fingerprint
:return: Clipped fingerprint
'''
clipped_fp = np.clip(fp, 0, 1)
ret_fp = (clipped_fp * 255).astype(int)
return ret_fp
def extract_haralick_features(image):
'''
Extract haralick feature for an image
:param image: a clipped fingerprint output by clip_fp
:return: haralick texture feature of the fingerprint
'''
textures = mt.features.haralick(image)
ht_mean = textures.mean(axis=0)
return ht_mean
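# Usage sketch (illustrative, not part of the original pipeline): texture features for one
# fingerprint. `fp` is assumed to be a 2-D float residual average such as the output of
# compute_fp_from_cluster below.
def _example_texture_feature(fp):
    clipped = clip_fp(fp)                      # integer image with values in [0, 255]
    return extract_haralick_features(clipped)  # 13-dimensional mean Haralick vector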
def texture_feat_extract(res_folder, res_dim, total_round=5, fp_size=50):
'''
This function will 1) create a set of fingerprints by random sampling, 2) extract texture
features from those fingerprints, and 3) return the feature set.
:param res_folder: noise residual folder of the reference set. It should only include real image residuals.
:param res_dim: image dimension
:param total_round: randomly sample for 5 rounds by default
:param fp_size: each fingerprint is extracted from 50 residuals
:return: A set of feature of reference fingerprints (real residuals calculated)
'''
feat_set = []
for round in range(0, total_round):
res = os.listdir(res_folder)
random.shuffle(res)
print("There are {} available noise residuals".format(len(res)))
seg_idxs = [tuple(range(x, x + fp_size)) for x in range(0, len(res) - fp_size, fp_size)]
for i, seg_idx in enumerate(seg_idxs):
print("[STATUS] Creating fingerprint {}".format(i))
res_paths_for_one_fp = list(map(lambda x: res_folder + res[x], seg_idx))
FP = eng.compute_fp_from_path(res_paths_for_one_fp, res_dim)
clipped_fp = clip_fp(np.array(FP))
feat_vector = extract_haralick_features(clipped_fp)
feat_set.append(feat_vector)
print('[STATUS] TRAIN feature extraction DONE')
return feat_set
def compute_pce_with_fingerprint(res_list, fingerprint):
'''
For each residual in a list of noise residuals, compute its PCE correlation with a fingerprint.
:param res_list: A list of noise residuals (can be all the test residuals or all the reference residuals)
:param fingerprint: A fingerprint.
:return: an array of PCE correlation.
'''
ret_pce = eng.compute_pce_with_fingerprint(res_list, matlab.double(fingerprint.tolist()))
return np.array(ret_pce)
def compute_fp_from_cluster(idxs, res_list, img_dim):
'''
compute a fingerprint out of a cluster of residuals by averaging.
:param idxs: the indexes of a residual cluster
:param res_list: noise residuals of test set.
:param img_dim: image/residual dimension.
:return: A fingerprint.
'''
averaged_fp = np.zeros((img_dim, img_dim))
for idx in idxs:
fp = scipy.io.loadmat(res_list[idx - 1]) # type: 'dict'
averaged_fp += fp['Noise'] / len(idxs)
return np.array(averaged_fp)
def compute_cluster_fake_purity(cluster_with_img_idx, ground_truth):
'''
Compute the percentage of fake images/residuals in a cluster
:param cluster_with_img_idx: A list of residual clusters. Each cluster is a tuple, which includes residual indexes.
:param ground_truth: ground truth labels of the test set
:return: a percentage
'''
cluster_idx_minus = list(map(lambda x: x - 1, cluster_with_img_idx))
fake_pos = np.where(np.array(ground_truth) == 1)
fake_purity = len(set(fake_pos[0]).intersection(set(cluster_idx_minus))) / len(cluster_with_img_idx)
return fake_purity
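# Worked example (illustrative): with ground_truth = [0, 1, 1, 0, 1] and the 1-indexed cluster
# (2, 3, 4), the 0-indexed members are {1, 2, 3} and the fake positions are {1, 2, 4}; the
# overlap has size 2, so compute_cluster_fake_purity returns 2 / 3.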
def compute_confusion_matrix(ground_truth, label):
'''
compute detection performance given ground truth label and prediction label
:param ground_truth: ground truth label of the test set
:param label: prediction label of the test set.
:return: metric scores
'''
tn, fp, fn, tp = confusion_matrix(ground_truth, label).ravel()
conf_matrix = (tn, fp, fn, tp)
metric_scores = {
"accuracy": metrics.accuracy_score(ground_truth, label),
"precision": metrics.precision_score(ground_truth, label),
"recall": metrics.recall_score(ground_truth, label),
"f1_score": metrics.f1_score(ground_truth, label)
}
return conf_matrix, metric_scores
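# Usage sketch (illustrative, toy labels only) showing the return structure of
# compute_confusion_matrix.
def _example_confusion_metrics():
    ground_truth = [0, 0, 1, 1]
    predicted = [0, 1, 1, 1]
    (tn, fp, fn, tp), scores = compute_confusion_matrix(ground_truth, predicted)
    return tp, scores["f1_score"]  # here tp == 2 and f1_score == 0.8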
def save_fingerprint_imgs(res_folder, img_dim, num_res=150):
'''
To visualize fingerprint.
:param res_folder: the path to noise residuals of images from a specific camera/GAN model
:param img_dim: image/noise dimension
:param num_res: the number of noise residuals used for creating a fingerprint
:return:
'''
files = glob.glob(res_folder + '*.mat')[:num_res]
eng.visualize_fingerprint(files, img_dim, './StyleGAN_bedroom_FP.png')
print('fingerprint saved')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gan_res_dir', default='/alldata/residuals/StyleGAN_bedroom/', help='PATH to directory that contains noise residuals from a GAN model')
args = parser.parse_args()
save_fingerprint_imgs(args.gan_res_dir, 256)
``` |
{
"source": "jmpurtle/wc-poe-site",
"score": 2
} |
#### File: app/wc_poe_site/sitePage.py
```python
from .template import render
class SitePage:
__dispatch__ = 'resource'
def __init__(self, context, site, page):
self._ctx = context # The "request context" we were constructed for.
self._site = site # The parent (containing) Site instance.
self._page = page # The data associated with our current site page
def get(self):
"""Retrieve the page data or render an HTML page."""
candidates = ['text/html'] + list(self._ctx.serialize.types)
match = self._ctx.request.accept.best_match(candidates, default_match='text/html')
if match == 'text/html':
return render(self._ctx, self, self._page)
return self._page
def post(self, content):
"""Update the in-database content for the current page.
This will create the page if one by this name doesn't already exist.
"""
result = self._ctx.db.sitepages.update_one(
{'_id': self._page['_id']}, # A query identifying the document to update.
{ # The following are the MongoDB update operations to apply to the document.
'$set': { # Update the page content.
'content': content,
},
'$currentDate': { # Also update the last-modified time.
'modified': True,
}
}
)
if not result.matched_count: # Nothing was updated... so let's create instead.
return self._site.post(self._page['_id'], content) # Internally POST
return {
'ok': True,
'acknowledged': result.acknowledged,
'name': self._page['_id']
}
def delete(self):
"""Delete this page from the site"""
result = self._ctx.db.sitepages.delete_one({'_id': self._page['_id']})
if not result.deleted_count: # Nothing was deleted, page likely did not exist.
return {
'ok': False,
'reason': 'missing',
'message': 'Cannot delete something that does not exist.',
'name': self._page['_id'],
}
return {
'ok': True,
'acknowledged': result.acknowledged,
'name': self._page['_id'],
}
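# Illustrative usage (assumption: this resource is mounted by a WebCore-style dispatcher; the
# page name below is hypothetical):
#   GET    /about  with "Accept: application/json"  -> returns the stored page document
#   POST   /about  with a "content" form field      -> updates (or creates) the page content
#   DELETE /about                                   -> removes the page document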
``` |
{
"source": "Jmq14/FCOS",
"score": 2
} |
#### File: fcos_core/engine/feature_extractor.py
```python
import logging
import time
import os
import torch
from tqdm import tqdm
import numpy as np
from fcos_core.config import cfg
from fcos_core.modeling.poolers import Pooler
from fcos_core.data.datasets.evaluation import evaluate
from ..utils.comm import is_main_process, get_world_size
from ..utils.comm import all_gather
from ..utils.comm import synchronize
from ..utils.timer import Timer, get_time_str
def compute_on_dataset(model, data_loader, device, pooler, timer=None, output_folder=None):
model.eval()
results_dict = {}
cpu_device = torch.device("cpu")
if output_folder:
gt_feature_output_dir = os.path.join(output_folder, "ground_truth_feature")
pred_feature_output_dir = os.path.join(output_folder, "prediction_feature")
os.makedirs(gt_feature_output_dir, exist_ok=True)
os.makedirs(pred_feature_output_dir, exist_ok=True)
for _, batch in enumerate(tqdm(data_loader)):
images, targets, image_ids, boxes_ids = batch
with torch.no_grad():
if timer:
timer.tic()
features, predictions = model(images.to(device))
if timer:
torch.cuda.synchronize()
timer.toc()
p3_features = features[0]
# predictions = [prediction.to(cpu_device) for prediction in predictions]
targets = [target.to(device) for target in targets]
gt_box_features = pooler([p3_features], targets).to(cpu_device)
pred_box_features = pooler([p3_features], predictions).to(cpu_device)
flatten_boxes_id = [item for sublist in boxes_ids for item in sublist]
if output_folder:
for box_id, box in zip(flatten_boxes_id, gt_box_features):
path = os.path.join(gt_feature_output_dir, "{}.npz".format(box_id))
np.savez_compressed(path, feature=box)
cnt = 0
for i, image_id in enumerate(image_ids):
path = os.path.join(pred_feature_output_dir, "{}.npz".format(image_id))
box_num = len(predictions[i])
np.savez_compressed(path, feature=pred_box_features[cnt:cnt+box_num,:,:,:])
cnt += box_num
results_dict.update(
{img_id: (result, target_box_ids) for img_id, result, target_box_ids in zip(image_ids, predictions, boxes_ids)}
)
return results_dict
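# Illustrative sketch (not part of the original file): applying the ROI pooler to one feature
# level in isolation. The resolution and scale mirror the defaults of get_box_feature below;
# `p3` is assumed to be an N x C x H x W tensor and `boxes` a list of BoxList objects.
def _example_pool_boxes(p3, boxes, resolution=14):
    pooler = Pooler(output_size=(resolution, resolution), scales=(1. / 8.,), sampling_ratio=0)
    return pooler([p3], boxes)  # one (C, resolution, resolution) feature per box, stacked over the batch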
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
all_predictions = all_gather(predictions_per_gpu)
if not is_main_process():
return
# merge the list of dicts
predictions = {}
for p in all_predictions:
predictions.update(p)
# convert a dict where the key is the index in a list
image_ids = list(sorted(predictions.keys()))
# convert to a list
box_ids = [box_id for i in image_ids for box_id in predictions[i][1]]
predictions = [predictions[i][0] for i in image_ids]
# print(predictions)
return predictions, image_ids, box_ids
def get_box_feature(
model,
data_loader,
dataset_name,
device="cuda",
output_folder=None,
resolution=14,
scales=(1./8.,),
sampling_ratio=0,
expected_results=(),
expected_results_sigma_tol=4,
):
# convert to a torch.device for efficiency
device = torch.device(device)
num_devices = get_world_size()
logger = logging.getLogger("fcos_core.inference")
dataset = data_loader.dataset
logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))
total_timer = Timer()
inference_timer = Timer()
total_timer.tic()
# ROI align
pooler = Pooler(
output_size=(resolution, resolution),
scales=scales,
sampling_ratio=sampling_ratio,
)
predictions = compute_on_dataset(model, data_loader, device, pooler, inference_timer, output_folder)
# wait for all processes to complete before measuring the time
synchronize()
total_time = total_timer.toc()
total_time_str = get_time_str(total_time)
logger.info(
"Total run time: {} ({} s / img per device, on {} devices)".format(
total_time_str, total_time * num_devices / len(dataset), num_devices
)
)
total_infer_time = get_time_str(inference_timer.total_time)
logger.info(
"Model inference time: {} ({} s / img per device, on {} devices)".format(
total_infer_time,
inference_timer.total_time * num_devices / len(dataset),
num_devices,
)
)
predictions, image_ids, box_ids = _accumulate_predictions_from_multiple_gpus(predictions)
if not is_main_process():
return
if output_folder:
torch.save(predictions, os.path.join(output_folder, "predictions.pth"))
torch.save(box_ids, os.path.join(output_folder, 'box_ids.pth'))
torch.save(image_ids, os.path.join(output_folder, 'image_ids.pth'))
extra_args = dict(
box_only=False,
iou_types=("bbox",),
expected_results=expected_results,
expected_results_sigma_tol=expected_results_sigma_tol,
)
return evaluate(dataset=dataset,
predictions=predictions,
output_folder=output_folder,
**extra_args)
```
#### File: FCOS/tools/generate_pseudo_label.py
```python
import os
import numpy as np
from pycocotools.coco import COCO
import cv2
from tqdm import tqdm
import argparse
import json
import torch
from fcos_core.structures.bounding_box import BoxList
from fcos_core.structures.boxlist_ops import boxlist_iou
def generate_pseudo_label_with_confidence_score(boxes, image_id, score_thre):
scores = boxes.get_field("scores")
_, idx = scores.sort(0, descending=True)
if isinstance(score_thre, float):
keep = torch.nonzero(scores >= score_thre).squeeze(1)
else:
labels = boxes.get_field("labels")
keep = torch.nonzero(scores >= score_thre[labels]).squeeze(1)
return idx[:len(keep)]
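# Illustrative sketch (threshold values are made up): the same filtering with a per-class
# threshold tensor. score_thre is indexed by the predicted label, so each category keeps its
# own cutoff; index 0 stands for the background class here.
def _example_per_class_threshold(boxes):
    per_class_thre = torch.tensor([-1.0, 0.45, 0.50, 0.40])
    return generate_pseudo_label_with_confidence_score(boxes, image_id=None, score_thre=per_class_thre)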
def parse_predictions():
pass
def new_annotation_json(pseudo_labels, img_id, ann_id):
labels = pseudo_labels.get_field("labels").tolist()
boxes = pseudo_labels.convert("xywh").bbox
annos = []
for box, c in zip(boxes, labels):
annos.append({
"id": ann_id,
"image_id": img_id,
"category_id": c,
"bbox": box.tolist(),
"segmentation": [[0., 0.]],
"area": float(box[2] * box[3]),
"iscrowd": 0,
"ispseudo": True,
})
ann_id = ann_id + 1
return annos, ann_id
def main(args):
annFile = 'datasets/coco/annotations/instances_train2017_0.5.json'
coco = COCO(annFile)
with open(annFile, 'r') as f:
result_json = json.load(f)
annos_json = result_json['annotations']
# anno_id = max([ann['id'] for ann in annos_json]) + 1
output_dir = os.path.join(args.predictions, 'coco_2017_train_partial')
image_ids = torch.load(os.path.join(output_dir, 'image_ids.pth'))
predictions = torch.load(os.path.join(output_dir, 'predictions.pth'))
anno_id = max(torch.load(os.path.join(output_dir, 'box_ids.pth'))) + 1
imgIds=sorted(coco.getImgIds())
threshold = args.confidence
# threshold = torch.tensor([-1.0, 0.46633365750312805, 0.4409848749637604, 0.47267603874206543, 0.4707889258861542, 0.5220812559127808, 0.5358721613883972, 0.5226702690124512, 0.45160290598869324])
iou_threshold = 0.5
cpu_device = torch.device("cpu")
partial_box_num = 0
N = len(image_ids)
for i in tqdm(range(N)):
im_idx = image_ids[i]
bbox = predictions[i]
imginfo = coco.loadImgs(imgIds[im_idx])[0]
image_width = imginfo['width']
image_height = imginfo['height']
# load annotations
partial_anns = coco.loadAnns(coco.getAnnIds(imgIds=(imgIds[im_idx],)))
# full_anns = coco_full.loadAnns(coco_full.getAnnIds(imgIds=(imgIds[im_idx],), catIds=catIds))
partial_boxes = [obj["bbox"] for obj in partial_anns]
partial_boxes_ids = set([obj["id"] for obj in partial_anns])
partial_boxes = torch.as_tensor(partial_boxes).reshape(-1, 4) # guard against no boxes
partial_boxes = BoxList(partial_boxes, (image_width, image_height), mode="xywh").convert(
"xyxy"
)
partial_box_num += len(partial_boxes_ids)
# get predictions
bbox = bbox.resize((image_width, image_height))
bbox = bbox.to(cpu_device)
# generate pseudo labels
idx = generate_pseudo_label_with_confidence_score(bbox, im_idx, threshold)
if len(idx) > 0:
pseudo_labels = bbox[idx]
scores = pseudo_labels.get_field("scores").tolist()
# compute iou
overlaps = boxlist_iou(partial_boxes, pseudo_labels)
matched_id = [True] * len(pseudo_labels)
# remove predictions for partial labels
for i in range(len(partial_boxes)):
matched = np.argmax(overlaps[i])
if overlaps[i, matched] >= iou_threshold:
matched_id[matched] = False
pseudo_labels = pseudo_labels[matched_id]
# print(num, len(pseudo_labels))
pseudo_annos, anno_id = new_annotation_json(pseudo_labels, imgIds[im_idx], anno_id)
annos_json.extend(pseudo_annos)
print('confidence threshold: {}'.format(threshold))
result_json['annotations'] = annos_json
with open(args.annotation, 'w') as f:
json.dump(result_json, f)
print(partial_box_num, len(result_json['annotations']))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--predictions", help="prediction directory path. e.g output/stage1/",
type=str, default="/home/mengqinj/capstone/output/stage1/")
parser.add_argument("--annotation", help="output annotation path. e.g instances_train_2017.json",
type=str, default="instances_train_2017.json")
parser.add_argument("--confidence", help="confidence score threshold",
type=float, default=0.5)
args = parser.parse_args()
main(args)
```
#### File: FCOS/tools/NN_query.py
```python
import numpy as np
from tqdm import tqdm
import json
import os
import torch
import faiss
from pycocotools.coco import COCO
def build_index(feat, d):
cpu_index = faiss.IndexFlatL2(d)
cpu_index.add(feat)
return cpu_index
def find_nearest_neighbor(index, query_feat):
D, I = index.search(query_feat, 1)
D /= query_feat.shape[1]
return D.reshape(-1).tolist(), I.reshape(-1).tolist()
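# Usage sketch (illustrative, random toy features): building an L2 index over flattened ROI
# features and querying it. faiss expects float32 input, and d must equal the flattened length.
def _example_nn_query():
    d = 256 * 7 * 7
    db_feat = np.random.rand(10, d).astype('float32')     # 10 hypothetical ground-truth features
    query_feat = np.random.rand(2, d).astype('float32')   # 2 hypothetical prediction features
    index = build_index(db_feat, d)
    return find_nearest_neighbor(index, query_feat)        # (normalized distances, nearest ids)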
if __name__ == "__main__":
feature_dir = '/home/mengqinj/capstone/output/stage1/coco_2017_train_partial/'
anno_ids = torch.load(feature_dir + 'box_ids.pth')
image_ids = torch.load(feature_dir + 'image_ids.pth')
predictions = torch.load(feature_dir + 'predictions.pth')
dataDir=''
annFile='datasets/coco/annotations/instances_train2017_0.5.json'
coco=COCO(annFile)
raw_image_ids=sorted(coco.getImgIds())
feat = []
print('Loading ground truth features...')
for i in tqdm(anno_ids):
try:
roi = np.load(os.path.join(feature_dir, 'ground_truth_feature/{}.npz'.format(i)))['feature']
except:
print('[warning] failed to load ground truth feature {}'.format(i))
continue
feat.append(roi.reshape(-1))
feat = np.stack(feat, axis=0)
print(feat.shape)
d = 256 * 7 * 7
print('Building database index...')
index = build_index(feat, d)
results = []
print('Loading prediction features and querying...')
i = 0
for i in tqdm(image_ids):
roi = np.load(os.path.join(feature_dir, 'prediction_feature/{}.npz'.format(i)))['feature']
boxlist = predictions[i]
img_info = coco.loadImgs(raw_image_ids[i])[0]
image_width = img_info["width"]
image_height = img_info["height"]
boxlist = boxlist.resize((image_width, image_height))
boxlist = boxlist.convert("xywh")
bboxes = boxlist.bbox.tolist()
labels = boxlist.get_field('labels').tolist()
scores = boxlist.get_field('scores').tolist()
if len(roi.shape) > 0 and roi.shape[0] > 0:
if len(roi.shape) >= 4:
query_feat = roi.reshape(roi.shape[0], -1)
else:
query_feat = roi.reshape(1, -1)
# print(query_feat.shape[0], len(boxlist))
D, I = find_nearest_neighbor(index, query_feat)
for j in range(len(boxlist)):
results.append({
'image_id': i,
'NN_distance': D[j],
'NN_id': I[j],
'score': scores[j],
'bbox': bboxes[j],
'category_id': labels[j],
})
# print(results)
with open('results.json', 'w') as f:
json.dump(results, f)
```
#### File: FCOS/tools/visualize_pseudo_label.py
```python
import os
import numpy as np
from pycocotools.coco import COCO
import cv2
from tqdm import tqdm
import argparse
import json
import torch
np.random.seed(0)
def convert_to_xyxy(bbox):
top_left = (int(bbox[0]), int(bbox[1]))
bottom_right = (int(bbox[0]+bbox[2]), int(bbox[1]+bbox[3]))
return top_left, bottom_right
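# Worked example (illustrative): convert_to_xyxy([10, 20, 30, 40]) returns ((10, 20), (40, 60)),
# i.e. a COCO-style [x, y, w, h] box becomes its top-left and bottom-right corners for cv2.rectangle.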
if __name__ == "__main__":
dataDir = 'datasets/coco'
dataType = 'train2017'
image_dir = dataDir + '/' + dataType + '/'
output_dir = 'output_images/'
annFile_pseudo = '{}/annotations/instances_{}_pseudo.json'.format(dataDir,dataType)
annFile_full = '{}/annotations/instances_{}_full.json'.format(dataDir,dataType)
coco_pseudo =COCO(annFile_pseudo)
coco_full = COCO(annFile_full)
sample_num = 100
img_ids = np.random.choice(sorted(coco_pseudo.getImgIds()), sample_num, replace=False)
print(img_ids)
catIds = list(range(2, 10))
for img_id in img_ids:
imginfo = coco_pseudo.loadImgs([img_id,])[0]
img = cv2.imread(image_dir + imginfo['file_name'])
pseudo_anns = coco_pseudo.loadAnns(coco_pseudo.getAnnIds(imgIds=(img_id,)))
full_anns = coco_full.loadAnns(coco_full.getAnnIds(imgIds=(img_id,), catIds=catIds))
pseudo_boxes = [obj["bbox"] for obj in pseudo_anns if "ispseudo" in obj.keys()]
partial_boxes = [obj["bbox"] for obj in pseudo_anns if "ispseudo" not in obj.keys()]
full_boxes = [obj["bbox"] for obj in full_anns]
# print(len(pseudo_boxes), len(partial_boxes), len(full_boxes))
for box in full_boxes:
# blue
top_left, bottom_right = convert_to_xyxy(box)
img = cv2.rectangle(img, top_left, bottom_right, (255, 0, 0), 2)
for box in partial_boxes:
# red
top_left, bottom_right = convert_to_xyxy(box)
img = cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 2)
for box in pseudo_boxes:
# yellow
top_left, bottom_right = convert_to_xyxy(box)
img = cv2.rectangle(img, top_left, bottom_right, (0, 255, 255), 2)
cv2.imwrite(output_dir+imginfo['file_name'], img)
``` |
{
"source": "Jmq14/VQA",
"score": 2
} |
#### File: VQA/student_code/save_feature.py
```python
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import models, transforms
import numpy as np
import os
from student_code.vqa_dataset import VqaDataset
def extract_features(model, batch_data, output_path):
images = batch_data['image']
if torch.cuda.is_available():
images = images.cuda()
image_path = batch_data['image_path']
feat = model(images)
feat = feat.data.cpu().numpy()
# print(feat.shape)
feat = feat.reshape((feat.shape[0], feat.shape[1], -1))
# print(feat.shape)
for i in range(feat.shape[0]):
path = os.path.join(
output_path, os.path.splitext(image_path[i])[0] + '_resnet_feature')
# print(path)
np.save(path, feat[i])
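# Note (illustrative): with 448x448 inputs and the final pooling/fc layers of ResNet-152 removed,
# the model used below produces a 2048 x 14 x 14 map per image, so each saved array has shape (2048, 196).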
def save_features(args):
transform = transforms.Compose([
transforms.Resize((448, 448)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
train_dataset = VqaDataset(image_dir=args.train_image_dir,
question_json_file_path=args.train_question_path,
annotation_json_file_path=args.train_annotation_path,
image_filename_pattern="COCO_train2014_{}.jpg",
is_training=True,
transform=transform)
val_dataset = VqaDataset(image_dir=args.test_image_dir,
question_json_file_path=args.test_question_path,
annotation_json_file_path=args.test_annotation_path,
image_filename_pattern="COCO_val2014_{}.jpg",
is_training=False,
transform=transform)
train_data = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_data_loader_workers)
val_data = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_data_loader_workers)
model = models.resnet152(pretrained=True)
model = nn.Sequential(*list(model.children())[:-2])
if torch.cuda.is_available():
model = model.cuda()
model.eval()
for batch_id, batch_data in enumerate(train_data):
print('Training data {}/{}'.format(batch_id, len(train_data)))
extract_features(model, batch_data, os.path.join(args.output_path, 'train2014'))
for batch_id, batch_data in enumerate(val_data):
print('Validation data {}/{}'.format(batch_id, len(val_data)))
extract_features(model, batch_data, os.path.join(args.output_path, 'val2014'))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Load feature.')
parser.add_argument('--train_image_dir', type=str)
parser.add_argument('--train_question_path', type=str)
parser.add_argument('--train_annotation_path', type=str)
parser.add_argument('--test_image_dir', type=str)
parser.add_argument('--test_question_path', type=str)
parser.add_argument('--test_annotation_path', type=str)
parser.add_argument('--output_path', type=str)
parser.add_argument('--num_data_loader_workers', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=10)
args = parser.parse_args()
save_features(args)
```
#### File: VQA/student_code/simple_baseline_experiment_runner.py
```python
from student_code.simple_baseline_net import SimpleBaselineNet
from student_code.experiment_runner_base import ExperimentRunnerBase
from student_code.vqa_dataset import VqaDataset
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
class SimpleBaselineExperimentRunner(ExperimentRunnerBase):
"""
Sets up the Simple Baseline model for training. This class is specifically responsible for creating the model and optimizing it.
"""
def __init__(self, train_image_dir, train_question_path, train_annotation_path,
test_image_dir, test_question_path,test_annotation_path, batch_size, num_epochs,
num_data_loader_workers):
train_image_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
val_image_transform = transforms.Compose([
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
train_dataset = VqaDataset(image_dir=train_image_dir,
question_json_file_path=train_question_path,
annotation_json_file_path=train_annotation_path,
image_filename_pattern="COCO_train2014_{}.jpg",
is_training=True,
transform=train_image_transform)
val_dataset = VqaDataset(image_dir=test_image_dir,
question_json_file_path=test_question_path,
annotation_json_file_path=test_annotation_path,
image_filename_pattern="COCO_val2014_{}.jpg",
is_training=False,
transform=val_image_transform)
model = SimpleBaselineNet(len(train_dataset.dictionary), len(train_dataset.answers))
super().__init__(train_dataset, val_dataset, model, batch_size, num_epochs, num_data_loader_workers)
self.criterion = nn.CrossEntropyLoss()
self.optimizer = optim.SGD([
{'params': model.ques_feat.parameters(), 'lr': 0.8},
{'params': model.fc.parameters()}], lr=0.01, momentum=0.9)
def _optimize(self, predicted_answers, true_answer_ids):
loss = self.criterion(predicted_answers, true_answer_ids)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss.data.item()
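# Note (illustrative): the optimizer above deliberately trains the question (bag-of-words)
# embedding with a much larger learning rate than the joint classifier, following the
# simple-baseline recipe of updating the sparse embedding faster than the dense layers.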
``` |
{
"source": "jmquintana79/ADA",
"score": 3
} |
#### File: jmquintana79/ADA/classes.py
```python
from functions import clean_string
import stats
import numpy as np
import pandas as pd
class ADA:
def __init__(self, df, depth = 'all'):
self.df = df
self.depth = depth
#self.columns = dict()
self.columns = Columns()
# rename columns if it is necessary
for name in self.df.columns:
self.df.rename(columns={name: clean_string(name)}, inplace=True)
# collect column names per type
self.num_columns = self.df.select_dtypes(include=['float64']).columns.tolist() # numerical columns
self.cat_columns = self.df.select_dtypes(include=['object', 'int64']).columns.tolist() # categorical columns
# create columns instances
for name in self.df.columns:
col = Column(name)
col.type = 'numerical' if name in self.num_columns else 'categorical'
setattr(self.columns, name, col)
def get_column_names(self):
return list(self.columns.__dict__.keys())
def get_column(self, name):
return getattr(self.columns, name)
def get_data(self, names:'str or list'):
return self.df[names].values
def num2cat_binning(self, name, nbins = 10):
# create new column name
name_new = name+'_cat%s'%nbins
# get columnt instance
Col = self.get_column(name)
# type validation
assert Col.type == 'numerical', 'this operation is only available for numerical columns.'
# get data
data_num = self.get_data(name)
# calculate bins
bins = np.linspace(np.min(data_num), np.max(data_num), nbins+1, endpoint=True)
labels = np.arange(1,nbins+1,1)
self.df[name_new] = pd.cut(data_num, bins=bins, labels=labels, include_lowest=True)
# create a new column
col = Column(name_new)
col.type = 'categorical'
setattr(self.columns, name_new, col)
# save new column name
self.cat_columns.append(name_new)
def calculate_stats_num(self, name, per = [5,25,50,75,95]):
# get columnt instance
Col = self.get_column(name)
# type validation
assert Col.type == 'numerical', 'this operation is only available for numerical columns.'
# get data
data = self.get_data(name)
# initialize
dstats = dict()
# calculate statistics
dstats['mean'] = stats.mean(data)
dstats['median'] = stats.median(data)
dstats['std'] = stats.std(data)
dstats['min'] = stats.min(data)
dstats['max'] = stats.max(data)
dstats['skew'] = stats.skew(data)
dstats['kurtosis'] = stats.kurtosis(data)
for ip in per:
dstats['per%s'%ip] = stats.percentile(data, ip)
# return
Col.stats = dstats
def calculate_stats_cat(self, name):
# get columnt instance
Col = self.get_column(name)
# type validation
assert Col.type == 'categorical', 'this operation is only available for categorical columns.'
# get data
data = self.get_data(name)
# initialize
dstats = dict()
dstats['count'] = dict()
dstats['probability'] = dict()
# count categorical values
cat, count = np.unique(data, return_index=False, return_inverse=False, return_counts=True, axis=None)
for icat, icount in zip(cat, count):
dstats['count'][icat] = icount
dstats['probability'][icat] = icount / len(data)
# set a new attributes with the cagories
setattr(Col, 'categories', list(cat))
# return
Col.stats = dstats
class Columns():
pass
class Column():
def __init__(self, name):
self.name = name
self.type = None
self.stats = None
def __str__(self):
print('\n"%s" (%s)'%(self.name, self.type))
print('Stats:')
for k, v in (self.stats or {}).items():
print('\t%s = %s'%(k,v))
return '\n'
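# Usage sketch (illustrative; the file name and the column 'price' are hypothetical):
#   ada = ADA(pd.read_csv('dataset.csv'))
#   ada.num2cat_binning('price', nbins=5)   # adds a categorical column 'price_cat5'
#   ada.calculate_stats_cat('price_cat5')   # fills counts / probabilities per bin
#   print(ada.get_column('price_cat5'))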
``` |
{
"source": "jmquintana79/DStools",
"score": 3
} |
#### File: jmquintana79/DStools/build_readme.py
```python
import os
from datetime import datetime
## FUNCTIONS
def display(path, root):
print((len(path) - 2) * '---', os.path.basename(root), f'[directory]')
def intersection(lst1, lst2):
return list(set(lst1) & set(lst2))
def count_files(files:list)->int:
return len([file for file in files if 'py' in file or '.ipynb' in file])
def write_readme_file(file_output:str, records:list):
with open(file_output, 'w') as outfile:
# loop of records
for r in records:
outfile.write(f'{r}\n')
## MAIN FUNCTION
def main(file_output:str):
# initialize
omossions_root = ['.', '.ipynb_checkpoints']
omossions_in_path = ['.git', '.img']
records = list()
stab = ' '
url_root = "https://github.com/jmquintana79/utilsDS/blob/master"
# header
records.append('# Tools for a Data Science projects')
# introduction
records.append("This repository is a compendium of notebooks and scripts to be used in my daily work for Data Science projects. I will try to use this landscape as reference:")
# add picture
records.append('<img align="center" width="100%" src=".img/DS_picture.jpg">')
# add content title
records.append('## Content')
# traverse root directory, and list directories as dirs and files as files
for root, dirs, files in os.walk("."):
# get path (splitted root)
path = root.split(os.sep)
# build name
name = os.path.basename(root)
if name in ('nlp', 'ADA', 'gam', 'EDA', 'KDE'):
s_name = name.upper()
else:
s_name = f"{name.replace('_', ' ').capitalize() }"
# build counter files
nfiles = count_files(files)
s_nfiles = f'*[{nfiles}]*'
# number of tabs
ntabs = len(path)-2
if ntabs >= 2:
ntabs = 2**(ntabs - 1)
# build url
url = os.path.join(url_root, root[2:])
# build name with url
s_name_url = f'[{s_name}]({url})'
# omit folders in path
if len(intersection(path, omossions_in_path)) > 0:
pass
else:
# omit folders
if not os.path.basename(root) in omossions_root:
# display
display(path, root)
# add sub-headers
if ntabs == 0:
records.append(f'### {s_name_url}')
elif ntabs == 1:
records.append(f'- {s_name_url} *[{nfiles}]*')
else:
records.append(f'{ntabs*stab}- {s_name_url} {s_nfiles}')
# foot
s_foot = f'> Updated on {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}'
records.append(s_foot)
## WRITE FILE
write_readme_file(file_output, records)
## MAIN ENV
if __name__ == '__main__':
# output file
file_output = 'README.md'
# launching
main(file_output)
# end
quit('done')
``` |
{
"source": "jmquintana79/utilsDS",
"score": 3
} |
#### File: scripts/datasets/wine.py
```python
import numpy as np
import sys
sys.path.append('../')
from tools.reader import columns, csv2df
import click
import os
this_dir, this_filename = os.path.split(__file__)
FILE = 'data/dataset.wine.csv.gz'
PATH = os.path.join(this_dir, FILE)
def load()->tuple:
"""
Load wine dataset (without target variable).
return -- tuple(dataframe data, dictionary columns)
"""
# header
click.secho('Load data..', fg='green')
# read data
df, dcol = csv2df(PATH)
# format
df.Proline = df.Proline.astype(float)
df.Magnesium = df.Magnesium.astype(float)
df.Alcohol = df.Alcohol.astype(int)
# update dcol
col = columns()
col.get(df)
# return
return (df, col)
def save(path: str):
# load dataset
data, dcol = load()
# store into a csv file
try:
data.to_csv(path, index=False)
# screen
click.secho('It was saved "%s" successfully.' % click.format_filename(path, shorten=True), fg='green')
except Exception as e:
click.secho('It could not be saved "%s" successfully.' % click.format_filename(path, shorten=True), fg='red')
click.echo(str(e))
# return
return None
if __name__ == '__main__':
# load data
data, col = load()
# print(col.all)
# save data
# save('dataset.wine.csv')
```
#### File: scripts/models/tuning.py
```python
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import sys
sys.path.append('../')
from datasets import solar
from tools.reader import get_dcol
from tools.timer import *
from preprocessing.scalers.normalization import Scaler
from models.metrics import metrics_regression
# from xgboost.sklearn import XGBClassifier
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import cross_val_score, train_test_split
import scipy.stats as st
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
def main():
# init timer
t = Timer()
""" DATA PREPARATION """
# load data
data, dcol = solar.load()
# select data
ly = ['y']
lx = ['doy', 'hour', 'LCDC267', 'MCDC267', 'HCDC267', 'TCDC267', 'logAPCP267', 'RH267', 'TMP267', 'DSWRF267']
data = data[lx + ly]
dcol = get_dcol(data, ltarget=ly)
# select one hour data
hour = 11
idata = data[data.hour == hour]
idata.drop('hour', axis=1, inplace=True)
idcol = get_dcol(idata, ltarget=['y'])
# clean
del(data)
del(dcol)
# filtering outliers (ghi vs power)
from preprocessing.outliers import median2D
isoutlier = median2D.launch(idata['DSWRF267'].values, idata.y.values, percent=20.)
idata['isoutlier'] = isoutlier
idata = idata[idata.isoutlier == False]
idata.drop('isoutlier', axis=1, inplace=True)
# prepare data
X = idata[idcol['lx']].values
scaler = Scaler()
y = scaler.fit_transform(idata[idcol['ly']].values).ravel()
print('Prepared data: X: %s y: %s' % (X.shape, y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
print('Prepared data: X_train: %s y_train: %s' % (X_train.shape, y_train.shape))
print('Prepared data: X_test: %s y_test: %s' % (X_test.shape, y_test.shape))
""" ESTIMATOR WITHOUT TUNING """
t.add('no_tuning')
clf = XGBRegressor(nthreads=-1)
clf.fit(X_train, y_train)
y_hat = clf.predict(X_test)
dscores = metrics_regression(y_test, y_hat, X.shape[1])
tf = t.since('no_tuning')
print('Without tuning: bias = %.3f mae = %.3f r2 = %.3f (time: %s)' % (dscores['bias'], dscores['mae'], dscores['r2'], format_duration(tf)))
""" ESTIMATOR WITH RANDOM TUNING """
t.add('random_tuning')
clf = XGBRegressor(nthreads=-1)
one_to_left = st.beta(10, 1)
from_zero_positive = st.expon(0, 50)
dparams = {
"n_estimators": st.randint(3, 40),
"max_depth": st.randint(3, 40),
"learning_rate": st.uniform(0.05, 0.4),
"colsample_bytree": one_to_left,
"subsample": one_to_left,
"gamma": st.uniform(0, 10),
'reg_alpha': from_zero_positive,
"min_child_weight": from_zero_positive,
}
gs = RandomizedSearchCV(clf, dparams, cv=5, n_jobs=1, scoring='r2')
gs.fit(X_train, y_train)
y_hat = gs.best_estimator_.predict(X_test)
dscores = metrics_regression(y_test, y_hat, X.shape[1])
tf = t.since('random_tuning')
print('Random tuning: bias = %.3f mae = %.3f r2 = %.3f (time: %s)' % (dscores['bias'], dscores['mae'], dscores['r2'], format_duration(tf)))
""" ESTIMATOR WITH EXHAUSTIVE TUNING """
t.add('exhaustive_tuning')
clf = XGBRegressor(nthreads=-1)
dparams = {
"n_estimators": [3, 10, 25, 40],
"max_depth": [3, 10, 25, 40],
"learning_rate": [0.05, 0.1, 0.25, 0.5],
"gamma": np.arange(0, 11, 1),
}
gs = GridSearchCV(clf, param_grid=dparams, cv=5, n_jobs=1, scoring='r2')
gs.fit(X_train, y_train)
y_hat = gs.best_estimator_.predict(X_test)
dscores = metrics_regression(y_test, y_hat, X.shape[1])
tf = t.since('exhaustive_tuning')
print('Exhaustive tuning: bias = %.3f mae = %.3f r2 = %.3f (time: %s)' % (dscores['bias'], dscores['mae'], dscores['r2'], format_duration(tf)))
""" ESTIMATOR WITH BAYESIAN TUNING """
from hpsklearn import HyperoptEstimator, xgboost_regression
from hyperopt import tpe
import os
os.environ['OMP_NUM_THREADS'] = str(2)
t.add('bayesian_tuning')
# Instantiate a HyperoptEstimator with the search space and number of evaluations
clf = HyperoptEstimator(regressor=xgboost_regression('my_clf'),
preprocessing=[],
algo=tpe.suggest,
max_evals=250,
trial_timeout=300)
clf.fit(X_train, y_train)
y_hat = clf.predict(X_test)
dscores = metrics_regression(y_test, y_hat, X.shape[1])
tf = t.since('bayesian_tuning')
print('Bayesian tuning: bias = %.3f mae = %.3f r2 = %.3f (time: %s)' % (dscores['bias'], dscores['mae'], dscores['r2'], format_duration(tf)))
if __name__ == '__main__':
main()
```
#### File: models/xgboost/test-xgboost_tuning3.py
```python
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import sys
sys.path.append('../../')
from datasets import solar
from tools.reader import get_dcol
from preprocessing.scalers.normalization import Scaler
from models.metrics import metrics_regression
from tools.timer import *
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold, KFold
import time
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import xgboost as xgb
from sklearn.metrics import r2_score, mean_absolute_error
import os
os.environ['OMP_NUM_THREADS'] = str(2)
def main():
# init timer
t = Timer()
t.add('test')
""" DATA PREPARATION """
# load data
data, dcol = solar.load()
# select data
ly = ['y']
lx = ['doy', 'hour', 'LCDC267', 'MCDC267', 'HCDC267', 'TCDC267', 'logAPCP267', 'RH267', 'TMP267', 'DSWRF267']
data = data[lx + ly]
dcol = get_dcol(data, ltarget=ly)
# select one hour data
hour = 11
idata = data[data.hour == hour]
idata.drop('hour', axis=1, inplace=True)
idcol = get_dcol(idata, ltarget=['y'])
# clean
del(data)
del(dcol)
# filtering outliers (ghi vs power)
from preprocessing.outliers import median2D
isoutlier = median2D.launch(idata['DSWRF267'].values, idata.y.values, percent=20.)
idata['isoutlier'] = isoutlier
idata = idata[idata.isoutlier == False]
idata.drop('isoutlier', axis=1, inplace=True)
# prepare data
X = idata[idcol['lx']].values
scaler = Scaler()
y = scaler.fit_transform(idata[idcol['ly']].values).ravel()
print('Prepared data: X: %s y: %s' % (X.shape, y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
print('Prepared data: X_train: %s y_train: %s' % (X_train.shape, y_train.shape))
print('Prepared data: X_test: %s y_test: %s' % (X_test.shape, y_test.shape))
# replace training dataset
X = X_train
y = y_train
""" ESTIMATOR WITH BAYESIAN TUNING """
from hpsklearn import HyperoptEstimator, xgboost_regression
from hyperopt import tpe
# Instantiate a HyperoptEstimator with the search space and number of evaluations
clf = HyperoptEstimator(regressor=xgboost_regression('my_clf'),
preprocessing=[],
algo=tpe.suggest,
max_evals=250,
trial_timeout=300)
clf.fit(X, y)
print(clf.best_model())
y_hat = clf.predict(X_test)
dscores = metrics_regression(y_test, y_hat, X.shape[1])
tf = t.since('test')
print('\nBayesian tuning -test: bias = %.3f mae = %.3f r2 = %.3f (time: %s)' %
(dscores['bias'], dscores['mae'], dscores['r2'], format_duration(tf)))
# training
y_hat = clf.predict(X)
dscores = metrics_regression(y, y_hat, X.shape[1])
print('Bayesian tuning - train: bias = %.3f mae = %.3f r2 = %.3f (time: %s)' %
(dscores['bias'], dscores['mae'], dscores['r2'], format_duration(tf)))
if __name__ == '__main__':
main()
```
#### File: scripts/plot/github_activity.py
```python
def show(df:'df', slabel_x:str='', slabel_y:str='', stitle:str=''):
"""
Plot chart similar than GitHub activity chart.
df-- data to be plotted with these column names: "x", "y", "activity".
slabel_x -- label to me printed on X-axis.
slabel_y -- label to me printed on Y-axis.
stitle -- title to be printed.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
# validation
assert "x" in df.columns, 'It is required the column "x".'
assert "y" in df.columns, 'It is required the column "y".'
assert "activity" in df.columns, 'It is required the column "activity".'
# reshape the data and plot it
df2 = df.pivot(columns="x", index="y", values="activity")
df2.fillna(0, inplace=True)
# data collection
IY, IX = np.mgrid[:df2.shape[0]+1, :df2.shape[1]+1]
# chart
fig, ax = plt.subplots(figsize=(12, 4))
ax.set_aspect("equal")
divider = make_axes_locatable(ax)
img = ax.pcolormesh(IX, IY, df2.values, cmap="Greens", edgecolor="w", vmin=(0.75)*np.min(df2.values), vmax=np.max(df2.values))
plt.xlim(0, df2.shape[1])
plt.title(stitle)
plt.xlabel(slabel_x)
plt.ylabel(slabel_y)
cax = divider.append_axes("right", size="2.5%", pad=0.05)
fig.colorbar(img, cax=cax)
plt.show()
if __name__=="__main__":
import numpy as np
import pandas as pd
# some random data
N = 100
np.random.seed(0)
weekday = np.random.randint(0, 7, N)
week = np.random.randint(0, 40, N)
activity = np.random.randint(0, 100, N)
# preparation
df = pd.DataFrame({"weekday":weekday, "week":week, "activity":activity})
df.drop_duplicates(subset=["weekday", "week"], inplace=True)
df.rename(columns={'weekday':'y', 'week':'x'},inplace = True)
# launching example
show(df, 'weeks', 'weekdays', 'Contributions')
```
#### File: preprocessing/outliers/median2D.py
```python
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import r2_score
import numpy as np
def _get_median_residues(signal: 'array', threshold: float, isreplace: bool =False):
"""
Filter based on Median Absolute Deviation technique.
signal -- signal to be filtered.
threshold -- threshold.
isreplace -- if it is True, replace identified outliers per the
median of the signal. If it is False, is inputed as
Nan (default False).
returns: filtered signal where outliers have been inputed.
"""
# identify outliers
difference = np.abs(signal - np.nanmedian(signal))
median_difference = np.nanmedian(difference)
s = np.zeros_like(difference) if median_difference == 0 else difference / float(median_difference)
mask = s > threshold
# validate if smallest residues are inputed as outliers
iback = np.where((mask) & (signal < np.nanmedian(signal)))[0]
if len(iback) > 0:
mask[iback] = False
# inpute outliers
if isreplace:
signal[mask] = np.nanmedian(signal) # replace
else:
signal[mask] = np.nan # delete
return signal
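# Worked example (illustrative) of the MAD rule above: for signal = [1, 2, 3, 4, 100] the median
# is 3, the absolute deviations are [2, 1, 0, 1, 97] and their median is 1, so s = [2, 1, 0, 1, 97].
# With threshold = 3 only the last point is masked; because 100 is above the median it is kept as
# an outlier and replaced by NaN (or by the median when isreplace=True).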
def launch(x1: 'array', x2: 'array', percent: float = 20., isplot: bool = False):
"""
Outilers identifier for 2D data.
x1 -- array of the first variable.
x2 -- array of the second variable.
percent -- maximum percent of data to be filtered (default 20%).
isplot -- plor or not results (default False)
return -- isoutlier mask array with True/False values.
"""
# prepare data
X = np.c_[x1, x2]
# min max scaler
scaler = MinMaxScaler()
X_t = scaler.fit_transform(X)
# calculate the residues signal
signal = np.abs(X_t[:, 0] - X_t[:, 1])
# looking for outliers
for ith in np.arange(0., 5., 0.1):
signal_filtered = _get_median_residues(signal.copy(), ith)
ilout = np.where(np.isnan(signal_filtered))[0]
ilin = np.where(np.isnan(signal_filtered) == False)[0]
p = len(ilout)*100/len(signal)
vr2 = r2_score(X_t[ilin, 0], X_t[ilin, 1])
if p < percent:
print('[info] outliers detection: threshold = %s / num. outliers(total) = %s(%s) / percent of filtered data: %.3f %s / r2 score = %.3f' %
(ith, len(ilout), len(signal), p, '%', vr2))
break
# validation
if len(ilin) <= 50.:
print('[warning] outliers detection: the set of filtered data is too small: len(x) = %s .' % (len(ilin)))
if vr2 < 0.5:
print('[warning] outliers detection: the set of filtered data has got a poor correlation: r2 score = %.3f .' % (vr2))
# plot
if isplot:
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(20, 5))
# trend of residules
ax1 = plt.subplot2grid((1, 4), (0, 0), colspan=3)
x = np.arange(len(signal))
y0 = signal
y1 = signal_filtered
ax1.plot(x, y0, 'b--')
ax1.scatter(x, y0, color='red')
ax1.scatter(x, y1, color='blue')
ax1.set_title('Residues')
ax1.set_xlabel("time")
ax1.set_ylabel("abs(residue)")
# scatter of 2d filtering
ax2 = plt.subplot2grid((1, 4), (0, 3))
ax2.scatter(X[:, 0], X[:, 1], color='blue')
ax2.scatter(X[ilout, 0], X[ilout, 1], color='red')
ax2.set_title('Outliers')
ax2.set_xlabel("v1")
ax2.set_ylabel("v2")
# adjust and display
plt.subplots_adjust(wspace=0.3)
plt.show()
# return
    isoutlier = np.zeros(len(signal), dtype=bool)
isoutlier[ilout] = True
return isoutlier
```
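A minimal usage sketch for `launch` above. The import path and the synthetic data are assumptions for illustration only; adjust the import to wherever `median2D.py` lives in your project.
```python
import numpy as np

# assumed import path; adjust to your project layout
from preprocessing.outliers import median2D

np.random.seed(0)
x1 = np.linspace(0.0, 10.0, 200)
x2 = x1 + np.random.normal(0.0, 0.2, 200)  # strongly correlated with x1
x2[::25] += 5.0                            # inject a few artificial outliers

mask = median2D.launch(x1, x2, percent=20.0, isplot=False)
print('points flagged as outliers:', int(mask.sum()))
```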
#### File: scripts/preprocessing/preparation.py
```python
import sys
sys.path.append('../')
from pipelines import xpipes as transformer
import click
def full(df: 'dataframe')->'array':
"""
Launch a pre-processing Pipeline with numerical and categorical variables.
df -- data to be transformed.
"""
# validate if there are NaN values.
if df.isnull().sum().sum() > 0:
        click.secho('[error] the dataframe to be transformed contains NaN values.', fg='red', bold=True)
print(df.isnull().sum())
quit('Aborted!')
# fit, transform and return
return transformer.full_pipeline.fit_transform(df)
def numerical(df: 'dataframe')->'array':
"""
Launch a pre-processing Pipeline with only numerical variables.
df -- data to be transformed.
"""
# validate if there are NaN values.
if df.isnull().sum().sum() > 0:
        click.secho('[error] the dataframe to be transformed contains NaN values.', fg='red', bold=True)
print(df.isnull().sum())
quit('Aborted!')
# fit, transform and return
return transformer.num_pipeline.fit_transform(df)
if __name__ == '__main__':
from tools import reader
# read data
data, dcol = reader.csv2df('../../datasets/dataset.weather.csv', lindex=['datetime'])
# get a sample
dfX = data[dcol['lc_float'][:1] + dcol['lc_cat'][:1]]
# transformation
X = full(dfX.dropna().head())
print(X[:, :5])
```
#### File: scripts/utils/datasets.py
```python
from pandas import DataFrame
class Data():
def __init__(self):
self.menu = ['boston', 'iris', 'diabetes']
def load(self, sname: str):
# cases
if sname == 'boston':
## boston (regression)
from sklearn.datasets import load_boston
dataset = load_boston()
elif sname == 'iris':
## iris (classification)
from sklearn.datasets import load_iris
dataset = load_iris()
elif sname == 'diabetes':
## diabetes (regression)
from sklearn.datasets import load_diabetes
dataset = load_diabetes()
else:
print('[error] this dataset is not available.')
return None
# display shape
print('[info] shape of loaded data: ', dataset.data.shape)
# store data in df
dfdata = DataFrame(dataset.data, columns=dataset.feature_names)
dfdata['target'] = dataset.target
lcol = list(dfdata.columns)
# list of columns per type
lcol_float = list(dfdata[lcol].select_dtypes(
include=['float64']).columns.values)
lcol_int = list(dfdata[lcol].select_dtypes(
include=['int64']).columns.values)
lcol_cat = list(dfdata[lcol].select_dtypes(
include=['object']).columns.values)
# store column names
dcol = {
'ly': ['target'],
            'lx': [icol for icol in lcol if icol != 'target'],
'lc_float': lcol_float,
'lc_int': lcol_int,
'lc_cat': lcol_cat
}
# return
return (dfdata, dcol)
if __name__ == '__main__':
# object
dataset = Data()
print('Available dataset: %s' % dataset.menu)
# load dataset
print('Load boston dataset:')
data, dcol = dataset.load('boston')
print(dcol)
print(data.head())
```
#### File: scripts/utils/saver.py
```python
import timer
# save submission data into csv file
def df2csv_submission(df: 'df', filename: str='sub_{}.csv.gz'):
"""Saves the passed dataframe with index=False, and enables GZIP compression if a '.gz' extension is passed.
    If '{}' exists in the filename, it is replaced with the current time from timer.now().
Keyword arguments:
df -- The pandas DataFrame of the submission
filename -- The filename to save the submission to. Autodetects '.gz'
"""
if '{}' in filename:
filename = filename.format(timer.now())
if filename.endswith('.gz'):
compression = 'gzip'
else:
compression = None
try:
df.to_csv(filename, index=False, compression=compression)
print('[success] the file "%s" was saved.' % filename)
except Exception as e:
        print('[error-save_sub] there was a problem saving "%s".' % filename)
print(str(e))
if __name__ == '__main__':
from pandas import DataFrame
# dataset to df
from sklearn.datasets import load_boston
boston = load_boston()
dfboston = DataFrame(boston.data, columns=boston.feature_names)
# save submition
    df2csv_submission(dfboston, 'data-boston_{}.csv.gz')
```
#### File: scripts/utils/timer.py
```python
import time
from warnings import warn
# Function wrapper that warns that the wrapped function is deprecated, and calls it anyway (by <NAME>)
def _deprecated(func, old_name, new_name):
def new_func(*args, **kwargs):
message = '{}() has been deprecated in favour of {}() and will be removed soon'.format(old_name, new_name)
warn(message)
return func(*args, **kwargs)
return new_func
class Timer:
"""A class for tracking timestamps and time elapsed since events. Useful for profiling code.
Usage:
>>> t = Timer()
>>> t.since() # Seconds since the timetracker was initialised
>>> t.add('func') # Save the current timestamp as 'func'
>>> t.since('func') # Seconds since 'func' was added
>>> t['func'] # Get the absolute timestamp of 'func' for other uses
"""
def __init__(self):
self.times = {}
self.add(0)
def __getitem__(self, key):
return self.times[key]
def add(self, key):
"""Add the current time to the index with the specified key"""
self.times[key] = time.time()
def since(self, key=0):
"""Get the time elapsed in seconds since the specified key was added to the index"""
return time.time() - self.times[key]
def fsince(self, key=0, max_fields=3):
"""Get the time elapsed in seconds, nicely formatted by format_duration()"""
return format_duration(self.since(key), max_fields)
elapsed = _deprecated(since, 'Timer.elapsed', 'Timer.since')
format_elapsed = _deprecated(fsince, 'Timer.format_elapsed', 'Timer.fsince')
def now():
"""Returns the current time as a string in the format 'YYYY_MM_DD_HH_MM_SS'. Useful for timestamping filenames etc."""
return time.strftime("%Y_%m_%d_%H_%M_%S")
# Alias for backwards-compatibility
str_time_now = _deprecated(now, 'mlcrate.time.str_time_now', 'mlcrate.time.now')
def format_duration(seconds, max_fields=3):
"""Formats a number of seconds in a pretty readable format, in terms of seconds, minutes, hours and days.
Example:
>>> format_duration(3825.21)
'1h03m45s'
>>> format_duration(3825.21, max_fields=2)
'1h03m'
Keyword arguments:
seconds -- A duration to be nicely formatted, in seconds
max_fields (default: 3) -- The number of units to display (eg. if max_fields is 1 and the time is three days it will only display the days unit)
Returns: A string representing the duration
"""
seconds = float(seconds)
s = int(seconds % 60)
m = int((seconds / 60) % 60)
h = int((seconds / 3600) % 24)
d = int(seconds / 86400)
fields = []
for unit, value in zip(['d', 'h', 'm', 's'], [d, h, m, s]):
if len(fields) > 0: # If it's not the first value, pad with 0s
fields.append('{}{}'.format(str(value).rjust(2, '0'), unit))
elif value > 0: # If there are no existing values, we don't add this unit unless it's >0
fields.append('{}{}'.format(value, unit))
fields = fields[:max_fields]
# If the time was less than a second, we just return '<1s' TODO: Maybe return ms instead?
if len(fields) == 0:
fields.append('<1s')
return ''.join(fields)
# main
if __name__ == '__main__':
from datetime import datetime
# initialize
t = Timer()
    # save the current timestamp under the key 'event'
print('add "event"')
t.add('event')
# sleep (simulate any function usage)
print('sleep...')
time.sleep(5)
    # seconds since 'event' was added
tf = t.since('event')
    print('time since "event": %s sec' % tf)
# format1
stf1 = format_duration(tf)
    print('time since "event" (formatted by module function): %s' % stf1)
# format2
stf2 = t.fsince('event')
    print('time since "event" (formatted by the Timer method): %s' % stf2)
    # get the absolute timestamp of 'event' for other uses
print('absolute timestamp: %s' % t['event'])
    print('absolute timestamp (as datetime): %s' % datetime.fromtimestamp(t['event']).strftime('%Y-%m-%d %H:%M:%S'))
# datetime now (formated for output file names)
print('now is: %s' % now())
``` |
{
"source": "jmrafael/Streamlit-Authentication",
"score": 2
} |
#### File: authlib/common/dt_helpers.py
```python
from datetime import datetime
def dt_to_str(dt):
return str(dt.strftime("%Y-%m-%dT%H:%M:%S.%fZ"))
def tnow_iso():
return datetime.now()
def tnow_iso_str():
return dt_to_str(datetime.now())
def dt_from_str(dt_str):
return datetime.strptime(dt_str, '%Y-%m-%dT%H:%M:%S.%fZ')
def dt_from_ts(ts):
return datetime.fromtimestamp(int(ts))
```
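A quick round-trip check of the helpers above, assuming the module is importable as `authlib.common.dt_helpers`:
```python
from datetime import datetime

# assumed import path; adjust to the actual package layout
from authlib.common.dt_helpers import dt_to_str, dt_from_str, dt_from_ts

now = datetime.now()
encoded = dt_to_str(now)            # e.g. '2021-05-01T12:34:56.789012Z'
assert dt_from_str(encoded) == now  # %f keeps microseconds, so the round trip is exact

print(encoded)
print(dt_from_ts(1609459200))       # 2021-01-01 00:00:00 UTC, rendered in local time
```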
#### File: authlib/common/__init__.py
```python
from functools import wraps
# Easy interceptor for tracing
def trace_activity(fn, trace=True):
@wraps(fn)
def wrapper(*args, **kwargs):
if trace:
print(f'TRACE: calling {fn.__name__}(), positional args: {args}, named args: {kwargs}')
return fn(*args, **kwargs)
return wrapper
# Error handlers
class AppError(Exception):
def __init__(self, error, status_code):
self.error = error
self.status_code = status_code
class DatabaseError(AppError):
def __init__(self, error, status_code):
        super().__init__(error, status_code)
```
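A small usage sketch of the decorator and error types above, assuming the package is importable as `authlib.common`:
```python
# assumed import path; adjust to the actual package layout
from authlib.common import trace_activity, DatabaseError

@trace_activity
def create_user(username, role='viewer'):
    return {'username': username, 'role': role}

create_user('alice', role='admin')
# TRACE: calling create_user(), positional args: ('alice',), named args: {'role': 'admin'}

try:
    raise DatabaseError('connection refused', 500)
except DatabaseError as exc:
    print(exc.error, exc.status_code)
```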
#### File: jmrafael/Streamlit-Authentication/env.py
```python
import os
from os import environ as osenv
from dotenv import load_dotenv, find_dotenv
import logging
# ======== GLOBAL SETTINGS ========
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
osenv['BASE_DIR'] = BASE_DIR
# ======== LOAD SECRET ENVIRONMENT VARS (from .env) ========
ENV_FILE = find_dotenv()
if ENV_FILE:
load_dotenv(ENV_FILE)
def verify():
logging.info(f'>>> Environment loading status <<<')
logging.info(f'-- Application base directory: {BASE_DIR}')
logging.info(f'-- Dotenv file: {ENV_FILE}\n\n')
``` |
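A sketch of how this module might be exercised, assuming `python-dotenv` is installed and the file above is importable as `env`:
```python
import os
import logging

logging.basicConfig(level=logging.INFO)

import env  # assumes env.py above is on the import path

env.verify()                       # logs the base directory and dotenv status
print(os.environ.get('BASE_DIR'))  # set as a side effect of importing env
```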
{
"source": "jmrafael/Streamlit-Multipage",
"score": 3
} |
#### File: Streamlit-Multipage/apps/home.py
```python
import streamlit as st
import pandas as pd
import numpy as np
from data.create_data import create_table
def app():
st.title('Home')
st.write("This is a sample home page in the mutliapp.")
st.write("See `apps/home.py` to know how to use it.")
st.markdown("### Sample Data")
df = create_table()
st.write(df)
st.write('Navigate to `Data Stats` page to visualize the data')
``` |
{
"source": "JMRaichDev/Open-NGRadio",
"score": 3
} |
#### File: JMRaichDev/Open-NGRadio/main.py
```python
import discord
from discord.ext import tasks
from discord import FFmpegPCMAudio
from discord.ext import commands
from discord.utils import get
from colorama import Fore
from ng_api import *
from utils import *
bot = commands.Bot(command_prefix="ong!", description="A remake of NGRadio#4551 in Open-Source by JMimosa#2495")
bot.remove_command("help")
# Task
@static_vars(last_title=getRadioApiJSON()['now_playing']['song']['title'])
@tasks.loop(seconds=25) # repeat after every 25 seconds
async def updateStatus():
source = getRadioApiJSON()
current_title = source['now_playing']['song']['title']
if updateStatus.last_title != current_title: # song has changed
        await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening,
                                                            name=f"{source['now_playing']['song']['title']} by {source['now_playing']['song']['artist']}"))
updateStatus.last_title = current_title
# Event
@bot.event
async def on_ready():
updateStatus.start()
print(Fore.GREEN + "[+] Bot is now started and ready")
print(Fore.GREEN + '[+] Logged in as:'
f'\n [-->] {bot.user.name}#{bot.user.discriminator} -> {bot.user.id}') # printing into the console some information about the bot
@bot.command(pass_context=True, name='help', aliases=['h'])
async def _help(ctx):
await ctx.send("```Help : "
"\n --> ong!help -> (ong!h) : Show this message and help you."
"\n --> ong!play -> (ong!p) : Join your voice channel and play NG-radio."
"\n --> ong!leave -> (ong!l) : Leave current voice channel."
"\n --> ong!song -> (ong!sg) : Shows current played song.```") # sending help to user
@bot.command(pass_context=True, name='song', aliases=['sg'])
async def _song(ctx):
source = getRadioApiJSON() # setting source to NGRadio-Api.json
# creating an embed with all the information about current music
embed = discord.Embed(
title=":musical_note: Quel est le titre en cours ?",
description=f"Actuellement sur NGRadio : {source['now_playing']['song']['title']} de {source['now_playing']['song']['artist']}",
color=discord.Colour.blue()
)
embed.set_footer(text="NGRadio • NationsGlory.fr")
embed.set_thumbnail(url=f"{source['now_playing']['song']['art']}")
await ctx.send(embed=embed) # sending to user the embed with all information about current music
@bot.command(pass_context=True, name='play', aliases=['p'])
async def _play(ctx):
    voice_state = ctx.message.author.voice
    if voice_state is None:  # user is not connected to any voice channel
        await ctx.send("You are not connected to a voice channel")  # tell the user they are not in a voice channel
        return
    channel = voice_state.channel
voice = get(bot.voice_clients, guild=ctx.guild) # getting bot voice client in message's guild
if voice and voice.is_connected(): # check if bot is already connected to any channel and if voice isn't None
await voice.move_to(channel) # moving to user's voice channel
else:
voice = await channel.connect() # creating a voice object and connecting to user's channel
FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
'options': '-vn'}
NG_RADIO_URL = getRadioApiJSON()['station'][
'listen_url'] # or https://radio.nationsglory.fr:8000/ngradio
voice = get(bot.voice_clients, guild=ctx.guild)
if not voice.is_playing(): # check if bot is already playing NGRadio
voice.play(FFmpegPCMAudio(NG_RADIO_URL, **FFMPEG_OPTIONS)) # playing to microphone NGRadio sound
await ctx.message.delete() # deleting command message
@bot.command(pass_context=True, name='leave', aliases=['l'])
async def _leave(ctx):
await ctx.message.delete() # deleting command message
await ctx.voice_client.disconnect() # disconnect bot voice_client
bot.run("") # connecting to discord with a token and starting the bot
```
#### File: JMRaichDev/Open-NGRadio/utils.py
```python
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
``` |
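A short example of the `static_vars` decorator defined above, used the same way `updateStatus` uses it in `main.py`:
```python
from utils import static_vars

@static_vars(counter=0)
def next_id():
    next_id.counter += 1  # the attribute persists between calls
    return next_id.counter

print(next_id(), next_id(), next_id())  # 1 2 3
```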
{
"source": "jmren168/botorch",
"score": 2
} |
#### File: botorch/acquisition/utils.py
```python
r"""
Utilities for acquisition functions.
"""
from typing import Callable, Optional
from torch import Tensor
from ..models.model import Model
from ..sampling.samplers import IIDNormalSampler, SobolQMCNormalSampler
from ..utils.transforms import squeeze_last_dim
from . import analytic, monte_carlo
from .acquisition import AcquisitionFunction
from .monte_carlo import MCAcquisitionFunction
from .objective import MCAcquisitionObjective
def get_acquisition_function(
acquisition_function_name: str,
model: Model,
objective: MCAcquisitionObjective,
X_observed: Tensor,
X_pending: Optional[Tensor] = None,
mc_samples: int = 500,
qmc: bool = True,
seed: Optional[int] = None,
**kwargs,
) -> MCAcquisitionFunction:
r"""Convenience function for initializing botorch acquisition functions.
Args:
acquisition_function_name: Name of the acquisition function.
model: A fitted model.
objective: A MCAcquisitionObjective.
X_observed: A `m1 x d`-dim Tensor of `m1` design points that have
already been observed.
X_pending: A `m2 x d`-dim Tensor of `m2` design points whose evaluation
is pending.
mc_samples: The number of samples to use for (q)MC evaluation of the
acquisition function.
qmc: If True, use quasi-Monte-Carlo sampling (instead of iid).
seed: If provided, perform deterministic optimization (i.e. the
function to optimize is fixed and not stochastic).
Returns:
The requested acquisition function.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> obj = LinearMCObjective(weights=torch.tensor([1.0, 2.0]))
>>> acqf = get_acquisition_function("qEI", model, obj, train_X)
"""
# initialize the sampler
if qmc:
sampler = SobolQMCNormalSampler(num_samples=mc_samples, seed=seed)
else:
sampler = IIDNormalSampler(num_samples=mc_samples, seed=seed)
# instantiate and return the requested acquisition function
if acquisition_function_name == "qEI":
best_f = objective(model.posterior(X_observed).mean).max().item()
return monte_carlo.qExpectedImprovement(
model=model,
best_f=best_f,
sampler=sampler,
objective=objective,
X_pending=X_pending,
)
elif acquisition_function_name == "qPI":
best_f = objective(model.posterior(X_observed).mean).max().item()
return monte_carlo.qProbabilityOfImprovement(
model=model,
best_f=best_f,
sampler=sampler,
objective=objective,
X_pending=X_pending,
tau=kwargs.get("tau", 1e-3),
)
elif acquisition_function_name == "qNEI":
return monte_carlo.qNoisyExpectedImprovement(
model=model,
X_baseline=X_observed,
sampler=sampler,
objective=objective,
X_pending=X_pending,
)
elif acquisition_function_name == "qSR":
return monte_carlo.qSimpleRegret(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
elif acquisition_function_name == "qUCB":
if "beta" not in kwargs:
raise ValueError("`beta` must be specified in kwargs for qUCB.")
return monte_carlo.qUpperConfidenceBound(
model=model,
beta=kwargs["beta"],
sampler=sampler,
objective=objective,
X_pending=X_pending,
)
raise NotImplementedError(
f"Unknown acquisition function {acquisition_function_name}"
)
def get_infeasible_cost(
X: Tensor, model: Model, objective: Callable[[Tensor], Tensor] = squeeze_last_dim
) -> float:
r"""Get infeasible cost for a model and objective.
Computes an infeasible cost `M` such that `-M < min_x f(x)` almost always,
so that feasible points are preferred.
Args:
X: A `n x d` Tensor of `n` design points to use in evaluating the
minimum. These points should cover the design space well. The more
points the better the estimate, at the expense of added computation.
model: A fitted botorch model.
objective: The objective with which to evaluate the model output.
Returns:
The infeasible cost `M` value.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> objective = lambda Y: Y[..., -1] ** 2
>>> M = get_infeasible_cost(train_X, model, obj)
"""
posterior = model.posterior(X)
lb = objective(posterior.mean - 6 * posterior.variance.clamp_min(0).sqrt()).min()
M = -lb.clamp_max(0.0)
return M.item()
def is_nonnegative(acq_function: AcquisitionFunction) -> bool:
r"""Determine whether a given acquisition function is non-negative.
Args:
acq_function: The `AcquisitionFunction` instance.
Returns:
True if `acq_function` is non-negative, False if not, or if the behavior
is unknown (for custom acquisition functions).
Example:
>>> qEI = qExpectedImprovement(model, best_f=0.1)
>>> is_nonnegative(qEI) # returns True
"""
return isinstance(
acq_function,
(
analytic.ExpectedImprovement,
analytic.ConstrainedExpectedImprovement,
analytic.ProbabilityOfImprovement,
analytic.NoisyExpectedImprovement,
monte_carlo.qExpectedImprovement,
monte_carlo.qNoisyExpectedImprovement,
monte_carlo.qProbabilityOfImprovement,
),
)
```
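A hedged sketch of calling `get_acquisition_function` for qUCB, the one branch above that requires an extra `beta` keyword argument. The model setup and module paths are assumptions based on the imports above and may differ across botorch versions:
```python
import torch
from botorch.models import SingleTaskGP
from botorch.acquisition.objective import LinearMCObjective
from botorch.acquisition.utils import get_acquisition_function

train_X = torch.rand(20, 2)
train_Y = 1.0 - (train_X - 0.5).norm(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

objective = LinearMCObjective(weights=torch.tensor([1.0]))
# `beta` is forwarded through **kwargs; omitting it raises ValueError for qUCB
qUCB = get_acquisition_function(
    "qUCB", model, objective, X_observed=train_X,
    mc_samples=256, qmc=True, seed=0, beta=0.2,
)
```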
#### File: botorch/posteriors/gpytorch.py
```python
r"""
Posterior Module to be used with GPyTorch models.
"""
from typing import Optional
import gpytorch
import torch
from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from .posterior import Posterior
class GPyTorchPosterior(Posterior):
r"""A posterior based on GPyTorch's multi-variate Normal distributions."""
def __init__(self, mvn: MultivariateNormal) -> None:
r"""A posterior based on GPyTorch's multi-variate Normal distributions.
Args:
mvn: A GPyTorch MultivariateNormal (single-output case) or
MultitaskMultivariateNormal (multi-output case).
"""
self.mvn = mvn
self._is_mt = isinstance(mvn, MultitaskMultivariateNormal)
@property
def device(self) -> torch.device:
r"""The torch device of the posterior."""
return self.mvn.loc.device
@property
def dtype(self) -> torch.dtype:
r"""The torch dtype of the posterior."""
return self.mvn.loc.dtype
@property
def event_shape(self) -> torch.Size:
r"""The event shape (i.e. the shape of a single sample) of the posterior."""
shape = self.mvn.batch_shape + self.mvn.event_shape
if not self._is_mt:
shape += torch.Size([1])
return shape
def rsample(
self,
sample_shape: Optional[torch.Size] = None,
base_samples: Optional[Tensor] = None,
) -> Tensor:
r"""Sample from the posterior (with gradients).
Args:
sample_shape: A `torch.Size` object specifying the sample shape. To
draw `n` samples, set to `torch.Size([n])`. To draw `b` batches
of `n` samples each, set to `torch.Size([b, n])`.
base_samples: An (optional) Tensor of `N(0, I)` base samples of
appropriate dimension, typically obtained from a `Sampler`.
This is used for deterministic optimization.
Returns:
A `sample_shape x event_shape`-dim Tensor of samples from the posterior.
"""
if sample_shape is None:
sample_shape = torch.Size([1])
if base_samples is not None:
if base_samples.shape[: len(sample_shape)] != sample_shape:
raise RuntimeError("sample_shape disagrees with shape of base_samples.")
# get base_samples to the correct shape
base_samples = base_samples.expand(sample_shape + self.event_shape)
# remove output dimension in single output case
if not self._is_mt:
base_samples = base_samples.squeeze(-1)
with gpytorch.settings.fast_computations(covar_root_decomposition=False):
samples = self.mvn.rsample(
sample_shape=sample_shape, base_samples=base_samples
)
# make sure there always is an output dimension
if not self._is_mt:
samples = samples.unsqueeze(-1)
return samples
@property
def mean(self) -> Tensor:
r"""The posterior mean."""
mean = self.mvn.mean
if not self._is_mt:
mean = mean.unsqueeze(-1)
return mean
@property
def variance(self) -> Tensor:
r"""The posterior variance."""
variance = self.mvn.variance
if not self._is_mt:
variance = variance.unsqueeze(-1)
return variance
def scalarize_posterior(
posterior: GPyTorchPosterior, weights: Tensor, offset: float = 0.0
) -> GPyTorchPosterior:
r"""Affine transformation of a multi-output posterior.
Args:
posterior: The posterior to be transformed. Must be single-point (`q=1`).
Supports `t`-batching.
        weights: A one-dimensional tensor of weights. The number of elements
            must equal the number of outputs of the posterior.
offset: The offset of the affine transformation.
Returns:
The transformed (single-output) posterior. If the input posterior has
mean `mu` and covariance matrix `Sigma`, this posterior has mean
`weights^T * mu` and variance `weights^T Sigma w`.
Example:
Example for a model with two outcomes:
>>> X = torch.rand(1, 2)
>>> posterior = model.posterior(X)
>>> weights = torch.tensor([0.5, 0.25])
>>> new_posterior = scalarize_posterior(posterior, weights=weights)
"""
mean = posterior.mean
if mean.shape[-1] != len(weights):
raise RuntimeError("Output shape not equal to that of weights")
if mean.shape[-2] != 1:
raise UnsupportedError("scalarize_posterior currently not supported for q>1")
# no need to use lazy here since q=1
cov = posterior.mvn.covariance_matrix
batch_shape = cov.shape[:-2]
new_cov = ((cov @ weights) @ weights).view(*batch_shape, 1, 1)
new_mean = offset + (mean @ weights).view(*batch_shape, 1)
new_mvn = MultivariateNormal(new_mean, new_cov)
return GPyTorchPosterior(new_mvn)
```
#### File: botorch/test_functions/aug_hartmann6.py
```python
import torch
from torch import Tensor
from .hartmann6 import ALPHA, A, P
GLOBAL_MAXIMIZER = [0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573, 1]
def neg_aug_hartmann6(X: Tensor) -> Tensor:
r"""Negative augmented Hartmann6 test function.
The last dimension of X is the fidelity parameter.
7-dimensional function (typically evaluated on `[0, 1]^7`):
H(x) = -(ALPHA_1 - 0.1 * (1-x_7)) * exp(- sum_{j=1}^6 A_1j (x_j - P_1j) ** 2) -
sum_{i=2}^4 ALPHA_i exp( - sum_{j=1}^6 A_ij (x_j - P_ij) ** 2)
H has unique global minimizer
x = [0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573, 1]
with `H_min = -3.32237`
Args:
X: A Tensor of size `7` or `k x 7` (k batch evaluations).
Returns:
`-H(X)`, the negative value of the augmented Hartmann6 function.
"""
batch = X.ndimension() > 1
X = X if batch else X.unsqueeze(0)
inner_sum = torch.sum(
X.new(A) * (X[:, :6].unsqueeze(1) - 0.0001 * X.new(P)) ** 2, dim=2
)
alpha1 = ALPHA[0] - 0.1 * (1 - X[:, 6])
H = (
-torch.sum(X.new(ALPHA)[1:] * torch.exp(-inner_sum)[:, 1:], dim=1)
- alpha1 * torch.exp(-inner_sum)[:, 0]
)
result = -H
return result if batch else result.squeeze(0)
```
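A quick sanity check of the function above at its documented maximizer (mirroring the unit test later in this dump); with the fidelity fixed at 1 the value matches the standard Hartmann6 optimum of roughly 3.32237:
```python
import torch
from botorch.test_functions.aug_hartmann6 import GLOBAL_MAXIMIZER, neg_aug_hartmann6

X = torch.tensor(GLOBAL_MAXIMIZER)  # last entry is the fidelity, fixed at 1.0
print(neg_aug_hartmann6(X).item())  # ~3.32237

# batched evaluation works as well
print(neg_aug_hartmann6(torch.rand(4, 7)).shape)  # torch.Size([4])
```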
#### File: botorch/test_functions/cosine8.py
```python
import math
import torch
GLOBAL_MAXIMIZER = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
GLOBAL_MAXIMUM = 0.8
def cosine8(X):
r"""8d Cosine Mixture test function.
8-dimensional function (usually evaluated on `[-1, 1]^8`):
`f(x) = 0.1 sum_{i=1}^8 cos(5 pi x_i) - sum_{i=1}^8 x_i^2'
f has one maximizer for its global maximum at
`z_1 = (0, 0, ..., 0)`
with `f(z_1) = 0.8`
Args:
X: A Tensor of size `8` or `k x 8` (`k` batch evaluations).
Returns:
`f(X)`, the value of the 8d Cosine Mixture function.
"""
batch = X.ndimension() > 1
X = X if batch else X.unsqueeze(0)
result = 0.1 * (torch.cos(5.0 * math.pi * X)).sum(dim=-1) - (X ** 2).sum(dim=-1)
return result if batch else result.squeeze(0)
```
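And the analogous check for the 8d Cosine Mixture function, which attains its maximum of 0.8 at the origin:
```python
import torch
from botorch.test_functions.cosine8 import GLOBAL_MAXIMIZER, GLOBAL_MAXIMUM, cosine8

X = torch.tensor(GLOBAL_MAXIMIZER)
assert abs(cosine8(X).item() - GLOBAL_MAXIMUM) < 1e-6

print(cosine8(torch.zeros(3, 8)))  # batch of three points, each evaluating to 0.8
```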
#### File: test/sampling/test_qmc.py
```python
import math
import numpy as np
import torch
from botorch.sampling.qmc import MultivariateNormalQMCEngine, NormalQMCEngine
from botorch.utils.sampling import manual_seed
from scipy.stats import shapiro
from ..botorch_test_case import BotorchTestCase
class NormalQMCTests(BotorchTestCase):
def test_NormalQMCEngine(self):
# d = 1
engine = NormalQMCEngine(d=1)
samples = engine.draw()
self.assertEqual(samples.dtype, torch.float)
self.assertEqual(samples.shape, torch.Size([1, 1]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, 1]))
# d = 2
engine = NormalQMCEngine(d=2)
samples = engine.draw()
self.assertEqual(samples.shape, torch.Size([1, 2]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, 2]))
# test double dtype
samples = engine.draw(dtype=torch.double)
self.assertEqual(samples.dtype, torch.double)
def test_NormalQMCEngineInvTransform(self):
# d = 1
engine = NormalQMCEngine(d=1, inv_transform=True)
samples = engine.draw()
self.assertEqual(samples.dtype, torch.float)
self.assertEqual(samples.shape, torch.Size([1, 1]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, 1]))
# d = 2
engine = NormalQMCEngine(d=2, inv_transform=True)
samples = engine.draw()
self.assertEqual(samples.shape, torch.Size([1, 2]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, 2]))
# test double dtype
samples = engine.draw(dtype=torch.double)
self.assertEqual(samples.dtype, torch.double)
def test_NormalQMCEngineSeeded(self):
# test even dimension
engine = NormalQMCEngine(d=2, seed=12345)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, torch.float)
samples_expected = torch.tensor(
[[-0.63099602, -1.32950772], [0.29625805, 1.86425618]]
)
self.assertTrue(torch.allclose(samples, samples_expected))
# test odd dimension
engine = NormalQMCEngine(d=3, seed=12345)
samples = engine.draw(n=2)
samples_expected = torch.tensor(
[
[1.83169884, -1.40473647, 0.24334828],
[0.36596099, 1.2987395, -1.47556275],
]
)
self.assertTrue(torch.allclose(samples, samples_expected))
def test_NormalQMCEngineSeededOut(self):
# test even dimension
engine = NormalQMCEngine(d=2, seed=12345)
out = torch.empty(2, 2)
self.assertIsNone(engine.draw(n=2, out=out))
samples_expected = torch.tensor(
[[-0.63099602, -1.32950772], [0.29625805, 1.86425618]]
)
self.assertTrue(torch.allclose(out, samples_expected))
# test odd dimension
engine = NormalQMCEngine(d=3, seed=12345)
out = torch.empty(2, 3)
self.assertIsNone(engine.draw(n=2, out=out))
samples_expected = torch.tensor(
[
[1.83169884, -1.40473647, 0.24334828],
[0.36596099, 1.2987395, -1.47556275],
]
)
self.assertTrue(torch.allclose(out, samples_expected))
def test_NormalQMCEngineSeededInvTransform(self):
# test even dimension
engine = NormalQMCEngine(d=2, seed=12345, inv_transform=True)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, torch.float)
samples_expected = torch.tensor(
[[-0.41622922, 0.46622792], [-0.96063897, -0.75568963]]
)
self.assertTrue(torch.allclose(samples, samples_expected))
# test odd dimension
engine = NormalQMCEngine(d=3, seed=12345, inv_transform=True)
samples = engine.draw(n=2)
samples_expected = torch.tensor(
[
[-1.40525266, 1.37652443, -0.8519666],
[-0.166497, -2.3153681, -0.15975676],
]
)
self.assertTrue(torch.allclose(samples, samples_expected))
def test_NormalQMCEngineShapiro(self):
engine = NormalQMCEngine(d=2, seed=12345)
samples = engine.draw(n=250)
self.assertEqual(samples.dtype, torch.float)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.numpy().transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
def test_NormalQMCEngineShapiroInvTransform(self):
engine = NormalQMCEngine(d=2, seed=12345, inv_transform=True)
samples = engine.draw(n=250)
self.assertEqual(samples.dtype, torch.float)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.numpy().transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
class MultivariateNormalQMCTests(BotorchTestCase):
def test_MultivariateNormalQMCEngineShapeErrors(self):
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=torch.zeros(2), cov=torch.zeros(2, 1))
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=torch.zeros(1), cov=torch.eye(2))
def test_MultivariateNormalQMCEngineNonPSD(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
# try with non-psd, non-pd cov and expect an assertion error
mean = torch.zeros(2, device=device, dtype=dtype)
cov = torch.tensor([[1, 2], [2, 1]], device=device, dtype=dtype)
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=mean, cov=cov)
def test_MultivariateNormalQMCEngineNonPSD_cuda(self):
if torch.cuda.is_available():
self.test_MultivariateNormalQMCEngineNonPSD(cuda=True)
def test_MultivariateNormalQMCEngineNonPD(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
mean = torch.zeros(3, device=device, dtype=dtype)
cov = torch.tensor(
[[1, 0, 1], [0, 1, 1], [1, 1, 2]], device=device, dtype=dtype
)
# try with non-pd but psd cov; should work
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov)
self.assertTrue(engine._corr_matrix is not None)
def test_MultivariateNormalQMCEngineNonPD_cuda(self):
if torch.cuda.is_available():
self.test_MultivariateNormalQMCEngineNonPD(cuda=True)
def test_MultivariateNormalQMCEngineSymmetric(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
# try with non-symmetric cov and expect an error
mean = torch.zeros(2, device=device, dtype=dtype)
cov = torch.tensor([[1, 0], [2, 1]], device=device, dtype=dtype)
with self.assertRaises(ValueError):
MultivariateNormalQMCEngine(mean=mean, cov=cov)
def test_MultivariateNormalQMCEngineSymmetric_cuda(self):
if torch.cuda.is_available():
self.test_MultivariateNormalQMCEngineSymmetric(cuda=True)
def test_MultivariateNormalQMCEngine(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
# d = 1 scalar
mean = torch.tensor([0], device=device, dtype=dtype)
cov = torch.tensor([[5]], device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov)
samples = engine.draw()
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertEqual(samples.shape, torch.Size([1, 1]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, 1]))
# d = 2 list
mean = torch.tensor([0, 1], device=device, dtype=dtype)
cov = torch.eye(2, device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov)
samples = engine.draw()
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertEqual(samples.shape, torch.Size([1, 2]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, 2]))
# d = 3 Tensor
mean = torch.tensor([0, 1, 2], device=device, dtype=dtype)
cov = torch.eye(3, device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov)
samples = engine.draw()
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertEqual(samples.shape, torch.Size([1, 3]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, 3]))
def test_MultivariateNormalQMCEngine_cuda(self):
if torch.cuda.is_available():
self.test_MultivariateNormalQMCEngine(cuda=True)
def test_MultivariateNormalQMCEngineInvTransform(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
# d = 1 scalar
mean = torch.tensor([0], device=device, dtype=dtype)
cov = torch.tensor([[5]], device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, inv_transform=True)
samples = engine.draw()
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertEqual(samples.shape, torch.Size([1, 1]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, 1]))
# d = 2 list
mean = torch.tensor([0, 1], device=device, dtype=dtype)
cov = torch.eye(2, device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, inv_transform=True)
samples = engine.draw()
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertEqual(samples.shape, torch.Size([1, 2]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, 2]))
# d = 3 Tensor
mean = torch.tensor([0, 1, 2], device=device, dtype=dtype)
cov = torch.eye(3, device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, inv_transform=True)
samples = engine.draw()
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertEqual(samples.shape, torch.Size([1, 3]))
samples = engine.draw(n=5)
self.assertEqual(samples.shape, torch.Size([5, 3]))
def test_MultivariateNormalQMCEngineInvTransform_cuda(self):
if torch.cuda.is_available():
self.test_MultivariateNormalQMCEngineInvTransform(cuda=True)
def test_MultivariateNormalQMCEngineSeeded(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
# test even dimension
with manual_seed(54321):
a = torch.randn(2, 2)
cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()
mean = torch.zeros(2, device=device, dtype=dtype)
cov = cov.to(device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
samples_expected = torch.tensor(
[[-0.849047422, -0.713852942], [0.398635030, 1.350660801]],
device=device,
dtype=dtype,
)
self.assertTrue(torch.allclose(samples, samples_expected))
# test odd dimension
with manual_seed(54321):
a = torch.randn(3, 3)
cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()
mean = torch.zeros(3, device=device, dtype=dtype)
cov = cov.to(device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean, cov, seed=12345)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
samples_expected = torch.tensor(
[
[3.113158941, -3.262257099, -0.819938779],
[0.621987879, 2.352285624, -1.992680788],
],
device=device,
dtype=dtype,
)
self.assertTrue(torch.allclose(samples, samples_expected))
def test_MultivariateNormalQMCEngineSeeded_cuda(self):
if torch.cuda.is_available():
self.test_MultivariateNormalQMCEngineSeeded(cuda=True)
def test_MultivariateNormalQMCEngineSeededOut(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
# test even dimension
with manual_seed(54321):
a = torch.randn(2, 2)
cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()
mean = torch.zeros(2, device=device, dtype=dtype)
cov = cov.to(device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
out = torch.empty(2, 2, device=device, dtype=dtype)
self.assertIsNone(engine.draw(n=2, out=out))
samples_expected = torch.tensor(
[[-0.849047422, -0.713852942], [0.398635030, 1.350660801]],
device=device,
dtype=dtype,
)
self.assertTrue(torch.allclose(out, samples_expected))
# test odd dimension
with manual_seed(54321):
a = torch.randn(3, 3)
cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()
mean = torch.zeros(3, device=device, dtype=dtype)
cov = cov.to(device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean, cov, seed=12345)
out = torch.empty(2, 3, device=device, dtype=dtype)
self.assertIsNone(engine.draw(n=2, out=out))
samples_expected = torch.tensor(
[
[3.113158941, -3.262257099, -0.819938779],
[0.621987879, 2.352285624, -1.992680788],
],
device=device,
dtype=dtype,
)
self.assertTrue(torch.allclose(out, samples_expected))
def test_MultivariateNormalQMCEngineSeededOut_cuda(self):
if torch.cuda.is_available():
self.test_MultivariateNormalQMCEngineSeededOut(cuda=True)
def test_MultivariateNormalQMCEngineSeededInvTransform(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
# test even dimension
with manual_seed(54321):
a = torch.randn(2, 2)
cov = a @ a.transpose(-1, -2) + torch.rand(2).diag()
mean = torch.zeros(2, device=device, dtype=dtype)
cov = cov.to(device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
samples_expected = torch.tensor(
[[-0.560064316, 0.629113674], [-1.292604208, -0.048077226]],
device=device,
dtype=dtype,
)
self.assertTrue(torch.allclose(samples, samples_expected))
# test odd dimension
with manual_seed(54321):
a = torch.randn(3, 3)
cov = a @ a.transpose(-1, -2) + torch.rand(3).diag()
mean = torch.zeros(3, device=device, dtype=dtype)
cov = cov.to(device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=2)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
samples_expected = torch.tensor(
[
[-2.388370037, 3.071142435, -0.319439292],
[-0.282978594, -4.350236893, -1.085214734],
],
device=device,
dtype=dtype,
)
self.assertTrue(torch.allclose(samples, samples_expected))
def test_MultivariateNormalQMCEngineSeededInvTransform_cuda(self):
if torch.cuda.is_available():
self.test_MultivariateNormalQMCEngineSeededInvTransform(cuda=True)
def test_MultivariateNormalQMCEngineShapiro(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
# test the standard case
mean = torch.zeros(2, device=device, dtype=dtype)
cov = torch.eye(2, device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=250)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
# test the correlated, non-zero mean case
mean = torch.tensor([1.0, 2.0], device=device, dtype=dtype)
cov = torch.tensor([[1.5, 0.5], [0.5, 1.5]], device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=250)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0) - mean) < 1e-2))
self.assertTrue(
torch.all(torch.abs(samples.std(dim=0) - math.sqrt(1.5)) < 1e-2)
)
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# check covariance
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1] - 0.5), 1e-2)
def test_MultivariateNormalQMCEngineShapiro_cuda(self):
if torch.cuda.is_available():
self.test_MultivariateNormalQMCEngineShapiro(cuda=True)
def test_MultivariateNormalQMCEngineShapiroInvTransform(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
# test the standard case
mean = torch.zeros(2, device=device, dtype=dtype)
cov = torch.eye(2, device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=250)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# make sure samples are uncorrelated
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
# test the correlated, non-zero mean case
mean = torch.tensor([1.0, 2.0], device=device, dtype=dtype)
cov = torch.tensor([[1.5, 0.5], [0.5, 1.5]], device=device, dtype=dtype)
engine = MultivariateNormalQMCEngine(
mean=mean, cov=cov, seed=12345, inv_transform=True
)
samples = engine.draw(n=250)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0) - mean) < 1e-2))
self.assertTrue(
torch.all(torch.abs(samples.std(dim=0) - math.sqrt(1.5)) < 1e-2)
)
# perform Shapiro-Wilk test for normality
samples = samples.cpu().numpy()
for i in (0, 1):
_, pval = shapiro(samples[:, i])
self.assertGreater(pval, 0.9)
# check covariance
cov = np.cov(samples.transpose())
self.assertLess(np.abs(cov[0, 1] - 0.5), 1e-2)
def test_MultivariateNormalQMCEngineShapiroInvTransform_cuda(self):
if torch.cuda.is_available():
self.test_MultivariateNormalQMCEngineShapiroInvTransform(cuda=True)
def test_MultivariateNormalQMCEngineDegenerate(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
# X, Y iid standard Normal and Z = X + Y, random vector (X, Y, Z)
mean = torch.zeros(3, device=device, dtype=dtype)
cov = torch.tensor(
[[1, 0, 1], [0, 1, 1], [1, 1, 2]], device=device, dtype=dtype
)
engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
samples = engine.draw(n=2000)
self.assertEqual(samples.dtype, dtype)
self.assertEqual(samples.device.type, device.type)
self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
self.assertTrue(torch.abs(torch.std(samples[:, 0]) - 1) < 1e-2)
self.assertTrue(torch.abs(torch.std(samples[:, 1]) - 1) < 1e-2)
self.assertTrue(torch.abs(torch.std(samples[:, 2]) - math.sqrt(2)) < 1e-2)
for i in (0, 1, 2):
_, pval = shapiro(samples[:, i].cpu().numpy())
self.assertGreater(pval, 0.9)
cov = np.cov(samples.cpu().numpy().transpose())
self.assertLess(np.abs(cov[0, 1]), 1e-2)
self.assertLess(np.abs(cov[0, 2] - 1), 1e-2)
# check to see if X + Y = Z almost exactly
self.assertTrue(
torch.all(
torch.abs(samples[:, 0] + samples[:, 1] - samples[:, 2]) < 1e-5
)
)
def test_MultivariateNormalQMCEngineDegenerate_cuda(self):
if torch.cuda.is_available():
self.test_MultivariateNormalQMCEngineDegenerate(cuda=True)
```
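The tests above exercise the two engines through their public constructors; a minimal standalone usage sketch looks like this:
```python
import torch
from botorch.sampling.qmc import NormalQMCEngine, MultivariateNormalQMCEngine

engine = NormalQMCEngine(d=2, seed=0)
z = engine.draw(n=4)          # 4 x 2 quasi-random standard normal draws

mean = torch.tensor([1.0, 2.0])
cov = torch.tensor([[1.5, 0.5], [0.5, 1.5]])
mvn_engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=0)
x = mvn_engine.draw(n=4)      # draws with the requested mean and covariance

print(z.shape, x.shape)       # torch.Size([4, 2]) torch.Size([4, 2])
```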
#### File: test/test_functions/test_aug_hartmann6.py
```python
import torch
from botorch.test_functions.aug_hartmann6 import GLOBAL_MAXIMIZER, neg_aug_hartmann6
from botorch.test_functions.hartmann6 import GLOBAL_MAXIMUM
from ..botorch_test_case import BotorchTestCase
class TestNegAugHartmann6(BotorchTestCase):
def test_single_eval_neg_aug_hartmann6(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.zeros(7, device=device, dtype=dtype)
res = neg_aug_hartmann6(X)
self.assertEqual(res.dtype, dtype)
self.assertEqual(res.device.type, device.type)
self.assertEqual(res.shape, torch.Size())
def test_single_eval_neg_aug_hartmann6_cuda(self):
if torch.cuda.is_available():
self.test_single_eval_neg_aug_hartmann6(cuda=True)
def test_batch_eval_neg_aug_hartmann6(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.zeros(2, 7, device=device, dtype=dtype)
res = neg_aug_hartmann6(X)
self.assertEqual(res.dtype, dtype)
self.assertEqual(res.device.type, device.type)
self.assertEqual(res.shape, torch.Size([2]))
def test_batch_eval_neg_aug_hartmann6_cuda(self):
if torch.cuda.is_available():
self.test_batch_eval_neg_aug_hartmann6(cuda=True)
def test_neg_aug_hartmann6_global_maximum(self, cuda=False):
device = torch.device("scuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.tensor(GLOBAL_MAXIMIZER, device=device, dtype=dtype)
res = neg_aug_hartmann6(X)
self.assertAlmostEqual(res.item(), GLOBAL_MAXIMUM, places=4)
def test_neg_aug_hartmann6_global_maximum_cuda(self):
if torch.cuda.is_available():
            self.test_neg_aug_hartmann6_global_maximum(cuda=True)
```
#### File: test/test_functions/test_holder_table.py
```python
import torch
from botorch.test_functions.holder_table import (
GLOBAL_MAXIMIZERS,
GLOBAL_MAXIMUM,
neg_holder_table,
)
from ..botorch_test_case import BotorchTestCase
class TestNegHolderTable(BotorchTestCase):
def test_single_eval_neg_holder_table(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.zeros(2, device=device, dtype=dtype)
res = neg_holder_table(X)
self.assertEqual(res.dtype, dtype)
self.assertEqual(res.device.type, device.type)
self.assertEqual(res.shape, torch.Size())
self.assertTrue(res.abs().item() < 1e-6)
def test_single_eval_neg_holder_table_cuda(self):
if torch.cuda.is_available():
self.test_single_eval_neg_holder_table(cuda=True)
def test_batch_eval_neg_holder_table(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.zeros(2, 2, device=device, dtype=dtype)
res = neg_holder_table(X)
self.assertEqual(res.dtype, dtype)
self.assertEqual(res.device.type, device.type)
self.assertEqual(res.shape, torch.Size([2]))
self.assertTrue(res.abs().sum().item() < 1e-6)
def test_batch_eval_neg_holder_table_cuda(self):
if torch.cuda.is_available():
self.test_batch_eval_neg_holder_table(cuda=True)
def test_neg_holder_table_global_maxima(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
for dtype in (torch.float, torch.double):
X = torch.tensor(
GLOBAL_MAXIMIZERS, device=device, dtype=dtype, requires_grad=True
)
res = neg_holder_table(X)
grad = torch.autograd.grad([*res], X)[0]
self.assertLess((res - GLOBAL_MAXIMUM).abs().max().item(), 1e-5)
self.assertLess(grad.abs().max().item(), 1e-3)
def test_neg_holder_table_global_maxima_cuda(self):
if torch.cuda.is_available():
self.test_neg_holder_table_global_maxima(cuda=True)
``` |
{
"source": "jmrf/active-qa",
"score": 2
} |
#### File: px/environments/bidaf.py
```python
import json
import math
import nltk
import os
import tensorflow as tf
from third_party.bi_att_flow.basic import read_data as bidaf_data
from third_party.bi_att_flow.basic import cli as bidaf_cli
from third_party.bi_att_flow.basic import evaluator as bidaf_eval
from third_party.bi_att_flow.basic import graph_handler as bidaf_graph
from third_party.bi_att_flow.basic import model as bidaf_model
class BidafEnvironment(object):
"""Environment containing the BiDAF model.
This environment loads a BiDAF model and preprocessed data for a chosen SQuAD
dataset. The environment is queried with a pointer to an existing datapoint,
which contains a preprocessed SQuAD document, and a question. BiDAF is run
using the given question against the document and the top answer with its
score is returned.
Attributes:
config: BiDAF configuration read from cli.py
data: Pre-processed SQuAD dataset.
evaluator: BiDAF evaluation object.
graph_handler: BiDAF object used to manage the TF graph.
sess: single Tensorflow session used by the environment.
model: A BiDAF Model object.
"""
def __init__(self,
data_dir,
shared_path,
model_dir,
docid_separator='###',
debug_mode=False,
load_test=False,
load_impossible_questions=False):
"""Constructor loads the BiDAF configuration, model and data.
Args:
data_dir: Directory containing preprocessed SQuAD data.
shared_path: Path to shared data generated at training time.
      model_dir: Directory containing parameters of a pre-trained BiDAF model.
docid_separator: Separator used to split suffix off the docid string.
debug_mode: If true logs additional debug information.
load_test: Whether the test set should be loaded as well.
load_impossible_questions: Whether info about impossibility of questions
should be loaded.
"""
self.config = bidaf_cli.get_config()
self.config.save_dir = model_dir
self.config.data_dir = data_dir
self.config.shared_path = shared_path
self.config.mode = 'forward'
self.docid_separator = docid_separator
self.debug_mode = debug_mode
self.datasets = ['train', 'dev']
if load_test:
self.datasets.append('test')
data_filter = None
self.data = dict()
for dataset in self.datasets:
self.data[dataset] = bidaf_data.read_data(
self.config, dataset, True, data_filter=data_filter)
bidaf_data.update_config(self.config, list(self.data.values()))
models = bidaf_model.get_model(self.config)
self.evaluator = bidaf_eval.MultiGPUF1Evaluator(self.config, models)
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
self.graph_handler = bidaf_graph.GraphHandler(self.config, models[0])
self.graph_handler.initialize(self.sess)
nltk_data_path = os.path.join(os.path.expanduser('~'), 'data')
nltk.data.path.append(nltk_data_path)
self.impossible_ids = set()
if load_impossible_questions:
tf.logging.info('Loading impossible question ids.')
for dataset in self.datasets:
self.impossible_ids.update(self._ReadImpossiblities(dataset, data_dir))
if self.debug_mode:
tf.logging.info('Loaded {} impossible question ids.'.format(
len(self.impossible_ids)))
def _ReadImpossiblities(self, dataset, data_dir):
"""Collects all the docids for impossible questions."""
data_path = os.path.join(data_dir, '{}-v2.0.json'.format(dataset))
impossible_ids = []
with tf.gfile.Open(data_path, 'r') as fh:
data = json.load(fh)
for document in data['data']:
for paragraph in document['paragraphs']:
for question in paragraph['qas']:
if question['is_impossible']:
impossible_ids.append(question['id'])
if self.debug_mode:
tf.logging.info('Loaded {} impossible question ids from {}.'.format(
len(impossible_ids), dataset))
return impossible_ids
def _WordTokenize(self, text):
"""Tokenizes the text NLTK for consistency with BiDAF."""
return [
token.replace("''", '"').replace('``', '"')
for token in nltk.word_tokenize(text)
]
def _PreprocessQaData(self, questions, document_ids):
"""Prepares the BiDAF Data object.
Loads a batch of SQuAD datapoints, identified by their 'ids' field. The
questions are replaced with those specified in the input. All datapoints
must come from the same original dataset (train, dev or test), else the
shared data will be incorrect. The first id in document_ids is used to
    determine the dataset; a KeyError is raised if the other ids are not in this
dataset.
Args:
questions: List of strings used to replace the original question.
document_ids: Identifiers for the SQuAD datapoints to use.
Returns:
data: BiDAF Data object containing the desired datapoints only.
data.shared: The appropriate shared data from the dataset containing
the ids in document_ids
id2questions_dict: A dict mapping docids to original questions and
rewrites.
Raises:
KeyError: Occurs if it is not the case that all document_ids are present
in a single preloaded dataset.
"""
first_docid = document_ids[0].split(self.docid_separator)[0]
if first_docid in self.data['train'].data['ids']:
dataset = self.data['train']
elif first_docid in self.data['dev'].data['ids']:
dataset = self.data['dev']
elif 'test' in self.data and first_docid in self.data['test'].data['ids']:
dataset = self.data['test']
else:
raise KeyError('Document id not present: {}'.format(first_docid))
data_indices = [
dataset.data['ids'].index(document_ids[i].split(
self.docid_separator)[0]) for i in range(len(document_ids))
]
data_out = dict()
# Copies relevant datapoint, retaining the input docids.
for key in dataset.data.keys():
if key == 'ids':
data_out[key] = document_ids
else:
data_out[key] = [dataset.data[key][i] for i in data_indices]
if self.debug_mode:
for q in data_out['q']:
tf.logging.info('Original question: {}'.format(
' '.join(q).encode('utf-8')))
# Replaces the question in the datapoint for the rewrite.
id2questions_dict = dict()
for i in range(len(questions)):
id2questions_dict[data_out['ids'][i]] = dict()
id2questions_dict[data_out['ids'][i]]['original'] = ' '.join(
data_out['q'][i])
data_out['q'][i] = self._WordTokenize(questions[i])
if len(data_out['q'][i]) > self.config.max_ques_size:
tf.logging.info('Truncated question from {} to {}'.format(
len(data_out['q'][i]), self.config.max_ques_size))
data_out['q'][i] = data_out['q'][i][:self.config.max_ques_size]
id2questions_dict[data_out['ids'][i]]['raw_rewrite'] = questions[i]
id2questions_dict[data_out['ids'][i]]['rewrite'] = ' '.join(
data_out['q'][i])
data_out['cq'][i] = [list(qij) for qij in data_out['q'][i]]
if self.debug_mode:
for q in data_out['q']:
tf.logging.info('New question: {}'.format(
' '.join(q).encode('utf-8')))
return data_out, dataset.shared, id2questions_dict
def IsImpossible(self, document_id):
return document_id in self.impossible_ids
def GetAnswers(self, questions, document_ids):
"""Computes an answer for a given question from a SQuAD datapoint.
Runs a BiDAF model on a specified SQuAD datapoint, but using the input
question in place of the original.
Args:
questions: List of strings used to replace the original question.
document_ids: Identifiers for the SQuAD datapoints to use.
Returns:
e.id2answer_dict: A dict containing the answers and their scores.
e.loss: Scalar training loss for the entire batch.
id2questions_dict: A dict mapping docids to original questions and
rewrites.
Raises:
ValueError: If the number of questions and document_ids differ.
ValueError: If the document_ids are not unique.
"""
if len(questions) != len(document_ids):
raise ValueError('Number of questions and document_ids must be equal.')
if len(document_ids) > len(set(document_ids)):
raise ValueError('document_ids must be unique.')
raw_data, shared, id2questions_dict = self._PreprocessQaData(
questions, document_ids)
data = bidaf_data.DataSet(raw_data, data_type='', shared=shared)
num_batches = int(math.ceil(data.num_examples / self.config.batch_size))
e = None
for multi_batch in data.get_multi_batches(
self.config.batch_size, self.config.num_gpus, num_steps=num_batches):
ei = self.evaluator.get_evaluation(self.sess, multi_batch)
e = ei if e is None else e + ei
if self.debug_mode:
tf.logging.info(e)
self.graph_handler.dump_answer(e, path=self.config.answer_path)
self.graph_handler.dump_eval(e, path=self.config.eval_path)
return e.id2answer_dict, id2questions_dict, e.loss
```
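A heavily hedged instantiation sketch for the environment above; every path and the question/docid pair below are placeholders, and preprocessed SQuAD data plus a trained BiDAF checkpoint must already exist:
```python
from px.environments import bidaf

# all paths and ids below are placeholders for illustration only
env = bidaf.BidafEnvironment(
    data_dir='/path/to/squad/preprocessed',
    shared_path='/path/to/shared.json',
    model_dir='/path/to/bidaf/save_dir',
    load_test=False)

answers, rewrites, loss = env.GetAnswers(
    ['what team won the game ?'],       # rewritten question
    ['placeholder-squad-question-id'])  # must be an id present in the loaded data
print(loss)
```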
#### File: px/environments/bidaf_server.py
```python
r"""gRPC server for the BiDAF environment.
Implementation of a gRPC server for the BiDAF model. Requests contain
questions and document IDs that identify SQuAD datapoints. The responses
contain answers from the BiDAF environment and associated scores.
"""
from concurrent import futures
import time
from absl import app
from absl import flags
from absl import logging
import grpc
from px.environments import bidaf
from px.proto import aqa_pb2
from px.proto import aqa_pb2_grpc
FLAGS = flags.FLAGS
flags.DEFINE_integer('port', 10000, 'Port to listen on.')
flags.DEFINE_string('squad_data_dir', '', 'Directory containing squad data.')
flags.DEFINE_string('bidaf_shared_file', '', 'Shared file produced by BiDAF.')
flags.DEFINE_string('bidaf_model_dir', '', 'Directory of trained BiDAF model.')
flags.DEFINE_integer('worker_threads', 10,
'Number of worker threads running on the server.')
flags.DEFINE_integer('sleep_seconds', 10,
'Number of seconds to wait for a termination event.')
flags.DEFINE_bool('load_test', False,
'Load test data in addition to dev and train.')
flags.DEFINE_bool(
'load_impossible_questions', False, 'For SQuAD v2 impossible '
'questions can be loaded to return a modified reward.')
flags.DEFINE_bool('debug_mode', False,
'If true, log questions, answers, and scores.')
DOCID_SEPARATOR = '###'
class BidafServer(aqa_pb2_grpc.EnvironmentServerServicer):
"""A gRPC server for the BiDAF environment.
Attributes:
environment: A BidafEnvironment object that returns scored answers to
questions.
"""
def __init__(self, *args, **kwargs):
""""Constructor for the BiDAF server."""
data_dir = kwargs.pop('squad_data_dir', None)
shared_file = kwargs.pop('bidaf_shared_file', None)
model_dir = kwargs.pop('bidaf_model_dir', None)
load_test = kwargs.pop('load_test', False)
load_impossible_questions = kwargs.pop('load_impossible_questions', False)
debug_mode = kwargs.pop('debug_mode', False)
self.debug_mode = debug_mode
self._InitializeEnvironment(
data_dir=data_dir,
shared_file=shared_file,
model_dir=model_dir,
load_test=load_test,
load_impossible_questions=load_impossible_questions,
debug_mode=debug_mode)
def _InitializeEnvironment(self, data_dir, shared_file, model_dir, load_test,
load_impossible_questions, debug_mode):
"""Initilizes the BiDAF model environment.
Args:
data_dir: Directory containing preprocessed SQuAD data.
shared_file: Path to shared data generated at training time.
      model_dir: Directory containing parameters of a pre-trained BiDAF
        model.
load_test: Whether the test set should be loaded as well.
load_impossible_questions: Whether info about impossibility of questions
should be loaded.
debug_mode: Whether to log debug information.
"""
self._environment = bidaf.BidafEnvironment(
data_dir,
shared_file,
model_dir,
docid_separator=DOCID_SEPARATOR,
load_test=load_test,
load_impossible_questions=load_impossible_questions,
debug_mode=debug_mode)
def GetObservations(self, request, context):
"""Returns answers to given questions.
Passes questions and document ids contained in the request to the Bidaf
environment and repackages the scored answers coming from the environment
into the response.
Args:
request: An EnvironmentRequest containing questions and docids.
context: The RPC context.
Returns:
The EnvironmentResponse filled with the resulting answers.
"""
if self.debug_mode:
start_time = time.time()
response = aqa_pb2.EnvironmentResponse()
if not request.queries:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Empty list of queries provided in the request')
return response
questions = list()
document_ids = list()
index = 0
impossible_ids = set()
for query in request.queries:
questions.append(query.question)
if query.is_impossible:
impossible_ids.add(query.id)
# Add an index to each id to make them unique, as required by BiDAF. This
# augmentation of the docid is for BiDAF internal use and is not visible
# to the client.
unique_id = '{:s}{:s}{:d}'.format(query.id, DOCID_SEPARATOR, index)
index += 1
document_ids.append(unique_id)
if self.debug_mode:
logging.info('Batch contains %s impossible questions.',
len(impossible_ids))
try:
answer_dict, questions_dict, _ = self._environment.GetAnswers(
questions, document_ids)
except KeyError as e:
context.set_code(grpc.StatusCode.INTERNAL)
context.set_details('KeyError: {}'.format(e))
return response
# -2 for the entry containing the scores and f1_scores.
    if len(answer_dict) - 2 != len(request.queries):
      context.set_code(grpc.StatusCode.INTERNAL)
      context.set_details('Unexpected number of answers: {} vs. {}'.format(
          len(answer_dict) - 2, len(request.queries)))
return response
for docid in answer_dict.keys():
if docid == 'scores' or docid == 'f1_scores':
continue
output_response = response.responses.add()
output_response.id = docid.split(DOCID_SEPARATOR)[0]
answer = output_response.answers.add()
answer.text = answer_dict[docid]
answer.scores['environment_confidence'] = answer_dict['scores'][docid]
output_response.question = questions_dict[docid]['raw_rewrite']
output_response.processed_question = questions_dict[docid]['rewrite']
# Set an F1 score of 1.0 for impossible questions if the is_impossible
# flag was set to true in the request. If is_impossible is set for
# possible questions an F1 score of 0.0 is returned.
if output_response.id in impossible_ids:
if self._environment.IsImpossible(output_response.id):
answer.scores['f1'] = 1.0
else:
answer.scores['f1'] = 0.0
else:
answer.scores['f1'] = answer_dict['f1_scores'][docid]
if self.debug_mode:
logging.info('{} questions processed in {}'.format(
len(request.queries),
time.time() - start_time))
return response
def main(unused_argv):
logging.info('Loading server...')
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=FLAGS.worker_threads))
aqa_pb2_grpc.add_EnvironmentServerServicer_to_server(
BidafServer(
'active_qa.EnvironmentServer',
'BiDAF environment server',
squad_data_dir=FLAGS.squad_data_dir,
bidaf_shared_file=FLAGS.bidaf_shared_file,
bidaf_model_dir=FLAGS.bidaf_model_dir,
load_test=FLAGS.load_test,
load_impossible_questions=FLAGS.load_impossible_questions,
debug_mode=FLAGS.debug_mode), server)
port = FLAGS.port
logging.info('Running server on port {}...'.format(port))
server.add_insecure_port('[::]:{}'.format(port))
server.start()
# Prevent the main thread from exiting.
try:
while True:
time.sleep(FLAGS.sleep_seconds)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
app.run(main)
```
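A minimal client sketch for the server above. The request and response field names (`queries`, `id`, `question`, `is_impossible`, `responses`, `answers`, `text`, `scores`) are the ones used by the server code; the SQuAD id is a placeholder:

```python
import grpc

from px.proto import aqa_pb2
from px.proto import aqa_pb2_grpc

channel = grpc.insecure_channel('localhost:10000')
stub = aqa_pb2_grpc.EnvironmentServerStub(channel)

request = aqa_pb2.EnvironmentRequest()
query = request.queries.add()
query.id = '56be4db0acb8001400a502ec'  # hypothetical SQuAD id
query.question = 'what team won super bowl 50 ?'
query.is_impossible = False

response = stub.GetObservations(request)
for output in response.responses:
    for answer in output.answers:
        print(output.id, answer.text, dict(answer.scores))
```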
#### File: px/proto/aqa_pb2_grpc.py
```python
import grpc
from px.proto import aqa_pb2 as aqa__pb2
class EnvironmentServerStub(object):
"""gRPC service for an environment.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetObservations = channel.unary_unary(
'/active_qa.EnvironmentServer/GetObservations',
request_serializer=aqa__pb2.EnvironmentRequest.SerializeToString,
response_deserializer=aqa__pb2.EnvironmentResponse.FromString,
)
class EnvironmentServerServicer(object):
"""gRPC service for an environment.
"""
def GetObservations(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_EnvironmentServerServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetObservations': grpc.unary_unary_rpc_method_handler(
servicer.GetObservations,
request_deserializer=aqa__pb2.EnvironmentRequest.FromString,
response_serializer=aqa__pb2.EnvironmentResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'active_qa.EnvironmentServer', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
``` |
{
"source": "jmribeiro/NumPyNeuralNetworkFromScratch",
"score": 2
} |
#### File: jmribeiro/NumPyNeuralNetworkFromScratch/utils.py
```python
from itertools import count
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
def load_ocr_dataset(path, dev_fold=8, test_fold=9):
label_counter = count()
labels = defaultdict(lambda: next(label_counter))
X, y, fold = [], [], []
with open(path) as f:
for line in f:
tokens = line.split()
pixel_value = [int(t) for t in tokens[6:]]
letter_class = labels[tokens[1]]
fold.append(int(tokens[5]))
X.append(pixel_value)
y.append(letter_class)
X, y = np.array(X, dtype='int8'), np.array(y, dtype='int8')
fold = np.array(fold, dtype='int8')
train_idx = (fold != dev_fold) & (fold != test_fold)
X_train, y_train = X[train_idx], y[train_idx]
val_idx = fold == dev_fold
X_val, y_val = X[val_idx], y[val_idx]
test_idx = fold == test_fold
X_test, y_test = X[test_idx], y[test_idx]
return {
"train": (X_train, y_train),
"dev": (X_val, y_val),
"test": (X_test, y_test)
}
def sample_batch(X, y, batch_size):
M = len(X)
B = batch_size
    mini_batch_indices = np.random.choice(M, B)
    X_batch = np.array([X[i] for i in mini_batch_indices])
    y_batch = np.array([y[i] for i in mini_batch_indices])
return X_batch, y_batch
def one_hot_encoding(y, num_classes):
y_one_hot = np.zeros((num_classes, y.shape[0]))
for i, value in enumerate(y):
y_one_hot[value, i] = 1
return y_one_hot
def plot(epochs, validation_accuracies, save=False):
plt.title(f"Training {epochs} epochs on the OCR dataset")
epochs = np.arange(1, epochs + 1)
plt.xlabel('Epoch')
plt.ylabel('Validation Set Accuracy')
plt.xticks(epochs)
plt.plot(epochs, validation_accuracies, color="orange")
if save:
plt.savefig("plot.png")
plt.show()
plt.close()
``` |
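A short usage sketch for the helpers above. The module name and dataset path are assumptions; the file is expected in the standard OCR letter format that `load_ocr_dataset` parses:

```python
from utils import load_ocr_dataset, sample_batch, one_hot_encoding  # assumed module name

data = load_ocr_dataset("letter.data")  # hypothetical path to the OCR dataset file
X_train, y_train = data["train"]

X_batch, y_batch = sample_batch(X_train, y_train, batch_size=32)
Y_one_hot = one_hot_encoding(y_batch, num_classes=26)

print(X_batch.shape)    # (32, num_pixels)
print(Y_one_hot.shape)  # (26, 32) -- classes along the first axis
```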
{
"source": "jmribeiro/pddpg-hfo",
"score": 3
} |
#### File: pddpg-hfo/agents/base_agent.py
```python
class BaseAgent(object):
def __init__(self, observation_space, action_space):
self.observation_space = observation_space
self.action_space = action_space
self.obs_dim = self.observation_space.shape[0] # dimension for single agent
self.act_dim = self.action_space.spaces[0].n
def act(self, state):
"""
Returns action with parameters to take in given state.
"""
raise NotImplementedError
# def step(self, state, action, reward, next_state, next_action, terminal, time_steps=1):
# """
# Performs a learning step given a (s,a,r,s',a') sample.
#
# :param state: previous observed state (s)
# :param action: action taken in previous state (a)
# :param reward: reward for the transition (r)
# :param next_state: the resulting observed state (s')
# :param next_action: action taken in next state (a')
# :param terminal: whether the episode is over
# :param time_steps: number of time steps the action took to execute (default=1)
# :return:
# """
# raise NotImplementedError
```
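A minimal sketch of subclassing the interface above with a random policy; the observation and action spaces are whatever the environment supplies:

```python
import numpy as np

from agents.base_agent import BaseAgent  # module path as in this repo


class RandomDiscreteAgent(BaseAgent):
    """Picks one of the discrete action types uniformly at random."""

    def act(self, state):
        return np.random.randint(self.act_dim)
```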
#### File: pddpg-hfo/envs/hfo_connector.py
```python
import os
import signal
import socket
import subprocess
import time
from contextlib import closing
from gym import error
try:
import hfo_py
except ImportError as e:
raise error.DependencyNotInstalled(
"{}. (Try 'pip install -e .' to install HFO dependencies.')".format(e))
import logging
logger = logging.getLogger(__name__)
def find_free_port():
"""Find a random free port. Does not guarantee that the port will still be free after return.
Note: HFO takes three consecutive port numbers, this only checks one.
Source: https://github.com/crowdAI/marLo/blob/master/marlo/utils.py
:rtype: `int`
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
class HFOConnector(object):
def __init__(self):
self.viewer = None
self.server_process = None
self.server_port = 6000
self.hfo_path = hfo_py.get_hfo_path()
print('HFO path: ', self.hfo_path)
    def __del__(self):
        if self.server_process is not None:
            os.kill(self.server_process.pid, signal.SIGINT)
        if self.viewer is not None:
            os.kill(self.viewer.pid, signal.SIGKILL)
def start_hfo_server(self, frames_per_trial=1000,
untouched_time=100, start_viewer=True,
offense_agents=1,
defense_agents=0, offense_npcs=0,
defense_npcs=0, sync_mode=True, port=None,
offense_on_ball=0, fullstate=True, seed=-1,
ball_x_min=0.0, ball_x_max=0.2,
verbose=False, log_game=False,
agent_play_goalie=True,
log_dir="log"):
"""
Starts the Half-Field-Offense server.
frames_per_trial: Episodes end after this many steps.
untouched_time: Episodes end if the ball is untouched for this many steps.
offense_agents: Number of user-controlled offensive players.
defense_agents: Number of user-controlled defenders.
offense_npcs: Number of offensive bots.
defense_npcs: Number of defense bots.
sync_mode: Disabling sync mode runs server in real time (SLOW!).
port: Port to start the server on.
offense_on_ball: Player to give the ball to at beginning of episode.
fullstate: Enable noise-free perception.
seed: Seed the starting positions of the players and ball.
ball_x_[min/max]: Initialize the ball this far downfield: [0,1]
verbose: Verbose server messages.
log_game: Enable game logging. Logs can be used for replay + visualization.
        log_dir: Directory to place game logs (*.rcg).
        start_viewer: If true, launch the SoccerWindow visualizer (otherwise headless).
        agent_play_goalie: If true, the agent-controlled defender plays the goalie.
        """
if port is None:
port = find_free_port()
self.server_port = port
        cmd = self.hfo_path + " --frames-per-trial %i --untouched-time %i" \
              " --offense-npcs %i --defense-npcs %i --port %i --offense-on-ball %i --seed %i" \
              " --ball-x-min %f --ball-x-max %f --log-dir %s" \
              % (frames_per_trial, untouched_time, offense_npcs, defense_npcs,
                 port, offense_on_ball, seed, ball_x_min, ball_x_max, log_dir)
if offense_agents:
cmd += ' --offense-agents %i' % offense_agents
if defense_agents:
cmd += ' --defense-agents %i' % defense_agents
if not start_viewer:
cmd += ' --headless'
if not sync_mode:
cmd += " --no-sync"
if fullstate:
cmd += " --fullstate"
if verbose:
cmd += " --verbose"
if not log_game:
cmd += " --no-logging"
if agent_play_goalie:
cmd += " --agent-play-goalie"
print('Starting server with command: %s' % cmd)
# return cmd
self.server_process = subprocess.Popen(cmd.split(' '), shell=False)
print('HFO Server Connected')
self.server_process.wait()
# time.sleep(5) # Wait for server to startup before connecting a player
def _start_viewer(self):
"""
Starts the SoccerWindow visualizer. Note the viewer may also be
used with a *.rcg logfile to replay a game. See details at
https://github.com/LARG/HFO/blob/master/doc/manual.pdf.
"""
cmd = hfo_py.get_viewer_path() + " --connect --port %d" % (self.server_port)
self.viewer = subprocess.Popen(cmd.split(' '), shell=False)
def _render(self, mode='human', close=False):
""" Viewer only supports human mode currently. """
if close:
if self.viewer is not None:
os.kill(self.viewer.pid, signal.SIGKILL)
else:
if self.viewer is None:
self._start_viewer()
def close(self):
if self.server_process is not None:
try:
os.kill(self.server_process.pid, signal.SIGKILL)
except Exception:
pass
# class SoccerMultiAgentEnv:
# def __init__(self, num_offense_agents=1, num_defense_agents=1, defense_goalie=True, share_reward=True):
# self.t = {}
# self.observations = {}
# self.rewards = {}
# self.actions = {}
#
# def register(self, agent_name, agent_type):
# self.t[agent_name] = agent_type
# self.observations[agent_name] = []
# self.actions[agent_name] = []
# self.rewards[agent_name] = None
#
# def collect_obs(self, agent_name, features):
# self.observations[agent_name] = features
#
# def get_action(self, agent_name):
# return self.actions[agent_name]
#
# def get_reward(self, agent_name, features):
# return self.rewards[agent_name]
#
# def reset(self):
# pass
# # obs_n = []
# # for agent in self.t:
# # obs_n.append(agent.env.getState())
# # """ Repeats NO-OP action until a new episode begins. """
# # while agent.status == hfo_py.IN_GAME:
# # agent.env.act(hfo_py.NOOP)
# # agent.status = agent.env.step()
# # while agent.status != hfo_py.IN_GAME:
# # agent.env.act(hfo_py.NOOP)
# # agent.status = agent.env.step()
# # # prevent infinite output when server dies
# # if agent.status == hfo_py.SERVER_DOWN:
# # raise ServerDownException("HFO server down!")
# # return obs_n
```
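A minimal sketch of driving the connector above. It assumes the HFO binary shipped with hfo_py is installed; since `start_hfo_server` blocks on `wait()`, the server is launched here in a background thread:

```python
import threading
import time

from envs.hfo_connector import HFOConnector, find_free_port  # module path as in this repo

connector = HFOConnector()
port = find_free_port()

server = threading.Thread(
    target=connector.start_hfo_server,
    kwargs=dict(port=port, offense_agents=1, defense_npcs=1,
                start_viewer=False, sync_mode=True, log_dir="log"),
    daemon=True)
server.start()

time.sleep(5)  # give the server time to come up before an agent connects
print("HFO server listening on port", port)
# ... connect an hfo_py.HFOEnvironment to `port` here ...
connector.close()
```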
#### File: pddpg-hfo/envs/soccer_offense.py
```python
import logging
import math
import numpy as np
from gym import error, spaces
from envs.soccer_env import SoccerEnv
from envs.soccer_env import MID_LEVEL_OFFENSE_ACTION_LOOKUP, STATUS_LOOKUP
import hfo_py
logger = logging.getLogger(__name__)
class SoccerOffenseEnv(SoccerEnv):
"""
    SoccerOffenseEnv implements the same task as SoccerScoreGoal/SoccerEmptyGoal,
    which tasks the agent with approaching the ball, dribbling, and scoring a
    goal. Rewards are given as the agent nears the ball, kicks the ball towards
    the goal, and scores a goal.
The difference is that the reward structure is altered to be consistent
with the Hausknecht paper: "Deep Reinforcement Learning with Parameterised
Action Spaces".
"""
def __init__(self, hfo):
super(SoccerOffenseEnv, self).__init__(hfo)
self.old_ball_prox = 0
self.old_kickable = 0
self.old_ball_dist_goal = 0
self.got_kickable_reward = False
self.first_step = True
# mid level action space
self.low0 = np.array([-1, -1, 0], dtype=np.float32)
self.high0 = np.array([1, 1, 3], dtype=np.float32)
self.low1 = np.array([-1, -1], dtype=np.float32)
self.high1 = np.array([1, 1], dtype=np.float32)
self.low2 = np.array([-1, -1], dtype=np.float32)
self.high2 = np.array([1, 1], dtype=np.float32)
self.action_space = spaces.Tuple((spaces.Discrete(4),
spaces.Box(low=self.low0, high=self.high0, dtype=np.float32),
spaces.Box(low=self.low1, high=self.high1, dtype=np.float32),
spaces.Box(low=self.low2, high=self.high2, dtype=np.float32)))
# # low level action space
# # omits the Tackle/Catch actions, which are useful on defense
# self.low0 = np.array([0, -180], dtype=np.float32)
# self.high0 = np.array([100, 180], dtype=np.float32)
# self.low1 = np.array([-180], dtype=np.float32)
# self.high1 = np.array([180], dtype=np.float32)
# self.low2 = np.array([0, -180], dtype=np.float32)
# self.high2 = np.array([100, 180], dtype=np.float32)
# self.low3 = np.array([-180], dtype=np.float32)
# self.high3 = np.array([180], dtype=np.float32)
# # dash, turn, kick
# self.action_space = spaces.Tuple((spaces.Discrete(3),
# spaces.Box(low=self.low0, high=self.high0, dtype=np.float32),
# spaces.Box(low=self.low1, high=self.high1, dtype=np.float32),
# spaces.Box(low=self.low2, high=self.high2, dtype=np.float32)))
# take mid level actions
def _take_action(self, action):
""" Converts the action space into an HFO action. """
action_type = MID_LEVEL_OFFENSE_ACTION_LOOKUP[action[0]]
if action_type == hfo_py.KICK_TO:
np.clip(action[1:4], self.low0, self.high0, out=action[1:4])
self.hfo.act(action_type, action[1], action[2], action[3])
elif action_type == hfo_py.MOVE_TO:
np.clip(action[4:6], self.low1, self.high1, out=action[4:6])
self.hfo.act(action_type, action[4], action[5])
elif action_type == hfo_py.DRIBBLE_TO:
np.clip(action[6:8], self.low2, self.high2, out=action[6:8])
self.hfo.act(action_type, action[6], action[7])
elif action_type == hfo_py.SHOOT:
self.hfo.act(action_type)
else:
print('Unrecognized action %d' % action_type)
self.hfo.act(hfo_py.NOOP)
# print('\rTaking action:', 'type:', action_type, 'params:', action[1:], end='')
# # take low level actions
# def _take_action(self, action):
# """ Converts the action space into an HFO action. """
# action_type = ACTION_LOOKUP[action[0]]
# if action_type == hfo_py.DASH:
# self.env.act(action_type, action[1], action[2])
# elif action_type == hfo_py.TURN:
# self.env.act(action_type, action[3])
# elif action_type == hfo_py.KICK:
# self.env.act(action_type, action[4], action[5])
# elif action_type == hfo_py.TACKLE:
# self.env.act(action_type, action[6])
# else:
# print('Unrecognized action %d' % action_type)
# self.env.act(hfo_py.NOOP)
# def _get_reward(self):
# """
# Agent is rewarded for minimizing the distance between itself and
# the ball, minimizing the distance between the ball and the goal,
# and scoring a goal.
# """
# current_state = self.hfo.getState()
# # print("State =",current_state)
# # print("len State =",len(current_state))
# ball_proximity = current_state[53]
# goal_proximity = current_state[15]
# ball_dist = 1.0 - ball_proximity
# goal_dist = 1.0 - goal_proximity
# kickable = current_state[12]
# ball_ang_sin_rad = current_state[51]
# ball_ang_cos_rad = current_state[52]
# ball_ang_rad = math.acos(ball_ang_cos_rad)
# if ball_ang_sin_rad < 0:
# ball_ang_rad *= -1.
# goal_ang_sin_rad = current_state[13]
# goal_ang_cos_rad = current_state[14]
# goal_ang_rad = math.acos(goal_ang_cos_rad)
# if goal_ang_sin_rad < 0:
# goal_ang_rad *= -1.
# alpha = max(ball_ang_rad, goal_ang_rad) - min(ball_ang_rad, goal_ang_rad)
# ball_dist_goal = math.sqrt(ball_dist * ball_dist + goal_dist * goal_dist -
# 2. * ball_dist * goal_dist * math.cos(alpha))
# # Compute the difference in ball proximity from the last step
# if not self.first_step:
# ball_prox_delta = ball_proximity - self.old_ball_prox
# kickable_delta = kickable - self.old_kickable
# ball_dist_goal_delta = ball_dist_goal - self.old_ball_dist_goal
# self.old_ball_prox = ball_proximity
# self.old_kickable = kickable
# self.old_ball_dist_goal = ball_dist_goal
# # print(self.env.playerOnBall())
# # print(self.env.playerOnBall().unum)
# # print(self.env.getUnum())
# reward = 0
# if not self.first_step:
# '''# Reward the agent for moving towards the ball
# reward += ball_prox_delta
# if kickable_delta > 0 and not self.got_kickable_reward:
# reward += 1.
# self.got_kickable_reward = True
# # Reward the agent for kicking towards the goal
# reward += 0.6 * -ball_dist_goal_delta
# # Reward the agent for scoring
# if self.status == hfo_py.GOAL:
# reward += 5.0'''
# '''reward = self.__move_to_ball_reward(kickable_delta, ball_prox_delta) + \
# 3. * self.__kick_to_goal_reward(ball_dist_goal_delta) + \
# self.__EOT_reward();'''
# mtb = self.__move_to_ball_reward(kickable_delta, ball_prox_delta)
# ktg = 3. * self.__kick_to_goal_reward(ball_dist_goal_delta)
# eot = self.__EOT_reward()
# reward = mtb + ktg + eot
# # print("mtb: %.06f ktg: %.06f eot: %.06f"%(mtb,ktg,eot))
#
# self.first_step = False
# # print("r =",reward)
# return reward
def __move_to_ball_reward(self, kickable_delta, ball_prox_delta):
reward = 0.
if self.hfo.playerOnBall().unum < 0 or self.hfo.playerOnBall().unum == self.unum:
reward += ball_prox_delta
if kickable_delta >= 1 and not self.got_kickable_reward:
reward += 1.
self.got_kickable_reward = True
return reward
def __kick_to_goal_reward(self, ball_dist_goal_delta):
        if self.hfo.playerOnBall().unum == self.unum:
            return -ball_dist_goal_delta
        elif self.got_kickable_reward:
            return 0.2 * -ball_dist_goal_delta
        return 0.
def __EOT_reward(self):
if self.status == hfo_py.GOAL:
return 5.
elif self.status == hfo_py.CAPTURED_BY_DEFENSE:
return -1.
return 0.
def reset(self):
self.old_ball_prox = 0
self.old_kickable = 0
self.old_ball_dist_goal = 0
self.got_kickable_reward = False
self.first_step = True
return super(SoccerOffenseEnv, self).reset()
``` |
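For reference, `_take_action` above indexes a flat parameter vector (index 0 selects the discrete action type, indices 1–3 are the KICK_TO parameters, 4–5 MOVE_TO, 6–7 DRIBBLE_TO) rather than a nested tuple sample. A sketch of assembling such a vector; the mapping of index 1 to MOVE_TO is an assumption about `MID_LEVEL_OFFENSE_ACTION_LOOKUP`, which lives in soccer_env.py and is not shown here:

```python
import numpy as np

# Flat layout assumed by SoccerOffenseEnv._take_action:
# [type, kick_to_x, kick_to_y, kick_to_speed, move_to_x, move_to_y, dribble_to_x, dribble_to_y]
action = np.zeros(8, dtype=np.float32)
action[0] = 1              # hypothetical index for MOVE_TO in the lookup table
action[4:6] = [0.5, -0.2]  # MOVE_TO target (x, y); _take_action clips these to [-1, 1]

# env.step(action) would then dispatch through _take_action, which forwards
# (MOVE_TO, action[4], action[5]) to the underlying hfo_py interface.
```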
{
"source": "jmribeiro/PLASTIC-Algorithms",
"score": 2
} |
#### File: jmribeiro/PLASTIC-Algorithms/experiment_utils.py
```python
import random
import shutil
import time
from os import listdir, path, remove
from os.path import isfile, join
from random import getrandbits, choice
import numpy as np
from agents.DQNAgent import DQNAgent
from agents.PLASTICModelAgent import PLASTICModelAgent
from agents.PLASTICPolicyAgent import PLASTICPolicyAgent
from agents.teammates.GreedyAgent import GreedyAgent
from agents.teammates.ProbabilisticDestinationsAgent import ProbabilisticDestinationsAgent
from agents.teammates.TeammateAwareAgent import TeammateAwareAgent
from environment.Pursuit import Pursuit
from metrics.PreyCapturesEveryTimesteps import PreyCapturesEveryTimesteps
from yaaf.agents import RandomAgent
from yaaf.execution import TimestepRunner
from yaaf import mkdir
RESULT_DIR = "resources/results"
def find_agent_runs(experiment_dir, agent):
directory = f"{experiment_dir}/{agent}"
mkdir(directory)
runs_done = [file for file in listdir(directory) if isfile(join(directory, file)) and ".running" not in file]
runs_running = [file for file in listdir(directory) if isfile(join(directory, file)) and ".running" in file]
return len(runs_done) + len(runs_running)
def find_available_tasks(agents, world_size, team, runs_per_agent, experiment_name, verbose=True):
tasks = {}
directory = f"{RESULT_DIR}/{experiment_name}/{world_size[0]}x{world_size[1]}/{team}"
for agent in agents:
runs_so_far = find_agent_runs(directory, agent)
runs_needed = max(runs_per_agent - runs_so_far, 0)
if verbose:
print(f"{agent} w/ {team} in {world_size[0]}x{world_size[1]}, runs: {runs_so_far}, runs needed: {runs_needed}", flush=True)
if runs_needed > 0:
tasks[agent] = runs_needed
return tasks
# ########## #
# START MAIN #
# ########## #
def do_task(agents, world_size, team, runs_per_agent, timesteps, eval_interval, log_interval, experiment_name):
time.sleep(random.randint(0, 2))
available_tasks = find_available_tasks(agents, world_size, team, runs_per_agent, experiment_name, verbose=False)
agents = list(available_tasks.keys())
if len(agents) == 0: return
agent = choice(agents)
main_run(agent, world_size, team, timesteps, eval_interval, log_interval, experiment_name)
def main_run(agent_name, world_size, team, timesteps, eval_interval, log_interval, experiment_name):
# Run preparations
directory = f"{RESULT_DIR}/{experiment_name}/{world_size[0]}x{world_size[1]}/{team}/{agent_name}"
mkdir(directory)
# Temporary run indicator
run_id = getrandbits(128)
tmp = f"{directory}/{run_id}.running"
np.save(tmp, np.zeros(2))
try:
print(f"***Starting fresh agent {agent_name}***", flush=True)
agent = setup_agent(agent_name, world_size)
print(f"***Pretraining adhoc agent '{agent_name}'***", flush=True)
if agent_name == "adhoc" or agent_name == "plastic policy":
teams_to_pretrain = ["greedy", "teammate aware"] if team == "mixed" else [team]
pretrain_adhoc_agent(agent, world_size, timesteps, eval_interval, log_interval, teams_to_pretrain, experiment_name)
print(f"***Running***", flush=True)
metric = PreyCapturesEveryTimesteps(eval_interval, verbose=True, log_interval=log_interval)
env = Pursuit(teammates=team, world_size=world_size)
runner = TimestepRunner(timesteps, agent, env, observers=[metric])
runner.run()
print(f"***Done: {metric.result()}***", flush=True)
main_result = metric.result()
run_filename = f"{directory}/{run_id}"
np.save(run_filename, main_result)
if path.exists(tmp + ".npy"):
remove(tmp + ".npy")
return main_result
except KeyboardInterrupt:
pass
def pretrain_adhoc_agent(agent, world_size, timesteps, eval_interval, log_interval, teams_to_pre_train, experiment_name):
tmp_dir = f"tmp_{getrandbits(64)}_{agent.name}"
shutil.rmtree(tmp_dir, ignore_errors=True)
for team in teams_to_pre_train:
dir = f"{RESULT_DIR}/pretrains_{experiment_name}/{world_size[0]}x{world_size[0]}/{team.lower()}/{agent.name.lower()}"
print(f"***{agent.name}'s prior population: {team} team***", flush=True)
metric = PreyCapturesEveryTimesteps(eval_interval, verbose=True, log_interval=log_interval)
runner = TimestepRunner(timesteps, agent, Pursuit(team, 3, world_size), observers=[metric])
runner.run()
print(f"***{agent.name}'s prior population: {team} team: Done -> {metric.result()}***", flush=True)
agent.save_learning_prior(tmp_dir, team)
mkdir(dir)
np.save(f"{dir}/{getrandbits(64)}.npy", metric.result())
for team in teams_to_pre_train:
agent.load_learnt_prior(f"{tmp_dir}/{team}", team)
shutil.rmtree(tmp_dir, ignore_errors=True)
# ######## #
# END MAIN #
# ######## #
def setup_agent(agent_name, world_size):
agents = {
# Model Free
"dqn": lambda: DQNAgent(world_size),
# AdHoc
"plastic model": lambda: PLASTICModelAgent(3, world_size),
"plastic policy": lambda: PLASTICPolicyAgent(3, world_size),
# Handcoded
"teammate aware": lambda: TeammateAwareAgent(0, world_size),
"greedy": lambda: GreedyAgent(0, world_size),
"dummy": lambda: RandomAgent(4),
"probabilistic destinations": lambda: ProbabilisticDestinationsAgent(0, world_size),
}
return agents[agent_name]()
```
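A sketch of how the task helpers above might be invoked from a run script. Agent names must match the keys in `setup_agent`; the world size, team name, and timestep counts are placeholders:

```python
from experiment_utils import find_available_tasks, do_task  # assumed module name

agents = ["dqn", "plastic model", "plastic policy"]
world_size = (10, 10)
team = "greedy"

tasks = find_available_tasks(agents, world_size, team, runs_per_agent=5,
                             experiment_name="demo")
while tasks:
    do_task(agents, world_size, team, runs_per_agent=5,
            timesteps=100000, eval_interval=1000, log_interval=5000,
            experiment_name="demo")
    tasks = find_available_tasks(agents, world_size, team, runs_per_agent=5,
                                 experiment_name="demo", verbose=False)
```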
#### File: PLASTIC-Algorithms/metrics/TotalPreyCaptures.py
```python
import numpy as np
from yaaf.evaluation import Metric
class TotalPreyCaptures(Metric):
def __init__(self):
super(TotalPreyCaptures, self).__init__(f"Total Prey Captures")
self._captures = []
self._current_captures = 0
self._timestep_counter = 0
def reset(self):
self._captures = []
self._current_captures = 0
self._timestep_counter = 0
def __call__(self, timestep):
self._timestep_counter += 1
capture = timestep.is_terminal
if capture:
self._current_captures += 1
self._captures.append(self._current_captures)
return self._current_captures
def result(self):
return np.array(self._captures)
``` |
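A quick sanity-check sketch for the metric above. A stand-in timestep with an `is_terminal` flag is used here; in the real setup the yaaf runner passes its own Timestep objects:

```python
from collections import namedtuple

from metrics.TotalPreyCaptures import TotalPreyCaptures  # module path as in this repo

FakeTimestep = namedtuple("FakeTimestep", ["is_terminal"])

metric = TotalPreyCaptures()
for t in range(10):
    metric(FakeTimestep(is_terminal=(t % 4 == 3)))  # a "capture" every 4th step

print(metric.result())  # cumulative capture count per timestep: [0 0 0 1 1 1 1 2 2 2]
```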
{
"source": "jmribeiro/Pruning-and-Sparsemax-Methods-for-Hierarchical-Attention-Networks",
"score": 3
} |
#### File: jmribeiro/Pruning-and-Sparsemax-Methods-for-Hierarchical-Attention-Networks/models.py
```python
import torch
from torch import nn
from torch.nn.utils.rnn import pad_sequence
# ############################# #
# Main Models (HAN, HPAN, HSAN) #
# ############################# #
class HierarchicalAttentionNetwork(nn.Module):
""" Original model from https://www.cs.cmu.edu/~./hovy/papers/16HLT-hierarchical-attention-networks.pdf"""
def __init__(self, n_classes, n_words, embeddings, layers, hidden_sizes, dropout, padding_value, eos_value, device, attention_function="softmax", pruned_attention=False, attention_threshold=None):
super(HierarchicalAttentionNetwork, self).__init__()
self.padding_value = padding_value
self.end_of_sentence_value = eos_value
self.embedder = nn.Embedding(n_words, embeddings.shape[1], padding_idx=padding_value).from_pretrained(embeddings, padding_idx=padding_value)
self.word_encoder = nn.GRU(embeddings.shape[1], hidden_sizes, layers, batch_first=True, bidirectional=True, dropout=dropout)
self.word_hidden_representation = nn.Sequential(nn.Linear(hidden_sizes * 2, hidden_sizes * 2), nn.Tanh())
self.word_context_vector = nn.Parameter(torch.Tensor(hidden_sizes * 2))
self.word_context_vector.data.uniform_(-1, 1)
self.sentence_encoder = nn.GRU(hidden_sizes * 2, hidden_sizes, layers, batch_first=True, bidirectional=True, dropout=dropout)
self.sentence_hidden_representation = nn.Sequential(nn.Linear(hidden_sizes * 2, hidden_sizes * 2), nn.Tanh())
self.sentence_context_vector = nn.Parameter(torch.Tensor(hidden_sizes * 2))
self.sentence_context_vector.data.uniform_(-1, 1)
if attention_function == "sparsemax": self.attention_function = Sparsemax(dim=1, device=device)
elif attention_function == "softmax": self.attention_function = torch.nn.Softmax(dim=1)
else: raise ValueError(f"Unregistered attention function {attention_function}. Please pick on of the following: [sparsemax, softmax]")
self.pruned_attention = pruned_attention
if self.pruned_attention: self.attention_threshold = attention_threshold
self.hidden_to_label = nn.Linear(hidden_sizes * 2, n_classes)
self.device = device
self.to(device)
def forward(self, X):
# B x S x 2H
X = self.process_words(X)
# B x S x 2H
hidden, _ = self.sentence_encoder(X)
# B x S x 2H
hidden_representations = self.sentence_hidden_representation(hidden)
# B x S
attention_weights = self.attention_function(hidden_representations @ self.sentence_context_vector)
attention_weights = prune_attentions(attention_weights, self.attention_threshold) if self.pruned_attention else attention_weights.reshape(attention_weights.shape[0], 1, attention_weights.shape[1])
# B x 2H
documents = (attention_weights @ hidden).squeeze(dim=1)
# B x K
scores = self.hidden_to_label(documents)
return scores
def process_words(self, documents):
sentences = []
for document in documents:
# S x L
words = split_into_sentences(document, self.padding_value, self.end_of_sentence_value)
# S x L x E
words = self.embedder(words)
# S x L x 2H
hidden, _ = self.word_encoder(words)
# S x L x 2H
hidden_representations = self.word_hidden_representation(hidden)
# S x L
attention_weights = self.attention_function(hidden_representations @ self.word_context_vector)
attention_weights = prune_attentions(attention_weights, self.attention_threshold) if self.pruned_attention else attention_weights.reshape(attention_weights.shape[0], 1, attention_weights.shape[1])
# S x 2H
sentences.append((attention_weights @ hidden).squeeze(dim=1))
# B x S x 2H
sentences = pad_sequence(sentences, batch_first=True)
return sentences
# ####################### #
# Basic Models (LSTM, HN) #
# ####################### #
class LSTMClassifier(nn.Module):
"""
Very simple LSTM Classifier to test the datasets.
"""
def __init__(self, n_classes, n_words, embeddings, layers, hidden_sizes, bidirectional, dropout, padding_value,
device):
super().__init__()
self.embedder = nn.Embedding(n_words, embeddings.shape[1], padding_idx=padding_value).from_pretrained(
embeddings, padding_idx=padding_value)
self.lstm = nn.LSTM(embeddings.shape[1], hidden_sizes, layers, dropout=dropout, batch_first=True,
bidirectional=bidirectional)
if bidirectional: hidden_sizes *= 2
self.bidirectional = bidirectional
self.hidden_to_label = nn.Linear(hidden_sizes, n_classes)
self.device = device
self.to(device)
def forward(self, X):
embeddings = self.embedder(X)
_, (hidden, _) = self.lstm(embeddings)
hidden_last = torch.cat((hidden[-2], hidden[-1]), dim=1) if self.bidirectional else hidden[-1]
scores = self.hidden_to_label(hidden_last)
return scores
class HierarchicalNetwork(nn.Module):
"""
Original model from https://www.cs.cmu.edu/~./hovy/papers/16HLT-hierarchical-attention-networks.pdf
but without attention
"""
def __init__(self, n_classes, n_words, embeddings, layers, hidden_sizes, dropout, padding_value, eos_value, device):
super(HierarchicalNetwork, self).__init__()
self.padding_value = padding_value
self.end_of_sentence_value = eos_value
self.embedder = nn.Embedding(n_words, embeddings.shape[1], padding_idx=padding_value).from_pretrained(
embeddings, padding_idx=padding_value)
self.word_encoder = nn.GRU(embeddings.shape[1], hidden_sizes, layers, batch_first=True, bidirectional=True,
dropout=dropout)
self.sentence_encoder = nn.GRU(hidden_sizes * 2, hidden_sizes, layers, batch_first=True, bidirectional=True,
dropout=dropout)
self.hidden_to_label = nn.Linear(hidden_sizes * 2, n_classes)
self.device = device
self.to(device)
def forward(self, X):
documents_as_sentences = []
for x in X:
# Sentence batch: L words [SxL]
document = split_into_sentences(x, self.padding_value, self.end_of_sentence_value)
# Sentence batch: L words, E embeddings [SxLxE]
words = self.embedder(document)
word_encodings = self.word_encoder(words)[1]
# Document: S sentences of 2H gru-units [1xSx2H]
sentences = torch.cat((word_encodings[-2], word_encodings[-1]), dim=1)
documents_as_sentences.append(sentences)
del X
# Documents batch: S sentences, 2H gru-units [BxSx2H]
documents_as_sentences = pad_sequence(documents_as_sentences, batch_first=True)
sentence_encodings = self.sentence_encoder(documents_as_sentences)[1]
# Batch of document "features": 2H gru-units [Bx2H]
document = torch.cat((sentence_encodings[-2], sentence_encodings[-1]), dim=1)
# Batch of document "scores": num_classes outputs [BxK]
scores = self.hidden_to_label(document)
return scores
# ############### #
# Model Utilities #
# ############### #
def split_into_sentences(document, padding_value, eos_value):
"""
Given a document as sequence (shape L1: total length)
Returns a document as sentences (shape SxL2)
"""
ends_of_sentence = (document == eos_value).nonzero()
sentences = [document[0:eos + 1] if i == 0 else document[ends_of_sentence[i - 1] + 1:eos + 1] for i, eos in enumerate(ends_of_sentence)]
last = document[ends_of_sentence[-1] + 1:]
    if (last != padding_value).any(): sentences.append(last)
document = pad_sequence(sentences, batch_first=True, padding_value=padding_value)
return document
def prune_attentions(attention_weights, attention_threshold):
pruned_attention_weights = (attention_weights < attention_threshold).float() * attention_weights
sums = pruned_attention_weights.sum(dim=1).reshape(attention_weights.shape[0], 1)
pruned_attentions = pruned_attention_weights / sums
pruned_attentions[torch.isnan(pruned_attentions)] = 0.0
pruned_attentions = pruned_attentions.reshape(pruned_attentions.shape[0], 1, pruned_attentions.shape[1])
return pruned_attentions
class Sparsemax(nn.Module):
"""Sparsemax function.
Pytorch implementation of Sparsemax function from:
-- "From https://github.com/KrisKorrel/sparsemax-pytorch:
-- "From Softmax to Sparsemax: A Sparse Model of Attention and Multi-Label Classification"
-- <NAME>, <NAME> (http://arxiv.org/abs/1602.02068)
"""
def __init__(self, device, dim=None):
"""
Args: dim (int, optional): The dimension over which to apply the sparsemax function.
"""
super(Sparsemax, self).__init__()
self.dim = -1 if dim is None else dim
self.device = device
def forward(self, input):
"""
Args:
input (torch.Tensor): Input tensor. First dimension should be the batch size
Returns:
torch.Tensor: [batch_size x number_of_logits] Output tensor
"""
# Sparsemax currently only handles 2-dim tensors,
# so we reshape to a convenient shape and reshape back after sparsemax
input = input.transpose(0, self.dim)
original_size = input.size()
input = input.reshape(input.size(0), -1)
input = input.transpose(0, 1)
dim = 1
number_of_logits = input.size(dim)
# Translate input by max for numerical stability
input = input - torch.max(input, dim=dim, keepdim=True)[0].expand_as(input)
# Sort input in descending order.
# (NOTE: Can be replaced with linear time selection method described here:
# http://stanford.edu/~jduchi/projects/DuchiShSiCh08.html)
zs = torch.sort(input=input, dim=dim, descending=True)[0]
range = torch.arange(start=1, end=number_of_logits + 1, step=1, device=self.device, dtype=input.dtype).view(1, -1)
range = range.expand_as(zs)
# Determine sparsity of projection
bound = 1 + range * zs
cumulative_sum_zs = torch.cumsum(zs, dim)
is_gt = torch.gt(bound, cumulative_sum_zs).type(input.type())
k = torch.max(is_gt * range, dim, keepdim=True)[0]
# Compute threshold function
zs_sparse = is_gt * zs
# Compute taus
taus = (torch.sum(zs_sparse, dim, keepdim=True) - 1) / k
taus = taus.expand_as(input)
# Sparsemax
self.output = torch.max(torch.zeros_like(input), input - taus)
# Reshape back to original shape
output = self.output
output = output.transpose(0, 1)
output = output.reshape(original_size)
output = output.transpose(0, self.dim)
return output
def backward(self, grad_output):
dim = 1
nonzeros = torch.ne(self.output, 0)
sum = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros, dim=dim)
self.grad_input = nonzeros * (grad_output - sum.expand_as(grad_output))
return self.grad_input
``` |
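A small sketch contrasting the Sparsemax module above with a regular softmax on the same logits (CPU only, assuming the file above is importable as `models`); the exact numbers depend on the logits, but sparsemax zeroes out low-scoring entries:

```python
import torch

from models import Sparsemax  # assumed module name for the file above

logits = torch.tensor([[2.0, 1.0, 0.1, -1.0]])

softmax_probs = torch.softmax(logits, dim=1)
sparsemax = Sparsemax(device=torch.device("cpu"), dim=1)
sparsemax_probs = sparsemax(logits)

print(softmax_probs)    # all entries strictly positive
print(sparsemax_probs)  # low-scoring entries exactly 0, remaining mass sums to 1
```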
{
"source": "jmribeiro/PyTorch-NLP",
"score": 2
} |
#### File: tests/samplers/test_bucket_batch_sampler.py
```python
import pickle
from torch.utils.data.sampler import SequentialSampler
from torchnlp.random import fork_rng_wrap
from torchnlp.samplers import BucketBatchSampler
def test_bucket_batch_sampler_length():
data_source = [[1], [2], [3], [4], [5], [6]]
sort_key = lambda i: len(data_source[i])
batch_size = 2
sampler = SequentialSampler(data_source)
batch_sampler = BucketBatchSampler(
sampler,
batch_size=batch_size,
sort_key=sort_key,
drop_last=False,
bucket_size_multiplier=2)
batches = list(batch_sampler)
assert len(batches) == 3
assert len(batch_sampler) == 3
def test_bucket_batch_sampler_uneven_length():
data_source = [[1], [2], [3], [4], [5]]
sort_key = lambda i: len(data_source[i])
batch_size = 2
sampler = SequentialSampler(data_source)
batch_sampler = BucketBatchSampler(
sampler, batch_size, sort_key=sort_key, drop_last=False, bucket_size_multiplier=2)
batches = list(batch_sampler)
assert len(batches) == 3
assert len(batch_sampler) == 3
batch_sampler = BucketBatchSampler(
sampler, batch_size, sort_key=sort_key, drop_last=True, bucket_size_multiplier=2)
batches = list(batch_sampler)
assert len(batches) == 2
assert len(batch_sampler) == 2
def test_bucket_batch_sampler_sorted():
data_source = [[1], [2], [3], [4], [5]]
sort_key = lambda i: data_source[i]
batch_size = len(data_source)
sampler = SequentialSampler(data_source)
batches = list(
BucketBatchSampler(
sampler, batch_size, sort_key=sort_key, drop_last=False, bucket_size_multiplier=1))
for i, batch in enumerate(batches):
assert batch[0] == i
@fork_rng_wrap(seed=123)
def test_bucket_batch_sampler():
sampler = SequentialSampler(list(range(10)))
batch_sampler = BucketBatchSampler(
sampler, batch_size=3, drop_last=False, bucket_size_multiplier=2)
assert len(batch_sampler) == 4
assert list(batch_sampler) == [[0, 1, 2], [3, 4, 5], [9], [6, 7, 8]]
def test_bucket_batch_sampler__drop_last():
sampler = SequentialSampler(list(range(10)))
batch_sampler = BucketBatchSampler(
sampler, batch_size=3, drop_last=True, bucket_size_multiplier=2)
assert len(batch_sampler) == 3
assert len(list(iter(batch_sampler))) == 3
def test_pickleable():
sampler = SequentialSampler(list(range(10)))
batch_sampler = BucketBatchSampler(
sampler, batch_size=2, drop_last=False, bucket_size_multiplier=2)
pickle.dumps(batch_sampler)
```
#### File: tests/samplers/test_distributed_sampler.py
```python
import pickle
from torch.utils.data.sampler import SequentialSampler
from torchnlp.samplers import DistributedSampler
def test_distributed_batch_sampler():
sampler = SequentialSampler(list(range(15)))
distributed_sampler = DistributedSampler(sampler, num_replicas=3, rank=0)
assert list(distributed_sampler) == [0, 3, 6, 9, 12]
distributed_sampler = DistributedSampler(sampler, num_replicas=3, rank=1)
assert list(distributed_sampler) == [1, 4, 7, 10, 13]
distributed_sampler = DistributedSampler(sampler, num_replicas=3, rank=2)
assert list(distributed_sampler) == [2, 5, 8, 11, 14]
def test_pickleable():
sampler = SequentialSampler(list(range(15)))
sampler = DistributedSampler(sampler, num_replicas=3, rank=2)
pickle.dumps(sampler)
```
#### File: torchnlp/encoders/label_encoder.py
```python
from collections import Counter
from torchnlp.encoders.encoder import Encoder
import torch
DEFAULT_UNKNOWN_TOKEN = '<unk>'
DEFAULT_RESERVED = [DEFAULT_UNKNOWN_TOKEN]
class LabelEncoder(Encoder):
""" Encodes an label via a dictionary.
Args:
sample (list of strings): Sample of data used to build encoding dictionary.
min_occurrences (int, optional): Minimum number of occurrences for a label to be added to
the encoding dictionary.
reserved_labels (list, optional): List of reserved labels inserted in the beginning of the
dictionary.
unknown_index (int, optional): The unknown label is used to encode unseen labels. This is
the index that label resides at.
**kwargs: Keyword arguments passed onto ``Encoder``.
Example:
>>> samples = ['label_a', 'label_b']
>>> encoder = LabelEncoder(samples, reserved_labels=['unknown'], unknown_index=0)
>>> encoder.encode('label_a')
tensor(1)
>>> encoder.decode(encoder.encode('label_a'))
'label_a'
>>> encoder.encode('label_c')
tensor(0)
>>> encoder.decode(encoder.encode('label_c'))
'unknown'
>>> encoder.vocab
['unknown', 'label_a', 'label_b']
"""
def __init__(self,
sample,
min_occurrences=1,
reserved_labels=DEFAULT_RESERVED,
unknown_index=DEFAULT_RESERVED.index(DEFAULT_UNKNOWN_TOKEN),
**kwargs):
super().__init__(**kwargs)
if unknown_index and unknown_index >= len(reserved_labels):
raise ValueError('The `unknown_index` if provided must be also `reserved`.')
self.unknown_index = unknown_index
self.tokens = Counter(sample)
self.index_to_token = reserved_labels.copy()
self.token_to_index = {token: index for index, token in enumerate(reserved_labels)}
for token, count in self.tokens.items():
if count >= min_occurrences:
self.index_to_token.append(token)
self.token_to_index[token] = len(self.index_to_token) - 1
@property
def vocab(self):
"""
Returns:
list: List of labels in the dictionary.
"""
return self.index_to_token
@property
def vocab_size(self):
"""
Returns:
int: Number of labels in the dictionary.
"""
return len(self.vocab)
def encode(self, label):
""" Encodes a ``label``.
Args:
label (object): Label to encode.
Returns:
torch.Tensor: Encoding of the label.
"""
label = super().encode(label)
return torch.tensor(self.token_to_index.get(label, self.unknown_index))
def batch_encode(self, iterator, *args, dim=0, **kwargs):
"""
Args:
iterator (iterator): Batch of labels to encode.
*args: Arguments passed to ``Encoder.batch_encode``.
dim (int, optional): Dimension along which to concatenate tensors.
**kwargs: Keyword arguments passed to ``Encoder.batch_encode``.
Returns:
torch.Tensor: Tensor of encoded labels.
"""
return torch.stack(super().batch_encode(iterator, *args, **kwargs), dim=dim)
def decode(self, encoded):
""" Decodes ``encoded`` label.
Args:
encoded (torch.Tensor): Encoded label.
Returns:
object: Label decoded from ``encoded``.
"""
encoded = super().decode(encoded)
if encoded.numel() > 1:
raise ValueError(
'``decode`` decodes one label at a time, use ``batch_decode`` instead.')
return self.index_to_token[encoded.squeeze().item()]
def batch_decode(self, tensor, *args, dim=0, **kwargs):
"""
Args:
tensor (torch.Tensor): Batch of tensors.
*args: Arguments passed to ``Encoder.batch_decode``.
dim (int, optional): Dimension along which to split tensors.
**kwargs: Keyword arguments passed to ``Encoder.batch_decode``.
Returns:
list: Batch of decoded labels.
"""
return super().batch_decode([t.squeeze(0) for t in tensor.split(1, dim=dim)])
```
#### File: torchnlp/samplers/bucket_batch_sampler.py
```python
import math
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import SubsetRandomSampler
from torchnlp.samplers.sorted_sampler import SortedSampler
from torchnlp.utils import identity
class BucketBatchSampler(BatchSampler):
""" `BucketBatchSampler` toggles between `sampler` batches and sorted batches.
Typically, the `sampler` will be a `RandomSampler` allowing the user to toggle between
random batches and sorted batches. A larger `bucket_size_multiplier` is more sorted and vice
versa.
Background:
``BucketBatchSampler`` is similar to a ``BucketIterator`` found in popular libraries like
``AllenNLP`` and ``torchtext``. A ``BucketIterator`` pools together examples with a similar
size length to reduce the padding required for each batch while maintaining some noise
through bucketing.
**AllenNLP Implementation:**
https://github.com/allenai/allennlp/blob/master/allennlp/data/iterators/bucket_iterator.py
**torchtext Implementation:**
https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L225
Args:
sampler (torch.data.utils.sampler.Sampler):
batch_size (int): Size of mini-batch.
drop_last (bool): If `True` the sampler will drop the last batch if its size would be less
than `batch_size`.
sort_key (callable, optional): Callable to specify a comparison key for sorting.
bucket_size_multiplier (int, optional): Buckets are of size
`batch_size * bucket_size_multiplier`.
Example:
>>> from torchnlp.random import set_seed
>>> set_seed(123)
>>>
>>> from torch.utils.data.sampler import SequentialSampler
>>> sampler = SequentialSampler(list(range(10)))
>>> list(BucketBatchSampler(sampler, batch_size=3, drop_last=False))
[[6, 7, 8], [0, 1, 2], [3, 4, 5], [9]]
>>> list(BucketBatchSampler(sampler, batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self,
sampler,
batch_size,
drop_last,
sort_key=identity,
bucket_size_multiplier=100):
super().__init__(sampler, batch_size, drop_last)
self.sort_key = sort_key
self.bucket_sampler = BatchSampler(sampler,
min(batch_size * bucket_size_multiplier, len(sampler)),
False)
def __iter__(self):
for bucket in self.bucket_sampler:
sorted_sampler = SortedSampler(bucket, self.sort_key)
for batch in SubsetRandomSampler(
list(BatchSampler(sorted_sampler, self.batch_size, self.drop_last))):
yield [bucket[i] for i in batch]
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return math.ceil(len(self.sampler) / self.batch_size)
```
#### File: torchnlp/samplers/distributed_batch_sampler.py
```python
from torch.utils.data.sampler import BatchSampler
from torchnlp.samplers.distributed_sampler import DistributedSampler
class DistributedBatchSampler(BatchSampler):
""" `BatchSampler` wrapper that distributes across each batch multiple workers.
Args:
batch_sampler (torch.utils.data.sampler.BatchSampler)
num_replicas (int, optional): Number of processes participating in distributed training.
rank (int, optional): Rank of the current process within num_replicas.
Example:
>>> from torch.utils.data.sampler import BatchSampler
>>> from torch.utils.data.sampler import SequentialSampler
>>> sampler = SequentialSampler(list(range(12)))
>>> batch_sampler = BatchSampler(sampler, batch_size=4, drop_last=False)
>>>
>>> list(DistributedBatchSampler(batch_sampler, num_replicas=2, rank=0))
[[0, 2], [4, 6], [8, 10]]
>>> list(DistributedBatchSampler(batch_sampler, num_replicas=2, rank=1))
[[1, 3], [5, 7], [9, 11]]
"""
def __init__(self, batch_sampler, **kwargs):
self.batch_sampler = batch_sampler
self.kwargs = kwargs
def __iter__(self):
for batch in self.batch_sampler:
yield list(DistributedSampler(batch, **self.kwargs))
def __len__(self):
return len(self.batch_sampler)
``` |
{
"source": "jmribeiro/PyTorch-Sequence-to-Sequence",
"score": 3
} |
#### File: jmribeiro/PyTorch-Sequence-to-Sequence/dataset.py
```python
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataset import Dataset
import numpy as np
class Vocabulary:
""" Encapsulates a vocabulary of characters for a given language """
def __init__(self, language):
self.language = language
self.characters = ["$START", "$STOP", "$UNK", "$PAD"]
def add_character(self, character):
""" Adds a character to the vocabulary if not in it already. """
if character not in self.characters:
self.characters.append(character)
def __len__(self):
"""
Returns the total number of characters in the vocabulary.
"""
return len(self.characters)
def __repr__(self):
"""
Returns the words in the vocabulary (including special symbols).
"""
return "| " + " | ".join(self.characters) + " |"
def __getitem__(self, item):
if isinstance(item, str):
try:
return self.characters.index(item)
except ValueError:
print(f"WARN: Character '{item}' does not belong to {self.language} vocabulary, returning $UNK.")
return self.characters.index("$UNK")
elif isinstance(item, int):
return self.characters[item]
else:
raise IndexError("Invalid item for indexation. Pass only int or str.")
class Ar2EnDataset(Dataset):
""" Encapsulates the ar2en dataset """
def __init__(self, path, reverse_source_string):
# ############# #
# Load from txt #
# ############# #
        training = np.loadtxt(f"{path}/ar2en-train.txt", dtype=str)
        validation = np.loadtxt(f"{path}/ar2en-eval.txt", dtype=str)
        test = np.loadtxt(f"{path}/ar2en-test.txt", dtype=str)
# ################### #
# Create Vocabularies #
# ################### #
self.english_vocabulary = Vocabulary("english")
self.arabic_vocabulary = Vocabulary("arabic")
for ar_word, en_word in list(training) + list(validation) + list(test):
[self.arabic_vocabulary.add_character(ar_char) for ar_char in ar_word]
[self.english_vocabulary.add_character(en_char) for en_char in en_word]
self.n_inputs = len(self.arabic_vocabulary)
self.n_outputs = len(self.english_vocabulary)
print(f"Arabic vocabulary ({self.n_inputs} unique characters):\n\t {self.arabic_vocabulary}\n", flush=True)
print(f"English vocabulary ({self.n_outputs} unique characters):\n\t {self.english_vocabulary}\n", flush=True)
# ######## #
# Features #
# ######## #
self.X_train, self.y_train = self.to_index_features(training, reverse_source_string)
self.X_dev, self.y_dev = self.to_index_features(validation, reverse_source_string)
self.X_test, self.y_test = self.to_index_features(test, reverse_source_string)
def to_index_features(self, data, reverse_source_string):
X, y = [], []
for ar_word, en_word in data:
source_seq = [self.arabic_vocabulary[ar_char] for ar_char in ar_word]
            if reverse_source_string: source_seq.reverse()
xseq = [self.arabic_vocabulary["$START"]] + source_seq + [self.arabic_vocabulary["$STOP"]]
xseq = torch.tensor(xseq, dtype=torch.float)
target_seq = [self.english_vocabulary[en_char] for en_char in en_word]
yseq = [self.english_vocabulary["$START"]] + target_seq + [self.english_vocabulary["$STOP"]]
yseq = torch.tensor(yseq, dtype=torch.float)
X.append(xseq)
y.append(yseq)
X = pad_sequence(X, batch_first=True, padding_value=self.arabic_vocabulary["$PAD"])
y = pad_sequence(y, batch_first=True, padding_value=self.english_vocabulary["$PAD"])
X = X.long()
y = y.long()
return X, y
def __len__(self):
return len(self.X_train)
def __getitem__(self, idx):
return self.X_train[idx], self.y_train[idx]
```
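A tiny sketch of the Vocabulary behaviour above; indices 0–3 are the special symbols added in the constructor:

```python
from dataset import Vocabulary  # module path as in this repo

vocab = Vocabulary("english")
for ch in "hello":
    vocab.add_character(ch)

print(len(vocab))  # 4 special symbols + 4 unique characters ('h', 'e', 'l', 'o') = 8
print(vocab["h"])  # 4 -- first character after $START, $STOP, $UNK, $PAD
print(vocab[4])    # 'h'
print(vocab["z"])  # falls back to the index of $UNK (2), with a warning
```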
#### File: jmribeiro/PyTorch-Sequence-to-Sequence/model.py
```python
import torch
from torch import nn
class EncoderDecoder(nn.Module):
def __init__(self, n_inputs, n_outputs,
embeddings_size, attention, bidirectional,
hidden_sizes, layers, dropout, input_vocab, output_vocab, device):
super(EncoderDecoder, self).__init__()
self.input_vocab = input_vocab
self.output_vocab = output_vocab
self.padding_value = output_vocab["$PAD"]
self.bidirectional = bidirectional
self.encoder_embeddings = nn.Embedding(n_inputs, embeddings_size, padding_idx=input_vocab["$PAD"])
self.encoder = nn.LSTM(embeddings_size, hidden_sizes, layers, dropout=dropout, batch_first=True, bidirectional=bidirectional)
        self.decoder_embeddings = nn.Embedding(n_outputs, embeddings_size, padding_idx=output_vocab["$PAD"])
if bidirectional: hidden_sizes *= 2
self.decoder = nn.LSTM(embeddings_size, hidden_sizes, layers, dropout=dropout, batch_first=True)
if attention is not None:
from torchnlp.nn import Attention
self.attention = Attention(hidden_sizes, attention_type=attention)
self.hidden_to_output = nn.Linear(hidden_sizes, n_outputs)
self.device = device
self.to(device)
# ############## #
# Main Interface #
# ############## #
def forward(self, X, y):
""" Uses full teacher forcing when training (i.e., uses y to decode instead re-feeding the generated tokens)"""
encodings, hidden = self.encode(X)
y = y[:, :-1] # Start at $START, but don't decode the last token (since it has no followup)
scores, last_hidden = self.decode(y, encodings, hidden)
return scores
def predict(self, X, max_length):
""" Vectorized computation without teaching forcing """
encodings, hidden = self.encode(X)
Z = torch.zeros(X.shape[0], 1, len(self.output_vocab)).to(self.device)
tokens = torch.full((X.shape[0], 1), fill_value=self.output_vocab["$START"], dtype=torch.long).to(self.device)
for i in range(max_length):
scores, hidden = self.decode(tokens, encodings, hidden)
tokens = scores.argmax(dim=2)
Z = torch.cat((Z, scores), 1)
return Z[:, 1:] # Zeroed scores for $START are ignored (from Z = torch.zeros before concat)
# ################# #
# Auxiliary Methods #
# ################# #
def encode(self, tokens):
embeddings = self.encoder_embeddings(tokens)
encodings, hidden = self.encoder(embeddings)
if self.bidirectional: hidden = self.bidirectional_reshape(hidden)
return encodings, hidden
def decode(self, tokens, encodings, previous_hidden):
embeddings = self.decoder_embeddings(tokens)
decodings, hidden = self.decoder(embeddings, hx=previous_hidden)
if hasattr(self, "attention"): decodings, attention_weights = self.attention(decodings, encodings)
scores = self.hidden_to_output(decodings)
return scores, hidden
def bidirectional_reshape(self, hidden):
hn, cn = hidden
H, B, E = hn.shape
H = int(H/2)
hn_new = torch.zeros((H, B, E * 2)).to(self.device)
cn_new = torch.zeros((H, B, E * 2)).to(self.device)
for h in range(H):
i = h*2
h_lr, h_rl = hn[i], hn[i + 1]
c_lr, c_rl = cn[i], cn[i + 1]
hn_new[h] = torch.cat((h_lr, h_rl), dim=1)
cn_new[h] = torch.cat((c_lr, c_rl), dim=1)
return hn_new, cn_new
def train_batch(X, y, model, optimizer, criterion):
model.train()
optimizer.zero_grad()
X = X.to(model.device)
y = y.to(model.device)
scores = model(X, y)
# Drop $START token since model doesn't return its score
y = y[:, 1:]
y_flat = y.contiguous().view(-1)
scores_flat = scores.reshape(scores.shape[0] * scores.shape[1], -1)
loss = criterion(scores_flat, y_flat)
loss.backward()
optimizer.step()
return loss.detach().item()
def evaluate(model, X, y):
model.eval()
X = X.to(model.device)
y = y.to(model.device)
scores = model.predict(X, max_length=y.shape[1])
# Drop $START token since model doesn't return its score
y = y[:, 1:]
n_correct_words, n_possible_words = 0, 0
n_correct_chars, n_possible_chars = 0, 0
""" Im sure this can be written in a more clean an efficient way...
But all main computation is vectorized already so I didn't bother"""
for b in range(y.shape[0]):
word = y[b]
word_scores = scores[b]
predicted_chars = word_scores.argmax(dim=1)
correct_word = True
for c, char in enumerate(word):
if char == model.output_vocab["$PAD"]: break
predicted_char = predicted_chars[c]
n_possible_chars += 1
correct_char = (char == predicted_char).item()
if correct_char: n_correct_chars += 1
else: correct_word = False
n_possible_words += 1
n_correct_words += correct_word
char_acc = n_correct_chars / n_possible_chars
word_acc = n_correct_words / n_possible_words
return word_acc, char_acc
```
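Since train.py (below) already shows the full training loop, here is only a shape-level sketch of what `bidirectional_reshape` does to the encoder state; the layer, batch, and hidden sizes are arbitrary, and the strided indexing is an equivalent vectorized form of the loop above:

```python
import torch

layers, batch, hidden = 2, 4, 8

# nn.LSTM with bidirectional=True returns hn of shape (layers * 2, batch, hidden),
# ordered as [layer0_fwd, layer0_bwd, layer1_fwd, layer1_bwd].
hn = torch.randn(layers * 2, batch, hidden)
cn = torch.randn(layers * 2, batch, hidden)

# bidirectional_reshape concatenates each forward/backward pair, producing the
# (layers, batch, 2 * hidden) state expected by the unidirectional decoder LSTM.
hn_new = torch.cat((hn[0::2], hn[1::2]), dim=2)
cn_new = torch.cat((cn[0::2], cn[1::2]), dim=2)
print(hn_new.shape, cn_new.shape)  # torch.Size([2, 4, 16]) torch.Size([2, 4, 16])
```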
#### File: jmribeiro/PyTorch-Sequence-to-Sequence/train.py
```python
import argparse
import sys
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.utils.data.dataloader import DataLoader
from dataset import Ar2EnDataset
from model import EncoderDecoder, train_batch, evaluate
def plot(epochs, plottable, ylabel, title, name):
plt.clf()
plt.xlabel('Epoch')
if isinstance(plottable, tuple):
assert isinstance(ylabel, tuple) and len(plottable) == len(ylabel)
for i in range(len(plottable)):
plt.plot(epochs, plottable[i], label=ylabel[i])
plt.legend()
plt.ylabel("Accuracy")
else:
plt.ylabel(ylabel)
plt.plot(epochs, plottable)
plt.title(title)
plt.savefig('%s.pdf' % name, bbox_inches='tight')
plt.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-data', help="Path to ar2en dataset.", default='./ar2en_dataset')
parser.add_argument('-embeddings_size', type=int, default=300)
parser.add_argument('-layers', type=int, default=2)
parser.add_argument('-hidden_sizes', type=int, default=300)
parser.add_argument('-dropout', type=float, default=0.1)
parser.add_argument('-epochs', type=int, default=20)
parser.add_argument('-optimizer', choices=['sgd', 'adam'], default='adam')
parser.add_argument('-learning_rate', type=float, default=0.001)
parser.add_argument('-l2_decay', type=float, default=0.0)
parser.add_argument('-batch_size', type=int, default=64)
parser.add_argument('-cuda', action='store_true',
help='Whether or not to use cuda for parallelization (if devices available)')
parser.add_argument('-name', type=str, required=False, default=None,
help="Filename for the plot")
parser.add_argument('-quiet', action='store_true',
help='No execution output.')
parser.add_argument('-tqdm', action='store_true',
help='Whether or not to use TQDM progress bar in training.')
parser.add_argument('-display_vocabularies', action="store_true",
help="Only display the vocabularies (no further execution).")
parser.add_argument('-reverse_source_string', action="store_true",
help="Whether or not to reverse the source arabic string.")
parser.add_argument('-bidirectional', action="store_true",
help="Whether or not to use a bidirectional encoder LSTM.")
parser.add_argument('-attention', type=str, choices=["dot", "general"], required=False, default=None,
help="Attention mechanism in the decoder.")
opt = parser.parse_args()
# ############# #
# 1 - Load Data #
# ############# #
dataset = Ar2EnDataset(opt.data, opt.reverse_source_string)
if opt.display_vocabularies:
sys.exit(0)
dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True)
X_dev, y_dev = dataset.X_dev, dataset.y_dev
X_test, y_test = dataset.X_test, dataset.y_test
# ################ #
# 2 - Create Model #
# ################ #
device = torch.device("cuda:0" if torch.cuda.is_available() and opt.cuda else "cpu")
if not opt.quiet: print(f"Using device '{device}'", flush=True)
model = EncoderDecoder(dataset.n_inputs, dataset.n_outputs,
opt.embeddings_size, opt.attention, opt.bidirectional,
opt.hidden_sizes, opt.layers, opt.dropout,
dataset.arabic_vocabulary, dataset.english_vocabulary, device)
# ############# #
# 3 - Optimizer #
# ############# #
optimizer = {
"adam": torch.optim.Adam,
"sgd": torch.optim.SGD
}[opt.optimizer](
model.parameters(),
lr=opt.learning_rate,
weight_decay=opt.l2_decay
)
criterion = nn.CrossEntropyLoss(ignore_index=dataset.english_vocabulary["$PAD"])
# ###################### #
# 4 - Train and Evaluate #
# ###################### #
epochs = torch.arange(1, opt.epochs + 1)
train_mean_losses = []
val_word_acc = []
val_char_acc = []
train_losses = []
for epoch in epochs:
if not opt.quiet: print('\nTraining epoch {}'.format(epoch), flush=True)
if opt.tqdm:
from tqdm import tqdm
dataloader = tqdm(dataloader)
for X_batch, y_batch in dataloader:
loss = train_batch(X_batch, y_batch, model, optimizer, criterion)
train_losses.append(loss)
mean_loss = torch.tensor(train_losses).mean().item()
word_acc, char_acc = evaluate(model, X_dev, y_dev)
train_mean_losses.append(mean_loss)
val_word_acc.append(word_acc)
val_char_acc.append(char_acc)
if not opt.quiet:
print('Training loss: %.4f' % mean_loss, flush=True)
print('Valid word acc: %.4f' % val_word_acc[-1], flush=True)
print('Valid char acc: %.4f' % val_char_acc[-1], flush=True)
final_test_accuracy_words, final_test_accuracy_chars = evaluate(model, X_test, y_test)
if not opt.quiet:
print('\nFinal Test Word Acc: %.4f' % final_test_accuracy_words, flush=True)
print('Final Test Char Acc: %.4f' % final_test_accuracy_chars, flush=True)
# ######## #
# 5 - Plot #
# ######## #
name = opt.name if opt.name is not None else "encoder_decoder"
plot(epochs, train_mean_losses, ylabel='Loss', name=name+"_loss", title="Training Loss")
plot(epochs, val_word_acc, ylabel='Word Val Acc', name=name+"_acc", title=f"Word Validation Accuracy\n(Final Word Test Accuracy: {round(final_test_accuracy_words,3)})")
return final_test_accuracy_words
if __name__ == '__main__':
main()
``` |
{
"source": "jmribeiro/SLM-Lab",
"score": 2
} |
#### File: agent/algorithm/sil.py
```python
from slm_lab.agent import net, memory
from slm_lab.agent.algorithm import policy_util
from slm_lab.agent.algorithm.actor_critic import ActorCritic
from slm_lab.agent.algorithm.ppo import PPO
from slm_lab.lib import logger, math_util, util
from slm_lab.lib.decorator import lab_api
import numpy as np
import pydash as ps
import torch
logger = logger.get_logger(__name__)
class SIL(ActorCritic):
'''
Implementation of Self-Imitation Learning (SIL) https://arxiv.org/abs/1806.05635
This is actually just A2C with an extra SIL loss function
e.g. algorithm_spec
"algorithm": {
"name": "SIL",
"action_pdtype": "default",
"action_policy": "default",
"explore_var_spec": null,
"gamma": 0.99,
"lam": 0.95,
"num_step_returns": 100,
"entropy_coef_spec": {
"name": "linear_decay",
"start_val": 0.01,
"end_val": 0.001,
"start_step": 100,
"end_step": 5000,
},
"policy_loss_coef": 1.0,
"val_loss_coef": 0.01,
"sil_policy_loss_coef": 1.0,
"sil_val_loss_coef": 0.01,
"training_batch_iter": 8,
"training_frequency": 1,
"training_iter": 8,
}
e.g. special memory_spec
"memory": {
"name": "OnPolicyReplay",
"sil_replay_name": "Replay",
"batch_size": 32,
"max_size": 10000,
"use_cer": true
}
'''
def __init__(self, agent, global_nets=None):
super().__init__(agent, global_nets)
# create the extra replay memory for SIL
MemoryClass = getattr(memory, self.memory_spec['sil_replay_name'])
self.body.replay_memory = MemoryClass(self.memory_spec, self.body)
@lab_api
def init_algorithm_params(self):
'''Initialize other algorithm parameters'''
# set default
util.set_attr(self, dict(
action_pdtype='default',
action_policy='default',
explore_var_spec=None,
entropy_coef_spec=None,
policy_loss_coef=1.0,
val_loss_coef=1.0,
))
util.set_attr(self, self.algorithm_spec, [
'action_pdtype',
'action_policy',
# theoretically, AC does not have policy update; but in this implementation we have such option
'explore_var_spec',
'gamma', # the discount factor
'lam',
'num_step_returns',
'entropy_coef_spec',
'policy_loss_coef',
'val_loss_coef',
'sil_policy_loss_coef',
'sil_val_loss_coef',
'training_frequency',
'training_batch_iter',
'training_iter',
])
super().init_algorithm_params()
def sample(self):
'''Modify the onpolicy sample to also append to replay'''
batch = self.body.memory.sample()
if self.body.memory.is_episodic:
batch = {k: np.concatenate(v) for k, v in batch.items()} # concat episodic memory
for idx in range(len(batch['dones'])):
tuples = [batch[k][idx] for k in self.body.replay_memory.data_keys]
self.body.replay_memory.add_experience(*tuples)
batch = util.to_torch_batch(batch, self.net.device, self.body.replay_memory.is_episodic)
return batch
def replay_sample(self):
'''Samples a batch from memory'''
batch = self.body.replay_memory.sample()
batch = util.to_torch_batch(batch, self.net.device, self.body.replay_memory.is_episodic)
return batch
def calc_sil_policy_val_loss(self, batch, pdparams):
'''
Calculate the SIL policy losses for actor and critic
sil_policy_loss = -log_prob * max(R - v_pred, 0)
sil_val_loss = (max(R - v_pred, 0)^2) / 2
        This is called on a randomly-sampled batch from experience replay
'''
v_preds = self.calc_v(batch['states'], use_cache=False)
rets = math_util.calc_returns(batch['rewards'], batch['dones'], self.gamma)
clipped_advs = torch.clamp(rets - v_preds, min=0.0)
action_pd = policy_util.init_action_pd(self.body.ActionPD, pdparams)
actions = batch['actions']
if self.body.env.is_venv:
actions = math_util.venv_unpack(actions)
log_probs = action_pd.log_prob(actions)
sil_policy_loss = - self.sil_policy_loss_coef * (log_probs * clipped_advs).mean()
sil_val_loss = self.sil_val_loss_coef * clipped_advs.pow(2).mean() / 2
logger.debug(f'SIL actor policy loss: {sil_policy_loss:g}')
logger.debug(f'SIL critic value loss: {sil_val_loss:g}')
return sil_policy_loss, sil_val_loss
def train(self):
clock = self.body.env.clock
if self.to_train == 1:
# onpolicy update
super_loss = super().train()
# offpolicy sil update with random minibatch
total_sil_loss = torch.tensor(0.0)
for _ in range(self.training_iter):
batch = self.replay_sample()
for _ in range(self.training_batch_iter):
pdparams, _v_preds = self.calc_pdparam_v(batch)
sil_policy_loss, sil_val_loss = self.calc_sil_policy_val_loss(batch, pdparams)
sil_loss = sil_policy_loss + sil_val_loss
self.net.train_step(sil_loss, self.optim, self.lr_scheduler, clock=clock, global_net=self.global_net)
total_sil_loss += sil_loss
sil_loss = total_sil_loss / self.training_iter
loss = super_loss + sil_loss
logger.debug(f'Trained {self.name} at epi: {clock.epi}, frame: {clock.frame}, t: {clock.t}, total_reward so far: {self.body.env.total_reward}, loss: {loss:g}')
return loss.item()
else:
return np.nan
class PPOSIL(SIL, PPO):
'''
SIL extended from PPO. This will call the SIL methods and use PPO as super().
e.g. algorithm_spec
"algorithm": {
"name": "PPOSIL",
"action_pdtype": "default",
"action_policy": "default",
"explore_var_spec": null,
"gamma": 0.99,
"lam": 0.95,
"clip_eps_spec": {
"name": "linear_decay",
"start_val": 0.01,
"end_val": 0.001,
"start_step": 100,
"end_step": 5000,
},
"entropy_coef_spec": {
"name": "linear_decay",
"start_val": 0.01,
"end_val": 0.001,
"start_step": 100,
"end_step": 5000,
},
"sil_policy_loss_coef": 1.0,
"sil_val_loss_coef": 0.01,
"time_horizon": 32,
"training_batch_iter": 8,
"training_iter": 8,
"training_epoch": 8,
}
e.g. special memory_spec
"memory": {
"name": "OnPolicyReplay",
"sil_replay_name": "Replay",
"batch_size": 32,
"max_size": 10000,
"use_cer": true
}
'''
pass
```
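A self-contained numeric sketch of the two loss terms documented in `calc_sil_policy_val_loss` above. The returns, value predictions and log-probabilities are made-up numbers, not outputs of a trained agent.
```python
import torch
rets = torch.tensor([1.0, 0.2, -0.5])        # discounted returns R
v_preds = torch.tensor([0.4, 0.3, 0.1])      # critic predictions V(s)
log_probs = torch.tensor([-0.7, -1.2, -0.3])
sil_policy_loss_coef, sil_val_loss_coef = 1.0, 0.01
clipped_advs = torch.clamp(rets - v_preds, min=0.0)  # max(R - V, 0): only "better than expected" transitions count
sil_policy_loss = -sil_policy_loss_coef * (log_probs * clipped_advs).mean()
sil_val_loss = sil_val_loss_coef * clipped_advs.pow(2).mean() / 2
print(clipped_advs)     # tensor([0.6000, 0.0000, 0.0000])
print(sil_policy_loss)  # tensor(0.1400)
print(sil_val_loss)     # tensor(0.0006)
```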
#### File: agent/memory/replay.py
```python
from collections import deque
from copy import deepcopy
from slm_lab.agent.memory.base import Memory
from slm_lab.lib import logger, math_util, util
from slm_lab.lib.decorator import lab_api
import numpy as np
import pydash as ps
logger = logger.get_logger(__name__)
def sample_next_states(head, max_size, ns_idx_offset, batch_idxs, states, ns_buffer):
'''Method to sample next_states from states, with proper guard for next_state idx being out of bound'''
# idxs for next state is state idxs with offset, modded
ns_batch_idxs = (batch_idxs + ns_idx_offset) % max_size
# if head < ns_idx <= head + ns_idx_offset, ns is stored in ns_buffer
ns_batch_idxs = ns_batch_idxs % max_size
buffer_ns_locs = np.argwhere(
(head < ns_batch_idxs) & (ns_batch_idxs <= head + ns_idx_offset)).flatten()
# find if there is any idxs to get from buffer
to_replace = buffer_ns_locs.size != 0
if to_replace:
# extract the buffer_idxs first for replacement later
# given head < ns_idx <= head + offset, and valid buffer idx is [0, offset)
# get 0 < ns_idx - head <= offset, or equiv.
# get -1 < ns_idx - head - 1 <= offset - 1, i.e.
# get 0 <= ns_idx - head - 1 < offset, hence:
buffer_idxs = ns_batch_idxs[buffer_ns_locs] - head - 1
# set them to 0 first to allow sampling, then replace later with buffer
ns_batch_idxs[buffer_ns_locs] = 0
# guard all against overrun idxs from offset
ns_batch_idxs = ns_batch_idxs % max_size
next_states = util.batch_get(states, ns_batch_idxs)
if to_replace:
# now replace using buffer_idxs and ns_buffer
buffer_ns = util.batch_get(ns_buffer, buffer_idxs)
next_states[buffer_ns_locs] = buffer_ns
return next_states
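# Worked example of the index arithmetic above (illustrative numbers only): with
# max_size=10, ns_idx_offset=1 and head=5, a sampled idx of 3 gives ns_idx = 4,
# which is not in (head, head + offset], so next_state is read from states[4];
# sampling the most recent idx 5 gives ns_idx = 6, which falls in (5, 6], so it
# is fetched from ns_buffer[6 - 5 - 1] = ns_buffer[0] instead.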
class Replay(Memory):
'''
Stores agent experiences and samples from them for agent training
An experience consists of
- state: representation of a state
- action: action taken
- reward: scalar value
- next state: representation of next state (should be same as state)
- done: 0 / 1 representing if the current state is the last in an episode
The memory has a size of N. When capacity is reached, the oldest experience
    is deleted to make space for the latest experience.
    - This is implemented as a circular buffer so that inserting experiences is O(1)
- Each element of an experience is stored as a separate array of size N * element dim
When a batch of experiences is requested, K experiences are sampled according to a random uniform distribution.
If 'use_cer', sampling will add the latest experience.
e.g. memory_spec
"memory": {
"name": "Replay",
"batch_size": 32,
"max_size": 10000,
"use_cer": true
}
'''
def __init__(self, memory_spec, body):
super().__init__(memory_spec, body)
util.set_attr(self, self.memory_spec, [
'batch_size',
'max_size',
'use_cer',
])
self.is_episodic = False
self.batch_idxs = None
self.size = 0 # total experiences stored
self.seen_size = 0 # total experiences seen cumulatively
self.head = -1 # index of most recent experience
# generic next_state buffer to store last next_states (allow for multiple for venv)
self.ns_idx_offset = self.body.env.num_envs if body.env.is_venv else 1
self.ns_buffer = deque(maxlen=self.ns_idx_offset)
# declare what data keys to store
self.data_keys = ['states', 'actions', 'rewards', 'next_states', 'dones']
self.reset()
def reset(self):
'''Initializes the memory arrays, size and head pointer'''
# set self.states, self.actions, ...
for k in self.data_keys:
if k != 'next_states': # reuse self.states
# list add/sample is over 10x faster than np, also simpler to handle
setattr(self, k, [None] * self.max_size)
self.size = 0
self.head = -1
self.ns_buffer.clear()
@lab_api
def update(self, state, action, reward, next_state, done):
'''Interface method to update memory'''
if self.body.env.is_venv:
for sarsd in zip(state, action, reward, next_state, done):
self.add_experience(*sarsd)
else:
self.add_experience(state, action, reward, next_state, done)
def add_experience(self, state, action, reward, next_state, done):
'''Implementation for update() to add experience to memory, expanding the memory size if necessary'''
# Move head pointer. Wrap around if necessary
self.head = (self.head + 1) % self.max_size
self.states[self.head] = state.astype(np.float16)
self.actions[self.head] = action
self.rewards[self.head] = reward
self.ns_buffer.append(next_state.astype(np.float16))
self.dones[self.head] = done
# Actually occupied size of memory
if self.size < self.max_size:
self.size += 1
self.seen_size += 1
# set to_train using memory counters head, seen_size instead of tick since clock will step by num_envs when on venv; to_train will be set to 0 after training step
algorithm = self.body.agent.algorithm
algorithm.to_train = algorithm.to_train or (self.seen_size > algorithm.training_start_step and self.head % algorithm.training_frequency == 0)
@lab_api
def sample(self):
'''
Returns a batch of batch_size samples. Batch is stored as a dict.
Keys are the names of the different elements of an experience. Values are an array of the corresponding sampled elements
e.g.
batch = {
'states' : states,
'actions' : actions,
'rewards' : rewards,
'next_states': next_states,
'dones' : dones}
'''
self.batch_idxs = self.sample_idxs(self.batch_size)
batch = {}
for k in self.data_keys:
if k == 'next_states':
batch[k] = sample_next_states(self.head, self.max_size, self.ns_idx_offset, self.batch_idxs, self.states, self.ns_buffer)
else:
batch[k] = util.batch_get(getattr(self, k), self.batch_idxs)
return batch
def sample_idxs(self, batch_size):
        '''Batch indices are sampled uniformly at random'''
batch_idxs = np.random.randint(self.size, size=batch_size)
if self.use_cer: # add the latest sample
batch_idxs[-1] = self.head
return batch_idxs
``` |
{
"source": "jmrichardson/fracdiff",
"score": 3
} |
#### File: tests/sklearn/test_stat.py
```python
import numpy as np
import pytest
from fracdiff.sklearn.stat import StatTester
class TestStat:
"""
Test `StatTester`.
"""
def _make_stationary(self, seed, n_samples):
np.random.seed(seed)
return np.random.randn(n_samples)
def _make_nonstationary(self, seed, n_samples):
np.random.seed(seed)
return np.random.randn(n_samples).cumsum()
@pytest.mark.parametrize("seed", [42])
@pytest.mark.parametrize("n_samples", [100, 1000, 10000])
def test_stationary(self, seed, n_samples):
X = self._make_stationary(seed, n_samples)
assert StatTester().pvalue(X) < 0.1
assert StatTester().is_stat(X)
@pytest.mark.parametrize("seed", [42])
@pytest.mark.parametrize("n_samples", [100, 1000, 10000])
def test_nonstationary(self, seed, n_samples):
X = self._make_nonstationary(seed, n_samples)
assert StatTester().pvalue(X) > 0.1
assert not StatTester().is_stat(X)
def test_method_valueerror(self):
tester = StatTester(method="nonexistent")
with pytest.raises(ValueError):
_ = tester.null_hypothesis
with pytest.raises(ValueError):
_ = tester.pvalue(np.ones(100))
``` |
{
"source": "jmrichardson/kappa",
"score": 2
} |
#### File: ingest/alphavantage/alphavantage.py
```python
import logplus
import plac
import configparser
import json
import uuid
from kafka import KafkaProducer
from celery import Celery
from alpha_vantage.timeseries import TimeSeries
# from alpha_vantage.techindicators import TechIndicators
# from alpha_vantage.sectorperformance import SectorPerformances
# from alpha_vantage.cryptocurrencies import CryptoCurrencies
# amqp://user:[email protected]:5672//
app = Celery('alphavantage', broker='amqp://guest:guest@localhost//')
@app.task
def getDailyAdjusted(symbol: "Equity ticker symbol"):
""" Download Alpha Vantage daily adjusted price history for equity
Keyword Arguments:
symbol: Symbol of equity
"""
# Create a unique id for this execution instance
uid = uuid.uuid4().hex.upper()[0:6]
# Setup logging (Log file path and context arguments)
lp = logplus.setup('../../../logs/alphavantage.log', '../../config/logging.yaml', symbol=symbol, uid=uid)
# Total execution time
    lp.info('Start ingest module Alphavantage: ' + symbol)
# Get AV API user key
config = configparser.ConfigParser()
config.read("config.ini")
key = config.get("default", "key")
# Get json string of daily adjusted values
# ts = TimeSeries(key=key, output_format='pandas')
ts = TimeSeries(key=key)
data, meta_data = ts.get_daily_adjusted(symbol)
# Create producer to kafka topic
producer = KafkaProducer(value_serializer=lambda v: json.dumps(v).encode('utf-8'), bootstrap_servers='192.168.1.100:9092')
# Append data to topic
lp.info('Sending data to topic')
topic = config.get("default", "topic")
producer.send(topic, data)
# Close producer
lp.info('Closing producer')
producer.flush()
producer.close()
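# Illustrative usage (assumes a running Celery worker and the broker configured above):
#   from alphavantage import getDailyAdjusted
#   getDailyAdjusted.delay("MSFT")   # enqueue asynchronously via Celery
#   getDailyAdjusted("MSFT")         # or call synchronously, as plac does below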
if __name__ == '__main__':
plac.call(getDailyAdjusted)
``` |
{
"source": "jmrichardson/numpy_ext",
"score": 2
} |
#### File: jmrichardson/numpy_ext/numpy_ext.py
```python
from functools import partial
from typing import Callable, Any, Union, Generator, Tuple, List
import numpy as np
from joblib import Parallel, delayed
Number = Union[int, float]
def expstep_range(
start: Number,
end: Number,
min_step: Number = 1,
step_mult: Number = 1,
round_func: Callable = None
) -> np.ndarray:
"""
Return spaced values within a given interval. Step is increased by a multiplier on each iteration.
Parameters
----------
start : int or float
Start of interval, inclusive
end : int or float
End of interval, exclusive
min_step : int or float, optional
Minimal step between values. Must be bigger than 0. Default is 1.
step_mult : int or float, optional
Multiplier by which to increase the step on each iteration. Must be bigger than 0. Default is 1.
round_func: Callable, optional
Vectorized rounding function, e.g. np.ceil, np.floor, etc. Default is None.
Returns
-------
np.ndarray
Array of exponentially spaced values.
Examples
--------
>>> expstep_range(1, 100, min_step=1, step_mult=1.5)
array([ 1. , 2. , 3.5 , 5.75 , 9.125 ,
14.1875 , 21.78125 , 33.171875 , 50.2578125 , 75.88671875])
>>> expstep_range(1, 100, min_step=1, step_mult=1.5, round_func=np.ceil)
array([ 1., 2., 4., 6., 10., 15., 22., 34., 51., 76.])
>>> expstep_range(start=-1, end=-100, min_step=1, step_mult=1.5)
array([ -1. , -2. , -3.5 , -5.75 ,
-9.125 , -14.1875 , -21.78125 , -33.171875 ,
-50.2578125 , -75.88671875])
Generate array of ints
>>> expstep_range(start=100, end=1, min_step=1, step_mult=1.5).astype(int)
array([100, 99, 97, 95, 91, 86, 79, 67, 50, 25])
"""
if step_mult <= 0:
        raise ValueError('step_mult should be bigger than 0')
if min_step <= 0:
raise ValueError('min_step should be bigger than 0')
last = start
values = []
step = min_step
sign = 1 if start < end else -1
while start < end and last < end or start > end and last > end:
values.append(last)
last += max(step, min_step) * sign
step = abs(step * step_mult)
values = np.array(values)
if not round_func:
return values
values = np.array(round_func(values))
_, idx = np.unique(values, return_index=True)
return values[np.sort(idx)]
def apply_map(func: Callable[[Any], Any], array: Union[List, np.ndarray]) -> np.ndarray:
"""
Apply a function element-wise to an array.
Parameters
----------
func : Callable[[Any], Any]
Function that accepts one argument and returns a single value.
array : Union[List, np.ndarray]
Input array or a list. Any lists will be converted to np.ndarray first.
Returns
-------
np.ndarray
Resulting array.
Examples
--------
>>> apply_map(lambda x: 0 if x < 3 else 1, [[2, 2], [3, 3]])
array([[0, 0],
[1, 1]])
"""
array = np.array(array)
array_view = array.flat
array_view[:] = [func(x) for x in array_view]
return array
#############################
# Operations with nans
#############################
def nans(shape: Union[int, Tuple[int]], dtype=np.float64) -> np.ndarray:
"""
Return a new array of a given shape and type, filled with np.nan values.
Parameters
----------
shape : int or tuple of ints
Shape of the new array, e.g., (2, 3) or 2.
dtype: data-type, optional
Returns
-------
np.ndarray
Array of np.nans of the given shape.
Examples
--------
>>> nans(3)
array([nan, nan, nan])
>>> nans((2, 2))
array([[nan, nan],
[nan, nan]])
>>> nans(2, np.datetime64)
array(['NaT', 'NaT'], dtype=datetime64)
"""
if np.issubdtype(dtype, np.integer):
        dtype = np.float64  # the bare np.float alias was removed in recent NumPy releases
arr = np.empty(shape, dtype=dtype)
arr.fill(np.nan)
return arr
def drop_na(array: np.ndarray) -> np.ndarray:
"""
Return a given array flattened and with nans dropped.
Parameters
----------
array : np.ndarray
Input array.
Returns
-------
np.ndarray
New array without nans.
Examples
--------
>>> drop_na(np.array([np.nan, 1, 2]))
array([1., 2.])
"""
return array[~np.isnan(array)]
def fill_na(array: np.ndarray, value: Any) -> np.ndarray:
"""
Return a copy of array with nans replaced with a given value.
Parameters
----------
array : np.ndarray
Input array.
value : Any
Value to replace nans with.
Returns
-------
np.ndarray
A copy of array with nans replaced with the given value.
Examples
--------
>>> fill_na(np.array([np.nan, 1, 2]), -1)
array([-1., 1., 2.])
"""
ar = array.copy()
ar[np.isnan(ar)] = value
return ar
def fill_not_finite(array: np.ndarray, value: Any = 0) -> np.ndarray:
"""
Return a copy of array with nans and infs replaced with a given value.
Parameters
----------
array : np.ndarray
Input array.
value : Any, optional
Value to replace nans and infs with. Default is 0.
Returns
-------
np.ndarray
A copy of array with nans and infs replaced with the given value.
Examples
--------
>>> fill_not_finite(np.array([np.nan, np.inf, 1, 2]), 99)
array([99., 99., 1., 2.])
"""
ar = array.copy()
ar[~np.isfinite(array)] = value
return ar
def prepend_na(array: np.ndarray, n: int) -> np.ndarray:
"""
Return a copy of array with nans inserted at the beginning.
Parameters
----------
array : np.ndarray
Input array.
n : int
Number of elements to insert.
Returns
-------
np.ndarray
New array with nans added at the beginning.
Examples
--------
>>> prepend_na(np.array([1, 2]), 2)
array([nan, nan, 1., 2.])
"""
return np.hstack(
(
nans(n, array[0].dtype) if len(array) and hasattr(array[0], 'dtype') else nans(n),
array
)
)
#############################
# window operations
#############################
def rolling(
array: np.ndarray,
window: int,
skip_na: bool = False,
as_array: bool = False
) -> Union[Generator[np.ndarray, None, None], np.ndarray]:
"""
Roll a fixed-width window over an array.
The result is either a 2-D array or a generator of slices, controlled by `as_array` parameter.
Parameters
----------
array : np.ndarray
Input array.
window : int
Size of the rolling window.
skip_na : bool, optional
If False, the sequence starts with (window-1) windows filled with nans. If True, those are omitted.
Default is False.
as_array : bool, optional
If True, return a 2-D array. Otherwise, return a generator of slices. Default is False.
Returns
-------
np.ndarray or Generator[np.ndarray, None, None]
Rolling window matrix or generator
Examples
--------
>>> rolling(np.array([1, 2, 3, 4, 5]), 2, as_array=True)
array([[nan, 1.],
[ 1., 2.],
[ 2., 3.],
[ 3., 4.],
[ 4., 5.]])
Usage with numpy functions
>>> arr = rolling(np.array([1, 2, 3, 4, 5]), 2, as_array=True)
>>> np.sum(arr, axis=1)
array([nan, 3., 5., 7., 9.])
"""
if not any(isinstance(window, t) for t in [int, np.integer]):
raise TypeError(f'Wrong window type ({type(window)}) int expected')
window = int(window)
if array.size < window:
raise ValueError('array.size should be bigger than window')
def rows_gen():
if not skip_na:
yield from (prepend_na(array[:i + 1], (window - 1) - i) for i in np.arange(window - 1))
starts = np.arange(array.size - (window - 1))
yield from (array[start:end] for start, end in zip(starts, starts + window))
return np.array([row for row in rows_gen()]) if as_array else rows_gen()
def rolling_apply(func: Callable, window: int, *arrays: np.ndarray, n_jobs: int = 1, **kwargs) -> np.ndarray:
"""
Roll a fixed-width window over an array or a group of arrays, producing slices.
Apply a function to each slice / group of slices, transforming them into a value.
Perform computations in parallel, optionally.
Return a new np.ndarray with the resulting values.
Parameters
----------
func : Callable
The function to apply to each slice or a group of slices.
window : int
Window size.
*arrays : list
List of input arrays.
n_jobs : int, optional
Parallel tasks count for joblib. If 1, joblib won't be used. Default is 1.
**kwargs : dict
Input parameters (passed to func, must be named).
Returns
-------
np.ndarray
Examples
--------
>>> arr = np.array([1, 2, 3, 4, 5])
>>> rolling_apply(sum, 2, arr)
array([nan, 3., 5., 7., 9.])
>>> arr2 = np.array([1.5, 2.5, 3.5, 4.5, 5.5])
>>> func = lambda a1, a2, k: (sum(a1) + max(a2)) * k
>>> rolling_apply(func, 2, arr, arr2, k=-1)
array([ nan, -5.5, -8.5, -11.5, -14.5])
"""
if not any(isinstance(window, t) for t in [int, np.integer]):
raise TypeError(f'Wrong window type ({type(window)}) int expected')
window = int(window)
if max(len(x.shape) for x in arrays) != 1:
raise ValueError('Wrong array shape. Supported only 1D arrays')
if len({array.size for array in arrays}) != 1:
raise ValueError('Arrays must be the same length')
def _apply_func_to_arrays(idxs):
return func(*[array[idxs[0]:idxs[-1] + 1] for array in arrays], **kwargs)
array = arrays[0]
rolls = rolling(
array if len(arrays) == n_jobs == 1 else np.arange(len(array)),
window=window,
skip_na=True
)
if n_jobs == 1:
if len(arrays) == 1:
arr = list(map(partial(func, **kwargs), rolls))
else:
arr = list(map(_apply_func_to_arrays, rolls))
else:
f = delayed(_apply_func_to_arrays)
arr = Parallel(n_jobs=n_jobs)(f(idxs[[0, -1]]) for idxs in rolls)
return prepend_na(arr, n=window - 1)
def expanding(
array: np.ndarray,
min_periods: int = 1,
skip_na: bool = True,
as_array: bool = False
) -> Union[Generator[np.ndarray, None, None], np.ndarray]:
"""
Roll an expanding window over an array.
The window size starts at min_periods and gets incremented by 1 on each iteration.
The result is either a 2-D array or a generator of slices, controlled by `as_array` parameter.
Parameters
----------
array : np.ndarray
Input array.
min_periods : int, optional
Minimum size of the window. Default is 1.
skip_na : bool, optional
If False, the windows of size less than min_periods are filled with nans. If True, they're dropped.
Default is True.
as_array : bool, optional
If True, return a 2-D array. Otherwise, return a generator of slices. Default is False.
Returns
-------
np.ndarray or Generator[np.ndarray, None, None]
Examples
--------
>>> expanding(np.array([1, 2, 3, 4, 5]), 3, as_array=True)
array([array([1, 2, 3]), array([1, 2, 3, 4]), array([1, 2, 3, 4, 5])],
dtype=object)
"""
if not any(isinstance(min_periods, t) for t in [int, np.integer]):
raise TypeError(f'Wrong min_periods type ({type(min_periods)}) int expected')
min_periods = int(min_periods)
if array.size < min_periods:
raise ValueError('array.size should be bigger than min_periods')
def rows_gen():
if not skip_na:
yield from (nans(i) for i in np.arange(1, min_periods))
yield from (array[:i] for i in np.arange(min_periods, array.size + 1))
return np.array([row for row in rows_gen()]) if as_array else rows_gen()
def expanding_apply(func: Callable, min_periods: int, *arrays: np.ndarray, n_jobs: int = 1, **kwargs) -> np.ndarray:
"""
Roll an expanding window over an array or a group of arrays producing slices.
The window size starts at min_periods and gets incremented by 1 on each iteration.
Apply a function to each slice / group of slices, transforming them into a value.
Perform computations in parallel, optionally.
Return a new np.ndarray with the resulting values.
Parameters
----------
func : Callable
The function to apply to each slice or a group of slices.
min_periods : int
Minimal size of expanding window.
*arrays : list
List of input arrays.
n_jobs : int, optional
Parallel tasks count for joblib. If 1, joblib won't be used. Default is 1.
**kwargs : dict
Input parameters (passed to func, must be named).
Returns
-------
np.ndarray
Examples
--------
>>> arr = np.array([1, 2, 3, 4, 5])
>>> expanding_apply(sum, 2, arr)
array([nan, 3., 6., 10., 15.])
>>> arr2 = np.array([1.5, 2.5, 3.5, 4.5, 5.5])
>>> func = lambda a1, a2, k: (sum(a1) + max(a2)) * k
>>> expanding_apply(func, 2, arr, arr2, k=-1)
array([ nan, -5.5, -9.5, -14.5, -20.5])
"""
if not any(isinstance(min_periods, t) for t in [int, np.integer]):
raise TypeError(f'Wrong min_periods type ({type(min_periods)}) int expected')
min_periods = int(min_periods)
if max(len(x.shape) for x in arrays) != 1:
raise ValueError('Supported only 1-D arrays')
if len({array.size for array in arrays}) != 1:
raise ValueError('Arrays must be the same length')
def _apply_func_to_arrays(idxs):
        return func(*[array[idxs.astype(int)] for array in arrays], **kwargs)
array = arrays[0]
rolls = expanding(
array if len(arrays) == n_jobs == 1 else np.arange(len(array)),
min_periods=min_periods,
skip_na=True
)
if n_jobs == 1:
if len(arrays) == 1:
arr = list(map(partial(func, **kwargs), rolls))
else:
arr = list(map(_apply_func_to_arrays, rolls))
else:
f = delayed(_apply_func_to_arrays)
arr = Parallel(n_jobs=n_jobs)(map(f, rolls))
return prepend_na(arr, n=min_periods - 1)
``` |
{
"source": "jmrichardson/pyts",
"score": 2
} |
#### File: approximation/tests/test_sfa.py
```python
import numpy as np
import pytest
from sklearn.feature_selection import f_classif
from pyts.approximation import MultipleCoefficientBinning
from pyts.approximation import SymbolicFourierApproximation
rng = np.random.RandomState(42)
n_samples, n_timestamps = 5, 8
X = rng.randn(n_samples, n_timestamps)
y = rng.randint(2, size=n_samples)
def _compute_expected_results(X, y=None, n_coefs=None, n_bins=4,
strategy='quantile', drop_sum=False, anova=False,
norm_mean=False, norm_std=False, alphabet=None):
"""Compute the expected results."""
X = np.asarray(X)
if norm_mean:
X -= X.mean(axis=1)[:, None]
if norm_std:
X /= X.std(axis=1)[:, None]
X_fft = np.fft.rfft(X)
X_fft = np.vstack([np.real(X_fft), np.imag(X_fft)])
X_fft = X_fft.reshape(n_samples, -1, order='F')
if drop_sum:
X_fft = X_fft[:, 2:-1]
else:
X_fft = np.hstack([X_fft[:, :1], X_fft[:, 2:-1]])
if n_coefs is not None:
if anova:
_, p = f_classif(X_fft, y)
support = np.argsort(p)[:n_coefs]
X_fft = X_fft[:, support]
else:
X_fft = X_fft[:, :n_coefs]
mcb = MultipleCoefficientBinning(n_bins=n_bins, strategy=strategy,
alphabet=alphabet)
arr_desired = mcb.fit_transform(X_fft)
return arr_desired
@pytest.mark.parametrize(
'params',
[({}),
({'n_coefs': 3}),
({'n_bins': 2}),
({'strategy': 'uniform'}),
({'drop_sum': True}),
({'anova': True}),
({'norm_mean': True, 'drop_sum': True}),
({'norm_std': True}),
({'norm_mean': True, 'norm_std': True, 'drop_sum': True}),
({'n_coefs': 2, 'drop_sum': True, 'anova': True})]
)
def test_actual_results(params):
"""Test that the actual results are the expected ones."""
arr_actual = SymbolicFourierApproximation(**params).fit_transform(X, y)
arr_desired = _compute_expected_results(X, y, **params)
np.testing.assert_array_equal(arr_actual, arr_desired)
@pytest.mark.parametrize(
'params',
[({}),
({'n_coefs': 3}),
({'n_bins': 2}),
({'strategy': 'uniform'}),
({'drop_sum': True}),
({'anova': True}),
({'norm_mean': True, 'drop_sum': True}),
({'norm_std': True}),
({'norm_mean': True, 'norm_std': True, 'drop_sum': True}),
({'n_coefs': 2, 'drop_sum': True, 'anova': True})]
)
def test_fit_transform(params):
"""Test that fit and transform yield the same results as fit_transform."""
arr_1 = SymbolicFourierApproximation(**params).fit(X, y).transform(X)
arr_2 = SymbolicFourierApproximation(**params).fit_transform(X, y)
np.testing.assert_array_equal(arr_1, arr_2)
```
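The `_compute_expected_results` helper above spells out what `SymbolicFourierApproximation` does internally: real FFT, optional ANOVA-based coefficient selection, then multiple-coefficient binning. A minimal direct-usage sketch on toy data (shapes, labels and parameters are illustrative only):
```python
import numpy as np
from pyts.approximation import SymbolicFourierApproximation
rng = np.random.RandomState(0)
X = rng.randn(5, 16)             # 5 toy series of length 16
y = np.array([0, 0, 1, 1, 0])    # labels, only needed because anova=True
sfa = SymbolicFourierApproximation(n_coefs=4, drop_sum=True, anova=True)
X_sfa = sfa.fit_transform(X, y)  # shape (5, 4): one symbol per kept Fourier coefficient
print(X_sfa.shape)
```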
#### File: classification/tests/test_saxvsm.py
```python
import numpy as np
from math import log
from sklearn.metrics.pairwise import cosine_similarity
from pyts.classification import SAXVSM
X = [[0, 0, 0, 1, 0, 0, 1, 1, 1],
[0, 1, 1, 1, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 0, 0, 1, 0]]
y = [0, 0, 1]
def test_actual_results_strategy_uniform():
"""Test that the actual results are the expected ones."""
# Data
X = [[0, 0, 0, 1, 0, 0, 1, 1, 1],
[0, 1, 1, 1, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 0, 0, 1, 0]]
y = [0, 0, 1]
clf = SAXVSM(window_size=4, word_size=4, n_bins=2, strategy='uniform',
numerosity_reduction=False, sublinear_tf=False)
decision_function_actual = clf.fit(X, y).decision_function(X)
# X_bow = ["aaab aaba abaa baab aabb abbb",
# "abbb bbba bbaa baab aabb abbb",
# "aaab aaba abaa baaa aaab aaba"]
assert clf.vocabulary_ == {0: 'aaab', 1: 'aaba', 2: 'aabb', 3: 'abaa',
4: 'abbb', 5: 'baaa', 6: 'baab', 7: 'bbaa',
8: 'bbba'}
freq = np.asarray([[1, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 2, 0, 1, 1, 1],
[2, 2, 0, 1, 0, 1, 0, 0, 0]])
tf = np.asarray([[1, 1, 2, 1, 3, 0, 2, 1, 1],
[2, 2, 0, 1, 0, 1, 0, 0, 0]])
idf = np.asarray([1, 1, log(2) + 1, 1, log(2) + 1, log(2) + 1, log(2) + 1,
log(2) + 1, log(2) + 1])
decision_function_desired = cosine_similarity(freq, tf * idf[None, :])
np.testing.assert_allclose(decision_function_actual,
decision_function_desired, atol=1e-5, rtol=0.)
pred_actual = clf.predict(X)
pred_desired = cosine_similarity(freq, tf * idf[None, :]).argmax(axis=1)
np.testing.assert_array_equal(pred_actual, pred_desired)
def test_actual_results_strategy_quantile():
"""Test that the actual results are the expected ones."""
# Data
X = [[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
[0.0, 0.3, 0.2, 0.4, 0.1, 0.5, 0.6, 0.7, 0.8, 0.9],
[0.0, 0.9, 0.1, 0.8, 0.2, 0.7, 0.3, 0.6, 0.4, 0.5]]
y = [0, 0, 1]
clf = SAXVSM(window_size=4, word_size=4, n_bins=2, strategy='quantile',
numerosity_reduction=False, sublinear_tf=False)
decision_function_actual = clf.fit(X, y).decision_function(X)
# X_bow = ["aabb aabb aabb aabb aabb aabb aabb",
# "abab baba abab aabb aabb aabb aabb",
# "abab baba abab baba abab baba abab"]
assert clf.vocabulary_ == {0: 'aabb', 1: 'abab', 2: 'baba'}
freq = np.asarray([[7, 0, 0],
[4, 2, 1],
[0, 4, 3]])
tf = np.asarray([[11, 2, 1],
[0, 4, 3]])
idf = np.asarray([log(2) + 1, 1, 1])
decision_function_desired = cosine_similarity(freq, tf * idf[None, :])
np.testing.assert_allclose(decision_function_actual,
decision_function_desired, atol=1e-5, rtol=0.)
pred_actual = clf.fit(X, y).predict(X)
pred_desired = cosine_similarity(freq, tf * idf[None, :]).argmax(axis=1)
np.testing.assert_array_equal(pred_actual, pred_desired)
```
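The tests above build the bag of words and the tf-idf weights by hand; using the estimator directly on the same toy data looks like this (parameters mirror the uniform-strategy test):
```python
import numpy as np
from pyts.classification import SAXVSM
X_train = np.array([[0, 0, 0, 1, 0, 0, 1, 1, 1],
                    [0, 1, 1, 1, 0, 0, 1, 1, 1],
                    [0, 0, 0, 1, 0, 0, 0, 1, 0]], dtype=float)
y_train = np.array([0, 0, 1])
clf = SAXVSM(window_size=4, word_size=4, n_bins=2, strategy='uniform',
             numerosity_reduction=False, sublinear_tf=False)
clf.fit(X_train, y_train)
print(clf.predict(X_train))            # class whose tf-idf vector is most similar to each sample
print(clf.decision_function(X_train))  # cosine similarities against each class vector
```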
#### File: pyts/classification/time_series_forest.py
```python
from math import ceil
from numba import njit
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.utils.validation import (
check_array, check_is_fitted, check_random_state)
@njit()
def extract_features(X, n_samples, n_windows, indices):
X_new = np.empty((n_samples, 3 * n_windows))
for j in range(n_windows):
start, end = indices[j]
arange = np.arange((start - end + 1) / 2, (end + 1 - start) / 2)
if end - start == 1:
var_arange = 1.
else:
var_arange = np.sum(arange ** 2)
for i in range(n_samples):
mean = np.mean(X[i, start:end])
X_new[i, 3 * j] = mean
X_new[i, 3 * j + 1] = np.std(X[i, start:end])
X_new[i, 3 * j + 2] = (
np.sum((X[i, start:end] - mean) * arange) / var_arange
)
return X_new
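# The third feature filled in above is the ordinary least-squares slope of the
# window: with centred time indices t_i = i - (window_len - 1) / 2 (the `arange`
# array), slope = sum((x_i - mean(x)) * t_i) / sum(t_i ** 2). A window of length
# one would make that denominator zero, hence the `end - start == 1` guard.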
class WindowFeatureExtractor(BaseEstimator, TransformerMixin):
"""Feature extractor over a window.
This transformer extracts 3 features from each window: the mean, the
standard deviation and the slope.
Parameters
----------
n_windows : int or float (default = 1.)
The number of windows from which features are extracted.
min_window_size : int or float (default = 1)
The minimum length of the windows. If float, it represents a percentage
of the size of each time series.
random_state : None, int or RandomState instance (default = None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator. If RandomState instance, random_state is the random number
generator. If None, the random number generator is the RandomState
instance used by `np.random`.
Attributes
----------
indices_ : array, shape = (n_windows, 2)
The indices for the windows.
The first column consists of the starting indices (included)
of the windows. The second column consists of the ending indices
(excluded) of the windows.
"""
def __init__(self, n_windows=1., min_window_size=1, random_state=None):
self.n_windows = n_windows
self.min_window_size = min_window_size
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model according to the given training data.
It generates the indices of the windows from which the features will be
extracted.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Univariate time series.
y
Ignored
Returns
-------
self : object
"""
# Check
X = check_array(X, dtype='float64')
n_timestamps = X.shape[1]
n_windows, min_window_size, rng = self._check_params(X)
# Generate the start and end indices
start = rng.randint(0, n_timestamps - min_window_size, size=n_windows)
end = rng.randint(start + min_window_size, n_timestamps + 1,
size=n_windows)
self.indices_ = np.c_[start, end]
return self
def transform(self, X):
"""Transform the provided data.
It extracts the three features from all the selected windows
for all the samples.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Univariate time series.
Returns
-------
X_new : array, shape = (n_samples, 3 * n_windows)
Extracted features.
"""
X = check_array(X, dtype='float64')
check_is_fitted(self)
# Extract the features from each window
n_samples = X.shape[0]
n_windows = self.indices_.shape[0]
return extract_features(X, n_samples, n_windows, self.indices_)
def _check_params(self, X):
n_samples, n_timestamps = X.shape
if not isinstance(self.n_windows,
(int, np.integer, float, np.floating)):
raise TypeError("'n_windows' must be an integer or a float.")
if isinstance(self.n_windows, (int, np.integer)):
if self.n_windows < 1:
raise ValueError(
"If 'n_windows' is an integer, it must be positive "
"(got {0}).".format(self.n_windows)
)
n_windows = self.n_windows
else:
if self.n_windows <= 0:
raise ValueError(
"If 'n_windows' is a float, it must be greater "
"than 0 (got {0}).".format(self.n_windows)
)
n_windows = ceil(self.n_windows * n_timestamps)
if not isinstance(self.min_window_size,
(int, np.integer, float, np.floating)):
raise TypeError("'min_window_size' must be an integer or a float.")
if isinstance(self.min_window_size, (int, np.integer)):
if not 1 <= self.min_window_size <= n_timestamps:
raise ValueError(
"If 'min_window_size' is an integer, it must be greater "
"than or equal to 1 and lower than or equal to "
"n_timestamps (got {0}).".format(self.min_window_size)
)
min_window_size = self.min_window_size
else:
if not 0 < self.min_window_size <= 1:
raise ValueError(
"If 'min_window_size' is a float, it must be greater "
"than 0 and lower than or equal to 1 (got {}).".
format(self.min_window_size)
)
min_window_size = ceil(self.min_window_size * n_timestamps)
rng = check_random_state(self.random_state)
return n_windows, min_window_size, rng
class TimeSeriesForest(BaseEstimator, ClassifierMixin):
"""A random forest classifier for time series.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
This transformer extracts 3 features from each window: the mean, the
standard deviation and the slope. The total number of features is thus
equal to ``3 * n_windows``. Then a random forest is built using
these features as input data.
Parameters
----------
n_estimators : int (default = 500)
The number of trees in the forest.
n_windows : int or float (default = 1.)
The number of windows from which features are extracted.
min_window_size : int or float (default = 1)
The minimum length of the windows. If float, it represents a percentage
of the size of each time series.
criterion : str (default = "entropy")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_depth : integer or None (default = None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
``min_samples_split`` samples.
min_samples_split : int or float (default = 2)
The minimum number of samples required to split an internal node:
- If int, then consider ``min_samples_split`` as the minimum number.
- If float, then ``min_samples_split`` is a fraction and
``ceil(min_samples_split * n_samples)`` are the minimum
number of samples for each split.
min_samples_leaf : int or float (default = 1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model.
- If int, then consider ``min_samples_leaf`` as the minimum number.
- If float, then ``min_samples_leaf`` is a fraction and
``ceil(min_samples_leaf * n_samples)`` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float (default = 0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node.
max_features : int, float, str or None (default = "auto")
The number of features to consider when looking for the best split:
- If int, then consider ``max_features`` features at each split.
- If float, then ``max_features`` is a fraction and
``int(max_features * n_features)`` features are considered at each
split.
- If "auto", then ``max_features=sqrt(n_features)``.
- If "sqrt", then ``max_features=sqrt(n_features)`` (same as "auto").
- If "log2", then ``max_features=log2(n_features)``.
- If None, then ``max_features=n_features``.
max_leaf_nodes : int or None (default = None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float (default = 0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
bootstrap : bool (default = True)
Whether bootstrap samples are used when building trees. If False, the
        whole dataset is used to build each tree.
oob_score : bool (default = False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : int or None, optional (default = None)
The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
:meth:`decision_path` and :meth:`apply` are all parallelized over the
trees. ``None`` means 1 unless in a ``joblib.parallel_backend``
context. ``-1`` means using all processors.
random_state : int, RandomState instance or None (default = None)
Controls both the randomness of the bootstrapping of the samples used
when building trees (if ``bootstrap=True``) and the sampling of the
features to consider when looking for the best split at each node
(if ``max_features < n_features``).
verbose : int (default = 0)
Controls the verbosity when fitting and predicting.
class_weight : dict, "balanced", "balanced_subsample" or None (default = None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
ccp_alpha : float (default = 0.)
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed.
It must be non-negative.
max_samples : int, float or None (default = None)
If bootstrap is True, the number of samples to draw from X
to train each base estimator:
- If None (default), then draw ``X.shape[0]`` samples.
- If int, then draw ``max_samples`` samples.
- If float, then draw ``max_samples * X.shape[0]`` samples. Thus,
``max_samples`` should be in the interval `(0, 1)`.
Attributes
----------
base_estimator_ : DecisionTreeClassifier
The child estimator template used to create the collection of fitted
sub-estimators.
classes_ : array of shape (n_classes,) or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
feature_importances_ : array, shape = (n_features,)
The feature importances (the higher, the more important the feature).
indices_ : array, shape = (n_windows, 2)
The indices for the windows.
The first column consists of the starting indices (included)
of the windows. The second column consists of the ending indices
(excluded) of the windows.
n_features_ : int
The number of features when ``fit`` is performed.
    oob_decision_function_ : None or array, shape = (n_samples, n_classes)
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN. This attribute is not None
only when ``oob_score`` is True.
oob_score_ : None or float
Score of the training dataset obtained using an out-of-bag estimate.
This attribute is not None only when ``oob_score`` is True.
Examples
--------
>>> from pyts.datasets import load_gunpoint
>>> from pyts.classification import TimeSeriesForest
>>> X_train, X_test, y_train, y_test = load_gunpoint(return_X_y=True)
>>> clf = TimeSeriesForest(random_state=43)
>>> clf.fit(X_train, y_train)
TimeSeriesForest(...)
>>> clf.score(X_test, y_test)
0.97333...
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] <NAME>, <NAME>, <NAME> and <NAME>, "A Time Series
Forest for Classification and Feature Extraction".
Information Sciences, 239, 142-153 (2013).
.. [2] <NAME>, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
""" # noqa: E501
def __init__(self,
n_estimators=500,
n_windows=1.,
min_window_size=1,
criterion="entropy",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
bootstrap=True,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
class_weight=None,
ccp_alpha=0.0,
max_samples=None):
self.n_estimators = n_estimators
self.n_windows = n_windows
self.min_window_size = min_window_size
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.class_weight = class_weight
self.ccp_alpha = ccp_alpha
self.max_samples = max_samples
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Univariate time series.
Returns
-------
X_leaves : array_like, shape = (n_samples, n_estimators)
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
check_is_fitted(self)
X = check_array(X, dtype='float64')
X_new = self._pipeline['fe'].transform(X)
return self._pipeline['rfc'].apply(X_new)
def decision_path(self, X):
"""Return the decision path in the forest.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Univariate time series.
Returns
-------
indicator : sparse csr array, shape = (n_samples, n_nodes)
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the nodes.
n_nodes_ptr : array, shape = (n_estimators + 1,)
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
check_is_fitted(self)
X = check_array(X, dtype='float64')
X_new = self._pipeline['fe'].transform(X)
return self._pipeline['rfc'].decision_path(X_new)
def fit(self, X, y):
"""Fit the model according to the given training data.
        It builds a forest of trees from the training set.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Univariate time series.
y : array-like, shape = (n_samples,)
Class labels for each sample.
Returns
-------
self : object
"""
# Create and fit the pipeline
feature_extractor = WindowFeatureExtractor(
n_windows=self.n_windows, min_window_size=self.min_window_size,
random_state=self.random_state
)
rfc = RandomForestClassifier(
n_estimators=self.n_estimators,
criterion=self.criterion,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
min_impurity_decrease=self.min_impurity_decrease,
bootstrap=self.bootstrap,
oob_score=self.oob_score,
n_jobs=self.n_jobs,
random_state=self.random_state,
verbose=self.verbose,
class_weight=self.class_weight,
ccp_alpha=self.ccp_alpha,
max_samples=self.max_samples,
warm_start=False
)
self._pipeline = Pipeline([('fe', feature_extractor), ('rfc', rfc)])
self._pipeline.fit(X, y)
# Get attributes
self.base_estimator_ = self._pipeline['rfc'].base_estimator_
self.classes_ = self._pipeline['rfc'].classes_
self.estimators_ = self._pipeline['rfc'].estimators_
self.feature_importances_ = self._pipeline['rfc'].feature_importances_
self.indices_ = self._pipeline['fe'].indices_
self.n_features_ = self._pipeline['rfc'].n_features_
self.oob_decision_function_ = getattr(
self._pipeline['rfc'], 'oob_decision_function_', None)
self.oob_score_ = getattr(self._pipeline['rfc'], 'oob_score_', None)
return self
def predict(self, X):
"""Predict class for X.
The predicted class of an input time series is a vote by the trees
in the forest, weighted by their probability estimates.
That is, the predicted class is the one with highest mean
probability estimate across the trees.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Univariate time series.
Returns
-------
y : array, shape = (n_samples,)
The predicted classes.
"""
check_is_fitted(self)
return self._pipeline.predict(X)
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input time series are computed
as the mean predicted class probabilities of the trees in the forest.
The class probability of a single tree is the fraction of samples
of the same class in a leaf.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Univariate time series.
Returns
-------
p : array, shape = (n_samples, n_classes)
The class probabilities of the input time series.
The order of the classes corresponds to that in the
attribute `classes_`.
"""
check_is_fitted(self)
return self._pipeline.predict_proba(X)
def score(self, X, y):
"""Return the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Test samples.
y : array-like, shape = (n_samples,)
True labels for X.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
check_is_fitted(self)
return self._pipeline.score(X, y)
```
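`WindowFeatureExtractor` can also be used on its own to turn each series into per-window (mean, std, slope) triplets before they reach the random forest. A small sketch with random data, where the shapes rather than the values are the point:
```python
import numpy as np
from pyts.classification.time_series_forest import WindowFeatureExtractor
rng = np.random.RandomState(42)
X = rng.randn(10, 50)                     # 10 series of 50 timestamps
fe = WindowFeatureExtractor(n_windows=5, min_window_size=4, random_state=42)
X_new = fe.fit_transform(X)
print(fe.indices_.shape)                  # (5, 2): [start, end) of each random window
print(X_new.shape)                        # (10, 15): mean, std and slope for each of the 5 windows
```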
#### File: multivariate/transformation/multivariate.py
```python
import numpy as np
from scipy.sparse import csr_matrix, hstack
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.utils.validation import check_is_fitted
from ..utils import check_3d_array
class MultivariateTransformer(BaseEstimator, TransformerMixin):
r"""Transformer for multivariate time series.
It provides a convenient class to transform multivariate time series with
transformers that can only deal with univariate time series.
Parameters
----------
estimator : estimator object or list thereof
Transformer. If one estimator is provided, it is cloned and each clone
transforms one feature. If a list of estimators is provided, each
estimator transforms one feature.
flatten : bool (default = True)
Affect shape of transform output. If True, ``transform``
returns an array with shape (n_samples, \*). If False, the output of
``transform`` from each estimator must have the same shape and
``transform`` returns an array with shape (n_samples, n_features, \*).
Ignored if the transformers return sparse matrices.
Attributes
----------
estimators_ : list of estimator objects
The collection of fitted transformers.
Examples
--------
>>> from pyts.datasets import load_basic_motions
>>> from pyts.multivariate.transformation import MultivariateTransformer
>>> from pyts.image import GramianAngularField
>>> X, _, _, _ = load_basic_motions(return_X_y=True)
>>> transformer = MultivariateTransformer(GramianAngularField(),
... flatten=False)
>>> X_new = transformer.fit_transform(X)
>>> X_new.shape
(40, 6, 100, 100)
"""
def __init__(self, estimator, flatten=True):
self.estimator = estimator
self.flatten = flatten
def fit(self, X, y=None):
"""Pass.
Parameters
----------
X : array-like, shape = (n_samples, n_features, n_timestamps)
Multivariate time series.
y : None or array-like, shape = (n_samples,) (default = None)
Class labels.
Returns
-------
self : object
"""
X = check_3d_array(X)
_, n_features, _ = X.shape
self._check_params(n_features)
for i, transformer in enumerate(self.estimators_):
transformer.fit(X[:, i, :], y)
return self
def transform(self, X):
r"""Apply transform to each feature.
Parameters
----------
X : array-like, shape = (n_samples, n_features, n_timestamps)
Multivariate time series.
Returns
-------
X_new : array, shape = (n_samples, *) or (n_samples, n_features, *)
Transformed time series.
"""
X = check_3d_array(X)
n_samples, _, _ = X.shape
check_is_fitted(self, 'estimators_')
X_transformed = [transformer.transform(X[:, i, :])
for i, transformer in enumerate(self.estimators_)]
all_sparse = np.all([isinstance(X_transformed_i, csr_matrix)
for X_transformed_i in X_transformed])
if all_sparse:
X_new = hstack(X_transformed)
else:
X_new = [self._convert_to_array(X_transformed_i)
for X_transformed_i in X_transformed]
ndims = [X_new_i.ndim for X_new_i in X_new]
shapes = [X_new_i.shape for X_new_i in X_new]
one_dim = (np.unique(ndims).size == 1)
if one_dim:
one_shape = np.unique(shapes, axis=0).shape[0] == 1
else:
one_shape = False
if (not one_shape) or self.flatten:
X_new = [X_new_i.reshape(n_samples, -1) for X_new_i in X_new]
X_new = np.concatenate(X_new, axis=1)
else:
X_new = np.asarray(X_new)
axes = [1, 0] + [i for i in range(2, X_new.ndim)]
X_new = np.transpose(X_new, axes=axes)
return X_new
def _check_params(self, n_features):
"""Check parameters."""
transformer = (isinstance(self.estimator, BaseEstimator)
and hasattr(self.estimator, 'transform'))
if transformer:
self.estimators_ = [clone(self.estimator)
for _ in range(n_features)]
elif isinstance(self.estimator, list):
if len(self.estimator) != n_features:
raise ValueError(
"If 'estimator' is a list, its length must be equal to "
"the number of features ({0} != {1})"
.format(len(self.estimator), n_features)
)
for i, estimator in enumerate(self.estimator):
if not (isinstance(estimator, BaseEstimator)
and hasattr(estimator, 'transform')):
raise ValueError("Estimator {} must be a transformer."
.format(i))
self.estimators_ = self.estimator
else:
raise TypeError(
"'estimator' must be a transformer that inherits from "
"sklearn.base.BaseEstimator or a list thereof.")
@staticmethod
def _convert_to_array(X):
"""Convert the input data to an array if necessary."""
if isinstance(X, csr_matrix):
return X.A
elif isinstance(X, np.ndarray):
return X
else:
raise ValueError('Unexpected type for X: {}.'
.format(type(X).__name__))
```
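A short usage sketch of the per-feature `estimator` list described in the `MultivariateTransformer` docstring above. The estimator mix (`GramianAngularField` plus `RecurrencePlot`) and the resulting flattened shape are illustrative assumptions, not part of the original file.
```python
# Sketch: one transformer per feature via the list form of `estimator`.
# Assumes the basic-motions data used in the docstring example
# (40 samples, 6 features, 100 timestamps).
from pyts.datasets import load_basic_motions
from pyts.image import GramianAngularField, RecurrencePlot
from pyts.multivariate.transformation import MultivariateTransformer

X, _, _, _ = load_basic_motions(return_X_y=True)
# The list must contain exactly one transformer per feature (6 here).
estimators = [GramianAngularField() if i % 2 == 0 else RecurrencePlot()
              for i in range(X.shape[1])]
transformer = MultivariateTransformer(estimators, flatten=True)
X_flat = transformer.fit_transform(X)
print(X_flat.shape)  # e.g. (40, 60000): each 100x100 image flattened, then concatenated
```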
#### File: utils/tests/test_utils.py
```python
import numpy as np
import pytest
import re
from pyts.multivariate.utils import check_3d_array
n_samples, n_features, n_timestamps = 40, 3, 30
rng = np.random.RandomState(42)
X = rng.randn(n_samples, n_features, n_timestamps)
@pytest.mark.parametrize(
'X, err_msg',
[(X[0, 0], "X must be 3-dimensional (got 1)."),
(X[0], "X must be 3-dimensional (got 2).")]
)
def test_3d_input(X, err_msg):
"""Test input data validation."""
with pytest.raises(ValueError, match=re.escape(err_msg)):
check_3d_array(X)
```
#### File: preprocessing/tests/test_imputer.py
```python
import numpy as np
import pytest
import re
from pyts.preprocessing import InterpolationImputer
X = [[np.nan, 1, 2, 3, np.nan, 5, 6, np.nan]]
@pytest.mark.parametrize(
'params, error, err_msg',
[({'missing_values': np.inf}, ValueError,
"'missing_values' cannot be infinity."),
({'missing_values': "3"}, ValueError,
"'missing_values' must be an integer, a float, None or np.nan "
"(got {0!s})".format("3")),
({'strategy': 'whoops'}, ValueError,
"'strategy' must be an integer or one of 'linear', 'nearest', "
"'zero', 'slinear', 'quadratic', 'cubic', 'previous', 'next' "
"(got {0})".format('whoops'))]
)
def test_parameter_check(params, error, err_msg):
"""Test parameter validation."""
imputer = InterpolationImputer(**params)
with pytest.raises(error, match=re.escape(err_msg)):
imputer.transform(X)
@pytest.mark.parametrize(
'params, X, arr_desired',
[({'missing_values': None}, [[None, 10, 8, None, 4, 2, None]],
[[12, 10, 8, 6, 4, 2, 0]]),
({'missing_values': np.nan}, [[np.nan, 10, 8, np.nan, 4, 2, np.nan]],
[[12, 10, 8, 6, 4, 2, 0]]),
({'missing_values': 45.}, [[45., 10, 8, 45., 4, 2, 45.]],
[[12, 10, 8, 6, 4, 2, 0]]),
({'missing_values': 78}, [[78, 10, 8, 78, 4, 2, 78]],
[[12, 10, 8, 6, 4, 2, 0]]),
({'missing_values': None, 'strategy': 'quadratic'},
[[None, 9, 4, None, 0, 1, None]], [[16, 9, 4, 1, 0, 1, 4]]),
({'missing_values': None, 'strategy': 'previous'},
[[5, 9, 4, None, 0, 1, None]], [[5, 9, 4, 4, 0, 1, 1]]),
({'missing_values': None, 'strategy': 'next'},
[[None, 9, 4, None, 0, 1, 8]], [[9, 9, 4, 0, 0, 1, 8]])]
)
def test_actual_results(params, X, arr_desired):
"""Test that the actual results are the expected ones."""
imputer = InterpolationImputer(**params)
arr_actual = imputer.fit_transform(X)
np.testing.assert_allclose(arr_actual, arr_desired, rtol=0, atol=1e-5)
```
#### File: transformation/tests/test_bag_of_patterns.py
```python
import numpy as np
import pytest
from pyts.transformation import BagOfPatterns
X = [[0, 2, 1, 3, 4, 2, 1, 0, 3, 1, 2, 0],
[2, 0, 1, 3, 2, 4, 1, 2, 0, 1, 3, 2]]
@pytest.mark.parametrize(
'params, vocab_desired, arr_desired',
[({'window_size': 4, 'word_size': 4, 'sparse': False},
{0: 'abdc', 1: 'acbd', 2: 'acdb', 3: 'adbc', 4: 'bacd', 5: 'badb',
6: 'bdab', 7: 'cabd', 8: 'cbad', 9: 'cbda', 10: 'cdba', 11: 'dbca',
12: 'dcba'},
[[0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1],
[2, 1, 0, 0, 0, 0, 2, 2, 0, 1, 0, 1, 0]]),
({'window_size': 4, 'n_bins': 2, 'word_size': 4},
{0: 'aaba', 1: 'aabb', 2: 'abaa', 3: 'abab', 4: 'abba',
5: 'baab', 6: 'baba', 7: 'bbaa'},
[[1, 1, 0, 2, 1, 1, 1, 1], [0, 2, 2, 1, 0, 2, 2, 0]]),
({'window_size': 6, 'n_bins': 2, 'word_size': 2},
{0: 'ab', 1: 'ba'}, [[2, 2], [2, 1]]),
({'window_size': 6, 'n_bins': 2, 'word_size': 2,
'numerosity_reduction': False},
{0: 'ab', 1: 'ba'}, [[3, 4], [4, 3]])]
)
def test_actual_results(params, vocab_desired, arr_desired):
"""Test that the actual results are the expected ones."""
bop = BagOfPatterns(**params)
arr_actual = bop.fit_transform(X)
assert bop.vocabulary_ == vocab_desired
if isinstance(arr_actual, np.ndarray):
np.testing.assert_array_equal(arr_actual, arr_desired)
else:
np.testing.assert_array_equal(arr_actual.A, arr_desired)
@pytest.mark.parametrize(
'params',
[{'window_size': 4, 'word_size': 4},
{'window_size': 4, 'word_size': 4, 'sparse': False},
{'window_size': 4, 'word_size': 4, 'n_bins': 2},
{'window_size': 4, 'word_size': 4, 'numerosity_reduction': False},
{'window_size': 4, 'word_size': 4, 'norm_mean': 1, 'norm_std': 1},
{'window_size': 4, 'word_size': 4, 'overlapping': False},
{'window_size': 4, 'word_size': 4, 'strategy': 'normal'},
{'window_size': 4, 'word_size': 4, 'window_step': 2},
{'window_size': 4, 'word_size': 4, 'alphabet': ['d', 'c', 'b', 'a']}]
)
def test_fit_transform(params):
"""Test that fit_transform and fit then transform yield same results."""
bop_1, bop_2 = BagOfPatterns(**params), BagOfPatterns(**params)
arr_1 = bop_1.fit_transform(X)
arr_2 = bop_2.fit(X).transform(X)
assert bop_1.vocabulary_ == bop_2.vocabulary_
if isinstance(arr_1, np.ndarray):
np.testing.assert_array_equal(arr_1, arr_2)
else:
np.testing.assert_array_equal(arr_1.A, arr_2.A)
```
#### File: transformation/tests/test_boss.py
```python
import numpy as np
import pytest
import re
from scipy.sparse import csr_matrix
from sklearn.feature_extraction.text import CountVectorizer
from pyts.transformation import BOSS
from pyts.approximation import SymbolicFourierApproximation
n_samples, n_timestamps, n_classes = 8, 200, 2
rng = np.random.RandomState(42)
X = rng.randn(n_samples, n_timestamps)
y = rng.randint(n_classes, size=n_samples)
@pytest.mark.parametrize(
'params, error, err_msg',
[({'word_size': "3"}, TypeError, "'word_size' must be an integer."),
({'window_size': {}}, TypeError,
"'window_size' must be an integer or a float."),
({'window_step': {}}, TypeError,
"'window_step' must be an integer or a float."),
({'word_size': 0}, ValueError, "'word_size' must be a positive integer."),
({'window_size': 0, 'drop_sum': True}, ValueError,
"If 'window_size' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to (n_timestamps - 1) if 'drop_sum=True'."),
({'window_size': n_timestamps, 'drop_sum': True}, ValueError,
"If 'window_size' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to (n_timestamps - 1) if 'drop_sum=True'."),
({'window_size': 0}, ValueError,
"If 'window_size' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to n_timestamps if 'drop_sum=False'."),
({'window_size': n_timestamps + 1}, ValueError,
"If 'window_size' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to n_timestamps if 'drop_sum=False'."),
({'window_size': 1.5}, ValueError,
"If 'window_size' is a float, it must be greater than 0 and lower than "
"or equal to 1."),
({'window_step': 0}, ValueError,
"If 'window_step' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to n_timestamps."),
({'window_step': n_timestamps + 1}, ValueError,
"If 'window_step' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to n_timestamps."),
({'window_step': 0.}, ValueError,
"If 'window_step' is a float, it must be greater than 0 and lower than "
"or equal to 1."),
({'window_step': 1.2}, ValueError,
"If 'window_step' is a float, it must be greater than 0 and lower than "
"or equal to 1."),
({'window_size': 4, 'drop_sum': True}, ValueError,
"'word_size' must be lower than or equal to (window_size - 1) if "
"'drop_sum=True'."),
({'window_size': 3}, ValueError,
"'word_size' must be lower than or equal to window_size if "
"'drop_sum=False'.")]
)
def test_parameter_check(params, error, err_msg):
"""Test parameter validation."""
boss = BOSS(**params)
with pytest.raises(error, match=re.escape(err_msg)):
boss.fit(X, y)
@pytest.mark.parametrize(
'sparse, instance', [(True, csr_matrix), (False, np.ndarray)])
def test_sparse_dense(sparse, instance):
"""Test that the expected type is returned."""
weasel = BOSS(sparse=sparse)
assert isinstance(weasel.fit(X, y).transform(X), instance)
assert isinstance(weasel.fit_transform(X, y), instance)
def test_accurate_results_without_numerosity_reduction():
"""Test that the actual results are the expected ones."""
boss = BOSS(
word_size=4, n_bins=3, window_size=100, window_step=100,
anova=False, drop_sum=False, norm_mean=False, norm_std=False,
strategy='quantile', alphabet=None, numerosity_reduction=False
)
X_windowed = X.reshape(8, 2, 100).reshape(16, 100)
sfa = SymbolicFourierApproximation(
n_coefs=4, drop_sum=False, anova=False, norm_mean=False,
norm_std=False, n_bins=3, strategy='quantile', alphabet=None
)
y_repeated = np.repeat(y, 2)
X_sfa = sfa.fit_transform(X_windowed, y_repeated)
X_word = np.asarray([''.join(X_sfa[i]) for i in range(16)])
X_word = X_word.reshape(8, 2)
X_bow = np.asarray([' '.join(X_word[i]) for i in range(8)])
vectorizer = CountVectorizer()
arr_desired = vectorizer.fit_transform(X_bow).toarray()
vocabulary_desired = {value: key for key, value in
vectorizer.vocabulary_.items()}
arr_actual = boss.fit_transform(X, y).toarray()
np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0)
assert boss.vocabulary_ == vocabulary_desired
arr_actual = boss.fit(X, y).transform(X).toarray()
np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0)
assert boss.vocabulary_ == vocabulary_desired
def test_accurate_results_floats():
"""Test that the actual results are the expected ones."""
boss = BOSS(
word_size=4, n_bins=3, window_size=0.5, window_step=0.5,
anova=False, drop_sum=False, norm_mean=False, norm_std=False,
strategy='quantile', alphabet=None, numerosity_reduction=True
)
X_windowed = X.reshape(8, 2, 100).reshape(16, 100)
sfa = SymbolicFourierApproximation(
n_coefs=4, drop_sum=False, anova=False, norm_mean=False,
norm_std=False, n_bins=3, strategy='quantile', alphabet=None
)
y_repeated = np.repeat(y, 2)
X_sfa = sfa.fit_transform(X_windowed, y_repeated)
X_word = np.asarray([''.join(X_sfa[i]) for i in range(16)])
X_word = X_word.reshape(8, 2)
not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1], np.full(8, True)]
X_bow = np.asarray([' '.join(X_word[i, not_equal[i]]) for i in range(8)])
vectorizer = CountVectorizer()
arr_desired = vectorizer.fit_transform(X_bow).toarray()
vocabulary_desired = {value: key for key, value in
vectorizer.vocabulary_.items()}
arr_actual_1 = boss.fit_transform(X, None).toarray()
np.testing.assert_allclose(arr_actual_1, arr_desired, atol=1e-5, rtol=0)
assert boss.vocabulary_ == vocabulary_desired
arr_actual_2 = boss.fit(X, None).transform(X).toarray()
np.testing.assert_allclose(arr_actual_2, arr_desired, atol=1e-5, rtol=0)
assert boss.vocabulary_ == vocabulary_desired
``` |
{
"source": "jmrichardson/vmg",
"score": 2
} |
#### File: src/parking_spot/spots-parralel.py
```python
import os
from keras.models import load_model
import cv2
import numpy as np
import pickle
import configparser
from datetime import datetime, time
import multiprocessing
from functools import partial
# from keras import backend as K; K.set_session(K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)))
def init():
global class_dictionary
class_dictionary = {}
class_dictionary[0] = 'empty'
class_dictionary[1] = 'occupied'
global model
model = load_model('spots.h5')
def classify_image(image, spot):
# from keras.models import load_model
process = multiprocessing.current_process()
# Get coordinates
(x1, y1, x2, y2) = spot
# crop the image to just parking spot
spot_img = image[y1:y2, x1:x2]
spot_img = cv2.resize(spot_img, (210, 380))
spot_img = spot_img / 255
spot_img = np.expand_dims(spot_img, axis=0)
# Classify parking spot as empty or occupied
start = datetime.now()
class_predicted = model.predict(spot_img)
now = datetime.now()
print(str(process.pid) + ":" + str(now - start))
inID = np.argmax(class_predicted[0])
label = class_dictionary[inID]
return label
if __name__ == '__main__':
multiprocessing.freeze_support()
first_iter = True
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
# Get config
config = configparser.ConfigParser()
config.read("config.ini")
rtsp = config.get("vars", "rtsp")
with open(r"spots.pickle", "rb") as file:
spots = pickle.load(file)
color = [0, 255, 0, 255]
alpha = 0.5
# height = 720
# width = 1280
# num_cores = multiprocessing.cpu_count()
# num_cores = 4
# print(num_cores)
while True:
cnt_empty = 0
all_spots = 0
# cap = cv2.VideoCapture(rtsp)
# ret, image = cap.read()
image = cv2.imread('parking_lot.jpg')
# if not ret:
# print ("Error getting image...")
# continue
# del(cap)
print("Captured parking lot image ..")
# new_image = np.copy(image)
overlay = np.copy(image)
# overlay = np.zeros((height, width, 4), dtype=np.uint8)
# results = Parallel(n_jobs=num_cores)(delayed(classify_image)(spot, image) for spot in spots.values())
if first_iter:
pool = multiprocessing.Pool(processes=4, initializer=init)
first_iter = False
date1 = datetime.now()
# classify = partial(classify_image, model, image)
classify = partial(classify_image, image)
results = pool.map(classify, spots.values())
print(results)
date2 = datetime.now()
print(str(date2 - date1))
        # Tally the results and draw an overlay on each empty spot
        for label, (x1, y1, x2, y2) in zip(results, spots.values()):
            all_spots += 1
            if label == 'empty':
                cv2.rectangle(overlay, (int(x1), int(y1)), (int(x2), int(y2)), color, -1)
                cnt_empty += 1
        cnt_occupied = all_spots - cnt_empty
cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0, image)
cv2.imwrite('parking_lot_new.jpg', image)
template = """
<div id="info" class="card-deck">
<div class="card border-dark mb-3" style="max-width: 18rem;">
<div class="card-header">Total Parking Spots</div>
<div class="card-body text-dark">
<strong><font size="70">{all_spots}</font></strong>
</div>
</div>
<div class="card border-danger mb-3" style="max-width: 18rem;">
<div class="card-header">Total Spots Occupied</div>
<div class="card-body text-danger">
<strong><font size="70">{cnt_occupied}</font></strong>
</div>
</div>
<div class="card border-success mb-3" style="max-width: 18rem;">
<div class="card-header">Total Spots Available</div>
<div class="card-body text-success">
<strong><font size="70">{cnt_empty}</font></strong>
</div>
</div>
</div>
""".format(
            all_spots=all_spots,
cnt_occupied=cnt_occupied,
cnt_empty=cnt_empty
)
print(template, file=open("../templates/spots.html", 'w'))
date2 = datetime.now()
print(str(date2 - date1))
``` |
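The file above combines `multiprocessing.Pool(initializer=...)` with `functools.partial` so each worker loads the Keras model once and the current frame is bound per call. Below is a minimal, dependency-free sketch of that idiom; all names are illustrative stand-ins, not part of the project.
```python
# Each worker initialises shared state once; partial() binds the per-frame
# argument so pool.map only has to iterate over the spots.
import multiprocessing
from functools import partial

def init():
    global heavy_resource
    heavy_resource = {"threshold": 0.5}  # stand-in for load_model('spots.h5')

def classify(frame_id, spot):
    # `frame_id` is bound once via partial; `spot` varies per task.
    return (frame_id, spot, heavy_resource["threshold"])

if __name__ == '__main__':
    spots = {0: (0, 0, 10, 10), 1: (10, 0, 20, 10)}
    with multiprocessing.Pool(processes=2, initializer=init) as pool:
        results = pool.map(partial(classify, "frame-001"), spots.values())
    print(results)
```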
{
"source": "jmriddell/mousesens",
"score": 3
} |
#### File: mousesens/mousesens/cli.py
```python
import click
import mousesens.xinput_interface as xi
@click.group()
def cli():
"""Command line utility to change the sensitivity of a pointer device."""
pass
@click.command(name="set")
@click.argument(
"device", type=click.Choice(xi.get_pointer_devices())
)
@click.argument("sensitivity", type=click.FLOAT)
def set_sensitivity(device, sensitivity):
"""Set the sensitivity of a pointer device."""
xi.set_sensitivity(device, float(sensitivity))
@click.command(name="list")
def list_devices():
"""List available pointer devices."""
devices = xi.get_pointer_devices()
for device in devices:
click.echo(message=device)
cli.add_command(list_devices)
cli.add_command(set_sensitivity)
``` |
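A hedged smoke test for the CLI defined above, using click's built-in test runner. It assumes `xinput` is available on the machine, since the `set` command reads its device choices at import time; the device name is a placeholder.
```python
# Invoke the CLI in-process with click's test runner (no subprocess needed).
from click.testing import CliRunner
from mousesens.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ["list"])
print(result.output)  # one pointer device name per line
# runner.invoke(cli, ["set", "<device name>", "0.5"])  # would adjust a real device
```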
{
"source": "jmriddell/ort-simpleroute",
"score": 3
} |
#### File: _examples/redone/vrp_pickup_delivery.py
```python
from ort_simpleroute.test_examples_same_output._examples.original import (
vrp_pickup_delivery as original,
)
import ort_simpleroute as hlp
def main():
"""Entry point of the program."""
# Instantiate the data problem.
data = original.create_data_model()
# Create the router.
router = hlp.RouteOptimizer(
len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
)
# Define cost of each arc.
def distance_callback(from_node, to_node):
"""Returns the manhattan distance between the two nodes."""
return data["distance_matrix"][from_node][to_node]
router.set_global_arc_cost(distance_callback)
# Add Distance constraint.
# dimension_name = 'Distance'
distance_dimension = router.add_dimension(
distance_callback,
3000, # vehicle maximum travel distance
"Distance", # Dimension Name
)
distance_dimension.SetGlobalSpanCostCoefficient(100)
# Define Transportation Requests.
for request in data["pickups_deliveries"]:
router.add_delivery_request(request[0], request[1])
# Solve the problem using PARALLEL_CHEAPEST_INSERTION first solution heuristic.
solution = router.solve_using_fss(hlp.fss.PARALLEL_CHEAPEST_INSERTION)
if solution:
original.print_solution(data, router.manager, router.model, solution)
if __name__ == "__main__":
print("\n\nOriginal\n")
original.main()
print("\n\nNew\n")
main()
``` |
{
"source": "jmriego/pipelinewise-target-bigquery",
"score": 2
} |
#### File: pipelinewise-target-bigquery/target_bigquery/__init__.py
```python
import argparse
import copy
import io
import json
import logging
import os
import sys
from tempfile import NamedTemporaryFile, mkstemp
from fastavro import writer, parse_schema
from joblib import Parallel, delayed, parallel_backend
from jsonschema import Draft7Validator, FormatChecker
from singer import get_logger
from target_bigquery import stream_utils
from target_bigquery.db_sync import DbSync
from target_bigquery.exceptions import (
RecordValidationException,
InvalidValidationOperationException
)
LOGGER = get_logger('target_bigquery')
logging.getLogger('bigquery.connector').setLevel(logging.WARNING)
DEFAULT_BATCH_SIZE_ROWS = 100000
DEFAULT_PARALLELISM = 0 # 0 The number of threads used to flush tables
DEFAULT_MAX_PARALLELISM = 16 # Don't use more than this number of threads by default when flushing streams in parallel
def add_metadata_columns_to_schema(schema_message):
"""Metadata _sdc columns according to the stitch documentation at
https://www.stitchdata.com/docs/data-structure/integration-schemas#sdc-columns
Metadata columns gives information about data injections
"""
extended_schema_message = schema_message
extended_schema_message['schema']['properties']['_sdc_extracted_at'] = {'type': ['null', 'string'],
'format': 'date-time'}
extended_schema_message['schema']['properties']['_sdc_batched_at'] = {'type': ['null', 'string'],
'format': 'date-time'}
extended_schema_message['schema']['properties']['_sdc_deleted_at'] = {'type': ['null', 'string'],
'format': 'date-time'}
return extended_schema_message
def emit_state(state):
if state is not None:
line = json.dumps(state)
LOGGER.info('Emitting state {}'.format(line))
sys.stdout.write("{}\n".format(line))
sys.stdout.flush()
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
def persist_lines(config, lines) -> None:
state = None
flushed_state = None
schemas = {}
key_properties = {}
validators = {}
records_to_load = {}
csv_files_to_load = {}
row_count = {}
stream_to_sync = {}
total_row_count = {}
batch_size_rows = config.get('batch_size_rows', DEFAULT_BATCH_SIZE_ROWS)
# Loop over lines from stdin
for line in lines:
try:
o = json.loads(line)
except json.decoder.JSONDecodeError:
LOGGER.error("Unable to parse:\n{}".format(line))
raise
if 'type' not in o:
raise Exception("Line is missing required key 'type': {}".format(line))
t = o['type']
if t == 'RECORD':
if 'stream' not in o:
raise Exception("Line is missing required key 'stream': {}".format(line))
if o['stream'] not in schemas:
raise Exception(
"A record for stream {} was encountered before a corresponding schema".format(o['stream']))
# Get schema for this record's stream
stream = o['stream']
stream_utils.adjust_timestamps_in_record(o['record'], schemas[stream])
# Validate record
if config.get('validate_records'):
try:
validators[stream].validate(stream_utils.float_to_decimal(o['record']))
except Exception as ex:
if type(ex).__name__ == "InvalidOperation":
raise InvalidValidationOperationException(
f"Data validation failed and cannot load to destination. RECORD: {o['record']}\n"
"multipleOf validations that allows long precisions are not supported (i.e. with 15 digits"
"or more) Try removing 'multipleOf' methods from JSON schema.")
raise RecordValidationException(f"Record does not pass schema validation. RECORD: {o['record']}")
primary_key_string = stream_to_sync[stream].record_primary_key_string(o['record'])
if not primary_key_string:
primary_key_string = 'RID-{}'.format(total_row_count[stream])
if stream not in records_to_load:
records_to_load[stream] = {}
# increment row count only when a new PK is encountered in the current batch
if primary_key_string not in records_to_load[stream]:
row_count[stream] += 1
total_row_count[stream] += 1
# append record
if config.get('add_metadata_columns') or config.get('hard_delete'):
records_to_load[stream][primary_key_string] = stream_utils.add_metadata_values_to_record(o)
else:
records_to_load[stream][primary_key_string] = o['record']
if row_count[stream] >= batch_size_rows:
# flush all streams, delete records if needed, reset counts and then emit current state
if config.get('flush_all_streams'):
filter_streams = None
else:
filter_streams = [stream]
# Flush and return a new state dict with new positions only for the flushed streams
flushed_state = flush_streams(
records_to_load,
row_count,
stream_to_sync,
config,
state,
flushed_state,
filter_streams=filter_streams)
# emit last encountered state
emit_state(copy.deepcopy(flushed_state))
elif t == 'SCHEMA':
if 'stream' not in o:
raise Exception("Line is missing required key 'stream': {}".format(line))
stream = o['stream']
schemas[stream] = stream_utils.float_to_decimal(o['schema'])
validators[stream] = Draft7Validator(schemas[stream], format_checker=FormatChecker())
# flush records from previous stream SCHEMA
# if same stream has been encountered again, it means the schema might have been altered
# so previous records need to be flushed
if row_count.get(stream, 0) > 0:
flushed_state = flush_streams(records_to_load, row_count, stream_to_sync, config, state, flushed_state)
# emit latest encountered state
emit_state(flushed_state)
# key_properties key must be available in the SCHEMA message.
if 'key_properties' not in o:
raise Exception("key_properties field is required")
# Log based and Incremental replications on tables with no Primary Key
# cause duplicates when merging UPDATE events.
# Stop loading data by default if no Primary Key.
#
# If you want to load tables with no Primary Key:
# 1) Set ` 'primary_key_required': false ` in the target-bigquery config.json
# or
# 2) Use fastsync [postgres-to-bigquery, mysql-to-bigquery, etc.]
if config.get('primary_key_required', True) and len(o['key_properties']) == 0:
LOGGER.critical("Primary key is set to mandatory but not defined in the [{}] stream".format(stream))
raise Exception("key_properties field is required")
key_properties[stream] = o['key_properties']
if config.get('add_metadata_columns') or config.get('hard_delete'):
stream_to_sync[stream] = DbSync(config, add_metadata_columns_to_schema(o))
else:
stream_to_sync[stream] = DbSync(config, o)
try:
stream_to_sync[stream].create_schema_if_not_exists()
stream_to_sync[stream].sync_table()
except Exception as e:
LOGGER.error("""
Cannot sync table structure in BigQuery schema: {} .
""".format(
stream_to_sync[stream].schema_name))
raise e
row_count[stream] = 0
total_row_count[stream] = 0
with NamedTemporaryFile(mode='w+b') as fh:
csv_files_to_load[stream] = fh
elif t == 'ACTIVATE_VERSION':
LOGGER.debug('ACTIVATE_VERSION message')
elif t == 'STATE':
LOGGER.debug('Setting state to {}'.format(o['value']))
state = o['value']
# Initially set flushed state
if not flushed_state:
flushed_state = copy.deepcopy(state)
else:
raise Exception("Unknown message type {} in message {}"
.format(o['type'], o))
# if some bucket has records that need to be flushed but haven't reached batch size
# then flush all buckets.
if sum(row_count.values()) > 0:
# flush all streams one last time, delete records if needed, reset counts and then emit current state
flushed_state = flush_streams(records_to_load, row_count, stream_to_sync, config, state, flushed_state)
# emit latest state
emit_state(copy.deepcopy(flushed_state))
# pylint: disable=too-many-arguments
def flush_streams(
streams,
row_count,
stream_to_sync,
config,
state,
flushed_state,
filter_streams=None):
"""
Flushes all buckets and resets records count to 0 as well as empties records to load list
:param streams: dictionary with records to load per stream
:param row_count: dictionary with row count per stream
:param stream_to_sync: BigQuery db sync instance per stream
:param config: dictionary containing the configuration
:param state: dictionary containing the original state from tap
:param flushed_state: dictionary containing updated states only when streams got flushed
:param filter_streams: Keys of streams to flush from the streams dict. Default is every stream
:return: State dict with flushed positions
"""
parallelism = config.get("parallelism", DEFAULT_PARALLELISM)
max_parallelism = config.get("max_parallelism", DEFAULT_MAX_PARALLELISM)
# Parallelism 0 means auto parallelism:
#
# Auto parallelism trying to flush streams efficiently with auto defined number
# of threads where the number of threads is the number of streams that need to
# be loaded but it's not greater than the value of max_parallelism
if parallelism == 0:
n_streams_to_flush = len(streams.keys())
if n_streams_to_flush > max_parallelism:
parallelism = max_parallelism
else:
parallelism = n_streams_to_flush
# Select the required streams to flush
if filter_streams:
streams_to_flush = filter_streams
else:
streams_to_flush = streams.keys()
# Single-host, thread-based parallelism
with parallel_backend('threading', n_jobs=parallelism):
Parallel()(delayed(load_stream_batch)(
stream=stream,
records_to_load=streams[stream],
row_count=row_count,
db_sync=stream_to_sync[stream],
delete_rows=config.get('hard_delete')
) for stream in streams_to_flush)
# reset flushed stream records to empty to avoid flushing same records
for stream in streams_to_flush:
streams[stream] = {}
# Update flushed streams
if filter_streams:
# update flushed_state position if we have state information for the stream
if state is not None and stream in state.get('bookmarks', {}):
# Create bookmark key if not exists
if 'bookmarks' not in flushed_state:
flushed_state['bookmarks'] = {}
# Copy the stream bookmark from the latest state
flushed_state['bookmarks'][stream] = copy.deepcopy(state['bookmarks'][stream])
# If we flush every bucket use the latest state
else:
flushed_state = copy.deepcopy(state)
# Return with state message with flushed positions
return flushed_state
def load_stream_batch(stream, records_to_load, row_count, db_sync, delete_rows=False):
# Load into bigquery
if row_count[stream] > 0:
flush_records(stream, records_to_load, row_count[stream], db_sync)
# Delete soft-deleted, flagged rows - where _sdc_deleted at is not null
if delete_rows:
db_sync.delete_rows(stream)
# reset row count for the current stream
row_count[stream] = 0
def flush_records(stream, records_to_load, row_count, db_sync):
parsed_schema = parse_schema(db_sync.avro_schema())
csv_fd, csv_file = mkstemp()
with open(csv_file, 'wb') as out:
writer(out, parsed_schema, db_sync.records_to_avro(records_to_load.values()))
# Seek to the beginning of the file and load
with open(csv_file, 'r+b') as f:
db_sync.load_avro(f, row_count)
# Delete temp file
os.remove(csv_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='Config file')
args = parser.parse_args()
if args.config:
with open(args.config) as config_input:
config = json.load(config_input)
else:
config = {}
# Consume singer messages
singer_messages = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
persist_lines(config, singer_messages)
LOGGER.debug("Exiting normally")
if __name__ == '__main__':
main()
``` |
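The auto-parallelism comment in `flush_streams` above boils down to a small rule: `parallelism == 0` means one thread per stream, capped at `max_parallelism`. A standalone sketch of that rule follows; the helper name is hypothetical.
```python
# Mirrors the logic in flush_streams(): 0 means "auto", i.e. one thread per
# stream but never more than the configured maximum.
def resolve_parallelism(parallelism, n_streams, max_parallelism=16):
    if parallelism == 0:
        return min(n_streams, max_parallelism)
    return parallelism

assert resolve_parallelism(0, 3) == 3     # fewer streams than the cap
assert resolve_parallelism(0, 40) == 16   # capped at max_parallelism
assert resolve_parallelism(8, 40) == 8    # explicit setting wins
```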
{
"source": "jmriego/pipelinewise",
"score": 2
} |
#### File: fastsync/commons/test_fastsync_target_bigquery.py
```python
import pytest
from unittest.mock import Mock, patch, ANY, mock_open
from google.cloud import bigquery
from pipelinewise.fastsync.commons.target_bigquery import FastSyncTargetBigquery
@pytest.fixture
def query_result():
"""
Mocked Bigquery Run Query Results
"""
qr = Mock()
qr.return_value = []
return qr
@pytest.fixture
def bigquery_job(query_result):
"""
Mocked Bigquery Job Query
"""
qj = Mock()
qj.output_rows = 0
qj.job_id = 1
qj.result().total_rows = 0
return qj
@pytest.fixture
def bigquery_job_config():
"""
Mocked Bigquery Job Config
"""
qc = Mock()
return qc
class FastSyncTargetBigqueryMock(FastSyncTargetBigquery):
"""
Mocked FastSyncTargetBigquery class
"""
def __init__(self, connection_config, transformation_config=None):
super().__init__(connection_config, transformation_config)
# pylint: disable=attribute-defined-outside-init
class TestFastSyncTargetBigquery:
"""
Unit tests for fastsync target bigquery
"""
def setup_method(self):
"""Initialise test FastSyncTargetPostgres object"""
self.bigquery = FastSyncTargetBigqueryMock(connection_config={'project_id': 'dummy-project'},
transformation_config={})
@patch("pipelinewise.fastsync.commons.target_bigquery.bigquery.Client")
def test_create_schema(self, Client, bigquery_job):
"""Validate if create schema queries generated correctly"""
Client().query.return_value = bigquery_job
self.bigquery.create_schema('new_schema')
Client().create_dataset.assert_called_with('new_schema', exists_ok=True)
@patch("pipelinewise.fastsync.commons.target_bigquery.bigquery.Client")
def test_drop_table(self, Client, bigquery_job):
"""Validate if drop table queries generated correctly"""
Client().query.return_value = bigquery_job
self.bigquery.drop_table('test_schema', 'test_table')
Client().query.assert_called_with(
'DROP TABLE IF EXISTS test_schema.`test_table`', job_config=ANY)
self.bigquery.drop_table('test_schema', 'test_table', is_temporary=True)
Client().query.assert_called_with(
'DROP TABLE IF EXISTS test_schema.`test_table_temp`', job_config=ANY)
self.bigquery.drop_table('test_schema', 'UPPERCASE_TABLE')
Client().query.assert_called_with(
'DROP TABLE IF EXISTS test_schema.`uppercase_table`', job_config=ANY)
self.bigquery.drop_table('test_schema', 'UPPERCASE_TABLE', is_temporary=True)
Client().query.assert_called_with(
'DROP TABLE IF EXISTS test_schema.`uppercase_table_temp`', job_config=ANY)
self.bigquery.drop_table('test_schema', 'test_table_with_space')
Client().query.assert_called_with(
'DROP TABLE IF EXISTS test_schema.`test_table_with_space`', job_config=ANY)
self.bigquery.drop_table('test_schema', 'test table with space', is_temporary=True)
Client().query.assert_called_with(
'DROP TABLE IF EXISTS test_schema.`test_table_with_space_temp`', job_config=ANY)
@patch("pipelinewise.fastsync.commons.target_bigquery.bigquery.Client")
def test_create_table(self, Client, bigquery_job):
"""Validate if create table queries generated correctly"""
Client().query.return_value = bigquery_job
# Create table with standard table and column names
self.bigquery.create_table(target_schema='test_schema',
table_name='test_table',
columns=['`id` INTEGER',
'`txt` STRING'])
Client().query.assert_called_with(
'CREATE OR REPLACE TABLE test_schema.`test_table` ('
'`id` integer,`txt` string,'
'_sdc_extracted_at timestamp,'
'_sdc_batched_at timestamp,'
'_sdc_deleted_at timestamp)',
job_config=ANY)
# Create table with reserved words in table and column names
self.bigquery.create_table(target_schema='test_schema',
table_name='order',
columns=['`id` INTEGER',
'`txt` STRING',
'`select` STRING'])
Client().query.assert_called_with(
'CREATE OR REPLACE TABLE test_schema.`order` ('
'`id` integer,`txt` string,`select` string,'
'_sdc_extracted_at timestamp,'
'_sdc_batched_at timestamp,'
'_sdc_deleted_at timestamp)',
job_config=ANY)
# Create table with mixed lower and uppercase and space characters
self.bigquery.create_table(target_schema='test_schema',
table_name='TABLE with SPACE',
columns=['`ID` INTEGER',
'`COLUMN WITH SPACE` STRING'])
Client().query.assert_called_with(
'CREATE OR REPLACE TABLE test_schema.`table_with_space` ('
'`id` integer,`column with space` string,'
'_sdc_extracted_at timestamp,'
'_sdc_batched_at timestamp,'
'_sdc_deleted_at timestamp)',
job_config=ANY)
# Create table with no primary key
self.bigquery.create_table(target_schema='test_schema',
table_name='test_table_no_pk',
columns=['`ID` INTEGER',
'`TXT` STRING'])
Client().query.assert_called_with(
'CREATE OR REPLACE TABLE test_schema.`test_table_no_pk` ('
'`id` integer,`txt` string,'
'_sdc_extracted_at timestamp,'
'_sdc_batched_at timestamp,'
'_sdc_deleted_at timestamp)',
job_config=ANY)
@patch("pipelinewise.fastsync.commons.target_bigquery.bigquery.LoadJobConfig")
@patch("pipelinewise.fastsync.commons.target_bigquery.bigquery.Client")
def test_copy_to_table(self, Client, LoadJobConfig, bigquery_job_config, bigquery_job):
"""Validate if COPY command generated correctly"""
# COPY table with standard table and column names
Client().load_table_from_file.return_value = bigquery_job
LoadJobConfig.return_value = bigquery_job_config
m = mock_open()
with patch('pipelinewise.fastsync.commons.target_bigquery.open', m):
self.bigquery.copy_to_table(filepath='/path/to/dummy-file.csv.gz',
target_schema='test_schema',
table_name='test_table',
size_bytes=1000,
is_temporary=False,
skip_csv_header=False)
m.assert_called_with('/path/to/dummy-file.csv.gz', 'rb')
assert bigquery_job_config.source_format == bigquery.SourceFormat.CSV
assert bigquery_job_config.write_disposition == 'WRITE_TRUNCATE'
assert bigquery_job_config.allow_quoted_newlines == True
assert bigquery_job_config.skip_leading_rows == 0
Client().dataset.assert_called_with('test_schema')
Client().dataset().table.assert_called_with('test_table')
assert Client().load_table_from_file.call_count == 1
# COPY table with reserved word in table and column names in temp table
with patch('pipelinewise.fastsync.commons.target_bigquery.open', m):
self.bigquery.copy_to_table(filepath='/path/to/full-file.csv.gz',
target_schema='test_schema',
table_name='full',
size_bytes=1000,
is_temporary=True,
skip_csv_header=False)
m.assert_called_with('/path/to/full-file.csv.gz', 'rb')
assert bigquery_job_config.source_format == bigquery.SourceFormat.CSV
assert bigquery_job_config.write_disposition == 'WRITE_TRUNCATE'
assert bigquery_job_config.allow_quoted_newlines == True
assert bigquery_job_config.skip_leading_rows == 0
Client().dataset.assert_called_with('test_schema')
Client().dataset().table.assert_called_with('full_temp')
assert Client().load_table_from_file.call_count == 2
# COPY table with space and uppercase in table name and s3 key
with patch('pipelinewise.fastsync.commons.target_bigquery.open', m):
self.bigquery.copy_to_table(filepath='/path/to/file with space.csv.gz',
target_schema='test_schema',
table_name='table with SPACE and UPPERCASE',
size_bytes=1000,
is_temporary=True,
skip_csv_header=False)
m.assert_called_with('/path/to/file with space.csv.gz', 'rb')
assert bigquery_job_config.source_format == bigquery.SourceFormat.CSV
assert bigquery_job_config.write_disposition == 'WRITE_TRUNCATE'
assert bigquery_job_config.allow_quoted_newlines == True
assert bigquery_job_config.skip_leading_rows == 0
Client().dataset.assert_called_with('test_schema')
Client().dataset().table.assert_called_with('table_with_space_and_uppercase_temp')
assert Client().load_table_from_file.call_count == 3
@patch("pipelinewise.fastsync.commons.target_bigquery.bigquery.Client")
def test_grant_select_on_table(self, Client, bigquery_job):
"""Validate if GRANT command generated correctly"""
# GRANT table with standard table and column names
Client().query.return_value = bigquery_job
self.bigquery.grant_select_on_table(target_schema='test_schema',
table_name='test_table',
role='test_role',
is_temporary=False)
Client().query.assert_called_with(
'GRANT SELECT ON test_schema.`test_table` TO ROLE test_role', job_config=ANY)
# GRANT table with reserved word in table and column names in temp table
self.bigquery.grant_select_on_table(target_schema='test_schema',
table_name='full',
role='test_role',
is_temporary=False)
Client().query.assert_called_with(
'GRANT SELECT ON test_schema.`full` TO ROLE test_role', job_config=ANY)
# GRANT table with with space and uppercase in table name and s3 key
self.bigquery.grant_select_on_table(target_schema='test_schema',
table_name='table with SPACE and UPPERCASE',
role='test_role',
is_temporary=False)
Client().query.assert_called_with(
'GRANT SELECT ON test_schema.`table_with_space_and_uppercase` TO ROLE test_role', job_config=ANY)
@patch("pipelinewise.fastsync.commons.target_bigquery.bigquery.Client")
def test_grant_usage_on_schema(self, Client, bigquery_job):
"""Validate if GRANT command generated correctly"""
self.bigquery.grant_usage_on_schema(target_schema='test_schema',
role='test_role')
Client().query.assert_called_with(
'GRANT USAGE ON SCHEMA test_schema TO ROLE test_role', job_config=ANY)
@patch("pipelinewise.fastsync.commons.target_bigquery.bigquery.Client")
def test_grant_select_on_schema(self, Client, bigquery_job):
"""Validate if GRANT command generated correctly"""
self.bigquery.grant_select_on_schema(target_schema='test_schema',
role='test_role')
Client().query.assert_called_with(
'GRANT SELECT ON ALL TABLES IN SCHEMA test_schema TO ROLE test_role', job_config=ANY)
@patch("pipelinewise.fastsync.commons.target_bigquery.bigquery.CopyJobConfig")
@patch("pipelinewise.fastsync.commons.target_bigquery.bigquery.Client")
def test_swap_tables(self, Client, CopyJobConfig, bigquery_job_config, bigquery_job):
"""Validate if swap table commands generated correctly"""
# Swap tables with standard table and column names
Client().copy_table.return_value = bigquery_job
CopyJobConfig.return_value = bigquery_job_config
self.bigquery.swap_tables(schema='test_schema',
table_name='test_table')
assert bigquery_job_config.write_disposition == 'WRITE_TRUNCATE'
Client().copy_table.assert_called_with(
'dummy-project.test_schema.test_table_temp',
'dummy-project.test_schema.test_table',
job_config=ANY)
Client().delete_table.assert_called_with('dummy-project.test_schema.test_table_temp')
# Swap tables with reserved word in table and column names in temp table
self.bigquery.swap_tables(schema='test_schema',
table_name='full')
assert bigquery_job_config.write_disposition == 'WRITE_TRUNCATE'
Client().copy_table.assert_called_with(
'dummy-project.test_schema.full_temp',
'dummy-project.test_schema.full',
job_config=ANY)
Client().delete_table.assert_called_with('dummy-project.test_schema.full_temp')
# Swap tables with with space and uppercase in table name and s3 key
self.bigquery.swap_tables(schema='test_schema',
table_name='table with SPACE and UPPERCASE')
assert bigquery_job_config.write_disposition == 'WRITE_TRUNCATE'
Client().copy_table.assert_called_with(
'dummy-project.test_schema.table_with_space_and_uppercase_temp',
'dummy-project.test_schema.table_with_space_and_uppercase',
job_config=ANY)
Client().delete_table.assert_called_with('dummy-project.test_schema.table_with_space_and_uppercase_temp')
``` |
{
"source": "jmriego/queick",
"score": 3
} |
#### File: queick/queick/constants.py
```python
def enum(name, *sequential, **named):
values = dict(zip(sequential, range(len(sequential))), **named)
return type(str(name), (), values)
RETRY_TYPE = enum(
'RetryType',
CONSTANT='constant',
LINEAR_INCREASING='linear_increasing',
COUNT_INCREASING='count_increasing',
EXPONENTIAL='exponential',
)
NW_STATE = enum(
'State',
CONNECTED='connected',
DISCONNECTED='disconnected',
INITIATED='initiated',
)
TCP_SERVER_HOST = '127.0.0.1'
TCP_SERVER_PORT = 9999
```
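The `enum` helper above builds a plain class with `type()`, so positional names map to 0, 1, 2, … and keyword names keep their given values. A quick illustration (the `Color` example is hypothetical; the import path follows the file header):
```python
from queick.constants import enum, RETRY_TYPE

# Positional members are numbered; keyword members keep their values.
Color = enum('Color', 'RED', 'GREEN', BLUE='blue')
assert Color.RED == 0 and Color.GREEN == 1
assert Color.BLUE == 'blue'
assert RETRY_TYPE.CONSTANT == 'constant'   # as defined in this module
```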
#### File: queick/queick/scheduler.py
```python
import sched
import time
from logging import getLogger
logger = getLogger(__name__)
class Scheduler:
def __init__(self):
self.queue = sched.scheduler(time.time, time.sleep)
def put(self, job):
logger.debug('[Scheduler] Job is queued: %s', job)
self.queue.enterabs(job.start_at, job.priority,
job.func, argument=(job.args,))
def run(self):
self.queue.run()
```
#### File: queick/queick/scheduling_time.py
```python
import time
from datetime import date, timedelta
from .exceptions import IntervalMustAboveZeroError
class SchedulingTime:
def __init__(self):
self.interval = 600 # 10 minutes
self.start_at = None
def every(self, seconds=0, minutes=0, hours=0, days=0):
self.interval = seconds + minutes * 60 + hours * 3600 + days * 24 * 3600
return self
def starting_from(self, start_at):
self.start_at = start_at
return self
def from_now(self):
self.start_at = time.time()
return self
def from_midnight(self):
tomorrow = date.today + timedelta(days=1)
self.start_at = int(tomorrow.strftime('%s'))
return self
def validate(self):
if self.interval <= 0:
raise IntervalMustAboveZeroError(
'The value of interval must be over 0.')
elif self.start_at is None:
raise MustSetStartAtError(
'start_at have to be specified if starting_from() is used.')
```
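A brief usage sketch of the fluent interface above: chain `every()` with one of the start helpers, then `validate()`. The import path follows the file header and the schedule values are illustrative.
```python
from queick.scheduling_time import SchedulingTime

st = SchedulingTime().every(minutes=30).from_now()
st.validate()        # raises only if interval <= 0 or start_at is unset
print(st.interval)   # 1800 seconds
print(st.start_at)   # unix timestamp captured by from_now()
```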
#### File: queick/tests/test_queue_manager.py
```python
import unittest
import time
from queick.queue_manager import QueueManager
from queick.job import Job
class TestQueueManager(unittest.TestCase):
def setUp(self):
super().setUp()
def tearDown(self):
super().tearDown()
def test_enqueue(self):
qm = QueueManager()
qm.enqueue({ 'test': 'test' })
time.sleep(0.01) # Wait until enqueue finishes
self.assertEqual(qm.queue.qsize(), 1)
qm.dequeue()
def test_dequeue(self):
qm = QueueManager()
qm.enqueue({ 'test': 'test' })
time.sleep(0.01) # Wait until enqueue finishes
val = qm.dequeue()
self.assertEqual(val['test'], 'test')
def test_is_empty(self):
qm = QueueManager()
self.assertEqual(qm.is_empty(), True)
qm.enqueue({ 'test': 'test' })
time.sleep(0.01) # Wait until enqueue finishes
self.assertEqual(qm.is_empty(), False)
def test_create_job(self):
qm = QueueManager()
job = qm.create_job('tests.testfunc.func', ('test',), None, None, None)
self.assertEqual(type(job), Job)
``` |
{
"source": "jm-rivera/pydeflate",
"score": 3
} |
#### File: pydeflate/pydeflate/config.py
```python
import os
class Paths:
def __init__(self, project_dir):
self.project_dir = project_dir
@property
def data(self):
return os.path.join(self.project_dir, "pydeflate", "data")
paths = Paths(os.path.dirname(os.path.dirname(__file__)))
```
#### File: pydeflate/get_data/oecd_data.py
```python
import datetime
import pandas as pd
import requests
from pydeflate.config import paths
from pydeflate.utils import base_year, oecd_codes, value_index, update_update_date
import warnings
warnings.simplefilter("ignore", Warning, lineno=1013)
def _get_zip(url: str) -> requests.models.Response:
"""Download Zip File"""
try:
response = requests.get(url)
response.raise_for_status()
return response
except Exception:
try:
response = requests.get(url, verify=False)
return response
except:
raise ConnectionError("Could not download ZIP")
def _oecd_bulk_download(url: str, file_name: str) -> pd.DataFrame:
"""Download zip file from bulk download website"""
import io
import zipfile as z
import requests
from bs4 import BeautifulSoup as bs
try:
response = requests.get(url)
except Exception:
response = requests.get(url, verify=False)
soup = bs(response.text, "html.parser")
link = list(soup.find_all("a"))[0].attrs["onclick"][15:-3].replace("_", "-")
link = "https://stats.oecd.org/FileView2.aspx?IDFile=" + link
file = z.ZipFile(io.BytesIO(_get_zip(link).content))
try:
df = pd.read_csv(
file.open(file_name),
sep=",",
encoding="ISO-8859-1",
low_memory=False,
)
except:
df = pd.read_csv(
file.open(file_name),
sep="|",
encoding="ISO-8859-1",
low_memory=False,
)
return df
def _update_dac_deflators() -> None:
"""Update DAC deflators data to latest base year"""
year = datetime.datetime.now().year
t = True
while t:
url = (
f"https://www.oecd.org/dac/financing-sustainable-development/"
f"development-finance-data/Deflators-base-{year}.xls"
)
try:
try:
df = pd.read_excel(url, header=2)
except ImportError:
raise Exception("Could not download data")
df = df.dropna(how="all")
df.to_csv(paths.data + r"/dac_deflators.csv", index=False)
print(f"Updated OECD DAC deflators {year}")
update_update_date("oecd_dac_deflator")
t = False
except:
year = year - 1
def _update_dac_exchange() -> None:
"""Update DAC Exchange rates to latest available"""
try:
exchange = (
"https://www.oecd.org/dac/financing-sustainable-development/"
"development-finance-data/Exchange-rates.xls"
)
df = pd.read_excel(exchange, header=2)
df.to_csv(paths.data + r"/dac_exchange_rates.csv", index=False)
print("Updated OECD DAC exchange rates")
update_update_date("oecd_dac_exchange")
except:
print("Error downloading new exchange rates")
def _clean_dac1(df: pd.DataFrame) -> pd.DataFrame:
"""Clean DAC1 to keep only relevant information for deflators and exchange"""
cols = {
"DONOR": "donor_code",
"AMOUNTTYPE": "type",
"AIDTYPE": "aid",
"Year": "year",
"FLOWS": "flow",
"Value": "value",
}
return (
df.filter(cols.keys(), axis=1)
.rename(columns=cols)
.loc[lambda d: d.aid == 1010] # only Total ODA
.loc[lambda d: d.flow == 1140] # only net disbursements
.filter(["donor_code", "type", "year", "value"])
.pivot(index=["donor_code", "year"], columns=["type"], values="value")
.reset_index()
.assign(
exchange=lambda d: d.N / d.A,
deflator=lambda d: (100 * d.A / d.D).round(2),
iso_code=lambda d: d.donor_code.map(oecd_codes),
year=lambda d: pd.to_datetime(d.year, format="%Y"),
)
.dropna(subset=["iso_code"])
.filter(["iso_code", "year", "exchange", "deflator"], axis=1)
)
def _update_dac1() -> None:
"""Update dac1 data from OECD site and save as feather"""
url = "https://stats.oecd.org/DownloadFiles.aspx?DatasetCode=TABLE1"
file_name = "Table1_Data.csv"
try:
print("Downloading DAC1 data, which may take a bit")
df = _oecd_bulk_download(url, file_name).pipe(_clean_dac1)
df.to_feather(paths.data + r"/dac1.feather")
print("Sucessfully downloaded DAC1 data")
update_update_date("oecd_dac_data")
except:
raise ConnectionError("Could not download data")
def _read_dac1() -> pd.DataFrame:
"""Read the dac1 file with exchange rates and deflators"""
return pd.read_feather(paths.data + r"/dac1.feather")
def get_usd_exchange() -> pd.DataFrame:
"""Get the USD exchange rates used by the OECD"""
df = _read_dac1()
return df[["iso_code", "year", "exchange"]].rename(columns={"exchange": "value"})
def get_exchange2usd_dict(currency_iso: str) -> dict:
"""Dictionary of currency_iso to USD"""
df = get_usd_exchange()
return (
df.loc[df.iso_code == currency_iso]
.dropna()
.set_index("year")["value"]
.to_dict()
)
def get_exchange_rate(currency_iso: str) -> dict:
"""Get an exchange rate for a given ISO"""
df = get_usd_exchange()
target_xe = get_exchange2usd_dict(currency_iso)
df.value = df.value / df.year.map(target_xe)
return df
def get_dac_deflator() -> pd.DataFrame:
"""Get the deflator used by the OECD"""
df = _read_dac1()
return df[["iso_code", "year", "deflator"]].rename(columns={"deflator": "value"})
def get_xe_deflator(currency_iso: str) -> pd.DataFrame:
"""get exchange rate deflator based on OECD base year and exchange rates"""
# get exchange rates
xe = get_exchange_rate(currency_iso)
# If currency is not valid
if int(xe.value.sum()) == 0:
raise ValueError(f"No currency exchange data for {currency_iso}")
# get deflators and base year
defl = get_dac_deflator()
base = base_year(defl, "year")
# get the exchange rate as an index based on the base year
xe.value = value_index(xe, base)
return xe
def get_gdp_deflator() -> pd.DataFrame:
"""Deduce prices deflator based on exchange rate deflators and DAC
deflators data"""
dac_deflator = get_dac_deflator()
xe_deflator = get_xe_deflator(currency_iso="USA")
df = dac_deflator.merge(
xe_deflator,
on=["iso_code", "year"],
how="left",
suffixes=("_def", "_xe"),
)
df["value"] = round(df.value_def * (df.value_xe / 100), 3)
return df[["iso_code", "year", "value"]]
```
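A worked example of the composition used in `get_gdp_deflator()` above, where the DAC deflator is combined with the exchange-rate index to isolate the price component; the numbers are illustrative.
```python
# price deflator = DAC deflator * exchange_rate_index / 100
dac_deflator = 95.0   # current USD -> constant USD, from get_dac_deflator()
xe_index = 110.0      # exchange-rate index vs. the OECD base year, from get_xe_deflator()
price_deflator = round(dac_deflator * (xe_index / 100), 3)
print(price_deflator)  # 104.5
```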
#### File: pydeflate/tools/exchange.py
```python
import pandas as pd
from pydeflate.get_data import oecd_data, wb_data
from pydeflate.utils import check_year_as_number
__exchange_source = {
"wb": wb_data.get_currency_exchange,
"oecd_dac": oecd_data.get_exchange_rate,
}
def _check_key_errors(
rates_source: str, columns: list, value_column: str, date_column: str
) -> None:
"""Check whether provided parameters are valid"""
if rates_source not in __exchange_source.keys():
raise KeyError(
f"{rates_source} is not a valid exchange rates source. "
f"Please choose from {__exchange_source.keys()}"
)
if value_column not in columns:
raise KeyError(
f"{value_column} is not a valid column in the provided DataFrame"
)
if date_column not in columns:
raise KeyError(f"{date_column} is not a valid column in the provided DataFrame")
def exchange(
df: pd.DataFrame,
source_currency: str,
target_currency: str,
rates_source: str = "wb",
value_column: str = "value",
target_column: str = "value_xe",
date_column: str = "date",
) -> pd.DataFrame:
"""
Parameters
----------
df : pd.DataFrame
A Pandas DataFrame, in long format, containing at least a date column,
an column with iso-3 codes to identify the source currency, and a
value column where the values to be converted are stored.
source_currency : str
The ISO-3 code of the country which owns the currency in which the data
is expressed. "LCU" can be used to indicate that data is in Local
Currency Unit. "emu" can be used for the EURO.
target_currency : str
The ISO-3 code of the country which owns the currency to which the data
will be converted. "LCU" can be used to convert from a given currency
(like the USD), back to each country's Local Currency.
rates_source : str, optional
The source of the exchange rate data. Current options include "wb" for
the World Bank and "oecd_dac" for the exchange rates used for ODA
statistics. The default is "wb".
value_column : str, optional
The name of the column containing the values to be converted.
The default is "value".
target_column : str, optional
The name of the column where the converted values will be stored.
The default is "value_xe".
date_column : str, optional
The name of the column where the date/year is stored.
The default is "date".
Returns
-------
df : pd.DataFrame
Returns a dataframe containing the converted data stored in the
target column.
"""
# Check whether provided parameters are valid
_check_key_errors(rates_source, df.columns, value_column, date_column)
# If source currency matches target currency, do nothing
if source_currency == target_currency:
df[target_column] = df[value_column]
return df
# check whether date is provided as integer
df, year_as_number = check_year_as_number(df, date_column)
# check whether target currency is LCU
if target_currency == "LCU":
target_currency = source_currency
source_currency = "LCU"
target_changed = True
else:
target_changed = False
# get the selected rates function
xe = __exchange_source[rates_source](target_currency)
xe = xe.rename(columns={"year": date_column})
# Check source and target currencies
if (source_currency not in set(xe.iso_code)) and (source_currency != "LCU"):
raise KeyError(f"{source_currency} not a valid currency code")
if (target_currency not in set(xe.iso_code)) and (target_currency != "LCU"):
raise KeyError(f"{target_currency} not a valid target currency")
if source_currency == "LCU":
df = df.merge(
xe,
on=["iso_code", date_column],
suffixes=("", "_xe"),
)
else:
xe = xe.loc[xe.iso_code == source_currency]
df = df.merge(
xe.drop("iso_code", axis=1),
on=[date_column],
suffixes=("", "_xe"),
)
# revert change to target_currency if target_changed
if target_changed:
source_currency = target_currency
target_currency = "LCU"
if target_currency == "LCU":
df[target_column] = df[value_column] * df[f"{value_column}_xe"]
else:
df[target_column] = df[value_column] / df[f"{value_column}_xe"]
if year_as_number:
df[date_column] = df[date_column].dt.year
return df.drop(["value_xe"], axis=1)
```
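A hedged usage sketch of `exchange()` mirroring the parameters documented in its docstring; the frame matches the shape used by the package's own tests, and the converted values depend on the World Bank rates pydeflate downloads.
```python
import pandas as pd
from pydeflate.tools.exchange import exchange

df = pd.DataFrame({
    "iso_code": ["FRA", "GBR", "USA"],
    "date": [2010, 2015, 2018],
    "value": [100, 100, 100],
})
converted = exchange(
    df,
    source_currency="USA",   # data currently expressed in US dollars
    target_currency="LCU",   # convert back to each row's local currency
    rates_source="wb",
    value_column="value",
    target_column="value_lcu",
    date_column="date",
)
print(converted[["iso_code", "date", "value_lcu"]])
```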
#### File: tests/test_get_data/test_imf_data.py
```python
import pytest
from pydeflate.get_data import imf_data
from pydeflate import config
import sys
import io
import os
def test__update_weo():
"""Capture print statements which are only printed if download successful"""
old_stdout = sys.stdout
new_stdout = io.StringIO()
sys.stdout = new_stdout
imf_data._update_weo(2020, 1)
output = new_stdout.getvalue()
sys.stdout = old_stdout
print(output)
assert output[-21:] == "2020-Apr WEO dataset\n"
old_stdout = sys.stdout
new_stdout = io.StringIO()
sys.stdout = new_stdout
imf_data._update_weo(2020, 1)
output = new_stdout.getvalue()
sys.stdout = old_stdout
print(output)
assert output[0:7] == "Already"
# cleaning
file = config.paths.data + r"/weo2020_1.csv"
if os.path.exists(file):
os.remove(file)
def test_get_implied_ppp_rate():
result = imf_data.get_implied_ppp_rate()
result = result.loc[
(result.iso_code == "GTM") & (result.year.dt.year == 1991), "value"
]
assert round(result.sum(), 1) == 1.4
```
#### File: tests/test_get_data/test_oecd_data.py
```python
import pytest
from pydeflate.get_data import oecd_data
def test__update_dac1():
"""Capture print statements which are only printed if download successful"""
oecd_data._update_dac1()
assert True
def test__update_dac_deflators():
"""Capture print statements which are only printed if successful"""
oecd_data._update_dac_deflators()
assert True
def test__update_dac_exchange():
"""Capture print statements which are only printed if successful"""
oecd_data._update_dac_exchange()
assert True
def test__get_zip_error():
with pytest.raises(ConnectionError):
oecd_data._get_zip("fake_url")
def test_get_xe_deflator_error():
with pytest.raises(ValueError):
oecd_data.get_xe_deflator("fake_currency")
```
#### File: tests/test_get_data/test_wb_data.py
```python
import pytest
from pydeflate.get_data import wb_data
import sys
import io
import datetime
def test_update_indicators():
"""Capture print statements which are only printed if download successful"""
old_stdout = sys.stdout
new_stdout = io.StringIO()
sys.stdout = new_stdout
wb_data.update_indicators()
output = new_stdout.getvalue()
sys.stdout = old_stdout
print(output)
assert (
output
== """Successfully updated NY.GDP.DEFL.ZS for 1950-2025
Successfully updated NY.GDP.DEFL.ZS.AD for 1950-2025
Successfully updated FP.CPI.TOTL for 1950-2025
Successfully updated PA.NUS.FCRF for 1950-2025
Successfully updated PX.REX.REER for 1950-2025
"""
)
def test_get_euro2usd():
key = datetime.datetime(2000, 1, 1)
result = wb_data.get_euro2usd()[key]
assert round(result, 1) == 1.1
def test_get_can2usd():
key = datetime.datetime(2000, 1, 1)
result = wb_data.get_can2usd()[key]
assert round(result, 1) == 1.5
def test_get_gbp2usd():
key = datetime.datetime(2000, 1, 1)
result = wb_data.get_gbp2usd()[key]
assert round(result, 1) == 0.7
def test_get_real_effective_exchange_index():
result = wb_data.get_real_effective_exchange_index()
result = result.loc[
(result.iso_code == "FRA") & (result.year.dt.year == 2005), "value"
]
assert round(result.sum(), 0) == 104
```
#### File: tests/test_tools/test_exchange.py
```python
from pydeflate.tools import exchange
import pandas as pd
import pytest
empty_df = pd.DataFrame([{}])
test_df = pd.DataFrame(
{
"iso_code": ["FRA", "GBR", "USA"],
"date": [2010, 2015, 2018],
"value": [100, 100, 100],
}
)
errors = (
(
"df, source_currency, target_currency, rates_source, value_column,"
"target_column, date_column"
),
[
(empty_df, "USA", "FRA", "wb", "value", "value_xe", "date"),
(test_df, "random_source", "FRA", "wb", "value", "value_xe", "date"),
(test_df, "USA", "random_target", "wb", "value", "value_xe", "date"),
(test_df, "USA", "FRA", "random_source", "value", "value_xe", "date"),
(test_df, "USA", "FRA", "wb", "random_col", "value_xe", "date"),
(test_df, "USA", "FRA", "wb", "value", "value_xe", "random_col"),
],
)
def test_exchange_to_LCU():
result = exchange.exchange(
df=test_df.copy(),
source_currency="GTM",
target_currency="LCU",
rates_source="wb",
value_column="value",
target_column="xe",
date_column="date",
)
result = result.set_index("iso_code")["xe"].to_dict()
assert round(result["FRA"], 0) == 9
assert round(result["GBR"], 0) == 9
assert round(result["USA"], 0) == 13
def test_exchange_to_nonUS():
result = exchange.exchange(
df=test_df.copy(),
source_currency="LCU",
target_currency="GTM",
rates_source="wb",
value_column="value",
target_column="value",
date_column="date",
)
result = result.set_index("iso_code")["value"].to_dict()
assert round(result["FRA"], 0) == 1068
assert round(result["GBR"], 0) == 1169
assert round(result["USA"], 0) == 752
def test_exchange_nonUS_to_nonUS():
result = exchange.exchange(
df=test_df.copy(),
source_currency="FRA",
target_currency="GTM",
rates_source="wb",
value_column="value",
target_column="xe",
date_column="date",
)
result = result.set_index("iso_code")["xe"].to_dict()
assert round(result["FRA"], 0) == 1068
assert round(result["GBR"], 0) == 849
assert round(result["USA"], 0) == 888
def test_exchange_target_to_source():
result = exchange.exchange(
df=test_df.copy(),
source_currency="FRA",
target_currency="FRA",
rates_source="wb",
value_column="value",
target_column="xe",
date_column="date",
)
result = result.set_index("iso_code")["xe"].to_dict()
assert round(result["FRA"], 0) == 100
assert round(result["GBR"], 0) == 100
assert round(result["USA"], 0) == 100
@pytest.mark.parametrize(*errors)
def test_exchange_errors(
df,
source_currency,
target_currency,
rates_source,
value_column,
target_column,
date_column,
) -> None:
with pytest.raises(Exception):
exchange.exchange(
df=df,
source_currency=source_currency,
target_currency=target_currency,
rates_source=rates_source,
value_column=value_column,
target_column=target_column,
date_column=date_column,
)
``` |
{
"source": "jmrocco/docsupport",
"score": 3
} |
#### File: docsupport/medium/my_parser.py
```python
import re
from urllib.parse import urlparse, unquote
import requests
from bs4 import BeautifulSoup
from medium.model import User, Post, Publication, Tag, Image, OutputFormat, to_dict
from medium.constant import ROOT_URL, HTML_PARSER
from medium.file_handler import IpfsHandler
def parse_user(payload):
user_dict = payload["payload"]["user"]
user_id = user_dict["userId"]
user = User(user_id)
username = user_dict["username"]
display_name = user_dict["name"]
avatar = user_dict["imageId"]
bio = user_dict["bio"]
twitter_name = user_dict["twitterScreenName"]
facebook_id = user_dict["facebookAccountId"]
user_meta_dict = payload["payload"]["userMeta"]
ref_dict = payload["payload"]["references"]
# interest_tags = user_meta_dict["interestTags"]
# user.interest_tags = parse_tags(interest_tags)
# author_tags = user_meta_dict["authorTags"]
# user.author_tags = parse_tags(author_tags)
publication_ids = ref_dict["Collection"]
if publication_ids is not None and len(publication_ids.keys()) > 0:
publication_list = []
for pub_id in publication_ids.keys():
publication = parse_publication(payload, pub_id)
publication_list.append(publication)
if len(publication_list) > 0:
user.publications = publication_list
stats_dict = ref_dict["SocialStats"][user_id]
following_count = stats_dict["usersFollowedCount"]
followby_count = stats_dict["usersFollowedByCount"]
user.user_id = user_id
user.username = username
user.display_name = display_name
user.avatar = avatar
user.bio = bio
user.twitter = twitter_name
user.facebook = facebook_id
user.following_count = following_count
user.followedby_count = followby_count
return to_dict(user)
def parse_publication(payload, pub_id=None):
if pub_id is None:
pub_id = payload["payload"]["collection"]["id"]
publication_dict = payload["payload"]["references"]["Collection"][pub_id]
publication = Publication(pub_id)
publication.display_name = publication_dict["name"]
publication.description = publication_dict["description"]
publication.creator_user_id = publication_dict["creatorId"]
image_dict = publication_dict["image"]
image = parse_images(image_dict)
if image is not None:
publication.image = image
logo_dict = publication_dict["logo"]
logo = parse_images(logo_dict)
if logo is not None:
publication.logo = logo
publication.follower_count = publication_dict["metadata"]["followerCount"]
publication.post_count = publication_dict["metadata"]["postCount"]
if "domain" in publication_dict:
publication.url = "http://" + publication_dict["domain"]
else:
publication.url = ROOT_URL + publication_dict["slug"]
publication.name = publication_dict["slug"]
return to_dict(publication)
def parse_post(payload):
# get the different parsing keys
post_detail_parsing_keys = ("payload", "references", "Post")
if post_detail_parsing_keys is None:
return
post_list_payload = payload
for key in post_detail_parsing_keys:
post_list_payload = post_list_payload.get(key)
def parse_post_dict(post_dict, post_id=None):
if post_id is None:
post_id = post_dict["id"]
post = Post(post_id)
unique_slug = post_dict["uniqueSlug"]
title = post_dict["title"]
post_date = post_dict["createdAt"]
publication_id = post_dict["approvedHomeCollectionId"]
url = ROOT_URL
ref_dict = payload["payload"]["references"]
if publication_id is not None and publication_id:
publication_dict = ref_dict["Collection"][publication_id]
# custom publication domain
if "domain" in publication_dict and publication_dict["domain"]:
url = "https://" + publication_dict["domain"]
else:
# simple publication
url += publication_dict["slug"]
else:
# personal post, no publication
creator_id = post_dict["creatorId"]
username = ref_dict["User"][creator_id]["username"]
url += "@{username}".format(username=username)
url += u"/{path}".format(path=unique_slug)
virtual_dict = post_dict["virtuals"]
recommend_count = virtual_dict["recommends"]
response_count = virtual_dict["responsesCreatedCount"]
read_time = virtual_dict["readingTime"]
word_count = virtual_dict["wordCount"]
image_count = virtual_dict["imageCount"]
preview_image = virtual_dict["previewImage"]
# post_tags = virtual_dict["tags"]
# post.post_tags = parse_tags(post_tags)
# post.unique_slug = unique_slug
post.title = title
post.post_date = post_date
post.url = url
post.recommend_count = recommend_count
post.response_count = response_count
post.read_time = read_time
post.word_count = word_count
post.image_count = image_count
image = parse_images(preview_image)
if image is not None:
post.preview_image = image
# print("{id}, {title}".format(id=post_id, title=title))
# print("{recommend}, {response}, {read}".format(
# recommend=recommend_count, response=response_count, read=read_time))
return to_dict(post)
post_list = []
# print(post_list_payload)
# payload -> references -> Post
if type(post_list_payload) is dict:
for post_id in post_list_payload.keys():
post_dict = post_list_payload.get(post_id)
post_list.append(parse_post_dict(post_dict, post_id))
# payload -> value
elif type(post_list_payload) is list:
for post_dict in post_list_payload:
post_list.append(parse_post_dict(post_dict))
return post_list
def parse_tags(tags_list_dict):
if tags_list_dict is not None and len(tags_list_dict) > 0:
tags_list = []
for tag_dict in tags_list_dict:
tag = Tag()
tag.unique_slug = tag_dict["slug"]
tag.name = tag_dict["name"]
tag.post_count = tag_dict["postCount"]
metadata_dict = tag_dict["metadata"]
if metadata_dict is not None:
tag.follower_count = metadata_dict["followerCount"]
tags_list.append(to_dict(tag))
return tags_list
def parse_images(image_dict):
if image_dict is not None:
image_id = image_dict["imageId"] if "imageId" in image_dict else image_dict["id"]
if image_id:
image = Image(image_id)
image.original_width = image_dict["originalWidth"]
image.original_height = image_dict["originalHeight"]
# This isn't working.
# image.url = u"https://cdn-images-1.medium.com/fit/t/{width}/{height}/{id}" \
# .format(width=image.original_width,
# height=image.original_height,
# id=image.image_id)
return to_dict(image)
else:
return None
def parse_post_detail(post_url, token):
print(post_url)
# driver = webdriver.Remote(desired_capabilities=DesiredCapabilities.CHROME)
# for json format, just return medium json response
# driver.get(post_url)
r = requests.get(post_url)
if r.ok:
# content_elements = driver.find_element_by_class_name("postArticle-content")
inner_html = BeautifulSoup(r.text, HTML_PARSER).find("div", {"class": "postArticle-content"})
content_tags = inner_html.find_all()
response = ""
for i in range(0, len(content_tags)):
tag = content_tags[i]
md = to_markdown(tag, token)
            if md is not None and md and md != 'None':
response += md + "\n"
print(response)
return response
def strip_space(text, trim_space=True):
text = re.sub(r'\s+', ' ', text)
if trim_space:
return text.strip()
else:
return text
def to_markdown(medium_tag, token):
text = strip_space(medium_tag.text)
if medium_tag.name == 'h3':
return '\n## {}'.format(text)
elif medium_tag.name == 'h4':
return '\n### {}'.format(text)
elif medium_tag.name == 'p': # text paragraph
# find style, link inside a paragraph
plain_text = ''
for child in medium_tag.children:
if child.name is None:
if len(strip_space(child.string)) > 0:
plain_text += strip_space(child.string)
else:
content = strip_space(child.text)
if child.name == 'strong':
plain_text += " \n**{0}**\n ".format(content)
elif child.name == 'em':
plain_text += " \n_{0}_\n ".format(content)
elif child.name == 'a':
plain_text += " \n[{0}]({1})\n ".format(content, child['href'])
elif child.name == 'br':
plain_text += "{} \n ".format(content)
elif child.name == 'code' or child.name == '':
plain_text += " \n`{0}`\n ".format(content)
return plain_text
elif medium_tag.name == 'figure': # image and comment
for child in medium_tag.children:
img_tag = child.find('img')
if img_tag is not None and img_tag.has_attr('src'):
x = IpfsHandler(img_tag['src'], token)
                return '\n\n'.format(x.ipfs_url)
# Handle Tweets
iframe = child.find('iframe')
if iframe is not None:
iframe_url = 'https://medium.com' + iframe['src']
try:
r = requests.get(iframe_url)
# driver.get(iframe_url)
iframe_content = BeautifulSoup(r.text, HTML_PARSER).find('body')
# print(iframe_content)
if iframe_content.find('iframe'):
return None
if iframe_content.find('blockquote'):
return '\n{}\n'.format(iframe_content.find('blockquote'))
if iframe_content is not None:
return '\n{}\n'.format(iframe_content)
# if 'body' in frame_content):
# print(iframe_content.find('body'))
# return '\n{}\n'.format(iframe_content.find('body'))
# if iframe_content.find('iframe') is not None:
# return '\n{}\n'.format(iframe_content.find('iframe'))
except:
return None
elif medium_tag.name == 'blockquote': # quote
return '> {}\n'.format(strip_space(medium_tag.text))
elif medium_tag.name == 'ol' or medium_tag.name == 'ul':
return "\n"
elif medium_tag.name == 'li':
plain_text = ''
for child in medium_tag.children:
if child.name is None:
if len(strip_space(child.string)) > 0:
plain_text += strip_space(child.string)
else:
content = strip_space(child.text)
if child.name == 'strong':
plain_text += " **{0}** ".format(content)
elif child.name == 'em':
plain_text += " _{0}_ ".format(content)
elif child.name == 'a':
plain_text += " [{0}]({1}) ".format(content, child['href'])
elif child.name == 'code' or child.name == '':
plain_text += " `{0}` ".format(content)
return '\n * {0}'.format(plain_text)
elif medium_tag.name == 'pre': # code block (not inline code or embed code)
code_block = ''
code_tags = medium_tag.prettify().split('<br/>')
for i in range(len(code_tags)):
t = BeautifulSoup(code_tags[i], HTML_PARSER)
code = re.sub(r'\r\n(\s{10})', '', t.text).replace('\n', '')
code_block += '{}\n'.format(code)
# print(i, code)
return '\n```\n{}```\n\n'.format(code_block)
# elif medium_tag.name == 'hr':
# return '\n----\n'
# TODO: need more test and change to adopt to chrome driver
elif medium_tag.name == 'iframe':
# gist, video, github, link...etc.
iframe_url = ROOT_URL + medium_tag['src']
try:
r = requests.get(iframe_url)
# driver.get(iframe_url)
iframe_content = BeautifulSoup(r.text, HTML_PARSER).find('iframe')
if iframe_content is not None:
iframe_src = iframe_content['src']
try:
uq = unquote(iframe_src)
src_string = urlparse(uq)
src_string2 = src_string.query
                    rgx = re.search(r'(?<=src=)(.*?[^?]*)', src_string2).group()
iframe_content['src'] = rgx
iframe_content['width'] = '512'
iframe_content['height'] = '300'
return '\n{}\n'.format(iframe_content)
except:
return None
else:
iframe_content = BeautifulSoup(r.text, HTML_PARSER).find('script')
try:
iframe_url = iframe_content['src']
except:
return None
r = requests.get(iframe_url)
if r.ok:
try:
raw_url = r.text.split('href=\\"')[1].split("\\")[0]
req = requests.get(raw_url)
if req.ok:
code_html = BeautifulSoup(req.content, HTML_PARSER)
return '\n```\n{}\n```\n\n'.format(code_html.prettify())
except:
return None
except (RuntimeError, requests.exceptions.MissingSchema):
pass
# print(e)
else:
return None
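# --- Editorial usage sketch, not part of the original module ---
# Minimal illustration of to_markdown() on a heading tag, assuming the `medium`
# package imports at the top of this file resolve. The HTML snippet is invented;
# `token` is only consulted for <figure> tags (where IpfsHandler uploads the
# image), so passing None is safe here.
if __name__ == '__main__':
    sample_tag = BeautifulSoup('<h3>Getting started</h3>', HTML_PARSER).find('h3')
    print(to_markdown(sample_tag, token=None))  # prints a blank line, then "## Getting started"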
```
#### File: docsupport/wordpress/file_handler.py
```python
import requests
import os
import shutil
import configparser
from requests_toolbelt import MultipartEncoder
from urllib.parse import urlparse
import logging
class IpfsHandler:
def __init__(self, source_url, token):
# config variables
self.config = configparser.RawConfigParser()
self.config.read('config.ini')
self.kauri_ipfs = 'https://api.dev.kauri.io:443/ipfs/'
self.JWT_token = token
# file variables
self.ipfs_url = None
self.source_url = source_url
self.img_bytes = self.get_bytes(self.source_url)
if self.ipfs_url is None:
self.json_resp = self.make_a_mp_request(self.img_bytes)
self.build_url()
def get_bytes(self, source_url):
res = requests.get(self.source_url, stream=True)
f = os.path.basename(urlparse(source_url).path)
# check filesize, if >= 10MB, skip multipart post request and use CDN URL
if res.ok and (int(res.headers['content-length']) >= 10000000):
print(
'[ATTN] Content length > 10MB: ' + \
res.headers['content-length'] + '. ' + \
'Using CDN URL instead.'
)
self.ipfs_url = self.source_url
else:
print(
'[OK] Content length <= 10MB: ' + \
res.headers['content-length'] + '. ' + \
'POSTing image to Kauri Gateway'
)
return (f, res.content)
def make_a_mp_request(self, img_bytes):
mp = MultipartEncoder(
fields = {
'file': (self.img_bytes[0], self.img_bytes[1])
}
)
try:
response = requests.post(
self.kauri_ipfs, # kauri ipfs gateway
data=mp,
headers={
'Content-Type': mp.content_type,
'X-Auth-Token': 'Bearer ' + self.JWT_token
}
)
if response.ok:
return response.json()
else:
                print('[!] Failed post request. Reason: ' + str(response.status_code))
except:
print('[!] Post request to api gateway failed')
return None
def build_url(self):
new_url = self.kauri_ipfs + self.json_resp['hash']
self.ipfs_url = new_url
return None
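# --- Editorial usage sketch, not part of the original module ---
# The image URL and JWT token are placeholders. Instantiating IpfsHandler
# immediately downloads the file and, if it is under 10 MB, POSTs it to the
# Kauri IPFS gateway, so this only succeeds with network access, a readable
# config.ini and a valid token.
if __name__ == '__main__':
    handler = IpfsHandler('https://example.com/sample.png', token='<JWT token>')
    print(handler.ipfs_url)  # gateway URL of the uploaded file, or the CDN URL for files over 10 MB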
``` |
{
"source": "jmrodri/ocp-release-operator-sdk",
"score": 2
} |
#### File: module_utils/client/discovery.py
```python
import json
import os
from collections import defaultdict
import hashlib
import tempfile
import kubernetes.dynamic
import kubernetes.dynamic.discovery
from kubernetes import __version__
from kubernetes.dynamic.exceptions import (ResourceNotFoundError, ResourceNotUniqueError,
ServiceUnavailableError)
from ansible_collections.kubernetes.core.plugins.module_utils.client.resource import ResourceList
class Discoverer(kubernetes.dynamic.discovery.Discoverer):
def __init__(self, client, cache_file):
self.client = client
default_cache_file_name = 'k8srcp-{0}.json'.format(hashlib.sha256(self.__get_default_cache_id()).hexdigest())
self.__cache_file = cache_file or os.path.join(tempfile.gettempdir(), default_cache_file_name)
self.__init_cache()
def __get_default_cache_id(self):
user = self.__get_user()
if user:
cache_id = "{0}-{1}".format(self.client.configuration.host, user)
else:
cache_id = self.client.configuration.host
return cache_id.encode('utf-8')
def __get_user(self):
# This is intended to provide a portable method for getting a username.
# It could, and maybe should, be replaced by getpass.getuser() but, due
# to a lack of portability testing the original code is being left in
# place.
if hasattr(os, 'getlogin'):
try:
user = os.getlogin()
if user:
return str(user)
except OSError:
pass
if hasattr(os, 'getuid'):
try:
user = os.getuid()
if user:
return str(user)
except OSError:
pass
user = os.environ.get("USERNAME")
if user:
return str(user)
return None
def __init_cache(self, refresh=False):
if refresh or not os.path.exists(self.__cache_file):
self._cache = {'library_version': __version__}
refresh = True
else:
try:
with open(self.__cache_file, 'r') as f:
self._cache = json.load(f, cls=CacheDecoder(self.client))
if self._cache.get('library_version') != __version__:
# Version mismatch, need to refresh cache
self.invalidate_cache()
except Exception:
self.invalidate_cache()
self._load_server_info()
self.discover()
if refresh:
self._write_cache()
def get_resources_for_api_version(self, prefix, group, version, preferred):
""" returns a dictionary of resources associated with provided (prefix, group, version)"""
resources = defaultdict(list)
subresources = defaultdict(dict)
path = '/'.join(filter(None, [prefix, group, version]))
try:
resources_response = self.client.request('GET', path).resources or []
except ServiceUnavailableError:
resources_response = []
resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response))
subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response))
for subresource in subresources_raw:
resource, name = subresource['name'].split('/')
subresources[resource][name] = subresource
for resource in resources_raw:
# Prevent duplicate keys
for key in ('prefix', 'group', 'api_version', 'client', 'preferred'):
resource.pop(key, None)
resourceobj = kubernetes.dynamic.Resource(
prefix=prefix,
group=group,
api_version=version,
client=self.client,
preferred=preferred,
subresources=subresources.get(resource['name']),
**resource
)
resources[resource['kind']].append(resourceobj)
resource_lookup = {
'prefix': prefix,
'group': group,
'api_version': version,
'kind': resourceobj.kind,
'name': resourceobj.name
}
resource_list = ResourceList(self.client, group=group, api_version=version, base_kind=resource['kind'], base_resource_lookup=resource_lookup)
resources[resource_list.kind].append(resource_list)
return resources
def get(self, **kwargs):
"""
Same as search, but will throw an error if there are multiple or no
results. If there are multiple results and only one is an exact match
on api_version, that resource will be returned.
"""
results = self.search(**kwargs)
# If there are multiple matches, prefer exact matches on api_version
if len(results) > 1 and kwargs.get('api_version'):
results = [
result for result in results if result.group_version == kwargs['api_version']
]
# If there are multiple matches, prefer non-List kinds
if len(results) > 1 and not all([isinstance(x, ResourceList) for x in results]):
results = [result for result in results if not isinstance(result, ResourceList)]
# if multiple resources are found that share a GVK, prefer the one with the most supported verbs
if len(results) > 1 and len(set((x.group_version, x.kind) for x in results)) == 1:
if len(set(len(x.verbs) for x in results)) != 1:
results = [max(results, key=lambda x: len(x.verbs))]
if len(results) == 1:
return results[0]
elif not results:
raise ResourceNotFoundError('No matches found for {0}'.format(kwargs))
else:
raise ResourceNotUniqueError('Multiple matches found for {0}: {1}'.format(kwargs, results))
class LazyDiscoverer(Discoverer, kubernetes.dynamic.LazyDiscoverer):
def __init__(self, client, cache_file):
Discoverer.__init__(self, client, cache_file)
self.__update_cache = False
class CacheDecoder(json.JSONDecoder):
def __init__(self, client, *args, **kwargs):
self.client = client
json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, obj):
if '_type' not in obj:
return obj
_type = obj.pop('_type')
if _type == 'Resource':
return kubernetes.dynamic.Resource(client=self.client, **obj)
elif _type == 'ResourceList':
return ResourceList(self.client, **obj)
elif _type == 'ResourceGroup':
return kubernetes.dynamic.discovery.ResourceGroup(obj['preferred'], resources=self.object_hook(obj['resources']))
return obj
``` |
{
"source": "jmrohwer/identifiability",
"score": 3
} |
#### File: jmrohwer/identifiability/identifiability.py
```python
from collections import OrderedDict
from lmfit.minimizer import Minimizer, MinimizerResult, MinimizerException
from lmfit.model import ModelResult
import numpy as np
import scipy as sp
from scipy import interpolate, stats  # ensure sp.interpolate and sp.stats resolve without relying on lmfit's own imports
import math
from matplotlib import pyplot as plt
from multiprocessing import Pool
__version__ = '0.3.3dev1'
CONF_ERR_GEN = 'Cannot determine Confidence Intervals'
CONF_ERR_NVARS = '%s with < 2 variables' % CONF_ERR_GEN
class ConfidenceInterval:
"""Class used to calculate the confidence interval."""
def __init__(self, minimizer, result, p_names=None, log=False):
assert isinstance(minimizer, Minimizer) or isinstance(
minimizer, ModelResult
), 'minimizer must be instance of `lmfit.minimizer.Minimizer` or `lmfit.model.ModelResult`'
assert isinstance(result, MinimizerResult) or isinstance(
result, ModelResult
), 'result must be instance of `lmfit.minimizer.MinimizerResult` or `lmfit.model.ModelResult`'
self.minimizer = minimizer
self.result = result
self.params = result.params.copy()
self.org = {}
for para_key in self.params:
self.org[para_key] = (
self.params[para_key].value,
self.params[para_key].stderr,
)
self.best_chi = result.chisqr
if not p_names:
p_names = [i for i in self.params if self.params[i].vary]
self.p_names = p_names
self.fit_params = [self.params[p] for p in self.p_names]
self.log = log
self._traces_calculated = False
self._k = 2 # degree of smoothing spline
# check that there are at least 2 true variables!
nvars = len([p for p in self.params.values() if p.vary])
if nvars < 2:
raise MinimizerException(CONF_ERR_NVARS)
self.trace_dict = {i: {} for i in self.p_names}
def calc_all_ci(self, limits=0.5, points=11, prob=0.95, method='leastsq', mp=True):
"""Calculate all confidence intervals."""
assert (
(type(prob) == float) & (prob > 0) & (prob < 1)
), 'Please provide a probability value between 0 and 1.'
self.prob = prob
self.method = method
self.ci_values = OrderedDict()
self.threshold = self._calc_threshold()
if not self._traces_calculated:
self._populate_traces(limits, points, mp)
for p in self.p_names:
self.ci_values[p] = self._process_ci(p)
return self.ci_values
def _populate_traces(self, limits, points, mp):
if mp:
proc_pool = Pool()
arl = []
results = []
for para in self.p_names:
if isinstance(para, str):
para = self.params[para]
if self.log:
para_vals = np.logspace(
np.log10(para.value * limits), np.log10(para.value / limits), points,
)
else:
para_vals = np.linspace(limits * para.value, (2 - limits) * para.value, points)
para.vary = False
self.trace_dict[para.name]['value'] = []
self.trace_dict[para.name]['dchi'] = []
self.trace_dict[para.name]['results'] = []
for val in para_vals:
self.trace_dict[para.name]['value'].append(val)
if mp:
arl.append(proc_pool.apply_async(self._calc_dchi, args=(self, para, val)))
else:
results.append(self.calc_dchi(para, val))
para.vary = True
self._reset_vals()
if mp:
arl[-1].wait()
for ar in arl:
results.append(ar.get())
proc_pool.close()
for (para, dchi, opt_res) in results:
self.trace_dict[para.name]['dchi'].append(dchi)
self.trace_dict[para.name]['results'].append(opt_res)
self._traces_calculated = True
def _process_ci(self, p_name):
xx = self.trace_dict[p_name]['value']
yy = self.trace_dict[p_name]['dchi']
t = self.threshold
spl = sp.interpolate.UnivariateSpline(xx, yy, k=self._k, s=0)
if self.log:
allx = np.logspace(np.log10(xx[0]), np.log10(xx[-1]), 20000)
else:
allx = np.linspace(xx[0], xx[-1], 20000)
lo = allx[spl(allx) <= t][0]
hi = allx[spl(allx) <= t][-1]
# catch non-identifiable cases
if lo == xx[0]:
lo = np.nan
if hi == xx[-1]:
hi = np.nan
return lo, hi
def _reset_vals(self):
"""Reset parameter values to best-fit values."""
for para_key in self.params:
(self.params[para_key].value, self.params[para_key].stderr,) = self.org[
para_key
]
@staticmethod
def _calc_dchi(ci_instance, para, val):
"""
Static method to calculate the normalised delta chi-squared
using multiprocessing.
"""
para.vary = False
para.value = val
save_para = ci_instance.params[para.name]
ci_instance.params[para.name] = para
ci_instance.minimizer.prepare_fit(ci_instance.params)
out = ci_instance.minimizer.minimize(method=ci_instance.method)
dchi = ci_instance._dchi(ci_instance.result, out)
ci_instance.params[para.name] = save_para
para.vary = True
return para, dchi, out
def calc_dchi(self, para, val, restore=False):
"""
Calculate the normalised delta chi-squared for
a given parameter value.
"""
if restore:
self._reset_vals()
para.value = val
save_para = self.params[para.name]
self.params[para.name] = para
self.minimizer.prepare_fit(self.params)
out = self.minimizer.minimize(method=self.method)
dchi = self._dchi(self.result, out)
self.params[para.name] = save_para
return para, dchi, out
def _dchi(self, best_fit, new_fit):
"""
Return the normalised delta chi-squared between the best fit
and the new fit.
"""
dchi = new_fit.chisqr / best_fit.chisqr - 1.0
return dchi
def _calc_threshold(self):
"""
Return the threshold of the normalised chi-squared for
the given probability.
"""
nfree = self.result.nfree
nfix = 1
threshold_scaled = sp.stats.chi2.ppf(self.prob, nfix)
threshold = threshold_scaled * nfix / nfree
return threshold
def plot_ci(self, para, ax=None):
assert para in self.p_names, 'para must be one of ' + str(self.p_names)
if not ax:
f, ax = plt.subplots()
xx = self.trace_dict[para]['value']
yy = self.trace_dict[para]['dchi']
t = self.threshold
spl = sp.interpolate.UnivariateSpline(xx, yy, k=self._k, s=0)
allx = np.linspace(xx[0], xx[-1], 20000)
ax.plot(xx, yy, '+')
ax.plot(allx, spl(allx), '-', lw=1)
ax.axhline(t, color='k', ls='--', lw=0.5)
ax.axvline(self.params[para].value, color='k', ls='-', lw=0.5)
lo, hi = self.ci_values[para]
if np.isnan(lo):
lo = ax.get_xlim()[0]
if np.isnan(hi):
hi = ax.get_xlim()[1]
ax.axvspan(lo, hi, alpha=0.1, color='b')
if self.log:
ax.semilogx()
ax.set_xlabel('Parameter value')
ax.set_ylabel(r'$\chi^2\left/\chi^2_0\right. - 1$')
ax.set_title(para)
def plot_all_ci(self):
num = len(self.p_names)
numcols = 3
numrows = math.ceil(num / numcols)
f, ax = plt.subplots(nrows=numrows, ncols=numcols, figsize=(9, 2.5 * numrows))
for i in range(num):
if num <= numcols:
theax = ax[i]
else:
theax = ax[i // numcols, i % numcols]
self.plot_ci(self.p_names[i], ax=theax)
# remove empty axes
if num % numcols != 0:
empty = numcols - num % numcols
for i in range(-empty, 0):
if num <= numcols:
ax[i].set_visible(False)
else:
ax[num // numcols, i].set_visible(False)
f.tight_layout()
def conf_interval(
minimizer,
result,
p_names=None,
prob=0.95,
limits=0.5,
log=False,
points=11,
method='leastsq',
return_CIclass=False,
mp=True,
):
"""
Calculate the confidence interval (CI) for parameters.
The parameter for which the CI is calculated will be varied, while the
remaining parameters are re-optimized to minimize the chi-square. The
resulting chi-square is used to calculate the probability with a given
statistic, i.e. chi-squared test.
Parameters
----------
minimizer : Minimizer or ModelResult
The minimizer to use, holding objective function.
result : MinimizerResult or ModelResult
The result of running Minimizer.minimize() or Model.fit().
p_names : list, optional
Names of the parameters for which the CI is calculated. If None
(default), the CI is calculated for every parameter.
prob : float, optional
The probability for the confidence interval (<1). If None,
the default is 0.95 (95 % confidence interval).
limits : float, optional
The limits (as a fraction of the original parameter value) within which
to vary the parameters for identifiability analysis (default is 0.5).
If ``log=False``, the parameter is varied from p*limits to p*(2 - limits),
where p is the original value.
If ``log=True``, the parameter is varied from p*limits to p/limits.
log : bool, optional
Whether to vary the parameter in a log (True) or a linear (False,
default) scale.
points : int, optional
The number of points for which to calculate the profile likelihood over
the given parameter range.
method : str, optional
The lmfit mimimize() method to use (default='leastsq')
return_CIclass : bool, optional
When true, return the instantiated ``ConfidenceInterval`` class to
access its methods directly (default=False).
mp : bool, optional
Run the optimization in parallel using ``multiprocessing`` (default=True)
Returns
-------
output : dict
A dictionary containing a list of ``(lower, upper)``-tuples containing
the confidence bounds for each parameter.
ci : ``ConfidenceInterval`` instance, optional
Instantiated ``ConfidenceInterval`` class to access the attached methods.
"""
assert (limits > 0) & (limits < 1), 'Please select a limits value between 0 and 1.'
ci = ConfidenceInterval(minimizer, result, p_names, log)
output = ci.calc_all_ci(limits, points, prob, method=method, mp=mp)
if return_CIclass:
return output, ci
return output
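# --- Editorial usage sketch, not part of the original module ---
# Toy exponential-decay fit showing the expected call pattern; the data and
# parameter names are invented for illustration. mp=False avoids multiprocessing,
# which could not pickle the locally defined residual.
if __name__ == '__main__':
    import lmfit
    x = np.linspace(0, 10, 50)
    rng = np.random.default_rng(0)
    y = 3.0 * np.exp(-0.5 * x) + 0.1 + rng.normal(scale=0.05, size=x.size)
    def residual(params):
        v = params.valuesdict()
        return v['amp'] * np.exp(-v['rate'] * x) + v['offset'] - y
    params = lmfit.Parameters()
    params.add('amp', value=2.0, min=0)
    params.add('rate', value=0.3, min=0)
    params.add('offset', value=0.0)
    mini = lmfit.Minimizer(residual, params)
    fit = mini.minimize(method='leastsq')
    bounds = conf_interval(mini, fit, prob=0.95, points=7, mp=False)
    print(bounds)  # OrderedDict of (lower, upper) per parameter; NaN marks a non-identifiable bound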
``` |
{
"source": "jmrohwer/pysces",
"score": 2
} |
#### File: pysces/pysces/PyscesJWSParse.py
```python
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from pysces.version import __version__
__doc__ = "PySCeS JWS parser module -- uses PLY 1.5 or newer"
try:
input = raw_input # Py2 compatibility
except NameError:
pass
import os, copy
try:
    reload  # Python 2 built-in
except NameError:
    from importlib import reload  # Python 3: reload() lives in importlib
from .lib import lex
from .lib import yacc
from getpass import getuser
from time import sleep, strftime
from scipy import MachAr
MyMachArr = MachAr()
class JWSParser:
"""JWSParser written by Johann, based on Jannie's lexparse and integrated into PySCeS by brett -- converts PySCeS (.psc) files to JWS Online (jws) files"""
ReactionIDs = [] # List of reaction names
Names = [] # List of all reagent, parameter and function names
LexErrors = [] # List of lexing errors
NetworkDict = {} # Dictionary containing all reaction information
InitStrings = [] # Initialisation strings
InitParStrings = [] # Initialisation strings for parameters -- johann new
InitVarStrings = [] # Initialisation strings for variables -- johann new
Inits = [] # Initialised entities
Reagents = [] # All reagents found during parsing of reactions
VarReagents = [] # Variable reagents that occur in reactions
FixedReagents = [] # Fixed reagents
ReacParams = [] # Temporary list of reaction parameters
InitParams = [] # Initialised parameters
ParseErrors = []
mach_spec = MyMachArr
AllRateEqsGiven = 1 # Flag to check that all rate equations have been given
Debug = 0
##############
# Build the lexer
##############
# elementary regular expressions used as building blocks
Int = r'\d+' # Integer
    Dec = Int + r'\.' + Int  # Decimal
# List of token names
tokens = (
'FIXDEC',
'IRREV',
#'REAL', # johann -- now build up real in a p function since we want to make exponent explicit
'INT',
'DEC', # johann -- added explicitly since we no longer have REAL token
'PLUS',
'MINUS',
'TIMES',
'DIVIDE',
'POWER',
'LPAREN',
'RPAREN',
'EQUALS',
'COMMA',
'REACTION_ID',
'STOICH_COEF',
'NAME',
'EXP',
) # johann -- new EXP token
# Simple tokens
t_IRREV = r'>'
# t_REAL = Real # johann -- no longer have a real token, now a p function
t_INT = Int
t_DEC = Dec # new DEC token
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
    t_POWER = r'\*\*'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_EQUALS = r'='
t_COMMA = r','
t_ignore = ' \t\r' # Ignore spaces and tabs --- and windows return - brett 20040229
def t_comment(self, t):
r'\#.+\n' # Match from # to newline
t.lineno += 1 # Increment line number
def t_newline(self, t):
r'\n+' # Match newline
t.lineno += len(t.value) # Increment with number of consecutive newlines
def t_EXP(self, t): # johann -- need separate EXP token to replace for Mathematica
r'\d+\.?\d*[E|e][\+|\-]?' # define EXP token merely as digits[.]digits[E|e][+|-]
t.type = 'EXP' # parse final integer separately in 'Real' p-function to remove leading zeros
t.value = t.value.replace('e', ' 10^')
t.value = t.value.replace('E', ' 10^')
return t
def t_FIXDEC(self, t):
r'FIX:'
t.type = 'FIXDEC'
t.value = 'FIX:'
return t
def t_REACTION_ID(self, t):
r'[a-zA-Z]\w*:' # Match any letter followed by zero or more characters
# in [a-zA-Z0-9_] up to a colon
t.type = 'REACTION_ID'
if t.value[0] == 'v' and len(t.value) > 1:
t.value = t.value[
1:
] # remove initial 'v' if present to avoid constructions like 'v[vR1]'
t.value = (
'v[' + t.value[:-1] + ']'
) # remove the colon and add v[] for JWS -- johann
if t.value in self.ReactionIDs:
self.LexErrors.append(('Duplicate ReactionID ', t.lineno, t.value, t.type))
else:
self.ReactionIDs.append(t.value)
return t
def t_STOICH_COEF(self, t):
r'\{\d+\}|\{\d+\.\d+\}'
t.type = 'STOICH_COEF'
t.value = t.value[1:-1]
return t
def t_NAME(self, t):
r'[a-zA-Z][\w]*' # Match any letter followed by zero or characters in the set [a-zA-Z0-9_]
if (t.value + '[t]' not in self.Names) and (
t.value not in self.FuncNames
): # Only add to list if absent in list
# self.Names.append(t.value)
self.Names.append(t.value + '[t]') # -- johann
# print self.Names[-1]
# hack! - brett
if (
t.value not in self.FuncNames
): # make class attributes, ignore function names
# print 't value before', t.value
gt = t.value + '[t]'
t.value = gt
# print 't value after', t.value
t.type = 'NAME'
return t
def t_error(self, t):
self.LexErrors.append(('Lexer error ', t.lineno, t.value, t.type))
print('Illegal character, Line ' + str(t.lineno) + ' :' + str(t.value[0]))
t.skip(1)
##############
# The parser #
##############
FuncNames = (
'acos',
'asin',
'atan',
'atan2',
'ceil',
'cos',
'cosh',
'exp',
'fabs',
'floor',
'fmod',
'frexp',
'hypot',
'ldexp',
'log',
'log10',
'modf',
'pow',
'sin',
'sinh',
'sqrt',
'tan',
'tanh',
)
precedence = (
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
('left', 'POWER'),
('right', 'UMINUS'),
)
def Show(self, name, tok):
if self.Debug:
print(name, tok)
def p_error(self, t):
self.ParseErrors.append(('Syntax error ', t.lineno, t.value, t.type))
print('Syntax error, Line ' + str(t.lineno) + ' : ' + str(t.value))
tok = yacc.token()
while tok and tok.type != 'REACTION_ID':
tok = yacc.token()
return tok
def p_model(self, t):
'''Model : Statement
| Model Statement '''
self.Show('Model', t[0])
def p_statement(self, t):
'''Statement : Fixed
| ReactionLine
| Initialise'''
self.Show('Statement', t[0])
def p_fixed(self, t):
'''Fixed : FIXDEC FixList'''
self.Show('Fixed:', t[0])
def p_fixedreagents(self, t):
'''FixList : NAME
| NAME FixList'''
if t[1] != None:
self.FixedReagents.append(t[1][:-3]) # johann -- remove [t] off end
t[0] = [t[1]]
try:
t[0] += t[2]
except:
pass
self.Show('FixList', t[0])
def p_initialise(self, t):
'''Initialise : NAME EQUALS Expression'''
t[1] = t[1][:-3] + '[0]' # johann 20050302 -- Mathematica initialisation
t[0] = t[1] + t[2] + t[3]
## del temp
self.InitStrings.append(t[0].replace('=', ' = '))
self.Inits.append(t[1])
self.Show('Initialisation', t[0])
def p_reaction_line(self, t):
'''ReactionLine : REACTION_ID ReactionEq
| REACTION_ID ReactionEq Expression'''
# global self.AllRateEqsGiven, ReacParams
ReacID = t[1]
if ReacID in self.NetworkDict:
self.ParseErrors.append(('Duplicate Reaction ', t.lineno, ReacID, None))
self.NetworkDict[ReacID] = {} # Reaction dictionary for ReacID
self.NetworkDict[ReacID]['Reagents'] = {} # Reagent dictionary within ReacID
# brett: if an index exists sum the coefficients instead of adding a new one
# this seems to deal with multiple definitions like X + X > Y and 2{X} + Y > Z + X
for i in t[2][
0
]: # First tuple member of ReactionEq contains list of (name,stoichcoef)
if i[0] in self.NetworkDict[ReacID]['Reagents']:
self.NetworkDict[ReacID]['Reagents'][i[0]] = (
self.NetworkDict[ReacID]['Reagents'][i[0]] + i[1]
)
else:
self.NetworkDict[ReacID]['Reagents'][i[0]] = i[
1
] # Key for reagent with stoichcoef value
killList = []
# brett: however for the case of X + Y > Y + Z where the sum of the coefficients
# is zero we can delete the key (Y) out of the reaction list altgether (hopefully!)
for i in self.NetworkDict[ReacID]['Reagents']:
if (
abs(self.NetworkDict[ReacID]['Reagents'][i])
< self.mach_spec.eps * 100.0
):
killList.append(i)
# print self.mach_spec.eps*100.0, self.NetworkDict[ReacID]['Reagents']
# print killList, self.NetworkDict[ReacID]['Reagents']
# brett: and the easiest way of doing this is putting the zero keys in a list
# and deleting them out of the dictionary
if len(killList) != 0:
for i in killList:
del self.NetworkDict[ReacID]['Reagents'][i]
# print killList, self.NetworkDict[ReacID]['Reagents']
self.NetworkDict[ReacID]['Type'] = t[2][
1
] # Second tuple member of ReactionEq contains type
try: # Save rate equation and create parameter list
self.NetworkDict[ReacID]['RateEq'] = t[3]
self.NetworkDict[ReacID]['Params'] = self.ReacParams
self.ReacParams = [] # Reset global self.ReacParams list
except:
self.NetworkDict[ReacID]['RateEq'] = ''
self.NetworkDict[ReacID]['Params'] = []
self.AllRateEqsGiven = 0 # Set global flag to false
self.Show('ReactionLine', t[0])
self.Show('t1', t[1])
self.Show('t2', t[2])
self.Show('t3', t[3])
def p_reaction_eq(self, t):
'''ReactionEq : LeftHalfReaction EQUALS RightHalfReaction
| LeftHalfReaction IRREV RightHalfReaction'''
ReacType = ''
if t[2] == '=':
ReacType = 'Rever'
elif t[2] == '>':
ReacType = 'Irrev'
t[0] = (t[1] + t[3], ReacType)
self.Show('ReactionEq', t[0])
def p_left_half_reaction(self, t):
''' LeftHalfReaction : SubstrateTerm
| SubstrateTerm PLUS LeftHalfReaction'''
# Make a list of substrate terms
t[0] = [t[1]]
try:
t[0] += t[3]
except:
pass
# brett
# print "lhr ", t[0]
self.Show('LeftHalfReaction', t[0])
def p_right_half_reaction(self, t):
''' RightHalfReaction : ProductTerm
| ProductTerm PLUS RightHalfReaction'''
# Make a list of product terms
t[0] = [t[1]]
try:
t[0] += t[3]
except:
pass
# brett
# print "rhr ", t[0]
self.Show('RightHalfReaction', t[0])
def p_substrate_term(self, t):
'''SubstrateTerm : STOICH_COEF NAME
| NAME'''
# Make tuple of NAME and stoichiometric coefficient
# (< 0 because substrate)
try:
t[0] = (t[2], -float(t[1]))
if t[2] not in self.Reagents:
self.Reagents.append(t[2])
except:
t[0] = (t[1], -1.0)
if t[1] not in self.Reagents:
self.Reagents.append(t[1])
self.Show('SubstrateTerm', t[0])
def p_product_term(self, t):
'''ProductTerm : STOICH_COEF NAME
| NAME'''
# Make tuple of NAME and stoichiometric coefficient
# (> 0 because product)
try:
t[0] = (t[2], float(t[1]))
if t[2] not in self.Reagents:
self.Reagents.append(t[2])
except:
t[0] = (t[1], 1.0)
if t[1] not in self.Reagents:
self.Reagents.append(t[1])
self.Show('ProductTerm', t[0])
def p_rate_eq(self, t):
'''Expression : Expression PLUS Expression
| Expression MINUS Expression
| Expression TIMES Expression
| Expression DIVIDE Expression
| Power
| Number
| Func'''
# |UMINUS : add if the
# alternative for p_uminus is used
if len(t.slice) == 4:
t[0] = t[1] + t[2] + t[3]
else:
t[0] = t[1]
def p_power(self, t):
'''Power : Expression POWER Expression'''
t[0] = (
'Power[' + t[1] + ',' + t[3] + ']'
) # changed to Mathematica notation -- johann
def p_uminus(self, t):
'''Expression : MINUS Expression %prec UMINUS'''
# Alternative '''UMINUS : MINUS Expression'''
t[0] = t[1] + t[2]
def p_number(self, t):
'''Number : Real
| INT
| DEC
| NAME'''
# Build list of entities
try:
float(t[1]) # check for a number
except:
if (
(t[1] not in self.FuncNames)
and (t[1] not in self.ReacParams)
and (' 10^' not in t[1])
):
# ignore function names, duplications and exponentials
self.ReacParams.append(t[1])
# self.ReacParams.append('self.' + t[1])
t[0] = t[1]
def p_real(self, t):
'''Real : EXP INT'''
loop = 1
while loop == 1: # remove leading zeros from exponent
if t[2][0] == '0' and len(t[2]) > 1:
t[2] = t[2][1:]
else:
loop = 0
t[0] = t[1] + t[2]
def p_function(self, t):
'''Func : LPAREN ArgList RPAREN
| NAME LPAREN ArgList RPAREN'''
try:
t[0] = t[1] + t[2] + t[3] + t[4]
except:
t[0] = t[1] + t[2] + t[3]
def p_arglist(self, t):
'''ArgList : Expression
| Expression COMMA Expression'''
t[0] = t[1]
try:
t[0] += t[2] + t[3]
except:
pass
############################################
# end of lexer and parser definitions
############################################
def psc2jws(self, File, indir=None, outdir=None, quiet=1, debug=0):
"""
psc2jws(File,indir=None,outdir=None,quiet=1,debug=0)
Convert a PySCeS (.psc) file to a JWS Online (.jws) file. Call with the input file name, note the input (indir) and output (outdir) can optionally be specified.
Arguments:
=========
File: PSC input file
indir [default=None]: directory of PSC file
outdir [default=None]: output directory for JWS file
quiet [default=1]: turn lex/parse noise on/off
debug [default=0]: optionally output debug information
"""
if indir == None:
indir = os.getcwd()
if outdir == None:
outdir = os.getcwd()
if os.path.exists(os.path.join(indir, File)) and File[-4:] == '.psc':
go = 1
else:
print('\nIgnoring non-PySCeS model file: ' + os.path.join(indir, File))
go = 0
if go == 1:
# clean up the modules
reload(lex) # brett's bugbear code these have to be here ALWAYS!!
reload(yacc)
# clean up the instance
self.ReactionIDs = [] # List of reaction names
self.Names = [] # List of all reagent, parameter and function names
self.LexErrors = [] # List of lexing errors
self.NetworkDict = {} # Dictionary containing all reaction information
self.InitStrings = [] # Initialisation strings
self.Inits = [] # Initialised entities
self.Reagents = [] # All reagents found during parsing of reactions
self.FixedReagents = [] # Fixed reagents
self.ReacParams = [] # Temporary list of reaction parameters
self.ParseErrors = []
self.InitParStrings = (
[]
) # Initialisation strings for parameters -- johann new
self.InitVarStrings = (
[]
) # Initialisation strings for variables -- johann new
self.VarReagents = [] # Variable reagents that occur in reactions
self.InitParams = [] # Initialised parameters
print('\nParsing file: ' + os.path.join(indir, File))
Data = open(os.path.join(indir, File), 'r')
Model = Data.read()
Data.close()
self.Debug = debug
self.AllRateEqsGiven = (
1 # Flag to check that all rate equations have been given
)
# try and find a temporary workspace or use cwd
if 'TMP' in os.environ:
tempDir = os.environ['TMP']
elif 'TEMP' in os.environ:
tempDir = os.environ['TEMP']
else:
tempDir = os.getcwd()
os.chdir(tempDir)
# fix filenames for intermediary files - brett
if not File[:-4].isalnum():
FileL = list(File)
FileT = ''
for let in FileL:
if let.isalnum():
FileT += let
# instantiate the lexer and parser
self.debugfile = '_jws' + FileT[:-3] + ".dbg"
self.tabmodule = '_jws' + FileT[:-3] + "_" + "parsetab"
else:
self.debugfile = '_jws' + File[:-4] + ".dbg"
self.tabmodule = '_jws' + File[:-4] + "_" + "parsetab"
if self.Debug:
print(self.tabmodule)
print(self.debugfile)
lex.lex(module=self, debug=self.Debug)
lex.input(Model)
yacc.yacc(
module=self,
debug=self.Debug,
debugfile=self.debugfile,
tabmodule=self.tabmodule,
)
os.chdir(outdir)
while 1:
tok = lex.token()
if not tok:
break
if self.LexErrors != []:
print('self.LexErrors = ', self.LexErrors, '\n')
while 1:
p = yacc.parse(Model)
if not p:
break
# we have the dictionary get rid of this stuff
del Model, p
# Create list of variable reagents and remove '[t]' from fixed reagents
for i in range(
len(self.Reagents)
): # johann -- new construction otherwise list elements not replaced
if self.Reagents[i][:-3] not in self.FixedReagents:
self.VarReagents.append(self.Reagents[i])
if self.Reagents[i][:-3] in self.FixedReagents:
self.Reagents[i] = self.Reagents[i][:-3]
# Create list of initialised parameters
for i in range(len(self.Inits)): # johann -- reworked extensively
if self.Inits[i][:-3] + '[t]' not in self.VarReagents:
self.InitStrings[i] = self.InitStrings[i].replace('[0]', '')
self.InitStrings[i] = self.InitStrings[i].replace(
'[t]', ''
) # capture params initialised i.t.o. other params
self.Inits[i] = self.Inits[i][:-3]
self.InitParams.append(self.Inits[i])
self.InitParStrings.append(self.InitStrings[i])
elif self.Inits[i][:-3] + '[t]' in self.VarReagents:
self.InitVarStrings.append(self.InitStrings[i])
# In self.NetworkDict, clean rate equation parameter list of variables that occur in that reaction
# Add FixedReagent to Params even if not a parameter in rate eqn (requirement to add '$' below)
for id in list(self.NetworkDict.keys()):
for reag in self.VarReagents:
if reag in self.NetworkDict[id]['Params']:
self.NetworkDict[id]['Params'].remove(reag)
for reag in self.FixedReagents:
if (
reag + '[t]' in list(self.NetworkDict[id]['Reagents'].keys())
) and (reag not in self.NetworkDict[id]['Params']):
self.NetworkDict[id]['Params'].append(reag + '[t]')
# Warn if no reagents have been fixed
if self.FixedReagents == []:
print('Warning: No reagents have been fixed')
else: # Warn if a fixed reagent does not occur in a reaction equation
for reag in self.FixedReagents:
if reag not in self.Reagents:
print(
'Warning: '
+ reag
+ ' (fixed) does not occur in any reaction'
)
# Check whether all parameters have been initialised
# johann -- remove [t] from params
for id in list(self.NetworkDict.keys()):
for i in range(len(self.NetworkDict[id]['Params'])):
self.NetworkDict[id]['Params'][i] = self.NetworkDict[id]['Params'][
i
][:-3]
if self.NetworkDict[id]['Params'][i] not in self.InitParams:
print(
'Warning: Parameter '
+ self.NetworkDict[id]['Params'][i]
+ ' has not been initialised'
)
# Check whether all variable reagents have been initialised
for reag in self.VarReagents:
if reag[:-3] + '[0]' not in self.Inits:
print('Warning: Variable ' + reag + ' has not been initialised')
# Check that all initialised parameters actually occur in self.Inits
known = 0
for param in self.InitParams:
for id in list(self.NetworkDict.keys()):
if param in self.NetworkDict[id]['Params']:
known = 1
break
else:
known = 0
if not known:
print(
'Warning: '
+ param
+ ' has been initialised but does not occur in any rate equation'
)
# clean up rate equations in self.NetworkDict to remove [t] for Params
# clean up Reagents to remove [t] and add $ for fixed
for id in list(self.NetworkDict.keys()):
for param in self.NetworkDict[id]['Params']:
self.NetworkDict[id]['RateEq'] = self.NetworkDict[id][
'RateEq'
].replace(param + '[t]', param)
for reag in list(self.NetworkDict[id]['Reagents'].keys()):
if reag[:-3] in self.NetworkDict[id]['Params']:
saveval = self.NetworkDict[id]['Reagents'].pop(reag)
self.NetworkDict[id]['Reagents']['$' + reag[:-3]] = saveval
else:
saveval = self.NetworkDict[id]['Reagents'].pop(reag)
self.NetworkDict[id]['Reagents'][reag[:-3]] = saveval
# output errors
if self.ParseErrors != []:
print('Parse errors occurred: ', self.ParseErrors)
# debugging
if debug:
print('\n\n\n')
print('\nself.ReactionIDs: ', self.ReactionIDs)
print('\nself.NetworkDict: ', self.NetworkDict)
print('\nself.Names: ', self.Names)
print('\nself.Inits: ', self.Inits)
print('\nself.InitStrings: ', self.InitStrings)
print('\nself.InitParStrings: ', self.InitParStrings)
print('\nself.InitVarStrings: ', self.InitVarStrings)
print('\nself.InitParams: ', self.InitParams)
print('\nself.Reagents: ', self.Reagents)
print('\nself.FixedReagents: ', self.FixedReagents)
print('\nself.VarReagents: ', self.VarReagents)
print('\nParseErrors: ', self.ParseErrors)
# now write the jws output file
filename = File[:-4]
filename = self.chkjws(filename)
go = 0
loop = 0
filex = ''
while loop == 0:
try:
filex = os.path.join(outdir, filename)
f = open(filex, 'r')
f.close()
inp = input('\nFile "' + filex + '" exists.\nOverwrite? ([y]/n) ')
if inp == 'y' or inp == '':
go = 1
loop = 1
elif inp == 'n':
filename = input(
'\nFile "' + filename + '" exists. Enter a new filename: '
)
go = 1
filex = os.path.join(outdir, filename)
filename = self.chkjws(filename)
else:
print('\nInvalid input')
except:
print('\nFile "' + filex + '" does not exist, proceeding...')
loop = 1
go = 1
if go == 1:
try:
UseR = getuser()
except:
UseR = ''
outFile = open(filex, 'w')
header = ''
# header += '############################################################\n'
header += '# JWS model input file \n'
header += (
'# Generated by PySCeS ('
+ __version__
+ ') (http://pysces.sourceforge.net) \n'
)
header += '# Pysces input file: ' + File + '\n'
header += (
'# This file generated: '
+ strftime("%a, %d %b %Y %H:%M:%S")
+ ' by '
+ UseR
+ ' \n'
)
header += (
'###########################################################\n\n'
)
outFile.write(header)
# modelname
modelname = File[:-4]
outFile.write('begin name\n' + modelname + '\nend name\n\n')
# reactions and rate equations
reaction_list = []
rateeq_list = []
nd = self.NetworkDict
reaclist = copy.copy(
list(nd.keys())
) # johann -- to sort self.ReactionIDs neatly ;-)
reaclist.sort()
for key in reaclist: # key = reaction name
reagL = []
reagR = []
Req = copy.copy(nd[key]['RateEq'])
for reagent in nd[key]['Reagents']:
if nd[key]['Reagents'][reagent] > 0:
reagR.append(
'{'
+ str(abs(nd[key]['Reagents'][reagent]))
+ '}'
+ reagent
)
elif nd[key]['Reagents'][reagent] < 0:
reagL.append(
'{'
+ str(abs(nd[key]['Reagents'][reagent]))
+ '}'
+ reagent
)
substring = ''
count = 0
for x in reagL:
if count != 0:
substring += ' + '
substring += x.replace(' ', '')
count += 1
prodstring = ''
count = 0
for x in reagR:
if count != 0:
prodstring += ' + '
prodstring += x.replace(' ', '')
count += 1
symbol = ' = '
reaction_list.append(key + '\t' + substring + symbol + prodstring)
rateeq_list.append(key + ' = ' + Req)
outFile.write('begin reactions\n')
for x in reaction_list:
outFile.write(x + '\n')
outFile.write('end reactions\n\n')
outFile.write('begin rate equations\n')
for x in rateeq_list:
outFile.write(x + '\n')
outFile.write('end rate equations\n\n')
# parameters
outFile.write('begin parameters\n')
for x in self.InitParStrings:
outFile.write(x + '\n')
outFile.write('end parameters\n\n')
# species initial values
outFile.write('begin initial conditions\n')
for x in self.InitVarStrings:
outFile.write(x + '\n')
outFile.write('end initial conditions\n\n')
# close output file
outFile.close()
# print to stdout if quiet is set to zero
if quiet == 0:
print('\nModel name: ' + modelname)
print("\nReactions:")
for x in reaction_list:
print(x)
print("\nRate Equations:")
for x in rateeq_list:
print(x)
print('\nParameters:')
for x in self.InitParStrings:
print(x)
print('\nSpecies Initial Values:')
for x in self.InitVarStrings:
print(x)
def chkjws(self, File):
"""
chkjws(File)
Checks if a filename has a .jws extension and adds one to the returned filename if needed
Arguments:
=========
File: the filename to check
"""
try:
if File[-4:] == '.jws':
pass
else:
print('Assuming extension is .jws')
File += '.jws'
except:
print('Chkjws error')
return File
if __name__ == '__main__':
import os, sys
from time import sleep
inDiR = 'c://mypysces//pscmodels'
outDiR = 'c://mypysces//jws'
jwp = JWSParser()
for mod in os.listdir(inDiR):
jwp.psc2jws(mod, indir=inDiR, outdir=outDiR, quiet=1, debug=0)
    # psp = PySCeSParser(debug=0)  # PySCeSParser is not defined in this module; left commented to avoid a NameError
``` |
{
"source": "jmromer/approvals_validator",
"score": 3
} |
#### File: approvals_validator/approval_validator/change_set.py
```python
from functools import cached_property
from typing import IO, Any, Iterator, Tuple
from changed_directory import ChangedDirectory
class ChangeSet:
def __init__(self, approvers: Tuple[str, ...],
changed_files: Tuple[IO[Any], ...]):
self.approvals_received = approvers
self.changed_files = changed_files
@property
def affected_directories(self) -> Iterator[ChangedDirectory]:
"""
Iterator for the directories affected by the change set.
Includes transitively affected directories.
"""
return (ChangedDirectory(cf, approvals=self.approvals_received)
for cf in self.changed_files)
@cached_property
def approved(self) -> bool:
"""
Return True if all affected directories have received sufficient
approvals.
"""
return all(d.approved for d in self.affected_directories)
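# --- Editorial usage sketch, not part of the original module ---
# ChangedDirectory resolves OWNERS and DEPENDENCIES files on disk, so the file
# path and approver names below are placeholders for a real project checkout.
if __name__ == "__main__":
    with open("src/payments/charge.py") as changed_file:
        change_set = ChangeSet(
            approvers=("alice", "bob"),
            changed_files=(changed_file,),
        )
        print(change_set.approved)  # True only if every affected directory has enough approvals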
```
#### File: approvals_validator/approval_validator/file_utils.py
```python
from contextlib import contextmanager
from exceptions import ProjectRootNotFoundError
from functools import lru_cache
from pathlib import Path
from typing import IO, Any, List, Set
OWNERS_FILE: str = "OWNERS"
DEPS_FILE: str = "DEPENDENCIES"
PROJECT_ROOT_FILES: List[str] = [
".git",
"src/",
"setup.py",
]
@contextmanager
def open_file(path: str):
"""Open and yield the file at the given `path`. Fail quietly."""
try:
with open(path, "r") as f:
yield f
except FileNotFoundError:
yield ()
@lru_cache()
def owners_set(directory: Path) -> Set[str]:
"""
Return the list of the owners for the given `directory`.
"""
with open_file(f"{directory}/{OWNERS_FILE}") as f:
lines = (line.rstrip() for line in f)
return set(line for line in lines if line)
@lru_cache()
def approvers_set(directory: Path) -> Set[str]:
"""
Return the potential approvers listing for the given `directory`.
"""
curr_dir: Path = directory.absolute()
proj_root: Path = project_root(directory)
approvers = set()
while True:
approvers.update(owners_set(curr_dir))
if curr_dir == proj_root:
break
curr_dir = curr_dir.parent
return approvers
@lru_cache()
def dependencies_set(directory: Path) -> Set[str]:
"""Return the dependencies listing for the given `directory`."""
with open_file(f"{directory}/{DEPS_FILE}") as f:
lines = (line.rstrip() for line in f)
return set(line for line in lines if line)
def containing_directory(file_object: IO[Any]) -> Path:
"""Return the directory containing the given file."""
return Path(file_object.name).parent.absolute()
@lru_cache()
def is_project_root(directory: Path) -> bool:
"""Return True if the given `directory` is a project root."""
for root_file in PROJECT_ROOT_FILES:
if (directory.absolute() / root_file).exists():
return True
return False
@lru_cache()
def project_root(curr_dir: Path) -> Path:
"""
Search up the file hiearchy from the current directory `curr_dir` for a
project root, returning one if found.
"""
target_dir = curr_dir
curr_dir = curr_dir.expanduser().absolute()
sys_root = Path("/")
while not is_project_root(curr_dir) and curr_dir != sys_root:
curr_dir = curr_dir.parent
if curr_dir == sys_root:
raise ProjectRootNotFoundError(target_dir)
return curr_dir
@lru_cache()
def find_dependent_dirs(target_dir: Path) -> Set[Path]:
"""
Find all project directories dependent on directory `target_dir`, including
transitively dependent directories.
Return a set of (absolute) Paths.
"""
target_dir = target_dir.absolute()
proj_root = project_root(target_dir)
all_dep_dirs = set()
curr_deps = set([target_dir.relative_to(proj_root)])
while curr_deps:
direct_deps = find_direct_dependent_dirs(
from_root=proj_root,
for_dir=curr_deps.pop(),
)
curr_deps.update(direct_deps)
all_dep_dirs.update(curr_deps)
return set([proj_root / d for d in all_dep_dirs])
@lru_cache()
def find_direct_dependent_dirs(for_dir: Path, from_root: Path) -> Set[Path]:
"""
Collect all directories directly dependent on directory `for_dir`.
Search from root directory `from_root`.
Return a Set of Paths relative to the given project root.
"""
target_dirname = str(for_dir)
dependents = set()
candidates = (df.parent for df in from_root.rglob(DEPS_FILE))
for cand_dir in candidates:
if target_dirname in dependencies_set(cand_dir):
dependents.add(cand_dir.relative_to(from_root))
return dependents
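# --- Editorial usage sketch, not part of the original module ---
# The directory below is a placeholder; it must live under a project root (a
# directory containing .git, src/ or setup.py) whose DEPENDENCIES files are
# scanned to collect direct and transitive dependents.
if __name__ == "__main__":
    for dependent in sorted(find_dependent_dirs(Path("src/payments"))):
        print(dependent)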
``` |
{
"source": "jmroot/build",
"score": 2
} |
#### File: build/tests/test_module.py
```python
import sys
import pytest
import build
def test_version():
assert build.__version__
@pytest.mark.skipif(sys.version_info < (3, 7), reason='Python 3.7+ required for dir support')
def test_dir():
assert set(dir(build)) == set(build.__all__)
```
#### File: build/tests/test_self_packaging.py
```python
import subprocess
import sys
import tarfile
import zipfile
from pathlib import Path
import pytest
DIR = Path(__file__).parent.resolve()
MAIN_DIR = DIR.parent
sdist_files = {
'LICENSE',
'PKG-INFO',
'README.md',
'pyproject.toml',
'setup.cfg',
'setup.py',
'src',
'src/build',
'src/build.egg-info',
'src/build.egg-info/PKG-INFO',
'src/build.egg-info/SOURCES.txt',
'src/build.egg-info/dependency_links.txt',
'src/build.egg-info/entry_points.txt',
'src/build.egg-info/requires.txt',
'src/build.egg-info/top_level.txt',
'src/build/__init__.py',
'src/build/__main__.py',
'src/build/env.py',
'src/build/py.typed',
'src/build/util.py',
}
wheel_files = {
'build/__init__.py',
'build/__main__.py',
'build/env.py',
'build/py.typed',
'build/util.py',
'dist-info/LICENSE',
'dist-info/METADATA',
'dist-info/RECORD',
'dist-info/WHEEL',
'dist-info/entry_points.txt',
'dist-info/top_level.txt',
}
def test_build_sdist(monkeypatch, tmpdir):
monkeypatch.chdir(MAIN_DIR)
subprocess.run(
[
sys.executable,
'-m',
'build',
'--sdist',
'--outdir',
str(tmpdir),
],
check=True,
).stdout
(sdist,) = tmpdir.visit('*.tar.gz')
with tarfile.open(str(sdist), 'r:gz') as tar:
simpler = {n.split('/', 1)[-1] for n in tar.getnames()[1:]}
assert simpler == sdist_files
@pytest.mark.parametrize('args', ((), ('--wheel',)), ids=('from_sdist', 'direct'))
def test_build_wheel(monkeypatch, tmpdir, args):
monkeypatch.chdir(MAIN_DIR)
subprocess.run(
[
sys.executable,
'-m',
'build',
*args,
'--outdir',
str(tmpdir),
],
check=True,
)
(wheel,) = tmpdir.visit('*.whl')
with zipfile.ZipFile(str(wheel)) as z:
names = z.namelist()
trimmed = {n for n in names if 'dist-info' not in n}
trimmed |= {f"dist-info/{n.split('/', 1)[-1]}" for n in names if 'dist-info' in n}
assert trimmed == wheel_files
``` |
{
"source": "jmroot/kiwi",
"score": 3
} |
#### File: py/tests/test_expression.py
```python
import gc
import math
import operator
from typing import Tuple
import pytest
from kiwisolver import Constraint, Expression, Term, Variable, strength
def test_expression_creation() -> None:
"""Test the Term constructor."""
v = Variable("foo")
v2 = Variable("bar")
v3 = Variable("aux")
e1 = Expression((v * 1, v2 * 2, v3 * 3))
e2 = Expression((v * 1, v2 * 2, v3 * 3), 10)
for e, val in ((e1, 0), (e2, 10)):
t = e.terms()
assert (
len(t) == 3
and t[0].variable() is v
and t[0].coefficient() == 1
and t[1].variable() is v2
and t[1].coefficient() == 2
and t[2].variable() is v3
and t[2].coefficient() == 3
)
assert e.constant() == val
assert str(e2) == "1 * foo + 2 * bar + 3 * aux + 10"
with pytest.raises(TypeError) as excinfo:
Expression((1, v2 * 2, v3 * 3)) # type: ignore
assert "Term" in excinfo.exconly()
# ensure we test garbage collection.
del e2
gc.collect()
@pytest.fixture()
def expressions():
"""Build expressions, terms and variables to test operations."""
v = Variable("foo")
v2 = Variable("bar")
t = Term(v, 10)
t2 = Term(v2)
e = t + 5
e2 = v2 - 10
return e, e2, t, t2, v, v2
def test_expression_neg(
expressions: Tuple[Expression, Expression, Term, Term, Variable, Variable]
):
"""Test neg on an expression."""
e, _, _, _, v, _ = expressions
neg = -e
assert isinstance(neg, Expression)
neg_t = neg.terms()
assert (
len(neg_t) == 1
and neg_t[0].variable() is v
and neg_t[0].coefficient() == -10
and neg.constant() == -5
)
def test_expression_mul(
expressions: Tuple[Expression, Expression, Term, Term, Variable, Variable]
):
"""Test expresion multiplication."""
e, _, _, _, v, _ = expressions
for mul in (e * 2.0, 2.0 * e):
assert isinstance(mul, Expression)
mul_t = mul.terms()
assert (
len(mul_t) == 1
and mul_t[0].variable() is v
and mul_t[0].coefficient() == 20
and mul.constant() == 10
)
with pytest.raises(TypeError):
e * v # type: ignore
def test_expression_div(
expressions: Tuple[Expression, Expression, Term, Term, Variable, Variable]
):
"""Test expression divisions."""
e, _, _, _, v, v2 = expressions
div = e / 2
assert isinstance(div, Expression)
div_t = div.terms()
assert (
len(div_t) == 1
and div_t[0].variable() is v
and div_t[0].coefficient() == 5
and div.constant() == 2.5
)
with pytest.raises(TypeError):
e / v2 # type: ignore
with pytest.raises(ZeroDivisionError):
e / 0
def test_expression_addition(
expressions: Tuple[Expression, Expression, Term, Term, Variable, Variable]
):
"""Test expressions additions."""
e, e2, _, t2, v, v2 = expressions
for add in (e + 2, 2.0 + e):
assert isinstance(add, Expression)
assert add.constant() == 7
terms = add.terms()
assert (
len(terms) == 1
and terms[0].variable() is v
and terms[0].coefficient() == 10
)
add2 = e + v2
assert isinstance(add2, Expression)
assert add2.constant() == 5
terms = add2.terms()
assert (
len(terms) == 2
and terms[0].variable() is v
and terms[0].coefficient() == 10
and terms[1].variable() is v2
and terms[1].coefficient() == 1
)
add3 = e + t2
assert isinstance(add3, Expression)
assert add3.constant() == 5
terms = add3.terms()
assert (
len(terms) == 2
and terms[0].variable() is v
and terms[0].coefficient() == 10
and terms[1].variable() is v2
and terms[1].coefficient() == 1
)
add4 = e + e2
assert isinstance(add4, Expression)
assert add4.constant() == -5
terms = add4.terms()
assert (
len(terms) == 2
and terms[0].variable() is v
and terms[0].coefficient() == 10
and terms[1].variable() is v2
and terms[1].coefficient() == 1
)
def test_expressions_subtraction(
expressions: Tuple[Expression, Expression, Term, Term, Variable, Variable]
):
"""Test expression substraction."""
e, e2, _, t2, v, v2 = expressions
for sub, diff in zip((e - 2, 2.0 - e), (3, -3)):
assert isinstance(sub, Expression)
assert sub.constant() == diff
terms = sub.terms()
assert (
len(terms) == 1
and terms[0].variable() is v
and terms[0].coefficient() == math.copysign(10, diff)
)
for sub2, diff in zip((e - v2, v2 - e), (5, -5)):
assert isinstance(sub2, Expression)
assert sub2.constant() == diff
terms = sub2.terms()
assert (
len(terms) == 2
and terms[0].variable() is v
and terms[0].coefficient() == math.copysign(10, diff)
and terms[1].variable() is v2
and terms[1].coefficient() == -math.copysign(1, diff)
)
for sub3, diff in zip((e - t2, t2 - e), (5, -5)):
assert isinstance(sub3, Expression)
assert sub3.constant() == diff
terms = sub3.terms()
assert (
len(terms) == 2
and terms[0].variable() is v
and terms[0].coefficient() == math.copysign(10, diff)
and terms[1].variable() is v2
and terms[1].coefficient() == -math.copysign(1, diff)
)
sub4 = e - e2
    assert isinstance(sub4, Expression)
assert sub4.constant() == 15
terms = sub4.terms()
assert (
len(terms) == 2
and terms[0].variable() is v
and terms[0].coefficient() == 10
and terms[1].variable() is v2
and terms[1].coefficient() == -1
)
@pytest.mark.parametrize(
"op, symbol",
[
(operator.le, "<="),
(operator.eq, "=="),
(operator.ge, ">="),
(operator.lt, None),
(operator.ne, None),
(operator.gt, None),
],
)
def test_expression_rich_compare_operations(op, symbol) -> None:
"""Test using comparison on variables."""
v1 = Variable("foo")
v2 = Variable("bar")
t1 = Term(v1, 10)
e1 = t1 + 5
e2 = v2 - 10
if symbol is not None:
c = op(e1, e2)
assert isinstance(c, Constraint)
e = c.expression()
t = e.terms()
assert len(t) == 2
if t[0].variable() is not v1:
t = (t[1], t[0])
assert (
t[0].variable() is v1
and t[0].coefficient() == 10
and t[1].variable() is v2
and t[1].coefficient() == -1
)
assert e.constant() == 15
assert c.op() == symbol and c.strength() == strength.required
else:
with pytest.raises(TypeError) as excinfo:
op(e1, e2)
assert "kiwisolver.Expression" in excinfo.exconly()
``` |
{
"source": "jmrozanec/eo-learn",
"score": 3
} |
#### File: eolearn/features/feature_manipulation.py
```python
import logging
from datetime import datetime
import numpy as np
from eolearn.core import EOTask, FeatureType
from sentinelhub.time_utils import iso_to_datetime
LOGGER = logging.getLogger(__name__)
class SimpleFilterTask(EOTask):
"""
Transforms an eopatch of shape [n, w, h, d] into [m, w, h, d] for m <= n. It removes all slices which don't
conform to the filter_func.
    A filter_func is a callable which takes a numpy array and returns a bool.
"""
def __init__(self, feature, filter_func, filter_features=...):
"""
:param feature: Feature in the EOPatch , e.g. feature=(FeatureType.DATA, 'bands')
:type feature: (FeatureType, str)
        :param filter_func: A callable that takes a numpy array and returns a bool.
:type filter_func: object
:param filter_features: A collection of features which will be filtered
:type filter_features: dict(FeatureType: set(str))
"""
self.feature = self._parse_features(feature)
self.filter_func = filter_func
self.filter_features = self._parse_features(filter_features)
def _get_filtered_indices(self, feature_data):
return [idx for idx, img in enumerate(feature_data) if self.filter_func(img)]
def _update_other_data(self, eopatch):
pass
def execute(self, eopatch):
"""
:param eopatch: Input EOPatch.
:type eopatch: EOPatch
:return: Transformed eo patch
:rtype: EOPatch
"""
feature_type, feature_name = next(self.feature(eopatch))
good_idxs = self._get_filtered_indices(eopatch[feature_type][feature_name] if feature_name is not ... else
eopatch[feature_type])
for feature_type, feature_name in self.filter_features(eopatch):
if feature_type.is_time_dependent():
if feature_type.has_dict():
if feature_type.contains_ndarrays():
eopatch[feature_type][feature_name] = np.asarray([eopatch[feature_type][feature_name][idx] for
idx in good_idxs])
# else:
# NotImplemented
else:
eopatch[feature_type] = [eopatch[feature_type][idx] for idx in good_idxs]
self._update_other_data(eopatch)
return eopatch
class FilterTimeSeries(SimpleFilterTask):
"""
Removes all frames in the time-series with dates outside the user specified time interval.
"""
def __init__(self, start_date, end_date, filter_features=...):
"""
:param start_date: Start date. All frames within the time-series taken after this date will be kept.
:type start_date: datetime.datetime
:param end_date: End date. All frames within the time-series taken before this date will be kept.
:type end_date: datetime.datetime
:param filter_features: A collection of features which will be filtered
:type filter_features: dict(FeatureType: set(str))
"""
self.start_date = start_date
self.end_date = end_date
if not isinstance(start_date, datetime):
raise ValueError('Start date is not of correct type. Please provide the start_date as datetime.datetime.')
if not isinstance(end_date, datetime):
raise ValueError('End date is not of correct type. Please provide the end_date as datetime.datetime.')
super().__init__(FeatureType.TIMESTAMP, lambda date: start_date <= date <= end_date, filter_features)
def _update_other_data(self, eopatch):
if 'time_interval' in eopatch.meta_info:
start_time, end_time = [iso_to_datetime(x) if isinstance(x, str)
else x for x in eopatch.meta_info['time_interval']]
eopatch.meta_info['time_interval'] = (max(start_time, self.start_date),
min(end_time, self.end_date))
return eopatch
``` |
{
"source": "jmrozanec/features-generator",
"score": 3
} |
#### File: features-generator/features/experiment_builder.py
```python
import datetime
import os
# https://stackoverflow.com/questions/682504/what-is-a-clean-pythonic-way-to-have-multiple-constructors-in-python
class CreateKeyDataframeAction():
def __init__(self, key_name=None, columns=None):
        self.key_name = key_name
        self.columns = columns
class SquashNumericDataframeAction():
def __init__(self, key_name=None, squash_strategy=None):
self.key_name = key_name
self.squash_strategy = squash_strategy
# TODO!
class JoinDataframeAction():
    def __init__(self, join_type=None, left_df_name=None, right_df_name=None, columns_left=None, columns_right=None):
        self.join_type = join_type
        self.left_df_name = left_df_name
        self.right_df_name = right_df_name
        self.columns_left = columns_left
        self.columns_right = columns_right
class RemoveDummyColsDataframeAction():
def __init__(self, df_name):
self.df_name = df_name
class DatasetSplitOnDate():
def __init__(self, val, test):
self.val = val
self.test = test
    def execute(self):
print("TODO: dummy split")
class DatasetSplitOnPercentage():
def __init__(self, val, test):
self.val = val
self.test = test
    def execute(self):
print("TODO: dummy split")
class DataframeBuilder():
    def __init__(self, experiment_builder, dataset_path):
self.experiment_builder = experiment_builder
self.dataset_path = dataset_path
self.is_path = True
base=os.path.basename(dataset_path)
self.df_name = os.path.splitext(base)[0]
self.actions = []
    def from_repository(self, dataset_name):
        self.is_path = False
        return self
    def from_file(self, dataset_path):
        return self
    def as_(self, df_name):  # 'as' is a reserved keyword, so use a trailing underscore
self.df_name = df_name
return self
    def with_key(self, key_name, columns):
        self.actions.append(CreateKeyDataframeAction(key_name=key_name, columns=columns))
return self
def squash_numeric(self, key_name, squash_strategy):
self.actions.append(SquashNumericDataframeAction(key_name=key_name, squash_strategy=squash_strategy))
return self
    def remove_dummy_cols(self):
        print("TODO: dummy remove_dummy_cols")
        return self
    def remove_dummy_rows(self):
        print("TODO: dummy remove_dummy_rows")
        return self
    def create_lag_features(self):
        print("TODO: dummy create_lag_features")
        return self
    def ratio_for_lagged(self):
        print("TODO: dummy ratio_for_lagged")
        return self
    def and_(self):  # 'and' is a reserved keyword, so use a trailing underscore
return self.experiment_builder
class JoinBuilder():
def __init__(self, experiment_builder, join_type, left_df_name, right_df_name, columns_left, columns_right):
self.experiment_builder = experiment_builder
self.action = JoinDataframeAction(join_type, left_df_name, right_df_name, columns_left, columns_right)
self.df_name = "{}-{}-{}".format(left_df_name, join_type, right_df_name)
    def as_(self, df_name):  # 'as' is a reserved keyword, so use a trailing underscore
self.df_name = df_name
return self
    def and_(self):  # 'and' is a reserved keyword, so use a trailing underscore
return self.experiment_builder
class DatasetSplitBuilder():
    def __init__(self, experiment_builder, val, test, split_type=None):
self.experiment_builder = experiment_builder
self.split_type = split_type
self.val = val
self.test = test
    def build(self): # TODO rename to method when applied cross actions
        if type(self.val) != type(self.test):
            print("Types for val and test should be the same") # TODO throw an error
        else:
            if isinstance(self.val, (int, float)):
                return DatasetSplitOnPercentage(self.val, self.test)
            if isinstance(self.val, datetime.timedelta):
                return DatasetSplitOnDate(self.val, self.test)
# TODO put into another package
class ModelTrainBuilder():
    def __init__(self, builder):
        self.builder = builder
    def with_validation_metrics(self):
        print("ModelTrainBuilder::with_validation_metrics()")
        return self
    def saving_best(self):
        print("ModelTrainBuilder::saving_best()")
        return self
class ModelTestBuilder():
    def __init__(self, builder):
        self.builder = builder
    def with_test_metrics(self):
        print("ModelTestBuilder::with_test_metrics()")
        return self
class GBRegressorBuilder():
def __init__(self, experiment_builder):
self.params = {}
self.experiment_builder = experiment_builder
    def with_colsample_bytree(self, colsample_bytree):
        self.params['colsample_bytree'] = colsample_bytree
        return self
    def with_gamma(self, gamma):
        self.params['gamma'] = gamma
        return self
    def with_learning_rate(self, learning_rate):
        self.params['learning_rate'] = learning_rate
        return self
    def with_max_depth(self, max_depth):
        self.params['max_depth'] = max_depth
        return self
    def with_min_child_weight(self, min_child_weight):
        self.params['min_child_weight'] = min_child_weight
        return self
    def with_estimators(self, estimators):
        self.params['estimators'] = estimators
        return self
    def with_reg_alpha(self, reg_alpha):
        self.params['reg_alpha'] = reg_alpha
        return self
    def with_reg_lambda(self, reg_lambda):
        self.params['reg_lambda'] = reg_lambda
        return self
    def with_subsample(self, subsample):
        self.params['subsample'] = subsample
        return self
    def and_(self):  # 'and' is a reserved keyword, so use a trailing underscore
        return self.experiment_builder
# the builder abstraction
class ExperimentBuilder():
def __init__(self):
self.steps = []
self.seed = 1234
        # TODO: append a seed-setting step
    def set_seed(self, seed=1234):
self.seed = seed
return self
def load_dataframe(self, dataset_path):
step = DataframeBuilder(self, dataset_path)
self.steps.append(step)
return step
def join(self, join_type, left_df_name, right_df_name, columns_left, columns_right):
step = JoinBuilder(self, join_type, left_df_name, right_df_name, columns_left, columns_right)
self.steps.append(step)
return step
# We shall accept val/test: https://docs.python.org/3/library/datetime.html#datetime.timedelta
#
    def split_trainvaltest(self, val=0.1, test=0.2):
step = DatasetSplitBuilder(self, val, test)
self.steps.append(step)
return step
    def create_model(self, model_type):
        if model_type == 'gbr':
            return GBRegressorBuilder(self)
    def train(self):
        step = ModelTrainBuilder(self)
        self.steps.append(step)
        return step
    def execute(self):
        print("TODO: dummy execution")
    def describe(self):
        print("TODO: dummy experiment_builder describe")
    def report(self):
        print("TODO: dummy report")
    def summary(self):
        print("TODO: dummy summary")
# TODO create a summary report considering ex.: involved datasets and configurations.
f = ExperimentBuilder()
f.set_seed(1234).load_dataframe('/home/datasets/cars.csv')
# squash_strategy=mean, sum
#ExperimentBuilder()
# .load_dataframe('cars', '/home/datasets/cars.csv').with_key(key_name, [columns]).squash_numeric('dm-key', squash_strategy)
# .load_dataframe('trains', '/home/datasets/cars.csv').with_key(key_name)
# .inner_join(left, right, columns_left, columns_right)
# .create_lag_features(column, prefix, lag_range)
# .ratio_for_lagged([columns], lagged_column_prefix, source_lag_range, target_offset, target_lag_range_end)
#ExperimentBuilder()
# .load_dataframe('/home/datasets/cars.csv').with_key(key_name, [columns]).squash_numeric('dm-key', squash_strategy).as('cars') -> dataframe builder
# .and() -> experiment builder
# .load_dataframe('trains', '/home/datasets/cars.csv').with_key(key_name).and()
# .create_dataframe_as_join('df1', [left], [right], columns_left, columns_right) -> experiment builder
# .for('df1') -> dataframe builder
# .create_lag_features(column, prefix, lag_range) -> dataframe builder
# .ratio_for_lagged([columns], lagged_column_prefix, source_lag_range, target_offset, target_lag_range_end) -> dataframe builder
# .split_trainvaltest(val=0.1, test=0.2, policy='last') # TODO: we should randomize the dataset and get the required splits
# .split_trainvaltest(val=1.month, test=2.months, policy='any') # TODO: we should take required amount of months (months selected randomly) and then randomize each part
# .split_trainvaltest(val=1.month, test=2.months, policy='last') # TODO: we should sort by date if policy is 'last' and after division randomize each part
# .normalize().and() # TODO: auto-normalize or set manually?
# .feature_selection().and()
# .create_model('gbt').and()
# .train().with_validation_metrics().saving_best().and()
# .test().with_test_metrics().and()
# .report()
# .execute()
``` |
{
"source": "jmrozanec/white-bkg-classification",
"score": 3
} |
#### File: white-bkg-classification/scripts/09-architecture-vgg.py
```python
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader
train_file = '../images/sampling/dataset-splits/train-cv-1.txt'
test_file = '../images/sampling/dataset-splits/test-cv-1.txt'
from tflearn.data_preprocessing import ImagePreprocessing
import os
def vgg16(input, num_class):
x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')
x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')
x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
x = tflearn.dropout(x, 0.5, name='dropout1')
x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
x = tflearn.dropout(x, 0.5, name='dropout2')
x = tflearn.fully_connected(x, num_class, activation='softmax', scope='fc8',
restore=False)
return x
channels=1
width=64
height=50
model_path = "/tmp"
# the file list generated by gen_files_list.py
files_list = "../images/sampling/train-imgs.txt"
from tflearn.data_utils import image_preloader
X, Y = image_preloader(files_list, image_shape=(256, 256), mode='file',
categorical_labels=True, normalize=False,
filter_channel=True)
num_classes = 2 # num of your dataset
# VGG preprocessing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center(mean=[123.68, 116.779, 103.939],
per_channel=True)
# VGG Network
x = tflearn.input_data(shape=[None, 256, 256, 3], name='input',
data_preprocessing=img_prep)
softmax = vgg16(x, num_classes)
regression = tflearn.regression(softmax, optimizer='adam',
loss='categorical_crossentropy',
learning_rate=0.001, restore=False)
model = tflearn.DNN(regression, checkpoint_path='vgg-finetuning',
max_checkpoints=3, tensorboard_verbose=2,
tensorboard_dir="./logs")
# Start finetuning
model.fit(X, Y, n_epoch=10, validation_set=0.1, shuffle=True,
show_metric=True, batch_size=64, snapshot_epoch=False,
snapshot_step=200, run_id='vgg-finetuning')
model.save('your-task-model-retrained-by-vgg')
```
#### File: scripts/dl-images/01-dl-images.py
```python
from __future__ import division, print_function, absolute_import
import argparse
import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader
from tflearn.layers.merge_ops import merge
import uuid
import numpy as np
def main(args):
experiment='dl-images'
channels=3
width=64
height=64
num_class=2
epochs=15
folds=5
architecturescount=10
architectureid=args.architectureid
fold=args.fold
test_file='../../images/sampling/lists/images/test-images.txt'
# for architectureid in range(1,architecturescount):
accuracies=[]
#for fold in range(1,folds):
runid='{}-architecture{}-fold{}'.format(experiment,architectureid,fold)
arch = architecture(architectureid, width, height, channels, num_class)
train_file = '../../images/sampling/lists/images/splits/train-cv-{}.txt'.format(fold)
validate_file = '../../images/sampling/lists/images/splits/validate-cv-{}.txt'.format(fold)
X, Y = image_preloader(train_file, image_shape=(height, width, channels), mode='file', categorical_labels=True, normalize=True)
valX, valY = image_preloader(validate_file, image_shape=(height, width, channels), mode='file', categorical_labels=True, normalize=True)
testX, testY = image_preloader(test_file, image_shape=(height, width, channels), mode='file', categorical_labels=True, normalize=True)
arch.fit(X, Y, n_epoch=epochs, validation_set=(valX, valY), snapshot_step=10, snapshot_epoch=False, show_metric=True, run_id=runid)
arch.save('arch-id{}-fold{}.tfl'.format(architectureid, fold))
# accuracies.append(arch.evaluate(testX, testY)[0])
# accuracies=np.asarray(accuracies)
accuracy=arch.evaluate(testX, testY)[0]
append(experiment, architectureid, fold, accuracy)
def append(experiment, architectureid, fold, accuracy):
# line='{},[{}],{},{}\n'.format(architectureid,','.join([str(i) for i in accuracies]),accuracies.mean(), accuracies.std())
line='{},{},{}\n'.format(architectureid,fold, accuracy)
with open('{}.csv'.format(experiment), "a") as report:
report.write(line)
def architecture(id, width, height, channels, num_class):
"""
Obtain DNN architecture for given id.
"""
input = input_data(shape=[None, width, height, channels], name='input')
input = tflearn.layers.core.reshape(input, [-1, width, height, channels], name='Reshape')
if id == 1:
return architecture01(input, num_class)
if id == 2:
return architecture02(input, num_class)
if id == 3:
return architecture03(input, num_class)
if id == 4:
return architecture04(input, num_class)
if id == 5:
return architecture05(input, num_class)
if id == 6:
return architecture06(input, num_class)
if id == 7:
return architecture07(input, num_class)
if id == 8:
return architecture08(input, num_class)
if id == 9:
return architecture09(input, num_class)
if id == 10:
return architecture10(input, num_class)
def architecture01(input, num_class):
network = conv_2d(input, 64, 1, activation='relu', regularizer="L2")
network = batch_normalization(network)
network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = fully_connected(network, 64, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, num_class, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy', name='target')
return tflearn.DNN(network, tensorboard_verbose=0)
def architecture02(input, num_class):
network = batch_normalization(input)
network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = batch_normalization(network)
network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = batch_normalization(network)
network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = fully_connected(network, num_class, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy', name='target')
return tflearn.DNN(network, tensorboard_verbose=0)
#ResNet: Taken and adapted from from https://github.com/tflearn/tflearn/blob/master/examples/images/residual_network_mnist.py
def architecture03(input, num_class):
net = tflearn.conv_2d(input, 64, 3, activation='relu', bias=False)
net = tflearn.residual_bottleneck(net, 3, 16, 64)
net = tflearn.residual_bottleneck(net, 1, 32, 128, downsample=False)
net = tflearn.residual_bottleneck(net, 2, 32, 128)
net = tflearn.residual_bottleneck(net, 1, 64, 256, downsample=False)
net = tflearn.residual_bottleneck(net, 2, 64, 256)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
net = tflearn.fully_connected(net, num_class, activation='softmax')
net = tflearn.regression(net, optimizer='momentum', loss='categorical_crossentropy', learning_rate=0.001)
return tflearn.DNN(net, tensorboard_verbose=0)
def architecture04(input, num_class):
network = conv_2d(input, 64, 1, activation='relu', regularizer="L2")
network = batch_normalization(network)
network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
network = batch_normalization(network)
network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = fully_connected(network, 64, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, num_class, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy', name='target')
return tflearn.DNN(network, tensorboard_verbose=0)
def architecture05(input, num_class):
network = conv_2d(input, 128, 1, activation='relu', regularizer="L2")
network = batch_normalization(network)
network = fully_connected(network, 64, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, num_class, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy', name='target')
return tflearn.DNN(network, tensorboard_verbose=0)
def architecture06(input, num_class):
network1 = batch_normalization(conv_2d(input, 64, 1, activation='relu', regularizer="L2"))
network2 = batch_normalization(conv_2d(input, 64, 3, activation='relu', regularizer="L2"))
network = merge([network1, network2],'concat')
network = fully_connected(network, 64, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, num_class, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy', name='target')
return tflearn.DNN(network, tensorboard_verbose=0)
def architecture07(input, num_class):
network = conv_2d(input, 64, 1, activation='relu')
network = conv_2d(network, 64, 1, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 1, activation='relu')
network = conv_2d(network, 64, 1, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 1, activation='relu')
network = conv_2d(network, 64, 1, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 1, activation='relu')
network = conv_2d(network, 64, 1, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 1, activation='relu')
network = conv_2d(network, 64, 1, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, num_class, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy', name='target')
return tflearn.DNN(network, tensorboard_verbose=0)
def architecture08(input, num_class):
network = conv_2d(input, 64, 1, activation='relu', regularizer="L2")
network = batch_normalization(network)
network = max_pool_2d(network, 2)
network = fully_connected(network, 64, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, num_class, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy', name='target')
return tflearn.DNN(network, tensorboard_verbose=0)
#VGG16 implementation. Taken from: https://github.com/tflearn/tflearn/blob/master/examples/images/vgg_network_finetuning.py
def architecture09(input, num_class):
x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')
x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')
x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
x = tflearn.dropout(x, 0.5, name='dropout1')
x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
x = tflearn.dropout(x, 0.5, name='dropout2')
x = tflearn.fully_connected(x, num_class, activation='softmax', scope='fc8', restore=False)
x = regression(x, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy', name='target')
return tflearn.DNN(x, tensorboard_verbose=0)
#GoogleLeNet: Taken and adapted from https://github.com/tflearn/tflearn/blob/master/examples/images/googlenet.py
def architecture10(input, num_class):
conv1_7_7 = conv_2d(input, 64, 7, strides=2, activation='relu', name = 'conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3,strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce, 192,3, activation='relu', name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')
inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce')
inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128,filter_size=3, activation='relu', name = 'inception_3a_3_3')
inception_3a_5_5_reduce = conv_2d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' )
inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name= 'inception_3a_5_5')
inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=3, strides=1, )
inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')
# merge the inception_3a__
inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=3)
inception_3b_1_1 = conv_2d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' )
inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu',name='inception_3b_3_3')
inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce')
inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=5, name = 'inception_3b_5_5')
inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool')
inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1')
#merge the inception_3b_*
inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=3,name='inception_3b_output')
pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')
inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3')
# inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
# inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5')
inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool')
inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')
inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output')
inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4a_1_1')
inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
# inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
# inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5')
inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool')
inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')
inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_pool_1_1], mode='concat', axis=3, name='inception_4b_output')
inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu',name='inception_4c_1_1')
inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3')
# inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
# inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5')
inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1)
inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')
inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_pool_1_1], mode='concat', axis=3,name='inception_4c_output')
inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
# inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
# inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5')
inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool')
inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')
inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_pool_1_1], mode='concat', axis=3, name='inception_4d_output')
inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
# inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
# inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5')
inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool')
inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')
inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_pool_1_1],axis=3, mode='concat')
pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')
# inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
# inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
# inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
# inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
# inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5')
# inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool')
# inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1')
# inception_5a_output = merge([inception_5a_1_1, inception_5a_pool_1_1], axis=3,mode='concat')
# inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1,activation='relu', name='inception_5b_1_1')
# inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
# inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384, filter_size=3,activation='relu', name='inception_5b_3_3')
# inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
# inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce,128, filter_size=5, activation='relu', name='inception_5b_5_5' )
# inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool')
# inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
# inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_pool_1_1], axis=3, mode='concat')
# pool5_7_7 = avg_pool_2d(inception_4e_output, kernel_size=7, strides=1)
pool5_7_7 = dropout(inception_4e_output, 0.4)
loss = fully_connected(pool5_7_7, num_class, activation='softmax')
network = regression(loss, optimizer='momentum', loss='categorical_crossentropy', learning_rate=0.001)
return tflearn.DNN(network, checkpoint_path='model_googlenet', max_checkpoints=1, tensorboard_verbose=0)
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--architectureid', type=int)
parser.add_argument('-f', '--fold', type=int)
args = parser.parse_args()
main(args)
``` |
{
"source": "jms0923/BoundingBoxerImg",
"score": 2
} |
#### File: jms0923/BoundingBoxerImg/ui.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1440, 960)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(930, 0))
MainWindow.setMouseTracking(True)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout_1 = QtWidgets.QHBoxLayout()
self.horizontalLayout_1.setObjectName("horizontalLayout_1")
self.frame = QtWidgets.QFrame(self.centralwidget)
self.frame.setMinimumSize(QtCore.QSize(0, 211))
self.frame.setObjectName("frame")
self.list_view_bounding_boxes = QtWidgets.QListView(self.frame)
self.list_view_bounding_boxes.setGeometry(QtCore.QRect(290, 30, 261, 181))
self.list_view_bounding_boxes.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.list_view_bounding_boxes.setObjectName("list_view_bounding_boxes")
# self.class_view = QtWidgets.QTextEdit()
# self.class_view.setObjectName("class_view")
self.list_view_classes = QtWidgets.QListView(self.frame)
self.list_view_classes.setGeometry(QtCore.QRect(1155, 30, 266, 181))
# self.list_view_classes.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked)
self.list_view_classes.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.list_view_classes.setObjectName("list_view_classes")
self.label_1 = QtWidgets.QLabel(self.frame)
self.label_1.setGeometry(QtCore.QRect(290, 0, 111, 21))
self.label_1.setObjectName("label_1")
self.label = QtWidgets.QLabel(self.frame)
self.label.setGeometry(QtCore.QRect(0, 0, 91, 21))
self.label.setObjectName("label")
self.list_view_images = QtWidgets.QListView(self.frame)
self.list_view_images.setGeometry(QtCore.QRect(0, 30, 271, 181))
self.list_view_images.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.list_view_images.setObjectName("list_view_images")
# image directory
self.push_button_image_path = QtWidgets.QPushButton(self.frame)
self.push_button_image_path.setGeometry(QtCore.QRect(230, 0, 21, 23))
self.push_button_image_path.setObjectName("push_button_image_path")
self.line_edit_image_path = QtWidgets.QLineEdit(self.frame)
self.line_edit_image_path.setGeometry(QtCore.QRect(100, 0, 121, 21))
self.line_edit_image_path.setObjectName("line_edit_image_path")
# delete button
self.push_button_delete = QtWidgets.QPushButton(self.frame)
self.push_button_delete.setGeometry(QtCore.QRect(850, 90, 61, 31))
self.push_button_delete.setObjectName("push_button_delete")
# coco export button
self.push_button_coco = QtWidgets.QPushButton(self.frame)
self.push_button_coco.setGeometry(QtCore.QRect(850, 0, 61, 31))
self.push_button_coco.setObjectName("push_button_coco")
self.line_edit_x2 = QtWidgets.QLineEdit(self.frame)
self.line_edit_x2.setGeometry(QtCore.QRect(650, 100, 71, 21))
self.line_edit_x2.setObjectName("line_edit_x2")
self.line_edit_y1 = QtWidgets.QLineEdit(self.frame)
self.line_edit_y1.setGeometry(QtCore.QRect(740, 70, 71, 21))
self.line_edit_y1.setObjectName("line_edit_y1")
self.line_edit_label = QtWidgets.QLineEdit(self.frame)
self.line_edit_label.setGeometry(QtCore.QRect(650, 170, 161, 20))
self.line_edit_label.setObjectName("line_edit_label")
self.label_2 = QtWidgets.QLabel(self.frame)
self.label_2.setGeometry(QtCore.QRect(680, 20, 21, 21))
self.label_2.setObjectName("label_2")
self.line_edit_x3 = QtWidgets.QLineEdit(self.frame)
self.line_edit_x3.setGeometry(QtCore.QRect(650, 130, 71, 21))
self.line_edit_x3.setObjectName("line_edit_x3")
self.line_edit_y0 = QtWidgets.QLineEdit(self.frame)
self.line_edit_y0.setGeometry(QtCore.QRect(740, 40, 71, 21))
self.line_edit_y0.setObjectName("line_edit_y0")
self.label_5 = QtWidgets.QLabel(self.frame)
self.label_5.setGeometry(QtCore.QRect(590, 170, 41, 21))
self.label_5.setObjectName("label_5")
self.label_4 = QtWidgets.QLabel(self.frame)
self.label_4.setGeometry(QtCore.QRect(590, 80, 41, 21))
self.label_4.setObjectName("label_4")
self.line_edit_y2 = QtWidgets.QLineEdit(self.frame)
self.line_edit_y2.setGeometry(QtCore.QRect(740, 100, 71, 21))
self.line_edit_y2.setObjectName("line_edit_y2")
self.label_3 = QtWidgets.QLabel(self.frame)
self.label_3.setGeometry(QtCore.QRect(770, 20, 21, 21))
self.label_3.setObjectName("label_3")
self.line_edit_x1 = QtWidgets.QLineEdit(self.frame)
self.line_edit_x1.setGeometry(QtCore.QRect(650, 70, 71, 21))
self.line_edit_x1.setObjectName("line_edit_x1")
self.line_edit_x0 = QtWidgets.QLineEdit(self.frame)
self.line_edit_x0.setGeometry(QtCore.QRect(650, 40, 71, 21))
self.line_edit_x0.setObjectName("line_edit_x0")
self.line_edit_y3 = QtWidgets.QLineEdit(self.frame)
self.line_edit_y3.setGeometry(QtCore.QRect(740, 130, 71, 21))
self.line_edit_y3.setObjectName("line_edit_y3")
self.check_box_selection_mode = QtWidgets.QCheckBox(self.frame)
self.check_box_selection_mode.setGeometry(QtCore.QRect(1040, 175, 81, 21))
self.check_box_selection_mode.setObjectName("check_box_selection_mode")
self.check_box_modify_mode = QtWidgets.QCheckBox(self.frame)
self.check_box_modify_mode.setGeometry(QtCore.QRect(1040, 155, 81, 21))
self.check_box_modify_mode.setObjectName("check_box_modify_mode")
self.horizontalLayout_1.addWidget(self.frame)
self.verticalLayout_4.addLayout(self.horizontalLayout_1)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.graphics_view = GraphicsView(self.centralwidget)
self.graphics_view.setMouseTracking(True)
self.graphics_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.graphics_view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.graphics_view.setObjectName("graphics_view")
self.horizontalLayout_4.addWidget(self.graphics_view)
self.verticalLayout_4.addLayout(self.horizontalLayout_4)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_1.setText(_translate("MainWindow", "Bounding Boxes"))
self.label.setText(_translate("MainWindow", "이미지파일 경로"))
self.push_button_image_path.setText(_translate("MainWindow", "..."))
        self.push_button_delete.setText(_translate("MainWindow", "Delete"))
self.push_button_coco.setText(_translate("MainWindow", "COCO"))
self.label_2.setText(_translate("MainWindow", "x"))
self.label_5.setText(_translate("MainWindow", "Label"))
self.label_4.setText(_translate("MainWindow", "Points"))
self.label_3.setText(_translate("MainWindow", "y"))
        self.check_box_selection_mode.setText(_translate("MainWindow", "Selection mode"))
        self.check_box_modify_mode.setText(_translate("MainWindow", "Edit mode"))
from graphicsview import GraphicsView
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
``` |
{
"source": "jms0923/mmdet_tta",
"score": 2
} |
#### File: jms0923/mmdet_tta/predict_utils.py
```python
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from random import randint
import cv2
import mmcv
import numpy as np
import os
import torch
import torch.distributed as dist
import torch.nn as nn
from mmcv.runner import auto_fp16
from mmcv.utils import print_log
from mmcv.image import imread, imwrite
from mmdet.utils import get_root_logger
class PostProcessor():
def __init__(self,
classes,
score_thr=0.3):
self.classes = classes
self.num_classes = len(classes)
if self.num_classes == 10:
self.box_real_class = [0, 1, 1, 2, 3, 4, 5, 6, 5, 6]
elif self.num_classes == 12:
self.box_real_class = [0, 1, 1, 2, 3, 4, 5, 6, 2, 5, 0, 6]
self.score_thr = score_thr
self.thickness = 1
self.font_scale = 0.5
self.win_name = ''
self.wait_time = 0
self.colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0), (255, 255, 0), (0, 255, 255),
(255, 0, 255), (255, 255, 255), (0, 0, 0), (255, 0, 128), (0, 191, 255),
(10, 255, 128), (191, 255, 0), (255, 191, 0), (255, 128, 10), (50, 152, 89)]
self.makeColors()
self.iitpID = 1
self.iitpJson = {'annotations':[]}
def makeColors(self):
if len(self.colors) >= self.num_classes:
return
else:
while len(self.colors) < self.num_classes:
self.colors.append((randint(20, 230), randint(20, 230), randint(20, 230)))
return
def saveResult(self,
img,
result,
show=False,
out_file=None):
img, bboxes, labels = self.extractInfo(img, result, show=False, out_file=out_file)
# draw bounding boxes
return self.imshow_det_bboxes(img, bboxes, labels, show=show, out_file=out_file)
def labelChanger(self, labels):
appliedLabels = []
if self.num_classes == 10:
for i in labels:
if i == 8:
if 3 in labels or 4 in labels:
i = 3
if i == 9:
if 3 in labels:
i = 3
elif 4 in labels:
i = 0
i = self.box_real_class[i]
i += 1
appliedLabels.append(i)
elif self.num_classes == 12:
appliedLabels = [self.box_real_class[i]+1 for i in labels]
else:
print('Unexpected # class')
raise ValueError
return appliedLabels
def saveIitp(self, img, imgPath, result):
_, bboxes, labels = self.extractInfo(img, result, show=False, out_file=None)
bboxes, labels = self.iitpProcess(bboxes, labels)
if len(labels) < 1:
return False
return self.annoMaker(imgPath, bboxes, labels)
def annoMaker(self, imgPath, bboxes, labels, labelChanger=True):
anno = {}
anno['id'] = self.iitpID
self.iitpID += 1
if labelChanger:
labels = self.labelChanger(labels)
fileName = imgPath.split('/')[-1]
anno['file_name'] = fileName
anno['object'] = []
for box, label in zip(bboxes, labels):
anno['object'].append({
'box': box,
'label': 'c'+str(label)
})
self.iitpJson['annotations'].append(anno)
return labels
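    # For reference, a single entry appended to self.iitpJson['annotations'] looks
    # roughly like this (the file name and numbers are illustrative only):
    #   {'id': 1, 'file_name': 'img_0001.jpg',
    #    'object': [{'box': [x_min, y_min, x_max, y_max], 'label': 'c3'}]}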
def iitpProcess(self, bboxes, labels):
assert bboxes.ndim == 2
assert labels.ndim == 1
assert bboxes.shape[0] == labels.shape[0]
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
if self.score_thr > 0:
assert bboxes.shape[1] == 5
scores = bboxes[:, -1]
inds = scores > self.score_thr
bboxes = bboxes[inds, :]
labels = labels[inds]
processedBoxes = []
for box in bboxes:
box = box.tolist()
box.pop()
box = list(map(int, box))
processedBoxes.append(box)
return processedBoxes, labels
def bb_intersection_over_union(self, bboxes, labels, box_scores):
# determine the (x, y)-coordinates of the intersection rectangle
best_indexes = []
for i in range(0, len(bboxes) - 1):
best_iou = -1
best_list = []
for j in range(i + 1 , len(bboxes)):
xA = max(bboxes[i][0], bboxes[j][0])
yA = max(bboxes[i][1], bboxes[j][1])
xB = min(bboxes[i][2], bboxes[j][2])
yB = min(bboxes[i][3], bboxes[j][3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (bboxes[i][2] - bboxes[i][0] + 1) * (bboxes[i][3] - bboxes[i][1] + 1)
boxBArea = (bboxes[j][2] - bboxes[j][0] + 1) * (bboxes[j][3] - bboxes[j][1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
                # areas - the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
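                # Worked example (illustrative numbers): boxes [0, 0, 10, 10] and
                # [5, 0, 15, 10] give interArea = (10-5+1)*(10-0+1) = 66 and each
                # box area = 121, so iou = 66 / (121 + 121 - 66) = 0.375.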
if iou > best_iou:
best_iou = iou
best_list = [i , j, best_iou]
best_indexes.append(best_list)
index = []
for best_index in best_indexes:
if best_index[2] > 0.98: # best_iou
if box_scores[best_index[0]] > box_scores[best_index[1]]:
index.append(best_index[1])
else :
index.append(best_index[0])
index = set(index)
index = sorted(list(index), reverse=True)
for i in index :
if box_scores[i] < self.score_thr + 0.05:
bboxes = np.delete(bboxes, i, axis = 0)
labels = np.delete(labels, i, axis = 0)
box_scores = np.delete(box_scores, i, axis = 0)
return bboxes, labels, box_scores
def cropBoxes(self, img, result, out_file=None):
img, bboxes, labels = self.extractInfo(img, result, show=False, out_file=out_file)
assert bboxes.ndim == 2
assert labels.ndim == 1
assert bboxes.shape[0] == labels.shape[0]
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
img = imread(img)
box_scores = []
if self.score_thr > 0:
assert bboxes.shape[1] == 5
scores = bboxes[:, -1]
inds = scores > self.score_thr
bboxes = bboxes[inds, :]
labels = labels[inds]
box_scores = scores[inds]
img = np.ascontiguousarray(img)
croppedImgs = []
out_label = []
if len(labels) > 1:
bboxes, labels, box_scores = self.bb_intersection_over_union(bboxes, labels, box_scores)
# path to save cropped image if save
# splitPath = out_file.split('/')
# fileName = splitPath.pop(-1).split('.')[0]
for idx, (bbox, label) in enumerate(zip(bboxes, labels)):
# !!!!!!!!!!! ~ Except class cap(8) or label(9) ~ !!!!!!!!!!!!
if label != 8 and label != 9:
bbox_int = bbox.astype(np.int32)
heightRange = (bbox_int[1], bbox_int[3])
widthRange = (bbox_int[0], bbox_int[2])
dst = img.copy()
center_x = int(int(bbox_int[0]) - int(bbox_int[0])*0.15)
center_y = int(int(bbox_int[1]) - int(bbox_int[0])*0.15)
width = int(int(bbox_int[2]) + int(bbox_int[2])*0.15)
height = int(int(bbox_int[3]) + int(bbox_int[3])*0.15)
dst = dst[center_y:height, center_x:width]
# dst = dst[bbox_int[1]:bbox_int[3], bbox_int[0]:bbox_int[2]]
croppedImgs.append(dst)
out_label.append(label)
# save cropped image
# out_file = splitPath.copy()
# out_file.append(fileName+'_'+str(idx)+'.jpg')
# out_file = '/'.join(out_file)
# if out_file is not None:
# imwrite(dst, out_file)
out_label = self.labelChanger(out_label)
return croppedImgs, out_label
def extractInfo(self,
img,
result,
show=False,
out_file=None):
# batch_size = len(result)
# print('batch_size : ', batch_size)
# print('result : ', len(result[0]), result)
img = mmcv.imread(img)
img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
if isinstance(segm_result, tuple):
segm_result = segm_result[0] # ms rcnn
# print('check msrcnn : ', len(segm_result))
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
# draw segmentation masks
if segm_result is not None and len(labels) > 0: # non empty
# print('check segm_result is not None')
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > self.score_thr)[0]
np.random.seed(42)
color_masks = [
np.random.randint(0, 256, (1, 3), dtype=np.uint8)
for _ in range(max(labels) + 1)
]
for i in inds:
i = int(i)
color_mask = color_masks[labels[i]]
mask = segms[i].astype(bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# if not (show or out_file):
# return img
return img, bboxes, labels
def imshow_det_bboxes(self,
img,
bboxes,
labels,
show=True,
out_file=None):
assert bboxes.ndim == 2
assert labels.ndim == 1
assert bboxes.shape[0] == labels.shape[0]
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
img = imread(img)
if self.score_thr > 0:
assert bboxes.shape[1] == 5
scores = bboxes[:, -1]
inds = scores > self.score_thr
bboxes = bboxes[inds, :]
labels = labels[inds]
img = np.ascontiguousarray(img)
for bbox, label in zip(bboxes, labels):
bbox_int = bbox.astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
right_bottom = (bbox_int[2], bbox_int[3])
cv2.rectangle(
img, left_top, right_bottom, self.colors[label], thickness=self.thickness)
label_text = self.classes[
label] if self.classes is not None else f'cls {label}'
if len(bbox) > 4:
label_text += f'|{bbox[-1]:.02f}'
cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - (label*2*randint(0, 1))),
cv2.FONT_HERSHEY_COMPLEX, self.font_scale, self.colors[label])
if show:
            mmcv.imshow(img, self.win_name, self.wait_time)
if out_file is not None:
imwrite(img, out_file)
return img
def get_key_object(self, img, result, out_file=None):
img, bboxes, labels = self.extractInfo(img, result, show=False, out_file=out_file)
assert bboxes.ndim == 2
assert labels.ndim == 1
assert bboxes.shape[0] == labels.shape[0]
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
box_scores = []
if self.score_thr > 0:
assert bboxes.shape[1] == 5
scores = bboxes[:, -1]
inds = scores > self.score_thr
bboxes = bboxes[inds, :]
labels = labels[inds]
box_scores = scores[inds]
if len(labels) > 1:
bboxes, labels, box_scores = self.bb_intersection_over_union(bboxes, labels, box_scores)
p = self.key_object(bboxes, labels)
return labels, p
def key_object(self, bboxes, labels):
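        # Groups detections into key objects: boxes are sorted by x-min and compared
        # against the head of each linked list in the queue q; depending on IoU and
        # relative geometry a box either starts a new list, is inserted into an existing
        # one, or closes lists lying entirely to its left (moved to p, which is returned).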
# set debug mode
debug = False
bbox = []
if len(labels) > 1:
for idx_box, ibox in enumerate(bboxes):
bbox.append([ibox[0],ibox[1],ibox[2],ibox[3],ibox[4],labels[idx_box]])
elif len(labels) == 1:
bbox.append([bboxes[0][0],bboxes[0][1],bboxes[0][2],bboxes[0][3],bboxes[0][4],labels[0]])
bounding_box = sorted(bbox, key=lambda k: k[0])
if debug == True:
print('sort: ', bounding_box)
q = []
p = []
flag = 0
for bidx, bi in enumerate(bounding_box):
if bidx == 0 or len(q)==0:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
else:
ck = 0
cj = 1
flag2 = 0
for jdx in range(0,len(q)):
if debug == True:
print('ck: ',ck)
print('jdx: ',jdx)
qsz = len(q)
if qsz == 1:
bk = q[0]
elif ck >= 1:
bk = q[ck-cj]
else:
#bk = q[jdx]
bk = q[jdx]
if debug == True:
print('bi: ', bi)
print('size of LL: ', bk.size())
print('bk (Q) :', bk.selectNode(0))
print('size of q',len(q))
for iddd in q:
print('now q: ', iddd.selectNode(0))
iou = self.get_iou(bk.selectNode(0).data, bi)
bk_area = (bk.selectNode(0).data[2]-bk.selectNode(0).data[0])*(bk.selectNode(0).data[3]-bk.selectNode(0).data[1])
bi_area = (bi[2]-bi[0])*(bi[3]-bi[1])
if debug == True:
print('iou',iou)
print('bk_area',bk_area)
print('bi_area',bi_area)
#print('iou/((bi_area/bk_area)+1e-6)',iou/((bi_area/bk_area)+1e-6))
#print('(bk.selectNode(0).data[1]/(bi[1]+1e-6))',(bk.selectNode(0).data[1]/(bi[1]+1e-6)))
#print('bk.selectNode(0).data[3]/(bi[3]+1e-6)',bk.selectNode(0).data[3]/(bi[3]+1e-6))
if iou >= 0.99 and iou/((bi_area/bk_area)+1e-6) >= 0.99:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
break
#print('bi[1]/(bk.selectNode(0).data[3]+1e-6)',bi[1]/(bk.selectNode(0).data[3]+1e-6))
#if bi_area > bk_area:
#print('area: ', bk_area/bi_area)
# case 1
# bi_xmin >> bk_xmax
if bi[0] > bk.selectNode(0).data[2]:
if debug == True:
print('case 1')
# delete bk from Q
p.append(q.pop(0))
#if ck == 0:
if jdx == (len(q)) or len(q) == 0:
if int(bk.selectNode(0).data[5]) == 8 and bk_area < bi_area and 0.98<bk.selectNode(0).data[3]/(bi[1]+1e-6):
bk.insertFirst([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
break
else:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
break
else:
ck += 1
if ck >= 2:
cj += 1
continue
# case 2
# bi_xmin ~= bk_xmax
# bi is side, smaller than bk
#elif 0.98 < (bi[0]/bk.selectNode(0).data[2]) and ((bk.selectNode(0).data[2]-bk.selectNode(0).data[0])*(bk.selectNode(0).data[3]-bk.selectNode(0).data[1])) > ((bi[2]-bi[0])*(bi[3]-bi[1])) and bk.selectNode(0).data[3] > bi[3]:
elif 0.98 < (bi[0]/bk.selectNode(0).data[2]) and 1.1 > (bk.selectNode(0).data[2]/bi[0]) and (bk_area) > (bi_area) and bk.selectNode(0).data[3] > bi[3] and bk.selectNode(0).data[0] < bi[0] and bk.selectNode(0).data[1] < bi[1]:
if debug == True:
print('case 2')
if ck != 0:
ck += 1
if flag == 0:
if len(q) > jdx and bi[0] > q[jdx].selectNode(0).data[2]:
p.append(q.pop(0))
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
break
elif int(bk.size()) > 2:
p.append(q.pop(0))
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
break
else:
if bi[5] != 8:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
break
else:
bk.insertLast([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
break
elif flag == 1:
bk.insertFirst([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
flag = 0
break
# case 3
# continue
elif iou == 0.0:
if ck != 0:
ck += 1
if debug == True:
print('case 3')
if jdx == (len(q)-1) or len(q) == 1:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
break
else:
continue
# case 4
elif iou > 0.0:
if ck != 0:
ck += 1
if debug == True:
print('case 4')
# a)
if bk.selectNode(0).data[0] < bi[0] and bk.selectNode(0).data[1] < bi[1] and bk.selectNode(0).data[2] > bi[2] and bk.selectNode(0).data[3] > bi[3]:
if debug == True:
print('1')
if bi[5] == 8 or bi[5] == 9:
if bi[4] > 0.6 or flag2 == 1:
bk.insertLast([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
flag2 = 0
break
                                elif bk.selectNode(0).data[5] == 0 and len(q) > 1: # head box is paper (class 0) and q holds more than one object
flag2 = 1
continue
elif bi[5] == 9:
bk.insertLast([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
flag2 = 0
break
else:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
break
else:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
break
# insert bi into L(bk)
#break
# b-1)
elif 0.98 < (min(bk.selectNode(0).data[0],bi[0])/(max(bk.selectNode(0).data[0],bi[0])+1e-6)) and 0.98 < (min(bk.selectNode(0).data[3],bi[3])/(max(bk.selectNode(0).data[3],bi[3])+1e-6)):
if debug == True:
print('2')
if (bk_area) > (bi_area):
if bi[5] == 8 or bi[5] == 9:
                                    bk.insertLast([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]) # for this case only
#q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[5]]))
else:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
else:
bk.insertFirst([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
break
# b-2)
elif 0.98 < (min(bk.selectNode(0).data[2],bi[2])/(max(bk.selectNode(0).data[2],bi[2])+1e-6)) and 0.98 < (min(bk.selectNode(0).data[3],bi[3])/(max(bk.selectNode(0).data[3],bi[3])+1e-6)):
if debug == True:
print('3')
if (bk_area) > (bi_area):
if int(bk.size()) == 3:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
else:
bk.insertLast([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
else:
if int(bk.size()) == 3:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
else:
bk.insertFirst([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
break
# b-3)
elif 0.98 < (min(bk.selectNode(0).data[1],bi[1])/(max(bk.selectNode(0).data[1],bi[1])+1e-6)) and 0.90 < (min(bk.selectNode(0).data[2],bi[2])/(max(bk.selectNode(0).data[2],bi[2])+1e-6)) and 0.95 < (min(bk.selectNode(0).data[0],bi[0])/(max(bk.selectNode(0).data[0],bi[0])+1e-6)): # 0.98 -> 0.90
if debug == True:
print('4')
if (bk_area) > (bi_area):
if bi[5] == 8 or bi[5] == 9:
                                    bk.insertLast([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]) # for this case only
#q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[5]]))
else:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
else:
if int(bk.selectNode(0).data[5]) == 8 or int(bk.selectNode(0).data[5]) == 9:
bk.insertFirst([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
else:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
break
# d) bk is side + bi is bigger than bk
elif 0.98 < (bk.selectNode(0).data[2]/(bi[0]+1e-6)) and bk.selectNode(0).data[1] > bi[1] and bk.selectNode(0).data[3] > bi[3] and bk.selectNode(0).data[1] < bi[1] and (bk_area) < (bi_area):
if debug == True:
print('5')
bk.insertFirst([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
break
elif 0.98 < (min(bk.selectNode(0).data[0],bi[0])/(max(bk.selectNode(0).data[0],bi[0])+1e-6)) and bi_area > bk_area and 0.98 < iou/((bk_area/bi_area)+1e-6):
if debug == True:
print('6')
bk.insertFirst([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
break
elif bi_area > bk_area and bk.selectNode(0).data[1] > bi[1] and bk.selectNode(0).data[3] < bi[3]:
if debug == True:
print('7')
if flag == 0:
if bidx == len(bounding_box)-1 or len(bounding_box) <= 3:
if 0.95 < (min(bk.selectNode(0).data[0],bi[0])/(max(bk.selectNode(0).data[0],bi[0])+1e-6)):
#print('check')
if int(bk.selectNode(0).data[5]) != 8:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
else:
bk.insertFirst([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
break
else:
#print('check2')
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
break
else:
if int(bk.selectNode(0).data[5]) == 8 and bi[5] != 8:
bk.insertFirst([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
break
else:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
flag += 1
break
elif flag == 1:
bk.insertFirst([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
flag = 0
break
                        # bottom side
elif bi_area < bk_area and 0.98 < (bi[1]/(bk.selectNode(0).data[3]+1e-6)) and bk.selectNode(0).data[0] < bi[0] and bk.selectNode(0).data[2] > bi[2]:
if debug == True:
print('8')
if bi[5] != 8:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
else:
bk.insertLast([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
break
                        # inside the head box, but protruding slightly
elif bi_area < bk_area and 0.95 < iou/((bi_area/bk_area)+1e-6) and 0.98 < (bk.selectNode(0).data[1]/(bi[1]+1e-6)) and 0.98 < (bk.selectNode(0).data[3]/(bi[3]+1e-6)):
if debug == True:
print('9')
if bi[5] == 8 or bi[5] == 9:
bk.insertLast([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
else:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
break
                        # top side
elif bi_area < bk_area and 0.97 < (bk.selectNode(0).data[1]/(bi[3]+1e-6)) and bi[0] > bk.selectNode(0).data[0] and bi[3] < bk.selectNode(0).data[3]:
if debug == True:
print('10')
if bi[5] == 8:
bk.insertLast([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]])
else:
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
else:
if debug == True:
print('last')
if jdx == (len(q)-1):
q.append(SingleLinkedList([bi[0],bi[1],bi[2],bi[3],bi[4],bi[5]]))
#cj += 1
continue
else:
continue
for idxy in range(0, len(q)):
p.append(q.pop(0))
return p
def get_iou(self, a, b, epsilon=1e-5):
# COORDINATES OF THE INTERSECTION BOX
x1 = max(a[0], b[0])
y1 = max(a[1], b[1])
x2 = min(a[2], b[2])
y2 = min(a[3], b[3])
# AREA OF OVERLAP - Area where the boxes intersect
width = (x2 - x1)
height = (y2 - y1)
# handle case where there is NO overlap
if (width<0) or (height <0):
return 0.0
area_overlap = width * height
# COMBINED AREA
area_a = (a[2] - a[0]) * (a[3] - a[1])
area_b = (b[2] - b[0]) * (b[3] - b[1])
area_combined = area_a + area_b - area_overlap
# RATIO OF AREA OF OVERLAP OVER COMBINED AREA
iou = area_overlap / (area_combined+epsilon)
return iou
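# Minimal singly linked list used by key_object to chain detections that belong
# to the same key object.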
class Node:
def __init__(self, data):
self.data = data
self.next = None
def __str__(self):
return str(self.data)
class SingleLinkedList:
def __init__(self, data):
new_node = Node(data)
self.head = new_node
self.list_size = 1
def __str__(self):
print_list = '[ '
node = self.head
while True:
print_list += str(node)
if node.next == None:
break
node = node.next
print_list += ', '
print_list += ' ]'
return print_list
def insertFirst(self, data):
new_node = Node(data)
temp_node = self.head
self.head = new_node
self.head.next = temp_node
self.list_size += 1
def insertLast(self, data):
node = self.head
while True:
if node.next == None:
break
node = node.next
new_node = Node(data)
node.next = new_node
self.list_size += 1
def insertMiddle(self, num, data):
if self.head.next == None:
self.insertLast(data)
return
node = self.selectNode(num)
new_node = Node(data)
temp_next = node.next
node.next = new_node
new_node.next = temp_next
self.list_size += 1
def selectNode(self, num):
if self.list_size < num:
print("Overflow")
return
node = self.head
count = 0
while count < num:
node = node.next
count += 1
return node
def deleteNode(self, num):
if self.list_size < 1:
return # Underflow
elif self.list_size < num:
return # Overflow
if num == 0:
self.deleteHead()
return
        node = self.selectNode(num - 1)
        del_node = node.next
        node.next = node.next.next
        del del_node
        self.list_size -= 1
def deleteHead(self):
        node = self.head
        self.head = node.next
        del node
        self.list_size -= 1
def size(self):
return str(self.list_size)
``` |
{
"source": "jms0923/senet-tf2",
"score": 3
} |
#### File: jms0923/senet-tf2/model.py
```python
import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Multiply, Conv2DTranspose, Input, BatchNormalization, Conv2D, Activation, Dense, GlobalAveragePooling2D, MaxPooling2D, ZeroPadding2D, Add, Reshape
from tensorflow.keras import backend as K
class Conv2dBn(tf.keras.Model):
def __init__(self, input_shape, filters, kernel_size, padding='same', strides=1, activation='relu', **kwargs):
super(Conv2dBn, self).__init__(**kwargs)
self.input_layer = Input(shape=input_shape)
self.filters = filters
self.kernel_size = kernel_size
self.padding = padding
self.strides = strides
self.activation = activation
self.output_layer = self.call(self.input_layer)
self.output_shape_no_batch = self.output_layer.shape[1:]
super(Conv2dBn, self).__init__(
inputs=self.input_layer,
outputs=self.output_layer,
**kwargs
)
def model(self):
return Model(inputs=self.input_layer, outputs=self.output_layer)
def summary(self, line_length=None, positions=None, print_fn=None):
model = Model(inputs=self.input_layer, outputs=self.output_layer)
return model.summary()
def build(self):
self._is_graph_network = True
self._init_graph_network(
inputs=self.input_layer,
outputs=self.output_layer,
)
def call(self, inputs, training=False):
x = Conv2D(self.filters, self.kernel_size, kernel_initializer='he_normal', padding=self.padding, strides=self.strides)(inputs)
x = BatchNormalization()(x)
if self.activation:
x = Activation(self.activation)(x)
return x
class SeBlock(tf.keras.Model):
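    # Squeeze-and-Excitation block: global average pooling, a two-layer bottleneck
    # (reduction ratio r) and a sigmoid gate that rescales the input channels.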
def __init__(self, input_shape, reduction_ratio=16, **kwargs):
super(SeBlock, self).__init__(**kwargs)
self.reduction_ratio = reduction_ratio
self.input_layer = Input(shape=input_shape)
self.output_layer = self.call(self.input_layer)
self.output_shape_no_batch = self.output_layer.shape[1:]
super(SeBlock, self).__init__(
self.input_layer,
self.output_layer,
**kwargs
)
def build(self):
self._is_graph_network = True
self._init_graph_network(
inputs=self.input_layer,
outputs=self.output_layer
)
def call(self, inputs, training=False):
ch_input = K.int_shape(inputs)[-1]
ch_reduced = ch_input // self.reduction_ratio
# Squeeze
x = GlobalAveragePooling2D()(inputs)
# Excitation
x = Dense(ch_reduced, kernel_initializer='he_normal', activation='relu', use_bias=False)(x) # Eqn.3
x = Dense(ch_input, kernel_initializer='he_normal', activation='sigmoid', use_bias=False)(x) # Eqn.3
x = Reshape((1, 1, ch_input))(x)
x = Multiply()([inputs, x])
return x
class SeResidualBlock(tf.keras.Model):
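    # Bottleneck residual block (1x1 -> 3x3 -> 1x1 convolutions) followed by an SeBlock;
    # the input is projected with a 1x1 convolution when the channel count changes.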
def __init__(self, input_shape, filter_sizes, strides=1, reduction_ratio=16, **kwargs):
super(SeResidualBlock, self).__init__(**kwargs)
self.input_layer = Input(shape=input_shape)
self.filter_1, self.filter_2, self.filter_3 = filter_sizes
self.strides = strides
self.reduction_ratio = reduction_ratio
self.conv1 = Conv2dBn(input_shape, self.filter_1, (1, 1), strides=self.strides)
self.conv2 = Conv2dBn(self.conv1.output_shape_no_batch, self.filter_2, (3, 3))
self.conv3 = Conv2dBn(self.conv2.output_shape_no_batch, self.filter_3, (1, 1), activation=None)
self.seBlock = SeBlock(self.conv3.output_shape_no_batch, self.reduction_ratio)
self.projectedInput = Conv2dBn(input_shape, self.filter_3, (1, 1), strides=self.strides, activation=None)
self.output_layer = self.call(self.input_layer)
self.output_shape_no_batch = self.output_layer.shape[1:]
super(SeResidualBlock, self).__init__(
inputs=self.input_layer,
outputs=self.output_layer,
**kwargs
)
def build(self):
self._is_graph_network = True
self._init_graph_network(
inputs=self.input_layer,
outputs=self.output_layer
)
def call(self, input_tensor, training=False):
x = self.conv1(input_tensor)
x = self.conv2(x)
x = self.conv3(x)
x = self.seBlock(x)
projected_input = self.projectedInput(input_tensor) if \
K.int_shape(input_tensor)[-1] != self.filter_3 else input_tensor
shortcut = Add()([projected_input, x])
shortcut = Activation(activation='relu')(shortcut)
return shortcut
class SeResnet(tf.keras.Model):
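    # SE-ResNet backbone: a 7x7 stem plus four stages of SeResidualBlocks; num_blocks
    # gives the blocks per stage (e.g. [3, 4, 6, 3] for SE-ResNet-50). No classifier head.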
def __init__(self, input_shape, num_blocks, reduction_ratio=16, **kwargs):
super(SeResnet, self).__init__(**kwargs)
self.input_layer = Input(input_shape) # , batch_size=1
self.blocks_1, self.blocks_2, self.blocks_3, self.blocks_4 = num_blocks
self.reduction_ratio = reduction_ratio
self.conv1, lastOutShape = self._stageBlock(input_shape, 64, 0, stage='1')
self.conv2, lastOutShape = self._stageBlock(lastOutShape, [64, 64, 256], self.blocks_1, stage='2')
self.conv3, lastOutShape = self._stageBlock(lastOutShape, [128, 128, 512], self.blocks_2, stage='3')
self.conv4, lastOutShape = self._stageBlock(lastOutShape, [256, 256, 1024], self.blocks_3, stage='4')
self.conv5, lastOutShape = self._stageBlock(lastOutShape, [512, 512, 2048], self.blocks_4, stage='5')
self.output_layer = self.call(self.input_layer)
super(SeResnet, self).__init__(
self.input_layer,
self.output_layer,
**kwargs
)
def build(self):
self._is_graph_network = True
self._init_graph_network(
inputs=self.input_layer,
outputs=self.output_layer
)
def call(self, inputs):
x = self.conv1(inputs)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
return x
def _stageBlock(self, input_shape, filter_sizes, blocks, stage=''):
strides = 2 if stage != '2' else 1
if stage != '1':
tmpSB = SeResidualBlock(input_shape, filter_sizes, strides, self.reduction_ratio)
lastOutShape = tmpSB.output_shape_no_batch
layers = [tmpSB]
for i in range(blocks - 1):
tmpSB = SeResidualBlock(lastOutShape, filter_sizes, reduction_ratio=self.reduction_ratio)
lastOutShape = tmpSB.output_shape_no_batch
layers.append(tmpSB)
else:
layers = [
Conv2dBn(input_shape, filter_sizes, (7, 7), strides=strides, padding='same'),
MaxPooling2D((3, 3), strides=2, padding='same')
]
convStage = Sequential(layers, name='conv'+str(stage))
lastOutShape = convStage.output_shape[1:]
return convStage, lastOutShape
def se_resnet50():
return SeResnet((224, 224, 3), [3, 4, 6, 3])
def se_resnet101():
return SeResnet((224, 224, 3), [3, 4, 23, 3])
def se_resnet152():
return SeResnet((224, 224, 3), [3, 8, 36, 3])
seResnet50 = se_resnet50()
seResnet50.build()
print(seResnet50.summary())
``` |
{
"source": "jms7446/PRML",
"score": 3
} |
#### File: PRML/prmlmy/pipe.py
```python
class SimplePipe:
"""Estimator must be placed at the end"""
def __init__(self, transformers):
self.transformers = transformers
self.preprocesses = transformers[:-1]
self.estimator = transformers[-1]
def fit(self, X, y=None):
X = self._transform_pre(X)
self.estimator.fit(X, y)
def predict(self, X, *args, **kwargs):
X = self._transform_pre(X)
return self.estimator.predict(X, *args, **kwargs)
def transform(self, X):
X = self._transform_pre(X)
return self.estimator.transform(X)
def _transform_pre(self, X):
for t in self.preprocesses:
X = t.transform(X)
return X
def get_params_(self):
return self.estimator.params_
def score(self, X=None, y=None):
return self.estimator.score(X, y)
``` |
{
"source": "jmsaavedrar/vae",
"score": 3
} |
#### File: vae/models/vae.py
```python
import tensorflow as tf
def conv3x3(channels, stride = 1, **kwargs):
return tf.keras.layers.Conv2D(channels, (3,3),
strides = stride,
padding = 'same',
kernel_initializer = 'he_normal',
**kwargs)
## component BatchNormalization + ReLU
class BNReLU(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(BNReLU, self).__init__(**kwargs)
self.bn = tf.keras.layers.BatchNormalization(name = 'bn')
def call(self, inputs, training = True):
y = tf.keras.activations.relu(self.bn(inputs, training))
return y
#convolutional block
class ConvBlock(tf.keras.layers.Layer):
def __init__(self, channels, **kwargs):
super(ConvBlock, self).__init__(**kwargs)
self.conv_1 = tf.keras.layers.Conv2D(channels, (3,3),
strides = 2,
padding = 'same',
kernel_initializer = 'he_normal',
**kwargs)
self.conv_2 = tf.keras.layers.Conv2D(channels, (3,3),
strides = 1,
padding = 'same',
kernel_initializer = 'he_normal',
**kwargs)
def call(self, _input):
y = self.conv_2(self.conv_1(_input))
return y
#encoder block
class Encoder(tf.keras.layers.Layer):
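    # Four strided ConvBlocks reduce a 128x128x1 input to 8x8 feature maps; the flattened
    # features feed two dense heads whose outputs (mu, log_var) are returned concatenated.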
def __init__(self, channels, target_dimension, **kwargs):
super(Encoder, self).__init__(**kwargs)
self.conv1 = ConvBlock(channels[0]) #64
self.bn_relu_1 = BNReLU()
self.conv2 = ConvBlock(channels[1]) #32
self.bn_relu_2 = BNReLU()
self.conv3 = ConvBlock(channels[2]) #16
self.bn_relu_3 = BNReLU()
self.conv4 = ConvBlock(channels[3]) #8 x 8 x 64
self.bn_relu_4 = BNReLU()
self.flatten = tf.keras.layers.Flatten()
self.dense_mu = tf.keras.layers.Dense(target_dimension)
self.dense_log_var = tf.keras.layers.Dense(target_dimension)
def call(self, inputs, training):
#input = [128,128,1]
y = self.bn_relu_1(self.conv1(inputs), training) #64x64
y = self.bn_relu_2(self.conv2(y), training) #32x32
y = self.bn_relu_3(self.conv3(y), training) #16x16
y = self.bn_relu_4(self.conv4(y), training) #8
y = self.flatten(y) # 8x8x64 = 4096
mu = self.dense_mu(y)
log_var = self.dense_log_var(y)
x = tf.concat([mu, log_var], axis = 1) #axis
return x # [mu logvar]
class Decoder(tf.keras.layers.Layer):
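    # Mirror of the encoder: a dense layer maps the latent code back to 8x8x64, four
    # transposed convolutions upsample to 128x128, and a 1x1 conv + sigmoid gives the output.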
def __init__(self, channels, **kwargs):
super(Decoder, self).__init__(**kwargs)
self.dense_1 = tf.keras.layers.Dense(4096)
#self.up = tf.keras.layers.UpSampling2D(interpolation = 'bilinear')
self.conv1 = tf.keras.layers.Conv2DTranspose(channels[0], 3, strides = 2, padding = 'same')
self.bn_relu_1 = BNReLU()
self.conv2 = tf.keras.layers.Conv2DTranspose(channels[1], 3, strides = 2, padding = 'same')
self.bn_relu_2 = BNReLU()
self.conv3 = tf.keras.layers.Conv2DTranspose(channels[2], 3, strides = 2, padding = 'same')
self.bn_relu_3 = BNReLU()
self.conv4 = tf.keras.layers.Conv2DTranspose(channels[3], 3, strides = 2, padding = 'same')
self.bn_relu_4 = BNReLU()
self.conv5 = tf.keras.layers.Conv2D(1, (1,1))
self.sigmoid = tf.keras.activations.sigmoid
def call(self, inputs, training):
y = self.dense_1(inputs)
y = tf.reshape(y, (-1, 8,8,64))
y = self.bn_relu_1(self.conv1(y), training) #16
y = self.bn_relu_2(self.conv2(y), training) #32
y = self.bn_relu_3(self.conv3(y), training) #64
y = self.bn_relu_4(self.conv4(y), training) #128
y = self.conv5(y)
y = self.sigmoid(y)
return y
class VAE(tf.keras.Model):
def __init__(self, channels, **kwargs):
super(VAE,self).__init__(**kwargs)
self.encoder = Encoder(channels, 128)
self.decoder = Decoder(tf.reverse(channels, [-1]))
def sampling(self, mu_log_var):
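        # Reparameterization trick: z = mu + exp(log_var / 2) * epsilon with
        # epsilon ~ N(0, I), keeping the sampling step differentiable.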
mu, log_var = tf.split(mu_log_var, 2, axis = 1)
epsilon = tf.random.normal(tf.shape(mu), mean = 0, stddev = 1)
return mu + tf.math.exp(log_var / 2) * epsilon
def call(self, _input, training):
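        # The output packs [mu, log_var] and the flattened reconstruction into one tensor.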
mu_log_var = self.encoder(_input, training)
z = self.sampling(mu_log_var)
x = self.decoder(z, training)
x = tf.keras.layers.Flatten()(x)
out = tf.concat([mu_log_var, x], axis = 1)
return out
``` |
{
"source": "jmsalamy/KungFu",
"score": 3
} |
#### File: KungFu/research/model_definition.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import logging
# tensorflow imports
import tensorflow as tf
# tf.keras imports
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Activation, Flatten, Dropout
from tensorflow.keras.layers import BatchNormalization, AveragePooling2D, Input, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def Conv4_model(x_train, num_classes):
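    # Small 4-convolution CNN: two conv/conv/pool/dropout stages followed by a
    # dense classifier head with softmax over num_classes.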
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
input_shape=x_train.shape[1:], name="conv_1"))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3), name="conv_2"))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same', name="conv_3"))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), name="conv_4"))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
return model
```
#### File: python/kungfu/ext.py
```python
import atexit
from .loader import _call_method, _load_clib, _module_path
def _load_and_init_python_lib():
_load_clib('libkungfu')
_python_lib = _load_clib('libkungfu_python')
_call_method(_python_lib, 'kungfu_python_init')
has_gpu = _call_method(_python_lib, 'kungfu_python_init_gpu')
return _python_lib, has_gpu
_python_lib, _has_gpu = _load_and_init_python_lib()
def _finalize_python_lib():
_call_method(_python_lib, 'kungfu_python_finialize')
if _has_gpu:
_call_method(_python_lib, 'kungfu_python_finialize_gpu')
atexit.register(_finalize_python_lib)
def current_rank():
"""Get the current rank of this peer."""
return _python_lib.kungfu_rank()
def current_local_rank():
return _python_lib.kungfu_local_rank()
def current_cluster_size():
"""Get the number of peers in the current cluster."""
return _python_lib.kungfu_cluster_size()
def _get_cuda_index():
return _python_lib.kungfu_get_cuda_index()
def run_barrier():
"""Run the barrier operation eagerly."""
_python_lib.kungfu_barrier()
def _get_other_ranks():
self_rank = current_rank()
ranks = list(range(current_cluster_size()))
return [r for r in ranks if r != self_rank]
def show_cuda_version():
if _has_gpu:
_call_method(_python_lib, 'kungfu_show_cuda_version', force=True)
else:
print('NCCL is NOT enabled')
def show_nccl_version():
if _has_gpu:
_call_method(_python_lib, 'kungfu_show_nccl_version', force=True)
else:
print('NCCL is NOT enabled')
```
#### File: python/unit/test_op.py
```python
import tensorflow as tf
from kungfu.tensorflow.ops import counter, step_based_schedule
def test_counter():
c = counter()
with tf.Session() as sess:
for i in range(10):
n = sess.run(c)
assert (n == i)
def test_counter_init():
c = counter(init=1)
with tf.Session() as sess:
for i in range(10):
n = sess.run(c)
assert (n == i + 1)
def test_step_based_scheduler():
sizes = [1, 2, 4, 8]
n_step = 3
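    # schedule config is a comma-separated list of "<cluster size>:<steps>" pairs,
    # so every size in `sizes` is held for n_step steps.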
config = ','.join('%d:%d' % (size, n_step) for size in sizes)
expected_sizes = [1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8]
schedule = step_based_schedule(config)
with tf.Session() as sess:
for i in range(12):
size = sess.run(schedule)
assert (size == expected_sizes[i])
``` |
{
"source": "jmsanders/dagster",
"score": 2
} |
#### File: daemon_tests/integration_tests/test_queued_run_coordinator_integration.py
```python
import contextlib
import os
import subprocess
from dagster.core.instance import DagsterInstance
from dagster.core.instance.ref import InstanceRef
from dagster.core.test_utils import create_run_for_test, poll_for_finished_run
from dagster.utils import merge_dicts
from dagster.utils.external import external_pipeline_from_run
def setup_instance(dagster_home):
os.environ["DAGSTER_HOME"] = dagster_home
config = """run_coordinator:
module: dagster.core.run_coordinator
class: QueuedRunCoordinator
config:
dequeue_interval_seconds: 1
"""
with open(os.path.join(dagster_home, "dagster.yaml"), "w") as file:
file.write(config)
@contextlib.contextmanager
def start_daemon():
    p = subprocess.Popen(["dagster-daemon", "run"])
    try:
        yield
    finally:
        p.kill()
def create_run(instance, pipeline_handle, **kwargs): # pylint: disable=redefined-outer-name
pipeline_args = merge_dicts(
{
"pipeline_name": "foo_pipeline",
"external_pipeline_origin": pipeline_handle.get_external_origin(),
},
kwargs,
)
return create_run_for_test(instance, **pipeline_args)
def assert_events_in_order(logs, expected_events):
logged_events = [log.dagster_event.event_type_value for log in logs]
filtered_logged_events = [event for event in logged_events if event in expected_events]
assert filtered_logged_events == expected_events
def test_queued_runs(tmpdir, foo_pipeline_handle):
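    # End-to-end check: with the daemon running, a submitted run should be queued,
    # dequeued and executed, producing the events asserted below in order.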
dagster_home_path = tmpdir.strpath
setup_instance(dagster_home_path)
with start_daemon():
instance_ref = InstanceRef.from_dir(dagster_home_path)
with DagsterInstance.from_ref(instance_ref) as instance:
run = create_run(instance, foo_pipeline_handle)
with external_pipeline_from_run(run) as external_pipeline:
instance.submit_run(run.run_id, external_pipeline)
poll_for_finished_run(instance, run.run_id)
logs = instance.all_logs(run.run_id)
assert_events_in_order(
logs, ["PIPELINE_ENQUEUED", "PIPELINE_DEQUEUED", "PIPELINE_SUCCESS"],
)
``` |
{
"source": "jmsantorum/aws-deploy",
"score": 2
} |
#### File: ecs/commands/deploy.py
```python
import click
from aws_deploy.ecs.cli import (
ecs_cli, get_ecs_client, get_task_definition, print_diff, create_task_definition, deploy_task_definition,
rollback_task_definition
)
from aws_deploy.ecs.helper import DeployAction, TaskPlacementError, EcsError
@ecs_cli.command()
@click.argument('cluster')
@click.argument('service')
@click.option('--task', type=str,
help='Task definition to be deployed. Can be a task ARN or a task family with optional revision')
@click.option('-i', '--image', type=(str, str), multiple=True,
help='Overwrites the image for a container: <container> <image>')
@click.option('-t', '--tag', help='Changes the tag for ALL container images')
@click.option('-c', '--command', type=(str, str), multiple=True,
help='Overwrites the command in a container: <container> <command>')
@click.option('-e', '--env', type=(str, str, str), multiple=True,
help='Adds or changes an environment variable: <container> <name> <value>')
@click.option('--env-file', type=(str, str), default=((None, None),), multiple=True, required=False,
help='Load environment variables from .env-file')
@click.option('-s', '--secret', type=(str, str, str), multiple=True,
help='Adds or changes a secret environment variable from the AWS Parameter Store '
'(Not available for Fargate): <container> <name> <parameter name>')
@click.option('--exclusive-env', is_flag=True, default=False, show_default=True,
help='Set the given environment variables exclusively and remove all other pre-existing env variables '
'from all containers')
@click.option('--exclusive-secrets', is_flag=True, default=False, show_default=True,
help='Set the given secrets exclusively and remove all other pre-existing secrets from all containers')
@click.option('-r', '--role', type=str, help='Sets the task\'s role ARN: <task role ARN>')
@click.option('-x', '--execution-role', type=str, help='Sets the execution\'s role ARN: <execution role ARN>')
@click.option('--ignore-warnings', is_flag=True,
help='Do not fail deployment on warnings (port already in use or insufficient memory/CPU)')
@click.option('--timeout', default=300, type=int, show_default=True,
help='Amount of seconds to wait for deployment before command fails. '
'To disable timeout (fire and forget) set to -1.')
@click.option('--sleep-time', default=1, type=int, show_default=True,
help='Amount of seconds to wait between each check of the service.')
@click.option('--deregister/--no-deregister', default=True, show_default=True,
help='Deregister or keep the old task definition.')
@click.option('--rollback/--no-rollback', default=False, show_default=True,
help='Rollback to previous revision, if deployment failed.')
@click.option('--diff/--no-diff', default=True, show_default=True,
help='Print which values were changed in the task definition')
@click.pass_context
def deploy(ctx, cluster, service, task, image, tag, command, env, env_file, secret, exclusive_env, exclusive_secrets,
role, execution_role, ignore_warnings, timeout, sleep_time, deregister, rollback, diff):
"""
Redeploy or modify a service.
\b
CLUSTER is the name of your cluster (e.g. 'my-cluster') within ECS.
SERVICE is the name of your service (e.g. 'my-app') within ECS.
When not giving any other options, the task definition will not be changed.
It will just be duplicated, so that all container images will be pulled and redeployed.
"""
try:
click.secho(f'Deploy [cluster={cluster}, service={service}]')
ecs_client = get_ecs_client(ctx)
deploy_action = DeployAction(ecs_client, cluster, service)
td = get_task_definition(deploy_action, task)
td.set_images(tag, **{key: value for (key, value) in image})
td.set_commands(**{key: value for (key, value) in command})
td.set_environment(env, exclusive_env, env_file)
td.set_secrets(secret, exclusive_secrets)
td.set_role_arn(role)
td.set_execution_role_arn(execution_role)
if diff:
print_diff(td)
new_td = create_task_definition(deploy_action, td)
try:
deploy_task_definition(
deployment=deploy_action,
task_definition=new_td,
title='Deploying new task definition',
success_message='Deployment successful',
failure_message='Deployment failed',
timeout=timeout,
deregister=deregister,
previous_task_definition=td,
ignore_warnings=ignore_warnings,
sleep_time=sleep_time
)
except TaskPlacementError:
if rollback:
rollback_task_definition(deploy_action, td, new_td, sleep_time=sleep_time)
raise
except EcsError as e:
click.secho(str(e), fg='red', err=True)
exit(1)
```
#### File: aws_deploy/ecs/helper.py
```python
import json
import re
from datetime import datetime
from json.decoder import JSONDecodeError
import click
from boto3.session import Session
from boto3_type_annotations.ecs import Client
from botocore.exceptions import ClientError, NoCredentialsError
from dateutil.tz.tz import tzlocal
from dictdiffer import diff
JSON_LIST_REGEX = re.compile(r'^\[.*\]$')
LAUNCH_TYPE_EC2 = 'EC2'
LAUNCH_TYPE_FARGATE = 'FARGATE'
def read_env_file(container_name, file):
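    # Parses a .env-style file into (container, key, value) tuples, skipping comments,
    # blank lines and lines without '='.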
env_vars = []
try:
with open(file) as f:
for line in f:
if line.startswith('#') or not line.strip() or '=' not in line:
continue
key, value = line.strip().split('=', 1)
env_vars.append((container_name, key, value))
except Exception as e:
raise EcsTaskDefinitionCommandError(str(e))
return tuple(env_vars)
class EcsClient(object):
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, region_name=None,
profile_name=None):
session = Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region_name=region_name,
profile_name=profile_name
)
self.boto: Client = session.client('ecs')
self.events = session.client('events')
def describe_services(self, cluster_name, service_name):
return self.boto.describe_services(
cluster=cluster_name,
services=[service_name]
)
def describe_task_definition(self, task_definition_arn):
try:
return self.boto.describe_task_definition(
taskDefinition=task_definition_arn,
include=[
'TAGS',
]
)
except ClientError:
raise UnknownTaskDefinitionError(
u'Unknown task definition arn: %s' % task_definition_arn
)
def list_tasks(self, cluster_name, service_name):
return self.boto.list_tasks(
cluster=cluster_name,
serviceName=service_name
)
def describe_tasks(self, cluster_name, task_arns):
return self.boto.describe_tasks(cluster=cluster_name, tasks=task_arns)
def register_task_definition(self, family, containers, volumes, role_arn,
execution_role_arn, tags, additional_properties):
if tags:
additional_properties['tags'] = tags
return self.boto.register_task_definition(
family=family,
containerDefinitions=containers,
volumes=volumes,
taskRoleArn=role_arn,
executionRoleArn=execution_role_arn,
**additional_properties
)
def deregister_task_definition(self, task_definition_arn):
return self.boto.deregister_task_definition(
taskDefinition=task_definition_arn
)
def update_service(self, cluster, service, desired_count, task_definition):
if desired_count is None:
return self.boto.update_service(
cluster=cluster,
service=service,
taskDefinition=task_definition
)
return self.boto.update_service(
cluster=cluster,
service=service,
desiredCount=desired_count,
taskDefinition=task_definition
)
def run_task(self, cluster, task_definition, count, started_by, overrides,
launchtype='EC2', subnets=(), security_groups=(),
public_ip=False, platform_version=None):
if launchtype == LAUNCH_TYPE_FARGATE:
if not subnets or not security_groups:
msg = 'At least one subnet (--subnet) and one security ' \
'group (--securitygroup) definition are required ' \
'for launch type FARGATE'
raise TaskPlacementError(msg)
network_configuration = {
"awsvpcConfiguration": {
"subnets": subnets,
"securityGroups": security_groups,
"assignPublicIp": "ENABLED" if public_ip else "DISABLED"
}
}
if platform_version is None:
platform_version = 'LATEST'
return self.boto.run_task(
cluster=cluster,
taskDefinition=task_definition,
count=count,
startedBy=started_by,
overrides=overrides,
launchType=launchtype,
networkConfiguration=network_configuration,
platformVersion=platform_version,
)
return self.boto.run_task(
cluster=cluster,
taskDefinition=task_definition,
count=count,
startedBy=started_by,
overrides=overrides
)
def update_rule(self, cluster, rule, task_definition):
target = self.events.list_targets_by_rule(Rule=rule)['Targets'][0]
target['Arn'] = task_definition.arn.partition('task-definition')[0] + 'cluster/' + cluster
target['EcsParameters']['TaskDefinitionArn'] = task_definition.arn
self.events.put_targets(Rule=rule, Targets=[target])
return target['Id']
class EcsService(dict):
def __init__(self, cluster, service_definition=None, **kwargs):
self._cluster = cluster
super(EcsService, self).__init__(service_definition, **kwargs)
def set_task_definition(self, task_definition):
self[u'taskDefinition'] = task_definition.arn
@property
def cluster(self):
return self._cluster
@property
def name(self):
return self.get(u'serviceName')
@property
def task_definition(self):
return self.get(u'taskDefinition')
@property
def desired_count(self):
return self.get(u'desiredCount')
@property
def deployment_created_at(self):
for deployment in self.get(u'deployments'):
if deployment.get(u'status') == u'PRIMARY':
return deployment.get(u'createdAt')
return datetime.now()
@property
def deployment_updated_at(self):
for deployment in self.get(u'deployments'):
if deployment.get(u'status') == u'PRIMARY':
return deployment.get(u'updatedAt')
return datetime.now()
@property
def errors(self):
return self.get_warnings(
since=self.deployment_updated_at
)
@property
def older_errors(self):
return self.get_warnings(
since=self.deployment_created_at,
until=self.deployment_updated_at
)
def get_warnings(self, since=None, until=None):
since = since or self.deployment_created_at
until = until or datetime.now(tz=tzlocal())
errors = {}
for event in self.get(u'events'):
if u'unable' not in event[u'message']:
continue
if since < event[u'createdAt'] < until:
errors[event[u'createdAt']] = event[u'message']
return errors
class EcsTaskDefinition(object):
def __init__(self, containerDefinitions, volumes, family, revision, status, taskDefinitionArn,
requiresAttributes=None, taskRoleArn=None, executionRoleArn=None, compatibilities=None, tags=None,
**kwargs):
self.containers = containerDefinitions
self.volumes = volumes
self.family = family
self.revision = revision
self.status = status
self.arn = taskDefinitionArn
self.requires_attributes = requiresAttributes or {}
self.role_arn = taskRoleArn or ''
self.execution_role_arn = executionRoleArn or ''
self.tags = tags
self.additional_properties = kwargs
self._diff = []
# the compatibilities parameter is returned from the ECS API, when
# describing a task, but may not be included, when registering a new
# task definition. Just storing it for now.
self.compatibilities = compatibilities
@property
def container_names(self):
for container in self.containers:
yield container['name']
@property
def images(self):
for container in self.containers:
yield container['name'], container['image']
@property
def family_revision(self):
return f'{self.family}:{self.revision}'
@property
def updated(self) -> bool:
return self._diff != []
@property
def diff(self):
return self._diff
def show_diff(self, show_diff: bool = False):
if show_diff:
click.secho('Task definition modified:')
for d in self._diff:
click.secho(f' {str(d)}', fg='blue')
click.secho('')
def diff_raw(self, task_b):
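        # Normalises both task definitions (environment and secrets keyed by name) and
        # returns the dictdiffer diff of the resulting composite structures.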
containers_a = {c['name']: c for c in self.containers}
containers_b = {c['name']: c for c in task_b.containers}
requirements_a = sorted([r['name'] for r in self.requires_attributes])
requirements_b = sorted([r['name'] for r in task_b.requires_attributes])
for container in containers_a:
containers_a[container]['environment'] = {e['name']: e['value'] for e in
containers_a[container].get('environment', {})}
for container in containers_b:
containers_b[container]['environment'] = {e['name']: e['value'] for e in
containers_b[container].get('environment', {})}
for container in containers_a:
containers_a[container]['secrets'] = {e['name']: e['valueFrom'] for e in
containers_a[container].get('secrets', {})}
for container in containers_b:
containers_b[container]['secrets'] = {e['name']: e['valueFrom'] for e in
containers_b[container].get('secrets', {})}
composite_a = {
'containers': containers_a,
'volumes': self.volumes,
'requires_attributes': requirements_a,
'role_arn': self.role_arn,
'execution_role_arn': self.execution_role_arn,
'compatibilities': self.compatibilities,
'additional_properties': self.additional_properties,
}
composite_b = {
'containers': containers_b,
'volumes': task_b.volumes,
'requires_attributes': requirements_b,
'role_arn': task_b.role_arn,
'execution_role_arn': task_b.execution_role_arn,
'compatibilities': task_b.compatibilities,
'additional_properties': task_b.additional_properties,
}
return list(diff(composite_a, composite_b))
def get_overrides(self):
override = dict()
overrides = []
for diff in self.diff:
if override.get('name') != diff.container:
override = dict(name=diff.container)
overrides.append(override)
if diff.field == 'command':
override['command'] = self.get_overrides_command(diff.value)
elif diff.field == 'environment':
override['environment'] = self.get_overrides_env(diff.value)
elif diff.field == 'secrets':
override['secrets'] = self.get_overrides_secrets(diff.value)
return overrides
@staticmethod
def parse_command(command):
if re.match(JSON_LIST_REGEX, command):
try:
return json.loads(command)
except JSONDecodeError as e:
raise EcsTaskDefinitionCommandError(
f"command should be valid JSON list. Got following command: {command} resulting in error: {str(e)}"
)
return command.split()
@staticmethod
def get_overrides_command(command):
return EcsTaskDefinition.parse_command(command)
@staticmethod
def get_overrides_env(env):
return [{"name": e, "value": env[e]} for e in env]
@staticmethod
def get_overrides_secrets(secrets):
return [{"name": s, "valueFrom": secrets[s]} for s in secrets]
def get_tag(self, key):
for tag in self.tags:
if tag['key'] == key:
return tag['value']
return None
def set_tag(self, key: str, value: str):
if key and value:
done = False
for tag in self.tags:
if tag['key'] == key:
if tag['value'] != value:
diff = EcsTaskDefinitionDiff(
container=None,
field=f"tags['{key}']",
value=value,
old_value=tag['value']
)
self._diff.append(diff)
tag['value'] = value
done = True
break
if not done:
diff = EcsTaskDefinitionDiff(container=None, field=f"tags['{key}']", value=value, old_value=None)
self._diff.append(diff)
self.tags.append({'key': key, 'value': value})
def set_images(self, tag=None, **images):
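        # Replaces the image per container when given explicitly, otherwise only swaps
        # the image tag; every change is recorded as an EcsTaskDefinitionDiff.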
self.validate_container_options(**images)
for container in self.containers:
if container['name'] in images:
new_image = images[container['name']]
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='image',
value=new_image,
old_value=container['image']
)
self._diff.append(diff)
container['image'] = new_image
elif tag:
image_definition = container['image'].rsplit(':', 1)
new_image = f'{image_definition[0]}:{tag.strip()}'
# check if tag changes
if new_image != container['image']:
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='image',
value=new_image,
old_value=container['image']
)
self._diff.append(diff)
container['image'] = new_image
def set_commands(self, **commands):
self.validate_container_options(**commands)
for container in self.containers:
if container['name'] in commands:
new_command = commands[container['name']]
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='command',
value=new_command,
old_value=container.get('command')
)
self._diff.append(diff)
container['command'] = self.parse_command(new_command)
def set_environment(self, environment_list, exclusive=False, env_file=((None, None),)):
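        # Merges environment variables from --env and --env-file per container; with
        # exclusive=True, variables that are not supplied again are removed.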
environment = {}
if None not in env_file[0]:
for env in env_file:
line = read_env_file(env[0], env[1])
environment_list = line + environment_list
for env in environment_list:
environment.setdefault(env[0], {})
environment[env[0]][env[1]] = env[2]
self.validate_container_options(**environment)
for container in self.containers:
if container['name'] in environment:
self.apply_container_environment(
container=container,
new_environment=environment[container['name']],
exclusive=exclusive,
)
elif exclusive is True:
self.apply_container_environment(
container=container,
new_environment={},
exclusive=exclusive,
)
def apply_container_environment(self, container, new_environment, exclusive=False):
environment = container.get('environment', {})
old_environment = {env['name']: env['value'] for env in environment}
if exclusive is True:
merged = new_environment
else:
merged = old_environment.copy()
merged.update(new_environment)
if old_environment == merged:
return
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='environment',
value=merged,
old_value=old_environment
)
self._diff.append(diff)
container['environment'] = [
{"name": e, "value": merged[e]} for e in merged
]
def set_secrets(self, secrets_list, exclusive=False):
secrets = {}
for secret in secrets_list:
secrets.setdefault(secret[0], {})
secrets[secret[0]][secret[1]] = secret[2]
self.validate_container_options(**secrets)
for container in self.containers:
if container['name'] in secrets:
self.apply_container_secrets(
container=container,
new_secrets=secrets[container['name']],
exclusive=exclusive,
)
elif exclusive is True:
self.apply_container_secrets(
container=container,
new_secrets={},
exclusive=exclusive,
)
def apply_container_secrets(self, container, new_secrets, exclusive=False):
secrets = container.get('secrets', {})
old_secrets = {secret['name']: secret['valueFrom'] for secret in secrets}
if exclusive is True:
merged = new_secrets
else:
merged = old_secrets.copy()
merged.update(new_secrets)
if old_secrets == merged:
return
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='secrets',
value=merged,
old_value=old_secrets
)
self._diff.append(diff)
container['secrets'] = [
{"name": s, "valueFrom": merged[s]} for s in merged
]
def validate_container_options(self, **container_options):
for container_name in container_options:
if container_name not in self.container_names:
raise UnknownContainerError(f'Unknown container: {container_name}')
def set_role_arn(self, role_arn):
if role_arn:
diff = EcsTaskDefinitionDiff(
container=None,
field='role_arn',
value=role_arn,
old_value=self.role_arn
)
self.role_arn = role_arn
self._diff.append(diff)
def set_execution_role_arn(self, execution_role_arn):
if execution_role_arn:
diff = EcsTaskDefinitionDiff(
container=None,
field='execution_role_arn',
value=execution_role_arn,
old_value=self.execution_role_arn
)
self.execution_role_arn = execution_role_arn
self._diff.append(diff)
class EcsTaskDefinitionDiff(object):
def __init__(self, container, field, value, old_value):
self.container = container
self.field = field
self.value = value
self.old_value = old_value
def __repr__(self):
if self.field == 'environment':
return '\n'.join(self._get_environment_diffs(
self.container,
self.value,
self.old_value,
))
elif self.field == 'secrets':
return '\n'.join(self._get_secrets_diffs(
self.container,
self.value,
self.old_value,
))
elif self.container:
return f'Changed {self.field} of container "{self.container}" to: "{self.value}" (was: "{self.old_value}")'
else:
return f'Changed {self.field} to: "{self.value}" (was: "{self.old_value}")'
@staticmethod
def _get_environment_diffs(container, env, old_env):
diffs = []
for name, value in env.items():
old_value = old_env.get(name)
if value != old_value or value and not old_value:
message = f'Changed environment "{name}" of container "{container}" to: "{value}"'
diffs.append(message)
for old_name in old_env.keys():
if old_name not in env.keys():
message = f'Removed environment "{old_name}" of container "{container}"'
diffs.append(message)
return diffs
@staticmethod
def _get_secrets_diffs(container, secrets, old_secrets):
diffs = []
for name, value in secrets.items():
old_value = old_secrets.get(name)
if value != old_value or not old_value:
message = f'Changed secret "{name}" of container "{container}" to: "{value}"'
diffs.append(message)
for old_name in old_secrets.keys():
if old_name not in secrets.keys():
message = f'Removed secret "{old_name}" of container "{container}"'
diffs.append(message)
return diffs
class EcsAction(object):
def __init__(self, client: EcsClient, cluster_name: str, service_name: str):
self._client = client
self._cluster_name = cluster_name
self._service_name = service_name
try:
if service_name:
self._service = self.get_service()
except IndexError:
raise EcsConnectionError(
u'An error occurred when calling the DescribeServices '
u'operation: Service not found.'
)
except ClientError as e:
raise EcsConnectionError(str(e))
except NoCredentialsError:
raise EcsConnectionError(
u'Unable to locate credentials. Configure credentials '
u'by running "aws configure".'
)
def get_service(self):
services_definition = self._client.describe_services(
cluster_name=self._cluster_name,
service_name=self._service_name
)
return EcsService(
cluster=self._cluster_name,
service_definition=services_definition[u'services'][0]
)
def get_current_task_definition(self, service):
return self.get_task_definition(service.task_definition)
def get_task_definition(self, task_definition):
task_definition_payload = self._client.describe_task_definition(
task_definition_arn=task_definition
)
task_definition = EcsTaskDefinition(
tags=task_definition_payload.get('tags', None),
**task_definition_payload[u'taskDefinition']
)
return task_definition
def update_task_definition(self, task_definition):
response = self._client.register_task_definition(
family=task_definition.family,
containers=task_definition.containers,
volumes=task_definition.volumes,
role_arn=task_definition.role_arn,
execution_role_arn=task_definition.execution_role_arn,
tags=task_definition.tags,
additional_properties=task_definition.additional_properties
)
new_task_definition = EcsTaskDefinition(**response[u'taskDefinition'])
return new_task_definition
def deregister_task_definition(self, task_definition):
self._client.deregister_task_definition(task_definition.arn)
def update_service(self, service, desired_count=None):
response = self._client.update_service(
cluster=service.cluster,
service=service.name,
desired_count=desired_count,
task_definition=service.task_definition
)
return EcsService(self._cluster_name, response[u'service'])
def is_deployed(self, service):
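        # A service counts as deployed when it has a single deployment and the number of
        # RUNNING tasks on the new task definition matches the desired count.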
if len(service[u'deployments']) != 1:
return False
running_tasks = self._client.list_tasks(
cluster_name=service.cluster,
service_name=service.name
)
if not running_tasks[u'taskArns']:
return service.desired_count == 0
running_count = self.get_running_tasks_count(
service=service,
task_arns=running_tasks[u'taskArns']
)
return service.desired_count == running_count
def get_running_tasks_count(self, service, task_arns):
running_count = 0
tasks_details = self._client.describe_tasks(
cluster_name=self._cluster_name,
task_arns=task_arns
)
for task in tasks_details[u'tasks']:
arn = task[u'taskDefinitionArn']
status = task[u'lastStatus']
if arn == service.task_definition and status == u'RUNNING':
running_count += 1
return running_count
@property
def client(self):
return self._client
@property
def service(self):
return self._service
@property
def cluster_name(self):
return self._cluster_name
@property
def service_name(self):
return self._service_name
class DeployAction(EcsAction):
def deploy(self, task_definition):
try:
self._service.set_task_definition(task_definition)
return self.update_service(self._service)
except ClientError as e:
raise EcsError(str(e))
class ScaleAction(EcsAction):
def scale(self, desired_count):
try:
return self.update_service(self._service, desired_count)
except ClientError as e:
raise EcsError(str(e))
class RunAction(EcsAction):
def __init__(self, client, cluster_name):
super(RunAction, self).__init__(client, cluster_name, None)
self._client = client
self._cluster_name = cluster_name
self.started_tasks = []
def run(self, task_definition, count, started_by, launchtype, subnets,
security_groups, public_ip, platform_version):
try:
result = self._client.run_task(
cluster=self._cluster_name,
task_definition=task_definition.family_revision,
count=count,
started_by=started_by,
overrides=dict(containerOverrides=task_definition.get_overrides()),
launchtype=launchtype,
subnets=subnets,
security_groups=security_groups,
public_ip=public_ip,
platform_version=platform_version,
)
self.started_tasks = result['tasks']
return True
except ClientError as e:
raise EcsError(str(e))
class UpdateAction(EcsAction):
def __init__(self, client):
super(UpdateAction, self).__init__(client, None, None)
class DiffAction(EcsAction):
def __init__(self, client):
super(DiffAction, self).__init__(client, None, None)
class EcsError(Exception):
pass
class EcsConnectionError(EcsError):
pass
class UnknownContainerError(EcsError):
pass
class TaskPlacementError(EcsError):
pass
class UnknownTaskDefinitionError(EcsError):
pass
class EcsTaskDefinitionCommandError(EcsError):
pass
``` |
{
"source": "jms-calado/Pycom",
"score": 2
} |
#### File: project/lib/MQTTLogic.py
```python
import _thread
import time
import utime
import gc
import os
import errno  # used for errno.EHOSTUNREACH in startMQTT()
import pycom
import ujson as json
from machine import Timer
from umqttrobust import MQTTClient
from logger import Logger
import config
import state
import wifi
class MQTTLogic:
# enable GC
gc.enable()
def __init__(self, client = None):
self.client = client
#self.log = log
#enable logger
self.log = Logger()
#mqtt sub callback
def sub_cb(self, topic, msg):
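        # Depending on the topic, toggles operation mode, updates the energy/sampling
        # configuration, or updates the Wi-Fi credentials.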
#print("topic: " + str(topic))
#print("msg: " + str(msg))
self.log.debugLog('Sub Topic: {} ||| Msg: {}'.format(topic.decode(), msg.decode()))
if topic == config.MQTT_SUB_ACTIVE.encode():
if msg == b'true':
state.OP_MODE = True
pycom.nvs_set('active', 1)
self.stopMQTT()
elif msg == b'false':
state.OP_MODE = False
pycom.nvs_set('active', 0)
pycom.nvs_set('bootNum', 0)
elif topic == config.MQTT_SUB_CONF_ENERGY.encode():
try:
json_obj = json.loads(msg.decode())
except ValueError as valueerror:
self.log.debugLog('Exception MQTT json valueerror: {}'.format(valueerror))
try:
state.GNSS_ACTIVE = json_obj['gnss']['active']
config.GNSS_SR = json_obj['gnss']['sr']
state.LTENB_ACTIVE = json_obj['lteNB']['active']
config.LTENB_SR = json_obj['lteNB']['sr']
state.WIFI_ACTIVE = json_obj['wifi']['active']
config.WIFI_SR = json_obj['wifi']['sr']
state.LORA_ACTIVE = json_obj['lora']['active']
config.LORA_SR = json_obj['lora']['sr']
except KeyError as keyerror:
self.log.debugLog('Exception MQTT json keyerror: {}'.format(keyerror))
elif topic == config.MQTT_SUB_CONF_WIFI.encode():
try:
json_obj = json.loads(msg.decode())
except ValueError as valueerror:
self.log.debugLog('Exception MQTT json valueerror: {}'.format(valueerror))
try:
config.SSID = json_obj['ssid']
config.WLANPWD = json_obj['wlanpw']
except KeyError as keyerror:
self.log.debugLog('Exception MQTT json keyerror: {}'.format(keyerror))
# MQTT connect
def startMQTT(self):
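        # Connects to the broker with a last-will message, subscribes to the active,
        # energy and wifi configuration topics and, outside operation mode, starts the
        # polling thread that checks for subscribed messages.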
self.log.debugLog('start mqtt')
if state.CONNECTED:
try:
self.client = MQTTClient(client_id=config.MQTT_DEVICE_ID, server=config.MQTT_SERVER, user=config.MQTT_USER_ID, password=config.MQTT_PWD, port=1883, keepalive=600)
self.client.set_callback(self.sub_cb)
#ed_id = config.MQTT_DEVICE_ID
#lw_msg = '{"status":"Unexpected disconnection:' + ed_id + '"}'
#lw_msg = '{"status":"123456789012345proto1"}'
lw_msg = '"LW: Unexpected disconnect"'
self.client.set_last_will(topic=config.MQTT_PUB_STATUS, msg=lw_msg, retain=False, qos=1)
#if self.client.connect(clean_session=False):
# raise Exception('MQTT connect: a session already exists')
conn_result = self.client.connect(clean_session=False)
self.log.debugLog('conn_result: {}'.format(conn_result))
except OSError as oserror:
try:
if oserror.errno == errno.EHOSTUNREACH:
# MQTT Connect Failed because Host is unreachable.
self.log.debugLog('Exception MQTT Connect EHOSTUNREACH: {}'.format(oserror))
state.MQTT_ACTIVE = False
return state.MQTT_ACTIVE
else:
self.log.debugLog('Exception MQTT Connect OSError: {}'.format(oserror))
state.MQTT_ACTIVE = False
return state.MQTT_ACTIVE
except AttributeError as atterr:
self.log.debugLog('Exception MQTT Connect AttributeError: {}'.format(atterr))
state.MQTT_ACTIVE = False
return state.MQTT_ACTIVE
except Exception as mqttconnecterror:
self.log.debugLog('Exception MQTT Connect: {}'.format(mqttconnecterror))
#self.log.debugLog(mqttconnecterror)
state.MQTT_ACTIVE = False
return state.MQTT_ACTIVE
self.log.debugLog('MQTT: connected')
try:
self.client.subscribe(topic=config.MQTT_SUB_ACTIVE, qos=1)
time.sleep(1)
except Exception as mqttpubsuberror:
self.log.debugLog('Exception MQTT_SUB_ACTIVE: {}'.format(mqttpubsuberror))
state.MQTT_ACTIVE = False
return state.MQTT_ACTIVE
try:
self.client.subscribe(topic=config.MQTT_SUB_CONF_ENERGY, qos=1)
time.sleep(1)
except Exception as mqttpubsuberror:
self.log.debugLog('Exception MQTT_SUB_CONF_ENERGY: {}'.format(mqttpubsuberror))
state.MQTT_ACTIVE = False
return state.MQTT_ACTIVE
try:
self.client.subscribe(topic=config.MQTT_SUB_CONF_WIFI, qos=1)
time.sleep(1)
except Exception as mqttpubsuberror:
self.log.debugLog('Exception MQTT_SUB_CONF_WIFI: {}'.format(mqttpubsuberror))
state.MQTT_ACTIVE = False
return state.MQTT_ACTIVE
self.log.debugLog('MQTT: subbed')
'''
try:
registered = pycom.nvs_get('registered')
except Exception as nvserror:
registered = None
self.log.debugLog('MQTT registered: {}'.format(registered))
if registered == None:
msg_reg = '{"deviceId":"' + config.MQTT_DEVICE_ID + '","component":["lteNB","lora","wifi","bluetooth","accelerometer","gnss"],"batteryCapacity":800}'
self.client.publish(topic=config.MQTT_PUB_REG, msg=msg_reg, retain=False, qos=1)
pycom.nvs_set('registered', 1)
'''
if not state.OP_MODE:
try:
# start thread
self.runThread = True
self.mqttLogic_thread = _thread.start_new_thread(self.mqtt_thread,())
except Exception as mqttstartthread:
self.log.debugLog('Exception MQTT start thread: {}'.format(mqttstartthread))
state.MQTT_ACTIVE = False
return state.MQTT_ACTIVE
state.MQTT_ACTIVE = True
return state.MQTT_ACTIVE
def mqtt_thread(self):
self.thread_active = True
print('Running mqtt_thread id: {}'.format(_thread.get_ident()))
while self.runThread:
gc.collect()
self.client.check_msg() #check SUBed messages
if not self.runThread:
self.log.debugLog("Breaking thread loop")
break
time.sleep(config.MQTT_PUB_SR)
try:
self.log.debugLog("Try to exit MQTT thread")
self.thread_active = False
_thread.exit()
except SystemExit as sysexiterror:
self.log.debugLog('System Exit: {}'.format(sysexiterror))
self.log.debugLog('Exited mqtt_thread id: {}'.format(_thread.get_ident()))
time.sleep(1)
'''
if not self.thread_active:
if state.MQTT_ACTIVE:
self.client.disconnect()
self.log.debugLog("Try to disconnect MQTT client")
'''
def stopMQTT(self):
self.runThread = False
time.sleep(5)
#if state.MQTT_ACTIVE:
# self.client.disconnect()
self.log.debugLog('end mqtt')
def pubMQTT(self, topic, msg, retain, qos):
#self.log.debugLog('pubMQTT check_msg')
self.client.check_msg() #check SUBed messages
if state.MQTT_ACTIVE:
try:
self.client.publish(topic, msg, retain, qos)
self.log.debugLog('Pub Topic: {} // Msg: {} // Retain: {} // QoS: {}'.format(topic, msg, retain, qos))
except Exception as e:
self.log.debugLog(e)
def pingMQTT(self):
if state.MQTT_ACTIVE:
try:
self.client.ping()
except Exception as e:
self.log.debugLog(e)
try:
res = self.client.wait_msg()
if(res == b"PINGRESP") :
self.log.debugLog('Ping Successful')
else:
self.log.debugLog('Ping response: {}'.format(res))
except Exception as e:
self.log.debugLog(e)
def pubStatus(self, timestamp='1970-01-01T00:00:00Z', lat='0', lon='0', alt='0', hdop='0', vdop='0', pdop='0', batteryLevel='0', x='0', y='0', z='0'):
try:
statusMsg = '{"timestamp":"' + timestamp + '","location":{"lat":' + lat + ',"lon":' + lon + ',"alt":' + alt + ',"hdop":' + hdop + ',"vdop":' + vdop + ',"pdop":' + pdop + '},"batteryLevel":' + batteryLevel + ',"sensor":{"accelerometer":{"x":' + x + ',"y":' + y + ',"z":' + z + '}}}'
#statusMsg = '{"timestamp":"{}","location":{"lat":{},"lon":{},"alt":{},"hdop":{},"vdop":{},"pdop":{}},"batteryLevel":{}}'.format(timestamp, lat, lon, alt, hdop, vdop, pdop, batteryLevel)
if state.WIFI_ACTIVE:
wifiAPs = None
#check if there is wifi
wifiAPs = wifi.wifiAPs()
if wifiAPs is not None:
statusAPs = '{"timestamp":"' + timestamp + '","location":{"lat":' + lat + ',"lon":' + lon + ',"alt":' + alt + ',"hdop":' + hdop + ',"vdop":' + vdop + ',"pdop":' + pdop + '},"batteryLevel":' + batteryLevel + ',"sensor":{"accelerometer":{"x":' + x + ',"y":' + y + ',"z":' + z + '}},' + wifiAPs + '}'
self.pubMQTT(topic=config.MQTT_PUB_STATUS_WIFIAPS, msg=statusAPs, retain=False, qos=0)
else:
self.pubMQTT(topic=config.MQTT_PUB_STATUS, msg=statusMsg, retain=False, qos=0)
else:
self.pubMQTT(topic=config.MQTT_PUB_STATUS, msg=statusMsg, retain=False, qos=0)
except Exception as e:
self.log.debugLog('Status PUB Exception: {}'.format(e))
```
#### File: project/lib/wifi.py
```python
from network import WLAN
import binascii
import machine
import time
import config
import state
from logger import Logger
#enable logger
log = Logger()
# Connect to WLAN
def connectWifi():
wlan = WLAN(mode=WLAN.STA)
ssids = wlan.scan()
for ssid in ssids:
print(ssid.ssid)
if ssid.ssid == config.SSID:
log.debugLog('WLAN: Network found!')
wlan.connect(ssid.ssid, auth=(ssid.sec, config.WLANPWD), timeout=5000)
while not wlan.isconnected():
machine.idle() # save power while waiting
state.CONNECTED = True
log.debugLog('WLAN: Connection succeeded!')
#break
return wlan
else:
log.debugLog('WLAN: Network not found!')
def disconnectWifi(wlan):
if wlan.isconnected():
state.CONNECTED = False
wlan.disconnect()
wlan.deinit()
def wifiAPsLoRa(wlan = None):
if wlan == None:
try:
wlan = WLAN(mode=WLAN.STA)
except Exception as e:
log.debugLog("Failed to start wifi for LoRa ")
return None
try:
ssids = wlan.scan()
except Exception as e:
log.debugLog("Failed to get wifiAPs for LoRa ")
return None
try:
#BSSID 0
a=ssids[0][1]
a=binascii.hexlify(a)
res=[]
for i in range (12):
if (i%2) == 0:
aux=int(a[i:i+2], 16)
res.append(aux)
else:
pass
#RSSI0
a=ssids[0][4]
RSSI0 = -a
res.append(RSSI0)
except Exception as e:
log.debugLog("Failed to get wifiAPs ")
return None
try:
#BSSID 1
a=ssids[1][1]
a=binascii.hexlify(a)
for i in range (12):
if (i%2) == 0:
aux=int(a[i:i+2], 16)
res.append(aux)
else:
pass
BSSID1=int(a, 16)
#RSSI 1
a=ssids[1][4]
RSSI1 = -a
res.append(RSSI1)
except Exception as e:
log.debugLog("Failed to get wifiAPs ")
return None
try:
#BSSID 2
a=ssids[2][1]
a=binascii.hexlify(a)
for i in range (12):
if (i%2) == 0:
aux=int(a[i:i+2], 16)
res.append(aux)
else:
pass
BSSID2=int(a, 16)
#RSSI 2
a=ssids[2][4]
RSSI2 = -a
res.append(RSSI2)
except Exception as e:
log.debugLog("Failed to get wifiAPs ")
return None
'''
if not state.WIFI_ACTIVE:
disconnectWifi(wlan)
'''
disconnectWifi(wlan) #remove if using Wifi for comms
return res
'''
Example out : ""wifiAPs":{"mac_1":"04:92:26:66:be:88","rssi_1":-67,"mac_2":"06:92:26:76:be:88","rssi_2":-67,"mac_3":"00:06:91:fa:5f:d0","rssi_3":-76}"
Used to mount status message when LoRa is not used
'''
def wifiAPs(wlan = None):
if wlan == None:
try:
wlan = WLAN(mode=WLAN.STA)
except Exception as e:
log.debugLog("Failed to start wifi for LoRa ")
return None
try:
ssids = wlan.scan()
except Exception as e:
log.debugLog("Failed to get wifiAPs ")
return None
try:
#BSSID 1
a=ssids[0][1]
a=binascii.hexlify(a)
res="\"wifiAPs\":{\"mac_1\":\""
for i in range (12):
if(i==10):
aux=str(a[i:i+2],16)
res+=aux+"\""+","
else:
if (i%2) == 0:
aux=str(a[i:i+2],16)
res+=aux+":"
else:
pass
#RSSI 1
a=ssids[0][4]
RSSI1 =str(a)
res+="\"rssi_1\":"+RSSI1+",\"mac_2\":\""
except Exception as e:
log.debugLog("Failed to get wifiAPs ")
return None
try:
#BSSID 2
a=ssids[1][1]
a=binascii.hexlify(a)
for i in range (12):
if(i==10):
aux=str(a[i:i+2],16)
res+=aux+"\""+","
else:
if (i%2) == 0:
aux=str(a[i:i+2],16)
res+=aux+":"
else:
pass
#RSSI 2
a=ssids[1][4]
RSSI2 =str(a)
res+="\"rssi_2\":"+RSSI2+",\"mac_3\":\""
except Exception as e:
log.debugLog("Failed to get wifiAPs ")
return None
try:
#BSSID 3
a=ssids[2][1]
a=binascii.hexlify(a)
for i in range (12):
if(i==10):
aux=str(a[i:i+2],16)
res+=aux+"\""+","
else:
if (i%2) == 0:
aux=str(a[i:i+2],16)
res+=aux+":"
else:
pass
#RSSI 3
a=ssids[2][4]
RSSI3 =str(a)
res+="\"rssi_3\":"+RSSI3+"}"
except Exception as e:
log.debugLog("Failed to get wifiAPs ")
return None
'''
if not state.WIFI_ACTIVE:
disconnectWifi(wlan)
'''
disconnectWifi(wlan) #remove if using Wifi for comms
return res
``` |
{
"source": "JMSchietekat/polycircles",
"score": 3
} |
#### File: polycircles/test/test_geometric_correctness.py
```python
import unittest
from polycircles import polycircles
from nose.tools import assert_equal, assert_almost_equal
from geopy.distance import distance
from geographiclib import geodesic
DECIMAL_POINT_ACCURACY = 5
class TestGeometry(unittest.TestCase):
def setUp(self):
self.latitude = 32.074322
self.longitude = 34.792081
self.radius_meters = 100
self.number_of_vertices = 36
polycircle = polycircles.Polycircle(latitude=self.latitude,
longitude=self.longitude,
radius=self.radius_meters,
number_of_vertices=self.number_of_vertices)
self.vertices = polycircle.to_lat_lon()
def test_number_of_vertices(self):
"""Does the number of vertices in the circle match the input+1?
The +1 is because the first vertex should be appended to the internal
list of vertices to properly "close" a polygon in KML.
Asserts that the number of vertices in the approximation polygon
matches the input."""
assert_equal(len(self.vertices), self.number_of_vertices+1)
def test_vertices_distance_from_center(self):
"""Does the distance of the vertices equals the input radius?
Asserts that the distance from each vertex to the center of the
circle equals the radius, in a given decimal digits accuracy."""
for vertex in self.vertices:
actual_distance = distance((self.latitude, self.longitude), (vertex[0], vertex[1])).meters
assert_almost_equal(actual_distance, self.radius_meters, DECIMAL_POINT_ACCURACY)
def test_azimuth_of_vertices(self):
"""Is the azimuth (bearing) to each vertex correct?
Asserts that for n vertices, the bearing to vertex number 0 <= i <= n
is 360/n*i."""
for vertex in self.vertices:
vertex_number = self.vertices.index(vertex)
expected_azimuth = 360.0/(len(self.vertices)-1) * vertex_number
actual_azimuth = (geodesic.Geodesic.WGS84.Inverse(
self.latitude, self.longitude, vertex[0], vertex[1]))['azi1']
if actual_azimuth < 0:
actual_azimuth = 360.0 + actual_azimuth
assert_almost_equal(expected_azimuth, actual_azimuth, places=DECIMAL_POINT_ACCURACY)
if __name__ == '__main__':
unittest.main(verbose=2)
``` |
{
"source": "JMSchoeffmann/uproot",
"score": 2
} |
#### File: uproot/uproot/_util.py
```python
from __future__ import absolute_import
def _tobytes(x):
if hasattr(x, "tobytes"):
return x.tobytes()
else:
return x.tostring()
``` |
{
"source": "jmschrei/bpnet-lite",
"score": 3
} |
#### File: bpnet-lite/bpnetlite/attributions.py
```python
import numpy
import numba
import torch
from tqdm import trange
from captum.attr import DeepLiftShap
class ProfileWrapper(torch.nn.Module):
"""A wrapper class that returns transformed profiles.
This class takes in a trained model and returns the weighted softmaxed
outputs of the first dimension. Specifically, it takes the predicted
"logits" and takes the dot product between them and the softmaxed versions
of those logits. This is for convenience when using captum to calculate
attribution scores.
Parameters
----------
model: torch.nn.Module
A torch model to be wrapped.
"""
def __init__(self, model):
super(ProfileWrapper, self).__init__()
self.model = model
def forward(self, X, X_ctl=None, **kwargs):
logits = self.model(X, X_ctl, **kwargs)[0]
logits = logits.reshape(X.shape[0], -1)
y = torch.nn.functional.log_softmax(logits, dim=-1)
y = torch.exp(y)
return (logits * y).sum(axis=-1).unsqueeze(-1)
class CountWrapper(torch.nn.Module):
"""A wrapper class that only returns the predicted counts.
This class takes in a trained model and returns only the second output.
For BPNet models, this means that it is only returning the count
predictions. This is for convenience when using captum to calculate
attribution scores.
Parameters
----------
model: torch.nn.Module
A torch model to be wrapped.
"""
def __init__(self, model):
super(CountWrapper, self).__init__()
self.model = model
def forward(self, X, X_ctl=None, **kwargs):
return self.model(X, X_ctl, **kwargs)[1]
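# --- Editor's hedged sketch (not part of the original module) ---
# Illustrates how the wrappers above are meant to be combined with captum's
# DeepLiftShap. `model`, `X`, and `references` are placeholders supplied by
# the caller, and the (logits, counts) output convention described in the
# docstrings is assumed.
def _example_profile_attributions(model, X, references):
    wrapper = ProfileWrapper(model)
    return DeepLiftShap(wrapper).attribute(X, references, target=0)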
@numba.jit('void(int64, int64[:], int64[:], int32[:, :], int32[:,], int32[:, :], float32[:, :, :])')
def _fast_shuffle(n_shuffles, chars, idxs, next_idxs, next_idxs_counts, counters, shuffled_sequences):
"""An internal function for fast shuffling using numba."""
for i in range(n_shuffles):
for char in chars:
n = next_idxs_counts[char]
next_idxs_ = numpy.arange(n)
next_idxs_[:-1] = numpy.random.permutation(n-1) # Keep last index same
next_idxs[char, :n] = next_idxs[char, :n][next_idxs_]
idx = 0
shuffled_sequences[i, idxs[idx], 0] = 1
for j in range(1, len(idxs)):
char = idxs[idx]
count = counters[i, char]
idx = next_idxs[char, count]
counters[i, char] += 1
shuffled_sequences[i, idxs[idx], j] = 1
def dinucleotide_shuffle(sequence, n_shuffles=10, random_state=None):
"""Given a one-hot encoded sequence, dinucleotide shuffle it.
This function takes in a one-hot encoded sequence (not a string) and
returns a set of one-hot encoded sequences that are dinucleotide
shuffled. The approach constructs a transition matrix between
nucleotides, keeps the first and last nucleotide constant, and then
randomly at uniform selects transitions until all nucleotides have
been observed. This is a Eulerian path. Because each nucleotide has
the same number of transitions into it as out of it (except for the
first and last nucleotides) the greedy algorithm does not need to
check at each step to make sure there is still a path.
This function has been adapted to work on PyTorch tensors instead of
numpy arrays. Code has been adapted from
https://github.com/kundajelab/deeplift/blob/master/deeplift/dinuc_shuffle.py
Parameters
----------
sequence: torch.tensor, shape=(k, -1)
The one-hot encoded sequence. k is usually 4 for nucleotide sequences
but can be anything in practice.
n_shuffles: int, optional
The number of dinucleotide shuffles to return. Default is 10.
random_state: int or None or numpy.random.RandomState, optional
The random seed to use to ensure determinism. If None, the
process is not deterministic. Default is None.
Returns
-------
shuffled_sequences: torch.tensor, shape=(n_shuffles, k, -1)
The shuffled sequences.
"""
if not isinstance(random_state, numpy.random.RandomState):
random_state = numpy.random.RandomState(random_state)
chars, idxs = torch.unique(sequence.argmax(axis=0), return_inverse=True)
chars, idxs = chars.numpy(), idxs.numpy()
next_idxs = numpy.zeros((len(chars), sequence.shape[1]), dtype=numpy.int32)
next_idxs_counts = numpy.zeros(max(chars)+1, dtype=numpy.int32)
for char in chars:
next_idxs_ = numpy.where(idxs[:-1] == char)[0]
n = len(next_idxs_)
next_idxs[char][:n] = next_idxs_ + 1
next_idxs_counts[char] = n
shuffled_sequences = numpy.zeros((n_shuffles, *sequence.shape), dtype=numpy.float32)
counters = numpy.zeros((n_shuffles, len(chars)), dtype=numpy.int32)
_fast_shuffle(n_shuffles, chars, idxs, next_idxs, next_idxs_counts,
counters, shuffled_sequences)
shuffled_sequences = torch.from_numpy(shuffled_sequences)
return shuffled_sequences
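# --- Editor's hedged sketch (not part of the original module) ---
# Builds a tiny one-hot encoded sequence of shape (4, L) and draws a few
# dinucleotide-preserving shuffles from it. The A/C/G/T row ordering is an
# assumption made only for this example.
def _example_dinucleotide_shuffle():
    alphabet = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    seq = "ACGTACGTAACCGGTT"
    X = torch.zeros(4, len(seq))
    for position, char in enumerate(seq):
        X[alphabet[char], position] = 1
    return dinucleotide_shuffle(X, n_shuffles=3, random_state=0)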
def calculate_attributions(model, X, args=None, model_output="profile",
hypothetical=False, n_shuffles=20, verbose=False, random_state=None):
"""Calculate attributions using DeepLift/Shap and a given model.
This function will calculate DeepLift/Shap attributions on a set of
sequences. It assumes that the model returns "logits" in the first output,
not softmax probabilities, and count predictions in the second output.
It will create GC-matched negatives to use as a reference and proceed
using the given batch size.
Parameters
----------
model: torch.nn.Module
The model to use, either BPNet or one of its variants.
X: torch.tensor, shape=(-1, 4, -1)
A one-hot encoded sequence input to the model.
args: tuple or None, optional
Additional arguments to pass into the forward function. If None,
pass nothing additional in. Default is None.
model_output: str, "profile" or "count", optional
If "profile", wrap the model using ProfileWrapper and calculate
attributions with respect to the profile. If "count", wrap the model
using CountWrapper and calculate attributions with respect to the
count. Default is "profile".
hypothetical: bool, optional
Whether to return attributions for all possible characters at each
position or only for the character that is actually at the sequence.
Practically, whether to return the returned attributions from captum
with the one-hot encoded sequence. Default is False.
n_shuffles: int, optional
The number of dinucleotide shuffles to use as the reference set. Default is 20.
batch_size: int, optional
The number of attributions to calculate at the same time. This is
limited by GPU memory. Default is 8.
verbose: bool, optional
Whether to display a progress bar.
random_state: int or None or numpy.random.RandomState, optional
The random seed to use to ensure determinism. If None, the
process is not deterministic. Default is None.
"""
if model_output == "profile":
wrapper = ProfileWrapper(model)
elif model_output == "count":
wrapper = CountWrapper(model)
else:
raise ValueError("model_output must be one of 'profile' or 'count'.")
ig = DeepLiftShap(wrapper)
attributions = []
with torch.no_grad():
for i in trange(len(X), disable=not verbose):
X_ = X[i:i+1]
reference = dinucleotide_shuffle(X_[0], n_shuffles=n_shuffles,
random_state=random_state).cuda()
X_ = X_.cuda()
if args is None:
args_ = None
else:
args_ = tuple([arg[i:i+1].cuda() for arg in args])
attr = ig.attribute(X_, reference, target=0, additional_forward_args=args_)
if not hypothetical:
attr = (attr * X_)
attributions.append(attr.cpu())
attributions = torch.cat(attributions)
return attributions
```
#### File: bpnet-lite/bpnetlite/performance.py
```python
import torch
from .losses import MNLLLoss
from .losses import log1pMSELoss
def smooth_gaussian1d(x, kernel_sigma, kernel_width):
"""Smooth a signal along the sequence length axis.
This function is a replacement for the scipy.ndimage.gaussian_filter1d
function that works on PyTorch tensors. It applies a Gaussian kernel
to each position which is equivalent to applying a convolution across
the sequence with weights equal to that of a Gaussian distribution.
Each sequence, and each channel within the sequence, is smoothed
independently.
Parameters
----------
x: torch.tensor, shape=(n_sequences, n_channels, seq_len)
A tensor to smooth along the last axis. n_channels must be at
least 1.
kernel_sigma: float
The standard deviation of the Gaussian to be applied.
kernel_width: int
The width of the kernel to be applied.
Returns
-------
x_smooth: torch.tensor, shape=(n_sequences, n_channels, seq_len)
The smoothed tensor.
"""
meshgrid = torch.arange(kernel_width, dtype=torch.float32,
device=x.device)
mean = (kernel_width - 1.) / 2.
kernel = torch.exp(-0.5 * ((meshgrid - mean) / kernel_sigma) ** 2.0)
kernel = kernel / torch.sum(kernel)
kernel = kernel.reshape(1, 1, kernel_width).repeat(x.shape[1], 1, 1)
return torch.nn.functional.conv1d(x, weight=kernel, groups=x.shape[1],
padding='same')
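# --- Editor's hedged sketch (not part of the original module) ---
# Smooths a random batch of non-negative signals using the same kernel
# settings that appear as defaults further down in this file; the shapes
# are illustrative only.
def _example_smooth_gaussian1d():
    x = torch.randn(2, 3, 500).abs()
    return smooth_gaussian1d(x, kernel_sigma=7, kernel_width=81)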
def batched_smoothed_function(logps, true_counts, f, smooth_predictions=False,
smooth_true=False, kernel_sigma=7, kernel_width=81,
exponentiate_logps=False, batch_size=200):
"""Batch a calculation with optional smoothing.
Given a set of predicted and true values, apply some function to them in
a batched manner and store the results. Optionally, either the true values
or the predicted ones can be smoothed.
Parameters
----------
logps: torch.tensor
A tensor of the predicted log probability values.
true_counts: torch.tensor
A tensor of the true values, usually integer counts.
f: function
A function to be applied to the predicted and true values.
smooth_predictions: bool, optional
Whether to apply a Gaussian filter to the predictions. Default is
False.
smooth_true: bool, optional
Whether to apply a Gaussian filter to the true values. Default is
False.
kernel_sigma: float, optional
The standard deviation of the Gaussian to be applied. Default is 7.
kernel_width: int, optional
The width of the kernel to be applied. Default is 81.
exponentiate_logps: bool, optional
Whether to exponentiate each batch of log probabilities. Default is
False.
batch_size: int, optional
The number of examples in each batch to evaluate at a time. Default
is 200.
Returns
-------
results: torch.tensor
The results of applying the function to the tensor.
"""
n = logps.shape[0]
results = torch.empty(*logps.shape[:2])
for start in range(0, n, batch_size):
end = start + batch_size
logps_ = logps[start:end]
true_counts_ = true_counts[start:end]
if smooth_predictions:
logps_ = torch.exp(logps_)
logps_ = smooth_gaussian1d(logps_, kernel_sigma, kernel_width)
if exponentiate_logps == False:
logps_ = torch.log(logps_)
else:
if exponentiate_logps:
logps_ = torch.exp(logps_)
if smooth_true:
true_counts_ = smooth_gaussian1d(true_counts_, kernel_sigma, kernel_width)
results[start:end] = f(logps_, true_counts_)
return results
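# --- Editor's hedged sketch (not part of the original module) ---
# Shows one intended use of the batching helper above: a smoothed, per-example
# Jensen-Shannon distance over a full set of predictions. The `logps` and
# `true_counts` tensors are placeholders supplied by the caller.
def _example_batched_jsd(logps, true_counts):
    return batched_smoothed_function(logps, true_counts,
        f=jensen_shannon_distance, smooth_predictions=True, smooth_true=True)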
def _kl_divergence(probs1, probs2):
"""
Computes the KL divergence in the last dimension of `probs1` and `probs2`
as KL(P1 || P2). `probs1` and `probs2` must be the same shape. For example,
if they are both A x B x L arrays, then the KL divergence of corresponding
L-arrays will be computed and returned in an A x B array. Does not
renormalize the arrays. If probs2[i] is 0, that value contributes 0.
"""
idxs = ((probs1 != 0) & (probs2 != 0))
quot_ = torch.divide(probs1, probs2)
quot = torch.ones_like(probs1)
quot[idxs] = quot_[idxs]
return torch.sum(probs1 * torch.log(quot), dim=-1)
def jensen_shannon_distance(logps, true_counts):
"""
Computes the Jensen-Shannon distance in the last dimension of `logps` and
`true_counts`, where `logps` holds log probabilities and `true_counts` holds
raw counts. Both must be the same shape. For example, if they are both
A x B x L arrays, then the JSD of corresponding L-arrays will be computed
and returned in an A x B array. This will renormalize the
arrays so that each subarray sums to 1. If the sum of a subarray is 0, then
the resulting JSD will be NaN.
"""
# Renormalize both distributions, and if the sum is NaN, put NaNs all around
probs1 = torch.exp(logps)
probs1_sum = torch.sum(probs1, dim=-1, keepdims=True)
probs1 = torch.divide(probs1, probs1_sum, out=torch.zeros_like(probs1))
probs2_sum = torch.sum(true_counts, dim=-1, keepdims=True)
probs2 = torch.divide(true_counts, probs2_sum, out=torch.zeros_like(true_counts))
mid = 0.5 * (probs1 + probs2)
return 0.5 * (_kl_divergence(probs1, mid) + _kl_divergence(probs2, mid))
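# --- Editor's hedged worked example (not part of the original module) ---
# Identical distributions give a distance of ~0; completely disjoint ones
# approach log(2) (natural log), since no log-base correction is applied here.
def _example_jsd_sanity_check():
    true_counts = torch.tensor([[1., 1., 1., 1.]])
    logps = torch.log(torch.tensor([[0.25, 0.25, 0.25, 0.25]]))
    return jensen_shannon_distance(logps, true_counts)  # expected ~0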
def pearson_corr(arr1, arr2):
"""The Pearson correlation between two tensors across the last axis.
Computes the Pearson correlation in the last dimension of `arr1` and `arr2`.
`arr1` and `arr2` must be the same shape. For example, if they are both
A x B x L arrays, then the correlation of corresponding L-arrays will be
computed and returned in an A x B array.
Parameters
----------
arr1: torch.tensor
One of the tensors to correlate.
arr2: torch.tensor
The other tensor to correlate.
Returns
-------
correlation: torch.tensor
The correlation for each element, calculated along the last axis.
"""
mean1 = torch.mean(arr1, axis=-1).unsqueeze(-1)
mean2 = torch.mean(arr2, axis=-1).unsqueeze(-1)
dev1, dev2 = arr1 - mean1, arr2 - mean2
sqdev1, sqdev2 = torch.square(dev1), torch.square(dev2)
numer = torch.sum(dev1 * dev2, axis=-1) # Covariance
var1, var2 = torch.sum(sqdev1, axis=-1), torch.sum(sqdev2, axis=-1) # Variances
denom = torch.sqrt(var1 * var2)
# Divide numerator by denominator, but use 0 where the denominator is 0
correlation = torch.zeros_like(numer)
correlation[denom != 0] = numer[denom != 0] / denom[denom != 0]
return correlation
def spearman_corr(arr1, arr2):
"""The Spearman correlation between two tensors across the last axis.
Computes the Spearman correlation in the last dimension of `arr1` and `arr2`.
`arr1` and `arr2` must be the same shape. For example, if they are both
A x B x L arrays, then the correlation of corresponding L-arrays will be
computed and returned in an A x B array.
A dense ordering is used and ties are broken based on position in the
tensor.
Parameters
----------
arr1: torch.tensor
One of the tensors to correlate.
arr2: torch.tensor
The other tensor to correlate.
Returns
-------
correlation: torch.tensor
The correlation for each element, calculated along the last axis.
"""
ranks1 = arr1.argsort().argsort().type(torch.float32)
ranks2 = arr2.argsort().argsort().type(torch.float32)
return pearson_corr(ranks1, ranks2)
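# --- Editor's hedged worked example (not part of the original module) ---
# A perfectly monotonic but non-linear pair gives a Spearman correlation of 1
# while the Pearson correlation stays a little below 1.
def _example_correlation_sanity_check():
    a = torch.tensor([[1., 2., 3., 4.]])
    b = torch.tensor([[1., 4., 9., 16.]])
    return pearson_corr(a, b), spearman_corr(a, b)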
def mean_squared_error(arr1, arr2):
"""The mean squared error between two tensors averaged along the last axis.
Computes the element-wise squared error between two tensors and averages
these across the last dimension. `arr1` and `arr2` must be the same shape.
For example, if they are both A x B x L arrays, then the mean squared error of
corresponding L-arrays will be computed and returned in an A x B array.
Parameters
----------
arr1: torch.tensor
A tensor of values.
arr2: torch.tensor
Another tensor of values.
Returns
-------
mse: torch.tensor
The L2 distance between two tensors.
"""
return torch.mean(torch.square(arr1 - arr2), axis=-1)
def calculate_performance_measures(logps, true_counts, pred_log_counts,
kernel_sigma=7, kernel_width=81, smooth_true=False,
smooth_predictions=False, measures=None):
"""
Computes some evaluation metrics on a set of positive examples, given the
predicted profiles/counts, and the true profiles/counts.
Arguments:
`logps`: a tensor containing the predicted profiles for each example and
task, as LOG probabilities, with the profile along the last axis
`true_counts`: a tensor of the same shape containing the true profiles
for each example and task, as RAW counts
`pred_log_counts`: a tensor containing the predicted LOG total counts
for each example and task
`kernel_sigma`, `kernel_width`: the standard deviation and width of the
Gaussian kernel used for optional smoothing
`smooth_true`: if True, smooth the true profiles before computing the
JSD and the profile correlations; true profiles are not smoothed for any
other measure
`smooth_predictions`: if True, smooth the predicted profiles before
computing the MNLL, JSD, and profile correlations; predicted profiles
are not smoothed for any other measure
`measures`: an optional list of measure names to compute; if None, all
measures are computed
Returns a dictionary which, depending on `measures`, contains:
`profile_mnll`: a N x T array of the multinomial negative log-likelihood
of the true profile under the predicted probabilities
`profile_jsd`: a N x T array of the Jensen-Shannon divergence between
the predicted and true profiles
`profile_pearson`, `profile_spearman`: N x T arrays of the Pearson and
Spearman correlations between the predicted and true profiles
`count_pearson`, `count_spearman`, `count_mse`: the Pearson correlation,
Spearman correlation, and mean squared error of the predicted and true
(log) total counts, per task
"""
measures_ = {}
if measures is None or 'profile_mnll' in measures:
measures_['profile_mnll'] = batched_smoothed_function(logps=logps,
true_counts=true_counts, f=MNLLLoss,
smooth_predictions=smooth_predictions, smooth_true=False,
kernel_sigma=kernel_sigma, kernel_width=kernel_width)
if measures is None or 'profile_jsd' in measures:
measures_['profile_jsd'] = batched_smoothed_function(logps=logps,
true_counts=true_counts, f=jensen_shannon_distance,
smooth_predictions=smooth_predictions, smooth_true=smooth_true,
kernel_sigma=kernel_sigma, kernel_width=kernel_width)
if measures is None or 'profile_pearson' in measures:
measures_['profile_pearson'] = batched_smoothed_function(logps=logps,
true_counts=true_counts, f=pearson_corr,
smooth_predictions=smooth_predictions, smooth_true=smooth_true,
exponentiate_logps=True, kernel_sigma=kernel_sigma,
kernel_width=kernel_width)
if measures is None or 'profile_spearman' in measures:
measures_['profile_spearman'] = batched_smoothed_function(logps=logps,
true_counts=true_counts, f=spearman_corr,
smooth_predictions=smooth_predictions, smooth_true=smooth_true,
exponentiate_logps=True, kernel_sigma=kernel_sigma,
kernel_width=kernel_width)
# Total count correlations/MSE
true_log_counts = torch.log(true_counts.sum(dim=-1)+1)
if measures is None or 'count_pearson' in measures:
measures_['count_pearson'] = pearson_corr(pred_log_counts.T,
true_log_counts.T)
if measures is None or 'count_spearman' in measures:
measures_['count_spearman'] = spearman_corr(pred_log_counts.T,
true_log_counts.T)
if measures is None or 'count_mse' in measures:
measures_['count_mse'] = mean_squared_error(pred_log_counts.T,
true_log_counts.T)
return measures_
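# --- Editor's hedged sketch (not part of the original module) ---
# Requests only a subset of the measures; the input tensors are placeholders
# following the shapes described in the docstring above.
def _example_selected_measures(logps, true_counts, pred_log_counts):
    return calculate_performance_measures(logps, true_counts, pred_log_counts,
        smooth_predictions=True, measures=['profile_jsd', 'count_pearson',
        'count_mse'])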
``` |
{
"source": "jmschrei/discern",
"score": 3
} |
#### File: discern/analysis/cancer_analyses.py
```python
import matplotlib
matplotlib.use('pdf')
from discern import *
import time
import sys
from LNS import *
from scipy.stats import fisher_exact, f_oneway as anova
# I personally find these tests to be extremely poor indicators of LNS or DISCERN
# effectiveness. DISCERN and LNS try to identify perturbations in the conditional
# dependence structure, and identify 'driver' genes. These genes do not change
# their expression levels when a cell becomes cancerous. This means that they shouldn't
# be correlated with survival time significantly. It thus makes little sense to look
# at the overlap between univariate survival time p-values. I also find the idea of
# a univariate survival time model to be somewhat shaky. Using these analyses indicates
# that you are OK with these concerns.
def survival( expression, survival, outfile='CoxPH_p_vals.csv' ):
'''
Take in a filename for expression data, and a filename for survival data,
and fit a univariate Cox Proportional Hazards model to identify which
genes are associated with survival time.
'''
data = load_data( expression, gene_delimiter="|" ).T
# This is what Maxim does with his data
#data[ data == 0 ] = 1e-5
#data = preprocess_data( data, mean_threshold=9.64, log=True, winsorize=2.5, merge_duplicates=True )
#data['ID'] = map( lambda s: s.split('-')[2], data.index )
# Preprocess the data by adding a pseudocount, filtering out low expression
# levels, merging duplicate genes, and log-transforming the data
data = preprocess_data( data, pseudocount=1, mean_threshold=10,
merge_duplicates=True, log=True )
# Split the data into those with cancer and those without cancer
null = data.ix[[ sample for sample in data.index if sample.split('-')[3][0] == '1' ]]
cancer = data.ix[[ sample for sample in data.index if sample.split('-')[3][0] != '1' ]]
# Make a column for ID based on the barcode of the patient
data['ID'] = map( lambda s: '-'.join( s.split('-')[:3] ), data.index )
data = data.ix[[ sample for sample in data.index if sample.split('-')[3][0] != '1' ]]
data = data.drop( ['?'], axis=1 )
# Load the clinical data
clinical = pd.read_table( "LUAD\\LUAD.clin.merged.txt", index_col=0 ).T
# Make an ID column based on the patient barcode
clinical['ID'] = map( str.upper, clinical['patient.bcrpatientbarcode'] )
clinical['patient.survival_time'] = clinical[['patient.daystodeath', 'patient.daystolastfollowup']].max( axis=1 )
clinical = clinical[ clinical['patient.survival_time'] > 0 ]
clinical = clinical[['ID', 'patient.survival_time', 'patient.vitalstatus']]
# Do an outer join on the clinical data and expression data, using the ID
# column as the pivot
data = data.merge( clinical, how='outer' )
# Remove any rows with null values, and remove the ID column
data = data.dropna( how='any' ).drop('ID', axis=1)
# Cast survival time and vital status as integers
data['patient.survival_time'] = map( int, data['patient.survival_time'] )
data['patient.vitalstatus'] = map( int, data['patient.vitalstatus'] == 'dead' )
# Pull out the expression matrix, survival time, and censoring information
survival_time = np.array( data['patient.survival_time'] )
alive = np.array( data['patient.vitalstatus'] )
gene_exp = data[[ gene for gene in data.columns if 'patient' not in gene ]]
# Pass the info into the coxph regression function, which returns a
# pandas dataframe, and save that to a csv.
info = coxph_regression( gene_exp, survival_time, alive, n_cores=-1 )
info.to_csv( outfile )
def analysis( null, cancer, l=0.05, name="analysis" ):
'''
Load up a dataset and pass it to the DISCERN, ANOVA, and LNS methods. Pass
in the filename to the csv file which contains the gene expression data for
the healthy and cancerous patients respectively. Also pass in a lambda value
for the run to be done at.
'''
# Load up the data
null = pd.read_csv( null, index_col=0 ).T
cancer = pd.read_csv( cancer, index_col=0 ).T
# Ensure that the same genes are in both
assert set( null.columns ) == set( cancer.columns ), "Gene sets not identical"
# Pull the gene names from the columns
gene_names = null.columns
# Run the various analyses
run_discern( null, cancer, gene_names, l, "DISCERN_{}.csv".format( name ) )
run_anova( null, cancer, gene_names, "ANOVA_{}.csv".format( name ) )
run_lns( null, cancer, gene_names, "LNS_{}.csv".format( name ) )
def run_anova( null, cancer, gene_names, outfile ):
'''
Take in a null preprocessed dataset and a cancer preprocessed dataset and
runs ANOVA.
'''
n, d = null.shape
anova_scores = np.zeros( d )
for i in xrange( d ):
anova_scores[i] = anova( null.values[:,i], cancer.values[:,i] )[0]
data = pd.DataFrame( {'ANOVA': anova_scores } )
data.index = gene_names
data.to_csv( outfile )
def run_lns( null, cancer, gene_names, outfile ):
'''
Take in a null preprocessed dataset and a cancer preprocessed dataset and
run local network similarity.
'''
n, d = null.shape
lns = LNS()
scores = lns.fit_score( null, cancer, gene_names )
scores.to_csv( outfile )
def run_discern( null, cancer, gene_names, lambda_opt, outfile ):
'''
Take in a null preprocessed dataset and a cancer preprocessed dataset and
a lambda_opt and run DISCERN using a 50-50 train-test split, saving the
resulting DISCERN scores to an appropriate CSV file.
'''
null_training = null[::2]
null_testing = null[1::2]
cancer_training = cancer[::2]
cancer_testing = cancer[1::2]
discern = DISCERN()
scores = discern.fit_score( null_training, null_testing, cancer_training,
cancer_testing, gene_names, n_cores=8, l=lambda_opt )
scores.to_csv( outfile )
def survival_expression_comparison( discern, anova, lns, survival, name ):
'''
Compare the DISCERN scores for genes to the p-values obtained by running
Cox Proportional Hazards using survival time. Look at the overlap between
the identified genes using p-value and enrichment. Also compare LNS and
ANOVA scores in the same way, to allow for a proper comparison.
Pass in the filename where the DISCERN, ANOVA, LNS, and survival scores
are. Make sure that DISCERN scores are stored under a column named 'T2',
ANOVA scores are stored under a column called 'ANOVA', LNS scores are
stored under a column named 'p', and survival p-values are stored under
a column named 'p-value' in their respective csv files.
'''
import seaborn as sns
survival = pd.read_table( survival, sep=' ', names=['gene', 'p-value'] )
discern = pd.read_csv( discern, index_col=0 )
anova = pd.read_csv( anova, index_col=0 )
lns = pd.read_csv( lns, index_col=0 )
survival_top = set( survival[ survival['p-value'] < 0.05 ].gene ).intersection( set( discern.index ) )
discern = discern.sort( 'T2' )
anova = anova.sort( 'ANOVA' )
lns = lns.sort( 'p' )
n = len( discern.values )
cn = len(survival_top)
discern_p_vals, anova_p_vals, lns_p_vals = [], [], []
discern_enrichment, anova_enrichment, lns_enrichment = [], [], []
discern_overlap = 1 if discern.index[0] in survival_top else 0
anova_overlap = 1 if anova.index[0] in survival_top else 0
lns_overlap = 1 if lns.index[0] in survival_top else 0
for i in xrange( 1, n ):
discern_overlap += 1 if discern.index[i] in survival_top else 0
anova_overlap += 1 if anova.index[i] in survival_top else 0
lns_overlap += 1 if lns.index[i] in survival_top else 0
table = [[discern_overlap, cn-discern_overlap], [i-discern_overlap, n-i-cn+discern_overlap]]
discern_p_vals.append( -np.log10( fisher_exact( table )[1] ) )
discern_enrichment.append( discern_overlap / (1.*cn*i/n) )
table = [[anova_overlap, cn-anova_overlap], [i-anova_overlap, n-i-cn+anova_overlap]]
anova_p_vals.append( -np.log10( fisher_exact( table )[1] ) )
anova_enrichment.append( anova_overlap / (1.*cn*i/n) )
table = [[lns_overlap, cn-lns_overlap], [i-lns_overlap, n-i-cn+lns_overlap]]
lns_p_vals.append( -np.log10( fisher_exact( table )[1] ) )
lns_enrichment.append( lns_overlap / (1.*cn*i/n) )
plt.title( "Overlap P-Value Using Top N Genes" )
plt.xlabel( "N" )
plt.ylabel( "-log10( p-value )" )
plt.plot( discern_p_vals, alpha=0.2, color='r', label='DISCERN' )
plt.plot( anova_p_vals, alpha=0.2, color='g', label='ANOVA' )
plt.plot( lns_p_vals, alpha=0.2, color='b', label='LNS' )
plt.legend()
plt.savefig( name+"_p_value_plot.pdf" )
plt.clf()
plt.title( "Overlap Enrichment Using Top N Genes" )
plt.xlabel( "N" )
plt.ylabel( "Enrichment" )
plt.plot( discern_enrichment[:500], alpha=0.2, color='r', label='DISCERN' )
plt.plot( anova_enrichment[:500], alpha=0.2, color='g', label='ANOVA' )
plt.plot( lns_enrichment[:500], alpha=0.2, color='b', label='LNS' )
plt.legend()
plt.savefig( name+"_enrichment_plot.pdf" )
plt.clf()
def plot_discern_distributions( aml, brca, luad ):
'''
Plot some useful visualizations of the DISCERN scores as a scatter matrix, where
the diagonal is the kernel density of the scores, and the off-diagonals are
scatter plots comparing two conditions. Pass in filenames for where the DISCERN
scores are stored.
'''
from pandas.tools.plotting import scatter_matrix
import seaborn as sns
AML = pd.read_csv( aml, index_col=0 )
BRCA = pd.read_csv( brca, index_col=0 )
LUAD = pd.read_csv( luad, index_col=0 )
AML['Gene'], BRCA['Gene'], LUAD['Gene'] = AML.index, BRCA.index, LUAD.index
AML['AML'], BRCA['BRCA'], LUAD['LUAD'] = np.log10(AML['T2']), np.log10(BRCA['T2']), np.log10(LUAD['T2'])
AML, BRCA, LUAD = AML[['Gene', 'AML']], BRCA[['Gene', 'BRCA']], LUAD[['Gene', 'LUAD']]
data = pd.merge( AML, BRCA, on='Gene' )
data = pd.merge( data, LUAD, on='Gene' )
with sns.axes_style( "whitegrid" ):
scatter_matrix( data, alpha=0.2, figsize=(6,6), diagonal='kde', color='c', density_kwds={'c': 'r', 'lw':1}, lw=0, grid=False )
plt.savefig( 'DISCERN_Scores.pdf' )
plt.clf()
print "TOP 10 GENES SORTED BY EACH METHOD"
print "AML"
print data.sort( 'AML', ascending=False )[['Gene', 'AML']][:10]
print
print "BRCA"
print data.sort( 'BRCA', ascending=False )[['Gene', 'BRCA']][:10]
print
print "LUAD"
print data.sort( 'LUAD', ascending=False )[['Gene', 'LUAD']][:10]
if __name__ == "__main__":
# Define where your AML, BRCA, and LUAD data are. Change these for your
# own file system.
AML_NORMAL_FILEPATH = "AML\\AML1_normal.csv"
AML_CANCER_FILEPATH = "AML\\AML1_cancer.csv"
AML_CLINICAL_FILEPATH = "AML\\aml_all_genes_survival.txt"
BRCA_NORMAL_FILEPATH = "BRCA\\BRCA_data_normal.csv"
BRCA_CANCER_FILEPATH = "BRCA\\BRCA_data_cancer_RESTRICTED.csv"
BRCA_CLINICAL_FILEPATH = "BRCA\\brca_all_genes_survival.txt"
LUAD_NORMAL_FILEPATH = "LUAD\\luad_data_normal.csv"
LUAD_CANCER_FILEPATH = "LUAD\\luad_data_cancer.csv"
LUAD_CLINICAL_FILEPATH = "LUAD\\luad_all_genes_survival.txt"
# Now get DISCERN, ANOVA, and LNS scores for these methods
analysis( AML_NORMAL_FILEPATH, AML_CANCER_FILEPATH, 0.1, "AML" )
analysis( BRCA_NORMAL_FILEPATH, BRCA_CANCER_FILEPATH, 0.05, "BRCA" )
analysis( LUAD_NORMAL_FILEPATH, LUAD_CANCER_FILEPATH, 0.05, "LUAD" )
# Now that we have the scores, do the comparison between survival
# data and expression data.
survival_expression_comparison( "DISCERN_AML.csv", "ANOVA_AML.csv", "LNS_AML.csv", AML_CLINICAL_FILEPATH, "AML" )
survival_expression_comparison( "DISCERN_BRCA.csv", "ANOVA_BRCA.csv", "LNS_BRCA.csv", BRCA_CLINICAL_FILEPATH, "BRCA" )
survival_expression_comparison( "DISCERN_LUAD.csv", "ANOVA_LUAD.csv", "LNS_LUAD.csv", LUAD_CLINICAL_FILEPATH, "LUAD" )
# Now plot distributions of DISCERN scores for all cancer subtypes together.
plot_discern_distributions( "DISCERN_AML.csv", "DISCERN_BRCA.csv", "DISCERN_LUAD.csv" )
```
#### File: discern/analysis/synthetic_analyses.py
```python
import matplotlib
matplotlib.use('pdf')
import numpy
import random
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from yabn import *
from discern import *
from LNS import *
from scipy.stats import f_oneway
random.seed(0)
numpy.random.seed(0)
def barchart( scores, method_names, node_names, title, normalize=True ):
'''
Take in the scores from several methods and plot them as grouped bars.
'''
sns.set( style='white', context='talk' )
plt.figure( figsize=(12, 6) )
n = len( scores )
items = zip( xrange(n), scores, method_names, sns.color_palette('husl', 3) )
for i, score, name, color in items:
if normalize:
score /= score.sum()
x = np.arange( 0.5, score.shape[0]+0.5 )
plt.bar( x+i*(0.8/n), score, width=0.8/n, alpha=0.5, edgecolor='w', label=name, facecolor=color )
plt.legend()
plt.title( title )
plt.xticks( x+(1.0/n), node_names )
plt.savefig( title + '.pdf' )
def score_network_pair( networka, networkb, node_names, i=100, j=100 ):
'''
This will take in a network and produce DISCERN and ANOVA scores for
each node in the network. The user may set the number of samples
generated for each network through adjusting i and j. Pass in the
order of the node names to get the scores in the proper order.
'''
node_names_a = [ node.name for node in networka.nodes ]
node_names_b = [ node.name for node in networkb.nodes ]
# Get the data from sampling the two networks
a_data = numpy.array([ networka.sample() for n in xrange( i ) ])
b_data = numpy.array([ networkb.sample() for n in xrange( j ) ])
# Convert this data into a dataframe for DISCERN
a_data = pd.DataFrame( a_data, columns=node_names_a )
b_data = pd.DataFrame( b_data, columns=node_names_b )
# Initialize DISCERN and use it on the data
discern = DISCERN()
#l, sse = discern.lambda_opt( a_data[::2], node_names_a, n_cores=6 )
discern.fit_score( a_data[::2], a_data[1::2], b_data[::2], b_data[1::2],
node_names_a, l=0.4, n_cores=8 )
# Get the LNS scores
lns = LNS()
lns.fit_score( a_data, b_data, node_names_a )
# Unpack the two score vectors into a numpy array
discern_scores = numpy.array(discern._scores.ix[ node_names ]['T2'])
anova_scores = numpy.array([ f_oneway( a_data[name], b_data[name] )[0] for name in node_names ])
lns_scores = numpy.array( lns._scores.ix[ node_names ]['r'] )
return discern_scores, anova_scores, lns_scores
def seven_star_tests():
'''
These tests work on a star network, where one node influences a second node,
which then influences three nodes, and there are two independent nodes, which
switch identities in the graph. Basically, an influencer no longer influences
and an independent node takes its place.
'''
# Define the two networks we will use
networka = Network( "A" )
networkb = Network( "B" )
# Define all seven nodes, which are the same between the two networks
n1 = Node( NormalDistribution( 12, 0.7 ), name="n1" )
n2 = Node( NormalDistribution( 5, 0.3 ), name="n2" )
n3 = Node( NormalDistribution( 17, 0.9 ), name="n3" )
n4 = Node( NormalDistribution( 22, 1.2 ), name="n4" )
n5 = Node( NormalDistribution( 12, 0.3 ), name="n5" )
n6 = Node( NormalDistribution( 27, 3.2 ), name="n6" )
n7 = Node( NormalDistribution( 88, 1.2 ), name="n7" )
# We'll use a single edge of unit variance for this simple test
e = 1.0
# Add all the nodes to the networks
networka.add_nodes( [n1, n2, n3, n4, n5, n6, n7] )
networkb.add_nodes( [n1, n2, n3, n4, n5, n6, n7] )
# Add all the edges to network A
networka.add_edge( n1, n3, e )
networka.add_edge( n3, n5, e )
networka.add_edge( n3, n6, e )
networka.add_edge( n3, n7, e )
# Add all the edges to network B
networkb.add_edge( n4, n3, e )
networkb.add_edge( n3, n5, e )
networkb.add_edge( n3, n6, e )
networkb.add_edge( n3, n7, e )
# Finalize the internals of the models
networka.bake()
networkb.bake()
# Define the ordered names
node_names = [ "n1", "n2", "n3", "n4", "n5", "n6", "n7" ]
# Score the network
discern, anova, lns = score_network_pair( networka, networkb, node_names )
# Plot the scores
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, "n4-n3+ n1-n3-" )
# Time for a second test, involving a network where only an edge between
# n4 and n1 is added and nothing is removed.
networkb = Network( 'b' )
# Add the nodes in
networkb.add_nodes( [n1, n2, n3, n4, n5, n6, n7] )
# Add the edges in
networkb.add_edge( n1, n3, e )
networkb.add_edge( n3, n5, e )
networkb.add_edge( n3, n6, e )
networkb.add_edge( n3, n7, e )
networkb.add_edge( n4, n1, e )
# Finalize the model
networkb.bake()
# Score the nodes
discern, anova, lns = score_network_pair( networka, networkb, node_names )
# Plot the scores
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, "n4-n1+" )
def independent_no_perturbation_test( name="independent" ):
'''
This will test a network which has no edges, and no perturbation, to see
that the prediction power is not random.
'''
network = Network( 'independent' )
# Create 12 distributions of random size
e = NormalDistribution( 50, 1.2 )
n1 = Node( e, name="n1" )
n2 = Node( e, name="n2" )
n3 = Node( e, name="n3" )
n4 = Node( e, name="n4" )
n5 = Node( e, name="n5" )
n6 = Node( e, name="n6" )
n7 = Node( e, name="n7" )
n8 = Node( e, name="n8" )
n9 = Node( e, name="n9" )
n10 = Node( e, name="n10" )
n11 = Node( e, name="n11" )
n12 = Node( e, name="n12" )
# Add the nodes and finalize the structure of the data
network.add_nodes( [n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11,n12] )
network.bake()
node_names = [ 'n{}'.format( i ) for i in xrange( 1, 13 ) ]
# Get the scores
discern, anova, lns = score_network_pair( network, network, node_names )
# Plot it
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, name, normalize=False )
def three_component_test( name="three_component"):
'''
This will test a network which has thirteen nodes and several perturbations.
'''
networka = Network( 'a' )
networkb = Network( 'b' )
# Create some nodes
emission = NormalDistribution( 10, 1 )
n1 = Node( emission, name="n1" )
n2 = Node( emission, name="n2" )
n3 = Node( emission, name="n3" )
n4 = Node( emission, name="n4" )
n5 = Node( emission, name="n5" )
n6 = Node( emission, name="n6" )
n7 = Node( emission, name="n7" )
n8 = Node( emission, name="n8" )
n9 = Node( emission, name="n9" )
n10 = Node( emission, name="n10" )
n11 = Node( emission, name="n11" )
n12 = Node( emission, name="n12" )
n13 = Node( emission, name="n13" )
# Unpack nodes
node_names = [ 'n{}'.format( i ) for i in xrange( 1, 14 ) ]
# Add the nodes to the module
networka.add_nodes( [n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11,n12,n13] )
networkb.add_nodes( [n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11,n12,n13] )
# Define a uniform edge for simplicity
e = 1.0
# Add edges to the models
networka.add_edge( n1, n2, e )
networka.add_edge( n2, n3, e )
networka.add_edge( n4, n2, e )
networka.add_edge( n5, n6, e )
networka.add_edge( n6, n7, e )
networka.add_edge( n7, n9, e )
networka.add_edge( n7, n10, e )
networka.add_edge( n12, n11, e )
networka.add_edge( n13, n12, e )
networkb.add_edge( n1, n2, e )
networkb.add_edge( n4, n2, e )
networkb.add_edge( n5, n6, e )
networkb.add_edge( n6, n7, e )
networkb.add_edge( n7, n9, e )
networkb.add_edge( n7, n10, e )
networkb.add_edge( n12, n11, e )
networkb.add_edge( n13, n12, e )
networkb.add_edge( n4, n11, e )
networkb.add_edge( n5, n8, e )
networkb.add_edge( n8, n7, e )
# Finalize the models
networka.bake()
networkb.bake()
discern, anova, lns = score_network_pair( networka, networkb, node_names )
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, name )
def DCG( relevance ):
'''
Calculates the Discounted Cumulative Gain by comparing a 'true' ranking
to a predicted ranking.
'''
n = len( relevance )
return sum( (2.**relevance[i]-1.) / (i+1) for i in xrange( n ) )
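# --- Editor's hedged worked example (not part of the original script) ---
# With the linear discount used above, ranking the most-perturbed genes first
# scores higher: DCG([3, 2, 1]) = 7/1 + 3/2 + 1/3 ~= 8.83, while the reversed
# ordering gives DCG([1, 2, 3]) = 1/1 + 3/2 + 7/3 ~= 4.83.
def _example_dcg():
    return DCG( numpy.array([3., 2., 1.]) ), DCG( numpy.array([1., 2., 3.]) )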
def large_sparse_network( n=5000, m=50, low=1, high=10, name="large_sparse" ):
'''
Create a large synthetic network of n nodes, where m of them are perturbed
between the two graphs by changing between ~low~ and ~high~ edges.
'''
# Randomly generate normal distributions for the node emissions
# Means based on a gamma distribution, stds based on a lognormal
# so that they are both bounded by 1
means = [50]*n
stds = [0.5]*n
#means = numpy.random.gamma( 50, 3.0, n )
#stds = numpy.random.lognormal( 0.5, 0.1, n )
# Randomly choose M genes to perturb, and then for each perturbed gene
# randomly choose the number of edges to perturb
perturbed = numpy.random.choice( np.arange( n ), size=m, replace=False )
n_perturbed_edges = numpy.random.randint( low, high, m )
# Randomly generate the graph structure from beta distributions. All
# weights are rounded to 1, instead of being variable.
null_edges = numpy.tril( numpy.around( numpy.random.beta( 1, 3, (n,n) ) ) )
numpy.fill_diagonal( null_edges, 0 )
alternate_edges = null_edges.copy()
perturb_count = { i:0 for i in xrange(n) }
to_perturb_count = { i:0 for i in xrange(n) }
# For each perturbed edge, randomly select between `low` and `high` number
# of edges to perturb, and perturb them--in this case just a binary flip.
for i, k in it.izip( perturbed, n_perturbed_edges ):
perturbed_id = numpy.random.choice( numpy.arange( i ), size=min(k, i), replace=False )
alternate_edges[i, perturbed_id] = numpy.abs( alternate_edges[i, perturbed_id] - 1 )
perturb_count[i] += perturbed_id.shape[0]
for index in perturbed_id:
to_perturb_count[index] += 1
total_perturb = { i: perturb_count[i]+to_perturb_count[i] for i in xrange(n) }
if numpy.triu( alternate_edges ).sum() > 0:
raise SyntaxError( "Matrix is not a DAG.")
# Initiate the network objects
null = Network( "Null" )
alternate = Network( "Alternate" )
# Create all the nodes
nodes = [ Node( NormalDistribution( mu, sigma ), name="n{}".format( i ) ) for i, mu, sigma in it.izip( xrange(n), means, stds ) ]
node_names = [ node.name for node in nodes ]
# Add them to the model
null.add_nodes( nodes )
alternate.add_nodes( nodes )
# Create all the edges, one at a time
for i in xrange( n ):
for j in xrange( n ):
p = null_edges[i, j]
if p > 0:
null.add_edge( nodes[i], nodes[j], p )
p = alternate_edges[i, j]
if p > 0:
alternate.add_edge( nodes[i], nodes[j], p )
# Finalize the internal structure of the network
null.bake()
alternate.bake()
# Score the network pair according to the metrics
discern, anova, lns = score_network_pair( null, alternate, node_names, i=100, j=300 )
# Make a plot of the scores across the nodes
#barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, name )
scores = pd.DataFrame({ 'DISCERN': discern, 'ANOVA': anova,
'LNS': lns, 'FROM': perturb_count.values(), 'TO': to_perturb_count.values(),
'TOTAL': total_perturb.values() })
# Calculate the Discounted Cumulative Gain matrix. DCG is a way of measuring
# a ranking of items if you know their true ordering. In this case, genes
# should be ordered by the true number of perturbations to them, and we
# compare the ordering we get from DISCERN, ANOVA, and LNS to that. DCG is
# implemented in the DCG function above. In this case we divide nodes into
# FROM nodes, which are the ranking of nodes according to perturbation in
# number of edges LEAVING that nodes, TO nodes, which is perturbation in number
# of edges going TO that node, and TOTAL which includes both. DISCERN is
# predicted to identify FROM nodes better than other techniques, as those
# should be similar to driver mutations.
DCG_Matrix = pd.DataFrame( { 'FROM': [ DCG( scores.sort( 'DISCERN', ascending=False )['FROM'].values ),
DCG( scores.sort( 'ANOVA', ascending=False )['FROM'].values ),
DCG( scores.sort( 'LNS', ascending=False )['FROM'].values ) ],
'TO': [ DCG( scores.sort( 'DISCERN', ascending=False )['TO'].values ),
DCG( scores.sort( 'ANOVA', ascending=False )['TO'].values ),
DCG( scores.sort( 'LNS', ascending=False )['TO'].values ) ],
'TOTAL': [ DCG( scores.sort( 'DISCERN', ascending=False )['TOTAL'].values ),
DCG( scores.sort( 'ANOVA', ascending=False )['TOTAL'].values ),
DCG( scores.sort( 'LNS', ascending=False )['TOTAL'].values ) ] } )
DCG_Matrix.index = [ 'DISCERN', 'ANOVA', 'LNS' ]
print DCG_Matrix
return scores, DCG_Matrix
if __name__ == '__main__':
# Run the three smaller tests. Graphs will be output automatically.
independent_no_perturbation_test()
three_component_test()
seven_star_tests()
# Run the large sparse network. This example has 1000 nodes, of which
# 25 are perturbed. You can play with these parameters as much as you
# want, and the Discounted Cumulative Gain matrix will be returned.
large_sparse_network( 1000, 25 )
``` |
{
"source": "jmschrei/kiwano",
"score": 4
} |
#### File: src/kiwano/kiwano.py
```python
import numpy
from apricot import FacilityLocationSelection
def kiwano(similarities, names, **kwargs):
"""Kiwano will take in a similarity matrix and output an ordering.
Kiwano is the implementation of a procedure that can determine the
order that experiments should be performed based on a submodular
optimization procedure applied to imputed versions of those
experiments. Thus, the input to this procedure is a calculated
similarity matrix, and the output is an ordering over those
experiments. The similarity matrix must be symmetric and
non-negative, with higher values indicating higher similarity.
We anticipate that these similarities are squared correlation
valus, but they can be any similarity metric that follows
those properties. The ranking procedure involves optimizing a
facility location function.
This is an implementation of the code for the paper
blah bah blah
<NAME>, <NAME>, <NAME>
"""
if not isinstance(similarities, numpy.ndarray):
raise ValueError("Similarities must be a 2D symmetric numpy array.")
if numpy.any(similarities.T != similarities):
raise ValueError("Similarities must be a 2D symmetric numpy array.")
if similarities.ndim != 2:
raise ValueError("Similarities must be a 2D symmetric numpy array")
if len(similarities) != len(names):
raise ValueError("The length of similarities must be the same as the length of names")
selector = FacilityLocationSelection(len(names), pairwise_func='precomputed', **kwargs)
selector.fit(similarities)
return names[selector.ranking], selector.ranking
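# --- Editor's hedged usage sketch (not part of the original module) ---
# The similarity matrix and experiment names below are made up purely to show
# the calling convention; real inputs would be squared correlations between
# imputed experiments.
if __name__ == "__main__":
    names = numpy.array(["E1", "E2", "E3"])
    similarities = numpy.array([[1.0, 0.8, 0.1],
                                [0.8, 1.0, 0.2],
                                [0.1, 0.2, 1.0]])
    ordered_names, ranking = kiwano(similarities, names)
    print(ordered_names, ranking)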
``` |
{
"source": "jmschrei/rambutan",
"score": 2
} |
#### File: rambutan/rambutan/rambutan.py
```python
import os, numpy, pandas
try:
from sklearn.metrics import roc_auc_score
except:
roc_auc_score = 'acc'
from joblib import Parallel, delayed
from .io import TrainingGenerator, ValidationGenerator
from .utils import bedgraph_to_dense, fasta_to_dense
from .utils import encode_dnase, extract_regions
def extract_sequence(filename, verbose=False):
"""Extract a nucleotide sequence from a file and encode it.
This function will read in a FastA formatted DNA file and convert it to be
a one-hot encoded numpy array for internal use. If a one-hot encoded file
is passed in, it is simply returned. This function is a convenient wrapper
for joblib to parallelize the unzipping portion.
Parameters
----------
filename : str or numpy.ndarray
The name of the fasta file to open or the one-hot encoded sequence.
verbose: bool, optional
Whether to report the status while extracting sequence. This does not
look good when done in parallel, so it is suggested it is set to false
in that case.
Returns
-------
sequence : numpy.ndarray, shape=(n, 4)
The one-hot encoded DNA sequence.
"""
if isinstance(filename, str):
if verbose:
print("Converting {}".format(filename))
return fasta_to_dense(filename, verbose)
return filename
def extract_dnase(filename, verbose=False):
"""Extract a DNaseI file and encode it.
This function will read in a bedgraph format file and convert it to the
one-hot encoded numpy array used internally. If a one-hot encoded file is
passed in, it is simply returned. This function is a convenient wrapper for
joblib to parallelize the unzipping portion.
Parameters
----------
filename : str or numpy.ndarray
The name of the bedgraph file to open or the one-hot encoded sequence.
verbose: bool, optional
Whether to report the status while extracting sequence. This does not
look good when done in parallel, so it is suggested it is set to false
in that case.
Returns
-------
sequence : numpy.ndarray, shape=(n, 8)
The one-hot encoded DNaseI sequence.
"""
if isinstance(filename, str):
if verbose:
print("Converting {}".format(filename))
dnase_dense = bedgraph_to_dense(filename, verbose)
if verbose:
print("Encoding {}".format(filename))
dnase_ohe = encode_dnase(dnase_dense, verbose)
return dnase_ohe
return filename
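# --- Editor's hedged sketch (not part of the original module) ---
# Encodes the two inputs in parallel with joblib; the file-name arguments are
# placeholders for single-chromosome FastA and bedgraph files.
def _example_extract_inputs(fasta_file, bedgraph_file):
    return Parallel(n_jobs=2)([delayed(extract_sequence)(fasta_file),
        delayed(extract_dnase)(bedgraph_file)])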
class Rambutan(object):
"""Rambutan: a predictor of mid-range DNA-DNA contacts.
This serves as a wrapper for all functionality involving the use of Rambutan.
There are two main functions to use, fit and predict. Fit involves taking in
nucleotide sequence, DNaseI sensitivity, and a contact map, and training the
model. Predict involves taking in nucleotide sequence and DNaseI sensitivity
and predicting significant contacts.
Note: Due to a limitation on mxnet's part, you cannot fit and predict in the
same program. You must fit the model and save the parameters during training,
and then load the pre-fit model and make predictions.
Parameters
----------
name : str, optional
The name of the model, necessary for saving or loading parameters.
Default is 'rambutan'.
iteration : int or None, optional
The iteration of training to load model parameters from, if using Rambutan
in predict mode. Default is None.
model : mxnet.symbol or None
An alternate neural network can be passed in if one wishes to train that
using the same framework instead of the original Rambutan model.
learning_rate : float, optional
The learning rate for the optimizer. Default is 0.01.
num_epoch : int, optional
The number of epochs to train the model for. Default is 25.
epoch_size : int, optional
The number of batches which comprise an 'epoch'. Default is 500.
wd : float, optional
The weight decay. This is equivalent to L2 regularization. Default is
0.0.
optimizer : str, optional
The optimizer to use for training. Default is 'adam'.
batch_size : int, optional
The number of samples to use in each batch. Default is 1024.
min_dist : int, optional
The minimum distance to consider contacts for. Default is 50kb.
max_dist : int, optional
The maximum distance to consider contacts for. Default is 1mb.
use_seq : bool, optional
Whether to use nucleotide sequence as an input to the model in the
training step. Default is True.
use_dnase : bool, optional
Whether to use DNaseI sensitivity as an input to the model in the
training step. Default is True.
use_dist : bool, optional
Whether to use genomic distance as an input to the model in the
training step. Default is True.
verbose : bool, optional
Whether to output information during training and prediction. Default
is True.
Example
-------
>>> from rambutan import Rambutan
>>> import numpy
>>> y_pred = Rambutan(iteration=25).predict('chr21.fa', 'chr21.GM12878.dnase.bedgraph', ctxs=[0, 1, 2, 3])
>>> numpy.save("chr21.predictions.npy", y_pred)
"""
def __init__(self, name='rambutan', iteration=None, model=None,
learning_rate=0.01, num_epoch=25, epoch_size=500, wd=0.0,
optimizer='adam', batch_size=1024, min_dist=50000, max_dist=1000000,
use_seq=True, use_dnase=True, use_dist=True, verbose=True):
self.name = name
self.iteration = iteration
self.model = model
self.learning_rate = learning_rate
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.wd = wd
self.optimizer = optimizer
self.batch_size = batch_size
self.min_dist = min_dist
self.max_dist = max_dist
self.use_seq = use_seq
self.use_dnase = use_dnase
self.use_dist = use_dist
self.verbose = verbose
def predict(self, sequence, dnase, regions=None, ctxs=[0], sparse=False):
"""Make predictions and return the matrix of probabilities.
Rambutan will make a prediction for each pair of genomic loci defined in
`regions' which fall between `min_dist' and `max_dist'. Inputs can either
be appropriately encoded sequence and dnase files, or fasta files and
bedgraph files for the nucleotide sequence and DNaseI sensitivity
respectively. Note: fasta files and bedgraph files must each contain a single
chromosome, not one entry per chromosome.
Parameters
----------
sequence : numpy.ndarray, shape (n, 4) or str
The nucleotide sequence. Either a one hot encoded matrix of
nucleotides with n being the size of the chromosome, or a file
name for a fasta file.
dnase : numpy.ndarray, shape (n, 8) or str
The DNaseI fold change sensitivity. Either an encoded matrix in
the manner described in the manuscript or the file name of a
bedgraph file.
regions : numpy.ndarray or None, optional
The regions of interest to look at. All other regions will be
ignored. If set to none, the regions of interest are defined
to be 1kb bins for which all nucleotides are mappable, i.e.
where there are no n or N symbols in the fasta file. Default
is None.
ctxs : list, optional
The contexts of the gpus to use for prediction. Currently
prediction is only supported on gpus and not cpus due to
the time it would take for prediction. For example, if you
wanted to use three gpus of index 0 1 and 3 (because 2
is busy doing something else) you would just pass in
ctxs=[0, 1, 3] and the prediction task will be naturally
parallelized across your 3 gpus with a linear speedup.
sparse : bool, optional
Whether to return three arrays, the rows, columns, and values,
or the full dense matrix. Sparse is useful for large matrices.
Returns
-------
y : numpy.ndarray, shape=(m, m)
A matrix of predictions of shape (m, m) where m is the number of
1kb loci in the chromosome. The predictions will reside in the
upper triangle of the matrix since predictions are symmetric.
"""
if isinstance(sequence, str) and isinstance(dnase, str):
if self.verbose:
print("Converting FASTA")
sequence = fasta_to_dense(sequence, self.verbose)
if self.verbose:
print("Converting DNase")
dnase = bedgraph_to_dense(dnase, self.verbose)
if self.verbose:
print("Encoding DNase")
dnase = encode_dnase(dnase, self.verbose)
if regions is None:
regions = extract_regions(sequence)
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
from .models import predict_task
Parallel(n_jobs=len(ctxs))( delayed(predict_task)(self.name,
self.iteration, ctx, len(ctxs), sequence, dnase, regions,
self.use_seq, self.use_dnase, self.use_dist, self.min_dist,
self.max_dist, self.batch_size, self.verbose) for ctx in ctxs)
if not sparse:
n = int(regions.max()) // 1000 + 1
y = numpy.zeros((n, n))
for ctx in ctxs:
with open('.rambutan.predictions.{}.txt'.format(ctx), 'r') as infile:
for line in infile:
mid1, mid2, p = line.split()
mid1 = (int(float(mid1)) - 500) // 1000
mid2 = (int(float(mid2)) - 500) // 1000
p = float(p)
y[mid1, mid2] = p
os.system('rm .rambutan.predictions.{}.txt'.format(ctx))
return y
else:
rows, cols, values = [], [], []
for ctx in ctxs:
with open('.rambutan.predictions.{}.txt'.format(ctx), 'r') as infile:
for line in infile:
mid1, mid2, p = line.split()
mid1, mid2, p = int(mid1), int(mid2), float(p)
rows.append(mid1)
cols.append(mid2)
values.append(p)
os.system('rm .rambutan.predictions.{}.txt'.format(ctx))
rows = numpy.array(rows)
cols = numpy.array(cols)
values = numpy.array(values)
return rows, cols, values
def fit(self, sequence, dnase, contacts, regions=None, validation_contacts=None,
training_chromosome=None, validation_chromosome=None, ctxs=[0],
eval_metric=roc_auc_score, symbol=None, n_jobs=1):
"""Fit the model to sequence, DNaseI, and Hi-C data.
You can fit the Rambutan model to new data. One must pass in sequence
data, DNaseI data, and Hi-C contact maps. The sequence data can come
either in the form of FastA files or one-hot encoded numpy arrays. The
DNaseI data can likewise come as either bedgraph files or numpy arrays.
The Hi-C data must come in the traditional 7 column format. Validation
data can optionally be passed in to report a validation set error during
the training process. NOTE: Regardless of whether they are used or not, all
chromosomes should be passed in to the `sequence` and `dnase` parameters.
The contacts specified in `contacts` will dictate which are used. This is
to make the internals easier.
Parameters for training such as the number of epochs and batches are
set in the initial constructor, following with the sklearn format for
estimators.
Parameters
----------
sequence : numpy.ndarray, shape (n, 4) or str
The nucleotide sequence. Either a one hot encoded matrix of
nucleotides with n being the size of the chromosome, or a file
name for a fasta file.
dnase : numpy.ndarray, shape (n, 8) or str
The DNaseI fold change sensitivity. Either an encoded matrix in
the manner described in the manuscript or the file name of a
bedgraph file.
regions : numpy.ndarray or None, optional
The regions of interest to look at. All other regions will be
ignored. If set to none, the regions of interest are defined
to be 1kb bins for which all nucleotides are mappable, i.e.
where there are no n or N symbols in the fasta file. Default
is None.
ctxs: list, optional
The contexts of the gpus to use for prediction. Currently
prediction is only supported on gpus and not cpus due to
the time it would take for prediction. For example, if you
wanted to use three gpus of index 0 1 and 3 (because 2
is busy doing something else) you would just pass in
ctxs=[0, 1, 3] and the prediction task will be naturally
parallelized across your 3 gpus with a linear speedup.
Returns
-------
y : numpy.ndarray, shape=(m, m)
A matrix of predictions of shape (m, m) where m is the number of
1kb loci in the chromosome. The predictions will reside in the
upper triangle of the matrix since predictions are symmetric.
"""
if not isinstance(sequence, list):
raise ValueError("sequence must be a list of FastA file names or pre-encoded numpy arrays.")
if not isinstance(dnase, list):
raise ValueError("DNase must be a list of bedgraph file names or pre-encoded numpy arrays.")
if isinstance(contacts, str):
contacts = pandas.read_csv(contacts, sep='\t')
with Parallel(n_jobs=n_jobs) as parallel:
sequences = parallel( delayed(extract_sequence)(filename, self.verbose) for filename in sequence )
dnases = parallel( delayed(extract_dnase)(filename, self.verbose) for filename in dnase )
if regions is None:
if self.verbose:
print("Extracting regions")
regions = parallel( delayed(extract_regions)(sequence) for sequence in sequences )
sequences = numpy.array(sequences)
dnases = numpy.array(dnases)
regions = numpy.array(regions)
if isinstance(validation_contacts, str):
validation_contacts = pandas.read_csv(validation_contacts, sep='\t')
validation_chromosome = int(validation_contacts.iloc[0, 0][3:])
import mxnet as mx
from .models import RambutanSymbol
if symbol is None:
symbol = RambutanSymbol
model = symbol(ctx=list(map(mx.gpu, ctxs)),
epoch_size=self.epoch_size,
num_epoch=self.num_epoch,
learning_rate=self.learning_rate,
wd=self.wd,
optimizer=self.optimizer
)
training_contacts = numpy.empty((contacts.shape[0], 3), dtype='float64')
training_contacts[:,0] = [int(chrom[3:])-1 for chrom in contacts['chr1']]
training_contacts[:,1] = contacts['fragmentMid1'].values
training_contacts[:,2] = contacts['fragmentMid2'].values
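# Hedged illustration (row values are synthetic): contacts must provide at least the columns
# 'chr1', 'fragmentMid1', and 'fragmentMid2'; a row such as ('chr21', 30500, 82500) is encoded
# above as the training triplet (20, 30500.0, 82500.0).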
if self.verbose:
print("Training on {} contacts".format(training_contacts.shape[0]))
X_train = TrainingGenerator(sequences, dnases, training_contacts, regions,
self.batch_size, min_dist=self.min_dist, max_dist=self.max_dist,
use_seq=self.use_seq, use_dnase=self.use_dnase,
use_dist=self.use_dist)
if validation_contacts is not None:
validation_contacts = validation_contacts[['fragmentMid1', 'fragmentMid2']].values
if self.verbose:
print("Validating on {} contacts from chromosome {}".format(validation_contacts.shape[0], validation_chromosome))
X_validation = ValidationGenerator(sequences[validation_chromosome-1],
dnases[validation_chromosome-1], validation_contacts, regions[validation_chromosome-1],
batch_size=self.batch_size, min_dist=self.min_dist,
max_dist=self.max_dist, use_seq=self.use_seq,
use_dnase=self.use_dnase, use_dist=self.use_dist)
model.fit( X=X_train,
eval_data=X_validation if validation_contacts is not None else None,
eval_metric=eval_metric,
batch_end_callback=mx.callback.Speedometer(self.batch_size),
kvstore='device',
epoch_end_callback=mx.callback.do_checkpoint(self.name)
)
self.iteration = self.num_epoch
return model
``` |
{
"source": "jmschrei/shap",
"score": 2
} |
#### File: shap/benchmark/scorers.py
```python
from .. import LinearExplainer
from .. import KernelExplainer
from .. import SamplingExplainer
from ..explainers import other
from . import metrics
from . import methods
import sklearn
from sklearn.model_selection import train_test_split
import numpy as np
import copy
import functools
import time
def consistency_guarantees(X, y, model_generator, method_name):
# 1.0 - perfect consistency
# 0.8 - guarantees depend on sampling
# 0.6 - guarantees depend on approximation
# 0.0 - no guarantees
guarantees = {
"linear_shap_corr": 1.0,
"linear_shap_ind": 1.0,
"coef": 0.0,
"kernel_shap_1000_meanref": 0.8,
"sampling_shap_1000": 0.8,
"random": 0.0,
"saabas": 0.0,
"tree_gain": 0.0,
"tree_shap": 1.0,
"mean_abs_tree_shap": 1.0,
"lime_tabular_regression_1000": 0.8
}
return None, guarantees[method_name]
def local_accuracy(X, y, model_generator, method_name):
def score_map(true, pred):
""" Converts local accuracy from % of standard deviation to numerical scores for coloring.
"""
v = min(1.0, np.std(pred - true) / (np.std(true) + 1e-8))
if v < 1e-6:
return 1.0
elif v < 0.01:
return 0.9
elif v < 0.05:
return 0.75
elif v < 0.1:
return 0.6
elif v < 0.2:
return 0.4
elif v < 0.3:
return 0.3
elif v < 0.5:
return 0.2
elif v < 0.7:
return 0.1
else:
return 0.0
def score_function(X_train, X_test, y_train, y_test, attr_function):
return metrics.local_accuracy(
X_train, y_train, X_test, y_test, attr_function(X_test),
model_generator, score_map
)
return None, score_method(X, y, None, model_generator, score_function, method_name)
def runtime(X, y, model_generator, method_name):
old_state = np.random.get_state()
np.random.seed(3293)
# average the method scores over several train/test splits
method_reps = []
for i in range(1):
X_train, X_test, y_train, _ = train_test_split(toarray(X), y, test_size=0.1, random_state=i)
# define the model we are going to explain
model = model_generator()
model.fit(X_train, y_train)
# evaluate each method
start = time.time()
explainer = getattr(methods, method_name)(model, X_train)
build_time = time.time() - start
start = time.time()
explainer(X_test)
explain_time = time.time() - start
# we always normalize the explain time as though we were explaining 1000 samples
# even if to reduce the runtime of the benchmark we do less (like just 100)
method_reps.append(build_time + explain_time * 1000.0 / X_test.shape[0])
np.random.set_state(old_state)
return None, np.mean(method_reps)
def remove_positive(X, y, model_generator, method_name, num_fcounts=11):
return run_metric(metrics.remove, X, y, model_generator, method_name, 1, num_fcounts)
def remove_negative(X, y, model_generator, method_name, num_fcounts=11):
return run_metric(metrics.remove, X, y, model_generator, method_name, -1, num_fcounts)
def mask_remove_positive(X, y, model_generator, method_name, num_fcounts=11):
return run_metric(metrics.mask_remove, X, y, model_generator, method_name, 1, num_fcounts)
def mask_remove_negative(X, y, model_generator, method_name, num_fcounts=11):
return run_metric(metrics.mask_remove, X, y, model_generator, method_name, -1, num_fcounts)
def keep_positive(X, y, model_generator, method_name, num_fcounts=11):
return run_metric(metrics.keep, X, y, model_generator, method_name, 1, num_fcounts)
def keep_negative(X, y, model_generator, method_name, num_fcounts=11):
return run_metric(metrics.keep, X, y, model_generator, method_name, -1, num_fcounts)
def mask_keep_positive(X, y, model_generator, method_name, num_fcounts=11):
return run_metric(metrics.mask_keep, X, y, model_generator, method_name, 1, num_fcounts)
def mask_keep_negative(X, y, model_generator, method_name, num_fcounts=11):
return run_metric(metrics.mask_keep, X, y, model_generator, method_name, -1, num_fcounts)
def run_metric(metric, X, y, model_generator, method_name, attribution_sign, num_fcounts):
def metric_function(true, pred):
return np.mean(pred)
def score_function(fcount, X_train, X_test, y_train, y_test, attr_function):
A = attribution_sign * attr_function(X_test)
nmask = np.ones(len(y_test)) * fcount
nmask = np.minimum(nmask, np.array(A > 0).sum(1)).astype(int)
return metric(
nmask, X_train, y_train, X_test, y_test, A,
model_generator, metric_function
)
fcounts = intspace(0, X.shape[1], num_fcounts)
return fcounts, score_method(X, y, fcounts, model_generator, score_function, method_name)
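# Hedged worked example (synthetic numbers): with fcount=5, a test sample whose attribution
# vector A has only 3 positive entries gets nmask = min(5, 3) = 3, so at most the positively
# attributed features are masked for that sample.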
def batch_remove_absolute_r2(X, y, model_generator, method_name, num_fcounts=11):
return run_batch_abs_metric(metrics.batch_remove, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
def batch_keep_absolute_r2(X, y, model_generator, method_name, num_fcounts=11):
return run_batch_abs_metric(metrics.batch_keep, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
def run_batch_abs_metric(metric, X, y, model_generator, method_name, loss, num_fcounts):
def score_function(fcount, X_train, X_test, y_train, y_test, attr_function):
A_train = np.abs(attr_function(X_train))
nkeep_train = (np.ones(len(y_train)) * fcount).astype(int)
#nkeep_train = np.minimum(nkeep_train, np.array(A_train > 0).sum(1)).astype(np.int)
A_test = np.abs(attr_function(X_test))
nkeep_test = (np.ones(len(y_test)) * fcount).astype(int)
#nkeep_test = np.minimum(nkeep_test, np.array(A_test >= 0).sum(1)).astype(np.int)
return metric(
nkeep_train, nkeep_test, X_train, y_train, X_test, y_test, A_train, A_test,
model_generator, loss
)
fcounts = intspace(0, X.shape[1], num_fcounts)
return fcounts, score_method(X, y, fcounts, model_generator, score_function, method_name)
def score_method(X, y, fcounts, model_generator, score_function, method_name):
""" Test an explanation method.
"""
old_state = np.random.get_state()
np.random.seed(3293)
# average the method scores over several train/test splits
method_reps = []
for i in range(1):
X_train, X_test, y_train, y_test = train_test_split(toarray(X), y, test_size=0.1, random_state=i)
# define the model we are going to explain
model = model_generator()
model.fit(X_train, y_train)
def score(attr_function):
cached_attr_function = lambda X: check_cache(attr_function, X)
if fcounts is None:
return score_function(X_train, X_test, y_train, y_test, cached_attr_function)
else:
scores = []
for f in fcounts:
scores.append(score_function(f, X_train, X_test, y_train, y_test, cached_attr_function))
return np.array(scores)
# evaluate the method
method_reps.append(score(getattr(methods, method_name)(model, X_train)))
np.random.set_state(old_state)
return np.array(method_reps).mean(0)
# used to memoize explainer functions so we don't waste time re-explaining the same object
cache0 = None
cache_X0 = None
cache_f0 = None
cache1 = None
cache_X1 = None
cache_f1 = None
def check_cache(f, X):
global cache0, cache_X0, cache_f0
global cache1, cache_X1, cache_f1
if X is cache_X0 and f is cache_f0:
return cache0
elif X is cache_X1 and f is cache_f1:
return cache1
else:
cache_f1 = cache_f0
cache_X1 = cache_X0
cache1 = cache0
cache_f0 = f
cache_X0 = X
cache0 = f(X)
return cache0
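# Hedged usage sketch (attr and X_test are assumed stand-ins): repeated calls with the same
# objects hit the two-slot cache above.
# attr = lambda X: X * 2          # stand-in attribution function
# a1 = check_cache(attr, X_test)  # computed and stored in cache0
# a2 = check_cache(attr, X_test)  # identical objects, so cache0 is returned without recomputation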
def intspace(start, end, count):
return np.unique(np.round(np.linspace(start, end, count)).astype(int))
def toarray(X):
""" Converts DataFrames to numpy arrays.
"""
if hasattr(X, "values"):
X = X.values
return X
``` |
{
"source": "jmsckv/NAS-Bench-201",
"score": 2
} |
#### File: NAS-Bench-201/nas_201_api/preprocessing.py
```python
import os
import numpy as np
import pickle
import random
from tqdm import tqdm
import logging
from nas_201_api import NASBench201API as API
from utils import get_index_of_max_or_min_element_in_list, write_dict_of_lists_to_csv, dict_to_array, rank_columns_of_2d_array
RAWPATH = os.environ['NASBENCH_RAW']
OUTPATH = os.environ['NASBENCH_OUT']
default_datasets = ['cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120']
default_seeds = [777, 888, 999]
# TODO: rework, logging
def validate_data(api=None, datasets=default_datasets, seeds=default_seeds, rawpath=RAWPATH, outpath=OUTPATH, test_run=None):
"""
Goal is to find out for which architectures we have 200 epochs on all datasets with 3 random seeds.
This claim is made in the paper, but as of 23/2/20 the dataset contains less observations.
:param datasets: datasets contained in NASBENCH201
:param seeds: random seeds for which potentially every architecture was trained on potentially every dataset
:return tuple
"""
# in the test run case, we simply randomly draw a specified number of architectures
if test_run:
n_archs = random.sample(range(len(api)), test_run)
else:
n_archs = range(len(api))
# validate the number of runs for every architecture in every dataset
n_runs = dict()
# which exceptions do we get for which architecture?
exceptions = {key: set() for key in n_archs}
# validation logic
for d in tqdm(datasets):
n_runs[d] = list() # a list with each entry representing an architecture
for i in n_archs:
try:
results_i = api.query_meta_info_by_index(i)
count_runs = 0 # how many runs with different random seeds?
for s in seeds:
try:
k = (d, s)
if results_i.all_results[k].epochs == 200: # we assume that this attribute indicates a valid run, another criterion may be better
count_runs += 1
except Exception as e:
exceptions[i].update({(type(e),e)})
n_runs[d].append(count_runs)
except Exception as e:
exceptions[i].update({(type(e),e)})
# serialize results
results = (n_runs, exceptions)
if outpath:
res_path = OUTPATH + '/validation_results.pkl'
with open(res_path,'wb') as wf:
pickle.dump(results,wf)
return results
# validate data
#api = API(RAWPATH)
#api.get_more_info(2,'cifar10-valid',1)
#api.get_more_info(2,'cifar10-valid',100)
#r_runs, exceptions = validate_data(api)
def create_record(outpath,api, log_exceptions = True,dataset='cifar10-valid',epochs=200, best_so_far=True, store_CSV =True, store_np=True, precompute_ranks_per_epoch=True):
"""
:param dataset: dataset for which we query api
:param outpath: specify path where returned df gets serialized as CSV file
:return: a dictionary, where each key corresponds to one architecture and each of the 200 entries in its list holds the validation accuracy of the corresponding epoch
"""
if log_exceptions:
log_path = os.path.join(outpath, 'create_record.log')
logger = logging.getLogger()
# Configure logger
logging.basicConfig(filename=log_path, format='%(asctime)s %(filename)s: %(message)s', filemode='w')
results = {key:list() for key in range(len(api))}
# results_test = {key:None for key in range(len(api))} currently api does not support easy lookup of test
exceptions = list()
for i in range(len(api)):
acc = 0  # running best validation accuracy across epochs when best_so_far is True
for e in range(epochs):
try:
if best_so_far:
current_acc = api.get_more_info(i,dataset,e)['valid-accuracy']
if acc < current_acc:
acc = current_acc
else:
acc = api.get_more_info(i,dataset,e)['valid-accuracy']
results[i].append(acc)
except Exception as e:
logging.error(type(e),e, exc_info=True)
exceptions.append((i,e))
if len(exceptions):
print(f"Found {len(exceptions)} exceptions. Corresponds to a fraction of: {len(exceptions)/(len(api)*epochs)}")
"""
# currently disabled as api not reporting test performance via get_more_info
# let's also record for each architecture how well it performs on the held-out test data
# therefore we take the best performing weights on val and look up the corresponding performance on test
for key in results.keys():
ind = get_index_of_max_or_min_element_in_list(results[key])
try:
results_test[key] = api.get_more_info(key,'cifar10-valid', ind)
except Exception as e:
logging.error(type(e), e, exc_info=True)
# would also have to serialize
"""
# serialize data, this is what we'll use for simulation
out_fn = os.path.join(outpath,'proc_data_'+ dataset +'.pkl')
with open (out_fn, 'wb') as wf:
pickle.dump(results,wf)
if store_CSV:
out_fn = os.path.join(outpath, 'proc_data_' + dataset + '.csv')
write_dict_of_lists_to_csv(out_fn, results, include_keys_as_header_col=False)
if store_np:
out_fn = os.path.join(outpath, 'proc_data_' + dataset + '.npy')
np.save(out_fn, dict_to_array(results))
if precompute_ranks_per_epoch:
out_fn = os.path.join(outpath, 'precomputed_ranks_per_epoch.npy')
np.save(out_fn, rank_columns_of_2d_array(dict_to_array(results)))
print(f"Finished preprocessing for dataset {dataset}.")
return results, exceptions
def shuffle_data_n_times_and_store(outpath, max_i, n_samples=500, store_CSV = True):
""" This is to prevent that same random seed could return different random sequences on different hardware.
Hence, we draw all random once and before running the experiments.
The serialized results are also used to simulate RS baselines."""
results = {}
for i in range(n_samples):
random.seed(i)
l = list(range(max_i))
random.shuffle(l)
results[i] = l
out_fn = os.path.join(outpath, 'precomputed_random_samples.pkl')
with open (out_fn,'wb') as wf:
pickle.dump(results,wf)
if store_CSV:
out_fn = os.path.join(outpath, 'precomputed_random_samples.csv')
write_dict_of_lists_to_csv(out_fn, results, include_keys_as_header_col=False)
def main():
api = API(RAWPATH)
r,e = create_record(outpath=OUTPATH,api=api)
shuffle_data_n_times_and_store(OUTPATH,len(api))
main()
``` |
{
"source": "jms/compress_service",
"score": 2
} |
#### File: jms/compress_service/zipit.py
```python
import falcon
import logging
import json
from wsgiref import simple_server
import os
import urlparse
from rq import Queue
from redis import Redis
from service_utils import compress
def max_body(limit):
def hook(req, resp, resource, params):
length = req.content_length
if length is not None and length > limit:
msg = ('The size of the request is too large. The body must not '
'exceed ' + str(limit) + ' bytes in length.')
raise falcon.HTTPRequestEntityTooLarge(
'Request body is too large', msg)
return hook
class CompressResources:
def __init__(self):
self.logger = logging.getLogger('compress_it. ' + __name__)
redis_url = os.getenv('REDIS_URL')
if not redis_url:
raise RuntimeError('Set up Redis first.')
urlparse.uses_netloc.append('redis')
url = urlparse.urlparse(redis_url)
conn = Redis(host=url.hostname, port=url.port, db=0, password=url.password)
self.q = Queue('default', connection=conn)
@falcon.before(max_body(64 * 1024))
def on_get(self, req, resp):
resp.body = json.dumps({"message": "Compression service demo"})
resp.content_type = "application/json"
resp.set_header('X-Powered-By', 'jms')
resp.status = falcon.HTTP_200
@falcon.before(max_body(64 * 1024))
def on_post(self, req, resp):
# parse json, call module compress and notify
# req.stream corresponds to the WSGI wsgi.input environ variable,
# and allows you to read bytes from the request body.
#
# See also: PEP 3333
if req.content_length in (None, 0):
raise falcon.HTTPBadRequest('Empty request body',
'A valid JSON document is required.')
body = req.stream.read()
if not body:
raise falcon.HTTPBadRequest('Empty request body',
'A valid JSON document is required.')
try:
data = json.loads(body.decode('utf-8'))
case_id = data.get('id', None)
file_list = data.get('files', None)
bucket_name = data.get('bucket', None)
base_name = data.get('prefix', None)
if case_id is not None and file_list is not None and bucket_name is not None:
self.q.enqueue(compress.process_data, case_id, file_list, bucket_name, base_name)
# compress.process_data(case_id, file_list, bucket_name, base_name)
# response ok, task received
resp.body = json.dumps(
{
"message": "Compression task started, App will be notified via Pubnub when the task is complete"})
resp.status = falcon.HTTP_200
else:
raise falcon.HTTPBadRequest('Invalid Data',
'Information required not available')
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPError(falcon.HTTP_753,
'Malformed JSON',
'Could not decode the request body. The '
'JSON was incorrect or not encoded as '
'UTF-8.')
# falcon.API instances are callable WSGI apps
app = falcon.API()
zip_it = CompressResources()
app.add_route('/compress', zip_it)
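# Hedged usage sketch (endpoint and payload keys taken from on_post above; values are assumptions):
# POST /compress with a JSON body such as
#   {"id": "case-1", "files": ["a.pdf", "b.pdf"], "bucket": "my-bucket", "prefix": "case-1/"}
# enqueues compress.process_data on the 'default' RQ queue and returns HTTP 200 immediately.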
``` |
{
"source": "jmscslgroup/canviz",
"score": 2
} |
#### File: canviz/strym/dashboard.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import json
import sys
class dashboard:
"""
`dashboard` works within the strym package to collect metadata files
from within a folder and print interesting aspects of the collection
Parameters
--------------
directory: `str`
Reads from the specified directory
verbose: `bool`
Boolean flag, if `True` verbosity is enabled
kwargs: variable list of argument in the dictionary format
Attributes
--------------
directory: `str`
Reads from the specified directory
verbose: `bool`
Boolean flag, if `True` verbosity is enabled
metadata_dict: `dict`
Metadata dictionary
jsonlist: `list`
A list contain json data
"""
def __init__(self,directory='./',verbose=False,start=None,end=None,**kwargs):
self.directory = directory
self.verbose = verbose
self.start=start
self.end=end
# print(self.directory)
# process all the input folders first
# parentfolder = "/Users/sprinkle/work/data/cyverse/rahulbhadani/JmscslgroupData/PandaData/"
import glob
folderlist = glob.glob(self.directory+"*")
if verbose:
print(folderlist)
jsonlist = []
for datafolder in folderlist:
# datafolder = "/Users/sprinkle/work/data/cyverse/rahulbhadani/JmscslgroupData/PandaData/2020_03_03/"
import glob
jsonlisttmp = glob.glob(datafolder+"/*.json")
if verbose:
print(jsonlisttmp)
if len(jsonlisttmp) > 0:
for f in jsonlisttmp:
jsonlist.append(f)
if verbose:
print(jsonlist)
metadata_dict = []
for json_file_str in jsonlist:
try:
with open(json_file_str) as json_file:
data = json.load(json_file)
metadata_dict.append(data)
except Exception as ex:
# if verbose:
print(f'Skipping {json_file_str}, continuing (ex={ex})')
self.metadata_dict = metadata_dict
self.jsonlist = jsonlist
def statistics(self):
"""
Retrieves interesting statistics
Returns
----------
`str` :
String formatted JSON
"""
result=''
result += f'Metadata entries: {len(self.metadata_dict)}\n'
result += f'JSON files found: {len(self.jsonlist)}\n'
return result
def miles(self):
"""
Retrieves distance traveled in miles
Returns
----------
`float` :
Total distance travelled in miles
"""
dist=0
self.error_count=0
for d in self.metadata_dict:
try:
dist = dist + d['distance_miles']
except Exception as ex:
self.error_count += 1
if self.verbose:
print(f'No key distance_miles in dictionary, skipping')
return dist
def kilometers(self):
"""
Retrieves distance traveled in Kilometers
Returns
----------
`float` :
Total distance travelled in Kilometers
"""
dist=0
self.error_count=0
for d in self.metadata_dict:
try:
dist = dist + d['distance_km']
except Exception as ex:
self.error_count += 1
if self.verbose:
print(f'No key distance_km in dictionary, skipping')
return dist
def main(argv):
import os, getopt
directory = './'
verbose = False
try:
opts, args = getopt.getopt(argv,"hvd:s:e:",["verbose","directory=","start-date=","end-date="])
except getopt.GetoptError:
print('dashboard.py <-v,--verbose> -d <directory> -s <start_date> -e <end_date>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('dashboard.py <-v,--verbose> -d <directory>')
sys.exit()
elif opt in ('-d', '--directory'):
directory = arg
print(f'directory={directory}')
elif opt in ('-s', '--start-date'):
import datetime
start = datetime.datetime.fromisoformat(arg)
print(f'start_date={start}')
elif opt in ('-e', '--end-date'):
import datetime
end = datetime.datetime.fromisoformat(arg)
print(f'end_date={end}')
elif opt in ('-v', '--verbose'):
verbose = True
print(f'verbose={verbose}')
from strym import dashboard
try:
db = dashboard(directory=directory,verbose=verbose)
print(db.statistics())
print(f'Total driving distance (miles): {db.miles()} ({db.error_count} files not parsed)')
print(f'Total driving distance (km): {db.kilometers()} ({db.error_count} files not parsed)')
except Exception as ex:
print(f'Exception when processing {directory} (msg={ex})')
# find all the JSON files in this directory
# parentfolder = "/Users/sprinkle/work/data/cyverse/rahulbhadani/JmscslgroupData/PandaData/"
# import glob
# folderlist = glob.glob(parentfolder+"*")
# print(folderlist)
# jsonlist = []
# for datafolder in folderlist:
# # datafolder = "/Users/sprinkle/work/data/cyverse/rahulbhadani/JmscslgroupData/PandaData/2020_03_03/"
# import glob
# jsonlisttmp = glob.glob(datafolder+"/*.json")
# print(jsonlisttmp)
# if len(jsonlisttmp) > 0:
# for f in jsonlisttmp:
# jsonlist.append(f)
# print(jsonlist)
# metadata_dict = []
# for json_file_str in jsonlist:
# try:
# with open(json_file_str) as json_file:
# data = json.load(json_file)
# metadata_dict.append(data)
# except Exception as ex:
# print(f'Skipping {json_file_str}, continuing (ex={ex})')
# dist=0
# for d in metadata_dict:
# try:
# dist = dist + d['distance_miles']
# except Exception as ex:
# print(f'No key distance_miles in dictionary, skipping')
# print(dist)
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: canviz/strym/strym.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = "0.0.0" # this is set to actual version later
# For System and OS level task
import sys, getopt
## General Data processing and visualization Import
import struct
import signal
import binascii
import bitstring
import time
import datetime
import serial
import csv
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import pandas as pd # Note that this is not commai Panda, but Database Pandas
import matplotlib.animation as animation
from matplotlib import style
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import uuid
import scipy.special as sp
import pickle
import os
from os.path import expanduser
from packaging import version
import warnings
try:
import libusb1
except OSError:
warnings.warn("libusb-1.0.dll may not be present. Normal strymread operations to read CAN data from csv files won't be affect, but you won't be able to read live data from car. For resolution to this issue, follow the steps described in https://github.com/jmscslgroup/strym/issues/8#issuecomment-652539765")
try:
import usb1
except OSError:
warnings.warn("libusb-1.0.dll may not be present. Normal strymread operations to read CAN data from csv files won't be affect, but you won't be able to read live data from car. For resolution to this issue, follow the steps described in https://github.com/jmscslgroup/strym/issues/8#issuecomment-652539765")
# cantools import
import cantools
from pathlib import Path
version_src = ''
try:
import importlib.resources as pkg_resources
with pkg_resources.path('strym', 'version') as rsrc:
version_src = rsrc
except ImportError:
# Try backported to PY<37 `importlib_resources`.
print("Python older than 3.7 detected. ")
try:
import importlib_resources as pkg_resources
with pkg_resources.path('strym', 'version') as rsrc:
version_src = rsrc
except ImportError:
print("importlib_resources not found. Install backported importlib_resources through `pip install importlib-resources`")
v = Path(version_src).open(encoding = "utf-8").read().splitlines()
__version__ = v[0].strip()
def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):
"""This spwans a thread and runs the given function using the args, kwargs and
return the given default value if the timeout_duration is exceeded
"""
import threading
class InterruptableThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = default
def run(self):
try:
self.result = func(*args, **kwargs)
except:
pass
it = InterruptableThread()
it.start()
it.join(timeout_duration)
return it.result
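# Hedged usage sketch: the helper above is used below as
#   timeout(get_latest_strym_version, timeout_duration=5, default="0.0.0")
# which yields "0.0.0" whenever the PyPI lookup does not finish within 5 seconds.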
def get_latest_strym_version():
from subprocess import check_output, CalledProcessError
try: # needs to work offline as well
result = check_output(["yolk", "-V", "strym"])
return result.split()[1].decode("utf-8")
except CalledProcessError:
return "0.0.0"
def check_for_latest_version():
latest_version = timeout(
get_latest_strym_version, timeout_duration=5, default="0.0.0"
)
if version.parse(__version__) < version.parse(latest_version):
warnings.warn("{}\n{}\n{}\n{}\n{}\n{}".format(
"There is a newer version of strym available on PyPI:\n",
"Your version: \t",
__version__,
"Latest version: \t",
latest_version,
"Consider updating it by using command pip install --upgrade strym"
))
check_for_latest_version()
class strym:
'''
`strym` class records data from a Comma AI Panda and visualizes it in real time.
The constructor first gets a "USB context" by creating a `USBContext` instance.
Then, it browses available USB devices and opens the one whose manufacturer is
COMMA.AI. Once the right device is identified, `strym` creates a device handle,
enables automatic kernel driver detachment, and claims the interface for I/O operations.
Reads and writes for USB devices are done either synchronously or in isochronous mode.
If your interest is merely in capturing data, you should use synchronous mode.
For (almost) real time visualization, isochronous mode is the way to go.
Parameters
-------------
dbcfile: `string`
Provide path of can database file in order to decode the message
kwargs:
Arbitrary keyword arguments.
path: `string`
Specify the path/folder where data will be saved. By default path is set to `~/CyverseData/JmscslgroupData/PandaData`
See Also
-----------------
## https://github.com/gotmc/libusb
## https://pypi.org/project/libusb1/
## https://vovkos.github.io/doxyrest/samples/libusb/index.html
## https://github.com/vpelletier/python-libusb1
## https://www.beyondlogic.org/usbnutshell/usb4.shtml
'''
def __init__(self, dbcfile, **kwargs):
# Get the home folder of the current user
home = expanduser("~")
# Create a folder CyverseData where all the log files will be record.
self.data_folder = home+ '/CyverseData/JmscslgroupData/PandaData'
## Parse the variable number of arguments
try:
self.data_folder = kwargs["path"]
except KeyError as e:
pass
# Get the USB Context
self.context = usb1.USBContext()
# Get all the USB device list
device_list = self.context.getDeviceList()
commaai_device = None
# Iterate over the list of devices
for device in device_list:
try:
device_manufacturer = device.getManufacturer()
print('Device manufacturer is {}\n'.format(device_manufacturer))
if device_manufacturer == 'comma.ai':
commaai_device = device
print("We found a COMMA AI Device with serial number {}".format(commaai_device.getSerialNumber()))
break
except usb1.USBErrorAccess:
# If the device is not accessible, do not do anything
# print('USB Device Not accessible')
pass
if commaai_device is None:
print("No comma.ai device was found. Aborting")
sys.exit(-1)
self.device = commaai_device
# Save the serial number for future use
self.serial = commaai_device.getSerialNumber()
# open the comma.ai device and obtain a device handle. A handle allows you to
# perform I/O on the device in question. Internally, this function adds a
# reference to the device and makes it available to you through
# `libusb_get_device()`. This reference is removed during libusb_close().
# This is a non-blocking function; no requests are sent over the bus.
self.handle = commaai_device.open()
# set_auto_detach_kernel_driver to enable/disable libusb's automatic kernel driver detachment.
self.handle.setAutoDetachKernelDriver(True)
# You must claim the interface you wish to use before you can perform I/O on any of its endpoints.
self.handle.claimInterface(0)
# define endpoint for reading
self.ENDPOINT_READ = 1
# buffer size
self.BUFFER_SIZE = 16
# dbc file from constructor
self.dbcfile = dbcfile
# load can database from dbc file
self.db = cantools.database.load_file(dbcfile)
# Set up the figure
self.fig = plt.figure()
self.axis = self.fig.add_subplot(1,1,1)
# logfile name attribute, initially None, it will be given value when we are ready to log the message
self.csvwriter = None
# Variable to Hold Specified Data for visualization
self.data = []
# Variable to Hold Time
self.time = []
# Boolean flag to keep recording data
self.keep_recording = True
# Message Type and attributes will be saved into these variables. This is only useful when you want to visualize the specific data
self.msg_type = None
self.attribute_num = None
self.attribute_name = None
self.newbuffer = None
def process_received_data(self, transfer):
'''
`process_received_data` function implements a callback that processes the received data
from USB in isochronous mode. Once data is extracted from buffer, it is saved in the object's data variable.
The data is used to update the plot in the real time.
'''
curr_time = time.time() # Records time of collection
if transfer.getStatus() != usb1.TRANSFER_COMPLETED:
# Transfer did not complete successfully, there is no data to read.
# This example does not resubmit transfers on errors. You may want
# to resubmit in some cases (timeout, ...).
return
self.newbuffer = transfer.getBuffer()[:transfer.getActualLength()]
if self.newbuffer is None:
return
# parse the can buffer into message ID, message, and bus number
can_recv = self.parse_can_buffer(self.newbuffer)
this_message = None
this_message_name = None
for message_id, _, new_message, bus in can_recv:
if self.typelist is not None and message_id not in self.typelist:
continue
self.csvwriter.writerow(([str(curr_time), str(binascii.hexlify(self.newbuffer).decode('utf-8')) , str(bus), str((message_id)), str(binascii.hexlify(new_message).decode('utf-8')), len(new_message)]))
if self.visualize:
try:
this_message = self.db.get_message_by_frame_id(message_id)
this_message_name = this_message.name
# if the message currently received is in the list of messageTypes to be plotted, parse it and plot it
match_bool = False
if self.match == "exact":
match_bool = self.msg_type == this_message_name
elif self.match == "in":
match_bool = self.msg_type in this_message_name
if match_bool :
decoded_msg = self.db.decode_message(this_message_name, bytes(new_message))
attribute_names = list(decoded_msg.keys())
self.attribute_name = attribute_names[self.attribute_num]
data =decoded_msg[self.attribute_name]
print('Time: {}, Data: {}'.format(curr_time, data))
self.data.append(data)
self.time.append(curr_time)
# Only plot 500 points at a time
# Check if data doesn't have 500 points then consume all of the data
if len(self.data) > 500:
data500 = self.data[-500:]
time500 = self.time[-500:]
else:
data500 = self.data
time500 = self.time
self.axis.clear()
self.axis.plot(time500, data500, linestyle='None', color='firebrick', linewidth=2, marker='.', markersize = 3)
self.axis.set_axisbelow(True)
self.axis.minorticks_on()
self.axis.grid(which='major', linestyle='-', linewidth='0.5', color='salmon')
self.axis.grid(which='minor', linestyle=':', linewidth='0.25', color='dimgray')
plt.title(self.msg_type + ": " + self.attribute_name)
plt.xlabel('Time')
plt.ylabel(self.attribute_name)
self.axis.plot()
plt.draw()
plt.pause(0.00000001)
except KeyError as e:
# print("this_message_name: {}".format(this_message_name))
if self.log == "debug":
print('Message ID not supported by current DBC files ["{}"]' .format(e))
continue
def _visualize(self ):
'''
This is internal function meant to visualize specific attribute of the given message passed to
`isolog` function.
'''
pass
def isolog(self, visualize, msg_type, attribute_num, **kwargs):
'''
`isolog()` will log everything in an asynchronous manner but only visualize the specified attribute of the given message.
Upon pressing ctrl-C, the logging will terminate and SIGINT signal handler
will create a plot and save in two formats: python's pickle format and pdf.
`isolog` is responsible for handling data transfer in isochronous mode and parsing it through the callback function `process_received_data`
See https://vovkos.github.io/doxyrest/samples/libusb/group_libusb_asyncio.html?highlight=transfer#details-group-libusb-asyncio
for more detail
Parameters
-------------
visualize: `bool`
specifies whether to visaulize while logging the CAN data
msg_type: `string`
specifies a valid message type from the DBC file
attribute_num: `int`
select the specific attribute from the given `mgs_type` to be displayed
**kwargs:
Arbitrary keyword arguments.
log: `enumeration: {info, debug}`
set log level to info and debug
match: `enumeration: {exact, in}`
how the message type and specified attribute should be matched for visualization. `exact` specifies exact match, `in` specifies substring matching.
'''
self.msg_type = msg_type
self.attribute_num = attribute_num
self.visualize = visualize
self.log = "info"
try:
self.log = kwargs["log"]
except KeyError as e:
#print("KeyError: {}".format(str(e)))
#raise
pass
self.match = "exact"
try:
self.match = kwargs["match"]
except KeyError as e:
#print("KeyError: {}".format(str(e)))
#raise
pass
self.typelist = None
try:
self.typelist = kwargs["typelist"]
except KeyError as e:
#print("KeyError: {}".format(str(e)))
#raise
pass
dt_object = datetime.datetime.fromtimestamp(time.time())
# Now create a folder inside CyverseData corresponding to today's date.
todaysfolder = dt_object.strftime('%Y_%m_%d')
path = self.data_folder + "/" + todaysfolder
if not os.path.exists(path):
os.makedirs(path)
dt = dt_object.strftime('%Y-%m-%d-%H-%M-%S-%f')
logfile = path + '/' + dt + '_' + '_CAN_Messages'+'.csv'
self.logfile = logfile
filehandler = open(logfile, 'a')
print('Writing data to file: '+logfile)
print('Press Ctrl - C to terminate')
self.csvwriter = csv.writer(filehandler)
self.csvwriter.writerow(['Time','Buffer','Bus', 'MessageID', 'Message', 'MessageLength'])
while self.keep_recording:
try:
# Get an `USBTransfer` instance for asynchronous use.
transfer = self.handle.getTransfer()
transfer.setBulk(usb1.ENDPOINT_IN | self.ENDPOINT_READ, self.BUFFER_SIZE, callback = self.process_received_data,)
try:
transfer.submit()
except usb1.DoomedTransferError:
pass
try:
self.context.handleEvents()
except usb1.USBErrorInterrupted:
pass
except KeyboardInterrupt as e:
# Capture the SIGINT event and call plot function to finalize the plot and save the data
self.kill(signal.SIGINT)
#signal.signal(signal.SIGINT, self.kill)
# SIGINT signal handler that will terminate logging of can data and save a final plot of the desired attribute of a message type
def kill(self, sig):
"""
`kill` catches SIGINT or CTRL-C while recording the data
and closes the comma ai device connection
"""
self.handle.close()
print('CTRL-C (SIGINT) received. Stopping log.')
self.keep_recording = False
if self.msg_type is None:
self.msg_type = 'Message Type'
if self.attribute_num is None:
self.attribute_num = 'Attribute'
if self.visualize:
# Ctrl-C Also saves the current figure being visualized with all data plotted on it.
self.axis.clear()
plt.rcParams["figure.figsize"] = (16,8)
self.axis.plot(self.time, self.data, linestyle='None', color='firebrick', linewidth=2, marker='.', markersize = 3)
self.axis.set_axisbelow(True)
self.axis.minorticks_on()
self.axis.grid(which='major', linestyle='-', linewidth='0.5', color='salmon')
self.axis.grid(which='minor', linestyle=':', linewidth='0.25', color='dimgray')
plt.title(self.msg_type + ": " + self.attribute_name)
plt.xlabel('Time')
plt.ylabel(self.attribute_name)
current_fig = plt.gcf()
file_name_to_save = self.logfile[0:-4]
current_fig.savefig(file_name_to_save + ".pdf", dpi = 300)
pickle.dump(self.fig,open(file_name_to_save + ".pickle",'wb'))
def parse_can_buffer(self, dat):
"""
`parse_can_buffer` parses the can data received through the USB device
and returns list of message ID, message and bus number
Parameters
-------------
dat: `bytearray`
byte data to be parsed
Returns
------------
`list`
Returns a list containing message ID, message and bus number
"""
ret = []
for j in range(0, len(dat), 0x10):
ddat = dat[j:j+0x10]
f1, f2 = struct.unpack("II", ddat[0:8])
extended = 4
if f1 & extended:
address = f1 >> 3
else:
address = f1 >> 21
dddat = ddat[8:8+(f2&0xF)]
ret.append((address, f2>>16, dddat, (f2>>4)&0xFF))
return ret
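# Hedged worked example (synthetic record): for a standard (non-extended) frame with
# address 0x123, 8 data bytes, and bus 1, the 16-byte record packs
#   f1 = 0x123 << 21 and f2 = (1 << 4) | 8
# so parse_can_buffer returns [(0x123, 0, <8 data bytes>, 1)].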
``` |
{
"source": "jmscslgroup/catvehicle",
"score": 2
} |
#### File: catvehicle/src/rlpredict.py
```python
import rospy
import sys, getopt
import tensorflow as tf
print("Loaded Tensorflow version {}".format(tf.__version__))
import os, sys
import pandas as pd
import numpy as np
import pickle
from std_msgs.msg import String, Header, Float64
from geometry_msgs.msg import Twist, Pose, PoseStamped
from nav_msgs.msg import Path, Odometry
from geometry_msgs.msg import Point
from catvehicle import safe_accel
class rlpredict():
"""
Python class for rlprediction
Parameters
----------
ns: `str`
Nampespace of the robot model (i.e., AV)
policy_model: `tf.keras.Sequential`
RL-trained policy model
vf_model: `tf.keras.Sequenctial`
RL-trained value function model
ego_cmdvel_topic: `str`
What's the topic on which commanded velocity of ego vehicle is to be published
leadvel_topic: `str`
What's the topic on which leader's velocity is being published
relvel_topic: `str`
What's the topic on which relative velocity is being broadcasted
leaddist_topic: `str`
What's the topic on which leader's relative distance is being published
egovel_topic: `str`
What's the topic on which ego's velocity is being published
Attributes
-----------
ns: `str`
Nampespace of the robot model (i.e., AV)
policy_model: `tf.keras.Sequential`
RL-trained policy model
vf_model: `tf.keras.Sequenctial`
RL-trained value function model
ego_cmdvel_topic: `str`
What's the topic on which commanded velocity of ego vehicle is to be published
leadvel_topic: `str`
What's the topic on which leader's velocity is being published
relvel_topic: `str`
What's the topic on which relative velocity is being broadcasted
leaddist_topic: `str`
What's the topic on which leader's relative distance is being published
egovel_topic: `str`
What's the topic on which ego's velocity is being published
"""
def __init__(self, ns, policy_model, vf_model, ego_cmdvel_topic = None, leadvel_topic = None, relvel_topic = None, leaddist_topic = None, egovel_topic = None, headway_scale = 1000, speed_scale = 50):
# variables for holding data
self.leaddist = None
self.egovel = None
self.relvel = None
self.leadervel = None
self.leaderaccel = None
self.egoaccel = None
self.total_dist = 0
self.old_leaddist = None
self.old_egovel = None
self.old_leadervel = None
self.old_leaderaccel = None
self.old_egoaccel = None
self.old_cmdvel = 0
# last and current time
self.last_leadveltime = None
self.last_egoveltime = None
self.last_disttime = None
self.current_leadveltime = None
self.current_egoveltime = None
self.current_disttime = None
self.last_pubtime = None
self.current_pubtime = None
# new message flag
self.newlead_msg = False
self.newego_msg = False
self.newdist_msg = False
self.newrelvel_msg = False
self.first = False
self.policy_model = None
self.vf_model = None
if os.path.exists(policy_model):
self.policy_model = tf.keras.models.load_model(policy_model)
else:
rospy.logerror("Error reading filepath {} for policy model.")
if os.path.exists(vf_model):
self.vf_model = tf.keras.models.load_model(vf_model)
else:
rospy.logerror("Error reading filepath {} for value-function model.")
self.headway_scale = headway_scale
self.speed_scale = speed_scale
self.leadvel_topic = leadvel_topic
self.relvel_topic = relvel_topic
if ego_cmdvel_topic is not None:
self.ego_cmdvel_topic = ego_cmdvel_topic
else:
self.ego_cmdvel_topic = 'cmd_vel'
self.ego_cmdaccel_topic = 'accel'
if leaddist_topic is not None:
self.leaddist_topic = leaddist_topic
else:
self.leaddist_topic = 'distanceEstimatorSteeringBased/dist'
if egovel_topic is not None:
self.egovel_topic = egovel_topic
else:
self.egovel_topic = 'vel'
self.ns = ns
rospy.init_node("rl_predict", anonymous=True)
# publishers
self.egocmdvel_pub = rospy.Publisher(self.ego_cmdvel_topic, Twist, queue_size = 1)
self.egocmdaccel_pub = rospy.Publisher(self.ego_cmdaccel_topic, Twist, queue_size = 1)
# subscribers
# if leadvel topic is None, we will estimate leader's velocity from leaddist and ego velocity
if self.leadvel_topic is not None:
self.leadvel_sub = rospy.Subscriber(self.leadvel_topic, Twist, self.leadvel_cb )
# if relative vel topic is None, we will estimate relative velocity from leaddist
if self.relvel_topic is not None:
self.relvel_sub = rospy.Subscriber(self.relvel_topic, Twist, self.relvel_cb )
self.distance_sub = rospy.Subscriber(self.leaddist_topic, Float64, self.leaddist_cb)
self.egovel_sub = rospy.Subscriber(self.egovel_topic, Twist, self.egovel_cb)
def leadvel_cb(self, data):
"""
Call back function for leader's velocity subscriber
"""
self.old_leadervel = self.leadervel
self.leadervel = data.linear.x
# Assign current velocity time to the last velocity time before getting a new time
self.last_leadveltime = self.current_leadveltime
self.current_leadveltime = rospy.Time.now()
if (self.last_leadveltime is not None) and (self.current_leadveltime is not None):
duration = self.current_leadveltime - self.last_leadveltime
deltaT = duration.to_sec()
self.total_dist = self.total_dist + self.leadervel*deltaT
#self.total_dist = self.total_dist%50.0
if deltaT == 0.0:
self.newlead_msg = False
return
# Calculate instantaneous acceleration
self.leaderaccel = (self.leadervel -self.old_leadervel)/deltaT
self.newlead_msg = True
def egovel_cb(self, data):
"""
Call back function for ego's velocity subscriber
"""
self.old_egovel = self.egovel
self.egovel = data.linear.x
# Assign current velocity time to the last velocity time before getting a new time
self.last_egoveltime = self.current_egoveltime
self.current_egoveltime = rospy.Time.now()
if (self.last_egoveltime is not None) and (self.current_egoveltime is not None):
duration = self.current_egoveltime - self.last_egoveltime
deltaT = duration.to_sec()
if deltaT == 0.0:
self.newego_msg = False
return
# Calculate instantaneous acceleration
self.egoaccel = (self.egovel -self.old_egovel)/deltaT
self.newego_msg = True
def relvel_cb(self, data):
"""
Call back function for relative velocity subscriber
"""
self.relvel = data.linear.x
self.newrelvel_msg = True
def leaddist_cb(self, data):
"""
Call back function for relative distance subscriber
"""
self.leaddist = data.data
if self.relvel_topic is None:
self.old_leaddist = self.leaddist
# Assign current distance time to the last distance time before getting a new time
self.last_disttime = self.current_disttime
self.current_disttime = rospy.Time.now()
if (self.last_disttime is not None) and (self.current_disttime is not None):
duration = self.current_disttime - self.last_disttime
deltaT = duration.to_sec()
if deltaT == 0.0:
self.newdist_msg = False
return
# Calculate instantaneous acceleration
self.relvel = (self.leaddist -self.old_leaddist)/deltaT
self.newdist_msg = True
def publish(self):
"""
Publish function for rlpredict class
"""
new_vel = 0
if self.leadvel_topic is None:
self.leadervel = self.relvel + self.egovel  # estimate leader velocity as ego velocity plus relative velocity
if np.any([self.newdist_msg, self.newego_msg, self.newlead_msg, self.newrelvel_msg]):
rospy.loginfo("Input ego velocity: {}".format(self.egovel))
rospy.loginfo("Input leader velocity: {}".format(self.leadervel))
rospy.loginfo("Input lead distance: {}".format(self.leaddist))
rospy.loginfo("Total distance covered modulo 50 by ego: {}".format(self.total_dist))
state = np.array([[self.egovel/self.speed_scale, self.leaddist/self.headway_scale, self.leadervel/self.speed_scale, 0, (self.total_dist/50.0)%50]])
rospy.loginfo("Input state is {}".format(state))
policy_prediction = self.policy_model.predict(state)
value_prediction = self.vf_model.predict(state)
rospy.loginfo("Reward for current prediction is {}".format(value_prediction[0][0]))
rospy.loginfo("Mean acceleration from current prediction is {}".format(policy_prediction[0][0]))
rospy.loginfo("Log of standard deviation of acceleration from current prediction is {}".format(policy_prediction[0][1]))
new_accel = policy_prediction[0][0]
self.last_pubtime = self.current_pubtime
self.current_pubtime = rospy.Time.now()
if (self.last_pubtime is not None) and (self.current_pubtime is not None):
duration = self.current_pubtime - self.last_pubtime
deltaT = duration.to_sec()
if deltaT == 0.0:
return
if not self.first:
if new_accel < 0.0:
new_accel = 0.1
self.first = True
modified_accel = safe_accel(new_accel, self.egovel, self.leadervel, self.leaddist, deltaT)
#new_vel = self.old_cmdvel + new_accel*deltaT
new_vel = self.old_cmdvel + modified_accel*deltaT
if new_vel < 0.0001:
new_vel = 0
new_vel_msg = Twist()
rospy.loginfo("Predicted velocity for the ego is {}".format(new_vel))
new_vel_msg.linear.x = new_vel
new_vel_msg.linear.y = 0.0
new_vel_msg.linear.z = 0.0
new_vel_msg.angular.x = 0.0
new_vel_msg.angular.y = 0.0
new_vel_msg.angular.z = 0.0
self.egocmdvel_pub.publish(new_vel_msg)
new_accell_msg = Twist()
rospy.loginfo("Predicted velocity for the ego is {}".format(new_vel))
new_accell_msg.linear.x = modified_accel
new_accell_msg.linear.y = new_accel
new_accell_msg.linear.z = 0.0
new_accell_msg.angular.x = 0.0
new_accell_msg.angular.y = 0.0
new_accell_msg.angular.z = 0.0
self.egocmdaccel_pub.publish(new_accell_msg)
self.newdist_msg = False
self.newego_msg = False
self.newlead_msg = False
self.newrelvel_msg = False
self.old_cmdvel = new_vel
def main(argv):
ns = rospy.get_namespace() # Retrieve namespace this way appends '/' at the end as well
ns = ns[0:-1]
argv = argv[:-2]
policy_model = argv[0]
print("Policy model is {}".format(policy_model))
vf_model = argv[1]
print("VF model is {}".format(vf_model))
if len(argv) > 2:
distance_topic = argv[2]
print("distance_topic model is {}".format(distance_topic))
else:
distance_topic = None
if len(argv) > 3:
leadvel_topic = argv[3]
print("leadvel_topic model is {}".format(leadvel_topic))
else:
leadvel_topic = None
if len(argv) > 4:
relvel_topic = argv[4]
print("relvel_topic model is {}".format(relvel_topic))
else:
relvel_topic = None
node = rlpredict(ns, policy_model=policy_model, vf_model=vf_model, leadvel_topic=leadvel_topic, relvel_topic=relvel_topic, leaddist_topic = distance_topic)
rate= rospy.Rate(20)
while not rospy.is_shutdown():
if rospy.get_param("/execute", False):
node.publish()
rate.sleep()
if __name__ == '__main__':
main(sys.argv[1:])
``` |
{
"source": "jmscslgroup/privpurge",
"score": 3
} |
#### File: privpurge/privpurge/time_region.py
```python
import itertools
class time_region:
def __init__(self, start, end):
self.start = start
self.end = end
@classmethod
def create_many(cls, gpsdata, privregions):
intersects = [
any(map(lambda reg: reg.intersects((lat, lon)), privregions))
for lon, lat in zip(gpsdata.Long, gpsdata.Lat)
]
result = [
list(g)
for k, g in itertools.groupby(
zip(gpsdata.Gpstime.to_list(), intersects), key=lambda x: x[1]
)
]
result = [
time_region(l[0][0], l[-1][0] if len(l) > 1 else l[0][0])
for l in result
if l[0][1]
]
return result
def __repr__(self):
return f"{(self.start, self.end)}"
def __matmul__(self, df): # intersection with dataframes
return (self.start <= df) & (df <= self.end)
```
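A minimal usage sketch of `time_region` (not part of the repository): `create_many` only assumes a pandas-like GPS table with `Gpstime`, `Lat`, `Long` columns and zone objects exposing an `intersects((lat, lon))` method. The `circle_zone` class and the import path below are hypothetical stand-ins for privpurge's real zone types.

```python
import pandas as pd
from privpurge.time_region import time_region  # assumed import path

# Hypothetical stand-in for a privacy zone; only `intersects` is required.
class circle_zone:
    def __init__(self, lat, lon, radius_deg):
        self.lat, self.lon, self.radius_deg = lat, lon, radius_deg

    def intersects(self, point):
        lat, lon = point
        return (lat - self.lat) ** 2 + (lon - self.lon) ** 2 <= self.radius_deg ** 2

gps = pd.DataFrame({
    "Gpstime": [0.0, 1.0, 2.0, 3.0],
    "Lat": [32.20, 32.21, 32.40, 32.41],
    "Long": [-110.90, -110.91, -111.10, -111.11],
})

regions = time_region.create_many(gps, [circle_zone(32.205, -110.905, 0.02)])
print(regions)                   # [(0.0, 1.0)] -- rows 0 and 1 fall inside the zone
mask = regions[0] @ gps.Gpstime  # boolean Series selecting timestamps to purge
print(gps[~mask])                # rows that survive the purge
```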
#### File: privpurge/tests/show_drive.py
```python
import json
import folium
import folium.plugins
import tempfile
import os
import re
def plot_privpurge(message, outdir, filename=None):
if filename is None:
filename = "~map_" + next(tempfile._get_candidate_names()) + ".html"
my_map = folium.Map()
for fl in [
f for f in os.listdir(outdir) if os.path.isfile(os.path.join(outdir, f))
]:
if re.search(r".{37}_GPS_Messages.csv", fl):
with open(os.path.join(os.getcwd(), os.path.join(outdir, fl))) as f:
f.readline()
points = [
tuple(map(float, l.split(",")[2:4][::-1])) for l in f.readlines()
]
folium.vector_layers.PolyLine(points).add_to(my_map)
data = json.load(open(os.path.join(os.getcwd(), message)))
for region in data["regions"]:
if region["type"] == "circle":
lon, lat = region["data"]["center"]
rad = region["data"]["radius"]
folium.vector_layers.Circle(
location=[lat, lon], radius=rad, color="#3186cc", fill_color="#3186cc"
).add_to(my_map)
elif region["type"] == "polygon":
dat = [(lat, lon) for lon, lat in region["data"]]
folium.vector_layers.Polygon(
locations=dat, color="#3186cc", fill_color="#3186cc"
).add_to(my_map)
my_map.save(filename)
print(f"Map saved to: {filename}")
import argparse
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("directory")
parser.add_argument("zonefile")
parser.add_argument("-o", "--output")
return parser.parse_args()
if __name__ == "__main__":
cwd = os.getcwd()
args = get_args()
plot_privpurge(
os.path.join(cwd, args.zonefile),
os.path.join(cwd, args.directory),
filename=args.output,
)
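# Example command-line usage (illustrative paths):
#   python show_drive.py path/to/outdir path/to/zones.json -o drive_map.html
# This overlays every *_GPS_Messages.csv trajectory found in the output
# directory on a folium map together with the circle/polygon privacy zones
# described in the zone file; without -o, a random ~map_*.html name is used.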
``` |
{
"source": "jmscslgroup/rosbagpy",
"score": 2
} |
#### File: rosbagpy/bagpy/bagreader.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = "0.0.0" # this is set to actual version later
import sys
import ntpath
import os
import time
from io import BytesIO
import csv
import inspect
import rosbag
from std_msgs.msg import String, Header
from geometry_msgs.msg import Twist, Pose, PoseStamped
from nav_msgs.msg import Path, Odometry
from geometry_msgs.msg import Point, Twist
from sensor_msgs.msg import LaserScan
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sea
import pickle
from packaging import version
from pathlib import Path
version_src = ''
try:
import importlib.resources as pkg_resources
with pkg_resources.path('bagpy', 'version') as rsrc:
version_src = rsrc
except ImportError:
# Try backported to PY<37 `importlib_resources`.
print("Python older than 3.7 detected. ")
try:
import importlib_resources as pkg_resources
with pkg_resources.path('bagpy', 'version') as rsrc:
version_src = rsrc
except ImportError:
print("importlib_resources not found. Install backported importlib_resources through `pip install importlib-resources`")
try:
v = Path(version_src).open(encoding = "utf-8").read().splitlines()
except TypeError:
v = Path(str(version_src)).open(encoding = "utf-8").read().splitlines()
__version__ = v[0].strip()
def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):
"""This spwans a thread and runs the given function using the args, kwargs and
return the given default value if the timeout_duration is exceeded
"""
import threading
class InterruptableThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = default
def run(self):
try:
self.result = func(*args, **kwargs)
except:
pass
it = InterruptableThread()
it.start()
it.join(timeout_duration)
return it.result
def get_latest_bagpy_version():
from subprocess import check_output, CalledProcessError
try: # needs to work offline as well
result = check_output(["yolk", "-V", "bagpy"])
return result.split()[1].decode("utf-8")
except CalledProcessError:
return "0.0.0"
def check_for_latest_version():
latest_version = timeout(
get_latest_bagpy_version, timeout_duration=5, default="0.0.0"
)
if version.parse(__version__) < version.parse(latest_version):
import warnings
warnings.warn("{}\n{}\n{}\n{}\n{}\n{}".format(
"There is a newer version of bagpy available on PyPI:\n",
"Your version: \t",
__version__,
"Latest version: \t",
latest_version,
"Consider updating it by using command pip install --upgrade bagpy"
))
check_for_latest_version()
class bagreader:
'''
`bagreader` class provides an API to read rosbag files in an effective, easy manner without significant hassle.
This class is a reimplementation of its MATLAB equivalent that can be found at https://github.com/jmscslgroup/ROSBagReader
Parameters
----------------
bagfile: `string`
Bagreader constructor takes the name of a bag file as an argument. The name of the bag file can be provided as a fully qualified path, a relative path, or just the file name.
verbose: `bool`
If True, prints some relevant information. Default: `True`
tmp: `bool`
If True, creates directory in /tmp folder. Default: `False`
Attributes
--------------
bagfile: `string`
Full path of the bag file, e.g `/home/ece446/2019-08-21-22-00-00.bag`
filename: `string`
Name of the bag file, e.g. `2019-08-21-22-00-00.bag`
dir: `string`
Directory where bag file is located
reader: `rosbag.Bag`
rosbag.Bag object used to read and iterate over the bag file
topics: `list`
stores the names of the topics available in the bag file being read
n_messages: `list` of `integer`
stores the number of messages for each topic
message_types:`list`, `string`
stores all the available message types
datafolder: `string`
stores the path/folder where bag file is present - may be relative to the bag file or full-qualified path.
topic_table: `pandas.DataFrame`
A pandas DataFrame showing list of topics, their types, frequencies and message counts
E.g. If bag file is at `/home/ece446/2019-08-21-22-00-00.bag`, then datafolder is `/home/ece446/2019-08-21-22-00-00/`
message_dictionary: `dictionary`
message_dictionary will be a python dictionary to keep track of what datafile have been generated mapped by types
Example
---------
>>> b = bagreader('2020-03-01-23-52-11.bag')
'''
def __init__(self , bagfile , verbose=True , tmp = False):
self.bagfile = bagfile
slashindices = find(bagfile, '/')
# length of slashindices list will be zero if a user has passed only the bag file name, e.g. 2020-03-04-12-22-42.bag
if len(slashindices) > 0:
self.filename =bagfile[slashindices[-1]:]
self.dir = bagfile[slashindices[0]:slashindices[-1]]
else:
self.filename = bagfile
self.dir = './'
self.reader = rosbag.Bag(self.bagfile)
info = self.reader.get_type_and_topic_info()
self.topic_tuple = info.topics.values()
self.topics = info.topics.keys()
self.message_types = []
for t1 in self.topic_tuple: self.message_types.append(t1.msg_type)
self.n_messages = []
for t1 in self.topic_tuple: self.n_messages.append(t1.message_count)
self.frequency = []
for t1 in self.topic_tuple: self.frequency.append(t1.frequency)
self.topic_table = pd.DataFrame(list(zip(self.topics, self.message_types, self.n_messages, self.frequency)), columns=['Topics', 'Types', 'Message Count', 'Frequency'])
self.start_time = self.reader.get_start_time()
self.end_time = self.reader.get_end_time()
self.datafolder = bagfile[0:-4]
if tmp:
self.datafolder = '/tmp/' + bagfile.split('/')[-1][0:-4]
self.verbose = verbose
if os.path.exists(self.datafolder):
if self.verbose:
print("[INFO] Data folder {0} already exists. Not creating.".format(self.datafolder))
else:
try:
os.mkdir(self.datafolder)
except OSError:
print("[ERROR] Failed to create the data folder {0}.".format(self.datafolder))
else:
if self.verbose:
print("[INFO] Successfully created the data folder {0}.".format(self.datafolder))
def message_by_topic(self, topic):
'''
Class method `message_by_topic` to extract message from the ROS Bag by topic name `topic`
Parameters
---------------
topic: `str`
Topic from which to extract messages.
Returns
---------
`str`
The name of the csv file to which data from the `topic` has been extracted
Example
-----------
>>> b = bagreader('/home/ivory/CyverseData/ProjectSparkle/sparkle_n_1_update_rate_100.0_max_update_rate_100.0_time_step_0.01_logtime_30.0_2020-03-01-23-52-11.bag')
>>> msg_file = b.message_by_topic(topic='/catvehicle/vel')
'''
msg_list = []
tstart =None
tend = None
time = []
for topic, msg, t in self.reader.read_messages(topics=topic, start_time=tstart, end_time=tend):
time.append(t)
msg_list.append(msg)
msgs = msg_list
if len(msgs) == 0:
print("No data on the topic:{}".format(topic))
return None
# set column names from the slots
cols = ["Time"]
m0 = msgs[0]
slots = m0.__slots__
for s in slots:
v, s = slotvalues(m0, s)
if isinstance(v, tuple):
snew_array = []
p = list(range(0, len(v)))
snew_array = [s + "_" + str(pelem) for pelem in p]
s = snew_array
if isinstance(s, list):
for i, s1 in enumerate(s):
cols.append(s1)
else:
cols.append(s)
tempfile = self.datafolder + "/" + topic.replace("/", "-") + ".csv"
file_to_write = ntpath.dirname(tempfile) + '/' + ntpath.basename(tempfile)[1:]
if sys.hexversion >= 0x3000000:
opencall = open(file_to_write, "w", newline='')
else:
opencall = open(file_to_write, 'wb')
with opencall as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(cols) # write the header
for i, m in enumerate(msgs):
slots = m.__slots__
vals = []
vals.append(time[i].secs + time[i].nsecs*1e-9)
for s in slots:
v, s = slotvalues(m, s)
if isinstance(v, tuple):
snew_array = []
p = list(range(0, len(v)))
snew_array = [s + "_" + str(pelem) for pelem in p]
s = snew_array
if isinstance(s, list):
for i, s1 in enumerate(s):
vals.append(v[i])
else:
vals.append(v)
writer.writerow(vals)
return file_to_write
def laser_data(self, **kwargs):
'''
Class method `laser_data` extracts laser data from the given file, assuming laser data is of type `sensor_msgs/LaserScan`.
Parameters
-------------
kwargs
variable keyword arguments
Returns
---------
`list`
A list of strings. Each string will correspond to file path of CSV file that contains extracted data of laser scan type
Example
----------
>>> b = bagreader('/home/ivory/CyverseData/ProjectSparkle/sparkle_n_1_update_rate_100.0_max_update_rate_100.0_time_step_0.01_logtime_30.0_2020-03-01-23-52-11.bag')
>>> laserdatafile = b.laser_data()
>>> print(laserdatafile)
'''
tstart =None
tend = None
type_to_look ="sensor_msgs/LaserScan"
table_rows = self.topic_table[self.topic_table['Types']==type_to_look]
topics_to_read = table_rows['Topics'].values
message_counts = table_rows['Message Count'].values
column_names = ["Time",
"header.seq",
"header.frame_id",
"angle_min" ,
"angle_max",
"angle_increment",
"time_increment",
"scan_time",
"range_min",
"range_max"]
for p in range(0, 182):
column_names.append("ranges_" + str(p))
for p in range(0, 182):
column_names.append("intensities_" + str(p))
all_msg = []
csvlist = []
for i in range(len(table_rows)):
tempfile = self.datafolder + "/" + topics_to_read[i].replace("/", "-") + ".csv"
file_to_write = ntpath.dirname(tempfile) + '/' + ntpath.basename(tempfile)[1:]
#msg_list = [LaserScan() for count in range(message_counts[i])]
k = 0
if sys.hexversion >= 0x3000000:
opencall = open(file_to_write, "w", newline='')
else:
opencall = open(file_to_write, 'wb')
with opencall as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(column_names) # write the header
for topic, msg, t in self.reader.read_messages(topics=topics_to_read[i], start_time=tstart, end_time=tend):
#msg_list[k] = msg
new_row = [t.secs + t.nsecs*1e-9,
msg.header.seq,
msg.header.frame_id,
msg.angle_min,
msg.angle_max,
msg.angle_increment,
msg.time_increment,
msg.scan_time,
msg.range_min,
msg.range_max]
ranges = [None]*182
intensities = [None]*182
for ir, ran in enumerate(msg.ranges):
ranges[ir] = ran
for ir, ran in enumerate(msg.intensities):
intensities[ir] = ran
new_row = new_row + ranges
new_row = new_row + intensities
writer.writerow(new_row)
k = k + 1
csvlist.append(file_to_write)
return csvlist
def vel_data(self, **kwargs):
'''
Class method `vel_data` extracts velocity data from the given file, assuming velocity data is of type `geometry_msgs/Twist`.
Parameters
-------------
kwargs
variable keyword arguments
Returns
---------
`list`
A list of strings. Each string will correspond to file path of CSV file that contains extracted data of geometry_msgs/Twist type
Example
----------
>>> b = bagreader('/home/ivory/CyverseData/ProjectSparkle/sparkle_n_1_update_rate_100.0_max_update_rate_100.0_time_step_0.01_logtime_30.0_2020-03-01-23-52-11.bag')
>>> veldatafile = b.vel_data()
>>> print(veldatafile)
'''
tstart = None
tend = None
type_to_look ="geometry_msgs/Twist"
table_rows = self.topic_table[self.topic_table['Types']==type_to_look]
topics_to_read = table_rows['Topics'].values
message_counts = table_rows['Message Count'].values
column_names = ["Time",
"linear.x",
"linear.y",
"linear.z" ,
"angular.x",
"angular.y",
"angular.z"]
csvlist = []
for i in range(len(table_rows)):
tempfile = self.datafolder + "/" + topics_to_read[i].replace("/", "-") + ".csv"
file_to_write = ntpath.dirname(tempfile) + '/' + ntpath.basename(tempfile)[1:]
k = 0
if sys.hexversion >= 0x3000000:
opencall = open(file_to_write, "w", newline='')
else:
opencall = open(file_to_write, 'wb')
with opencall as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(column_names) # write the header
for topic, msg, t in self.reader.read_messages(topics=topics_to_read[i], start_time=tstart, end_time=tend):
new_row = [t.secs + t.nsecs*1e-9,
msg.linear.x,
msg.linear.y,
msg.linear.z,
msg.angular.x,
msg.angular.y,
msg.angular.z]
writer.writerow(new_row)
k = k + 1
csvlist.append(file_to_write)
return csvlist
def std_data(self, **kwargs):
'''
Class method `std_data` extracts standard-message data from the given file, assuming the data is of type `std_msgs/{Bool, Byte, Float32, Float64, Int16, Int32, Int8, UInt16, UInt32, UInt64, UInt8}` and is 1-dimensional.
Parameters
-------------
kwargs
variable keyword arguments
Returns
---------
`list`
A list of strings. Each string will correspond to file path of CSV file that contains extracted data of `std_msgs/{bool, byte, Float32, Float64, Int16, Int32, Int8, UInt16, UInt32, UInt64, UInt8}`
Example
----------
>>> b = bagreader('/home/ivory/CyverseData/ProjectSparkle/sparkle_n_1_update_rate_100.0_max_update_rate_100.0_time_step_0.01_logtime_30.0_2020-03-01-23-52-11.bag')
>>> stddatafile = b.std_data()
>>> print(stddatafile)
'''
tstart = None
tend = None
type_to_look =["std_msgs/Bool", "'std_msgs/Byte", "std_msgs/Float32", "std_msgs/Float64",
"std_msgs/Int8", "std_msgs/Int16", "std_msgs/Int32",
"std_msgs/Uint8", "std_msgs/Uint16", "std_msgs/Uint32"]
table_rows = self.topic_table[self.topic_table['Types'].isin(type_to_look)]
topics_to_read = table_rows['Topics'].values
message_counts = table_rows['Message Count'].values
column_names = ["Time", "data"]
csvlist = []
for i in range(len(table_rows)):
tempfile = self.datafolder + "/" + topics_to_read[i].replace("/", "-") + ".csv"
file_to_write = ntpath.dirname(tempfile) + '/' + ntpath.basename(tempfile)[1:]
k = 0
if sys.hexversion >= 0x3000000:
opencall = open(file_to_write, "w", newline='')
else:
opencall = open(file_to_write, 'wb')
with opencall as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(column_names) # write the header
for topic, msg, t in self.reader.read_messages(topics=topics_to_read[i], start_time=tstart, end_time=tend):
new_row = [t.secs + t.nsecs*1e-9,
msg.data]
writer.writerow(new_row)
k = k + 1
csvlist.append(file_to_write)
return csvlist
def compressed_images(self, **kwargs):
raise NotImplementedError("To be implemented")
def odometry_data(self, **kwargs):
'''
Class method `odometry_data` extracts odometry data from the given file, assuming odometry data is of type `nav_msgs/Odometry`.
Parameters
-------------
kwargs
variable keyword arguments
Returns
---------
`list`
A list of strings. Each string will correspond to file path of CSV file that contains extracted data of nav_msgs/Odometry type
Example
----------
>>> b = bagreader('/home/ivory/CyverseData/ProjectSparkle/sparkle_n_1_update_rate_100.0_max_update_rate_100.0_time_step_0.01_logtime_30.0_2020-03-01-23-52-11.bag')
>>> odomdatafile = b.odometry_data()
>>> print(odomdatafile)
'''
tstart = None
tend = None
type_to_look ="nav_msgs/Odometry"
table_rows = self.topic_table[self.topic_table['Types']==type_to_look]
topics_to_read = table_rows['Topics'].values
message_counts = table_rows['Message Count'].values
column_names = ["Time",
"header.seq",
"header.frame_id",
"child_frame_id",
"pose.x" ,
"pose.y",
"pose.z",
"orientation.x",
"orientation.y",
"orientation.z",
"orientation.w",
"linear.x",
"linear.y",
"linear.z",
"angular.x",
"angular.y",
"angular.z"]
csvlist = []
for i in range(len(table_rows)):
tempfile = self.datafolder + "/" + topics_to_read[i].replace("/", "-") + ".csv"
file_to_write = ntpath.dirname(tempfile) + '/' + ntpath.basename(tempfile)[1:]
k = 0
if sys.hexversion >= 0x3000000:
opencall = open(file_to_write, "w", newline='')
else:
opencall = open(file_to_write, 'wb')
with opencall as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(column_names) # write the header
for topic, msg, t in self.reader.read_messages(topics=topics_to_read[i], start_time=tstart, end_time=tend):
new_row = [t.secs + t.nsecs*1e-9,
msg.header.seq,
msg.header.frame_id,
msg.child_frame_id,
msg.pose.pose.position.x,
msg.pose.pose.position.y,
msg.pose.pose.position.z,
msg.pose.pose.orientation.x,
msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z,
msg.pose.pose.orientation.w,
msg.twist.twist.linear.x,
msg.twist.twist.linear.y,
msg.twist.twist.linear.z,
msg.twist.twist.angular.x,
msg.twist.twist.angular.y,
msg.twist.twist.angular.z]
writer.writerow(new_row)
k = k + 1
csvlist.append(file_to_write)
return csvlist
def wrench_data(self, **kwargs):
'''
Class method `wrench_data` extracts wrench data from the given file, assuming wrench data is of type `geometry_msgs/Wrench`.
Parameters
-------------
kwargs
variable keyword arguments
Returns
---------
`list`
A list of strings. Each string will correspond to file path of CSV file that contains extracted data of geometry_msgs/Wrench type
Example
----------
>>> b = bagreader('/home/ivory/CyverseData/ProjectSparkle/sparkle_n_1_update_rate_100.0_max_update_rate_100.0_time_step_0.01_logtime_30.0_2020-03-01-23-52-11.bag')
>>> wrenchdatafile = b.wrench_data()
>>> print(wrenchdatafile)
'''
tstart = None
tend = None
type_to_look ="geometry_msgs/Wrench"
table_rows = self.topic_table[self.topic_table['Types']==type_to_look]
topics_to_read = table_rows['Topics'].values
message_counts = table_rows['Message Count'].values
column_names = ["Time",
"force.x",
"force.y",
"force.z" ,
"torque.x",
"torque.y",
"torque.z"]
csvlist = []
for i in range(len(table_rows)):
tempfile = self.datafolder + "/" + topics_to_read[i].replace("/", "-") + ".csv"
file_to_write = ntpath.dirname(tempfile) + '/' + ntpath.basename(tempfile)[1:]
k = 0
if sys.hexversion >= 0x3000000:
opencall = open(file_to_write, "w", newline='')
else:
opencall = open(file_to_write, 'wb')
with opencall as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(column_names) # write the header
for topic, msg, t in self.reader.read_messages(topics=topics_to_read[i], start_time=tstart, end_time=tend):
new_row = [t.secs + t.nsecs*1e-9,
msg.force.x,
msg.force.y,
msg.force.z,
msg.torque.x,
msg.torque.y,
msg.torque.z]
writer.writerow(new_row)
k = k + 1
csvlist.append(file_to_write)
return csvlist
def clock_data(self, **kwargs):
'''
Class method `clock_data` extracts clock data from the given file, assuming the data is of type `rosgraph_msgs/Clock`.
Parameters
-------------
kwargs
variable keyword arguments
Returns
---------
`list`
A list of strings. Each string will correspond to file path of CSV file that contains extracted data of rosgraph_msgs/Clock type
Example
----------
>>> b = bagreader('/home/ivory/CyverseData/ProjectSparkle/sparkle_n_1_update_rate_100.0_max_update_rate_100.0_time_step_0.01_logtime_30.0_2020-03-01-23-52-11.bag')
>>> clockdatafile = b.clock_data()
>>> print(clockdatafile)
'''
tstart = None
tend = None
type_to_look ="rosgraph_msgs/Clock"
table_rows = self.topic_table[self.topic_table['Types']==type_to_look]
topics_to_read = table_rows['Topics'].values
message_counts = table_rows['Message Count'].values
column_names = ["Time",
"clock.secs",
"clock.nsecs"]
csvlist = []
for i in range(len(table_rows)):
tempfile = self.datafolder + "/" + topics_to_read[i].replace("/", "-") + ".csv"
file_to_write = ntpath.dirname(tempfile) + '/' + ntpath.basename(tempfile)[1:]
k = 0
if sys.hexversion >= 0x3000000:
opencall = open(file_to_write, "w", newline='')
else:
opencall = open(file_to_write, 'wb')
with opencall as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(column_names) # write the header
for topic, msg, t in self.reader.read_messages(topics=topics_to_read[i], start_time=tstart, end_time=tend):
new_row = [t.secs + t.nsecs*1e-9,
msg.clock.secs,
msg.clock.nsecs]
writer.writerow(new_row)
k = k + 1
csvlist.append(file_to_write)
return csvlist
def pointcloud_data(self, **kwargs):
raise NotImplementedError("To be implemented")
def plot_vel(self, save_fig = False):
'''
`plot_vel` plots the timeseries velocity data
Parameters
-------------
save_fig: `bool`
If `True` figures are saved in the data directory.
'''
import IPython
shell_type = IPython.get_ipython().__class__.__name__
if shell_type == 'ZMQInteractiveShell':
IPython.get_ipython().run_line_magic('matplotlib', 'inline')
csvfiles = self.vel_data()
dataframes = [None]*len(csvfiles)
# read the csvfiles into pandas dataframe
for i, csv in enumerate(csvfiles):
df = pd.read_csv(csv)
dataframes[i] = df
fig, axs = create_fig(len(csvfiles))
for i, df in enumerate(dataframes):
axs[i].scatter(x = 'Time', y='linear.x', data=df, marker='D', linewidth=0.3, s = 9, color="#2E7473")
axs[i].scatter(x = 'Time', y='linear.y', data=df, marker='s', linewidth=0.3, s = 9, color="#EE5964")
axs[i].scatter(x = 'Time', y='linear.z', data=df, marker='p', linewidth=0.3, s = 9, color="#ED9858")
axs[i].scatter(x = 'Time', y='angular.x', data=df, marker='P', linewidth=0.3, s = 9, color="#1c54b2")
axs[i].scatter(x = 'Time', y='angular.y', data=df, marker='*', linewidth=0.3, s = 9, color="#004F4A")
axs[i].scatter(x = 'Time', y='angular.z', data=df, marker='8', linewidth=0.3, s = 9, color="#4F4A00")
axs[i].legend(df.columns.values[1:])
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
axs[i].set_title(ntpath.basename(csvfiles[i]), fontsize=16)
axs[i].set_xlabel('Time', fontsize=14)
axs[i].set_ylabel('Messages', fontsize=14)
else:
axs[i].set_title(ntpath.basename(csvfiles[i]), fontsize=12)
axs[i].set_xlabel('Time', fontsize=10)
axs[i].set_ylabel('Messages', fontsize=10)
fig.tight_layout()
suffix = ''
if len(self.datafolder) < 100:
suffix = '\n' + self.datafolder
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
fig.suptitle("Velocity Timeseries Plot"+suffix, fontsize = 14, y = 1.02)
else:
fig.suptitle("Velocity Timeseries Plot"+suffix, fontsize = 10, y = 1.02)
if save_fig:
current_fig = plt.gcf()
fileToSave = self.datafolder + "/" + _get_func_name()
with open(fileToSave + ".pickle", 'wb') as f:
pickle.dump(fig, f)
current_fig.savefig(fileToSave + ".pdf", dpi = 100)
current_fig.savefig(fileToSave + ".png", dpi = 100)
plt.show()
def plot_std(self, save_fig = False):
'''
`plot_std` plots the timeseries of standard messages such as `std_msgs/{Bool, Byte, Float32, Float64, Int16, Int32, Int8, UInt16, UInt32, UInt64, UInt8}` of 1-dimension
Parameters
-------------
save_fig: `bool`
If `True` figures are saved in the data directory.
'''
import IPython
shell_type = IPython.get_ipython().__class__.__name__
if shell_type == 'ZMQInteractiveShell':
IPython.get_ipython().run_line_magic('matplotlib', 'inline')
csvfiles = self.std_data()
dataframes = [None]*len(csvfiles)
# read the csvfiles into pandas dataframe
for i, csv in enumerate(csvfiles):
df = pd.read_csv(csv)
dataframes[i] = df
if len(csvfiles) == 0:
print("No standard data found")
return
fig, axs = create_fig(len(csvfiles))
for i, df in enumerate(dataframes):
axs[i].scatter(x = 'Time', y='data', data=df, marker='D', linewidth=0.3, s = 9, color="#2E7473")
axs[i].legend(df.columns.values[1:])
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
axs[i].set_title(ntpath.basename(csvfiles[i]), fontsize=16)
axs[i].set_xlabel('Time', fontsize=14)
axs[i].set_ylabel('Messages', fontsize=14)
else:
axs[i].set_title(ntpath.basename(csvfiles[i]), fontsize=12)
axs[i].set_xlabel('Time', fontsize=10)
axs[i].set_ylabel('Messages', fontsize=10)
suffix = ''
if len(self.datafolder) < 100:
suffix = '\n' + self.datafolder
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
fig.suptitle("Standard Messages Timeseries Plot"+suffix, fontsize = 14, y = 1.02)
else:
fig.suptitle("Standard Messages Timeseries Plot"+suffix, fontsize = 10, y = 1.02)
fig.tight_layout()
if save_fig:
current_fig = plt.gcf()
fileToSave = self.datafolder + "/" + _get_func_name()
with open(fileToSave + ".pickle", 'wb') as f:
pickle.dump(fig, f)
current_fig.savefig(fileToSave + ".pdf", dpi = 300)
plt.show()
def plot_odometry(self, save_fig = False):
'''
`plot_odometry` plots the timeseries odometry data
Parameters
-------------
save_fig: `bool`
If `True` figures are saved in the data directory.
'''
import IPython
shell_type = IPython.get_ipython().__class__.__name__
if shell_type == 'ZMQInteractiveShell':
IPython.get_ipython().run_line_magic('matplotlib', 'inline')
csvfiles = self.odometry_data()
dataframes = [None]*len(csvfiles)
# read the csvfiles into pandas dataframe
for i, csv in enumerate(csvfiles):
df = pd.read_csv(csv)
dataframes[i] = df
fig, axs = create_fig(len(csvfiles))
for i, df in enumerate(dataframes):
axs[i].scatter(x = 'Time', y='pose.x', data=df, marker='D', linewidth=0.3,s = 9, color="#2E7473")
axs[i].scatter(x = 'Time', y='pose.y', data=df, marker='D', linewidth=0.3, s = 9, color="#EE5964")
axs[i].scatter(x = 'Time', y='pose.z', data=df, marker='D', linewidth=0.3, s = 9, color="#ED9858")
axs[i].scatter(x = 'Time', y='orientation.x', data=df, marker='*', linewidth=0.3, s = 9, color="#1c54b2")
axs[i].scatter(x = 'Time', y='orientation.y', data=df, marker='*', linewidth=0.3, s = 9, color="#004F4A")
axs[i].scatter(x = 'Time', y='orientation.z', data=df, marker='8', linewidth=0.3, s = 9, color="#4F4A00")
axs[i].scatter(x = 'Time', y='orientation.w', data=df, marker='8', linewidth=0.3, s = 9, color="#004d40")
axs[i].scatter(x = 'Time', y='linear.x', data=df, marker='s', linewidth=0.3, s = 9, color="#ba68c8")
axs[i].scatter(x = 'Time', y='linear.y', data=df, marker='s', linewidth=0.3, s = 9, color="#2C0C32")
axs[i].scatter(x = 'Time', y='linear.z', data=df, marker='P', linewidth=0.3, s = 9, color="#966851")
axs[i].scatter(x = 'Time', y='angular.x', data=df, marker='P', linewidth=0.3, s = 9, color="#517F96")
axs[i].scatter(x = 'Time', y='angular.y', data=df, marker='p', linewidth=0.3, s = 9, color="#B3C1FC")
axs[i].scatter(x = 'Time', y='angular.z', data=df, marker='p', linewidth=0.3, s = 9, color="#FCEFB3")
axs[i].legend(df.columns.values[4:])
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
axs[i].set_title(ntpath.basename(csvfiles[i]), fontsize=16)
axs[i].set_xlabel('Time', fontsize=14)
axs[i].set_ylabel('Messages', fontsize=14)
else:
axs[i].set_title(ntpath.basename(csvfiles[i]), fontsize=12)
axs[i].set_xlabel('Time', fontsize=10)
axs[i].set_ylabel('Messages', fontsize=10)
suffix = ''
if len(self.datafolder) < 100:
suffix = '\n' + self.datafolder
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
fig.suptitle("Odometry Timeseries Plot"+suffix, fontsize = 14, y = 1.02)
else:
fig.suptitle("Odometry Timeseries Plot"+suffix, fontsize = 10, y = 1.02)
fig.tight_layout()
if save_fig:
current_fig = plt.gcf()
fileToSave = self.datafolder + "/" + _get_func_name()
with open(fileToSave + ".pickle", 'wb') as f:
pickle.dump(fig, f)
current_fig.savefig(fileToSave + ".pdf", dpi = 100)
current_fig.savefig(fileToSave + ".png", dpi = 100)
plt.show()
def plot_wrench(self, save_fig = False):
'''
`plot_wrench` plots the timeseries wrench data
Parameters
-------------
save_fig: `bool`
If `True` figures are saved in the data directory.
'''
import IPython
shell_type = IPython.get_ipython().__class__.__name__
if shell_type == 'ZMQInteractiveShell':
IPython.get_ipython().run_line_magic('matplotlib', 'inline')
csvfiles = self.wrench_data()
dataframes = [None]*len(csvfiles)
# read the csvfiles into pandas dataframe
for i, csv in enumerate(csvfiles):
df = pd.read_csv(csv)
dataframes[i] = df
fig, axs = create_fig(len(csvfiles))
for i, df in enumerate(dataframes):
axs[i].scatter(x = 'Time', y='force.x', data=df, marker='D', linewidth=0.3, s = 9, color="#2E7473")
axs[i].scatter(x = 'Time', y='force.y', data=df, marker='s', linewidth=0.3, s = 9, color="#EE5964")
axs[i].scatter(x = 'Time', y='force.z', data=df, marker='*', linewidth=0.3, s = 9, color="#ED9858")
axs[i].scatter(x = 'Time', y='torque.x', data=df, marker='P', linewidth=0.3, s = 9, color="#1c54b2")
axs[i].scatter(x = 'Time', y='torque.y', data=df, marker='p', linewidth=0.3, s = 9, color="#004F4A")
axs[i].scatter(x = 'Time', y='torque.z', data=df, marker='8', linewidth=0.3, s = 9, color="#4F4A00")
axs[i].legend(df.columns.values[1:])
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
axs[i].set_title(ntpath.basename(csvfiles[i]), fontsize=16)
axs[i].set_xlabel('Time', fontsize=14)
axs[i].set_ylabel('Messages', fontsize=14)
else:
axs[i].set_title(ntpath.basename(csvfiles[i]), fontsize=12)
axs[i].set_xlabel('Time', fontsize=10)
axs[i].set_ylabel('Messages', fontsize=10)
suffix = ''
if len(self.datafolder) < 100:
suffix = '\n' + self.datafolder
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
fig.suptitle("Wrench Timeseries Plot"+suffix, fontsize = 14, y = 1.02)
else:
fig.suptitle("Wrench Timeseries Plot"+suffix, fontsize = 10, y = 1.02)
fig.tight_layout()
if save_fig:
current_fig = plt.gcf()
fileToSave = self.datafolder + "/" + _get_func_name()
with open(fileToSave + ".pickle", 'wb') as f:
pickle.dump(fig, f)
current_fig.savefig(fileToSave + ".pdf", dpi = 300)
plt.show()
def animate_laser(self):
raise NotImplementedError("To be implemented")
def animate_pointcloud(self):
raise NotImplementedError("To be implemented")
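# A short usage sketch (illustrative; the bag path and topic name are hypothetical):
#   b = bagreader('2020-03-01-23-52-11.bag')
#   print(b.topic_table)                            # topics, types, counts, frequencies
#   csvfile = b.message_by_topic('/catvehicle/vel')
#   df = pd.read_csv(csvfile)
#   df = timeindex(df)                              # index by timestamp, see helper below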
def slotvalues(m, slot):
vals = getattr(m, slot)
try:
slots = vals.__slots__
varray = []
sarray = []
for s in slots:
vnew, snew = slotvalues(vals, s)
if isinstance(snew, list):
for i, snn in enumerate(snew):
sarray.append(slot + '.' + snn)
varray.append(vnew[i])
elif isinstance(snew, str):
sarray.append(slot + '.' + snew)
varray.append(vnew)
return varray, sarray
except AttributeError:
return vals, slot
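# Note: `slotvalues` recursively flattens a ROS message into parallel value/name
# lists. For example (illustrative), for a geometry_msgs/Twist message `m`,
# slotvalues(m, 'linear') returns ([x, y, z], ['linear.x', 'linear.y', 'linear.z']),
# which message_by_topic uses to build the CSV header and rows.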
def _get_func_name():
return inspect.stack()[1][3]
def animate_timeseries(time, message, **kwargs):
'''
`animate_timeseries` animates time series data. Time and Message pandas Series of equal length are expected.
Parameters
----------
time: `pandas.core.series.Series`
Time Vector in the form of Pandas Timeseries
message: `pandas.core.series.Series`
Message Vector in the form of Pandas Timeseries
kwargs: variable keyword arguments
title: `str`
Title of the plot. By Default, it is `Timeseries Plot`
'''
import IPython
shell_type = IPython.get_ipython().__class__.__name__
assert (len(time) == len(message)), ("Time and Message Vector must be of same length. Current Length of Time Vector: {0}, Current Length of Message Vector: {1}".format(len(time), len(message)))
plot_title = 'Timeseries Plot'
try:
plot_title = kwargs["title"]
except KeyError as e:
pass
fig, ax = create_fig(1)
ax = ax[0]
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = [15, 10]
plt.rcParams['font.size'] = 16.0
plt.rcParams['legend.fontsize'] = 14.0
plt.rcParams['xtick.labelsize'] = 14.0
plt.rcParams['ytick.labelsize'] = 14.0
plt.rcParams['legend.markerscale'] = 2.0
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
if shell_type == 'ZMQInteractiveShell':
IPython.get_ipython().run_line_magic('matplotlib', 'inline')
print('Warning: Animation is being executed in IPython/Jupyter Notebook. Animation may not be real-time.')
l, = ax.plot([np.min(time),np.max(time)],[np.min(message),np.max(message)], alpha=0.6,
marker='o', markersize=5, linewidth=0, markerfacecolor='#275E56')
def animate(i):
l.set_data(time[:i], message[:i])
ax.set_xlabel('Time', fontsize=15)
ax.set_ylabel('Message', fontsize=15)
ax.set_title(plot_title, fontsize=16)
for index in range(len(message)-1):
animate(index)
IPython.display.clear_output(wait=True)
IPython.display.display(fig)
plt.pause(time[index + 1] - time[index])
else:
for index in range(0, len(message)-1):
ax.clear()
if index < 500:
sea.lineplot(time[:index], message[:index], linewidth=2.0, color="#275E56")
else:
sea.lineplot(time[index - 500:index], message[index - 500:index], linewidth=2.0, color="#275E56")
ax.set_title(plot_title, fontsize=16)
ax.set_xlabel('Time', fontsize=15)
ax.set_ylabel('Message', fontsize=15)
plt.draw()
plt.pause(time[index + 1] - time[index])
def find(s, ch):
'''
Function `find` returns the indices of all occurrences of `ch` in `s`
Parameters
-------------
s: `string`
String or a sentence in which to search for occurrences of the character `ch`
ch: `char`
Character to look for
Returns
---------
`list`
List of indices of occurrences of character `ch` in the string `s`.
'''
return [i for i, ltr in enumerate(s) if ltr == ch]
def timeindex(df, inplace=False):
'''
Convert a dataframe, of which one column must be 'Time', to a pandas-compatible timeseries where the timestamp replaces the integer indices
Parameters
--------------
df: `pandas.DataFrame`
A pandas dataframe that contains a column named "Time"
inplace: `bool`
Modifies the actual dataframe, if true, otherwise doesn't.
Returns
-----------
`pandas.DataFrame`
Pandas-compatible timeseries where the indices are human-readable timestamps taken from the "Time" column.
'''
if inplace:
newdf = df
else:
newdf =df.copy(deep = True)
newdf['Time'] = df['Time']
Time = pd.to_datetime(newdf['Time'], unit='s')
newdf['Clock'] = pd.DatetimeIndex(Time)
if inplace:
newdf.set_index('Clock', inplace=inplace)
else:
newdf = newdf.set_index('Clock')
return newdf
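# Example (illustrative): convert an extracted topic CSV into a time-indexed
# dataframe and resample it with pandas, e.g. to a 10 Hz mean:
#   df = pd.read_csv(b.message_by_topic('/catvehicle/vel'))
#   ts = timeindex(df)
#   ts_10hz = ts.resample('100ms').mean()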
def _setplots(**kwargs):
import IPython
shell_type = IPython.get_ipython().__class__.__name__
ncols = 1
nrows= 1
if kwargs.get('ncols'):
ncols = kwargs['ncols']
if kwargs.get('nrows'):
nrows = kwargs['nrows']
if shell_type in ['ZMQInteractiveShell', 'TerminalInteractiveShell']:
plt.style.use('default')
plt.rcParams['figure.figsize'] = [12*ncols, 6*nrows]
plt.rcParams['font.size'] = 22.0 + 3*(ncols-1)
plt.rcParams["font.family"] = "serif"
plt.rcParams["mathtext.fontset"] = "dejavuserif"
plt.rcParams['figure.facecolor'] = '#ffffff'
#plt.rcParams[ 'font.family'] = 'Roboto'
#plt.rcParams['font.weight'] = 'bold'
plt.rcParams['xtick.color'] = '#01071f'
plt.rcParams['xtick.minor.visible'] = True
plt.rcParams['ytick.minor.visible'] = True
plt.rcParams['xtick.labelsize'] = 16 + 2*(ncols-1)
plt.rcParams['ytick.labelsize'] = 16 + 2*(ncols-1)
plt.rcParams['ytick.color'] = '#01071f'
plt.rcParams['axes.labelcolor'] = '#000000'
plt.rcParams['text.color'] = '#000000'
plt.rcParams['axes.labelcolor'] = '#000000'
plt.rcParams['grid.color'] = '#f0f1f5'
plt.rcParams['axes.labelsize'] = 20+ 3*(ncols-1)
plt.rcParams['axes.titlesize'] = 25+ 3*(ncols-1)
#plt.rcParams['axes.labelweight'] = 'bold'
#plt.rcParams['axes.titleweight'] = 'bold'
plt.rcParams["figure.titlesize"] = 30.0 + 4*(ncols-1)
#plt.rcParams["figure.titleweight"] = 'bold'
plt.rcParams['legend.markerscale'] = 2.0
plt.rcParams['legend.fontsize'] = 10.0 + 3*(ncols-1)
plt.rcParams["legend.framealpha"] = 0.5
else:
plt.style.use('default')
plt.rcParams['figure.figsize'] = [18*ncols, 6*nrows]
plt.rcParams["font.family"] = "serif"
plt.rcParams["mathtext.fontset"] = "dejavuserif"
plt.rcParams['font.size'] = 12.0
plt.rcParams['figure.facecolor'] = '#ffffff'
#plt.rcParams[ 'font.family'] = 'Roboto'
#plt.rcParams['font.weight'] = 'bold'
plt.rcParams['xtick.color'] = '#01071f'
plt.rcParams['xtick.minor.visible'] = True
plt.rcParams['ytick.minor.visible'] = True
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
plt.rcParams['ytick.color'] = '#01071f'
plt.rcParams['axes.labelcolor'] = '#000000'
plt.rcParams['text.color'] = '#000000'
plt.rcParams['axes.labelcolor'] = '#000000'
plt.rcParams['grid.color'] = '#f0f1f5'
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.titlesize'] = 10
#plt.rcParams['axes.labelweight'] = 'bold'
#plt.rcParams['axes.titleweight'] = 'bold'
plt.rcParams["figure.titlesize"] = 24.0
#plt.rcParams["figure.titleweight"] = 'bold'
plt.rcParams['legend.markerscale'] = 1.0
plt.rcParams['legend.fontsize'] = 8.0
plt.rcParams["legend.framealpha"] = 0.5
def create_fig(num_of_subplots=1, **kwargs):
import IPython
shell_type = IPython.get_ipython().__class__.__name__
nrows = num_of_subplots
ncols = 1
if kwargs.get('ncols'):
ncols = kwargs['ncols']
if kwargs.get('nrows'):
nrows = kwargs['nrows']
_setplots(ncols=ncols, nrows=nrows)
fig, ax = plt.subplots(ncols=ncols, nrows=nrows)
if nrows == 1 and ncols == 1:
ax_ = []
ax_.append(ax)
ax = ax_
else:
ax = ax.ravel()
if sys.hexversion >= 0x3000000:
for a in ax:
a.minorticks_on()
a.grid(which='major', linestyle='-', linewidth='0.25', color='dimgray')
a.grid(which='minor', linestyle=':', linewidth='0.25', color='dimgray')
a.patch.set_facecolor('#fafafa')
a.spines['bottom'].set_color('#161616')
a.spines['top'].set_color('#161616')
a.spines['right'].set_color('#161616')
a.spines['left'].set_color('#161616')
else:
for a in ax:
a.minorticks_on()
a.grid(True, which='both')
fig.tight_layout(pad=0.3*nrows)
return fig, ax
def set_colorbar(fig, ax, im, label):
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
axins1 = inset_axes(ax,
width="50%", # width = 50% of parent_bbox width
height="3%", # height : 5%
loc='upper right')
cbr = fig.colorbar(im, ax=ax, cax=axins1, orientation="horizontal")
cbr.set_label(label, fontsize = 20)
return cbr
``` |