metadata (dict) | text (string, 60 to 3.49M chars)
---|---|
{
"source": "jjanssen/django-cms-timetravel",
"score": 2
} |
#### File: cms_timetravel/managers/pages.py
```python
import logging
import warnings
from django.conf import settings
from django.db.models import Q
from cms.models.query import PageQuerySet
from ..utils import get_timetravel_date
def published(self, site=None):
"""
Get all published items from Django CMS Pages and
filter them according to the actual Timetravel date.
"""
pub = self.on_site(site).filter(published=True)
ref_date = get_timetravel_date()
if settings.CMS_SHOW_START_DATE:
pub = pub.filter(
Q(publication_date__lt=ref_date) |
Q(publication_date__isnull=True)
)
if settings.CMS_SHOW_END_DATE:
pub = pub.filter(
Q(publication_end_date__gte=ref_date) |
Q(publication_end_date__isnull=True)
)
if settings.CMS_SHOW_START_DATE or settings.CMS_SHOW_END_DATE:
logging.debug('Retrieving CMS Published pages with date {0}'.format(ref_date))
else:
warnings.warn("You must 'CMS_SHOW_START_DATE' or 'CMS_SHOW_END_DATE' to True, otherwise there isn't much to timetravel on.")
return pub
def expired(self):
"""
Get all expired items from Django CMS Pages and
filter them according to the actual Timetravel date.
"""
ref_date = get_timetravel_date()
logging.debug('Retrieving CMS Expired pages with date {0}'.format(ref_date))
return self.on_site().filter(publication_end_date__lte=ref_date)
PageQuerySet.published = published
PageQuerySet.expired = expired
``` |
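A minimal usage sketch of the patched queryset methods is shown below. It assumes the stock django-cms `PageManager` proxies `published()`/`expired()` through `PageQuerySet`, which is what lets the monkey-patch above take effect; treat it as an illustration rather than documented API.
```python
# Hedged usage sketch: assumes Page.objects delegates to the patched PageQuerySet.
from cms.models import Page

def timetravel_overview():
    visible = Page.objects.published()   # pages live at the timetravel reference date
    gone = Page.objects.expired()        # pages whose publication_end_date has passed
    return visible.count(), gone.count()
```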
{
"source": "jjanssen/django-p3p",
"score": 2
} |
#### File: django-p3p/p3p/middleware.py
```python
from django.template.loader import render_to_string
class P3PMiddleware(object):
def process_response(self, request, response):
response['P3P'] = render_to_string('p3p/headers.txt').strip()
return response
```
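As a usage note, `P3PMiddleware` is written against the old-style (pre-Django 1.10) middleware API; a hedged sketch of registering it in a project's settings follows. On Django 1.10+ the class would additionally need `django.utils.deprecation.MiddlewareMixin` and the `MIDDLEWARE` setting.
```python
# Hedged settings sketch for the old-style middleware API shown above.
MIDDLEWARE_CLASSES = [
    'django.middleware.common.CommonMiddleware',
    'p3p.middleware.P3PMiddleware',   # adds the P3P header to every response
]
```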
#### File: p3p/tests/test_middleware.py
```python
from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase
from ..middleware import P3PMiddleware
class P3PMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
def test_p3p_response_header(self):
request = self.rf.get('/')
response = HttpResponse("Example response")
self.assertEqual(
P3PMiddleware().process_response(request, response),
response
)
self.assertTrue(response.has_header('P3P'))
self.assertEqual(response.get('P3P'), 'CP=""')
```
#### File: jjanssen/django-p3p/runtests.py
```python
import sys
import django
import warnings
from django.conf import settings
from django.core.management import execute_from_command_line
if not settings.configured:
settings.configure(
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.sessions',
'p3p'
),
ROOT_URLCONF='p3p.tests.urls',
TEST_RUNNER='django.test.runner.DiscoverRunner',
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
]
)
def runtests():
# Don't ignore DeprecationWarnings
warnings.simplefilter('default', DeprecationWarning)
argv = sys.argv[:1] + ['test', 'p3p'] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == '__main__':
runtests()
``` |
{
"source": "jjanssen/wagtail-grapple",
"score": 2
} |
#### File: grapple/types/streamfield.py
```python
import json
import graphene
import wagtail
import inspect
import wagtail.documents.blocks
import wagtail.embeds.blocks
import wagtail.images.blocks
import wagtail.snippets.blocks
from django.conf import settings
from graphene.types import Scalar
from graphene_django.converter import convert_django_field
from wagtail.core.fields import StreamField
from wagtail.core import blocks
from ..registry import registry
class GenericStreamFieldInterface(Scalar):
@staticmethod
def serialize(stream_value):
return stream_value.stream_data
@convert_django_field.register(StreamField)
def convert_stream_field(field, registry=None):
return GenericStreamFieldInterface(
description=field.help_text, required=not field.null
)
class StreamFieldInterface(graphene.Interface):
id = graphene.String()
block_type = graphene.String(required=True)
field = graphene.String(required=True)
raw_value = graphene.String(required=True)
@classmethod
def resolve_type(cls, instance, info):
"""
If block has a custom Graphene Node type in registry then use it,
otherwise use generic block type.
"""
if hasattr(instance, "block"):
mdl = type(instance.block)
if mdl in registry.streamfield_blocks:
return registry.streamfield_blocks[mdl]
for block_class in inspect.getmro(mdl):
if block_class in registry.streamfield_blocks:
return registry.streamfield_blocks[block_class]
return registry.streamfield_blocks["generic-block"]
def resolve_id(self, info, **kwargs):
return self.id
def resolve_block_type(self, info, **kwargs):
return type(self.block).__name__
def resolve_field(self, info, **kwargs):
return self.block.name
def resolve_raw_value(self, info, **kwargs):
if isinstance(self.value, dict):
return serialize_struct_obj(self.value)
return self.value
def generate_streamfield_union(graphql_types):
class StreamfieldUnion(graphene.Union):
class Meta:
types = graphql_types
@classmethod
def resolve_type(cls, instance, info):
"""
If block has a custom Graphene Node type in registry then use it,
otherwise use generic block type.
"""
mdl = type(instance.block)
if mdl in registry.streamfield_blocks:
return registry.streamfield_blocks[mdl]
return registry.streamfield_blocks["generic-block"]
return StreamfieldUnion
class StructBlockItem:
id = None
block = None
value = None
def __init__(self, id, block, value=""):
self.id = id
self.block = block
self.value = value
def serialize_struct_obj(obj):
rtn_obj = {}
if hasattr(obj, "stream_data"):
rtn_obj = []
for field in obj.stream_data:
rtn_obj.append(serialize_struct_obj(field["value"]))
else:
for field in obj:
value = obj[field]
if hasattr(value, "stream_data"):
rtn_obj[field] = list(
map(
lambda data: serialize_struct_obj(data["value"]),
value.stream_data,
)
)
elif hasattr(value, "value"):
rtn_obj[field] = value.value
elif hasattr(value, "src"):
rtn_obj[field] = value.src
elif hasattr(value, "file"):
rtn_obj[field] = value.file.url
else:
rtn_obj[field] = value
return rtn_obj
class StructBlock(graphene.ObjectType):
class Meta:
interfaces = (StreamFieldInterface,)
blocks = graphene.List(graphene.NonNull(StreamFieldInterface), required=True)
def resolve_blocks(self, info, **kwargs):
stream_blocks = []
for name, value in self.value.items():
block = self.block.child_blocks[name]
if not issubclass(type(block), blocks.StreamBlock):
value = block.to_python(value)
stream_blocks.append(StructBlockItem(name, block, value))
return stream_blocks
class StreamBlock(StructBlock):
class Meta:
interfaces = (StreamFieldInterface,)
def resolve_blocks(self, info, **kwargs):
stream_blocks = []
for field in self.value.stream_data:
block = self.value.stream_block.child_blocks[field["type"]]
            value = field["value"]
            if not issubclass(type(block), blocks.StructBlock):
                value = block.to_python(value)
            stream_blocks.append(StructBlockItem(field["type"], block, value))
return stream_blocks
class StreamFieldBlock(graphene.ObjectType):
value = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class CharBlock(graphene.ObjectType):
value = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class TextBlock(graphene.ObjectType):
value = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class EmailBlock(graphene.ObjectType):
value = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class IntegerBlock(graphene.ObjectType):
value = graphene.Int(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class FloatBlock(graphene.ObjectType):
value = graphene.Float(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class DecimalBlock(graphene.ObjectType):
value = graphene.Float(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class RegexBlock(graphene.ObjectType):
value = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class URLBlock(graphene.ObjectType):
value = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class BooleanBlock(graphene.ObjectType):
value = graphene.Boolean(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class DateBlock(graphene.ObjectType):
value = graphene.String(format=graphene.String(), required=True)
class Meta:
interfaces = (StreamFieldInterface,)
def resolve_value(self, info, **kwargs):
format = kwargs.get("format")
if format:
return self.value.strftime(format)
return self.value
class DateTimeBlock(DateBlock):
class Meta:
interfaces = (StreamFieldInterface,)
class TimeBlock(DateBlock):
class Meta:
interfaces = (StreamFieldInterface,)
class RichTextBlock(graphene.ObjectType):
value = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class RawHTMLBlock(graphene.ObjectType):
value = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class BlockQuoteBlock(graphene.ObjectType):
value = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class ChoiceOption(graphene.ObjectType):
key = graphene.String(required=True)
value = graphene.String(required=True)
class ChoiceBlock(graphene.ObjectType):
value = graphene.String(required=True)
choices = graphene.List(graphene.NonNull(ChoiceOption), required=True)
class Meta:
interfaces = (StreamFieldInterface,)
def resolve_choices(self, info, **kwargs):
choices = []
for key, value in self.block._constructor_kwargs["choices"]:
choice = ChoiceOption(key, value)
choices.append(choice)
return choices
def get_media_url(url):
if url[0] == "/":
return settings.BASE_URL + url
return url
class EmbedBlock(graphene.ObjectType):
value = graphene.String(required=True)
url = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
def resolve_url(self, info, **kwargs):
if hasattr(self, "value"):
return get_media_url(self.value.url)
return get_media_url(self.url)
class StaticBlock(graphene.ObjectType):
value = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
class ListBlock(graphene.ObjectType):
items = graphene.List(graphene.NonNull(StreamFieldInterface), required=True)
class Meta:
interfaces = (StreamFieldInterface,)
def resolve_items(self, info, **kwargs):
# Get the nested StreamBlock type
block_type = self.block.child_block
        # Return a list of GraphQL types from the list of values
return [StructBlockItem(self.id, block_type, item) for item in self.value]
registry.streamfield_blocks.update(
{
"generic-block": StreamFieldBlock,
blocks.CharBlock: CharBlock,
blocks.TextBlock: TextBlock,
blocks.EmailBlock: EmailBlock,
blocks.IntegerBlock: IntegerBlock,
blocks.FloatBlock: FloatBlock,
blocks.DecimalBlock: DecimalBlock,
blocks.RegexBlock: RegexBlock,
blocks.URLBlock: URLBlock,
blocks.BooleanBlock: BooleanBlock,
blocks.DateBlock: DateBlock,
blocks.TimeBlock: TimeBlock,
blocks.DateTimeBlock: DateTimeBlock,
blocks.RichTextBlock: RichTextBlock,
blocks.RawHTMLBlock: RawHTMLBlock,
blocks.BlockQuoteBlock: BlockQuoteBlock,
blocks.ChoiceBlock: ChoiceBlock,
blocks.StreamBlock: StreamBlock,
blocks.StructBlock: StructBlock,
blocks.StaticBlock: StaticBlock,
blocks.ListBlock: ListBlock,
wagtail.embeds.blocks.EmbedBlock: EmbedBlock,
}
)
def register_streamfield_blocks():
from .pages import PageInterface
from .documents import get_document_type
from .images import get_image_type
class PageChooserBlock(graphene.ObjectType):
page = graphene.Field(PageInterface, required=True)
class Meta:
interfaces = (StreamFieldInterface,)
def resolve_page(self, info, **kwargs):
return self.value
class DocumentChooserBlock(graphene.ObjectType):
document = graphene.Field(get_document_type(), required=True)
class Meta:
interfaces = (StreamFieldInterface,)
def resolve_document(self, info, **kwargs):
return self.value
class ImageChooserBlock(graphene.ObjectType):
image = graphene.Field(get_image_type(), required=True)
class Meta:
interfaces = (StreamFieldInterface,)
def resolve_image(self, info, **kwargs):
return self.value
class SnippetChooserBlock(graphene.ObjectType):
snippet = graphene.String(required=True)
class Meta:
interfaces = (StreamFieldInterface,)
def resolve_snippet(self, info, **kwargs):
return self.value
registry.streamfield_blocks.update(
{
blocks.PageChooserBlock: PageChooserBlock,
wagtail.documents.blocks.DocumentChooserBlock: DocumentChooserBlock,
wagtail.images.blocks.ImageChooserBlock: ImageChooserBlock,
wagtail.snippets.blocks.SnippetChooserBlock: SnippetChooserBlock,
}
)
``` |
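The `resolve_type` methods above fall back through a block's method resolution order before using the generic block type. The standalone sketch below illustrates that lookup with hypothetical block classes (independent of graphene and of grapple's real registry).
```python
# Standalone illustration of the MRO fallback; class and registry names are hypothetical.
import inspect

class CharBlock:
    pass

class FancyCharBlock(CharBlock):
    pass                                   # subclass with no explicit mapping

block_map = {CharBlock: "CharBlockType", "generic-block": "GenericBlockType"}

def resolve(block_instance):
    mdl = type(block_instance)
    if mdl in block_map:
        return block_map[mdl]
    for cls in inspect.getmro(mdl):        # walk base classes, most specific first
        if cls in block_map:
            return block_map[cls]
    return block_map["generic-block"]

print(resolve(FancyCharBlock()))           # -> "CharBlockType"
```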
{
"source": "jjaquish/widgetastic.patternfly4",
"score": 2
} |
#### File: testing/ouia/test_dropdown.py
```python
import pytest
from widgetastic.widget import View
from widgetastic_patternfly4 import DropdownItemDisabled
from widgetastic_patternfly4 import DropdownItemNotFound
from widgetastic_patternfly4.ouia import DropdownOUIA
TESTING_PAGE_URL = (
"https://patternfly-docs-ouia.netlify.app/documentation/react/components/dropdown" # noqa
)
@pytest.fixture
def dropdown(browser):
class TestView(View):
ROOT = ".//div[@id='ws-react-c-dropdown-ouia']"
dropdown = DropdownOUIA("Dropdown")
view = TestView(browser)
return view.dropdown
def test_dropdown_is_displayed(dropdown):
assert dropdown.is_displayed
def test_enabled_dropdown(dropdown):
assert dropdown.is_enabled
def test_dropdown_items(dropdown):
assert dropdown.items == [
"Link",
"Action",
"Disabled Link",
"Disabled Action",
"",
"Separated Link",
"Separated Action",
]
assert dropdown.has_item("Action")
assert not dropdown.has_item("Non existing items")
assert dropdown.item_enabled("Action")
assert not dropdown.item_enabled("Disabled Link")
def test_dropdown_open(dropdown):
assert not dropdown.is_open
dropdown.open()
assert dropdown.is_open
dropdown.close()
assert not dropdown.is_open
def test_dropdown_item_select(dropdown):
dropdown.item_select("Action")
assert not dropdown.is_open
with pytest.raises(DropdownItemDisabled):
dropdown.item_select("Disabled Link")
with pytest.raises(DropdownItemNotFound):
dropdown.item_select("Non existing items")
``` |
{
"source": "jjaraalm/hsds",
"score": 2
} |
#### File: chunkread/hsds/hsds_logger.py
```python
import os
app = None # global app handle
if "LOG_LEVEL" in os.environ:
log_level = os.environ["LOG_LEVEL"]
else:
log_level = "DEBUG"
def debug(msg):
print("log_level:", log_level)
if log_level == "DEBUG":
print("DEBUG> " + msg)
if app:
counter = app["log_count"]
counter["DEBUG"] += 1
def info(msg):
if log_level not in ("ERROR", "WARNING", "WARN"):
print("INFO> " + msg)
if app:
counter = app["log_count"]
counter["INFO"] += 1
def warn(msg):
if log_level != "ERROR":
print("WARN> " + msg)
if app:
counter = app["log_count"]
counter["WARN"] += 1
def warning(msg):
if log_level != "ERROR":
print("WARN> " + msg)
if app:
counter = app["log_count"]
counter["WARN"] += 1
def error(msg):
print("ERROR> " + msg)
if app:
counter = app["log_count"]
counter["ERROR"] += 1
```
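The module-level `app` handle is optional: when it is set, each helper also increments a counter in `app["log_count"]`. A small sketch of wiring that up follows; the import path and the dict shape are inferred from the code above, and the real service presumably assigns its aiohttp application object here instead.
```python
# Sketch only: counter shape inferred from the functions above.
from hsds import hsds_logger as log   # assumed import path

log.app = {"log_count": {"DEBUG": 0, "INFO": 0, "WARN": 0, "ERROR": 0}}
log.info("starting up")
log.warning("low disk space")
print(log.app["log_count"])           # e.g. {'DEBUG': 0, 'INFO': 1, 'WARN': 1, 'ERROR': 0}
```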
#### File: http/ghcn/helper.py
```python
import base64
import config
"""
Helper function - get endpoint we'll send http requests to
"""
def getEndpoint():
endpoint = config.get("hsds_endpoint")
return endpoint
"""
get default request headers for domain
"""
def getRequestHeaders(domain=None, username=None, password=None):
if username is None:
username = config.get("user_name")
if password is None:
        password = config.get("user_password")
headers = { }
if domain is not None:
headers['host'] = domain
if username and password:
auth_string = username + ':' + password
auth_string = auth_string.encode('utf-8')
auth_string = base64.b64encode(auth_string)
auth_string = auth_string.decode('utf-8')
auth_string = "Basic " + auth_string
headers['Authorization'] = auth_string
return headers
```
#### File: tests/unit/shuffleTest.py
```python
import unittest
import sys
import numpy as np
import time
sys.path.append('../..')
from hsds.util.s3Util import _shuffle, _unshuffle
class ShuffleUtilTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ShuffleUtilTest, self).__init__(*args, **kwargs)
# main
def testShuffle(self):
arr = np.zeros((3,), dtype='<u2')
arr[0] = 0x0001
arr[1] = 0x0002
arr[2] = 0x0003
data = arr.tobytes()
fmt = '{:02X}{:02X} ' * (len(data)//2)
self.assertEqual(fmt.format(*data), "0100 0200 0300 ")
shuffled = _shuffle(2, data)
self.assertEqual(fmt.format(*shuffled), "0102 0300 0000 ")
unshuffled = _unshuffle(2, shuffled)
self.assertEqual(fmt.format(*data), "0100 0200 0300 ")
for i in range(len(data)):
self.assertEqual(data[i], unshuffled[i])
def testTime(self):
arr = np.random.rand(1000,1000)
now = time.time()
data = arr.tobytes()
shuffled = _shuffle(8, data)
self.assertEqual(len(data), len(shuffled))
unshuffled = _unshuffle(8, shuffled)
elapsed = time.time() - now
# this was taking ~0.04 s with an i7
# without numba, time was 2.4s (60x slower)
#print("time:", elapsed)
self.assertTrue(elapsed < 0.1)
self.assertEqual(len(shuffled), len(unshuffled))
self.assertEqual(data, unshuffled)
if __name__ == '__main__':
#setup test files
unittest.main()
``` |
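For readers unfamiliar with the transform under test, below is a minimal NumPy sketch of byte shuffling (an illustration, not the hsds `_shuffle` implementation); it reproduces the `0100 0200 0300` -> `0102 0300 0000` example from `testShuffle`.
```python
import numpy as np

def shuffle_bytes(element_size, data):
    # Group the i-th byte of every element together (all byte 0s, then all byte 1s, ...),
    # which makes typed numeric data far more compressible.
    return np.frombuffer(data, dtype=np.uint8).reshape(-1, element_size).T.tobytes()

def unshuffle_bytes(element_size, data):
    # Inverse transform: regroup the byte planes back into interleaved elements.
    return np.frombuffer(data, dtype=np.uint8).reshape(element_size, -1).T.tobytes()

data = np.array([1, 2, 3], dtype="<u2").tobytes()          # 01 00 02 00 03 00
assert shuffle_bytes(2, data).hex() == "010203000000"
assert unshuffle_bytes(2, shuffle_bytes(2, data)) == data
```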
{
"source": "jjaramillo34/demo-image-processing",
"score": 2
} |
#### File: jjaramillo34/demo-image-processing/utils_helpers.py
```python
from time import sleep
import cv2
import streamlit as st
import pandas as pd
import sqlite3
from sqlite3 import Error, adapters
import numpy as np
import base64
import sys
import os
import re
import uuid
import time
import ipinfo
import pymongo
from datetime import datetime
from webcolors import CSS3_HEX_TO_NAMES, hex_to_rgb
from skimage.exposure import rescale_intensity
from duckduckgo_search import ddg
from io import BytesIO
from PIL import Image
from scipy.spatial import KDTree
import streamlit.components.v1 as components
from streamlit_embedcode import github_gist
def version():
return st.sidebar.caption(f"Streamlit version `{st.__version__}`")
def load_image(filename):
image = cv2.imread(filename)
return image
def load_image_PIL(filename):
image = Image.open(filename)
return image
def converted(image):
#image = cv2.imread(filename)
converted_image = np.array(image.convert('RGB'))
return converted_image
def increment_counter():
st.session_state.count += 1
def load_image_file_uploader(image):
img_file = st.file_uploader(label='Upload a file', type=['png', 'jpg', 'jpeg'])
image = load_image_PIL(img_file)
image = converted(image)
return st.image(image)
def source_code(title, url, gist_url):
with st.expander(title):
cols = st.columns(2)
with cols[0]:
components.iframe(url, height=800, scrolling=True)
with cols[1]:
github_gist(gist_url, width=800, height=800)
st.sidebar.caption(f"Streamlit version `{st.__version__}`")
def convert_rgb_to_names(rgb_tuple):
# a dictionary of all the hex and their respective names in css3
css3_db = CSS3_HEX_TO_NAMES
names = []
rgb_values = []
for color_hex, color_name in css3_db.items():
names.append(color_name)
rgb_values.append(hex_to_rgb(color_hex))
kdt_db = KDTree(rgb_values)
distance, index = kdt_db.query(rgb_tuple)
return f'closest match: {names[index]}'
def auto_canny_thresh(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def convolve(image, kernel):
# grab the spatial dimensions of the image, along with
# the spatial dimensions of the kernel
(iH, iW) = image.shape[:2]
(kH, kW) = kernel.shape[:2]
# allocate memory for the output image, taking care to
# "pad" the borders of the input image so the spatial
# size (i.e., width and height) are not reduced
pad = (kW - 1) // 2
image = cv2.copyMakeBorder(image, pad, pad, pad, pad,
cv2.BORDER_REPLICATE)
output = np.zeros((iH, iW), dtype="float32")
# loop over the input image, "sliding" the kernel across
# each (x, y)-coordinate from left-to-right and top to
# bottom
for y in np.arange(pad, iH + pad):
for x in np.arange(pad, iW + pad):
# extract the ROI of the image by extracting the
# *center* region of the current (x, y)-coordinates
# dimensions
roi = image[y - pad:y + pad + 1, x - pad:x + pad + 1]
# perform the actual convolution by taking the
            # element-wise multiplication between the ROI and
# the kernel, then summing the matrix
k = (roi * kernel).sum()
# store the convolved value in the output (x,y)-
# coordinate of the output image
output[y - pad, x - pad] = k
# rescale the output image to be in the range [0, 255]
output = rescale_intensity(output, in_range=(0, 255))
output = (output * 255).astype("uint8")
# return the output image
return output
def download_button1(image, label, file_name, mime, key):
    # convert np.array into PIL image
result = Image.fromarray(image)
buf = BytesIO()
result.save(buf, format="PNG")
byte_im = buf.getvalue()
btn = st.download_button(
label=label,
data=byte_im,
file_name=file_name,
mime=mime, key=key)
return btn
def download_button(object_to_download, download_filename, button_text, isPNG):
"""
Generates a link to download the given object_to_download.
Params:
------
object_to_download: The object to be downloaded.
download_filename (str): filename and extension of file. e.g. mydata.csv,
some_txt_output.txt download_link_text (str): Text to display for download
link.
button_text (str): Text to display on download button (e.g. 'click here to download file')
pickle_it (bool): If True, pickle file.
Returns:
-------
(str): the anchor tag to download object_to_download
Examples:
--------
download_link(Pillow_image_from_cv_matrix, 'your_image.jpg', 'Click to me to download!')
"""
buffered = BytesIO()
if isPNG:
object_to_download.save(buffered, format="PNG")
else:
object_to_download.save(buffered, format="JPEG")
b64 = base64.b64encode(buffered.getvalue()).decode()
button_uuid = str(uuid.uuid4()).replace('-', '')
    button_id = re.sub(r'\d+', '', button_uuid)
custom_css = f"""
<style>
#{button_id} {{
display: inline-flex;
align-items: center;
justify-content: center;
background-color: rgb(255, 255, 255);
color: rgb(38, 39, 48);
padding: .25rem .75rem;
position: relative;
text-decoration: none;
border-radius: 4px;
border-width: 1px;
border-style: solid;
border-color: rgb(230, 234, 241);
border-image: initial;
}}
#{button_id}:hover {{
border-color: rgb(246, 51, 102);
color: rgb(246, 51, 102);
}}
#{button_id}:active {{
box-shadow: none;
background-color: rgb(246, 51, 102);
color: white;
}}
</style> """
dl_link = custom_css + f'<a download="{download_filename}" id="{button_id}" href="data:file/txt;base64,{b64}">{button_text}</a><br></br>'
return dl_link
# returns dictinary with ip location
def get_location_data():
import urllib.request
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
#print(external_ip)
ACCESS_TOKEN = st.secrets["secret"]["secret"]
#print('Token', ACCESS_TOKEN)
handler = ipinfo.getHandler(ACCESS_TOKEN)
details = handler.getDetails(external_ip)
print(details.details)
return details.details
# Initialize connection.
# Uses st.experimental_singleton to only run once.
@st.experimental_singleton
def init_connection():
return pymongo.MongoClient(**st.secrets["mongo_ratings"])
client = init_connection()
@st.experimental_memo(ttl=600)
def insert_data_mongodb(rating, feedback, date_r, city, ip, region, country, loc):
#client = pymongo.MongoClient(st.secrets["mongo_ratings"]['host'])
print("MongoDB Connected successfully!!!")
# database
database = client['app_ratings']
# Created collection
collection = database['ratings']
location_dict = get_location_data()
date_r = datetime.now()
loc = location_dict['loc']
city = location_dict['city']
ip = location_dict['ip']
region = location_dict['region']
country = location_dict['country']
my_dict = {
"rating": rating,
"feedback": feedback,
'date': date_r,
'city': city,
'ip': ip,
'region': region,
'country': country,
'loc' : { 'type': "Point", 'coordinates': [loc.split(',')[0] ,loc.split(',')[1]] },
}
x = collection.insert_one(my_dict)
#client.close()
print("MongoDB Close successfully!!!")
@st.experimental_singleton
def average_ratings_mongodb():
print("MongoDB Connected successfully!!!")
# database
#client = pymongo.MongoClient(st.secrets["mongo_ratings"]['host'])
database = client['app_ratings']
# Created collection
collection = database['ratings']
x = collection.aggregate([
{"$group":
{
"_id": None,
"avg_rating": {"$avg":"$rating"} }
}
])
#client.close()
print("MongoDB Close successfully!!!")
return list(x)[0]['avg_rating']
@st.cache(suppress_st_warning=True)
def scrape_duckduckgo(col_name):
# Python code to illustrate inserting data in MongoDB
print("Connected successfully!!!")
# database
#client = pymongo.MongoClient(st.secrets["mongo_ratings"]['host'])
db = client.duckduckgo
# Created collection
c = col_name.replace(" ", '_')
collection = db[c]
if c in db.list_collection_names():
print("The collection exists.")
# This is a cursor instance
cur = collection.find()
results = list(cur)
# Checking the cursor is empty
# or not
if len(results)==0:
print("Empty Cursor")
keywords = col_name
results = ddg(keywords, region='wt-wt', safesearch='Moderate', time='y', max_results=10)
time.sleep(0.75)
result_df = pd.DataFrame.from_dict(results)
result_df.reset_index(inplace=True)
data_dict = result_df.to_dict("records")
collection.insert_many(data_dict)
st.dataframe(result_df, height=850)
else:
print("Cursor is Not Empty")
print("Do Stuff Here")
cols = st.columns(2)
results = collection.find({})
with cols[0]:
for doc in results:
#st.write(doc['title'])
st.markdown(f'<h5 style="font-size:12px;"><a href="{doc["href"]}" target="_blank">{doc["title"]}</a></h5>', unsafe_allow_html=True)
with cols[1]:
results_df = pd.DataFrame(list(collection.find({}, {'_id': False})))
st.dataframe(results_df, height=850)
csv = convert_df(results_df)
st.download_button(
label="Download data as CSV",
data=csv,
file_name='results.csv',
mime='text/csv')
else:
collection = db.create_collection(c)
print("The collection not exists collection has been creaed.")
keywords = col_name
results = ddg(keywords, region='wt-wt', safesearch='Moderate', time='y', max_results=10)
time.sleep(0.75)
result_df = pd.DataFrame.from_dict(results)
result_df.reset_index(inplace=True)
data_dict = result_df.to_dict("records")
collection.insert_many(data_dict)
st.dataframe(result_df, height=850)
if col_name in db.list_collection_names():
print(True)
#else:
# with st.spinner('Getting results from duckduckgo...'):
# keywords = col_name
# results = ddg(keywords, region='wt-wt', safesearch='Moderate', time='y', max_results=10)
# time.sleep(0.75)
# #st.write(results)
# cols = st.columns(2)
# with cols[0]:
# for i, ele in enumerate(results):
# st.markdown(f'<h5 style="font-size:12px;"><a href="{results[i]["href"]}" target="_blank">{results[i]["title"]}</a></h5>', unsafe_allow_html=True)
# with cols[1]:
# result_df = pd.DataFrame.from_dict(results)
# st.dataframe(result_df, height=850)
# csv = convert_df(result_df)
# st.download_button(
# label="Download data as CSV",
# data=csv,
# file_name='results.csv',
# mime='text/csv',)
def convert_df(df):
# IMPORTANT: Cache the conversion to prevent computation on every rerun
return df.to_csv().encode('utf-8')
``` |
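One usage note on the helpers above: `download_button` returns a raw HTML anchor string rather than a Streamlit widget, so it is meant to be rendered with `st.markdown(..., unsafe_allow_html=True)`. A hedged sketch follows (the image is illustrative and the import of this module is assumed).
```python
# Hedged usage sketch for download_button; result_image stands in for a processed image.
import streamlit as st
from PIL import Image
from utils_helpers import download_button   # assumed import of the module above

result_image = Image.new("RGB", (64, 64), color=(246, 51, 102))
link = download_button(result_image, "processed.png", "Download result", isPNG=True)
st.markdown(link, unsafe_allow_html=True)
```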
{
"source": "jjaramillo34/fastapi-mongo",
"score": 3
} |
#### File: server/routes/student.py
```python
from fastapi import APIRouter, Body
from fastapi.encoders import jsonable_encoder
from apps.server.database import (
add_student,
delete_student,
retrieve_student,
retrieve_students,
update_student,
)
from apps.server.models.student import (
ErrorResponseModel,
ResponseModel,
StudentSchema,
UpdateStudentModel,
)
router = APIRouter()
@router.get("/", response_description="Students retrieved")
async def get_students():
students = await retrieve_students()
if students:
return ResponseModel(students, "Students data retrieved successfully")
return ResponseModel(students, "Empty list returned")
@router.post("/", response_description="Student data added into the database")
async def add_student_data(student: StudentSchema = Body(...)):
student = jsonable_encoder(student)
new_student = await add_student(student)
return ResponseModel(new_student, "Student added successfully.")
@router.get("/{id}", response_description="Student data retrieved")
async def get_student_data(id):
student = await retrieve_student(id)
if student:
return ResponseModel(student, "Student data retrieved successfully")
return ErrorResponseModel("An error occurred.", 404, "Student doesn't exist.")
@router.put("/{id}")
async def update_student_data(id: str, req: UpdateStudentModel = Body(...)):
req = {k: v for k, v in req.dict().items() if v is not None}
updated_student = await update_student(id, req)
if updated_student:
return ResponseModel(
"Student with ID: {} name update is successful".format(id),
"Student name updated successfully",
)
return ErrorResponseModel(
"An error occurred",
404,
"There was an error updating the student data.",
)
@router.delete("/{id}", response_description="Student data deleted from the database")
async def delete_student_data(id: str):
deleted_student = await delete_student(id)
if deleted_student:
return ResponseModel(
"Student with ID: {} removed".format(id), "Student deleted successfully"
)
return ErrorResponseModel(
"An error occurred", 404, "Student with id {0} doesn't exist".format(id)
)
``` |
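For context, `ResponseModel` and `ErrorResponseModel` are imported above but not shown in this excerpt. In this style of FastAPI + MongoDB project they are typically thin dict builders along the lines of the sketch below; this is an assumption about their shape, not this repository's actual code.
```python
# Hedged sketch of the response helpers imported from apps.server.models.student.
def ResponseModel(data, message):
    return {"data": [data], "code": 200, "message": message}

def ErrorResponseModel(error, code, message):
    return {"error": error, "code": code, "message": message}
```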
{
"source": "jjaramillo34/pyimagesearchuniversity_course",
"score": 3
} |
#### File: Basic Image Processing Operations/convolutions-opencv/convolutions_practice.py
```python
import argparse
import re
import cv2
import numpy as np
from skimage.exposure import rescale_intensity
def convolve(image, kernel):
# grab the spatial dimensions of the image, along with the spatial
# dimensions of the kernel
(iH, iW) = image.shape[:2]
(kH, kW) = kernel.shape[:2]
    # allocate memory for the output image, taking care to "pad" the
# borders of the input image so the spatial size (i.e., width and height) are not reduced
pad = (kW - 1) // 2
image = cv2.copyMakeBorder(image, pad, pad, pad, pad,
cv2.BORDER_REPLICATE)
output = np.zeros((iH, iW), dtype="float32")
# loop over the input image, "sliding" the kernel across each
# (x, y)-coordinate from left-to-right and top to bottom
for y in np.arange(pad, iH + pad):
for x in np.arange(pad, iW + pad):
# extract the ROI of the image by extracting the *center* region
# of the current (x, y)-coordinates dimensions
roi = image[y - pad:y + pad + 1, x - pad:x + pad + 1]
# perform the actual convolution by taking the element-wise
            # multiplication between the ROI and the kernel, then summing the
# matrix
k = (roi * kernel).sum()
            # store the convolved value in the output (x, y)-coordinate of
# the output image
output[y - pad, x - pad] = k
# rescale the output image to be in the range [0, 255]
output = rescale_intensity(output, in_range=(0, 255))
output = (output * 255).astype("uint8")
#return the output image
return output
# construct the argument parser and the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str, required=True,
help="path to the input image")
args = vars(ap.parse_args())
# construct average blurring kernels used to smooth an image
smallBlur = np.ones((7, 7), dtype="float") * (1.0 / (7 * 7))
largeBlur = np.ones((21, 21), dtype="float") * (1.0 / (21 * 21))
# construct a sharpening filter
sharpen = np.array((
[0 , -1, 0],
[-1, 5, -1],
[0, -1, 0]), dtype="int")
# construct the Laplacian kernel to detect edge-like regions on an image
laplacian = np.array((
[0, 1, 0],
[1, -4, 1],
[0 ,1 ,0]), dtype="int")
# construct the Sobel x-asis kernel
sobelX = np.array((
[-1, 0, 1],
[-2, 0 , 2],
[-1, 0, 1]), dtype="int")
# construct the Sobel y-axis kernel
sobelY = np.array((
[-1, -2, -1],
[0, 0 ,0],
[1, 2, 1]), dtype="int")
# construct the kernel bank, a list of kernels we're going to apply using out
# custom 'convolve' function and OpenCV's 'filter2D' function
kernelBank = (
("small_blur", smallBlur),
("large_blur", largeBlur),
("sharpen", sharpen),
("laplacian", laplacian),
("sobel_x", sobelX),
("sobel_y", sobelY)
)
# load the input image and convert it to grayscale
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# loop over the kernels
for (kernelName, kernel) in kernelBank:
# apply the kernel to the grayscale image using both our custom 'convolve'
    # function and OpenCV's 'filter2D' function
print("[INFO] applying {} kernel".format(kernelName))
    convolveOutput = convolve(gray, kernel)
opencvOutput = cv2.filter2D(gray, -1, kernel)
# show the output images
cv2.imshow("original", gray)
cv2.imshow("{} - convole".format(kernelName), convoleOutput)
cv2.imshow("{} - opencv".format(kernelName), opencvOutput)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
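To make the inner loop concrete, the snippet below works the `(roi * kernel).sum()` step by hand for the sharpen kernel defined above, using a synthetic 3x3 patch: a pixel brighter than its neighbours gets boosted.
```python
import numpy as np

# One convolution step of the sharpen kernel, worked by hand on a synthetic patch.
sharpen = np.array([[ 0, -1,  0],
                    [-1,  5, -1],
                    [ 0, -1,  0]], dtype="int")
roi = np.array([[10, 10, 10],
                [10, 50, 10],
                [10, 10, 10]], dtype="float32")
k = (roi * sharpen).sum()   # 5*50 - (10+10+10+10) = 210: the bright centre is amplified
print(k)                    # 210.0
```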
#### File: Histograms/gamma-correction/adjust_gamma.py
```python
from __future__ import print_function
import numpy as np
import argparse
import cv2
def adjust_gamma(image, gamma=1.0):
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to input image")
args = vars(ap.parse_args())
# load the original image
original = cv2.imread(args["image"])
# loop over various values of gamma
for gamma in np.arange(0.0, 3.5, 0.5):
# ignore when gamma is 1 (there will be no change to the image)
if gamma == 1:
continue
# apply gamma correction and show the images
gamma = gamma if gamma > 0 else 0.1
adjusted = adjust_gamma(original, gamma=gamma)
cv2.putText(adjusted, "g={}".format(gamma), (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)
cv2.imshow("Images", np.hstack([original, adjusted]))
cv2.waitKey(0)
``` |
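As a quick check of the lookup-table formula, the snippet below evaluates one table entry by hand: with gamma = 2.0 (so invGamma = 0.5), an input intensity of 64 maps to about 127, i.e. the midtones are brightened.
```python
# One entry of the gamma lookup table above, evaluated by hand.
gamma = 2.0
invGamma = 1.0 / gamma                      # 0.5
i = 64
entry = ((i / 255.0) ** invGamma) * 255     # ~127.75
print(int(entry))                           # 127 after astype("uint8") truncation
```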
{
"source": "JJardin77580/pdf.js",
"score": 2
} |
#### File: gyp/generator/eclipse.py
```python
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
import xml.etree.ElementTree as ET
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!), so we convert them to variables
generator_default_variables[dirname] = '$' + dirname
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
flavor = gyp.common.GetFlavor(params)
default_variables.setdefault('OS', flavor)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Eclipse generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name, params,
compiler_path):
"""Calculate the set of include directories to be used.
Returns:
A list including all the include_dir's specified for every target followed
by any include directories that were added as cflag compiler options.
"""
gyp_includes_set = set()
compiler_includes_list = []
# Find compiler's default include dirs.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-xc++', '-v', '-'])
proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.communicate()[1]
# Extract the list of include dirs from the output, which has this format:
# ...
# #include "..." search starts here:
# #include <...> search starts here:
# /usr/include/c++/4.6
# /usr/local/include
# End of search list.
# ...
in_include_list = False
for line in output.splitlines():
if line.startswith('#include'):
in_include_list = True
continue
if line.startswith('End of search list.'):
break
if in_include_list:
include_dir = line.strip()
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if config_name in target['configurations']:
config = target['configurations'][config_name]
# Look for any include dirs that were explicitly added via cflags. This
# may be done in gyp files to force certain includes to come at the end.
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
# remove this.
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
cflags = msvs_settings.GetCflags(config_name)
else:
cflags = config['cflags']
for cflag in cflags:
if cflag.startswith('-I'):
include_dir = cflag[2:]
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
# Find standard gyp include dirs.
if 'include_dirs' in config:
include_dirs = config['include_dirs']
for shared_intermediate_dir in shared_intermediate_dirs:
for include_dir in include_dirs:
include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
shared_intermediate_dir)
if not os.path.isabs(include_dir):
base_dir = os.path.dirname(target_name)
include_dir = base_dir + '/' + include_dir
include_dir = os.path.abspath(include_dir)
gyp_includes_set.add(include_dir)
# Generate a list that has all the include dirs.
all_includes_list = list(gyp_includes_set)
all_includes_list.sort()
for compiler_include in compiler_includes_list:
if not compiler_include in gyp_includes_set:
all_includes_list.append(compiler_include)
# All done.
return all_includes_list
def GetCompilerPath(target_list, data, options):
"""Determine a command that can be used to invoke the compiler.
Returns:
If this is a gyp project that has explicit make settings, try to determine
the compiler from that. Otherwise, see if a compiler was specified via the
CC_target environment variable.
"""
# First, see if the compiler is configured in make's settings.
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_dict = data[build_file].get('make_global_settings', {})
for key, value in make_global_settings_dict:
if key in ['CC', 'CXX']:
return os.path.join(options.toplevel_dir, value)
# Check to see if the compiler was specified as an environment variable.
for key in ['CC_target', 'CC', 'CXX']:
compiler = os.environ.get(key)
if compiler:
return compiler
return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params,
compiler_path):
"""Calculate the defines for a project.
Returns:
A dict that includes explicit defines declared in gyp files along with all of
the default defines that the compiler uses.
"""
# Get defines declared in the gyp files.
all_defines = {}
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
extra_defines = msvs_settings.GetComputedDefines(config_name)
else:
extra_defines = []
if config_name in target['configurations']:
config = target['configurations'][config_name]
target_defines = config['defines']
else:
target_defines = []
for define in target_defines + extra_defines:
split_define = define.split('=', 1)
if len(split_define) == 1:
split_define.append('1')
if split_define[0].strip() in all_defines:
# Already defined
continue
all_defines[split_define[0].strip()] = split_define[1].strip()
# Get default compiler defines (if possible).
if flavor == 'win':
return all_defines # Default defines already processed in the loop above.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-dM', '-'])
cpp_proc = subprocess.Popen(args=command, cwd='.',
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cpp_output = cpp_proc.communicate()[0]
cpp_lines = cpp_output.split('\n')
for cpp_line in cpp_lines:
if not cpp_line.strip():
continue
cpp_line_parts = cpp_line.split(' ', 2)
key = cpp_line_parts[1]
if len(cpp_line_parts) >= 3:
val = cpp_line_parts[2]
else:
val = '1'
all_defines[key] = val
return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
"""Write the includes section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.IncludePaths">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for include_dir in include_dirs:
out.write(' <includepath workspace_path="false">%s</includepath>\n' %
include_dir)
out.write(' </language>\n')
out.write(' </section>\n')
def WriteMacros(out, eclipse_langs, defines):
"""Write the macros section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.Macros">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for key in sorted(defines):
out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
(escape(key), escape(defines[key])))
out.write(' </language>\n')
out.write(' </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
config_name)
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
# Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
# SHARED_INTERMEDIATE_DIR. Include both possible locations.
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
os.path.join(toplevel_build, 'gen')]
GenerateCdtSettingsFile(target_list,
target_dicts,
data,
params,
config_name,
os.path.join(toplevel_build,
'eclipse-cdt-settings.xml'),
options,
shared_intermediate_dirs)
GenerateClasspathFile(target_list,
target_dicts,
options.toplevel_dir,
toplevel_build,
os.path.join(toplevel_build,
'eclipse-classpath.xml'))
def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
config_name, out_name, options,
shared_intermediate_dirs):
gyp.common.EnsureDirExists(out_name)
with open(out_name, 'w') as out:
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
out.write('<cdtprojectproperties>\n')
eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
'GNU C++', 'GNU C', 'Assembly']
compiler_path = GetCompilerPath(target_list, data, options)
include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs,
config_name, params, compiler_path)
WriteIncludePaths(out, eclipse_langs, include_dirs)
defines = GetAllDefines(target_list, target_dicts, data, config_name,
params, compiler_path)
WriteMacros(out, eclipse_langs, defines)
out.write('</cdtprojectproperties>\n')
def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
toplevel_build, out_name):
'''Generates a classpath file suitable for symbol navigation and code
completion of Java code (such as in Android projects) by finding all
.java and .jar files used as action inputs.'''
gyp.common.EnsureDirExists(out_name)
result = ET.Element('classpath')
def AddElements(kind, paths):
# First, we need to normalize the paths so they are all relative to the
# toplevel dir.
rel_paths = set()
for path in paths:
if os.path.isabs(path):
rel_paths.add(os.path.relpath(path, toplevel_dir))
else:
rel_paths.add(path)
for path in sorted(rel_paths):
entry_element = ET.SubElement(result, 'classpathentry')
entry_element.set('kind', kind)
entry_element.set('path', path)
AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir))
# Include the standard JRE container and a dummy out folder
AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
# Include a dummy out folder so that Eclipse doesn't use the default /bin
# folder in the root of the project.
AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')])
ET.ElementTree(result).write(out_name)
def GetJavaJars(target_list, target_dicts, toplevel_dir):
'''Generates a sequence of all .jars used as inputs.'''
for target_name in target_list:
target = target_dicts[target_name]
for action in target.get('actions', []):
for input_ in action['inputs']:
if os.path.splitext(input_)[1] == '.jar' and not input_.startswith('$'):
if os.path.isabs(input_):
yield input_
else:
yield os.path.join(os.path.dirname(target_name), input_)
def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
'''Generates a sequence of all likely java package root directories.'''
for target_name in target_list:
target = target_dicts[target_name]
for action in target.get('actions', []):
for input_ in action['inputs']:
if (os.path.splitext(input_)[1] == '.java' and
not input_.startswith('$')):
dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name),
input_))
# If there is a parent 'src' or 'java' folder, navigate up to it -
# these are canonical package root names in Chromium. This will
# break if 'src' or 'java' exists in the package structure. This
# could be further improved by inspecting the java file for the
# package name if this proves to be too fragile in practice.
parent_search = dir_
while os.path.basename(parent_search) not in ['src', 'java']:
parent_search, _ = os.path.split(parent_search)
if not parent_search or parent_search == toplevel_dir:
# Didn't find a known root, just return the original path
yield dir_
break
else:
yield parent_search
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate an XML settings file that can be imported into a CDT project."""
if params['options'].generator_output:
raise NotImplementedError("--generator_output not implemented for eclipse")
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
``` |
{
"source": "jjaros587/MSD",
"score": 2
} |
#### File: MSD/eshop/main.py
```python
from HtmlTestRunner import HTMLTestRunner
from unittest import TestLoader, TestSuite
from eshop.tests.sample_test import SampleTest
from eshop.utils.config_parser import ConfigParser
def main():
runner = HTMLTestRunner(**ConfigParser().get_report_params())
runner.run(TestSuite(TestLoader().loadTestsFromTestCase(SampleTest)))
if __name__ == "__main__":
main()
```
#### File: eshop/page_objects/base_page.py
```python
from selenium.webdriver.common.by import By
from eshop.utils.page_factory import find_by
from eshop.utils.waiter import Waiter
class BasePage:
items = {
"free": "/volne-prodejne-leky"
}
cart_amount = find_by(By.ID, "js-cart-amount-desktop")
navigation = find_by(By.ID, "js-main-nav")
def __init__(self, driver):
self.driver = driver
self.waiter = Waiter(driver)
def access_item(self, key):
self.navigation().find_element(By.XPATH, "//a[@href='%s']" % self.items[key]).click()
self.waiter.wait_for_page_load()
def get_cart_amount(self):
return self.cart_amount().text
```
#### File: eshop/utils/page_factory.py
```python
_strategy_kwargs = ['id_', 'xpath', 'link_text', 'partial_link_text',
'name', 'tag_name', 'class_name', 'css_selector']
def cacheable_decorator(lookup):
def func(self):
if not hasattr(self, '_elements_cache'):
self._elements_cache = {} # {callable_id: element(s)}
cache = self._elements_cache
key = id(lookup)
if key not in cache:
cache[key] = lookup(self)
return cache[key]
return func
def _find_by(how, using, multiple, cacheable, context, driver_attr, **kwargs):
def func(self):
# context - driver or a certain element
if context:
driver = context() if callable(context) else context.__get__(self) # or property
else:
driver = getattr(self, driver_attr)
# 'how' AND 'using' take precedence over keyword arguments
if how and using:
lookup = driver.find_elements if multiple else driver.find_element
return lookup(how, using)
        # validate the keyword arguments before unpacking so the error below is raised cleanly
        if len(kwargs) != 1 or next(iter(kwargs)) not in _strategy_kwargs:
            raise ValueError(
                "If 'how' AND 'using' are not specified, one and only one of the following "
                "valid keyword arguments should be provided: %s." % _strategy_kwargs)
        [[key, value]] = kwargs.items()
        suffix = key[:-1] if key.endswith('_') else key # find_element(s)_by_xxx
prefix = 'find_elements_by' if multiple else 'find_element_by'
lookup = getattr(driver, '%s_%s' % (prefix, suffix))
return lookup(value)
return cacheable_decorator(func) if cacheable else func
def find_by(how=None, using=None, multiple=False, cacheable=False, context=None, driver_attr='driver', **kwargs):
return _find_by(how, using, multiple, cacheable, context, driver_attr, **kwargs)
``` |
{
"source": "jjaros587/selenium_generator_sample_project",
"score": 3
} |
#### File: pages/search/LoginPage.py
```python
from selenium.webdriver.common.by import By
from selenium_generator.factories.page_factory import find_by
class LoginPage:
_field_username = find_by(By.NAME, "username", cacheable=True)
_field_password = find_by(By.NAME, "password", cacheable=True)
    _button_logout = find_by(By.XPATH, "//a[text()='Logout']", cacheable=True)
_button_login = find_by(name="login", cacheable=True)
def __init__(self, driver):
self._driver = driver
self._driver.get("http://qualitypointtech.net/timesheetdemo/index.php")
def login_positive(self, username, password):
self.login(username, password)
assert "report" in self._driver.current_url
def login_negative(self, username, password):
self.login(username, password)
assert "report" not in self._driver.current_url
def login(self, username, password):
self._field_username().send_keys(username)
self._field_password().send_keys(password)
self._button_login().click()
def logout(self):
self._button_logout().click()
``` |
{
"source": "jjaros587/selenium_generator",
"score": 4
} |
#### File: selenium_generator/base/singleton.py
```python
from functools import wraps
__instances = {}
"""List of class instances created with singleton decorator."""
def singleton(cls):
"""Decorator for singleton design pattern. Function decorates a class and stores its instance to a list of instances
from which is later returned if the class is called again.
Args:
cls: Decorated class
Returns:
Instance of a decorated class
"""
@wraps(cls)
def get_instance(*args, **kwargs):
instance = __instances.get(cls, None)
if not instance:
instance = cls(*args, **kwargs)
__instances[cls] = instance
return instance
return get_instance
```
#### File: selenium_generator/test_runner/runner.py
```python
import os
from unittest.result import failfast
from HtmlTestRunner.runner import HTMLTestRunner, HtmlTestResult
from HtmlTestRunner.result import _TestInfo
DEFAULT_TEMPLATE = os.path.join(os.path.dirname(__file__), "template", "report_template.html")
"""Path to a default template for test report"""
DEFAULT_OUTPUT = "./reports/"
"""Default directory where test report should be generated."""
class TestInfo(_TestInfo):
"""Class which stores information about executed test.
Args:
test_result (Result): Test result class
test_method (type): Executed test method
        outcome (int): Outcome code, one of the result-type constants defined on _TestInfo
err (tuple): Holds detailed information about occurred error
sub_test:
screen_shot (str): Path to screen shot of a failed test
Attributes:
screen_shot (str): Path to screen shot of a failed test
"""
def __init__(self, test_result, test_method, outcome=_TestInfo.SUCCESS, err=None, sub_test=None, screen_shot=None):
_TestInfo.__init__(self, test_result, test_method, outcome=outcome, err=err, subTest=sub_test)
self.screen_shot = screen_shot
class Result(HtmlTestResult):
"""Class which is used for generating of test result.
Args:
stream (:class:`unittest.runner._WritelnDecorator`):
descriptions (bool):
        verbosity (int): Specifies how much detail about the test run is written to the console
Attributes:
infoclass (TestInfo): Class for storing information about test execution
"""
def __init__(self, stream, descriptions, verbosity):
HtmlTestResult.__init__(self, stream, descriptions, verbosity)
self.infoclass = TestInfo
@failfast
def addFailure(self, test, err):
"""Method which create information about failed tests on fail.
Args:
            test (type): Executed test method
err (tuple): Holds detailed information about occurred error
"""
self._save_output_data()
test_info = self._create_test_info(test, err)
self._prepare_callback(test_info, self.failures, "FAIL", "F")
@failfast
def addError(self, test, err):
"""Method which create information about failed tests on error.
Args:
            test (type): Executed test method
err (tuple): Holds detailed information about occurred error
"""
self._save_output_data()
test_info = self._create_test_info(test, err)
self._prepare_callback(test_info, self.errors, 'ERROR', 'E')
def _create_test_info(self, test, err):
"""Base method for creating of instance of _TestInfo class based on given parameters.
Args:
            test (type): Executed test method
err (tuple): Holds detailed information about occurred error
Returns:
TestInfo: Instance of a _TestInfo class which holds information about execution of a test
"""
screen_shot = test.screen_shot_path if hasattr(test, 'screen_shot_path') else None
return self.infoclass(self, test, outcome=self.infoclass.FAILURE, err=err, screen_shot=screen_shot)
class Runner(HTMLTestRunner):
"""Class for running test scenarios.
Args:
driver_name (str): Name of a driver
output (str): Path to folder for storing test report
report_title (str): Title of a generated test report
report_name (str): Name of a html file with test report
template (str): Path to file with test report template
resultclass (Result): Test result class
Attributes:
driver_name (str): Name of a driver
output (str): Path to folder for storing test report
report_title (str): Title of a generated test report
report_name (str): Name of a html file with test report
template (str): Path to file with test report template
resultclass (Result): Test result class
"""
def __init__(self, driver_name="", output=DEFAULT_OUTPUT, report_title="Test results", report_name="TestReport",
template=DEFAULT_TEMPLATE, resultclass=Result):
print("\n Running driver %s..." % driver_name)
report_name = report_name + "_" + driver_name + "_"
HTMLTestRunner.__init__(self, output=output, report_title=report_title, report_name=report_name,
template=template, resultclass=resultclass, combine_reports=True)
```
#### File: tests/file_manager/test_file_manager.py
```python
import os
import shutil
import unittest
from ddt import ddt, data, unpack
from selenium_generator.base.file_manager import FileManager
from pathlib import Path
@ddt
class FileManagerTest(unittest.TestCase):
path = os.path.join(os.path.dirname(__file__), "test_data")
path_mkdir = os.path.join(path, "test_mkdir")
path_rmtree = os.path.join(path, "test_rmtree")
@classmethod
def setUpClass(cls):
os.makedirs(os.path.dirname(os.path.join(cls.path_rmtree, "inner_folder", "test_file.txt")))
def test_remove_tree(self):
FileManager.remove_tree(self.path_rmtree)
self.assertFalse(Path(self.path_rmtree).exists())
def test_mkdir(self):
FileManager.mkdir(self.path_mkdir)
self.assertTrue(Path(self.path_mkdir).exists())
@data(("test_json.json", True), ("blabla.json", False))
@unpack
def test_file_exists(self, filename, result):
self.assertTrue(FileManager.file_exists(os.path.join(self.path, filename)) == result)
def test_get_list_of_files(self):
self.assertEqual(FileManager.get_list_of_files(self.path).__len__(), 3)
def test_load_yaml(self):
file = FileManager.load_yaml(os.path.join(self.path, "test_yaml.yaml"))
self.assertEqual(type(file), dict)
self.assertEqual(file["test_key"], "test_value")
def test_load_json(self):
file = FileManager.load_json(os.path.join(self.path, "test_json.json"))
self.assertEqual(type(file), dict)
self.assertEqual(file["test_key"], "test_value")
@data(("test_json.json", True), ("test_yaml.yaml", False))
@unpack
def test_check_extension(self, filename, result):
self.assertEqual(FileManager.check_extension(os.path.join(self.path, filename), ".json"), result)
@classmethod
def tearDownClass(cls):
if Path(cls.path_mkdir).exists():
Path(cls.path_mkdir).rmdir()
if Path(cls.path_rmtree).exists():
shutil.rmtree(cls.path_rmtree)
```
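The test above treats `FileManager` as a set of static helpers; a rough sketch of the implied API (paths are placeholders):
```python
# Rough sketch of the FileManager API implied by the tests above; paths are placeholders.
from selenium_generator.base.file_manager import FileManager

FileManager.mkdir("out/reports")                           # create a directory
if FileManager.file_exists("config/config.yaml"):
    config = FileManager.load_yaml("config/config.yaml")   # parsed into a dict
scenarios = FileManager.get_list_of_files("scenarios")     # list directory contents
FileManager.check_extension("data/test.json", ".json")     # True only for a matching extension
FileManager.remove_tree("out/reports")                     # recursive delete
```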
#### File: selenium_generator/tests/test_singleton.py
```python
import unittest
from selenium_generator.base.singleton import singleton
@singleton
class TestClass:
pass
class SingletonTest(unittest.TestCase):
def test_singleton(self):
a = TestClass()
b = TestClass()
self.assertTrue(a is b)
``` |
{
"source": "jjaroszsfdc/SalesforcePy",
"score": 2
} |
#### File: SalesforcePy/tests/test_approvals.py
```python
import testutil
import responses
@responses.activate
def test_query_approvals():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("approvals_response_200")
client = testutil.get_client()
ar = client.approvals()
sfdc_response = ar[0]
assert ar[1].status == 200
assert ar[1].http_method == "GET"
assert "approvals" in sfdc_response
@responses.activate
def test_query_approvals_with_proxy():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("approvals_response_200")
client = testutil.get_client_with_proxy()
ar = client.approvals()
sfdc_response = ar[0]
assert ar[1].status == 200
assert ar[1].http_method == "GET"
assert ar[1].proxies.get("https") is testutil.proxies.get("https")
assert "approvals" in sfdc_response
@responses.activate
def test_approval_request():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("approval_request_response_200")
client = testutil.get_client()
body = {
"requests": [{
"actionType": "Submit",
"contextId": "00161000011ueBV",
"nextApproverIds": ["00561000000j3h2"],
"comments": "this is a test",
"contextActorId": "005610000027SlY",
"processDefinitionNameOrId": "test_account",
"skipEntryCriteria": "true"}]
}
ar = client.approvals(body)
req_response = ar[0]
request_status = req_response[0].get("instanceStatus", None)
assert ar[1].status == 200
assert ar[1].http_method == "POST"
assert request_status == "Pending"
@responses.activate
def test_approval_request_with_proxy():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("approval_request_response_200")
client = testutil.get_client_with_proxy()
body = {
"requests": [{
"actionType": "Submit",
"contextId": "00161000011ueBV",
"nextApproverIds": ["00561000000j3h2"],
"comments": "this is a test",
"contextActorId": "005610000027SlY",
"processDefinitionNameOrId": "test_account",
"skipEntryCriteria": "true"}]
}
ar = client.approvals(body)
req_response = ar[0]
request_status = req_response[0].get("instanceStatus", None)
assert ar[1].status == 200
assert ar[1].http_method == "POST"
assert ar[1].proxies.get("https") is testutil.proxies.get("https")
assert request_status == "Pending"
```
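Outside the mocked tests, the same approval calls would look roughly like this; `client` stands for an already-authenticated SalesforcePy client (its construction is handled by `testutil` above) and the IDs are placeholders:
```python
# Sketch based on the calls exercised above; `client` is an authenticated SalesforcePy client.
approvals, response = client.approvals()            # GET: list pending approval requests
print(response.status, approvals.get("approvals"))

submit_body = {"requests": [{"actionType": "Submit", "contextId": "001..."}]}  # placeholder IDs
result, response = client.approvals(submit_body)    # POST: submit an approval request
print(result[0].get("instanceStatus"))              # e.g. "Pending"
```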
#### File: SalesforcePy/tests/test_jobs.py
```python
import os
import testutil
import responses
tests_dir = os.path.dirname(os.path.realpath(__file__))
ACCOUNTS_INSERT_BULK_CSV = os.path.join(tests_dir, "fixtures/accounts_insert_bulk.csv")
ACCOUNTS_INSERT_JOB = {"object": "Account", "operation": "insert", "lineEnding": "CRLF"}
UPLOAD_COMPLETED = "UploadComplete"
@responses.activate
def test_create_job():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("jobs_create_200")
client = testutil.get_client()
job = ACCOUNTS_INSERT_JOB
create_result = client.jobs.ingest.create(job_resource=job)
assert create_result[0] == testutil.mock_responses.get("jobs_create_200").get("body")
assert create_result[1].status == 200
@responses.activate
def test_batches():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("jobs_create_200")
testutil.add_response("jobs_batches_201")
client = testutil.get_client()
job = ACCOUNTS_INSERT_JOB
create_result = client.jobs.ingest.create(job_resource=job)
with open(ACCOUNTS_INSERT_BULK_CSV) as f:
csv_file = f.read()
job_id = create_result[0].get("id")
batches_result = client.jobs.ingest.batches(job_id=job_id, csv_file=csv_file)
assert batches_result[0] == testutil.mock_responses.get("jobs_batches_201").get("body")
assert batches_result[1].status == 201
@responses.activate
def test_update_close_job():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("jobs_create_200")
testutil.add_response("jobs_update_close_200")
client = testutil.get_client()
job = ACCOUNTS_INSERT_JOB
create_result = client.jobs.ingest.create(job_resource=job)
job_id = create_result[0].get("id")
update_result = client.jobs.ingest.update(job_id=job_id, state=UPLOAD_COMPLETED)
assert update_result[0] == testutil.mock_responses.get("jobs_update_close_200").get("body")
assert update_result[1].status == 200
@responses.activate
def test_get_job_info():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("jobs_create_200")
testutil.add_response("jobs_get_job_info_200")
client = testutil.get_client()
job = ACCOUNTS_INSERT_JOB
create_result = client.jobs.ingest.create(job_resource=job)
job_id = create_result[0].get("id")
get_result = client.jobs.ingest.get(job_id=job_id)
assert get_result[0] == testutil.mock_responses.get("jobs_get_job_info_200").get("body")
assert get_result[1].status == 200
@responses.activate
def test_get_all_jobs():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("jobs_create_200")
testutil.add_response("jobs_get_all_200")
client = testutil.get_client()
job = ACCOUNTS_INSERT_JOB
client.jobs.ingest.create(job_resource=job)
get_result = client.jobs.ingest.get()
assert get_result[0] == testutil.mock_responses.get("jobs_get_all_200").get("body")
assert get_result[1].status == 200
@responses.activate
def test_get_successful_jobs():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("jobs_create_200")
testutil.add_response("jobs_batches_201")
testutil.add_response("jobs_update_close_200")
testutil.add_response("jobs_get_successes_200")
client = testutil.get_client()
job = ACCOUNTS_INSERT_JOB
create_result = client.jobs.ingest.create(job_resource=job)
with open(ACCOUNTS_INSERT_BULK_CSV) as f:
csv_file = f.read()
job_id = create_result[0].get("id")
client.jobs.ingest.batches(job_id=job_id, csv_file=csv_file)
get_result = client.jobs.ingest.get(job_id=job_id, successes=True)
assert get_result[0] == testutil.mock_responses.get("jobs_get_successes_200").get("body")
assert get_result[1].status == 200
@responses.activate
def test_get_failed_jobs():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("jobs_create_200")
testutil.add_response("jobs_batches_201")
testutil.add_response("jobs_update_close_200")
testutil.add_response("jobs_get_failures_200")
client = testutil.get_client()
job = ACCOUNTS_INSERT_JOB
create_result = client.jobs.ingest.create(job_resource=job)
with open(ACCOUNTS_INSERT_BULK_CSV) as f:
csv_file = f.read()
job_id = create_result[0].get("id")
client.jobs.ingest.batches(job_id=job_id, csv_file=csv_file)
get_result = client.jobs.ingest.get(job_id=job_id, failures=True)
assert get_result[0] == testutil.mock_responses.get("jobs_get_failures_200").get("body")
assert get_result[1].status == 200
@responses.activate
def test_get_unprocessed_jobs():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("jobs_create_200")
testutil.add_response("jobs_batches_201")
testutil.add_response("jobs_update_close_200")
testutil.add_response("jobs_get_unprocessed_200")
client = testutil.get_client()
job = ACCOUNTS_INSERT_JOB
create_result = client.jobs.ingest.create(job_resource=job)
with open(ACCOUNTS_INSERT_BULK_CSV) as f:
csv_file = f.read()
job_id = create_result[0].get("id")
client.jobs.ingest.batches(job_id=job_id, csv_file=csv_file)
get_result = client.jobs.ingest.get(job_id=job_id, unprocessed=True)
assert get_result[0] == testutil.mock_responses.get("jobs_get_unprocessed_200").get("body")
assert get_result[1].status == 200
@responses.activate
def test_delete_job():
testutil.add_response("login_response_200")
testutil.add_response("api_version_response_200")
testutil.add_response("jobs_create_200")
testutil.add_response("jobs_delete_204")
client = testutil.get_client()
job = ACCOUNTS_INSERT_JOB
create_result = client.jobs.ingest.create(job_resource=job)
job_id = create_result[0].get("id")
delete_result = client.jobs.ingest.delete(job_id=job_id)
assert delete_result[0] == testutil.mock_responses.get("jobs_delete_204").get("body")
assert delete_result[1].status == 204
```
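The tests above walk through the Bulk API ingest lifecycle one step at a time; condensed into a single hedged sketch (the CSV path and job spec are placeholders, and `client` is an authenticated SalesforcePy client):
```python
# Condensed ingest lifecycle implied by the tests above; values are placeholders.
job_spec = {"object": "Account", "operation": "insert", "lineEnding": "CRLF"}

create_result = client.jobs.ingest.create(job_resource=job_spec)
job_id = create_result[0].get("id")

with open("accounts.csv") as f:
    client.jobs.ingest.batches(job_id=job_id, csv_file=f.read())    # upload the CSV data

client.jobs.ingest.update(job_id=job_id, state="UploadComplete")    # close the job for processing
info = client.jobs.ingest.get(job_id=job_id)                        # poll job info
successes = client.jobs.ingest.get(job_id=job_id, successes=True)   # fetch successful records
```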
#### File: SalesforcePy/tests/test_search.py
```python
import testutil
import responses
@responses.activate
def test_search():
testutil.add_response("login_response_200")
testutil.add_response("search_response_200")
testutil.add_response("api_version_response_200")
client = testutil.get_client()
search_result = client.search(
"FIND {sfdc_py} RETURNING Account(Id, Name) LIMIT 5")
assert search_result[0] == testutil.mock_responses["search_response_200"]["body"]
assert search_result[1].status == 200
@responses.activate
def test_search_with_proxy():
testutil.add_response("login_response_200")
testutil.add_response("search_response_200")
testutil.add_response("api_version_response_200")
client = testutil.get_client_with_proxy()
search_result = client.search(
"FIND {sfdc_py} RETURNING Account(Id, Name) LIMIT 5")
assert search_result[0] == testutil.mock_responses["search_response_200"]["body"]
assert search_result[1].status == 200
assert search_result[1].proxies.get("https") is testutil.proxies.get("https")
``` |
{
"source": "JJashim/Face_Localize_Feature_Extract",
"score": 2
} |
#### File: JJashim/Face_Localize_Feature_Extract/face_localize_feature_extract.py
```python
from glob import glob
import sys #can be used to perform sys.exit()
import cv2
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # suppress unnecessary TensorFlow warning/info prints
import tensorflow as tf
tf.get_logger().setLevel('INFO') # suppress unnecessary TensorFlow warning/info prints
tf.logging.set_verbosity(tf.logging.ERROR) # suppress unnecessary TensorFlow warning/info prints
import pandas as pd
from facenet.src import facenet
from facenet.src.align import detect_face
def create_Dir(folder_path,folder_name=''):
if folder_name is not None:
folder_path = os.path.join(folder_path,folder_name)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
return folder_path
# Crop an image using bounding box info (relies on the module-level `margin` for padding)
def crop_image_by_bbox(image,bbox,img_size):
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(bbox[0] - margin / 2, 0)
bb[1] = np.maximum(bbox[1] - margin / 2, 0)
bb[2] = np.minimum(bbox[2] + margin / 2, img_size[1])
bb[3] = np.minimum(bbox[3] + margin / 2, img_size[0])
cropped = image[bb[1]:bb[3], bb[0]:bb[2], :]
return cropped,bb
def load_image_align_data(dest_path,image_paths,image_size, margin, pnet, rnet, onet, discarded_folder_path = '', bbox_thresh = 0.95):
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
image_list,image_names = [], []
discarded_image_cnt = 0
img_files = glob(image_paths+'*.png') # in case glob doesn't work in a Windows environment, replace it with the 'os' library module
img_files.extend(glob(image_paths+'*.jpg')) # add more image extensions here, or restrict users to specific input formats
img_files = sorted(img_files) # check the length -- report an error and exit if it is empty
print('Total images read are:', len(img_files))
for files in img_files:
img = cv2.imread(files,1)
if img is not None:
image_list.append(img)
image_files = img_files.copy()
recog_image_paths,img_list = [],[]
fnames_list = []
cropped_big_face = dest_path+'_cropped_face/' #Dir to store cropped faces
cropped_big_face = create_Dir(cropped_big_face)
print('path of cropped_big_face:',cropped_big_face)
for img_path,x in zip(image_files,range(len(image_list))):
fname = os.path.basename(img_path)
dest_fname = os.path.join(dest_path,fname)
img_size = np.asarray(image_list[x].shape)[0:2]
img = cv2.imread(img_path,cv2.IMREAD_COLOR) # check img.shape and log the filename if it is empty
bounding_boxes, _ = detect_face.detect_face(
image_list[x], minsize, pnet, rnet, onet, threshold, factor)
nrof_samples = len(bounding_boxes)
r_cnt = 0
img_temp = image_list[x].copy()
while(nrof_samples == 0 and r_cnt < 3):
image_list[x] = cv2.rotate(image_list[x], cv2.ROTATE_90_CLOCKWISE)
bounding_boxes, _ = detect_face.detect_face(
image_list[x], minsize, pnet, rnet, onet, threshold, factor)
nrof_samples = len(bounding_boxes)
r_cnt += 1
if nrof_samples > 0:
if r_cnt == 0:
#cv2.imwrite(os.path.join(dest_path,fname),img)
pass
#perform image rotation of degrees: [90,180,270] iff faces aren't recognized
elif r_cnt == 1:
rot_angle = cv2.ROTATE_90_CLOCKWISE
elif r_cnt == 2:
rot_angle = cv2.ROTATE_180
elif r_cnt == 3:
rot_angle = cv2.ROTATE_90_COUNTERCLOCKWISE
if r_cnt > 0:
image_list[x] = cv2.rotate(img_temp, rot_angle)
else:
image_list[x] = img_temp
big_area = -1;big_face_no = -1 #param used for finding the bigger face within the image
img_size = np.asarray(image_list[x].shape)[0:2]
for i in range(nrof_samples):
if bounding_boxes[i][4] > bbox_thresh:
img_name = fname#img_path
det = np.squeeze(bounding_boxes[i, 0:4])
cropped,bb = crop_image_by_bbox(image_list[x],det,img_size)
x1,y1,x2,y2 = bb
area_ratio = (x2-x1)*(y2-y1)/(np.prod(img_size))
if area_ratio > big_area:
big_area = area_ratio
big_face_no = i
#cv2.rectangle(image_list[x], (x1, y1), (x2, y2), (0,0,255), 3) #comment -- to remove drawing bounding box on all faces detected
#cv2.imwrite(dest_fname,image_list[x]) #comment -- to remove drawing bounding box on all faces detected
if big_face_no < 0:
continue
else: #indirectly checks bounding_boxes[i][4] > 0.95
det = np.squeeze(bounding_boxes[big_face_no, 0:4])
print('conf. score of ',img_name,' is:',str(round(bounding_boxes[big_face_no][4],3))) #print in log: confidence score of big face detected and localized.
cropped,bb = crop_image_by_bbox(image_list[x],det,img_size)
cv2.imwrite(os.path.join(cropped_big_face,img_name),cropped)
x1,y1,x2,y2 = bb
cv2.rectangle(image_list[x], (x1, y1), (x2, y2), (0,0,255), 3)#draw bounding box only on big face
cv2.imwrite(dest_fname,image_list[x])
aligned = cv2.resize(
cropped, (image_size, image_size), cv2.INTER_LINEAR)
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
fnames_list.append(img_name)
else:
discarded_image_cnt += 1
discard_fname = os.path.join(discarded_folder_path,fname)
cv2.imwrite(discard_fname,img_temp)
print('Total number of discarded images:',discarded_image_cnt)
if len(img_list) > 0:
images = np.stack(img_list)
print('Total number of localized images:',len(images)) # number of images that could be localized
return images, fnames_list
else:
# exit here and mention that no faces were recognized; check the input folder
print("load_image_align_data returned None !")
return None
# Create model to perform localization -- MTCNN
def create_network_face_detection(gpu_memory_fraction):
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
return pnet, rnet, onet
if __name__ == '__main__':
FACE_FEATURE_REQUIRED = True #should be set by the user -- True/False. True/1 means Face Localization + Feature Extraction and False/0 means only Face Localization is performed
batch_size = 32 #config -- user param
margin = 44 #add to config -- developer
image_size = 160 #add to config -- developer --image size used to resize faces which will be passed to Facenet for face feature extraction
BBox_Thresh = 0.95 #add to config -- developer
image_paths = '/Users/jowherali.syed/Projects/DL_Comp/face_comp_codes/input/Images/' #Input path
dest_path = '/Users/jowherali.syed/Projects/DL_Comp/face_comp_codes/output/' #Output Folder
dest_path = create_Dir(dest_path) #create output DIR
img_dest_path = create_Dir(dest_path,'Localized_Images') #create DIR to store localized images within output/Localized_Images
discard_folder_path = create_Dir(dest_path,'Discarded_Images') #create DIR to store discarded images
if FACE_FEATURE_REQUIRED:
model_path = '/Users/jowherali.syed/Projects/DL_Comp/face_comp_codes/models/20180402-114759/' #add to config --model_path: "Required for face extraction alone"
csv_name = 'face_fingerprint.csv' #Output CSV file name
csv_dest_path = create_Dir(dest_path,'csv_output') #Create csv folder within output folder
csv_dest_file_path = os.path.join(csv_dest_path,csv_name)
# To perform face localize
pnet, rnet, onet = create_network_face_detection(gpu_memory_fraction=1.0)
train_images,image_paths = load_image_align_data(img_dest_path,image_paths,image_size, margin, pnet, rnet, onet,discarded_folder_path = discard_folder_path, bbox_thresh = BBox_Thresh)
# To perform Facial feature extraction
if FACE_FEATURE_REQUIRED:
temp_tr_images,temp_image_paths = [],[] #temp vars required for batch process
list_image_paths,list_train_embs = [],[] #to collate into a single list post batch process
with tf.Graph().as_default():
with tf.Session() as sess:
facenet.load_model(model_path)
images_placeholder = sess.graph.get_tensor_by_name("input:0")
embeddings = sess.graph.get_tensor_by_name("embeddings:0")
phase_train_placeholder = sess.graph.get_tensor_by_name("phase_train:0")
bt_sz = batch_size
for i in range(0,len(train_images),bt_sz):
temp_tr_images = train_images[i:i+bt_sz]
temp_image_paths = image_paths[i:i+bt_sz]
feed_dict = {images_placeholder: temp_tr_images,
phase_train_placeholder: False} # images are fed to the model in batches of `batch_size` (config param, default 32)
print('len(temp_tr_images):',len(temp_tr_images))
train_embs = sess.run(embeddings, feed_dict=feed_dict)
list_train_embs.extend(train_embs)
list_image_paths.extend(temp_image_paths)
embs_dict = dict(zip(list_image_paths,list_train_embs))
df_train = pd.DataFrame.from_dict(embs_dict,orient='index')
print('Face Embedded: No. of images:', len(image_paths),'within',len(train_images),'Localized Images')
df_train.to_csv(csv_dest_file_path) #output CSV files -- {img_names,features}
#At the end of execution -- print or inform users about the Output folders
```
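A common way to consume the generated `face_fingerprint.csv` is to compare two embeddings; a minimal sketch (the file path and the 0.7 cosine threshold are placeholders, not values from this project):
```python
# Minimal sketch: comparing two rows of the generated embedding CSV.
# The cosine threshold of 0.7 is an arbitrary placeholder, not a project value.
import numpy as np
import pandas as pd

df = pd.read_csv("output/csv_output/face_fingerprint.csv", index_col=0)
a, b = df.iloc[0].to_numpy(), df.iloc[1].to_numpy()
cosine = float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
print("likely the same person?", cosine > 0.7)
```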
#### File: facenet/src/facenet.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import numpy as np
from tensorflow.python.training import training
import random
import re
from tensorflow.python.platform import gfile
import math
from six import iteritems
def prewhiten(x):
mean = np.mean(x)
std = np.std(x)
std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
y = np.multiply(np.subtract(x, mean), 1/std_adj)
return y
def load_model(model, input_map=None):
# Check if the model is a model directory (containing a metagraph and a checkpoint file)
# or if it is a protobuf file with a frozen graph
model_exp = os.path.expanduser(model)
if (os.path.isfile(model_exp)):
print('Model filename: %s' % model_exp)
with gfile.FastGFile(model_exp,'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, input_map=input_map, name='')
else:
print('Model directory: %s' % model_exp)
meta_file, ckpt_file = get_model_filenames(model_exp)
print('Metagraph file: %s' % meta_file)
print('Checkpoint file: %s' % ckpt_file)
saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file), input_map=input_map)
saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
def get_model_filenames(model_dir):
files = os.listdir(model_dir)
meta_files = [s for s in files if s.endswith('.meta')]
if len(meta_files)==0:
raise ValueError('No meta file found in the model directory (%s)' % model_dir)
elif len(meta_files)>1:
raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)
meta_file = meta_files[0]
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_file = os.path.basename(ckpt.model_checkpoint_path)
return meta_file, ckpt_file
meta_files = [s for s in files if '.ckpt' in s]
max_step = -1
for f in files:
step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
if step_str is not None and len(step_str.groups())>=2:
step = int(step_str.groups()[1])
if step > max_step:
max_step = step
ckpt_file = step_str.groups()[0]
return meta_file, ckpt_file
def list_variables(filename):
reader = training.NewCheckpointReader(filename)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
return names
def put_images_on_grid(images, shape=(16,8)):
nrof_images = images.shape[0]
img_size = images.shape[1]
bw = 3
img = np.zeros((shape[1]*(img_size+bw)+bw, shape[0]*(img_size+bw)+bw, 3), np.float32)
for i in range(shape[1]):
x_start = i*(img_size+bw)+bw
for j in range(shape[0]):
img_index = i*shape[0]+j
if img_index>=nrof_images:
break
y_start = j*(img_size+bw)+bw
img[x_start:x_start+img_size, y_start:y_start+img_size, :] = images[img_index, :, :, :]
if img_index>=nrof_images:
break
return img
def write_arguments_to_file(args, filename):
with open(filename, 'w') as f:
for key, value in iteritems(vars(args)):
f.write('%s: %s\n' % (key, str(value)))
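# Illustrative note (dummy data, not part of this module): prewhiten normalizes a face
# crop to roughly zero mean and unit standard deviation before it is embedded, e.g.
#   face = np.random.randint(0, 255, (160, 160, 3)).astype(np.float32)
#   white = prewhiten(face)   # white.mean() ~ 0.0, white.std() ~ 1.0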
``` |
{
"source": "jjasonkal/pinry",
"score": 2
} |
#### File: pinry/pinry/middleware.py
```python
from django.middleware.csrf import get_token
class ForceCSRFCookieMiddleware:
def process_request(self, request):
if "CSRF_TOKEN" not in request.META:
get_token(request)
else:
if request.method != "GET":
get_token(request)
return
```
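Since the class above only defines `process_request`, it is an old-style middleware; a sketch of how it would be registered, assuming the legacy `MIDDLEWARE_CLASSES` setting (for new-style `MIDDLEWARE` it would additionally need `MiddlewareMixin`):
```python
# settings.py sketch (assumption: legacy MIDDLEWARE_CLASSES-style registration;
# the dotted path is inferred from the file location and may differ).
MIDDLEWARE_CLASSES = [
    # ... Django defaults ...
    "pinry.middleware.ForceCSRFCookieMiddleware",
]
```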
#### File: pinry/users/serializers.py
```python
from django.conf import settings
from django.contrib.auth import login
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from users.models import User
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = (
'username',
'email',
'gravatar',
'password',
'password_repeat',
settings.DRF_URL_FIELD_NAME,
)
extra_kwargs = {
settings.DRF_URL_FIELD_NAME: {
"view_name": "users:user-detail",
},
}
password = serializers.CharField(
write_only=True,
required=True,
allow_blank=False,
min_length=6,
max_length=32,
)
password_repeat = serializers.CharField(
write_only=True,
required=True,
allow_blank=False,
min_length=6,
max_length=32,
)
def create(self, validated_data):
if validated_data['password'] != validated_data['password_repeat']:
raise ValidationError(
detail={
"password_repeat": "Tow password doesn't match",
}
)
validated_data.pop('password_repeat')
password = validated_data.pop('password')
user = super(UserSerializer, self).create(
validated_data,
)
user.set_password(password)
user.save()
login(
self.context['request'],
user=user,
backend=settings.AUTHENTICATION_BACKENDS[0],
)
return user
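# Example registration payload that passes this serializer (placeholder values):
#   {"username": "alice", "email": "alice@example.com",
#    "password": "s3cretpw", "password_repeat": "s3cretpw"}
# password and password_repeat must be 6-32 characters (field validation) and must
# match each other, otherwise create() raises a ValidationError on password_repeat.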
``` |
{
"source": "jjasonkal/polyaxon",
"score": 2
} |
#### File: polyaxon/client/client.py
```python
import polyaxon_sdk
from polyaxon import settings
from polyaxon.client.transport import Transport
class PolyaxonClient:
def __init__(self, config=None, token=None):
self._config = config or settings.CLIENT_CONFIG
self._config.token = token or settings.AUTH_CONFIG.token
self._transport = None
self.api_client = polyaxon_sdk.ApiClient(
self.config.sdk_config, **self.config.client_header
)
self._projects_v1 = None
self._runs_v1 = None
self._auth_v1 = None
self._users_v1 = None
self._versions_v1 = None
self._agents_v1 = None
self._components_v1 = None
self._models_v1 = None
def reset(self):
self._transport = None
self._projects_v1 = None
self._runs_v1 = None
self._auth_v1 = None
self._users_v1 = None
self._versions_v1 = None
self._agents_v1 = None
self._components_v1 = None
self._models_v1 = None
self.api_client = polyaxon_sdk.ApiClient(
self.config.sdk_config, **self.config.client_header
)
def set_health_check(self, url):
self.transport.set_health_check(url)
def unset_health_check(self, url):
self.transport.unset_health_check(url)
@property
def transport(self):
if not self._transport:
self._transport = Transport(config=self.config)
return self._transport
@property
def config(self):
return self._config
@property
def projects_v1(self):
if not self._projects_v1:
self._projects_v1 = polyaxon_sdk.ProjectsV1Api(self.api_client)
return self._projects_v1
@property
def runs_v1(self):
if not self._runs_v1:
self._runs_v1 = polyaxon_sdk.RunsV1Api(self.api_client)
return self._runs_v1
@property
def auth_v1(self):
if not self._auth_v1:
self._auth_v1 = polyaxon_sdk.AuthV1Api(self.api_client)
return self._auth_v1
@property
def users_v1(self):
if not self._users_v1:
self._users_v1 = polyaxon_sdk.UsersV1Api(self.api_client)
return self._users_v1
@property
def versions_v1(self):
if not self._versions_v1:
self._versions_v1 = polyaxon_sdk.VersionsV1Api(self.api_client)
return self._versions_v1
@property
def agents_v1(self):
if not self._agents_v1:
self._agents_v1 = polyaxon_sdk.AgentsV1Api(self.api_client)
return self._agents_v1
@property
def components_v1(self):
if not self._components_v1:
self._components_v1 = polyaxon_sdk.HubComponentsV1Api(self.api_client)
return self._components_v1
@property
def models_v1(self):
if not self._models_v1:
self._models_v1 = polyaxon_sdk.HubModelsV1Api(self.api_client)
return self._models_v1
def sanitize_for_serialization(self, value):
return self.api_client.sanitize_for_serialization(value)
```
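A short sketch of how the lazily-built API groups above are typically reached; the token is a placeholder and the config defaults to `settings.CLIENT_CONFIG` as shown in `__init__`:
```python
# Sketch: each *_v1 property builds its polyaxon_sdk API object on first access.
client = PolyaxonClient(token="...")      # token placeholder; config defaults to settings.CLIENT_CONFIG
projects_api = client.projects_v1         # polyaxon_sdk.ProjectsV1Api, created lazily
runs_api = client.runs_v1                 # polyaxon_sdk.RunsV1Api
client.reset()                            # drops the cached APIs and rebuilds the ApiClient
```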
#### File: polyaxonfile/specs/base.py
```python
import copy
from marshmallow import ValidationError
from polyaxon.config_reader.spec import ConfigSpec
from polyaxon.exceptions import PolyaxonfileError
from polyaxon.pkg import SCHEMA_VERSION
from polyaxon.polyaxonfile.specs import kinds
from polyaxon.polyaxonfile.specs.sections import Sections
from polyaxon.utils.list_utils import to_list
class BaseSpecification(Sections):
"""Base abstract specification for plyaxonfiles and configurations."""
_SPEC_KIND = None
MAX_VERSION = (
SCHEMA_VERSION # Max Polyaxonfile specification version this CLI supports
)
MIN_VERSION = (
SCHEMA_VERSION # Min Polyaxonfile specification version this CLI supports
)
CONFIG = None
@classmethod
def check_version(cls, data):
if cls.VERSION not in data:
raise PolyaxonfileError("The Polyaxonfile `version` must be specified.")
if not cls.MIN_VERSION <= data[cls.VERSION] <= cls.MAX_VERSION:
raise PolyaxonfileError(
"The Polyaxonfile's version specified is not supported by your current CLI."
"Your CLI support Polyaxonfile versions between: {} <= v <= {}."
"You can run `polyaxon upgrade` and "
"check documentation for the specification.".format(
cls.MIN_VERSION, cls.MAX_VERSION
)
)
@classmethod
def check_kind(cls, data):
if cls.KIND not in data:
raise PolyaxonfileError("The Polyaxonfile `kind` must be specified.")
if data[cls.KIND] not in kinds.KINDS:
raise PolyaxonfileError(
"The Polyaxonfile with kind `{}` is not a supported value.".format(
data[cls.KIND]
)
)
@classmethod
def check_data(cls, data):
cls.check_version(data)
cls.check_kind(data)
if data[cls.KIND] != cls._SPEC_KIND:
raise PolyaxonfileError(
"The specification used `{}` is incompatible with the kind `{}`.".format(
cls.__name__, data[cls.KIND]
)
)
for key in set(data.keys()) - set(cls.SECTIONS):
raise PolyaxonfileError(
"Unexpected section `{}` in Polyaxonfile version `{}`. "
"Please check the Polyaxonfile specification "
"for this version.".format(key, data[cls.VERSION])
)
for key in cls.REQUIRED_SECTIONS:
if key not in data:
raise PolyaxonfileError(
"{} is a required section for a valid Polyaxonfile".format(key)
)
@classmethod
def get_kind(cls, data):
cls.check_kind(data=data)
return data[cls.KIND]
@classmethod
def read(cls, values):
if isinstance(values, cls.CONFIG):
return values
values = to_list(values)
data = ConfigSpec.read_from([{"kind": cls._SPEC_KIND}] + values)
try:
config = cls.CONFIG.from_dict(copy.deepcopy(data))
except TypeError as e:
raise ValidationError(
"Received a non valid config `{}`: `{}`".format(cls._SPEC_KIND, e)
)
cls.check_data(data)
return config
```
#### File: compiler/resolvers/base.py
```python
from typing import Dict, Optional
from polyaxon.exceptions import PolyaxonCompilerError
from polyaxon.polyaxonfile import CompiledOperationSpecification
from polyaxon.polyflow import V1CompiledOperation
from polyaxon.polypod.compiler.config import PolypodConfig
from polyaxon.polypod.contexts import resolve_contexts, resolve_globals_contexts
class BaseResolver:
KINDS = set()
def __init__(
self,
run,
compiled_operation: V1CompiledOperation,
owner_name: str,
project_name: str,
project_uuid: str,
run_name: str,
run_uuid: str,
run_path: str,
params: Optional[Dict],
):
if not compiled_operation:
raise PolyaxonCompilerError("A run spec is required for resolution.")
self.run = run
self.compiled_operation = compiled_operation
self.owner_name = owner_name
self.project_name = project_name
self.project_uuid = project_uuid
self.run_name = run_name
self.run_uuid = run_uuid
self.run_path = run_path
self.params = params or {}
self.connection_by_names = {}
self.namespace = None
self.artifacts_store = None
self.secrets = None
self.config_maps = None
self.polyaxon_sidecar = None
self.polyaxon_init = None
self.iteration = None
self.agent_config = None
self.contexts = {}
self.globals = {}
def resolve_edges(self):
pass
def resolve_globals(self):
self.globals = resolve_globals_contexts(
namespace=self.namespace,
owner_name=self.owner_name,
project_name=self.project_name,
project_uuid=self.project_uuid,
run_name=self.run_name,
run_path=self.run_path,
run_uuid=self.run_uuid,
iteration=self.iteration,
)
def resolve_params(self):
raise NotImplementedError
def resolve_connections_params(self):
self.compiled_operation = CompiledOperationSpecification.apply_run_connections_params(
config=self.compiled_operation,
artifact_store=self.artifacts_store.name if self.artifacts_store else None,
contexts=self.globals,
)
def resolve_profile(self):
pass
def resolve_agent(self):
pass
def patch(self):
pass
def apply_content(self):
try:
self.compiled_operation = CompiledOperationSpecification.apply_context(
self.compiled_operation
)
except Exception as e:
raise PolyaxonCompilerError(
"Could not apply run context, error: {}".format(repr(e))
)
def resolve_io(self):
pass
def resolve_access(self):
pass
def resolve_connections(self):
polypod_config = PolypodConfig()
polypod_config.resolve(
compiled_operation=self.compiled_operation, agent_config=self.agent_config
)
self.polyaxon_sidecar = polypod_config.polyaxon_sidecar
self.polyaxon_init = polypod_config.polyaxon_init
self.namespace = polypod_config.namespace
self.secrets = polypod_config.secrets
self.config_maps = polypod_config.config_maps
self.connection_by_names = polypod_config.connection_by_names
self.artifacts_store = polypod_config.artifacts_store
def resolve_contexts(self):
self.contexts = resolve_contexts(
namespace=self.namespace,
owner_name=self.owner_name,
project_name=self.project_name,
project_uuid=self.project_uuid,
run_name=self.run_name,
run_path=self.run_path,
run_uuid=self.run_uuid,
compiled_operation=self.compiled_operation,
connection_by_names=self.connection_by_names,
artifacts_store=self.artifacts_store,
iteration=self.iteration,
)
def apply_contexts(self):
self.compiled_operation = CompiledOperationSpecification.apply_run_contexts(
self.compiled_operation, contexts=self.contexts
)
def resolve_state(self):
pass
def resolve(self) -> V1CompiledOperation:
self.resolve_edges()
self.resolve_globals()
self.resolve_params()
self.resolve_profile()
self.resolve_agent()
self.resolve_connections_params()
self.patch()
self.apply_content()
self.resolve_io()
self.resolve_access()
self.resolve_connections()
self.resolve_contexts()
self.apply_contexts()
self.resolve_state()
return self.compiled_operation
```
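Only `resolve_params` raises `NotImplementedError` above, so a concrete resolver can be sketched minimally (this subclass is hypothetical, not part of the codebase):
```python
# Hypothetical minimal resolver: every other step in resolve() already has a default
# implementation (often a no-op), so only resolve_params has to be supplied.
class MinimalResolver(BaseResolver):
    KINDS = {"job"}  # placeholder kind set

    def resolve_params(self):
        # validate or normalize the raw params here; this sketch keeps them as-is
        self.params = self.params or {}
```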
#### File: polyaxon/tracking/__init__.py
```python
from typing import List, Union, Sequence
from polyaxon.client import RunClient
from polyaxon.polyboard.artifacts import V1RunArtifact
from polyaxon.tracking.run import Run
from polyaxon_sdk import V1Operation
TRACKING_RUN = None
def init(
owner: str = None,
project: str = None,
run_uuid: str = None,
client: RunClient = None,
track_code: bool = True,
track_env: bool = False,
refresh_data: bool = False,
artifacts_path: str = None,
):
global TRACKING_RUN
TRACKING_RUN = Run(
owner=owner,
project=project,
run_uuid=run_uuid,
client=client,
track_code=track_code,
refresh_data=refresh_data,
track_env=track_env,
artifacts_path=artifacts_path,
)
def create(
name: str = None,
description: str = None,
tags: Union[str, Sequence[str]] = None,
content: Union[str, V1Operation] = None,
):
global TRACKING_RUN
TRACKING_RUN.create(
name=name, description=description, tags=tags, content=content,
)
def get_tensorboard_path():
global TRACKING_RUN
return TRACKING_RUN.get_tensorboard_path()
def get_outputs_path():
global TRACKING_RUN
return TRACKING_RUN.outputs_path
def get_artifacts_path():
global TRACKING_RUN
return TRACKING_RUN.artifacts_path
def log_metric(name, value, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_metric(
name=name, value=value, step=step, timestamp=timestamp,
)
def log_metrics(step=None, timestamp=None, **metrics):
global TRACKING_RUN
TRACKING_RUN.log_metrics(step=step, timestamp=timestamp, **metrics)
def log_image(data, name=None, step=None, timestamp=None, rescale=1, dataformats="CHW"):
global TRACKING_RUN
TRACKING_RUN.log_image(
data=data,
name=name,
step=step,
timestamp=timestamp,
rescale=rescale,
dataformats=dataformats,
)
def log_image_with_boxes(
tensor_image,
tensor_boxes,
name=None,
step=None,
timestamp=None,
rescale=1,
dataformats="CHW",
):
global TRACKING_RUN
TRACKING_RUN.log_image_with_boxes(
tensor_image=tensor_image,
tensor_boxes=tensor_boxes,
name=name,
step=step,
timestamp=timestamp,
rescale=rescale,
dataformats=dataformats,
)
def log_mpl_image(data, name=None, close=True, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_mpl_image(
data=data, name=name, close=close, step=step, timestamp=timestamp,
)
def log_video(data, name=None, fps=4, step=None, timestamp=None, content_type=None):
global TRACKING_RUN
TRACKING_RUN.log_video(
data=data,
name=name,
fps=fps,
step=step,
timestamp=timestamp,
content_type=content_type,
)
def log_audio(
data, name=None, sample_rate=44100, step=None, timestamp=None, content_type=None,
):
global TRACKING_RUN
TRACKING_RUN.log_audio(
data=data,
name=name,
sample_rate=sample_rate,
step=step,
timestamp=timestamp,
content_type=content_type,
)
def log_text(name, text, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_text(
name=name, text=text, step=step, timestamp=timestamp,
)
def log_html(name, html, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_html(
name=name, html=html, step=step, timestamp=timestamp,
)
def log_np_histogram(name, values, counts, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_np_histogram(
name=name, values=values, counts=counts, step=step, timestamp=timestamp,
)
def log_histogram(name, values, bins, max_bins=None, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_histogram(
name=name,
values=values,
bins=bins,
max_bins=max_bins,
step=step,
timestamp=timestamp,
)
def log_model(path, name=None, framework=None, spec=None, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_model(
path=path,
name=name,
framework=framework,
spec=spec,
step=step,
timestamp=timestamp,
)
def log_dataframe(path, name=None, content_type=None, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_dataframe(
path=path, name=name, content_type=content_type, step=step, timestamp=timestamp,
)
def log_artifact(path, name=None, artifact_kind=None, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_artifact(
path=path,
name=name,
artifact_kind=artifact_kind,
step=step,
timestamp=timestamp,
)
def log_roc_auc_curve(name, fpr, tpr, auc=None, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_roc_auc_curve(
name=name, fpr=fpr, tpr=tpr, auc=auc, step=step, timestamp=timestamp,
)
def log_sklearn_roc_auc_curve(name, y_preds, y_targets, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_sklearn_roc_auc_curve(
name=name, y_preds=y_preds, y_targets=y_targets, step=step, timestamp=timestamp,
)
def log_pr_curve(
name, precision, recall, average_precision=None, step=None, timestamp=None
):
global TRACKING_RUN
TRACKING_RUN.log_pr_curve(
name=name,
precision=precision,
recall=recall,
average_precision=average_precision,
step=step,
timestamp=timestamp,
)
def log_sklearn_pr_curve(name, y_preds, y_targets, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_sklearn_pr_curve(
name=name, y_preds=y_preds, y_targets=y_targets, step=step, timestamp=timestamp,
)
def log_curve(name, x, y, annotation=None, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_curve(
name=name, x=x, y=y, annotation=annotation, step=step, timestamp=timestamp,
)
def log_plotly_chart(name, figure, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_plotly_chart(
name=name, figure=figure, step=step, timestamp=timestamp,
)
def log_bokeh_chart(name, figure, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_bokeh_chart(
name=name, figure=figure, step=step, timestamp=timestamp,
)
def log_mpl_plotly_chart(name, figure, step=None, timestamp=None):
global TRACKING_RUN
TRACKING_RUN.log_mpl_plotly_chart(
name=name, figure=figure, step=step, timestamp=timestamp,
)
def set_description(description):
global TRACKING_RUN
TRACKING_RUN.set_description(description=description)
def set_name(name):
global TRACKING_RUN
TRACKING_RUN.set_name(name=name)
def log_status(status, reason=None, message=None):
global TRACKING_RUN
TRACKING_RUN.log_status(
status=status, reason=reason, message=message,
)
def log_inputs(reset=False, **inputs):
global TRACKING_RUN
TRACKING_RUN.log_inputs(reset=reset, **inputs)
def log_outputs(reset=False, **outputs):
global TRACKING_RUN
TRACKING_RUN.log_outputs(reset=reset, **outputs)
def log_tags():
global TRACKING_RUN
TRACKING_RUN.log_tags()
def log_succeeded():
global TRACKING_RUN
TRACKING_RUN.log_succeeded()
def log_stopped():
global TRACKING_RUN
TRACKING_RUN.log_stopped()
def log_failed(message=None, traceback=None):
global TRACKING_RUN
TRACKING_RUN.log_failed(message=message, traceback=traceback)
def log_code_ref():
global TRACKING_RUN
TRACKING_RUN.log_code_ref()
def log_data_ref(name: str, hash: str = None, path: str = None, content=None):
global TRACKING_RUN
TRACKING_RUN.log_data_ref(name=name, content=content, hash=hash, path=path)
def log_file_ref(path: str, hash: str = None, content=None):
global TRACKING_RUN
TRACKING_RUN.log_file_ref(
path=path, hash=hash, content=content,
)
def log_dir_ref(path: str):
global TRACKING_RUN
TRACKING_RUN.log_dir_ref(path=path)
def log_artifact_lineage(body: List[V1RunArtifact]):
global TRACKING_RUN
TRACKING_RUN.log_artifact_lineage(body)
def set_artifacts_path(artifacts_path: str):
global TRACKING_RUN
TRACKING_RUN.set_artifacts_path(artifacts_path)
```
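Every helper above forwards to the single module-level `TRACKING_RUN`, so typical in-code usage looks roughly like this (owner, project, and values are placeholders):
```python
# Sketch: one global Run is initialized, then the module-level helpers forward to it.
from polyaxon import tracking

tracking.init(owner="acme", project="demo", track_code=True)   # placeholder owner/project
tracking.log_inputs(lr=0.01, batch_size=32)
for step in range(3):
    tracking.log_metric(name="loss", value=1.0 / (step + 1), step=step)
tracking.log_succeeded()
```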
#### File: polyaxon_sdk/models/v1_plugins.py
```python
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1Plugins(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"auth": "bool",
"docker": "bool",
"shm": "bool",
"collect_artifacts": "bool",
"collect_logs": "bool",
"collect_resources": "str",
"sync_statuses": "bool",
"log_level": "str",
"notifications": "list[V1Notification]",
}
attribute_map = {
"auth": "auth",
"docker": "docker",
"shm": "shm",
"collect_artifacts": "collect_artifacts",
"collect_logs": "collect_logs",
"collect_resources": "collect_resources",
"sync_statuses": "sync_statuses",
"log_level": "log_level",
"notifications": "notifications",
}
def __init__(
self,
auth=None,
docker=None,
shm=None,
collect_artifacts=None,
collect_logs=None,
collect_resources=None,
sync_statuses=None,
log_level=None,
notifications=None,
local_vars_configuration=None,
): # noqa: E501
"""V1Plugins - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._auth = None
self._docker = None
self._shm = None
self._collect_artifacts = None
self._collect_logs = None
self._collect_resources = None
self._sync_statuses = None
self._log_level = None
self._notifications = None
self.discriminator = None
if auth is not None:
self.auth = auth
if docker is not None:
self.docker = docker
if shm is not None:
self.shm = shm
if collect_artifacts is not None:
self.collect_artifacts = collect_artifacts
if collect_logs is not None:
self.collect_logs = collect_logs
if collect_resources is not None:
self.collect_resources = collect_resources
if sync_statuses is not None:
self.sync_statuses = sync_statuses
if log_level is not None:
self.log_level = log_level
if notifications is not None:
self.notifications = notifications
@property
def auth(self):
"""Gets the auth of this V1Plugins. # noqa: E501
:return: The auth of this V1Plugins. # noqa: E501
:rtype: bool
"""
return self._auth
@auth.setter
def auth(self, auth):
"""Sets the auth of this V1Plugins.
:param auth: The auth of this V1Plugins. # noqa: E501
:type: bool
"""
self._auth = auth
@property
def docker(self):
"""Gets the docker of this V1Plugins. # noqa: E501
:return: The docker of this V1Plugins. # noqa: E501
:rtype: bool
"""
return self._docker
@docker.setter
def docker(self, docker):
"""Sets the docker of this V1Plugins.
:param docker: The docker of this V1Plugins. # noqa: E501
:type: bool
"""
self._docker = docker
@property
def shm(self):
"""Gets the shm of this V1Plugins. # noqa: E501
:return: The shm of this V1Plugins. # noqa: E501
:rtype: bool
"""
return self._shm
@shm.setter
def shm(self, shm):
"""Sets the shm of this V1Plugins.
:param shm: The shm of this V1Plugins. # noqa: E501
:type: bool
"""
self._shm = shm
@property
def collect_artifacts(self):
"""Gets the collect_artifacts of this V1Plugins. # noqa: E501
:return: The collect_artifacts of this V1Plugins. # noqa: E501
:rtype: bool
"""
return self._collect_artifacts
@collect_artifacts.setter
def collect_artifacts(self, collect_artifacts):
"""Sets the collect_artifacts of this V1Plugins.
:param collect_artifacts: The collect_artifacts of this V1Plugins. # noqa: E501
:type: bool
"""
self._collect_artifacts = collect_artifacts
@property
def collect_logs(self):
"""Gets the collect_logs of this V1Plugins. # noqa: E501
:return: The collect_logs of this V1Plugins. # noqa: E501
:rtype: bool
"""
return self._collect_logs
@collect_logs.setter
def collect_logs(self, collect_logs):
"""Sets the collect_logs of this V1Plugins.
:param collect_logs: The collect_logs of this V1Plugins. # noqa: E501
:type: bool
"""
self._collect_logs = collect_logs
@property
def collect_resources(self):
"""Gets the collect_resources of this V1Plugins. # noqa: E501
:return: The collect_resources of this V1Plugins. # noqa: E501
:rtype: str
"""
return self._collect_resources
@collect_resources.setter
def collect_resources(self, collect_resources):
"""Sets the collect_resources of this V1Plugins.
:param collect_resources: The collect_resources of this V1Plugins. # noqa: E501
:type: str
"""
self._collect_resources = collect_resources
@property
def sync_statuses(self):
"""Gets the sync_statuses of this V1Plugins. # noqa: E501
:return: The sync_statuses of this V1Plugins. # noqa: E501
:rtype: bool
"""
return self._sync_statuses
@sync_statuses.setter
def sync_statuses(self, sync_statuses):
"""Sets the sync_statuses of this V1Plugins.
:param sync_statuses: The sync_statuses of this V1Plugins. # noqa: E501
:type: bool
"""
self._sync_statuses = sync_statuses
@property
def log_level(self):
"""Gets the log_level of this V1Plugins. # noqa: E501
:return: The log_level of this V1Plugins. # noqa: E501
:rtype: str
"""
return self._log_level
@log_level.setter
def log_level(self, log_level):
"""Sets the log_level of this V1Plugins.
:param log_level: The log_level of this V1Plugins. # noqa: E501
:type: str
"""
self._log_level = log_level
@property
def notifications(self):
"""Gets the notifications of this V1Plugins. # noqa: E501
:return: The notifications of this V1Plugins. # noqa: E501
:rtype: list[V1Notification]
"""
return self._notifications
@notifications.setter
def notifications(self, notifications):
"""Sets the notifications of this V1Plugins.
:param notifications: The notifications of this V1Plugins. # noqa: E501
:type: list[V1Notification]
"""
self._notifications = notifications
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Plugins):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Plugins):
return True
return self.to_dict() != other.to_dict()
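# Example usage (placeholder values, for illustration only):
#   plugins = V1Plugins(auth=True, collect_logs=True, log_level="INFO")
#   plugins.to_dict()  # returns every declared attribute; unset ones come back as None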
``` |
{
"source": "jjasonn0717/TemporalBART",
"score": 2
} |
#### File: models/event_lm/seq2seq.py
```python
import os
import numpy as np
from scipy.optimize import linear_sum_assignment
from typing import Any, Dict, List, Optional, Union
import re
import logging
import json
import torch
from torch import nn
from torch.nn import functional as F
from denoising_event_lm.modules.transformers import get_huggingface_tokenizer
from allennlp.common.params import Params
from allennlp.data import Instance, Vocabulary
from allennlp.data.batch import Batch
from allennlp.nn import util
from allennlp.data.fields.text_field import TextFieldTensors
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder
from allennlp.nn import util, RegularizerApplicator
from allennlp.training.metrics import Average
from rouge_score import rouge_scorer
rouge_scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
from denoising_event_lm.models import ModelBase
from denoising_event_lm.training.metrics import metric_map
from denoising_event_lm.modules.transformers import TransformerForConditionalGeneration
from denoising_event_lm.utils.constants import EVENT_TAG, POINTER_EVENT_TAGS, ARGS_TAG
logger = logging.getLogger(__name__)
def normalize_arg_type(arg_type):
if arg_type[0] in ['R', 'C']:
return arg_type[2:]
else:
return arg_type
def get_flatten_varg_toks(varg):
varg_toks = [varg['V_toks']] + varg['ARGS_toks']
varg_span = [varg['V_span']] + varg['ARGS_span']
varg_type = ['V'] + [normalize_arg_type(arg_type) for arg_type in varg['ARGS_type']]
assert len(varg_toks) == len(varg_span) and len(varg_toks) == len(varg_type)
indices = list(range(len(varg_toks)))
# sort pred/args by their textual order
indices = sorted(indices, key=lambda x: varg_span[x])
varg_toks = [varg_toks[i] for i in indices]
varg_type = [varg_type[i] for i in indices]
flatten_toks = []
for i, toks in enumerate(varg_toks):
flatten_toks.extend(toks)
return flatten_toks
def V_ARGS_string_to_varg_seq(varg_string, add_event_sep_entry=True):
#vargs = varg_string.split(EVENT_TAG)[1:]
regex = rf"({'|'.join([EVENT_TAG]+POINTER_EVENT_TAGS)})(.*?)(?={'|'.join(['$',EVENT_TAG]+POINTER_EVENT_TAGS)})"
vargs = [(x.group(1), x.group(2)) for x in re.finditer(regex, varg_string)]
varg_seq = []
for event_sep, varg in vargs:
v, *desc = varg.split(ARGS_TAG)
desc = f" {ARGS_TAG} ".join(desc)
if add_event_sep_entry:
varg_seq.append(
{
"V_toks": [v.strip()],
"Description": desc.strip(),
"EVENT_SEP": event_sep
}
)
else:
varg_seq.append(
{
"V_toks": [v.strip()],
"Description": desc.strip()
}
)
return varg_seq
def get_event_matching(varg_seq_a, varg_seq_b):
# get description if needed: ARG0 Pred ARG1 ...
if len(varg_seq_a) > 0 and not 'Description' in varg_seq_a[0]:
for varg in varg_seq_a:
varg['Description'] = " ".join(get_flatten_varg_toks(varg))
if len(varg_seq_b) > 0 and not 'Description' in varg_seq_b[0]:
for varg in varg_seq_b:
varg['Description'] = " ".join(get_flatten_varg_toks(varg))
# maximum weighted bipartite matching
if len(varg_seq_a) > 0 and len(varg_seq_b) > 0:
scores = [[0 for j in range(len(varg_seq_b))] for i in range(len(varg_seq_a))]
for i in range(len(varg_seq_a)):
for j in range(len(varg_seq_b)):
e_sep_a = varg_seq_a[i]['EVENT_SEP']
v_a = " ".join(varg_seq_a[i]['V_toks'])
desc_a = varg_seq_a[i]['Description']
e_sep_b = varg_seq_b[j]['EVENT_SEP']
v_b = " ".join(varg_seq_b[j]['V_toks'])
desc_b = varg_seq_b[j]['Description']
scores[i][j] = float(e_sep_a == e_sep_b) * float(v_a == v_b) * rouge_scorer.score(desc_a, desc_b)['rougeLsum'].fmeasure
rows, cols = linear_sum_assignment(scores, maximize=True)
total_score = sum(scores[i][j] for i, j in zip(rows, cols)) / len(rows)
else:
rows, cols = [], []
total_score = 0
# build seq representations
repr_a = list(range(len(varg_seq_a)))
repr_b = list(range(len(varg_seq_a), len(varg_seq_a)+len(varg_seq_b)))
for i, j in zip(rows, cols):
if scores[i][j] > 0:
repr_b[j] = repr_a[i]
return repr_a, repr_b, total_score
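# Illustrative example (toy inputs, not from any dataset): two singleton chains whose
# events share the same EVENT_SEP tag, verb, and description are matched one-to-one.
#   varg_seq_a = [{"EVENT_SEP": EVENT_TAG, "V_toks": ["said"], "Description": "he said hello"}]
#   varg_seq_b = [{"EVENT_SEP": EVENT_TAG, "V_toks": ["said"], "Description": "he said hello"}]
#   repr_a, repr_b, score = get_event_matching(varg_seq_a, varg_seq_b)
#   # -> repr_a == [0], repr_b == [0] (b's event reuses a's index), score == 1.0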
@Model.register("event_lm_transformer_seq2seq")
class EventLMTransformerSeq2Seq(ModelBase):
"""
transformer_name (str): the pretrained transformer to use.
decode_kwargs (Dict[str, Any]): decoding arguments for ``generate`` function of ``seq2seq_generator``.
tokenizer_kwargs (Dict[str, Any]): tokenizer arguments.
"""
def __init__(self,
vocab: Vocabulary,
transformer_name: str,
seq2seq_generator: TransformerForConditionalGeneration,
label_smoothing: float = 0.0,
decode_kwargs: Optional[Dict[str, Any]] = None,
tokenizer_kwargs: Optional[Dict[str, Any]] = None,
dropout: float = 0.1,
extra_metrics: Dict[str, str]=None,
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
self._seq2seq_generator = seq2seq_generator
self._label_smoothing = label_smoothing
self._tokenizer = get_huggingface_tokenizer(transformer_name, **(tokenizer_kwargs or {}))
if len(self._tokenizer) > self._seq2seq_generator._base_model.config.vocab_size:
self._seq2seq_generator._base_model.resize_token_embeddings(len(self._tokenizer))
logger.info("Resize embeddings from %d to %d",
self._seq2seq_generator._base_model.config.vocab_size,
len(self._tokenizer))
self._decode_kwargs = decode_kwargs or {}
self._dropout = dropout
self._pad_token_id = self._seq2seq_generator._base_model.config.pad_token_id
self._decoder_start_token_id = self._seq2seq_generator._base_model.config.decoder_start_token_id
self._event_token_id = self._tokenizer.added_tokens_encoder[EVENT_TAG]
self._pointer_event_token_ids = [self._tokenizer.added_tokens_encoder[tag] for tag in POINTER_EVENT_TAGS if tag in self._tokenizer.added_tokens_encoder]
self._arg_token_id = self._tokenizer.added_tokens_encoder[ARGS_TAG]
self._orig_special_ids = set(self._tokenizer.all_special_ids) - set(self._tokenizer.additional_special_tokens_ids)
self._loss_trackers = {'loss': Average()}
self._pairwise_metric = metric_map['chain_pairwise_accuracy']()
self._desc_rouge = Average()
if extra_metrics is not None:
self._extra_metrics = {}
for m_name, m_type in extra_metrics.items():
self._extra_metrics[m_name] = metric_map[m_type]()
else:
self._extra_metrics = None
def forward(self, # type: ignore
source_tok_ids: torch.LongTensor,
target_tok_ids: torch.LongTensor = None,
source_attention_mask: torch.LongTensor = None,
target_attention_mask: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
'''
print("question context tok ids")
print(question_context_tok_ids[0])
print(self._tokenizer.decode(question_context_tok_ids[0], skip_special_tokens=False, clean_up_tokenization_spaces=True))
print("target tok ids")
print(target_tok_ids[0])
print(self._tokenizer.decode(target_tok_ids[0], skip_special_tokens=False, clean_up_tokenization_spaces=True))
'''
batch_size = source_tok_ids.size(0)
output_dict = {}
# compute loss and predictions under teacher forcing
if target_tok_ids is not None:
# create decoder inputs, need to add ``decoder_start_token_id``: in bart, it is </s>
decoder_input_ids = target_tok_ids.new_zeros(target_tok_ids.shape)
decoder_input_ids[..., 1:] = target_tok_ids[..., :-1].clone()
decoder_input_ids[..., 0] = self._decoder_start_token_id
decoder_attention_mask = target_attention_mask.new_zeros(target_attention_mask.shape)
decoder_attention_mask[..., 1:] = target_attention_mask[..., :-1].clone()
decoder_attention_mask[..., 0] = 1
# create labels
labels = target_tok_ids.clone().detach()
labels[target_tok_ids == self._pad_token_id] = -100
'''
print("decoder_input_ids")
print(decoder_input_ids[0])
print("decoder attention mask")
print(decoder_attention_mask[0])
print("labels")
print(labels[0])
input()
'''
# loss, prediction_scores, cache, all_dec_hiddens, all_dec_attns, encoder_outputs; if exists
seq2seq_outputs = self._seq2seq_generator(
input_ids=source_tok_ids,
attention_mask=source_attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
lm_labels=labels,
label_smoothing=self._label_smoothing
)
loss = seq2seq_outputs[0]
# shape: (batch_size, length, vocab_size)
logits = seq2seq_outputs[1]
# get teacher forcing prediction ids, (batch_size, length)
tf_prediction_ids = torch.max(logits, dim=-1)[1]
output_dict["tf_prediction_ids"] = tf_prediction_ids
self._loss_trackers['loss'](loss)
output_dict["loss"] = loss
prediction_ids = tf_prediction_ids
'''
print("tf pred")
print(self._tokenizer.decode(prediction_ids[0], skip_special_tokens=False, clean_up_tokenization_spaces=True))
'''
# decode the prediction
if not self.training:
# get decode prediction ids, (batch_size*num_return_sequences, length)
decode_prediction_ids = self._seq2seq_generator.generate(
input_ids=source_tok_ids,
attention_mask=source_attention_mask,
event_token_ids=[self._event_token_id]+self._pointer_event_token_ids,
arg_token_id=self._arg_token_id,
**self._decode_kwargs
)
# (batch_size, num_return_sequences, length)
decode_prediction_ids = decode_prediction_ids.view(batch_size,
self._decode_kwargs.get("num_return_sequences", 1),
decode_prediction_ids.size(-1))
output_dict["decode_prediction_ids"] = decode_prediction_ids
prediction_ids = decode_prediction_ids
if self._decode_kwargs.get('num_return_sequences', 1) > 1:
prediction_ids = prediction_ids.view(batch_size, self._decode_kwargs['num_return_sequences'], prediction_ids.size(-1))
# Compute the event-matching metrics and add the textual predictions to the output.
if metadata is not None:
output_dict['prediction_str'] = []
output_dict['prediction_varg_seq'] = []
output_dict['gold_varg_seq'] = []
if len(prediction_ids.size()) == 3:
output_dict['beam_prediction_str'] = []
output_dict['beam_prediction_varg_seq'] = []
source_strs = []
target_strs = []
is_scrambleds = []
varg_seqs = []
keep_varg_ids = []
ids = []
doc_ids = []
data_srcs = []
for i in range(batch_size):
source_strs.append(metadata[i]['source_str'])
target_strs.append(metadata[i]['target_str'])
is_scrambleds.append(metadata[i]['is_scrambled'])
varg_seqs.append(metadata[i]['varg_seq'])
keep_varg_ids.append(metadata[i]['keep_varg_ids'])
ids.append(metadata[i]['_id'])
doc_ids.append(metadata[i]['doc_id'])
data_srcs.append(metadata[i]['data_src'])
if len(prediction_ids.size()) == 2:
predicted_token_ids = prediction_ids[i].detach().cpu().numpy()
elif len(prediction_ids.size()) == 3:
output_dict['beam_prediction_str'].append([])
output_dict['beam_prediction_varg_seq'].append([])
beam_size = prediction_ids.size(1)
for beam_idx in range(beam_size):
predicted_token_ids = prediction_ids[i, beam_idx].detach().cpu().numpy()
predicted_token_ids = [tok_id for tok_id in predicted_token_ids if tok_id not in self._orig_special_ids] # remove original special tokens
prediction_str = self._tokenizer.decode(predicted_token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True)
prediction_varg_seq = V_ARGS_string_to_varg_seq(prediction_str)
output_dict['beam_prediction_str'][i].append(prediction_str)
output_dict['beam_prediction_varg_seq'][i].append(prediction_varg_seq)
predicted_token_ids = prediction_ids[i, 0].detach().cpu().numpy()
predicted_token_ids = [tok_id for tok_id in predicted_token_ids if tok_id not in self._orig_special_ids] # remove original special tokens
prediction_str = self._tokenizer.decode(predicted_token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True)
prediction_varg_seq = V_ARGS_string_to_varg_seq(prediction_str)
output_dict['prediction_str'].append(prediction_str)
output_dict['prediction_varg_seq'].append(prediction_varg_seq)
gold_varg_seq = metadata[i].get('gold_varg_seq', None)
output_dict['gold_varg_seq'].append(gold_varg_seq)
if gold_varg_seq is not None:
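# align predicted and gold event sequences before updating the pairwise accuracy, description-rouge, and any extra metrics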
pred_seq, gold_seq, matching_score = get_event_matching(prediction_varg_seq, gold_varg_seq)
self._pairwise_metric(pred_seq, gold_seq)
self._desc_rouge(matching_score)
if self._extra_metrics is not None:
for m_name, met in self._extra_metrics.items():
met(pred_seq, gold_seq)
'''
if i == 0:
print("prediction_ids")
print(predicted_token_ids)
print("prediction_str raw")
print(self._tokenizer.decode(predicted_token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True))
print("prediction_str")
print(prediction_str)
print("prediction varg seq")
print(json.dumps(prediction_varg_seq, indent=2))
print("gold varg seq")
print(json.dumps(gold_varg_seq, indent=2))
input()
'''
output_dict['source_str'] = source_strs
output_dict['target_str'] = target_strs
output_dict['is_scrambled'] = is_scrambleds
output_dict['varg_seq'] = varg_seqs
output_dict['keep_varg_ids'] = keep_varg_ids
output_dict['_id'] = ids
output_dict['doc_id'] = doc_ids
output_dict['data_src'] = data_srcs
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics = {}
metrics['pairwise_acc'] = self._pairwise_metric.get_metric(reset)
metrics['desc_rouge'] = self._desc_rouge.get_metric(reset)
if self._extra_metrics is not None:
for m_name, met in self._extra_metrics.items():
score = met.get_metric(reset)
if type(score) == dict:
for score_key in score:
metrics[m_name+'_'+score_key] = score[score_key]
else:
metrics[m_name] = score
for name, tracker in self._loss_trackers.items():
metrics[name] = tracker.get_metric(reset).item()
return metrics
def compute_sequence_scores(self, instances: List[Instance]) -> List[Dict[str, np.ndarray]]:
batch_size = len(instances)
with torch.no_grad():
cuda_device = self._get_prediction_device()
dataset = Batch(instances)
dataset.index_instances(self.vocab)
model_input = util.move_to_device(dataset.as_tensor_dict(), cuda_device)
assert len(model_input) == 5
source_tok_ids = model_input['source_tok_ids']
target_tok_ids = model_input['target_tok_ids']
source_attention_mask = model_input['source_attention_mask']
target_attention_mask = model_input['target_attention_mask']
metadata = model_input['metadata']
output_dict = {}
# compute loss under teacher forcing, which should be the sequence score
# create decoder inputs, need to add ``decoder_start_token_id``: in bart, it is </s>
decoder_input_ids = target_tok_ids.new_zeros(target_tok_ids.shape)
decoder_input_ids[..., 1:] = target_tok_ids[..., :-1].clone()
decoder_input_ids[..., 0] = self._decoder_start_token_id
decoder_attention_mask = target_attention_mask.new_zeros(target_attention_mask.shape)
decoder_attention_mask[..., 1:] = target_attention_mask[..., :-1].clone()
decoder_attention_mask[..., 0] = 1
# create labels
labels = target_tok_ids.clone().detach()
labels[target_tok_ids == self._pad_token_id] = -100
# prediction_scores, cache, all_dec_hiddens, all_dec_attns, encoder_outputs; if exists
seq2seq_outputs = self._seq2seq_generator(
input_ids=source_tok_ids,
attention_mask=source_attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
lm_labels=None, # do not compute the loss here
use_cache=False
)
# shape: (batch_size, length, vocab_size)
logits = seq2seq_outputs[0]
loss_fct = nn.CrossEntropyLoss(reduction='none')
# shape: (batch_size*length,)
label_len = labels.size(1)
neg_logprob = loss_fct(logits.view(batch_size*label_len, self._seq2seq_generator._base_model.config.vocab_size),
labels.view(batch_size*label_len))
# shape: (batch_size,)
seq_scores = -torch.sum(neg_logprob.view(batch_size, label_len), dim=-1)
'''
event_tags_mask = torch.any(labels.unsqueeze(-1) == labels.new_tensor([self._event_token_id]+self._pointer_event_token_ids), dim=-1)
seq_scores = -torch.sum(neg_logprob.view(batch_size, label_len) * event_tags_mask.float(), dim=-1)
'''
# shape: (batch_size,)
seq_len = torch.sum((labels != -100).float(), dim=-1)
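# length-normalize the total log-likelihood so scores are comparable across sequences of different lengths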
output_dict['seq_score'] = seq_scores / seq_len
instance_separated_output: List[Dict[str, np.ndarray]] = [
{} for _ in dataset.instances
]
for name, output in list(output_dict.items()):
if isinstance(output, torch.Tensor):
# NOTE(markn): This is a hack because 0-dim pytorch tensors are not iterable.
# This occurs with batch size 1, because we still want to include the loss in that case.
if output.dim() == 0:
output = output.unsqueeze(0)
if output.size(0) != batch_size:
self._maybe_warn_for_unseparable_batches(name)
continue
output = output.detach().cpu().numpy()
elif len(output) != batch_size:
self._maybe_warn_for_unseparable_batches(name)
continue
for instance_output, batch_element in zip(instance_separated_output, output):
instance_output[name] = batch_element
return instance_separated_output
def generate_extra_events(self, instances: List[Instance]) -> List[Dict[str, np.ndarray]]:
batch_size = len(instances)
# TODO: if batch_size > 1, need to modify the generate function to start decode from the shortest sequence
assert batch_size == 1
with torch.no_grad():
cuda_device = self._get_prediction_device()
dataset = Batch(instances)
dataset.index_instances(self.vocab)
model_input = util.move_to_device(dataset.as_tensor_dict(), cuda_device)
assert len(model_input) == 5
source_tok_ids = model_input['source_tok_ids']
target_tok_ids = model_input['target_tok_ids']
source_attention_mask = model_input['source_attention_mask']
target_attention_mask = model_input['target_attention_mask']
metadata = model_input['metadata']
override_decode_kwargs = metadata[0].get('override_decode_kwargs', {})
decode_kwargs = {k: v for k, v in self._decode_kwargs.items()}
decode_kwargs.update(override_decode_kwargs)
if 'bad_verbs_ids' in metadata[0]:
bad_verbs_ids = metadata[0]['bad_verbs_ids']
ban_bad_verbs_event_idxs = metadata[0]['ban_bad_verbs_event_idxs']
else:
bad_verbs_ids = None
ban_bad_verbs_event_idxs = None
if 'target_suffix_encodes' in metadata[0]:
target_suffix_ids = metadata[0]['target_suffix_encodes']['input_ids']
target_suffix_start_event_idx = metadata[0]['target_suffix_start_event_idx']
else:
target_suffix_ids = None
target_suffix_start_event_idx = None
num_output_events = metadata[0]['num_output_events']
output_dict = {}
# create decoder inputs, need to add ``decoder_start_token_id``: in bart, it is </s>
_, target_len = target_tok_ids.shape
decoder_input_ids = target_tok_ids.new_zeros((batch_size, target_len+1))
decoder_input_ids[..., 1:] = target_tok_ids.clone()
decoder_input_ids[..., 0] = self._decoder_start_token_id
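# the already-known target events are fed as a forced decoder prefix; generation continues from this prefix to produce the extra events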
# TODO: if batch_size > 1, need to pass decoder_attention_mask
'''
decoder_attention_mask = target_attention_mask.new_zeros((batch_size, target_len+1))
decoder_attention_mask[..., 1:] = target_attention_mask.clone()
decoder_attention_mask[..., 0] = 1
'''
# get decode prediction ids, (batch_size*num_return_sequences, length)
decode_prediction_ids = self._seq2seq_generator.generate(
input_ids=source_tok_ids,
attention_mask=source_attention_mask,
no_repeat_ngram_size=3,
bad_verbs_ids=bad_verbs_ids,
ban_bad_verbs_event_idxs=ban_bad_verbs_event_idxs,
max_num_events=num_output_events,
min_num_events=num_output_events,
event_token_ids=[self._event_token_id]+self._pointer_event_token_ids,
arg_token_id=self._arg_token_id,
input_suffix_ids=target_suffix_ids,
input_suffix_start_event_idx=target_suffix_start_event_idx,
decoder_input_ids=decoder_input_ids,
use_cache=False,
**decode_kwargs
)
# (batch_size, num_return_sequences, length)
decode_prediction_ids = decode_prediction_ids.view(batch_size,
decode_kwargs.get("num_return_sequences", 1),
decode_prediction_ids.size(-1))
output_dict["decode_prediction_ids"] = decode_prediction_ids
prediction_ids = decode_prediction_ids
if decode_kwargs.get('num_return_sequences', 1) > 1:
prediction_ids = prediction_ids.view(batch_size, decode_kwargs['num_return_sequences'], prediction_ids.size(-1))
if metadata is not None:
output_dict['prediction_str'] = []
output_dict['prediction_varg_seq'] = []
output_dict['gold_varg_seq'] = []
output_dict['input_varg_seq'] = []
if len(prediction_ids.size()) == 3:
output_dict['beam_prediction_str'] = []
output_dict['beam_prediction_varg_seq'] = []
for i in range(batch_size):
if len(prediction_ids.size()) == 2:
predicted_token_ids = prediction_ids[i].detach().cpu().numpy()
elif len(prediction_ids.size()) == 3:
output_dict['beam_prediction_str'].append([])
output_dict['beam_prediction_varg_seq'].append([])
beam_size = prediction_ids.size(1)
for beam_idx in range(beam_size):
predicted_token_ids = prediction_ids[i, beam_idx].detach().cpu().numpy()
predicted_token_ids = [tok_id for tok_id in predicted_token_ids if tok_id not in self._orig_special_ids] # remove original special tokens
prediction_str = self._tokenizer.decode(predicted_token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True)
prediction_varg_seq = V_ARGS_string_to_varg_seq(prediction_str)
output_dict['beam_prediction_str'][i].append(prediction_str)
output_dict['beam_prediction_varg_seq'][i].append(prediction_varg_seq)
predicted_token_ids = prediction_ids[i, 0].detach().cpu().numpy()
predicted_token_ids = [tok_id for tok_id in predicted_token_ids if tok_id not in self._orig_special_ids] # remove original special tokens
prediction_str = self._tokenizer.decode(predicted_token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True)
prediction_varg_seq = V_ARGS_string_to_varg_seq(prediction_str)
output_dict['prediction_str'].append(prediction_str)
output_dict['prediction_varg_seq'].append(prediction_varg_seq)
gold_varg_seq = metadata[i].get('gold_varg_seq', None)
output_dict['gold_varg_seq'].append(gold_varg_seq)
output_dict['input_varg_seq'].append(metadata[i]['input_varg_seq'])
'''
if i == 0:
print("prediction_ids")
print(predicted_token_ids)
print("prediction_str raw")
print(self._tokenizer.decode(predicted_token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True))
print("prediction_str")
print(prediction_str)
print("prediction varg seq")
print(json.dumps(prediction_varg_seq, indent=2))
print("gold varg seq")
print(json.dumps(gold_varg_seq, indent=2))
input()
'''
instance_separated_output: List[Dict[str, np.ndarray]] = [
{} for _ in dataset.instances
]
for name, output in list(output_dict.items()):
if isinstance(output, torch.Tensor):
# NOTE(markn): This is a hack because 0-dim pytorch tensors are not iterable.
# This occurs with batch size 1, because we still want to include the loss in that case.
if output.dim() == 0:
output = output.unsqueeze(0)
if output.size(0) != batch_size:
self._maybe_warn_for_unseparable_batches(name)
continue
output = output.detach().cpu().numpy()
elif len(output) != batch_size:
self._maybe_warn_for_unseparable_batches(name)
continue
for instance_output, batch_element in zip(instance_separated_output, output):
instance_output[name] = batch_element
return instance_separated_output
```
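The teacher-forcing setup in `forward` above shifts the target right, prepends BART's decoder start token, and masks padding in the labels with -100. Below is a minimal, self-contained sketch of that tensor preparation on toy ids; the pad id, start id, and token values are assumed for illustration and are not taken from the repository.

```python
import torch

# Assumed toy values for illustration (not taken from the repository).
pad_token_id = 1
decoder_start_token_id = 2  # BART uses </s> as the decoder start token

target_tok_ids = torch.tensor([[0, 50, 51, 2, 1, 1]])        # <s> w1 w2 </s> <pad> <pad>
target_attention_mask = (target_tok_ids != pad_token_id).long()

# Shift right and prepend the decoder start token.
decoder_input_ids = target_tok_ids.new_zeros(target_tok_ids.shape)
decoder_input_ids[..., 1:] = target_tok_ids[..., :-1].clone()
decoder_input_ids[..., 0] = decoder_start_token_id

decoder_attention_mask = target_attention_mask.new_zeros(target_attention_mask.shape)
decoder_attention_mask[..., 1:] = target_attention_mask[..., :-1].clone()
decoder_attention_mask[..., 0] = 1

# Padding positions are set to -100 so the LM cross-entropy loss ignores them.
labels = target_tok_ids.clone()
labels[target_tok_ids == pad_token_id] = -100

print(decoder_input_ids)       # tensor([[ 2,  0, 50, 51,  2,  1]])
print(decoder_attention_mask)  # tensor([[1, 1, 1, 1, 1, 0]])
print(labels)                  # tensor([[   0,   50,   51,    2, -100, -100]])
```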
#### File: modules/transformers/bart_for_eventlm.py
```python
import logging
import math
import random
import numpy
import warnings
from typing import Dict, List, Optional, Tuple, Iterable
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import CrossEntropyLoss
from transformers import BartForConditionalGeneration
from transformers.generation_utils import BeamHypotheses, top_k_top_p_filtering, calc_banned_ngram_tokens, calc_banned_bad_words_ids
logger = logging.getLogger(__name__)
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
"""From fairseq"""
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
valid_target = target.clone().detach()
valid_target[target == ignore_index] = 0
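# gather() needs in-range indices, so ignored positions are temporarily mapped to 0 and masked out below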
nll_loss = -lprobs.gather(dim=-1, index=valid_target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
nll_loss = nll_loss.sum() # mean()? Scared to break other math.
smooth_loss = smooth_loss.sum()
eps_i = epsilon / lprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
def calc_banned_bad_verbs_ids(prev_input_ids: Iterable[int], batch_bad_verbs_ids: Iterable[int]) -> Iterable[int]:
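# For each hypothesis, ban the final token of every bad-verb id sequence whose prefix matches
# the end of the tokens generated so far (mirrors calc_banned_bad_words_ids, but applied per batch element).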
banned_tokens = []
def _tokens_match(prev_tokens, tokens):
if len(tokens) == 0:
# an empty prefix (a single-token bad-verb sequence) always matches, so its final token is always banned
return True
if len(tokens) > len(prev_input_ids):
# if the bad-verb sequence is longer than prev_input_ids, it cannot match
return False
if prev_tokens[-len(tokens) :] == tokens:
# if tokens match
return True
else:
return False
for prev_input_ids_slice, bad_verbs_ids_slice in zip(prev_input_ids, batch_bad_verbs_ids):
if bad_verbs_ids_slice is not None:
banned_tokens_slice = []
for banned_token_seq in bad_verbs_ids_slice:
assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format(
bad_verbs_ids_slice
)
if _tokens_match(prev_input_ids_slice.tolist(), banned_token_seq[:-1]) is False:
# if tokens do not match continue
continue
banned_tokens_slice.append(banned_token_seq[-1])
banned_tokens.append(banned_tokens_slice)
else:
banned_tokens.append(None)
return banned_tokens
class BartForConditionalGeneration_for_EventLM(BartForConditionalGeneration):
def forward(
self,
input_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_cached_states=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
loss_reduction='mean',
label_smoothing=0.0,
**unused,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring).
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens
with labels
in ``[0, ..., config.vocab_size]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
masked_lm_loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Conditional generation example::
# Mask filling only works for bart-large
from transformers import BartTokenizer, BartForConditionalGeneration
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
TXT = "My friends are <mask> but they eat too many carbs."
model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')
input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']
logits = model(input_ids)[0]
masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
probs = logits[0, masked_index].softmax(dim=0)
values, predictions = probs.topk(5)
tokenizer.decode(predictions).split()
# ['good', 'great', 'all', 'really', 'very']
"""
if "lm_labels" in unused:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
DeprecationWarning,
)
labels = unused.pop("lm_labels")
if labels is not None:
use_cache = False
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
decoder_cached_states=decoder_cached_states,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)
outputs = (lm_logits,) + outputs[1:] # Add cache, hidden states and attention if they are here
if labels is not None:
if label_smoothing == 0.0:
loss_fct = nn.CrossEntropyLoss(reduction=loss_reduction)
# TODO(SS): do we need to ignore pad tokens in labels?
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
else:
lprobs = torch.nn.functional.log_softmax(lm_logits, dim=-1)
masked_lm_loss, _ = label_smoothed_nll_loss(
lprobs, labels, label_smoothing, ignore_index=-100
)
outputs = (masked_lm_loss,) + outputs
return outputs
def postprocess_next_token_scores(
self,
scores,
input_ids,
no_repeat_ngram_size,
bad_words_ids,
batch_bad_verbs_ids,
cur_len,
min_length,
max_length,
eos_token_id,
repetition_penalty,
batch_size,
num_beams,
cur_num_events,
cur_num_args,
max_num_events,
min_num_events,
event_token_ids,
arg_token_id,
):
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(
scores, batch_size, num_beams, input_ids, repetition_penalty,
)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
scores[:, eos_token_id] = -float("inf")
# set eos token prob to zero if min_num_events is not reached
if eos_token_id is not None and min_num_events is not None:
scores[cur_num_events < min_num_events, eos_token_id] = -float("inf")
# set event token prob to zero once max_num_events is reached
if max_num_events is not None:
for i, n in enumerate(cur_num_events.detach().cpu().numpy()):
if n >= max_num_events:
scores[i, event_token_ids] = -float("inf")
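# together, the two blocks above force the number of generated event markers into [min_num_events, max_num_events]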
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
num_batch_hypotheses = batch_size * num_beams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_batch_tokens = calc_banned_ngram_tokens(
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for i, banned_tokens in enumerate(banned_tokens):
scores[i, banned_tokens] = -float("inf")
if batch_bad_verbs_ids is not None:
# calculate a list of banned tokens according to bad verbs
banned_tokens = calc_banned_bad_verbs_ids(input_ids, batch_bad_verbs_ids)
for i, banned_tokens in enumerate(banned_tokens):
if banned_tokens is not None:
scores[i, banned_tokens] = -float("inf")
return scores
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
max_length: Optional[int] = None,
min_length: Optional[int] = None,
do_sample: Optional[bool] = None,
early_stopping: Optional[bool] = None,
num_beams: Optional[int] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
repetition_penalty: Optional[float] = None,
bad_words_ids: Optional[Iterable[int]] = None,
bad_verbs_ids: Optional[Iterable[int]] = None,
ban_bad_verbs_event_idxs: Optional[List[int]] = None,
bos_token_id: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
no_repeat_ngram_size: Optional[int] = None,
num_return_sequences: Optional[int] = None,
attention_mask: Optional[torch.LongTensor] = None,
decoder_start_token_id: Optional[int] = None,
use_cache: Optional[bool] = None,
encoder_outputs: Optional[torch.LongTensor] = None,
max_num_events: Optional[int] = None,
min_num_events: Optional[int] = None,
event_token_ids: Optional[Iterable[int]] = None,
arg_token_id: Optional[int] = None,
input_suffix_ids: Optional[Iterable[int]] = None,
input_suffix_start_event_idx: Optional[int] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
**model_specific_kwargs
) -> torch.LongTensor:
r""" Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Adapted in part from `Facebook's XLM beam search code`_.
.. _`Facebook's XLM beam search code`:
https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529
Parameters:
input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`
The sequence used as a prompt for the generation. If `None` the method initializes
it as an empty `torch.LongTensor` of shape `(1,)`.
max_length: (`optional`) int
The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.
min_length: (`optional`) int
The min length of the sequence to be generated. Between 0 and infinity. Default to 0.
do_sample: (`optional`) bool
If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
early_stopping: (`optional`) bool
if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
num_beams: (`optional`) int
Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.
temperature: (`optional`) float
The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
top_k: (`optional`) int
The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
top_p: (`optional`) float
The cumulative probability threshold for nucleus sampling: only the smallest set of highest-probability tokens whose probabilities sum to at least `top_p` is kept. Must be between 0 and 1. Default to 1.
repetition_penalty: (`optional`) float
The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.
pad_token_id: (`optional`) int
Padding token. Defaults to the model-specific pad_token_id, or None if it does not exist.
bos_token_id: (`optional`) int
BOS token. Defaults to `bos_token_id` as defined in the model's config.
eos_token_id: (`optional`) int
EOS token. Defaults to `eos_token_id` as defined in the model's config.
length_penalty: (`optional`) float
Exponential penalty to the length. Default to 1.
no_repeat_ngram_size: (`optional`) int
If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.
bad_words_ids: (`optional`) list of lists of int
`bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences: (`optional`) int
The number of independently computed returned sequences for each element in the batch. Default to 1.
attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
Defaults to `None`.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_start_token_id=None: (`optional`) int
If an encoder-decoder model starts decoding with a different token than BOS.
Defaults to `None` and is changed to `BOS` later.
use_cache: (`optional`) bool
If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`.
model_specific_kwargs: (`optional`) dict
Additional model specific kwargs will be forwarded to the `forward` function of the model.
Return:
output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`
sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40) # do greedy decoding
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 sequences by sampling
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
input_context = 'My cute dog'
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
)
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
use_cache = use_cache if use_cache is not None else self.config.use_cache
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
)
if input_ids is not None:
batch_size = input_ids.shape[0] # overridden by the input batch_size
else:
batch_size = 1
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert (
isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
), "`no_repeat_ngram_size` should be a positive integer."
assert (
isinstance(num_return_sequences, int) and num_return_sequences > 0
), "`num_return_sequences` should be a strictly positive integer."
assert (
bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
assert (
bad_verbs_ids is None or isinstance(bad_verbs_ids, list) and isinstance(bad_verbs_ids[0], list)
), "`bad_verbs_ids` is either `None` or a list of lists of tokens that should not be generated"
assert (
ban_bad_verbs_event_idxs is None or isinstance(ban_bad_verbs_event_idxs, list) and isinstance(ban_bad_verbs_event_idxs[0], int)
), "`ban_bad_verbs_event_idxs` is either `None` or a list of integers"
if input_ids is None:
assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
"you should either supply a context to complete as `input_ids` input "
"or a `bos_token_id` (integer >= 0) as a first token to start the generation."
)
input_ids = torch.full(
(batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
assert (
isinstance(event_token_ids, list) and all(isinstance(i, int) and i >= 0 for i in event_token_ids)
), "`event_token_ids` should be a list of positive integer."
assert (
isinstance(arg_token_id, int) and (arg_token_id >= 0)
), "`arg_token_id` should be a positive integer."
# do not allow duplicate outputs when using greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert (
num_return_sequences == 1
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
else:
# beam_search greedy generation conditions
assert (
num_beams >= num_return_sequences
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
# create attention mask if necessary
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
attention_mask = input_ids.ne(pad_token_id).long()
elif attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
# set pad_token_id to eos_token_id if not set. Important that this is done after
# attention_mask is created
if pad_token_id is None and eos_token_id is not None:
logger.warning(
"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id)
)
pad_token_id = eos_token_id
# current position and vocab size
if hasattr(self.config, "vocab_size"):
vocab_size = self.config.vocab_size
elif (
self.config.is_encoder_decoder
and hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "vocab_size")
):
vocab_size = self.config.decoder.vocab_size
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
if self.config.is_encoder_decoder:
if decoder_start_token_id is None:
decoder_start_token_id = bos_token_id
assert (
decoder_start_token_id is not None
), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
if encoder_outputs is None:
# get encoder and store encoder outputs
encoder = self.get_encoder()
encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask)
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
input_ids_len = input_ids.shape[-1]
input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)
attention_mask = attention_mask.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_ids_len
)
input_ids = input_ids.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
attention_mask = attention_mask.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
if self.config.is_encoder_decoder:
# create empty decoder_input_ids
if decoder_input_ids is None:
input_ids = torch.full(
(effective_batch_size * num_beams, 1),
decoder_start_token_id,
dtype=torch.long,
device=next(self.parameters()).device,
)
cur_len = 1
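# per-hypothesis counters of how many event / argument markers have been emitted;
# these drive the event-count and bad-verb constraints during decoding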
cur_num_events = input_ids.new_zeros((effective_batch_size * num_beams,))
cur_num_args = input_ids.new_zeros((effective_batch_size * num_beams,))
else:
assert (
2 == len(decoder_input_ids.shape)
), f"expected decoder_input_ids to have 2 dimensions, got {len(decoder_input_ids.shape)} "
assert (
batch_size == decoder_input_ids.shape[0]
), f"expected decoder_input_ids to have 1st dimension bs={batch_size}, got {decoder_input_ids.shape[0]} "
assert (
torch.all(decoder_input_ids[:, 0] == decoder_start_token_id)
), f"expected every decoder_input_ids sequence to start with decoder_start_token_id={decoder_start_token_id}, got\n {decoder_input_ids} "
input_ids = decoder_input_ids
cur_len = input_ids.shape[-1]
# expand input_ids
input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, cur_len)
input_ids = input_ids.contiguous().view(
effective_batch_size * num_beams, cur_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
# compute cur_num_events
cur_num_events = torch.sum(torch.any(input_ids.unsqueeze(-1) == input_ids.new_tensor(event_token_ids), dim=-1), dim=-1)
cur_num_args = torch.sum(input_ids == arg_token_id, dim=-1)
assert (
batch_size == encoder_outputs[0].shape[0]
), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = (
torch.arange(batch_size)
.view(-1, 1)
.repeat(1, num_beams * effective_batch_mult)
.view(-1)
.to(input_ids.device)
)
# expand encoder_outputs
encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])
else:
encoder_outputs = None
cur_len = input_ids.shape[-1]
# compute cur_num_events
cur_num_events = torch.sum(torch.any(input_ids.unsqueeze(-1) == input_ids.new_tensor(event_token_ids), dim=-1), dim=-1)
cur_num_args = torch.sum(input_ids == arg_token_id, dim=-1)
assert (
cur_len < max_length
), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`"
assert (
max_num_events is None or torch.all(cur_num_events <= max_num_events)
), f"The context already contains {cur_num_events} events, but `max_num_events` is only {max_num_events}. Please make sure that `max_num_events` is greater than or equal to the number of events, by setting `generate(max_num_events=...,...)`"
assert (
ban_bad_verbs_event_idxs is None or all(torch.all(cur_num_args <= e_id) for e_id in ban_bad_verbs_event_idxs)
), f"The context already contains {cur_num_args} argument markers, but `ban_bad_verbs_event_idxs` is {ban_bad_verbs_event_idxs}. Please make sure every index in `ban_bad_verbs_event_idxs` is greater than or equal to the number of markers already in the context, by setting `generate(ban_bad_verbs_event_idxs=...,...)`"
assert (
input_suffix_start_event_idx is None or torch.all(cur_num_events <= input_suffix_start_event_idx)
), f"The context already contains {cur_num_events} events, but `input_suffix_start_event_idx` is {input_suffix_start_event_idx}. Please make sure that `input_suffix_start_event_idx` is greater than or equal to the number of events, by setting `generate(input_suffix_start_event_idx=...,...)`"
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
cur_len=cur_len,
cur_num_events=cur_num_events,
cur_num_args=cur_num_args,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bad_verbs_ids=bad_verbs_ids,
ban_bad_verbs_event_idxs=ban_bad_verbs_event_idxs,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
max_num_events=max_num_events,
min_num_events=min_num_events,
event_token_ids=event_token_ids,
arg_token_id=arg_token_id,
input_suffix_ids=input_suffix_ids,
input_suffix_start_event_idx=input_suffix_start_event_idx,
model_specific_kwargs=model_specific_kwargs,
)
else:
output = self._generate_no_beam_search(
input_ids,
cur_len=cur_len,
cur_num_events=cur_num_events,
cur_num_args=cur_num_args,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
bad_verbs_ids=bad_verbs_ids,
ban_bad_verbs_event_idxs=ban_bad_verbs_event_idxs,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
max_num_events=max_num_events,
min_num_events=min_num_events,
event_token_ids=event_token_ids,
arg_token_id=arg_token_id,
input_suffix_ids=input_suffix_ids,
input_suffix_start_event_idx=input_suffix_start_event_idx,
model_specific_kwargs=model_specific_kwargs,
)
return output
def _generate_no_beam_search(
self,
input_ids,
cur_len,
cur_num_events,
cur_num_args,
max_length,
min_length,
do_sample,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bad_verbs_ids,
ban_bad_verbs_event_idxs,
pad_token_id,
eos_token_id,
batch_size,
encoder_outputs,
attention_mask,
use_cache,
max_num_events,
min_num_events,
event_token_ids,
arg_token_id,
input_suffix_ids,
input_suffix_start_event_idx,
model_specific_kwargs,
):
""" Generate sequences for each example without beam search (num_beams == 1).
All returned sequences are generated independently.
"""
# length of generated sentences / unfinished sentences
unfinished_sents = input_ids.new(batch_size).fill_(1)
sent_lengths = input_ids.new(batch_size).fill_(max_length)
# the indices to the input_suffix_ids
if input_suffix_ids is not None:
input_suffix_timesteps = [None] * batch_size
else:
input_suffix_timesteps = None
past = (encoder_outputs, None) if encoder_outputs is not None else None
decoded_probs = -1.0 * torch.ones((batch_size, 1), dtype=torch.float, device=input_ids.device)
while cur_len < max_length:
if bad_verbs_ids is not None and ban_bad_verbs_event_idxs is not None:
batch_bad_verbs_ids = [bad_verbs_ids if n-1 in ban_bad_verbs_event_idxs else None for n in cur_num_events.detach().cpu().numpy()] # n event tokens seen so far means decoding is inside the (n-1)-th event span
if all(e is None for e in batch_bad_verbs_ids):
batch_bad_verbs_ids = None
else:
batch_bad_verbs_ids = None
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
)
outputs = self(**model_inputs)
next_token_logits = outputs[0][:, -1, :]
scores = self.postprocess_next_token_scores(
scores=next_token_logits,
input_ids=input_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
batch_bad_verbs_ids=batch_bad_verbs_ids,
cur_len=cur_len,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
batch_size=batch_size,
num_beams=1,
cur_num_events=cur_num_events,
cur_num_args=cur_num_args,
max_num_events=max_num_events,
min_num_events=min_num_events,
event_token_ids=event_token_ids,
arg_token_id=arg_token_id,
)
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
if do_sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
scores = scores / temperature
# Top-p/top-k filtering
next_token_logscores = top_k_top_p_filtering(scores, top_k=top_k, top_p=top_p)
# Sample
probs = F.softmax(next_token_logscores, dim=-1)
next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
# Greedy decoding
probs = F.softmax(scores, dim=-1)
next_token = torch.argmax(scores, dim=-1)
# replace next token with input suffix if needed
if input_suffix_ids is not None:
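# once the forced suffix has started for a hypothesis, override the sampled/greedy token
# with the next suffix token (pad once the suffix is exhausted)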
for b_idx, suffix_t in enumerate(input_suffix_timesteps):
if suffix_t is not None:
if suffix_t < len(input_suffix_ids):
next_token[b_idx] = input_suffix_ids[suffix_t]
input_suffix_timesteps[b_idx] += 1
else:
next_token[b_idx] = pad_token_id
next_token_probs = torch.gather(probs, dim=1, index=next_token.unsqueeze(-1)).squeeze(-1)
# update generations and finished sentences
if eos_token_id is not None:
# pad finished sentences if eos_token_id exist
tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)
else:
tokens_to_add = next_token
scores_to_add = next_token_probs * unfinished_sents + (-1) * (1 - unfinished_sents)
# add token and increase length by one
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
decoded_probs = torch.cat([decoded_probs, scores_to_add.unsqueeze(-1)], dim=-1)
cur_len = cur_len + 1
# update cur_num_events
cur_num_events = cur_num_events + (torch.any(tokens_to_add.unsqueeze(-1) == tokens_to_add.new_tensor(event_token_ids), dim=-1)).int()
cur_num_args = cur_num_args + (tokens_to_add == arg_token_id).int()
# update input suffix time steps
if input_suffix_ids is not None:
for b_idx, n in enumerate(cur_num_events.detach().cpu().numpy()):
if n-1 == input_suffix_start_event_idx and input_suffix_timesteps[b_idx] is None:
assert input_suffix_ids[0] in event_token_ids
assert tokens_to_add[b_idx] in event_token_ids, repr(tokens_to_add[b_idx])+'\n'+repr(event_token_ids) + '\n' + repr(torch.sum(torch.any(tokens_to_add.unsqueeze(-1) == tokens_to_add.new_tensor(event_token_ids), dim=-1), dim=-1)) + '\n' + repr(tokens_to_add)
input_suffix_timesteps[b_idx] = 1
if eos_token_id is not None:
eos_in_sents = tokens_to_add == eos_token_id
# if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()
sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len)
# unfinished_sents is set to zero if eos in sentence
unfinished_sents.mul_((~eos_in_sents).long())
# stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sents.max() == 0:
break
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
assert min_num_events is None or torch.all(min_num_events <= cur_num_events)
assert max_num_events is None or torch.all(cur_num_events <= max_num_events)
# compute sequence scores
best_scores = []
for hypo_idx, hypo in enumerate(input_ids):
scored_toks = [v.item() for v in decoded_probs[hypo_idx, :] if v != -1]
scored_toks = [v+1e-9 if v == 0 else v for v in scored_toks]
# compute avg of log probs
score = numpy.sum(numpy.log(scored_toks)) / len(scored_toks)
best_scores.append(score)
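# note: best_scores is computed here only for inspection; the method returns just the generated token ids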
return input_ids
def _generate_beam_search(
self,
input_ids,
cur_len,
cur_num_events,
cur_num_args,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bad_verbs_ids,
ban_bad_verbs_event_idxs,
pad_token_id,
eos_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
encoder_outputs,
attention_mask,
use_cache,
max_num_events,
min_num_events,
event_token_ids,
arg_token_id,
input_suffix_ids,
input_suffix_start_event_idx,
model_specific_kwargs,
):
""" Generate sequences for each example with beam search.
"""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times
if do_sample is False:
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# the indices to the input_suffix_ids
if input_suffix_ids is not None:
input_suffix_timesteps = [None] * (batch_size * num_beams)
else:
input_suffix_timesteps = None
# cache compute states
past = (encoder_outputs, None) if encoder_outputs is not None else None
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
if bad_verbs_ids is not None and ban_bad_verbs_event_idxs is not None:
batch_bad_verbs_ids = [bad_verbs_ids if n-1 in ban_bad_verbs_event_idxs else None for n in cur_num_events.detach().cpu().numpy()] # n event tokens seen so far means decoding is inside the (n-1)-th event span
if all(e is None for e in batch_bad_verbs_ids):
batch_bad_verbs_ids = None
else:
batch_bad_verbs_ids = None
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
)
outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
if self.config.is_encoder_decoder and do_sample is False:
# TODO (PVP) still a bit hacky here - there might be a better solution
next_token_logits = self.adjust_logits_during_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
scores = self.postprocess_next_token_scores(
scores=scores,
input_ids=input_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
batch_bad_verbs_ids=batch_bad_verbs_ids,
cur_len=cur_len,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
batch_size=batch_size,
num_beams=num_beams,
cur_num_events=cur_num_events,
cur_num_args=cur_num_args,
max_num_events=max_num_events,
min_num_events=min_num_events,
event_token_ids=event_token_ids,
arg_token_id=arg_token_id,
)
# replace next token with input suffix if needed
if input_suffix_ids is not None:
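# in beam search the suffix is forced by masking out every vocabulary entry except the required suffix token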
for b_idx, suffix_t in enumerate(input_suffix_timesteps):
if suffix_t is not None:
if suffix_t < len(input_suffix_ids):
scores[b_idx, [v != input_suffix_ids[suffix_t] for v in range(vocab_size)]] = -float("inf")
input_suffix_timesteps[b_idx] += 1
else:
scores[b_idx, [v != pad_token_id for v in range(vocab_size)]] = -float("inf")
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
scores.shape, (batch_size * num_beams, vocab_size)
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# Temperature
if temperature != 1.0:
_scores = _scores / temperature
# Top-p/top-k filtering
_scores = top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together to sample from all beam_idxs
_scores = _scores.contiguous().view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = F.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together (we are keeping top hypothesis across beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
# if we are done with this sentence, add a pad token
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
continue
# next sentence beam content, this will get added to next_batch_beam
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
# add to generated hypotheses if end of sentence
if (eos_token_id is not None) and (token_id.item() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
input_ids[effective_beam_id].clone(), beam_token_score.item(),
)
else:
# add next predicted token since it is not eos_token
next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
# once the beam for next step is full, don't add more tokens to it.
if len(next_sent_beam) == num_beams:
break
# Check if we are done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
next_scores[batch_idx].max().item(), cur_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (batch_idx + 1), "We should have added num_beams each step"
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
beam_idx = input_ids.new([x[2] for x in next_batch_beam])
# re-order batch and update current length
input_ids = input_ids[beam_idx, :]
input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
cur_len = cur_len + 1
# re-order cur_num_events and update cur_num_events
cur_num_events = cur_num_events[beam_idx]
cur_num_args = cur_num_args[beam_idx]
cur_num_events = cur_num_events + (torch.any(beam_tokens.unsqueeze(-1) == beam_tokens.new_tensor(event_token_ids), dim=-1)).int()
cur_num_args = cur_num_args + (beam_tokens == arg_token_id).int()
# update input suffix time steps
if input_suffix_ids is not None:
input_suffix_timesteps = [input_suffix_timesteps[b_idx] for b_idx in beam_idx]
for b_idx, n in enumerate(cur_num_events.detach().cpu().numpy()):
if n-1 == input_suffix_start_event_idx and input_suffix_timesteps[b_idx] is None:
assert input_suffix_ids[0] in event_token_ids
assert beam_tokens[b_idx] in event_token_ids
input_suffix_timesteps[b_idx] = 1
# re-order internal states
if past is not None and use_cache:
past = self._reorder_cache(past, beam_idx)
else:
                # encoder outputs are the same for every beam in a sample, so nothing to re-order
pass
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
# finalize all open beam hypotheses and add to generated hypotheses
for batch_idx in range(batch_size):
if done[batch_idx]:
continue
# test that beam scores match previously calculated scores if not eos and batch_idx not done
if eos_token_id is not None and all(
(token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]
):
assert torch.all(
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],
)
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].item()
final_tokens = input_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
# depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
# select the best hypotheses
sent_lengths = input_ids.new(output_batch_size)
best = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
for j in range(output_num_return_sequences_per_batch):
effective_batch_idx = output_num_return_sequences_per_batch * i + j
best_hyp = sorted_hyps.pop()[1]
sent_lengths[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
# shorter batches are padded
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`Pad_token_id` has to be defined"
sent_max_len = min(sent_lengths.max().item() + 1, max_length)
decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)
# fill with hypothesis and eos_token_id if necessary
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < max_length:
decoded[i, sent_lengths[i]] = eos_token_id
else:
# none of the hypotheses have an eos_token
            assert all(len(hypo) == max_length for hypo in best)
decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)
return decoded
```
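The final hypothesis selection and padding at the end of `_generate_beam_search` above is easier to see on toy data. Below is a minimal sketch with made-up token ids; the variable names mirror the ones above, but this is not part of the repository's API.

```python
import torch

# Toy "best" hypotheses: one token-id tensor per batch element, with different lengths.
best = [torch.tensor([0, 11, 12, 2]), torch.tensor([0, 21, 2])]
pad_token_id, eos_token_id, max_length = 1, 2, 6

sent_lengths = torch.tensor([len(h) for h in best])
if sent_lengths.min().item() != sent_lengths.max().item():
    # Pad every sequence to the longest one plus a trailing eos slot, capped at max_length.
    sent_max_len = min(sent_lengths.max().item() + 1, max_length)
    decoded = best[0].new_full((len(best), sent_max_len), pad_token_id)
    for i, hypo in enumerate(best):
        decoded[i, : sent_lengths[i]] = hypo
        if sent_lengths[i] < max_length:
            decoded[i, sent_lengths[i]] = eos_token_id
else:
    decoded = torch.stack(best).long()

print(decoded)
# tensor([[ 0, 11, 12,  2,  2],
#         [ 0, 21,  2,  2,  1]])
```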
#### File: predictors/data_visualization/visualize_entity_events_graphs.py
```python
import sys
import os
import json
import pickle
import argparse
import glob
import math
import numpy as np
import time
import traceback
from tqdm import tqdm
from collections import defaultdict
from graphviz import Digraph
import bisect
from denoising_event_lm.utils.utils import read_data
def get_all_doc_spans(doc_len, eiid2events, events_edges, unmatchedsrleiids, unmatchedsrl_eiid2events, mentions, tokens):
e_in_graph = set([eiid for eiid in events_edges.keys()]) | set([eiid for ends in events_edges.values() for eiid in ends])
# temporal events
obj_spans = [[e['tok_start'], e['tok_end'], ["in_graph" if eiid in e_in_graph else "not_in_graph", eiid]]
for eiid, e in eiid2events.items()]
# unmatched srl events
obj_spans += [[unmatchedsrl_eiid2events[eiid]['tok_start'], unmatchedsrl_eiid2events[eiid]['tok_end'], ["srl", eiid]] for eiid in unmatchedsrleiids]
# mentions, some mentions may be a predicate so we check here (eg: UDS-T dev #113)
span2idx = {(s, e): i for i, (s, e, tags) in enumerate(obj_spans)}
for m in mentions:
if (m['span'][0], m['span'][1]) in span2idx:
obj_spans[span2idx[(m['span'][0], m['span'][1])]][2][1] = ", entity"
else:
obj_spans.append([m['span'][0], m['span'][1], ["mention", "entity"]])
obj_spans = sorted(obj_spans)
#print(json.dumps(obj_spans, indent=4))
# check non-overlap
i = 0
while i < len(obj_spans)-1:
prev_s, prev_e, prev_tags = obj_spans[i]
s, e, tags = obj_spans[i+1]
if not s > prev_e:
if not (tags[0] == "mention" or prev_tags[0] == "mention"):
if e >= prev_e + 1: # s1 s2 e1 e2 -> (s1 e1)(e1+1, e2)
if i+2 == len(obj_spans) or not [prev_e+1, e] == [obj_spans[i+2][0], obj_spans[i+2][1]]: # prevent [e1+1, e2] already exists
obj_spans[i+1][0] = prev_e + 1
obj_spans = sorted(obj_spans) # when modify i+1, need to re-sort
else:
if tags[0] == "in_graph" or (tags[0] == "not_in_graph" and not obj_spans[i+2][2][0] == 'in_graph') or (tags[0] == "srl" and not obj_spans[i+2][2][0] == 'in_graph'):
obj_spans[i+2][2] = tags
obj_spans = obj_spans[:i+1] + obj_spans[i+2:]
else:
# s1 s2 e2 e1 -> (s1, s2-1)(s2, e2)(e2, e1)
obj_spans[i][1] = s - 1
if s == prev_s:
print(tokens[prev_s:prev_e+1], tokens[s:e+1])
print((prev_s, prev_e), (s, e))
print(prev_tags, tags)
assert not s == prev_s
if prev_e > e+1: # prevent s1 s2 e2==e1
insert_sp = [e+1, prev_e, prev_tags]
insert_pos = bisect.bisect_left([(ele[0], ele[1]) for ele in obj_spans], (e+1, prev_e), lo=i+2) # get insert pos only by (s, e) or the already existed (e2+1, e1) may be at insert_pos-1 instead of insert_pos
if insert_pos == len(obj_spans) or not [e+1, prev_e] == [obj_spans[insert_pos][0], obj_spans[insert_pos][1]]: # prevent [e2+1, e1] already exists
obj_spans = obj_spans[:insert_pos] + [insert_sp] + obj_spans[insert_pos:]
else:
if prev_tags[0] == "mention":
if e >= prev_e + 1: # s1 s2 e1 e2 -> (s1 e1)(e1+1, e2)
if i+2 == len(obj_spans) or not [prev_e+1, e] == [obj_spans[i+2][0], obj_spans[i+2][1]]: # prevent [e1+1, e2] already exists
obj_spans[i+1][0] = prev_e + 1
obj_spans = sorted(obj_spans) # when modify i+1, need to re-sort
else:
if tags[0] == "in_graph" or (tags[0] == "not_in_graph" and not obj_spans[i+2][2][0] == 'in_graph') or (tags[0] == "srl" and not obj_spans[i+2][2][0] == 'in_graph'):
obj_spans[i+2][2] = tags
obj_spans = obj_spans[:i+1] + obj_spans[i+2:]
else:
# s1 s2 e2 e1 -> (s1, s2-1)(s2, e2)(e2, e1)
obj_spans[i][1] = s - 1
if s == prev_s:
print(tokens[prev_s:prev_e+1], tokens[s:e+1])
print((prev_s, prev_e), (s, e))
print(prev_tags, tags)
assert not s == prev_s
if prev_e >= e+1: # prevent s1 s2 e2==e1
insert_sp = [e+1, prev_e, ["mention", "entity"]]
insert_pos = bisect.bisect_left([(ele[0], ele[1]) for ele in obj_spans], (e+1, prev_e), lo=i+2) # get insert pos only by (s, e) or the already existed (e2+1, e1) may be at insert_pos-1 instead of insert_pos
if insert_pos == len(obj_spans) or not [e+1, prev_e] == [obj_spans[insert_pos][0], obj_spans[insert_pos][1]]: # prevent [e2+1, e1] already exists
obj_spans = obj_spans[:insert_pos] + [insert_sp] + obj_spans[insert_pos:]
elif tags[0] == "mention":
if s - 1 >= prev_s: # s1 s2 e1 e2 or s1 s2 e2 e1 -> (s1, s2-1)(s2, e2)
obj_spans[i][1] = s - 1
else:
# s1==s2 e1 e2 -> (s1, e1)(e1+1, e2)
if i+2 == len(obj_spans) or not [prev_e+1, e] == [obj_spans[i+2][0], obj_spans[i+2][1]]: # prevent [e1+1, e2] already exists
obj_spans[i+1][0] = prev_e + 1
obj_spans = sorted(obj_spans) # when modify i+1, need to re-sort
else:
if tags[0] == "in_graph" or (tags[0] == "not_in_graph" and not obj_spans[i+2][2][0] == 'in_graph') or (tags[0] == "srl" and not obj_spans[i+2][2][0] == 'in_graph'):
obj_spans[i+2][2] = tags
obj_spans = obj_spans[:i+1] + obj_spans[i+2:]
if not e >= prev_e + 1:
print(span2idx)
print((prev_s, prev_e), (s, e))
print(prev_tags, tags)
exit()
i += 1
# check results
assert all(obj_spans[i][0] > obj_spans[i-1][1] for i in range(1, len(obj_spans)))
assert all(e >= s for s, e, tags in obj_spans)
all_spans = []
sp2tags = []
last_end = -1
for s, e, tags in obj_spans:
if s > last_end+1:
all_spans.append((last_end+1, s-1))
sp2tags.append(["", ""])
all_spans.append((s, e))
sp2tags.append(tags)
last_end = e
if doc_len > last_end+1:
all_spans.append((last_end+1, doc_len-1))
sp2tags.append(["", ""])
return all_spans, sp2tags
def get_digraph_template(eiid2events, events_edges):
g = Digraph()
for start, ends in events_edges.items():
for end in ends:
g.edge(("[%s]\n" % start)+eiid2events[start]['event'],
("[%s]\n" % end)+eiid2events[end]['event'])
return g.source
def get_instance_for_render(d_nlp, d_graphs):
assert d_nlp['doc_id'] == d_graphs['doc_id']
doc_text = d_nlp['text']
doc_toks = d_nlp['tokens']
sents_tok_offset = d_nlp['sents_tok_offset'] + [len(doc_toks)]
eiid2srlvid = d_graphs['eiid2srlvid']
unmatchedsrl_eiid2events = d_graphs['unmatchedsrl_eiid2events']
clusterid2graph = d_graphs['clusterid2graph']
clusterid2unmatchedsrleiids = d_graphs['clusterid2unmatchedsrleiids']
# get coref
coref_clusters = d_nlp['pred_coref']
clusterid2mentions = defaultdict(list)
for cluster in coref_clusters:
for m in cluster:
offset = sents_tok_offset[m['sent_id']]
start, end = m['span']
m['span'] = [start+offset, end+offset]
clusterid2mentions[m['cluster_id']].append(m)
# get render instance for each entity
entity_objs = []
for c_id in clusterid2graph:
eiid2events = clusterid2graph[c_id]['eiid2events']
events_edges = clusterid2graph[c_id]['events_edges']
unmatchedsrleiids = clusterid2unmatchedsrleiids.get(c_id, [])
mentions = clusterid2mentions[int(c_id)]
all_doc_spans, doc_sp2tags = get_all_doc_spans(len(doc_toks), eiid2events, events_edges, unmatchedsrleiids, unmatchedsrl_eiid2events, mentions, doc_toks)
graph_template = get_digraph_template(eiid2events, events_edges)
obj = {"doc_tokens": doc_toks,
"all_doc_spans": all_doc_spans,
"doc_sp2tags": doc_sp2tags,
"graph_template": graph_template,
"doc_id": d_nlp['doc_id'],
}
entity_objs.append(obj)
return entity_objs
def render_token(tok, tags):
style = ""
if tags[0] == "in_graph":
style = "background-color: rgba(0, 0, 255, 0.5); border-radius: 7px; padding-left: 3px; padding-right: 3px; border-style: solid; border-color: rgba(0, 0, 255, 0.6); border-width: 1.5px"
elif tags[0] == "not_in_graph":
style = "background-color: rgba(0, 0, 255, 0.2); border-radius: 7px; padding-left: 3px; padding-right: 3px; border-style: dashed; border-color: rgba(0, 0, 255, 0.3); border-width: 1.5px"
elif tags[0] == "srl":
style = "background-color: rgba(0, 0, 255, 0.2); border-radius: 7px; padding-left: 3px; padding-right: 3px; border-style: dashed; border-color: rgba(0, 0, 255, 0.3); border-width: 1.5px"
elif tags[0] == "mention":
style = "background-color: rgba(0, 179, 179, 0.4); border-radius: 7px; padding-left: 3px; padding-right: 3px;"
style = repr(style)
tip = repr(tags[1])
br_splits = tok.split('<br />\n')
block = "".join("<span>{:s}</span>".format(br_split if i == len(br_splits)-1 else f"<span>{br_split}</span><br/><br/>")
for i, br_split in enumerate(br_splits))
return \
f"""<span><span data-toggle="tooltip" data-placement="auto top" title={tip} style={style}>{block}</span><span> </span></span>"""
def render_doc(entity_obj, c_id, last=False):
"""render documents with each special spans being highlighted, also add divs for graphviz rendering"""
doc_id = entity_obj['doc_id'].replace('.', '_')
tokens = entity_obj['doc_tokens']
spans = entity_obj['all_doc_spans']
sp2tags = entity_obj['doc_sp2tags']
doc_block = "".join(render_token(" ".join(tokens[s:e+1]), sp2tags[i]) for i, (s, e) in enumerate(spans))
hr = """<hr style="height: 1px" />""" if not last else ""
return f"""
<div class="form__field">
<div class="doc">
<h4>Doc #{doc_id} - Entity #{c_id}</h4>
{doc_block}
</div>
<div id="graph_{doc_id}_{c_id}" style="text-align: center;" class="doc">
</div>
{hr}
</div>
"""
def render_entity_events_graphs(ins):
"""render documents with each special spans being highlighted, also add divs for graphviz rendering"""
block = "".join(render_doc(entity_obj, c_id, c_id == len(ins)-1) for c_id, entity_obj in enumerate(ins))
return f"""
<div>
{block}
<br/>
<br/>
<br/>
<hr style="height: 2px; border: none; background-color: #b3b3b3;" />
</div>
"""
def render_graphviz_objects(ins):
"""render graphviz object for each instance, put into the script part"""
block = "\n".join('d3.select("#graph_{:s}_{:s}").graphviz().zoom(false).renderDot({:s});'.format(obj['doc_id'].replace('.', '_'), str(c_id), repr(obj['graph_template'])) for c_id, obj in enumerate(ins))
return block
def render_index_html(html_body, script_body):
"""get index.html"""
return f"""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link href="https://fonts.googleapis.com/css?family=Roboto+Mono&display=swap" rel="stylesheet">
<link href='https://fonts.googleapis.com/css?family=Source+Sans+Pro' rel='stylesheet' type='text/css'>
<script src="https://d3js.org/d3.v5.min.js"></script>
<script src="https://unpkg.com/@hpcc-js/[email protected]/dist/index.min.js"></script>
<script src="https://unpkg.com/[email protected]/build/d3-graphviz.js"></script>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js"></script>
<style>
body,
html {{
min-width: 48em;
font-size: 16px;
width: 100%;
height: 100%;
margin: 0;
padding: 0;
}}
* {{
font-family: 'Source Sans Pro', sans-serif;
color: #232323;
}}
.model__content {{
padding: 0.6em 2em 0.875em 2em;
margin: auto;
-webkit-transition: padding .2s ease, margin .2s ease;
transition: padding .2s ease, margin .2s ease;
}}
.form__field {{
-webkit-transition: margin .2s ease;
transition: margin .2s ease;
}}
div.doc {{
color:black;
font-size: 16px;
padding-left: 5px;
padding-top: 5px;
padding-bottom: 5px;
padding-right: 5px;
margin-bottom: 10px;
line-height: 40px;
}}
</style>
</head>
<body>
<div class="model__content">
{html_body}
</div>
<script>
{script_body}
$(document).ready(function(){{
$('[data-toggle="tooltip"]').tooltip();
}});
</script>
</body>
</html>
"""
def main(args):
graphs_data, _ = read_data(args.graphs_input, args)
nlp_data, _ = read_data(args.nlp_input, args)
if args.num_splits is None or args.num_splits <= 1:
all_instances = []
num_graphs = 0
num_graphs_with_distractor = 0
num_distractors = 0
for d_nlp, d_graphs in zip(tqdm(nlp_data), graphs_data):
instance = get_instance_for_render(d_nlp, d_graphs)
for obj in instance:
num_graphs += 1
num_graphs_with_distractor += int(any(tag[0] in ['not_in_graph', 'srl'] for tag in obj['doc_sp2tags']))
num_distractors += sum(tag[0] in ['not_in_graph', 'srl'] for tag in obj['doc_sp2tags'])
all_instances.append(instance)
html_body = "".join(render_entity_events_graphs(ins) for ins in all_instances)
script_body = "\n".join(render_graphviz_objects(ins) for ins in all_instances)
index_html_string = render_index_html(html_body, script_body)
with open(args.output, 'w') as f:
f.write(index_html_string)
print(num_graphs_with_distractor / num_graphs, num_graphs_with_distractor, num_graphs)
print(num_distractors / num_graphs, num_distractors, num_graphs)
else:
batch = len(nlp_data) // args.num_splits
for start in range(0, len(nlp_data), batch):
end = start + batch
all_instances = []
for d_nlp, d_graphs in zip(tqdm(nlp_data[start:end]), graphs_data[start:end]):
instance = get_instance_for_render(d_nlp, d_graphs)
all_instances.append(instance)
html_body = "".join(render_entity_events_graphs(ins) for ins in all_instances)
script_body = "\n".join(render_graphviz_objects(ins) for ins in all_instances)
index_html_string = render_index_html(html_body, script_body)
with open(args.output+'_{:d}-{:d}_index.html'.format(start, end), 'w') as f:
f.write(index_html_string)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="create index html for rendering entity event graphs")
parser.add_argument("--graphs_input", help="input path to load entity event graphs")
parser.add_argument("--nlp_input", help="input path to nlp annotated data")
parser.add_argument("--output", default="./index.html", help="output path for index.html")
parser.add_argument('--start', type=int, help='start idx of data to be processed', default=-1)
parser.add_argument('--end', type=int, help='end idx of data to be processed', default=-1)
parser.add_argument('--num_splits', type=int, help='split outputs to different files', default=None)
args = parser.parse_args()
main(args)
```
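`get_digraph_template` above only assembles DOT source, which the generated `index.html` later renders with d3-graphviz. A small standalone illustration with hypothetical event data (the `graphviz` calls are the same ones used above):

```python
from graphviz import Digraph

# Hypothetical per-entity data in the shape the functions above expect.
eiid2events = {
    "ei1": {"event": "troops entered the city"},
    "ei2": {"event": "residents fled"},
}
events_edges = {"ei1": ["ei2"]}  # ei1 temporally precedes ei2

g = Digraph()
for start, ends in events_edges.items():
    for end in ends:
        g.edge(("[%s]\n" % start) + eiid2events[start]["event"],
               ("[%s]\n" % end) + eiid2events[end]["event"])
print(g.source)  # DOT text: one node per event, one edge per temporal link
```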
#### File: predictors/event_lm/test_demo_event_lm_mctaco_before_after.py
```python
import json
import pickle
import sys
import os
import glob
import pandas as pd
import numpy as np
from tqdm import tqdm
from allennlp.predictors.predictor import Predictor
from copy import deepcopy
import torch
from torch import nn
import heapq
import argparse
import allennlp
from allennlp.common.checks import check_for_gpu
if allennlp.__version__ == '0.8.5':
from allennlp.common.util import import_submodules as import_module_and_submodules
elif allennlp.__version__ == '1.1.0':
from allennlp.common.util import import_module_and_submodules
from allennlp.models.archival import load_archive
def normalize_arg_type(arg_type):
if arg_type[0] in ['R', 'C']:
return arg_type[2:]
else:
return arg_type
def get_flatten_varg_toks(varg):
varg_toks = [varg['V_toks']] + varg['ARGS_toks']
varg_span = [varg['V_span']] + varg['ARGS_span']
varg_type = ['V'] + [normalize_arg_type(arg_type) for arg_type in varg['ARGS_type']]
assert len(varg_toks) == len(varg_span) and len(varg_toks) == len(varg_type)
indices = list(range(len(varg_toks)))
# sort pred/args by their textual order
indices = sorted(indices, key=lambda x: varg_span[x])
varg_toks = [varg_toks[i] for i in indices]
varg_type = [varg_type[i] for i in indices]
flatten_toks = []
for i, toks in enumerate(varg_toks):
flatten_toks.extend(toks)
return flatten_toks
def chain_str(chain):
texts = []
for varg in chain:
if not 'Description' in varg:
varg['Description'] = " ".join(get_flatten_varg_toks(varg))
texts.append("<EVENT> " + " ".join(varg['V_toks']) + " <ARGS> " + varg['Description'])
return texts
def check_chain_fulfill_constraints(events, constraints):
def fulfill_constraint(e1, e2):
for e in events:
if e == e1:
return True
elif e == e2:
return False
return all(fulfill_constraint(e1, e2) for e1, e2 in constraints)
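# Worked example (hypothetical events A, B, C, predicted in the order [A, B, C]):
# fulfill_constraint(A, C) scans the chain, meets A first and returns True, so the
# constraint list [(A, C)] is satisfied; [(C, B)] fails because B is met before C.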
def predict_on_unseen_events(data, predictor, args, file=sys.stdout):
question_event_in_context = data['question_event_in_context']
question_event_in_context_idx = data['question_event_in_context_idx']
assert data['context_events'][question_event_in_context_idx] == question_event_in_context
assert data['temp_rel'] in {'BEFORE', 'AFTER'}
if data['temp_rel'] == 'BEFORE':
constraints = [(data['candidate_event'], question_event_in_context)]
elif data['temp_rel'] == 'AFTER':
constraints = [(question_event_in_context, data['candidate_event'])]
test_json = {
'events': data['context_events'],
'cand_event': data['candidate_event'],
'beams': args.beams,
'feed_unseen': args.feed_unseen
}
output = predictor.predict_json(test_json)
print('---'*3, file=file)
print('##Context##', file=file)
print(data['context'], file=file)
print(file=file)
print('##Question##', file=file)
print(data['question'], file=file)
print(file=file)
print('##Candidate##', file=file)
print(data['candidate'], file=file)
print(file=file)
print("##Relation##", file=file)
print("[Candidate]", data['temp_rel'], "[Question]", file=file)
print(file=file)
print('---'*3, file=file)
print("input_repr:", file=file)
for r in chain_str(output['input_vargs']):
print(r, file=file)
print(file=file)
print('---'*3, file=file)
print("question_repr:", file=file)
for r in chain_str([question_event_in_context]):
print(r, file=file)
print(file=file)
print('---'*3, file=file)
print("cand_repr:", file=file)
for r in chain_str(output['unseen_vargs']):
print(r, file=file)
print(file=file)
print('---'*3, file=file)
print("Max: {:.4f} - Min: {:.4f} - Mean: {:.4f} - Std: {:.4f} - Best POS: {:.4f}".format(np.max(output['all_beam_scores']), np.min(output['all_beam_scores']), np.mean(output['all_beam_scores']), np.std(output['all_beam_scores']), output["best_pos_score"]), file=file)
beam_matches = []
for b_idx, pred in enumerate(output['beam_pred']):
if "EVENT_SEP" in pred['pred_vargs'][0]:
for v in pred['pred_vargs']:
v.pop("EVENT_SEP")
assert question_event_in_context in pred['pred_vargs']
assert data['candidate_event'] in pred['pred_vargs']
match = check_chain_fulfill_constraints(pred['pred_vargs'], constraints)
beam_matches.append(match)
print("Beam {:d} (gold: {} - score: {:.4f})".format(b_idx, match, pred['score']), file=file)
for r in chain_str(pred['pred_vargs']):
print(r, file=file)
print(file=file)
print("\n\n", file=file)
return beam_matches
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='test the predictor above')
parser.add_argument('--archive-path', type=str, required=True, help='path to trained archive file')
parser.add_argument('--predictor', type=str, required=True, help='name of predictor')
parser.add_argument('--weights-file', type=str,
help='a path that overrides which weights file to use')
parser.add_argument('--cuda-device', type=int, default=-1, help='id of GPU to use (if any)')
parser.add_argument('-o', '--overrides', type=str, default="",
help='a JSON structure used to override the experiment configuration')
parser.add_argument('--include-package',
type=str,
action='append',
default=[],
help='additional packages to include')
parser.add_argument('--input-path', type=str, nargs='+', help='input data')
parser.add_argument('--beams', type=int, help='beam size', default=1)
parser.add_argument('--num_instances', type=int, default=-1,
help='number of instances to process')
parser.add_argument('--feed-unseen', action='store_true', help='whether to feed unseen events as inputs', default=False)
args = parser.parse_args()
# Load modules
for package_name in args.include_package:
import_module_and_submodules(package_name)
check_for_gpu(args.cuda_device)
archive = load_archive(args.archive_path,
weights_file=args.weights_file,
cuda_device=args.cuda_device,
overrides=args.overrides)
predictor = Predictor.from_archive(archive, args.predictor)
data = []
for path_regex in args.input_path:
for path in sorted(glob.glob(path_regex)):
with open(path, 'r') as f:
data += json.load(f)
if args.num_instances > 0:
data = data[:args.num_instances]
print("Num Instances:", len(data))
total_confusion = {
"gold BEFORE": {
"pred BEFORE": 0.,
"pred AFTER": 0.
},
"gold AFTER": {
"pred BEFORE": 0.,
"pred AFTER": 0.
}
}
total_correct = 0.
total_examples = 0
for d_idx, d in enumerate(tqdm(data)):
beam_matches = predict_on_unseen_events(d, predictor, args)
if beam_matches[0]:
pred_temp_rel = d['temp_rel']
else:
if d['temp_rel'] == 'BEFORE':
pred_temp_rel = 'AFTER'
else:
pred_temp_rel = 'BEFORE'
total_confusion['gold '+d['temp_rel']]['pred '+pred_temp_rel] += 1
total_correct += int(beam_matches[0])
total_examples += 1
assert sum(pv for gk, gv in total_confusion.items() for pk, pv in gv.items()) == total_examples
assert sum(pv for gk, gv in total_confusion.items() for pk, pv in gv.items() if gk[5:] == pk[5:]) == total_correct
print("Acc: {:.4f} ({:.4f} / {:d})".format(total_correct / total_examples, total_correct, total_examples))
# BEFORE f1
if sum(pv for pk, pv in total_confusion['gold BEFORE'].items()) > 0:
recl = total_confusion['gold BEFORE']['pred BEFORE'] / sum(pv for pk, pv in total_confusion['gold BEFORE'].items())
else:
recl = 0.
if sum(gv['pred BEFORE'] for gk, gv in total_confusion.items()) > 0:
prec = total_confusion['gold BEFORE']['pred BEFORE'] / sum(gv['pred BEFORE'] for gk, gv in total_confusion.items())
else:
prec = 0.
if prec + recl > 0:
before_f1 = (2 * prec * recl) / (prec + recl)
else:
before_f1 = 0.
print("BEFORE P: {:.4f} - R: {:.4f} - F1: {:.4f}".format(prec, recl, before_f1))
# AFTER f1
if sum(pv for pk, pv in total_confusion['gold AFTER'].items()) > 0:
recl = total_confusion['gold AFTER']['pred AFTER'] / sum(pv for pk, pv in total_confusion['gold AFTER'].items())
else:
recl = 0.
if sum(gv['pred AFTER'] for gk, gv in total_confusion.items()) > 0:
prec = total_confusion['gold AFTER']['pred AFTER'] / sum(gv['pred AFTER'] for gk, gv in total_confusion.items())
else:
prec = 0.
if prec + recl > 0:
after_f1 = (2 * prec * recl) / (prec + recl)
else:
after_f1 = 0.
print("AFTER P: {:.4f} - R: {:.4f} - F1: {:.4f}".format(prec, recl, after_f1))
macro_f1 = (before_f1 + after_f1) / 2.
print("Macro F1: {:.4f})".format(macro_f1))
``` |
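The accuracy/F1 block at the end of the script works off a small nested-dict confusion matrix. Here is a self-contained sketch of the same computation with made-up counts, kept in the same layout as `total_confusion` above:

```python
# Made-up counts, laid out like total_confusion above.
total_confusion = {
    "gold BEFORE": {"pred BEFORE": 40.0, "pred AFTER": 10.0},
    "gold AFTER": {"pred BEFORE": 5.0, "pred AFTER": 45.0},
}

def f1_for(label):
    gold_row = total_confusion["gold " + label]
    gold_total = sum(gold_row.values())
    recall = gold_row["pred " + label] / gold_total if gold_total else 0.0
    pred_total = sum(row["pred " + label] for row in total_confusion.values())
    precision = gold_row["pred " + label] / pred_total if pred_total else 0.0
    return 2 * precision * recall / (precision + recall) if precision + recall else 0.0

macro_f1 = (f1_for("BEFORE") + f1_for("AFTER")) / 2.0
print("Macro F1: {:.4f}".format(macro_f1))
```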
{
"source": "jjason/RayTracerChallenge",
"score": 3
} |
#### File: RayTracerChallenge/ray_tracer/lights.py
```python
from color import Color
from point import Point
class PointLight:
def __init__(self,
position=None,
intensity=None):
self.position = position
self.intensity = intensity
@property
def position(self):
return self._position
@position.setter
def position(self, value):
self._position = Point(x=value.x, y=value.y, z=value.z) \
if value else Point(x=0, y=0, z=0)
@property
def intensity(self):
return self._intensity
@intensity.setter
def intensity(self, value):
self._intensity = Color(red=value.red,
green=value.green,
blue=value.blue) \
if value else Color(red=1, green=1, blue=1)
def __eq__(self, other):
return self.position == other.position and \
self.intensity == other.intensity
def __ne__(self, other):
return not self == other
```
#### File: ray_tracer/patterns/checker_board.py
```python
import math
from patterns.pattern import Pattern
class CheckerBoard(Pattern):
"""
The checker board pattern. A checker board pattern has two colors and
changes in all three dimensions such that no two adjacent cubes are the
same color. The color is determined by:
                           +- color_a  if (floor(px) + floor(py) + floor(pz)) mod 2 == 0
        color @ point  =>  |
                           +- color_b  otherwise.
"""
def __init__(self,
color_a=None,
color_b=None,
transform=None):
"""
Initialize the stripe pattern object.
:param color_a: The color for the cubes with anchor point having zero
or two coordinates with odd values - (0, 0, 0), (1, 1, 0), etc.
If not provided, default is white.
:param color_b: The color for the cubes with anchor point having zero
or two coordinates with even values - (1, 1, 1), (1, 0, 0), etc.
If not provided, default is black
:param transform: The transform to be applied to the pattern. If None,
then the identity transform is used.
"""
super().__init__(color_a=color_a, color_b=color_b, transform=transform)
def color_at(self, position):
"""
Return the color (a or b) for the position provided. The color is
determined as described above.
:param position: The point for which color is to be determined.
:return: Color, the color for the point.
"""
return self.color_a if (math.floor(position.x) +
math.floor(position.y) +
math.floor(position.z)) % 2 == 0 else \
self.color_b
```
#### File: ray_tracer/patterns/gradient.py
```python
import math
from patterns.pattern import Pattern
class Gradient(Pattern):
"""
The gradient pattern. A gradient pattern has two colors and blends between
them as the point moves in the x direction between coordinates. For a point
with an x coordinate px, floor(px) is color_a and floor(px) + 1 is color_b.
To determine the color of px, do a linear interpolation between color_a and
color_b between floor(px) and px. i.e.,
color @ px = color_a + (color_b - color_a) * (px - floor(px))
The color is independent of y and z coordinates.
"""
def __init__(self,
color_a=None,
color_b=None,
transform=None):
"""
Initialize the gradient pattern object.
:param color_a: The color at floor(x coordinate). If not provided,
default is white.
:param color_b: The color at floor(x coordinate) + 1. If not provided,
default is black
:param transform: The transform to be applied to the pattern. If None,
then the identity transform is used.
"""
super().__init__(color_a=color_a, color_b=color_b, transform=transform)
def color_at(self, position):
"""
Return the interpolated color for the position provided. The color is
determined as described above.
:param position: The point for which color is to be determined.
:return: Color, the color for the point.
"""
distance = self.color_b - self.color_a
fraction = position.x - math.floor(position.x)
return self.color_a + (distance * fraction)
```
#### File: RayTracerChallenge/ray_tracer/sphere.py
```python
import math
from intersections import Intersection, Intersections
from point import Point
from shape import Shape
class Sphere(Shape):
def __init__(self,
center=Point(),
radius=1.0,
transform=None,
material=None):
super().__init__(transform=transform, material=material)
self.center = center
self.radius = radius
@property
def center(self):
return self._center
@center.setter
def center(self, value):
self._center = Point(x=value.x, y=value.y, z=value.z) \
if value else Point(x=0, y=0, z=0)
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, value):
self._radius = value
def _intersect(self, ray):
"""
Override base class method to provide sphere-specific method for
computing intersection with ray and sphere.
This method should not be called directly. Instead, call the public
intersect method defined in the Shape class.
:param ray: The ray we use to compute intersections.
:return: Intersections, the set of Intersection objects representing
the intersections between the provided ray and this sphere.
"""
# To understand how to compute the intersection of a ray with this
# sphere, it might be helpful to read this lesson,
# https://tinyurl.com/tuhcnx7, on the Analytical
# Solution for Ray-Sphere Intersection. This Wikipedia page is also
# helpful for understanding solving the problem for a sphere that is
# not at the center: https://tinyurl.com/u57fouv
#
# TL;DR
#
# By taking the equation of a sphere with radius, r, and center, c, any
# point, p, on the sphere satisfies the equation:
#
# 2 2
# (p - c) = r
#
# The equation of any point on the ray with origin O and direction D:
#
# p = O + tD
#
# you can substitute the equation for a point on the ray into the
# equation for the sphere to get:
#
# 2 2
# |O + Dt - c| = r
#
# 2 2
# |O + Dt - c| - r = 0
#
# 2 2
# |(O - c) + Dt| - r = 0
#
# 2 2 2 2
# (O - c) + 2D(O - c)t + D t - r = 0
#
# and finally get to a quadratic in the form f(x) = ax^2 + bx + c
#
# 2 2 2 2
# D t + 2D(O - c)t + (O - c) - r = 0
#
# 2
# a = D
#
# b = 2 * D * (O - c)
#
# 2 2 2
# c = (O - c) - r = (O - c) * (O - c) - r
#
# Note: (O - c) is the ray from the center of the sphere to the origin
# of the ray. When the sphere is centered on the origin, the c
# component disappears.
#
# From there we can use our favorite, the quadratic formula,
#
# 2
# -b +/- sqrt(b - 4ac)
# --------------------
# 2a
#
        # and more specifically the discriminant d = (b^2 - 4ac) to determine how
# many solutions there are:
#
# d < 0 - no solutions
# d = 0 - 1 solution
# d > 0 - 2 solutions
#
# and then the solution(s) if any exist.
#
# Phew. That is a lot of explaining, but I wanted to make sure that I
# understood what the code in the book was doing.
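        # Quick numeric check of the algebra above: for a ray with origin (0, 0, -5) and
        # direction (0, 0, 1) against this unit sphere at the origin, sphere_to_ray is
        # (0, 0, -5), so a = 1, b = -10, c = 24, the discriminant is 100 - 96 = 4, and
        # the two intersections are t = 4.0 and t = 6.0.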
# Create a vector from the center of this sphere to the origin of the
# ray
sphere_to_ray = ray.origin - self._center
# Compute the a, b, and c coefficients for the quadratic equation
# described above:
a = ray.direction.dot_product(ray.direction)
b = 2 * ray.direction.dot_product(sphere_to_ray)
c = sphere_to_ray.dot_product(sphere_to_ray) - self._radius**2
# Now compute the discriminant to determine if we even have solutions.
discriminant = b**2 - 4 * a * c
if discriminant < 0:
return Intersections()
i1 = Intersection(time=(-b - math.sqrt(discriminant)) / (2 * a),
shape=self)
i2 = Intersection(time=(-b + math.sqrt(discriminant)) / (2 * a),
shape=self)
return Intersections(i1, i2)
def _normal_at(self, position):
"""
Override base class method to provide sphere-specific method for
computing normal at the point specified.
This method should not be called directly. Instead, call the public
normal_at method defined in the Shape class.
:param position: The position on the sphere for which the normal is
to be computed.
:return: Vector, the normal at the point provided.
"""
return position - Point(x=0, y=0, z=0)
```
#### File: ray_tracer/tests/test_checker_board.py
```python
import unittest
from color import Color
from point import Point
from patterns.checker_board import CheckerBoard
class TestCheckerBoard(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.white = Color(red=1, green=1, blue=1)
cls.black = Color(red=0, green=0, blue=0)
def test_color_should_repeat_in_x(self):
c = CheckerBoard(color_a=self.__class__.white,
color_b=self.__class__.black)
self.assertEqual(c.color_at(position=Point(x=0, y=0, z=0)),
self.__class__.white)
self.assertEqual(c.color_at(position=Point(x=0.99, y=0, z=0)),
self.__class__.white)
self.assertEqual(c.color_at(position=Point(x=1.01, y=0, z=0)),
self.__class__.black)
def test_color_should_repeat_in_y(self):
c = CheckerBoard(color_a=self.__class__.white,
color_b=self.__class__.black)
self.assertEqual(c.color_at(position=Point(x=0, y=0, z=0)),
self.__class__.white)
self.assertEqual(c.color_at(position=Point(x=0, y=0.99, z=0)),
self.__class__.white)
self.assertEqual(c.color_at(position=Point(x=0, y=1.01, z=0)),
self.__class__.black)
def test_color_should_repeat_in_z(self):
c = CheckerBoard(color_a=self.__class__.white,
color_b=self.__class__.black)
self.assertEqual(c.color_at(position=Point(x=0, y=0, z=0)),
self.__class__.white)
self.assertEqual(c.color_at(position=Point(x=0, y=0, z=0.99)),
self.__class__.white)
self.assertEqual(c.color_at(position=Point(x=0, y=0, z=1.01)),
self.__class__.black)
if __name__ == '__main__':
unittest.main()
```
#### File: ray_tracer/tests/test_lights.py
```python
import unittest
from color import Color
from lights import PointLight
from point import Point
class TestPointLight(unittest.TestCase):
def test_create(self):
p = Point(x=0, y=0, z=0)
i = Color(red=1, green=1, blue=1)
l = PointLight(position=p, intensity=i)
self.assertEqual(p, l.position)
self.assertEqual(i, l.intensity)
if __name__ == '__main__':
unittest.main()
```
#### File: ray_tracer/tests/test_ray.py
```python
import unittest
from matrix import Matrix
from point import Point
from vector import Vector
from ray import Ray
class TestRay(unittest.TestCase):
def test_create(self):
o = Point(x=1, y=2, z=3)
d = Vector(x=4, y=5, z=6)
r = Ray(origin=o, direction=d)
self.assertEqual(r.origin, o)
self.assertEqual(r.direction, d)
def test_position(self):
r = Ray(origin=Point(x=2, y=3, z=4), direction=Vector(x=1, y=0, z=0))
self.assertEqual(r.position(time=0), Point(x=2, y=3, z=4))
self.assertEqual(r.position(time=1), Point(x=3, y=3, z=4))
self.assertEqual(r.position(time=-1), Point(x=1, y=3, z=4))
self.assertEqual(r.position(time=2.5), Point(x=4.5, y=3, z=4))
def test_transform_by_identity(self):
r1 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=0, y=1, z=0))
m = Matrix.identity()
r2 = r1.transform(transformation=m)
self.assertIsNot(r1, r2)
self.assertEqual(r2.origin, r1.origin)
self.assertEqual(r2.direction, r1.direction)
def test_transform_by_translation(self):
r1 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=0, y=1, z=0))
m = Matrix.translation_transform(x=3, y=4, z=5)
r2 = r1.transform(transformation=m)
self.assertIsNot(r1, r2)
self.assertEqual(r2.origin, Point(x=4, y=6, z=8))
self.assertEqual(r2.direction, Vector(x=0, y=1, z=0))
def test_transform_by_scaling(self):
r1 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=0, y=1, z=0))
m = Matrix.scaling_transform(x=2, y=3, z=4)
r2 = r1.transform(transformation=m)
self.assertIsNot(r1, r2)
self.assertEqual(r2.origin, Point(x=2, y=6, z=12))
self.assertEqual(r2.direction, Vector(x=0, y=3, z=0))
def test_equal(self):
r1 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=0, y=1, z=0))
r2 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=0, y=1, z=0))
self.assertTrue(r1 == r1)
self.assertTrue(r1 == r2)
r3 = Ray(origin=Point(x=0, y=2, z=3), direction=Vector(x=0, y=1, z=0))
self.assertFalse(r1 == r3)
r3 = Ray(origin=Point(x=1, y=3, z=3), direction=Vector(x=0, y=1, z=0))
self.assertFalse(r1 == r3)
r3 = Ray(origin=Point(x=1, y=2, z=4), direction=Vector(x=0, y=1, z=0))
self.assertFalse(r1 == r3)
r3 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=1, y=1, z=0))
self.assertFalse(r1 == r3)
r3 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=0, y=0, z=0))
self.assertFalse(r1 == r3)
r3 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=0, y=1, z=1))
self.assertFalse(r1 == r3)
def test_not_equal(self):
r1 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=0, y=1, z=0))
r2 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=0, y=1, z=0))
self.assertFalse(r1 != r1)
self.assertFalse(r1 != r2)
r3 = Ray(origin=Point(x=0, y=2, z=3), direction=Vector(x=0, y=1, z=0))
self.assertTrue(r1 != r3)
r3 = Ray(origin=Point(x=1, y=3, z=3), direction=Vector(x=0, y=1, z=0))
self.assertTrue(r1 != r3)
r3 = Ray(origin=Point(x=1, y=2, z=4), direction=Vector(x=0, y=1, z=0))
self.assertTrue(r1 != r3)
r3 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=1, y=1, z=0))
self.assertTrue(r1 != r3)
r3 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=0, y=0, z=0))
self.assertTrue(r1 != r3)
r3 = Ray(origin=Point(x=1, y=2, z=3), direction=Vector(x=0, y=1, z=1))
self.assertTrue(r1 != r3)
if __name__ == '__main__':
unittest.main()
```
#### File: ray_tracer/tests/test_ring.py
```python
import unittest
import math
from color import Color
from point import Point
from patterns.ring import Ring
class TestRing(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.white = Color(red=1, green=1, blue=1)
cls.black = Color(red=0, green=0, blue=0)
def test_color_should_not_change_with_y(self):
r = Ring(color_a=self.__class__.white, color_b=self.__class__.black)
self.assertEqual(r.color_at(position=Point(x=0, y=0, z=0)),
self.__class__.white)
self.assertEqual(r.color_at(position=Point(x=0, y=1, z=0)),
self.__class__.white)
self.assertEqual(r.color_at(position=Point(x=0, y=2, z=0)),
self.__class__.white)
    def test_color_at_should_extend_in_x_and_z(self):
r = Ring(color_a=self.__class__.white, color_b=self.__class__.black)
self.assertEqual(r.color_at(position=Point(x=0, y=0, z=0)),
self.__class__.white)
self.assertEqual(r.color_at(position=Point(x=1, y=0, z=0)),
self.__class__.black)
self.assertEqual(r.color_at(position=Point(x=0, y=0, z=1)),
self.__class__.black)
self.assertEqual(r.color_at(position=Point(x=math.sqrt(2)/2,
y=0,
z=math.sqrt(2)/2)),
self.__class__.black)
if __name__ == '__main__':
unittest.main()
```
#### File: ray_tracer/tests/test_vector.py
```python
import math
import unittest
from math import sqrt
from point import Point
from tuple import Tuple
from util import Utilities
from vector import Vector
class TestVector(unittest.TestCase):
def test_create(self):
v = Vector(4, -4, 3)
t = Tuple(v.x, v.y, v.z, 0.0)
self.assertTrue(v == t)
def test_is_point(self):
v = Vector(4, -4, 3)
self.assertFalse(v.is_point())
def test_is_vector(self):
v = Vector(4, -4, 3)
self.assertTrue(v.is_vector())
def test_add_vector(self):
v1 = Vector(1, 2, 3)
v2 = Vector(4, 5, 6)
v3 = v1 + v2
self.assertTrue(v3.is_vector())
self.assertEqual(v3, Vector(5, 7, 9))
def test_subtract_point(self):
with self.assertRaises(TypeError):
Vector(1, 2, 3) - Point(4, 5, 6)
def test_subtract_vector(self):
v1 = Vector(3, 2, 1)
v2 = Vector(5, 6, 7)
v3 = v1 - v2
self.assertTrue(v3.is_vector())
self.assertEqual(v3, Vector(-2, -4, -6))
    def test_subtract_from_zero_vector(self):
zero = Vector(0, 0, 0)
v1 = Vector(1, -2, 3)
v2 = zero - v1
self.assertTrue(v2.is_vector())
self.assertEqual(v2, Vector(-1, 2, -3))
def test_magnitude(self):
self.assertTrue(Utilities.equal(Vector(1, 0, 0).magnitude(), 1))
self.assertTrue(Utilities.equal(Vector(0, 1, 0).magnitude(), 1))
self.assertTrue(Utilities.equal(Vector(0, 0, 1).magnitude(), 1))
self.assertTrue(Utilities.equal(Vector(1, 2, 3).magnitude(), sqrt(14)))
self.assertTrue(Utilities.equal(Vector(-1, -2, -3).magnitude(), sqrt(14)))
def test_normalize(self):
self.assertEqual(Vector(4, 0, 0).normalize(), Vector(1, 0, 0))
self.assertEqual(Vector(1, 2, 3).normalize(), Vector(0.26726, 0.53452, 0.80178))
def test_normalized_magnitude(self):
self.assertTrue(
Utilities.equal(Vector(1, 2, 3).normalize().magnitude(), 1))
def test_dot_product(self):
v1 = Vector(1, 2, 3)
v2 = Vector(2, 3, 4)
self.assertTrue(Utilities.equal(v1.dot_product(v2), 20))
def test_cross_product(self):
v1 = Vector(1, 2, 3)
v2 = Vector(2, 3, 4)
self.assertEqual(v1.cross_product(v2), Vector(-1, 2, -1))
self.assertEqual(v2.cross_product(v1), Vector(1, -2, 1))
def test_cross_product_with_point(self):
with self.assertRaises(NotImplementedError):
Vector().cross_product(Point())
def test_reflect_flat_surface(self):
v = Vector(x=1, y=-1, z=0)
n = Vector(x=0, y=1, z=0)
r = v.reflect(normal=n)
self.assertEqual(r, Vector(x=1, y=1, z=0))
def test_reflect_slanted_surface(self):
v = Vector(0, -1, 0)
n = Vector(x=math.sqrt(2)/2, y=math.sqrt(2)/2, z=0)
r = v.reflect(normal=n)
self.assertEqual(r, Vector(x=1, y=0, z=0))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jjatinggoyal/accessbility-indicators",
"score": 3
} |
#### File: accessbility-indicators/Code/0_u_places_data_extraction.py
```python
from google.colab import drive #to retrieve data from drive
drive.mount('/content/drive/')
cd 'drive/My Drive/Data/Places'
import urllib.request
import urllib.parse
import urllib3
import json as simplejson
import json
import time
import pandas as pd
import os
import math
import sys
"""
Please do this"""
TOTAL_REQUESTS = 0
"""Please do this"""
INITIAL_REQUESTS = 0
with open('API_Keys/api_key_ac2.txt', 'r') as f:
API_KEY = f.readline().strip()
print(API_KEY)
# bboxes = {'dhanbad': (23.63, 24.06, 86.28, 86.82), 'jaipur': (26.443348, 27.868705, 74.912218, 76.286641), 'aurangabad': (24.46667, 25.11667, 83.98333, 84.73333), 'ranchi': (22.881168, 23.714176, 84.862497, 85.896009)}
bboxes = dict()
dist_coords = pd.read_csv("../OSM/district_coordinates.csv")
for i, j in dist_coords.iterrows():
bboxes[j['District_Name'].lower()] = (j['MinLat'], j['MaxLat'], j['MinLong'], j['MaxLong'])
print(bboxes)
sides = {'bus_station': 3, 'taxi_stand':3, 'train_station': 4, 'primary_school': 2, 'school': 2, 'bank': 2, 'local_government_office': 2, 'police': 2, 'hospital': 2, 'doctor': 2, 'department_store': 4, 'supermarket': 2}
def make_request_google_places(lat,lng,category,api_key,rankby,radius=None):
url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?'
values = {'key' : api_key,
'location' : lat+','+lng,
'type' : category,
'language' : 'en'
}
if rankby == 'prominence':
values['radius'] = radius
else:
values['rankby'] = 'distance'
arguments = urllib.parse.urlencode(values)
req = urllib.request.Request(url+arguments)
response = simplejson.load(urllib.request.urlopen(req))
if not response['status'] == "OK":
#print '%s' %response
raise Exception(response['status'])
return response
def make_request_google_places_nextpage(api_key, nextpagetoken):
url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?'
values = {'key' : api_key,
'pagetoken': nextpagetoken
}
arguments = urllib.parse.urlencode(values)
req = urllib.request.Request(url+arguments)
response = simplejson.load(urllib.request.urlopen(req))
if not response['status'] == "OK":
#print '%s' %response
raise Exception(response['status'])
return response
def make_request(API_KEY, path, lat = '', lng = '', amenity = '', nextpage = False, nextpagetoken = '', rankby='prominence', radius='1414'):
global TOTAL_REQUESTS
try:
if TOTAL_REQUESTS - INITIAL_REQUESTS > 4995:
print(str(TOTAL_REQUESTS - INITIAL_REQUESTS) + ' requests made. Stopping the program')
exit()
TOTAL_REQUESTS +=1
if not nextpage:
res = make_request_google_places(lat, lng, amenity, API_KEY, rankby, radius = radius)
else:
print('NEXT PAGE. WAIT FOR 5 SEC')
time.sleep(5)
print('RESUMED')
res = make_request_google_places_nextpage(API_KEY, nextpagetoken)
with open(path, 'w') as f:
json.dump(res, f)
return res
except Exception as detail:
error = str(detail)
print ('%s' % error)
if error.__contains__('INVALID_REQUEST'):
raise Exception(error)
        elif error.__contains__('OVER_QUERY_LIMIT') or error.__contains__('REQUEST_DENIED'):
print('QPS EXCEEDED. WAITING FOR 5 MINS')
time.sleep(300)
print('RESUMED')
elif error.__contains__('HTTP Error 500: Internal Server Error'):
print('INTERNAL SERVER ERROR')
elif error.__contains__('urlopen error [Errno -2]'):
print("URL OPEN ERROR")
return {'status':'INVALID'}
def fetch_data(district, address, amenity, API_KEY, side):
min_lat, max_lat = int(bboxes[district][0]*1000)/10, int(bboxes[district][1]*1000)/10
min_lon, max_lon = int(bboxes[district][2]*1000)/10, int(bboxes[district][3]*1000)/10
center_list = []
lat_lim = max_lat + (not (max_lat-min_lat)%side==0)*(side-(max_lat-min_lat)%side)
lon_lim = max_lon + (not (max_lon-min_lon)%side==0)*(side-(max_lon-min_lon)%side)
radius = str(int(float(side*math.sqrt(2))/2.0*1000))
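    # The radius above is the half-diagonal of a square grid cell: side is expressed in
    # hundredths of a degree, and multiplying by 1000 treats 0.01 degree as roughly 1 km,
    # e.g. side=2 gives the default radius of 1414 m used by make_request.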
cur_lon = float(min_lon) + float(side)/2.0
while(cur_lon<lon_lim):
cur_lat = float(min_lat) + float(side)/2.0
while (cur_lat<lat_lim):
center_list.append((str(float(cur_lat)/100.0), str(float(cur_lon)/100.0)))
cur_lat += float(side)
cur_lon = cur_lon + float(side)
n = 0
file_name = 0
print('################## '+str(len(center_list))+' GRIDS ARE PRESENT ####################')
while(n<len(center_list)):
for i in range(n, min(n+50, len(center_list))):
res = make_request(API_KEY, address + '/'+ str(file_name)+ '.json',lat = center_list[i][0], lng = center_list[i][1], amenity = amenity, radius=radius)
if not res['status'] == 'INVALID':
file_name+=1
####################################################
if res.get('next_page_token'):
nextpagetoken = res['next_page_token']
res = make_request(API_KEY, address + '/'+ str(file_name)+ '.json', nextpage=True, nextpagetoken=nextpagetoken)
if not res['status'] == 'INVALID':
file_name+=1
####################################################
if res.get('next_page_token'):
nextpagetoken = res['next_page_token']
res = make_request(API_KEY,address + '/'+ str(file_name)+ '.json', nextpage=True, nextpagetoken=nextpagetoken)
if not res['status'] == 'INVALID':
file_name+=1
print('################## '+str(min(n+50, len(center_list))) + '/' + str(len(center_list))+' GRIDS PROCESSED. WAITING FOR 5 SECS ####################')
time.sleep(5)
print('RESUMED')
n+=50
"""
Please run this"""
DISTRICT = 'patna'
CATEGORY = 'health'
AMENITY = ['doctor']
for x in AMENITY:
PATH = 'data/' + DISTRICT + '/' + CATEGORY + '/' + x
if not os.path.exists(PATH):
os.makedirs(PATH)
fetch_data(DISTRICT, PATH, x, API_KEY, sides[x])
print('Total ' + str(TOTAL_REQUESTS) + ' requests have been made')
```
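Nearby Search returns at most 20 results per page and up to three pages via `next_page_token`, which is why `fetch_data` above chases the token twice and sleeps in between. A stripped-down sketch of that control flow, using a fake fetch function instead of real API calls (no key needed):

```python
import time

def fake_nearby_search(page_token=None):
    """Stand-in for the Places request above; returns canned pages instead of calling the API."""
    pages = {
        None: {"status": "OK", "results": ["r"] * 20, "next_page_token": "tok1"},
        "tok1": {"status": "OK", "results": ["r"] * 20, "next_page_token": "tok2"},
        "tok2": {"status": "OK", "results": ["r"] * 12},
    }
    return pages[page_token]

results, token = [], None
for _ in range(3):  # the API serves at most three pages per query
    res = fake_nearby_search(token)
    results.extend(res["results"])
    token = res.get("next_page_token")
    if token is None:
        break
    time.sleep(2)  # a real next_page_token takes a moment to become valid
print(len(results), "places collected")  # 52
```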
#### File: accessbility-indicators/Code/3_linear_regression_on_pixels.py
```python
from google.colab import drive #to retrieve data from drive
drive.mount('/content/drive/')
cd 'drive/My Drive/Data/OSM'
import numpy as np
import pandas as pd
from pylab import *
from PIL import Image
from scipy import ndimage
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import os, sys
# districts=['Bangalore', 'Chennai', 'Delhi', 'Gurgaon', 'Hyderabad', 'Kolkata', 'Mumbai']
# years = ['2016', '2017', '2018', '2019']
districts=['Bokaro', 'Jamui']
years = ['2016', '2019']
# defining required functions here
'''
This function is used to prepare district image for the application of smoothing filters.
The background and builtup pixels are given value 0 and the non-built-up pixels are given value 1.
This is because the filters should perform smoothing over BU and NBU pixels only and not background.
Input:
a) original_image: The BU/NBU maps with background pixels having value 0, BU pixels having value 65, and NBU value 130
Output:
a) prepped_image: The Background and BU pixels will have value 0 and NBU pixels will have value 1.
'''
def Prepare_image_for_filters(original_image):
prepped_image = original_image//130
return prepped_image
'''
This function removes the background pixels from the 1D array of the smoothed image using original image.
A pixel is retained in the smoothed array only if it's value in original image is either 1 (for BU) or 2 (for NBU)
'''
def Remove_background_pixels(original_1D_image, smoothed_1D_image):
smooth_temp = [ smoothed_1D_image[i] for i in range(len(smoothed_1D_image)) if original_1D_image[i] > 0]
return smooth_temp
"""'''
Driver code starts here
'''
for district in districts:
print (district)
year_to_pixel_matrix = [] # this matrix stores for each year the value of all pixels
for year in years:
original_image = np.array( Image.open('BU_NBU_maps/'+district+'/'+district+'_BU_NBU_'+year+'.png') )
prepped_image_for_filters = Prepare_image_for_filters(original_image)
# Apply Convolution and gaussian filters over prepped image. All filter parameters are hyper-parameters
kernel = np.array([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
smoothed_image = ndimage.convolve( prepped_image_for_filters, kernel, mode='constant', cval=0.0)
smoothed_image = ndimage.gaussian_filter(smoothed_image, sigma=0.2, truncate=11.0, output=float)
# convert the 2D images into 1D arrays for further pixel-wise processing
original_1D_image = original_image.flatten()
smoothed_1D_image = smoothed_image.flatten()
assert(len(original_1D_image) == len(smoothed_1D_image))
smoothed_1D_image = Remove_background_pixels(original_1D_image, smoothed_1D_image)
year_to_pixel_matrix.append(smoothed_1D_image)
# transpose is taken to store pixel values in rows against the years in columns
pixel_to_year_matrix = np.array(year_to_pixel_matrix, copy=False).T
    # Applying linear regression on the values of each pixel over different years, i.e. each row of pixel_to_year_matrix
# For this, the boundary pixels of a district should be avoided as their smooth value is impacted by background pixels
relabelled_original_image = original_image//65 # 0 for background, 1 for BU, and 2 for NBU
dimensions = relabelled_original_image.shape
background_vs_non_background_image = np.sign(relabelled_original_image) # using signum function, background pixels remain 0 and non-background become 1
# using convolution filter, each non-boundary pixel inside the district will have value 9 in the mask
    boundary_identifying_kernel = np.array([[1,1,1],[1,1,1],[1,1,1]]) # this should be a 5x5 filter but we'll lose out on double boundary pixels
boundary_vs_non_boundary_mask = ndimage.convolve(background_vs_non_background_image, boundary_identifying_kernel, mode='constant', cval=0.0)
current_pixel = 0 # refers to current pixel position we check for being boundary pixel or not
# Define variables for applying linear regression
year_list_as_input = np.reshape(range(len(years)), (-1,1)) # matrix of type [[1],[2],...,[len(year)]], -1 refers to unspecified no. of rows here
# following values are found corresponding to each pixel using its value in all years
slope = []
intercept = []
cost_array = []
for j in range(dimensions[0]):
for k in range(dimensions[1]):
if (background_vs_non_background_image[j][k]): # if pixel is inside the district
if (boundary_vs_non_boundary_mask[j][k] == 9): # if pixel is not boundary pixel
linear_model = LinearRegression()
# we predict value of pixel for a given year and find best fit of linear regression on it
# so year_list_as_input is our input variable for linear regression
regression = linear_model.fit(year_list_as_input, pixel_to_year_matrix[current_pixel])
cost = np.mean((pixel_to_year_matrix[current_pixel] - linear_model.predict(year_list_as_input))**2)
cost_array.append(cost)
slope.append(round(regression.coef_[0], 4)) #coef.shape is (1,1)
intercept.append(round(regression.intercept_, 4)) #intercept.shape is (1)
current_pixel += 1
cost_array = np.array(cost_array)
print(cost_array)
# Save the cost array
os.makedirs('Cost_results_from_Regression/'+district, exist_ok = True)
# multiply each cost value by 1000 to overcome data loss from storing small values
np.savetxt('Cost_results_from_Regression/'+district+'/'+district+'_regression_cost_array.txt', cost_array*1000, fmt='%d')
# creating and saving CDFs against the cost values of pixels for each district
unique_cost_values, cost_frequencies = np.unique(cost_array, return_counts=True)
total_cost_values = (float) (cost_frequencies.sum())
cost_frequencies = cost_frequencies/total_cost_values
cdf = np.cumsum(cost_frequencies)
plt.plot(unique_cost_values,cdf,label = 'data')
# check if a CDF file already exists, since matplotlib doesn't overwrite, delete previous file
if os.path.isfile('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf'):
os.remove('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf')
savefig('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf')
plt.clf()
print("Done")
"""
for year in years:
original_image = np.array( Image.open('BU_NBU_maps/'+district+'/'+district+'_BU_NBU_'+year+'.png') )
prepped_image_for_filters = Prepare_image_for_filters(original_image)
# Apply Convolution and gaussian filters over prepped image. All filter parameters are hyper-parameters
kernel = np.array([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
smoothed_image = ndimage.convolve( prepped_image_for_filters, kernel, mode='constant', cval=0.0)
smoothed_image = ndimage.gaussian_filter(smoothed_image, sigma=0.2, truncate=11.0, output=float)
# convert the 2D images into 1D arrays for further pixel-wise processing
original_1D_image = original_image.flatten()
smoothed_1D_image = smoothed_image.flatten()
assert(len(original_1D_image) == len(smoothed_1D_image))
smoothed_1D_image = Remove_background_pixels(original_1D_image, smoothed_1D_image)
year_to_pixel_matrix.append(smoothed_1D_image)
# transpose is taken to store pixel values in rows against the years in columns
pixel_to_year_matrix = np.array(year_to_pixel_matrix, copy=False).T
# Applying linear regression on the values of each pixel over differen years i.e each row of pixel_to_year_matrix
# For this, the boundary pixels of a district should be avoided as their smooth value is impacted by background pixels
relabelled_original_image = original_image//65 # 0 for background, 1 for BU, and 2 for NBU
dimensions = relabelled_original_image.shape
background_vs_non_background_image = np.sign(relabelled_original_image) # using signum function, background pixels remain 0 and non-background become 1
# using convolution filter, each non-boundary pixel inside the district will have value 9 in the mask
boundary_identifying_kernel = np.array([[1,1,1],[1,1,1],[1,1,1]]) # this should be a 5x5 filter but we'll loose out on double boundary pixels
boundary_vs_non_boundary_mask = ndimage.convolve(background_vs_non_background_image, boundary_identifying_kernel, mode='constant', cval=0.0)
current_pixel = 0 # refers to current pixel position we check for being boundary pixel or not
# Define variables for applying linear regression
year_list_as_input = np.reshape(range(len(years)), (-1,1)) # matrix of type [[1],[2],...,[len(year)]], -1 refers to unspecified no. of rows here
# following values are found corresponding to each pixel using its value in all years
slope = []
intercept = []
cost_array = []
for j in range(dimensions[0]):
for k in range(dimensions[1]):
if (background_vs_non_background_image[j][k]): # if pixel is inside the district
if (boundary_vs_non_boundary_mask[j][k] == 9): # if pixel is not boundary pixel
linear_model = LinearRegression()
# we predict value of pixel for a given year and find best fit of linear regression on it
# so year_list_as_input is our input variable for linear regression
regression = linear_model.fit(year_list_as_input, pixel_to_year_matrix[current_pixel])
cost = np.mean((pixel_to_year_matrix[current_pixel] - linear_model.predict(year_list_as_input))**2)
cost_array.append(cost)
slope.append(round(regression.coef_[0], 4)) #coef.shape is (1,1)
intercept.append(round(regression.intercept_, 4)) #intercept.shape is (1)
current_pixel += 1
cost_array = np.array(cost_array)
print(cost_array)
# Save the cost array
os.makedirs('Cost_results_from_Regression/'+district, exist_ok = True)
# multiply each cost value by 1000 to overcome data loss from storing small values
np.savetxt('Cost_results_from_Regression/'+district+'/'+district+'_regression_cost_array.txt', cost_array*1000, fmt='%d')
# creating and saving CDFs against the cost values of pixels for each district
unique_cost_values, cost_frequencies = np.unique(cost_array, return_counts=True)
total_cost_values = (float) (cost_frequencies.sum())
cost_frequencies = cost_frequencies/total_cost_values
cdf = np.cumsum(cost_frequencies)
plt.plot(unique_cost_values,cdf,label = 'data')
# check if a CDF file already exists, since matplotlib doesn't overwrite, delete previous file
if os.path.isfile('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf'):
os.remove('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf')
plt.savefig('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf')
plt.clf()
print("Done")
```
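The cost values are written out multiplied by 1000 and rounded to integers, so any downstream analysis has to undo that scaling. A minimal sketch of reloading a saved cost array and rebuilding its CDF (the district name is only an example):
```python
import numpy as np
import matplotlib.pyplot as plt

district = 'Hardoi'  # example district; any district processed by the script above works
stored = np.loadtxt('Cost_results_from_Regression/' + district + '/' + district + '_regression_cost_array.txt')
cost_array = stored / 1000.0  # undo the x1000 integer scaling used when saving

unique_cost_values, cost_frequencies = np.unique(cost_array, return_counts=True)
cdf = np.cumsum(cost_frequencies / cost_frequencies.sum())
plt.plot(unique_cost_values, cdf)
plt.xlabel('regression cost')
plt.ylabel('CDF')
plt.show()
```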
#### File: accessbility-indicators/Code/9_cluster_urbanized_grids.py
```python
from google.colab import drive #to retrieve data from drive
drive.mount('/content/drive/')
import os
os.chdir('drive/My Drive/Data/OSM') # 'cd' is an IPython/Colab magic; os.chdir is the plain-Python equivalent
import numpy as np
import pandas as pd
from IPython.display import display
from sklearn.preprocessing import minmax_scale
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy as shc
from sklearn.cluster import AgglomerativeClustering
import sys, os
'''
This function plots the dendrogram of the hierarchical clustering
Inputs:
1) feature_vectors: feature vector of grids belonging to a particular cluster
2) title: title of the plot
3) save_filename: complete path where the image is to be stored
'''
def Plot_dendrogram(feature_vectors, title, save_filename):
plt.figure(figsize = (15,8))
plt.title(title)
plt.xlabel('Grid')
plt.ylabel('distance')
dendrogram = shc.dendrogram(shc.linkage(feature_vectors, method='ward'),
leaf_rotation = 90., # rotates the x axis labels
leaf_font_size = 8., # font size for the x axis labels
)
plt.savefig(save_filename)
# plt.show()
'''
This function makes the boxplot of a cluster using the feature vectors of the grids belonging to it
Inputs:
1) cluster_feature_vectors = feature vectors of the grids belonging to the cluster
2) title = title of the boxplot
'''
def Plot_boxplot(cluster_feature_vectors, title, save_filename):
figure, axis = plt.subplots()
axis.set_title(title)
axis.set_ylim(-0.1, 1.1)
axis.boxplot(cluster_feature_vectors, labels = ['#3-ways', '#4-ways', 'WR', 'Urb_footprint'])
axis.tick_params(labelsize = 14)
plt.savefig(save_filename, bbox_inches = 'tight', pad_inches = 0)
'''
This function creates a combined file for each district storing road indicators, urban indicators, and class labels
Inputs:
1) districts = list of all districts
2) class_information_dataframe = A dataframe of combined districts having columns ['District_name','Grid_number','Class_label']
3) year = The year for which data of district is stored
Outputs:
1) Separate file is written for each district and each year holding all urban and road indicators at grid level along with class label
Rejected grids (non-urbanized grids) are assigned class label 0
ClassI-ClassV grids (selected urbanized grids) are assigned integer values from 1-5 respectively
'''
def Save_grid_level_indicators(districts, class_information_dataframe, year):
final_directory = 'Grid_wise_all_indicators'
grouped_dataframe_class_labels = class_information_dataframe.groupby(class_information_dataframe.District_name)
for district in districts:
os.makedirs(final_directory+'/'+district, exist_ok = True)
road_indicators_filename = 'Grid_wise_road_indicators/'+district+'_road_indicators.csv'
road_indicators_df = pd.read_csv(road_indicators_filename)
urban_indicators_filename = 'Grid_wise_urban_indicators/'+district+'/'+district+'_urban_indicators_'+year+'.csv'
urban_indicators_df = pd.read_csv(urban_indicators_filename)
# merge both road and urban indicators in a common dataframe
merged_dataframe = pd.merge(road_indicators_df, urban_indicators_df, left_on='Grid_number', right_on='Grid_number', how='inner')
dataframe_class_labels = grouped_dataframe_class_labels.get_group(district)
final_dataframe = pd.merge(merged_dataframe, dataframe_class_labels, left_on=['Grid_number','District_name'], right_on=['Grid_number','District_name'], how='left')
final_dataframe.sort_values(by=['Grid_number'], inplace=True)
final_dataframe['Class_label'] = final_dataframe['Class_label'].fillna(0)
final_dataframe.to_csv(final_directory+'/'+district+'/'+district+'_'+year+'_all_indicators.csv', index=False)
#print("\n Grid-level indicators successfully written to file")
'''
Driver code begins here for the year 2019
'''
#print("\n *********** Categorizing Urbanized Grids ***********\n")
# districts = ['Bangalore','Chennai','Delhi','Gurgaon','Hyderabad','Kolkata','Mumbai']
districts = ['Hardoi', 'Jamui', 'Bokaro']
year = '2019'
combined_dataframe_all_districts = pd.DataFrame()
for district in districts:
road_indicators_filename = 'Grid_wise_road_indicators/'+district+'_road_indicators.csv'
road_indicators_df = pd.read_csv(road_indicators_filename)
urban_indicators_filename = 'Grid_wise_urban_indicators/'+district+'/'+district+'_urban_indicators_'+year+'.csv'
urban_indicators_df = pd.read_csv(urban_indicators_filename)
# merge both road and urban indicators in a common dataframe
merged_dataframe = pd.merge(road_indicators_df, urban_indicators_df, left_on='Grid_number', right_on='Grid_number', how='inner')
combined_dataframe_all_districts = combined_dataframe_all_districts.append(merged_dataframe)
# Find only sufficiently urbanized grids for further analysis (i.e. urban and periurban)
selected_grid_types = ['Urban','PeriUrban']
selected_grids_dataframe = combined_dataframe_all_districts.loc[combined_dataframe_all_districts['Grid_type'].isin(selected_grid_types)].copy()
# select only those columns of the dataframe on which clustering is to be implemented
clustering_dataframe = selected_grids_dataframe[['Three_ways','Four_ways','Walkability_ratio','Urban_percentage']].copy()
clustering_feature_vectors = np.array(clustering_dataframe)
# normalize the feature vector of each grid
norm_feature_vectors = minmax_scale(clustering_feature_vectors, feature_range=(0,1), axis=0)
# add 1 and take log of columns with a huge difference between their minimum and maximum values
norm_feature_vectors[:, 0:2] = norm_feature_vectors[:, 0:2] + 1
norm_feature_vectors[:, 0:2] = np.log2(norm_feature_vectors[:, 0:2])
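# Explanatory note: min-max scaling puts every column in [0,1]; adding 1 moves the first two
# columns (#3-ways, #4-ways) into [1,2], and log2 maps them back onto [0,1] (log2(1)=0, log2(2)=1).
# The concave log spreads out the many small scaled counts and compresses the top of the range,
# so a few grids with very large intersection counts do not dominate the Euclidean distances
# used by the Ward linkage below.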
global_mean = norm_feature_vectors.mean(axis=0)
print("\nThe global mean in order #3way | #4way | WR | UF \n",global_mean,"\n")
save_fig_directory = "Visualization_Results/Clustering_figures_2019"
os.makedirs(save_fig_directory, exist_ok=True)
# plot the dendrogram of the hierarchical clustering
title = 'Dendrogram of hierarchical clustering of all selected grids '+year
Plot_dendrogram(norm_feature_vectors, title, save_fig_directory+"/Dendrogram_all_grids_2019")
'''
This is where manual interpretation will start working!
'''
# From visual inspection of the dendrogram we choose where to cut it; here the grids are
# split into 5 clusters and every data point is assigned to one of them
number_of_clusters = 5
agglomerative_clusters = AgglomerativeClustering(n_clusters = number_of_clusters, affinity = 'euclidean', linkage = 'ward')
cluster_grid_mapping = agglomerative_clusters.fit_predict(norm_feature_vectors)
clusters_feature_vectors = [] # this list stores the feature vector of all grids belongs to all clusters
clusters_grid_info = [] # this list stores complete information of all grids belongs to all clusters
for i in range(number_of_clusters):
clusters_feature_vectors.append( norm_feature_vectors[ cluster_grid_mapping == i ] )
clusters_grid_info.append( selected_grids_dataframe[ cluster_grid_mapping == i ].copy() )
# at this point, clusters_feature_vectors[i] contains the list of feature vector of all grids belonging to cluster i
# at this point, clusters_grid_info[i] contains the list of information of all grids belonging to cluster i
# create boxplots of each of the clusters
for cluster_id in range(len(clusters_feature_vectors)):
title = year+' Cluster-'+str(cluster_id)+' size: '+str(clusters_feature_vectors[cluster_id].shape[0])
save_filename = save_fig_directory+"/Boxplot_Major_cluster"+str(cluster_id)+"_2019"
Plot_boxplot(clusters_feature_vectors[cluster_id], title, save_filename)
#plt.show()
# plot the dendrogram of each of the major clusters
for cluster_id in range(len(clusters_feature_vectors)):
title = 'Dendrogram of hierarchical clustering of major cluster-'+str(cluster_id)+' '+year
save_filename = save_fig_directory+"/Dendrogram_Major_cluster"+str(cluster_id)+"_2019"
Plot_dendrogram(clusters_feature_vectors[cluster_id], title, save_filename)
# Print average values of each major cluster
for cluster_id in range(len(clusters_feature_vectors)):
print("\n**********Major Cluster ",cluster_id,": ***********")
print(clusters_feature_vectors[cluster_id].mean(axis=0))
print("\n")
'''
Based on manual interpretation, we will assign class 1-5 as labels to all urbanized grids.
Some clusters will be merged and others will be re-labelled
Major cluster 0:
sub-cluster 0 = Class-I (1)
sub-cluster 1 = Class-V (5)
sub-cluster 2 = Class-III (3)
sub-cluster 3 = Class-IV (4)
sub-cluster 4 = Class-II (2)
'''
# assigning class labels to major clusters (sub-cluster index -> class label, as listed above)
sub_cluster_to_class = {0: 1, 1: 5, 2: 3, 3: 4, 4: 2}
for i in range(len(clusters_feature_vectors)):
clusters_grid_info[i]["Class_label"] = [sub_cluster_to_class[i]] * len(clusters_grid_info[i])
# Merge desired clusters as per their labels
final_clusters = []
# Class 1
final_clusters.append( clusters_feature_vectors[0].copy() )
# Class 2
final_clusters.append( clusters_feature_vectors[4].copy() )
# Class 3
final_clusters.append( clusters_feature_vectors[2].copy() )
# Class 4
final_clusters.append( clusters_feature_vectors[3].copy() )
# Class 5
final_clusters.append( clusters_feature_vectors[1].copy() )
# make boxplots of final clusters
for cluster_id in range(len(final_clusters)):
title = year+' Class-'+str(cluster_id+1)+' size: '+str(final_clusters[cluster_id].shape[0])
save_filename = save_fig_directory+"/Final_Boxplot_class"+str(cluster_id+1)+"_2019"
Plot_boxplot(final_clusters[cluster_id], title, save_filename)
'''
Saving the centroids of each cluster to map the grids of year 2016
'''
Class1_mean_2019 = final_clusters[0].mean(axis=0)
Class2_mean_2019 = final_clusters[1].mean(axis=0)
Class3_mean_2019 = final_clusters[2].mean(axis=0)
Class4_mean_2019 = final_clusters[3].mean(axis=0)
Class5_mean_2019 = final_clusters[4].mean(axis=0)
print("\n\n********** The Mean Values Of Final Clusters **********")
print("\n Class 1: ", Class1_mean_2019)
print("\n Class 2: ", Class2_mean_2019)
print("\n Class 3: ", Class3_mean_2019)
print("\n Class 4: ", Class4_mean_2019)
print("\n Class 5: ", Class5_mean_2019)
'''
Writing the class labels to the file of urban indicators for each district
'''
final_dataframe = pd.DataFrame()
final_column_list = ['District_name','Grid_number','Class_label']
for cluster_id in range(len(clusters_grid_info)):
curr_dataframe = clusters_grid_info[cluster_id][final_column_list].copy()
final_dataframe = final_dataframe.append( curr_dataframe )
Save_grid_level_indicators(districts, final_dataframe, year)
print("\n**** Clustering complete for year: ",year," ****\n")
'''
Driver code begins here for the year 2016.
It uses the centers of 2019 clusters to assign each grid in 2016 an appropriate class label
'''
# districts = ['Bangalore','Chennai','Delhi','Gurgaon','Hyderabad','Kolkata','Mumbai']
districts = ['Dhanbad']
year = '2016'
combined_dataframe_all_districts = pd.DataFrame()
for district in districts:
road_indicators_filename = 'Grid_wise_road_indicators/'+district+'_road_indicators.csv'
road_indicators_df = pd.read_csv(road_indicators_filename)
urban_indicators_filename = 'Grid_wise_urban_indicators/'+district+'/'+district+'_urban_indicators_'+year+'.csv'
urban_indicators_df = pd.read_csv(urban_indicators_filename)
# merge both road and urban indicators in a common dataframe
merged_dataframe = pd.merge(road_indicators_df, urban_indicators_df, left_on='Grid_number', right_on='Grid_number', how='inner')
combined_dataframe_all_districts = combined_dataframe_all_districts.append(merged_dataframe)
# Find only sufficiently urbanized grids for further analysis (i.e. urban and periurban)
selected_grid_types = ['Urban','PeriUrban']
selected_grids_dataframe = combined_dataframe_all_districts.loc[combined_dataframe_all_districts['Grid_type'].isin(selected_grid_types)].copy()
# select only those columns of the dataframe on which clustering is to be implemented
clustering_dataframe = selected_grids_dataframe[['Three_ways','Four_ways','Walkability_ratio','Urban_percentage']].copy()
clustering_feature_vectors = np.array(clustering_dataframe)
# normalize the feature vector of each grid
norm_feature_vectors = minmax_scale(clustering_feature_vectors, feature_range=(0,1), axis=0)
# add 1 and take log of columns with a huge difference between their minimum and maximum values
norm_feature_vectors[:, 0:2] = norm_feature_vectors[:, 0:2] + 1
norm_feature_vectors[:, 0:2] = np.log2(norm_feature_vectors[:, 0:2])
# Class1_mean_2019
# Class2_mean_2019
# Class3_mean_2019
# Class4_mean_2019
# Class5_mean_2019
final_labels_2016 = []
for grid_id in range(len(norm_feature_vectors)):
dist_class1 = np.linalg.norm(norm_feature_vectors[grid_id] - Class1_mean_2019)
dist_class2 = np.linalg.norm(norm_feature_vectors[grid_id] - Class2_mean_2019)
dist_class3 = np.linalg.norm(norm_feature_vectors[grid_id] - Class3_mean_2019)
dist_class4 = np.linalg.norm(norm_feature_vectors[grid_id] - Class4_mean_2019)
dist_class5 = np.linalg.norm(norm_feature_vectors[grid_id] - Class5_mean_2019)
Min_distance = min( dist_class1, dist_class2, dist_class3, dist_class4, dist_class5 )
if(Min_distance == dist_class1):
final_labels_2016.append(1)
elif(Min_distance == dist_class2):
final_labels_2016.append(2)
elif(Min_distance == dist_class3):
final_labels_2016.append(3)
elif(Min_distance == dist_class4):
final_labels_2016.append(4)
elif(Min_distance == dist_class5):
final_labels_2016.append(5)
selected_grids_dataframe['Class_label'] = final_labels_2016
final_column_list = ['District_name','Grid_number','Class_label']
final_dataframe = selected_grids_dataframe[final_column_list].copy()
Save_grid_level_indicators(districts, final_dataframe, year)
print("**** Clustering complete for year: ",year," ****\n")
print("\n#### Check Grid_wise_all_indicators directory for the results!! ####\n")
``` |
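As a side note, the per-grid distance loop above could also be written in a vectorized form. A minimal sketch, assuming the five `Class*_mean_2019` centroids and `norm_feature_vectors` from the script are in scope:
```python
import numpy as np

# Rows of this matrix are the 2019 centroids; row 0 corresponds to class 1, ..., row 4 to class 5
centroids_2019 = np.stack([Class1_mean_2019, Class2_mean_2019, Class3_mean_2019,
                           Class4_mean_2019, Class5_mean_2019])
# Euclidean distance from every 2016 grid to every centroid -> shape (n_grids, 5)
distances = np.linalg.norm(norm_feature_vectors[:, None, :] - centroids_2019[None, :, :], axis=2)
# Nearest centroid per grid, shifted to the 1-5 class labelling used above
final_labels_2016 = (distances.argmin(axis=1) + 1).tolist()
```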
{
"source": "jjatria/knight",
"score": 3
} |
#### File: python/knight/value.py
```python
import knight
class Value():
@classmethod
def parse(cls, stream):
if not isinstance(stream, knight.Stream):
stream = knight.Stream(stream)
while stream.matches(r'(?:#.*?(\n|\Z)|\A[\s()\[\]{}:])*'):
pass
for subcls in [Number, Text, Boolean, Identifier, Null, Ast]:
if None != (value := subcls.parse(stream)):
return value
@classmethod
def create(cls, data):
if isinstance(data, Value):
return data
elif isinstance(data, str):
return Text(data)
elif isinstance(data, bool):
return Boolean(data)
elif isinstance(data, int):
return Number(data)
elif data == None:
return Null(None)
else:
raise TypeError(f"unknown value kind '{type(data)}'")
def __init__(self, data):
if type(self) == Value:
raise RuntimeError("nope")
self.data = data
def __repr__(self):
return f"Value({repr(self.data)})"
def run(self):
return self
def __str__(self):
return str(self.run().data)
def __int__(self):
return int(self.run().data)
def __bool__(self):
return bool(self.run().data)
def __add__(self, rhs):
return Number(int(self) + int(rhs))
def __sub__(self, rhs):
return Number(int(self) - int(rhs))
def __mul__(self, rhs):
return Number(int(self) * int(rhs))
def __truediv__(self, rhs): # Python 3 dispatches '/' to __truediv__; Knight division is integral
return Number(int(self) // int(rhs))
def __mod__(self, rhs):
return Number(int(self) % int(rhs))
def __pow__(self, rhs):
return Number(int(self) ** int(rhs))
def __lt__(self, rhs):
return int(self) < (int(rhs))
def __eq__(self, rhs):
return type(self) == type(rhs) and self.data == rhs.data
class Number(Value):
@classmethod
def parse(cls, stream):
if match := stream.matches(r'\d+'):
return Number(int(match))
class Text(Value):
@classmethod
def parse(cls, stream):
if match := stream.matches(r'(["\'])((?:.|\n)*?)(\1|\Z)'):
if match[0] not in ['"', '\''] or match[0] != match[-1]:
# note that the stream is still advanced...
raise ValueError("unterminated string encountered: " + match)
else:
return Text(match[1:-1])
def __add__(self, rhs):
return Text(str(self) + str(rhs))
def __mul__(self, rhs):
return Text(str(self) * int(rhs))
def __lt__(self, rhs):
return str(self) < str(rhs)
class Boolean(Value):
@classmethod
def parse(cls, stream):
if match := stream.matches(r'[TF][A-Z]*'):
return Boolean(match[0] == 'T')
def __str__(self):
return "true" if self.data else "false"
class Null(Value):
@classmethod
def parse(cls, stream):
if match := stream.matches(r'N[A-Z]*'):
return Null(None)
def __str__(self):
return "null"
class Identifier(Value):
@classmethod
def parse(cls, stream):
if match := stream.matches(r'[a-z_][a-z0-9_]*'):
return Identifier(match)
def run(self):
return knight.ENVIRONMENT[self.data]
class Ast(Value):
@classmethod
def parse(cls, stream):
if func := knight.Function.known.get(str(stream)[0]):
stream.matches(r'[A-Z]+|.')
return Ast(func, [Value.parse(stream) for _ in range(func.arity)])
def __init__(self, func, args):
self.func = func
self.args = args
def __repr__(self):
return f"Value({repr(self.func)}, {repr(self.args)})"
def run(self):
return self.func(*self.args)
``` |
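A hedged usage sketch of this module (it assumes the sibling `knight.Stream` and `knight.Function` modules from the same package are importable and that `Function.known` registers the usual Knight operators):
```python
from knight.value import Value, Number, Text

number = Value.parse('123')            # -> Number(123)
print(int(number) + 1)                 # 124

greeting = Value.parse('"hello"')      # -> Text('hello')
print(greeting + Text(' world'))       # Text concatenation via __add__ -> 'hello world'

expr = Value.parse('* 3 4')            # -> Ast node, assuming '*' is registered in Function.known
print(int(expr.run()))                 # 12
```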
{
"source": "JJavier98/TFG-Bebop-YOLO",
"score": 2
} |
#### File: TFG-Bebop-YOLO/src/main2.0.py
```python
from __future__ import division, print_function, absolute_import
from timeit import time
import os
import sys
import warnings
import cv2
import matplotlib.pyplot as plt
import numpy as np
import rospy
from PIL import Image
from cnn.yolo import YOLO
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
from video_reader.videocaptureasync import VideoCaptureAsync
from bebop.reactivebebop import ReactiveBebop
from video_reader.videoreader import VideoReader
import imutils.video
import argparse
current_path=os.path.dirname(os.path.abspath(__file__))
# build the command-line argument parser
parser = argparse.ArgumentParser(description='Bebop Tracking Script')
parser.add_argument('--path',default='/dev/video0', help='path del video a usar.\n \
\'bebop_cam\' para usar el dron.\
\n Si se deja vacio se tomara /dev/video0')
parser.add_argument("--sync", default=False, help='Si vamos a abrir un vídeo marcar a True')
parser.add_argument("--interval", default=3, help='Cada cuántos fotogramas hacemos detección')
parser.add_argument("--res", default='original', help='resolucion del video indicado')
parser.add_argument("--output", default=None, help="Path y nombre del archivo donde guardaremos la salida del tracker")
parser.add_argument("--fps_out", default=5, help="FPS del vídeo de salida. Más fps -> cámara rápida. Menos fps -> cámara lenta")
warnings.filterwarnings('ignore')
def main(yolo):
try:
args = parser.parse_args()
path = args.path
res = args.res
output = args.output
sync = args.sync
interval = int(args.interval)
except:
args = rospy.myargv(argv=sys.argv)
path = args[1]
res = args[2]
output = None if args[3]=='None' else args[3]
sync = True if args[4]=='True' else False
interval = int(args[5])
if res=='original':
print('Debe indicar la resolución: width,heigh')
exit()
if res!='original':
res = res.split(',')
res = (int(res[0]), int(res[1]))
if interval<=0: interval = 1
max_track_ls=[0]
min_fps_ls=[144]
max_fps_ls=[0]
fps_list_ls=[[]]
ntracks_list_ls=[[]]
# Definition of the parameters
max_cosine_distance = 0.3
nn_budget = None
nms_max_overlap = 1.0
# Deep SORT
model_filename = current_path+'/../model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename,batch_size=1)
metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
tracker1 = Tracker(metric)
trackers=[tracker1]
bebop = ReactiveBebop(res)
bebop.start()
real_path = path
if path=='bebop_cam':
real_path='bebop'
video_reader = VideoReader(src = real_path, res = res, write_path = output, sync = sync)
readers=[video_reader]
titulos=[path]
if path=='bebop_cam':
max_track_ls.append(0)
min_fps_ls.append(144)
max_fps_ls.append(0)
fps_list_ls.append([])
ntracks_list_ls.append([])
metric2 = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
tracker2 = Tracker(metric2)
trackers.append(tracker2)
cam_reader = VideoReader(src = '/dev/video0', res = (426,240))
cam_reader.start()
readers.append(cam_reader)
titulos=['bebop','cam']
if not sync: readers[0].start()
contador=0
con_cam=False
ret1 = True
ret2 = True
ccc=0
confirmed=False
while ret1 and ret2:
ret1, frame1 = readers[0].read() # frame shape 640*480*3
frames=[frame1]
if not ret1:
break
if path=='bebop_cam':
ret2, frame2 = readers[1].read() # frame shape 640*480*3
frames.append(frame2)
if not ret2:
break
if not sync or contador==interval:
contador=0
for i, reader, frame, titu, tracker, fps_list, ntracks_list in zip([0,1],readers,frames,titulos,trackers,fps_list_ls,ntracks_list_ls):
reader.setIniTime()
image = Image.fromarray(frame[...,::-1]) # bgr to rgb
#boxs = yolo.detect_image(image)[0]
#confidence = yolo.detect_image(image)[1]
if ccc%3==0 or not confirmed:
boxs,confidence = yolo.detect_image(image)
features = encoder(frame,boxs)
detections = [Detection(bbox, confidence, feature) for bbox, confidence, feature in zip(boxs, confidence, features)]
# Run non-maxima suppression.
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# Call the tracker
tracker.update(detections)
tracker.predict()
confirmed_tracks = []
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
confirmed=False
continue
confirmed=True
bbox = track.to_tlbr()
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2)
cv2.putText(frame, str(track.track_id),(int(bbox[0]), int(bbox[1])),0, 5e-3 * 200, (0,255,0),2)
confirmed_tracks.append(track)
if track.track_id > max_track_ls[i]:
max_track_ls[i]=track.track_id
if titu=='bebop':
bebop.update_tracks(confirmed_tracks)
reader.write(frame)
elif titu=='cam':
bebop.update_cam_tracks(confirmed_tracks)
#if ccc%3==0:
#for det in detections:
# bbox = det.to_tlbr()
# score = "%.2f" % round(det.confidence * 100, 2)
# #cv2.rectangle(frame,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,0,0), 2)
# cv2.putText(frame, score + '%', (int(bbox[0]), int(bbox[3])), 0, 5e-3 * 130, (0,255,0),2)
cv2.imshow(titu, frame)
fps = reader.getFPS()
if fps < min_fps_ls[i]:
min_fps_ls[i]=fps
elif fps>max_fps_ls[i]:
max_fps_ls[i]=fps
fps_list.append(fps)
ntracks_list.append(len(tracker.tracks))
#print("FPS = %f"%(fps))
ccc+=1
# Press Q to stop!
if cv2.waitKey(1) & 0xFF == ord('q') or not bebop.menu_alive():
break
contador+=1
for reader in readers:
reader.stopRead() # termina la lectura del video
reader.releaseWrite() # termina la escritura de video
bebop.stop()
cv2.destroyWindow(real_path)
if path=='bebop_cam':
cv2.destroyWindow('cam')
for titu, max_track, min_fps, max_fps, fps_list, ntracks_list in zip(titulos,max_track_ls,min_fps_ls,max_fps_ls,fps_list_ls,ntracks_list_ls):
print('') # for cleaner output formatting
print(titu) # for cleaner output formatting
print('Max FPS: '+str(max_fps))
print('Min FPS: '+str(min_fps))
print('Mean FPS: '+str(sum(fps_list)/len(fps_list)))
print('Max track: '+str(max_track))
if output!=None:
number = 1
try:
print(current_path+'/../output/'+output+'.txt')
f = open(current_path+'/../output/'+output+'.txt', "r")
number = int(f.read().split('*')[-2])+1
f.close()
except: pass
try:
f = open(current_path+'/../output/'+output+'.txt', "a")
f.write('Ejecución: '+str(number)+'\n')
f.write('Título: '+str(titu)+'\n')
f.write('res: '+str(res)+'\n')
f.write('Max FPS: '+str(max_fps)+'\n')
f.write('Min FPS: '+str(min_fps)+'\n')
f.write('Mean FPS: '+str(sum(fps_list)/len(fps_list))+'\n')
f.write('Max track: '+str(max_track)+'\n')
f.write('Interval: '+str(interval)+'\n')
f.write('*'+str(number)+'*\n')
f.close()
except: pass
fig, (ax1, ax2) = plt.subplots(2)
fig.suptitle(titu)
ax1.plot(fps_list)
ax1.set_ylabel('fps')
ax1.set_xlabel('frame')
ax1.set_title('fps per frame')
ax2.plot(ntracks_list)
ax2.set_ylabel('tracks')
ax2.set_xlabel('frame')
ax2.set_title('tracks per frame')
plt.show()
if __name__ == '__main__':
main(YOLO())
``` |
{
"source": "JJavier98/TFG-Dron-de-Vigilancia",
"score": 2
} |
#### File: Deep Sort/src/humanclassifier.py
```python
import pickle as cPickle # cPickle is Python 2 only; the pickle module works on both 2 and 3
import cv2
import time
import numpy as np
from itertools import compress
#from tensorflow.contrib import keras
import keras
import tensorflow as tf
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
class HumanClassifier:
def __init__(self, path, alg):
self.histogram_equalization = cv2.createCLAHE(clipLimit=5, tileGridSize=(8, 8))
self.descriptor = cv2.HOGDescriptor()
self.alg = alg
self.get_model_by_alg(path)
def get_model_by_alg(self, path):
if self.alg == 'DNN':
self.classifier, self.train_mean, self.train_std = load_keras_model(path)
if self.alg == 'SVM':
self.classifier = load_pkl_model(path)
def preprocess_by_alg(self, frame, bbs):
if self.alg == 'DNN':
return self.dnn_img_preprocess(frame, bbs)
if self.alg == 'SVM':
return self.svm_img_preprocess(frame, bbs)
def predict_by_alg(self, data):
if self.alg == 'DNN':
return self.classifier.predict(data, verbose=1, batch_size=128).argmax(axis=-1)
if self.alg == 'SVM':
return self.classifier.predict(data)
def filter_human_bb(self, frame, bbs):
start_time = time.time()
if len(bbs) == 0:
return []
data = self.preprocess_by_alg(frame, bbs)
print("Filter preprocess:" + str(time.time() - start_time))
if len(bbs) == 1:
data.reshape(1, -1)
prediction = self.predict_by_alg(data)
# Return filtered detections (Human detections)
print("Filter prediction:" + str(time.time()-start_time))
return list(compress(bbs, prediction))
def svm_img_preprocess(self, frame, bbs):
data = []
for bb in bbs:
# get bounding box coordinates
x, y, w, h = bb
# crop image to get detection
img = frame[y:y + h, x:x + w]
# Histogram equalization and resizing
img = self.histogram_equalization.apply(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY))
img = cv2.resize(img, (64, 128))
# get image descriptor
img_d = self.descriptor.compute(img)
data.append(np.concatenate(img_d))
data = np.array(data)
return data
def dnn_img_preprocess(self, frame, bbs):
data = []
for bb in bbs:
# get bounding box coordinates
x, y, w, h = bb
# crop image to get detection
img = frame[y:y + h, x:x + w]
# cv2.imwrite("../datasets/detections/" + str(uuid.uuid4()) + ".png", img)
img = cv2.resize(img, (64, 128))
# Histogram equalization and resizing
img_YCrCb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
#cl1 = self.histogram_equalization.apply(img_YCrCb[:, :, 0])
#img_YCrCb[:, :, 0] = cl1
img_RGB = cv2.cvtColor(img_YCrCb, cv2.COLOR_YCrCb2RGB)
img_prep = cv2.copyMakeBorder(img_RGB, 0, 0, 32, 32, cv2.BORDER_CONSTANT)
data.append(img_prep)
data = np.array(data)
data = (data - self.train_mean) / (self.train_std + 0.000001)
return data
def load_pkl_model(path):
with open(path, 'rb') as fid:
return cPickle.load(fid)
def load_keras_model(path):
model = keras.models.load_model(path)
file = open(path + '_norm', "r")
for line in file:
fields = line.split(",")
train_mean = fields[0]
train_std = fields[1]
return model, float(train_mean), float(train_std)
```
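A hedged usage sketch of the class above (the model path, frame, and boxes are placeholders; the 'SVM' branch expects a pickled scikit-learn classifier, while 'DNN' expects a Keras model plus the accompanying `<path>_norm` file with the training mean and std):
```python
import cv2
from humanclassifier import HumanClassifier

clf = HumanClassifier('models/human_svm.pkl', 'SVM')       # hypothetical model path
frame = cv2.imread('frame.png')                            # hypothetical input frame (BGR)
candidate_boxes = [(10, 20, 64, 128), (200, 50, 70, 140)]  # (x, y, w, h) boxes from a detector

human_boxes = clf.filter_human_bb(frame, candidate_boxes)
print(human_boxes)                                         # only the boxes classified as human
```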
#### File: Deep Sort/src/imgconverter.py
```python
from __future__ import print_function
import roslib
roslib.load_manifest('msgs_to_cv2')
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class image_converter:
def __init__(self):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/bebop/image_raw",Image,self.callback)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
cv2.imshow("hola", cv_image)
cv2.waitKey(3)
def main(args):
ic = image_converter()
rospy.init_node('image_converter', anonymous=True)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
```
#### File: Deep Sort/src/viewer.py
```python
from threading import Thread
import cv2
import numpy as np
import time
import Queue
class VideoViewer:
def __init__(self, shape=(1280,720), caption="Viewer"):
self.caption = caption
self.stopped = False
self.shape = shape
self.frame = np.zeros(shape+(3,), np.uint8)
self.color = (0, 0, 0)
self.text_color = (255,255,255)
self.text_thickness = 1
self.thickness = 1
def run(self, update_fun):
terminate = update_fun(self.stopped)
while not terminate and not self.stopped:
if cv2.waitKey(1) == ord("q"):
self.stopped = True
cv2.imshow(self.caption, self.frame)
terminate = update_fun(self.stopped)
cv2.waitKey(1)
cv2.destroyWindow(self.caption)
def set_frame(self, frame):
self.frame = frame
def start(self):
Thread(target=self.run, args=()).start()
return self
def show(self):
while not self.stopped:
cv2.imshow(self.caption, self.frame)
if cv2.waitKey(1) == ord("q"):
self.stopped = True
if hasattr(self, 'output_video'): # no video writer is set up in __init__, so guard the release
self.output_video.release()
self.frame[:] = 0
cv2.destroyWindow(self.caption)
cv2.waitKey(1)
cv2.imshow(self.caption, self.frame)
def stop(self):
self.stopped = True
def draw_rectangle(self, x, y, w, h, label=None):
x, y, w, h = int(x), int(y), int(w), int(h)
cv2.rectangle(self.frame, (x, y), (x + w, y + h), self.color, self.thickness)
if label is not None:
text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, self.thickness)
center = x + 5, y - text_size[0][1] + 5
pt2 = x + 10 + text_size[0][0], y - 10 - text_size[0][1]
cv2.rectangle(self.frame, (x, y), pt2, self.color, -1)
cv2.putText(self.frame, label, center, cv2.FONT_HERSHEY_PLAIN, 1, self.text_color, self.text_thickness)
``` |
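A hedged usage sketch of `VideoViewer.run` (the capture source is a placeholder; `update_fun` receives the viewer's `stopped` flag and returns True to terminate):
```python
import cv2
from viewer import VideoViewer

viewer = VideoViewer(shape=(1280, 720), caption="Tracking")
capture = cv2.VideoCapture(0)  # hypothetical camera index

def update(stopped):
    ok, frame = capture.read()
    if stopped or not ok:
        return True                                    # signal run() to terminate
    viewer.set_frame(frame)
    viewer.draw_rectangle(100, 100, 80, 160, label="person 1")
    return False

viewer.run(update)
capture.release()
```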
{
"source": "jjavier-bm/conkit",
"score": 3
} |
#### File: core/tests/test_distancefile.py
```python
import unittest
from conkit.core.distogram import Distogram
from conkit.core.distancefile import DistanceFile
class TestDistanceFile(unittest.TestCase):
def test_original_file_format(self):
distance_file = DistanceFile("test")
distance_file.original_file_format = "pdb"
distogram = Distogram("test")
distance_file.add(distogram)
self.assertTrue(distogram in distance_file.child_list)
self.assertEqual("pdb", distogram.original_file_format)
```
#### File: core/tests/test_distance.py
```python
import unittest
import numpy as np
from conkit.core.distance import Distance
from conkit.core.distogram import Distogram
class TestDistance(unittest.TestCase):
def test_predicted_distance_1(self):
distance = Distance(1, 25, (0.15, 0.45, 0.25, 0.05, 0.1), ((0, 4), (4, 6), (6, 8), (8, 10), (10, np.inf)))
self.assertEqual(distance.max_score, 0.45)
self.assertTupleEqual(distance.predicted_distance_bin, (4, 6))
self.assertEqual(distance.predicted_distance, 5)
def test_predicted_distance_2(self):
distogram = Distogram('test')
distogram.original_file_format = 'pdb'
distance = Distance(36, 86, (1,), ((6.589181, 6.589181),), 0.934108)
distogram.add(distance)
self.assertEqual(6.589181, distance.predicted_distance)
def test_predicted_distance_3(self):
distance = Distance(2, 3, (0.2, 0.3, 0.3, 0.2), ((0, 4), (4, 6), (6, 8), (8, np.inf)))
self.assertEqual(distance.max_score, 0.3)
self.assertTupleEqual(distance.predicted_distance_bin, (4, 6))
self.assertEqual(distance.predicted_distance, 5)
def test_get_probability_within_distance_1(self):
distance = Distance(1, 25, (0.15, 0.45, 0.25, 0.05, 0.1), ((0, 4), (4, 6), (6, 8), (8, 10), (10, np.inf)))
self.assertEqual(distance.raw_score, 0.85)
self.assertEqual(distance.get_probability_within_distance(5), 0.375)
self.assertEqual(distance.get_probability_within_distance(8), 0.85)
self.assertEqual(distance.get_probability_within_distance(10), 0.9)
self.assertEqual(distance.get_probability_within_distance(25), 0.999999969409768)
self.assertEqual(distance.get_probability_within_distance(np.inf), 1)
self.assertEqual(distance.get_probability_within_distance(0), 0)
with self.assertRaises(ValueError):
distance.get_probability_within_distance(-5)
def test_get_probability_within_distance_2(self):
distogram = Distogram('test')
distogram.original_file_format = 'pdb'
distance = Distance(36, 86, (1,), ((6.589181, 6.589181),), 0.934108)
distogram.add(distance)
self.assertEqual(1, distance.get_probability_within_distance(8))
self.assertEqual(0, distance.get_probability_within_distance(5))
def test_reshape_bins_1(self):
distance = Distance(1, 25, (0.15, 0.45, 0.25, 0.05, 0.1), ((0, 4), (4, 6), (6, 8), (8, 10), (10, np.inf)))
new_bins = ((0, 2), (2, 8), (8, np.inf))
distance.reshape_bins(new_bins)
self.assertEqual(distance.raw_score, 0.85)
self.assertEqual(round(distance.get_probability_within_distance(8), 2), 0.85)
self.assertTupleEqual(new_bins, distance.distance_bins)
self.assertTupleEqual((0.075, 0.775, 0.15000000000000002), distance.distance_scores)
def test_reshape_bins_2(self):
distogram = Distogram('test')
distogram.original_file_format = 'pdb'
distance = Distance(36, 86, (1,), ((6.589181, 6.589181),), 0.934108)
distogram.add(distance)
with self.assertRaises(ValueError):
distance.reshape_bins(((0, 1), (1, 10), (10, np.inf)))
def test__assert_valid_bins_1(self):
distance = Distance(1, 25, (0.15, 0.45, 0.25, 0.05, 0.1), ((0, 4), (4, 6), (6, 8), (8, 10), (10, np.inf)))
with self.assertRaises(ValueError):
distance._assert_valid_bins(((0, 1), (np.inf, 10), (10, np.inf)))
def test__assert_valid_bins_2(self):
distance = Distance(1, 25, (0.15, 0.45, 0.25, 0.05, 0.1), ((0, 4), (4, 6), (6, 8), (8, 10), (10, np.inf)))
with self.assertRaises(ValueError):
distance._assert_valid_bins(((0, 1), (1, 10), (10, 20)))
def test__assert_valid_bins_3(self):
distance = Distance(1, 25, (0.15, 0.45, 0.25, 0.05, 0.1), ((0, 4), (4, 6), (6, 8), (8, 10), (10, np.inf)))
with self.assertRaises(ValueError):
distance._assert_valid_bins(((0, 1), (5, 10, 15), (15, np.inf)))
def test__assert_valid_bins_4(self):
distance = Distance(1, 25, (0.15, 0.45, 0.25, 0.05, 0.1), ((0, 4), (4, 6), (6, 8), (8, 10), (10, np.inf)))
with self.assertRaises(ValueError):
distance._assert_valid_bins(((0, 1), (5, 10), (10, np.inf)))
def test__assert_valid_bins_5(self):
distance = Distance(1, 25, (0.15, 0.45, 0.25, 0.05, 0.1), ((0, 4), (4, 6), (6, 8), (8, 10), (10, np.inf)))
with self.assertRaises(ValueError):
distance._assert_valid_bins(((0, 1), (1, 10), (10, 20), (25, 30), (30, 31), (45, np.inf)))
def test__assert_valid_bins_6(self):
distance = Distance(1, 25, (0.15, 0.45, 0.25, 0.05, 0.1), ((0, 4), (4, 6), (6, 8), (8, 10), (10, np.inf)))
with self.assertRaises(ValueError):
distance._assert_valid_bins(((0, 1), (10, 10), (10, np.inf)))
def test__assert_valid_bins_7(self):
distance = Distance(1, 25, (0.15, 0.45, 0.25, 0.05, 0.1), ((0, 4), (4, 6), (6, 8), (8, 10), (10, np.inf)))
with self.assertRaises(ValueError):
distance._assert_valid_bins(((0, np.inf),))
if __name__ == "__main__":
unittest.main(verbosity=2)
```
#### File: conkit/io/_parser.py
```python
__author__ = "<NAME>"
__date__ = "04 Oct 2016"
__version__ = "0.13.2"
import abc
ABC = abc.ABCMeta("ABC", (object,), {})
from conkit.core.contact import Contact
from conkit.core.contactmap import ContactMap
from conkit.core.contactfile import ContactFile
from conkit.core.sequence import Sequence
from conkit.core.sequencefile import SequenceFile
class Parser(ABC):
"""Abstract class for all parsers
"""
@abc.abstractmethod
def read(self):
pass
@abc.abstractmethod
def write(self):
pass
@classmethod
def _reconstruct(cls, hierarchy):
"""Wrapper to re-construct full hierarchy when parts are provided"""
if isinstance(hierarchy, ContactFile):
h = hierarchy
elif isinstance(hierarchy, ContactMap):
h = ContactFile("conkit")
h.add(hierarchy)
elif isinstance(hierarchy, Contact):
h = ContactFile("conkit")
m = ContactMap("1")
m.add(hierarchy)
h.add(m)
elif isinstance(hierarchy, SequenceFile):
h = hierarchy
elif isinstance(hierarchy, Sequence):
h = SequenceFile("conkit")
h.add(hierarchy)
return h
class ContactFileParser(Parser):
"""General purpose class for all contact file parsers"""
pass
class DistanceFileParser(Parser):
"""General purpose class for all distance prediction file parsers"""
pass
class BinaryDistanceFileParser(Parser):
"""General purpose class for all binary distance prediction file parsers"""
pass
class SequenceFileParser(Parser):
"""General purpose class for all sequence file parsers"""
pass
``` |
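A hedged sketch of how a new contact-file parser could plug into this hierarchy (the file format and class are invented for illustration; `Contact` is assumed to take two residue indices and a raw score, as elsewhere in the package):
```python
from conkit.core.contact import Contact
from conkit.core.contactmap import ContactMap
from conkit.io._parser import ContactFileParser


class ToyListParser(ContactFileParser):
    """Hypothetical parser for a plain 'res1 res2 score' per-line format."""

    def read(self, f_handle, f_id="toy"):
        contact_map = ContactMap(f_id)
        for line in f_handle:
            res1, res2, score = line.split()
            contact_map.add(Contact(int(res1), int(res2), float(score)))
        return self._reconstruct(contact_map)

    def write(self, f_handle, hierarchy):
        contact_file = self._reconstruct(hierarchy)
        for contact_map in contact_file:
            for contact in contact_map:
                f_handle.write("%d %d %.3f\n" % (contact.res1_seq, contact.res2_seq, contact.raw_score))
```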
{
"source": "jjavier-bm/crops",
"score": 3
} |
#### File: crops/command_line/crops-cropseq.py
```python
from crops.about import __prog__, __description__, __author__, __date__, __version__
import argparse
import os
from crops.io import check_path
from crops.io import outpathgen
from crops.io import parsers as cin
from crops.io import taggers as ctg
from crops.core import ops as cop
from crops import command_line as ccl
import time
import copy
logger=None
def create_argument_parser():
"""Create a parser for the command line arguments used in crops-renumber"""
parser = argparse.ArgumentParser(prog=__prog__, formatter_class=argparse.RawDescriptionHelpFormatter,
description=__description__+' ('+__prog__+') v.'+__version__+'\n'+__doc__)
parser.add_argument("input_seqpath",nargs=1, metavar="Sequence_filepath",
help="Input sequence filepath.")
parser.add_argument("input_database",nargs=1, metavar="Intervals_database",
help="Input intervals database filepath.")
parser.add_argument("-o","--outdir",nargs=1,metavar="Output_Directory",
help="Set output directory path. If not supplied, default is the one containing the input sequence.")
parser.add_argument("-s","--sort",nargs=1, metavar="Sort_type",
help="Sort output sequences in descending order by criteria provided - 'ncrops' or 'percent'. Add 'T' ('ncropsIN', 'percentIN') to ignore numbers from terminals. Only for multiple ID fasta inputs.")
sections=parser.add_mutually_exclusive_group(required=False)
sections.add_argument("-t","--terminals",action='store_true',default=False,
help="Ignore interval discontinuities and only crop the ends off.")
sections.add_argument("-u","--uniprot_threshold", nargs=2, metavar=("Uniprot_ratio_threshold","Sequence_database"),
help='Act if SIFTS database is used as intervals source AND %% residues from single Uniprot sequence is above threshold. Threshold: [MIN,MAX)=[0,100). Database path: uniclust##_yyyy_mm_consensus.fasta-path or server-only. The latter requires internet connexion.')
parser.add_argument('--version', action='version', version='%(prog)s '+ __version__)
return parser
def main():
starttime=time.time()
parser = create_argument_parser()
args = parser.parse_args()
global logger
logger = ccl.crops_logger(level="info")
logger.info(ccl.welcome())
inseq=check_path(args.input_seqpath[0],'file')
indb=check_path(args.input_database[0],'file')
if args.uniprot_threshold is not None:
insprot=check_path(args.uniprot_threshold[1]) if args.uniprot_threshold[1] != 'server-only' else 'server-only'
else:
insprot=None
minlen=float(args.uniprot_threshold[0]) if args.uniprot_threshold is not None else 0.0
targetlbl=ctg.target_format(indb,terms=args.terminals, th=minlen)
infixlbl=ctg.infix_gen(indb,terms=args.terminals)
if args.outdir is None:
outdir=check_path(os.path.dirname(inseq),'dir')
else:
outdir=check_path(os.path.join(args.outdir[0],''),'dir')
if args.sort is not None:
if (args.sort[0].lower()!='ncrops' and args.sort[0].lower()!='percent' and
args.sort[0].lower()!='ncropsin' and args.sort[0].lower()!='percentin'):
raise ValueError("Arguments for sorting option can only be either 'ncrops' or 'percent'.")
else:
sorter=args.sort[0].lower()
###########################################
logger.info('Parsing sequence file '+inseq)
seqset=cin.parseseqfile(inseq)
logger.info('Done')
logger.info('Parsing interval database file '+indb)
if len(seqset)>0:
intervals=cin.import_db(indb,pdb_in=seqset)
else:
raise ValueError('No chains were imported from sequence file.')
logger.info('Done\n')
if insprot is not None and minlen>0.0:
logger.info('Parsing uniprot sequence file: '+insprot)
uniprotset={}
for seqncid, seqnc in seqset.items():
for monomerid, monomer in seqnc.imer.items():
if 'uniprot' in intervals[seqncid][monomerid].tags:
for key in intervals[seqncid][monomerid].tags['uniprot']:
if key.upper() not in uniprotset:
uniprotset[key.upper()]=None
uniprotset=cin.parseseqfile(insprot, uniprot=uniprotset)['uniprot']
logger.info('Done\n')
logger.info('Cropping sequence(s)...')
if len(seqset)>1 and args.sort is not None:
sorted_outseq={}
cropmaps=['cropmap','cropbackmap']
for key, S in seqset.items():
if key in intervals:
for key2,monomer in S.imer.items():
if key2 in intervals[key]:
if insprot is not None and minlen>0.0:
newinterval=intervals[key][key2].deepcopy()
newinterval.tags['description']+=' - Uniprot threshold'
newinterval.subint=[]
unilbl=' uniprot chains included: '
for unicode,uniintervals in intervals[key][key2].tags['uniprot'].items():
if 100*uniintervals.n_elements()/uniprotset.imer[unicode].length()>=minlen:
newinterval=newinterval.union(intervals[key][key2].intersection(uniintervals))
unilbl+=unicode +'|'
monomer=cop.crop_seq(monomer,newinterval,targetlbl+unilbl,terms=args.terminals)
else:
monomer=cop.crop_seq(monomer,intervals[key][key2],targetlbl,terms=args.terminals)
if monomer.ncrops()>0:
monomer.info['header'] += ' |'
if monomer.ncrops()>0:
monomer.info['header'] += ' Units cropped: ' + str(monomer.ncrops())
monomer.info['header'] += ' (' + str(monomer.ncrops(offmidseq=True))+' not from terminals) '
monomer.info['header'] += '; % cropped: '+str(round(100*monomer.ncrops()/len(monomer.seqs['cropseq']),2))
monomer.info['header'] += ' (' +str(round(100*monomer.ncrops(offmidseq=True)/len(monomer.seqs['cropseq']),2))+' not from terminals) '
else:
pass
if len(seqset)==1 or args.sort is None:
if len(seqset)>1:
outseq=outpathgen(outdir,filename=os.path.splitext(os.path.basename(inseq))[0]+infixlbl["croprenum"]+os.path.splitext(os.path.basename(inseq))[1])
for cmap in cropmaps:
if cmap in monomer.info:
outmap=outpathgen(outdir,filename=os.path.splitext(os.path.basename(inseq))[0]+infixlbl["croprenum"]+'.'+cmap)
monomer.dumpmap(outmap,themap=cmap)
else:
outseq=outpathgen(outdir,subdir=key,filename=key+infixlbl["croprenum"]+os.path.splitext(os.path.basename(inseq))[1],mksubdir=True)
for cmap in cropmaps:
if cmap in monomer.info:
outmap=outpathgen(outdir,subdir=key,filename=key+infixlbl["croprenum"]+'.'+cmap,mksubdir=True)
monomer.dumpmap(outmap,themap=cmap)
monomer.dump(outseq)
if len(seqset)>1 and args.sort is not None:
sorted_outseq[monomer.info['oligomer_id']+'_'+monomer.info['chain_id']]=monomer.deepcopy()
else:
for key2,monomer in S.imer.items():
monomer.info['cropmap']={}
for n in range(1,monomer.length()+1):
monomer.info['cropmap'][n]=n
monomer.info['cropbackmap']=copy.deepcopy(monomer.info['cropmap'])
if len(seqset)==1 or args.sort is None:
if len(seqset)>1:
outseq=outpathgen(outdir,filename=os.path.splitext(os.path.basename(inseq))[0]+infixlbl["croprenum"]+os.path.splitext(os.path.basename(inseq))[1])
for cmap in cropmaps:
if cmap in monomer.info:
outmap=outpathgen(outdir,filename=os.path.splitext(os.path.basename(inseq))[0]+infixlbl["croprenum"]+'.'+cmap)
monomer.dumpmap(outmap,themap=cmap)
else:
outseq=outpathgen(outdir,subdir=key,filename=key+infixlbl["croprenum"]+os.path.splitext(os.path.basename(inseq))[1],mksubdir=True)
for cmap in cropmaps:
if cmap in monomer.info:
outmap=outpathgen(outdir,subdir=key,filename=key+infixlbl["croprenum"]+'.'+cmap,mksubdir=True)
monomer.dumpmap(outmap,themap=cmap)
monomer.dump(outseq)
if len(seqset)>1 and args.sort is not None:
sorted_outseq[monomer.info['oligomer_id']+'_'+monomer.info['chain_id']]=monomer.deepcopy()
croptime=time.time()
logger.debug('Crop time = '+str(croptime-starttime)+ ' s')
logger.info('Done\n')
if len(seqset)>1 and args.sort is not None:
logger.info('Sorting sequence(s)...')
outseq=outpathgen(outdir,filename=os.path.splitext(os.path.basename(inseq))[0]+infixlbl["cropseq"]+".sorted_"+sorter+os.path.splitext(os.path.basename(inseq))[1])
outseq=outpathgen(outdir,filename=os.path.splitext(os.path.basename(inseq))[0]+infixlbl["croprenum"]+".sorted_"+sorter+os.path.splitext(os.path.basename(inseq))[1])
if sorter=='ncrops':
sorted_outseq2=sorted(sorted_outseq.items(), key=lambda x: x[1].ncrops(),reverse=True)
elif sorter=='percent':
sorted_outseq2=sorted(sorted_outseq.items(), key=lambda x: x[1].ncrops()/x[1].full_length(), reverse=True)
elif sorter=='ncropsin':
sorted_outseq2=sorted(sorted_outseq.items(), key=lambda x: x[1].ncrops(offmidseq=True),reverse=True)
elif sorter=='percentin':
sorted_outseq2=sorted(sorted_outseq.items(), key=lambda x: x[1].ncrops(offmidseq=True)/x[1].full_length(), reverse=True)
del sorted_outseq
for monomer in sorted_outseq2:
monomer[1].dump(outseq)
logger.debug('Sort time = '+str(time.time()-croptime)+ ' s')
logger.info('Done\n')
return
if __name__ == "__main__":
import sys
import traceback
try:
main()
logger.info(ccl.ok())
sys.exit(0)
except Exception as e:
if not isinstance(e, SystemExit):
msg = "".join(traceback.format_exception(*sys.exc_info()))
logger.critical(msg)
sys.exit(1)
```
#### File: crops/io/__init__.py
```python
import os
import argparse
def check_path(path,typeofpath=None):
"""Returns full path if correct.
:param path: Input (local) path.
:type path: str
:param typeofpath: The type of path, 'dir' or 'file', defaults to None.
:type typeofpath: str, optional
:raises ValueError: When given typeofpath is neither 'dir' nor 'file'.
:raises argparse: If wrong path given.
:return: Complete checked path.
:rtype: str
"""
pathok=False
if typeofpath=='dir':
path=os.path.abspath(path)
if os.path.isdir(os.path.join(path,'')):
path=os.path.abspath(os.path.join(path,''))
pathok=True
elif typeofpath=='file':
path=os.path.abspath(path)
if os.path.isfile(path):
pathok=True
elif typeofpath is None:
if os.path.isdir(os.path.abspath(os.path.join(path,''))):
path=os.path.abspath(os.path.join(path,''))
pathok=True
else:
path=os.path.abspath(path)
if os.path.isfile(path):
pathok=True
else:
raise ValueError("Input string 'typeofpath' should be either 'dir' or 'file'.")
if pathok:
return path
else:
raise argparse.ArgumentTypeError(f"readable_dir:{path} is not a valid path")
def outpathgen(globaldir,subdir=None,filename=None,mksubdir=False):
"""Returns the desired output filepath.
:param globaldir: General output dir.
:type globaldir: str
:param subdir: Additional subdirectory, defaults to None.
:type subdir: str, optional
:param filename: File name, defaults to None.
:type filename: str, optional.
:param mksubdir: Create directory if not existing, defaults to False.
:type mksubdir: bool, optional
:raises FileNotFoundError: Directory does not exist and mksubdir is False.
:return: Output filepath.
:rtype: str
"""
newpath=check_path(globaldir,'dir')
if subdir is not None:
newpath=os.path.join(newpath,subdir)
if not os.path.isdir(newpath):
if mksubdir:
os.mkdir(newpath)
else:
raise FileNotFoundError('Directory does not exist')
if filename is not None:
newpath=os.path.join(newpath,filename)
return newpath
```
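A hedged usage sketch of the two helpers above (all paths are placeholders):
```python
from crops.io import check_path, outpathgen

# Validate an existing input file and output directory (both raise if the path is not valid)
seqfile = check_path('input/5hea.fasta', 'file')
outdir = check_path('results', 'dir')

# Build results/5hea/5hea.crops.fasta, creating the '5hea' subdirectory if it does not exist yet
outpath = outpathgen(outdir, subdir='5hea', filename='5hea.crops.fasta', mksubdir=True)
```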
#### File: crops/io/parsers.py
```python
from crops.about import __prog__, __description__, __author__, __date__, __version__
import gemmi
import os
import csv
import urllib.request
import copy
from crops.elements.sequence import Sequence
from crops.elements.sequence import guess_type
from crops.io.taggers import retrieve_id
from crops.elements.intervals import intinterval
def import_db(inpath,pdb_in=None):
"""Imports intervals database. Input must be a .csv file (filepath).
If imported file is not 'pdb_chain_uniprot.csv' from SIFTS database,
the columns must contain molecule ID, chain ID, lower element of subset,
and higher element of subset, in this order.
:param inpath: Path to interval database used.
:type inpath: str
:param pdb_in: Chain ID(s). If given, the imported values
will be filtered to contain only IDs provided, defaults to None.
:type pdb_in: str, dict, optional
:raises TypeError: When pdb_in is given and is neither a string nor a dictionary.
:return: dict [str, :class:`~crops.elements.intervals.intinterval`im]
:rtype: A dictionary of :class:`~crops.elements.intervals.intinterval`.
"""
database_out={}
if isinstance(pdb_in,str):
pdb_in_lower={}
pdb_in_lower[pdb_in.lower()]=None
elif isinstance(pdb_in,dict):
pdb_in_lower={}
for element in pdb_in:
if not isinstance(element,str):
raise TypeError('Argument should be either None, a string, or a dictionary with empty values.')
pdb_in_lower[element.lower()]=None
elif pdb_in is None:
pass
else:
raise TypeError('Argument should be either None, a string, or a dictionary with empty values.')
if os.path.basename(inpath)=='pdb_chain_uniprot.csv':
mol=0
chain=1
up=2
leftend=3
rightend=4
else:
mol=0
chain=1
leftend=2
rightend=3
up=None
csv_chain_file = open(inpath)
csv_chain = csv.reader(csv_chain_file)
for entry in csv_chain:
if entry[0][0] != "#" and entry[0] !="PDB":
if pdb_in is None or entry[mol].lower() in pdb_in_lower:
if entry[mol].lower() not in database_out:
database_out[entry[mol].lower()]={}
if entry[chain] not in database_out[entry[mol].lower()]:
database_out[entry[mol].lower()][entry[chain]]=intinterval(description=entry[mol].lower()+'_'+entry[chain])
if up is not None:
database_out[entry[mol].lower()][entry[chain]].tags['uniprot']={}
database_out[entry[mol].lower()][entry[chain]]= \
database_out[entry[mol].lower()][entry[chain]].union(other=[int(entry[leftend]),int(entry[rightend])])
if up is not None:
if entry[up].upper() not in database_out[entry[mol].lower()][entry[chain]].tags['uniprot']:
database_out[entry[mol].lower()][entry[chain]].tags['uniprot'][entry[up]]=intinterval(description=entry[up].upper())
database_out[entry[mol].lower()][entry[chain]].tags['uniprot'][entry[up]]=\
database_out[entry[mol].lower()][entry[chain]].tags['uniprot'][entry[up]].union([int(entry[leftend]),int(entry[rightend])])
return database_out
def parsestrfile(str_inpath):
"""Returns dictionary containing :class:`~gemmi.Structure` objects and another one with the file names.
:param str_inpath: Either a directory or file path.
:type str_inpath: str
:raises KeyError: More than one structure file containing same identifier.
:return strdict: A dictionary containing imported :class:`~gemmi.Structure` objects.
:rtype strdict: dict [str, :class:`~gemmi.Structure`]
:return filedict: A dictionary containing file names.
:rtype filedict: dict [str, str]
"""
strdict={}
filedict={}
if os.path.isfile(str_inpath):
structure=gemmi.read_structure(str_inpath)
pdbid=structure.name.lower()
strdict[pdbid]=structure
filedict[pdbid]=os.path.basename(str_inpath)
elif os.path.isdir(str_inpath):
filelist=os.listdir(str_inpath)
for file in filelist:
filepath=os.path.join(str_inpath,file)
if os.path.isfile(filepath):
try:
structure=gemmi.read_structure(filepath)
pdbid=structure.name.lower()
if pdbid in strdict:
raise KeyError('Structure '+pdbid+' loaded more than once. Check files in directory and remove duplicates.')
strdict[pdbid]=structure
filedict[pdbid]=file
except:
pass
return strdict, filedict
def parseseqfile(inpath,uniprot=None):
"""Sequence file parser.
:param inpath: Sequence file path.
:type inpath: str
:param uniprot: A dictionary of Uniprot codes, defaults to None.
:type uniprot: str, dict [str, any], optional
:return: A dictionary containing parsed :class:`~crops.elements.sequence.Sequence`.
If uniprot is not None, the dictionary will contain a single entry with a :class:`~crops.elements.sequence.Sequence`
that will contain the requested Uniprot chains as :class:`~crops.elements.sequence.monomer_sequence` objects.
:rtype: dict [str, :class:`~crops.elements.sequence.Sequence`]
"""
newseqs={}
newid=[]
head=''
chain=''
ignore=False
if uniprot is not None:
if not isinstance(uniprot,str) and not isinstance(uniprot,dict):
raise TypeError('Input argument uniprot must be either a string or a dictionary.')
elif isinstance(uniprot,str):
unitemp=uniprot
uniprot={}
uniprot[unitemp]=None
for upcode in uniprot:
if not isinstance(upcode,str):
raise TypeError('Input argument uniprot must be either a string or a dictionary.')
if inpath == 'server-only' and uniprot is not None:
for upcode in uniprot:
try:
for line in urllib.request.urlopen('https://www.uniprot.org/uniprot/'+upcode.upper()+'.fasta'):
line = line.decode('utf-8').rstrip()
if line.startswith(">"):
chain = ''
head = line
else:
chain += line
if len(newseqs)==0:
newseqs['uniprot']=Sequence(seq_id=upcode.upper(),source='Uniprot server')
if upcode.upper() not in newseqs['uniprot'].imer:
newseqs['uniprot'].add_monomer(nheader=head,nseq=chain,nid=upcode.upper(), guesstype=True)
except:
raise OSError('Uniprot sequence '+upcode.upper()+' not found online. If this file exists, check your internet connexion.')
elif inpath == 'server-only' and uniprot is None:
raise TypeError('Input argument inpath cannot be "server-only" when a dict of uniprot ids is not provided.')
else:
with open(inpath,'r') as f:
indx=-1
while True:
line=f.readline().rstrip()
if (not line or line.startswith(">")) and not ignore:
if uniprot is not None:
if indx>=0:
if len(newseqs)==0:
newseqs['uniprot']=Sequence(seq_id=newid[0].upper(),source=os.path.basename(inpath))
if newid[0].upper() not in newseqs['uniprot'].imer:
newseqs['uniprot'].add_monomer(nheader=head,nseq=chain,nid=newid[0].upper(), guesstype=True)
if len(newseqs['uniprot'].imer)==len(uniprot):
break
else:
if indx>=0:
if newid[0].lower() not in newseqs:
newseqs[newid[0].lower()]=Sequence(seq_id=newid[0].lower(),source=os.path.basename(inpath))
if newid[2] is not None and newid[2] not in newseqs[newid[0].lower()].groups:
newseqs[newid[0].lower()].groups[newid[2]] = newid[1]
for iid in newid[1]:
newseqs[newid[0].lower()].add_monomer(head,chain,nid=iid, guesstype=True)
newseqs[newid[0].lower()].imer[iid].info['seq_group']=newid[2]
if not line:
try:
line=f.readline().rstrip()
if not line:
break
except:
break
if line.startswith(">"):
newid=retrieve_id(line)
head=line
indx += 1
chain = ''
if uniprot is not None:
ignore=False if newid[0] in uniprot else True
elif line.startswith("#") or line.startswith(' #'):
pass
else:
if not ignore:
chain += str(line)
if uniprot is not None:
for upcode in uniprot:
if upcode.upper() not in newseqs['uniprot'].imer:
try:
for line in urllib.request.urlopen('https://www.uniprot.org/uniprot/'+upcode.upper()+'.fasta'):
line = line.decode('utf-8').rstrip()
if line.startswith(">"):
chain = ''
head = line
else:
chain += line
if len(newseqs)==0:
newseqs['uniprot']=Sequence(seq_id=upcode.upper(),source='Uniprot server')
if upcode.upper() not in newseqs['uniprot'].imer:
newseqs['uniprot'].add_monomer(nheader=head,nseq=chain,nid=upcode.upper(), guesstype=True)
except:
raise OSError('Uniprot sequence '+upcode.upper()+' not found in local file or online. Check your internet connexion.')
return newseqs
def parsemapfile(inpath):
"""Cropmap file parser.
:param inpath: Cropmap file path.
:type inpath: str
:return: A dictionary containing parsed mapping and backmapping coordinates.
:rtype: dict [str, dict[str, dict[str, dict[int, int]]]]
"""
mapdict={}
newid=[]
with open(inpath, 'r') as f:
indx = -1
while True:
line=f.readline().rstrip()
if (not line or line.startswith(">")):
if indx >= 0:
if newid[0].lower() not in mapdict:
mapdict[newid[0].lower()]={}
for iid in newid[1]:
if iid not in mapdict[newid[0].lower()]:
mapdict[newid[0].lower()][iid]={}
mapdict[newid[0].lower()][iid]['cropmap']=copy.deepcopy(forthmap)
mapdict[newid[0].lower()][iid]['cropbackmap']=copy.deepcopy(backmap)
if not line:
try:
line=f.readline().rstrip()
if not line:
break
except:
break
if line.startswith(">"):
newid=retrieve_id(line)
indx += 1
forthmap={}
backmap={}
elif line.startswith("#") or line.startswith(' #'):
pass
else:
m=line.split(' ')
if m[1] != '0':
forthmap[int(m[0])] = int(m[1])
backmap[int(m[1])] = int(m[0])
else:
forthmap[int(m[0])] = None
return mapdict
```
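A hedged usage sketch of the parsers above (paths are placeholders; `pdb_chain_uniprot.csv` is the SIFTS file that the parser treats specially):
```python
from crops.io.parsers import parseseqfile, import_db

seqset = parseseqfile('input/5hea.fasta')                        # dict of Sequence objects keyed by lower-case id
intervals = import_db('input/pdb_chain_uniprot.csv', pdb_in=seqset)

for pdbid, chains in intervals.items():
    for chain_id, interval in chains.items():
        uniprot_ids = list(interval.tags.get('uniprot', {}))
        print(pdbid, chain_id, uniprot_ids)
```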
#### File: crops/io/taggers.py
```python
from crops.about import __prog__, __description__, __author__, __date__, __version__
import os
def target_format(inpath,terms=False,th=0):
"""Returns extra information for .fasta headers.
:param inpath: Path to interval database used.
:type inpath: str
:param terms: Are only terminal ends discarded?, defaults to False.
:type terms: bool, optional
:param th: Uniprot threshold, defaults to 0.
:type th: int, float, optional
:return: Extra information for .fasta headers
:rtype: str
"""
if os.path.basename(inpath)=='pdb_chain_uniprot.csv':
outcome=' | CROPS | UNIPROT via SIFTS'
if th>0:
outcome += ' - UNIPROT CHAIN INCLUDED THRESHOLD = '+str(th)
else:
outcome=' | CROPS | CUSTOM'
if terms and th==0:
outcome += ' - ONLY TERMINALS REMOVED'
return outcome
def infix_gen(inpath,terms=False):
"""Returns filename tag for outputs.
:param inpath: Path to interval database used.
:type inpath: str
:param terms: Are terminal ends the only segments to be discarded?, defaults to False.
:type terms: bool, optional
:return: Filename tag.
:rtype: str
"""
if os.path.basename(inpath)=='pdb_chain_uniprot.csv':
cut=".to_uniprot"
else:
cut=".custom"
if terms:
cut=".custom"
infix_out={
"croprenum" : ".crops"+cut,
"cropseq" : ".crops"+cut,
"crop" : ".crops.oldids"+cut,
"renumber" : ".crops.seq"}
return infix_out
def retrieve_id(seqheader,extrainfo=False):
"""Extracts sequence IDs from a standard .fasta header.
:param seqheader: Standard .fasta header, starting with ">".
:type seqheader: str
:param extrainfo: If True, extra information string is returned instead of sequence IDs, defaults to False.
:type extrainfo: bool, optional
:raises ValueError: If seqheader is not a string.
:return: A list with the two sequence identifiers (e.g. [pdb ID, chain ID]) or a single string if extrainfo==True.
:rtype: list [str], str
"""
if not isinstance(seqheader,str):
raise ValueError('Argument is not a str')
namechar=False
idchar=False
nameseq=["", [], None] #PDB IF, chain IDs for seqgroup,
newchid=''
if seqheader.startswith('>sp|'):
for i in range(4,len(seqheader)):
if seqheader[i]=='|':
if extrainfo:
return seqheader[i:]
break
else:
nameseq[0]+=seqheader[i]
nameseq[1]=[nameseq[0]]
return nameseq
if seqheader.startswith('>pdb|'):
for i in range(5,len(seqheader)):
if seqheader[i]=='|':
for j in range(i+1,len(seqheader)):
                    if seqheader[j]!=' ' and seqheader[j]!='|':
newchid += seqheader[j]
elif seqheader[j]==' ':
if newchid != '':
nameseq[1].append(newchid)
newchid = ''
else:
if extrainfo:
return seqheader[i:]
break
else:
nameseq[0]+=seqheader[i]
return nameseq
for j in range(len(seqheader)):
if seqheader[j]==">":
idchar=True
elif seqheader[j]==":" or seqheader[j]=="_":
idchar=False
namechar=True
elif seqheader[j]==" ":
pass
elif seqheader[j]=="[":
if seqheader[j:j+5]=="[auth" or seqheader[j:j+6]=="[ auth":
newchid=''
elif ((seqheader[j]=="a" and seqheader[j:j+4]=='auth') or
(seqheader[j]=="u" and seqheader[j-1:j+3]=='auth') or
(seqheader[j]=="t" and seqheader[j-2:j+2]=='auth') or
(seqheader[j]=="h" and seqheader[j-3:j+1]=='auth')):
pass
elif seqheader[j]=="]":
pass
elif seqheader[j]=="|":
if seqheader[j+1:j+6]=='Chain' or seqheader[j+1:j+6]=='chain':
if newchid != '':
nameseq[2] = newchid
k=0 if seqheader[j+6]==' ' else 1
newchid=''
for jj in range(j+6+k+1,len(seqheader)):
if seqheader[jj]==',':
nameseq[1].append(newchid)
newchid=''
elif seqheader[jj]==" ":
pass
elif seqheader[jj]=="[":
if seqheader[jj:jj+5]=="[auth" or seqheader[jj:jj+6]=="[ auth":
newchid=''
elif ((seqheader[jj]=="a" and seqheader[jj:jj+4]=='auth') or
(seqheader[jj]=="u" and seqheader[jj-1:jj+3]=='auth') or
(seqheader[jj]=="t" and seqheader[jj-2:jj+2]=='auth') or
(seqheader[jj]=="h" and seqheader[jj-3:jj+1]=='auth')):
pass
elif seqheader[jj]=="]":
pass
elif seqheader[jj]=="|" or seqheader[jj]==":" or jj==len(seqheader)-1:
if extrainfo:
if jj==len(seqheader)-1:
return ''
else:
return seqheader[jj:]
if jj==len(seqheader)-1:
newchid+=seqheader[jj]
nameseq[1].append(newchid)
newchid=''
return nameseq
else:
newchid+=seqheader[jj]
else:
if extrainfo:
return seqheader[j:]
nameseq[1].append(newchid)
return nameseq
elif seqheader[j]==" " and seqheader[j-1]!="_":
if extrainfo:
return seqheader[j:]
if namechar:
nameseq[1].append(newchid)
return nameseq
else:
if namechar:
newchid += seqheader[j]
elif idchar:
nameseq[0] += seqheader[j].lower()
``` |
{
"source": "jjavier-bm/LamAnalysis",
"score": 2
} |
#### File: LamAnalysis/Data_Analysis/uploadable_trajectories.py
```python
import argparse
import os
import math
def create_argument_parser():
"""Create a parser for the command line arguments used in crops-renumber"""
parser = argparse.ArgumentParser(prog="LamAnalysis - uploadable trajectories",
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Script to extract single trajectories from high-stats files")
parser.add_argument("lintraj_path",nargs=1, metavar="Sequence_filepath",
help="Input linear trajectory filepath.")
parser.add_argument("Nparticles",nargs=1, type=int, metavar="N_particles",
help="Input number of nanodimers.")
return parser
def main():
Nconfigsperfile = 3
Ntimesteps = 100000000
period = 500000
Nch = 12696
Lch = 30
fortfiles = [101,102,103,104,105,106,107,108,109,110,
120,130,140,150,160,170,180,190,200]
parser = create_argument_parser()
args = parser.parse_args()
Npart = args.Nparticles[0]
inlinpath = os.path.abspath(args.lintraj_path[0])
if not os.path.isfile(inlinpath):
raise OSError('input file not found')
fileparts = []
while True:
if len(fileparts) == 0:
parts = os.path.splitext(inlinpath)
outlinpath = parts[0] + '.cleanversion' + parts[1]
else:
parts = os.path.splitext(parts[0])
if parts[1]=='.lin':
outlogpath = parts[0] + '.log'
for part in reversed(fileparts):
outlogpath += part
break
else:
fileparts.append(parts[1])
if len(fileparts)==1:
fileparts.append('.singlestat')
# Linearly-(time-)spaced trajectory file. Remove duplicated configurations.
with open(inlinpath,'r') as fin:
with open(outlinpath,'w') as fout:
l = 1
c = 0
while True:
line=fin.readline().rstrip()
if not line:
break
if c > 0 and c % Nconfigsperfile == 0:
pass
else:
fout.write(line+'\n')
if l == 9+Nch*Lch+Npart*2:
l = 1
c += 1
else:
l += 1
# Logarithmically-(time-)spaced trajectory files. Single stat trajectory file.
Nconfigsperfile = 9*(int(math.log10(Ntimesteps))-2)+1
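    # Configurations held in each logarithmically spaced fort file:
    # nine per decade between 10^2 and Ntimesteps, plus the final one.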
with open(outlogpath,'w') as fout:
for n in fortfiles:
inpath = os.path.join(os.path.dirname(inlinpath),'fort.'+str(n))
with open(inpath,'r') as fin:
l = 1
c = 0 if n == 101 else 1
while True:
line=fin.readline().rstrip()
if not line:
break
if n == 101 or c == Nconfigsperfile:
fout.write(line+'\n')
if l == 9+Nch*Lch+Npart*2:
l = 1
c += 1
else:
l += 1
return
if __name__ == "__main__":
import sys
import traceback
try:
main()
sys.exit(0)
except Exception as e:
if not isinstance(e, SystemExit):
msg = "".join(traceback.format_exception(*sys.exc_info()))
raise RuntimeError(msg)
sys.exit(1)
``` |
{
"source": "jjavier-bm/PISACov",
"score": 2
} |
#### File: pisacov/core/contacts.py
```python
from pisacov import __prog__, __description__, __version__
from pisacov import __author__, __date__, __copyright__
from pisacov.iomod import _conf_ops as pco
from pisacov.core import _psicov_modes as PSICOV_modes
import copy
import logging
import os
from crops.elements import sequences as pes
from crops.iomod import taggers as ctg
from conkit.core import contactmap as ckc
from conkit.core.contactmap import ContactMap
from conkit import plot as ckplot
from matplotlib import pyplot as plt
def backmapping(cmap, sequence):
"""Return the contact prediction map with the original residue numbers.
:param cmap: Contact prediction map.
:type cmap: :class:`~conkit.core.contactmap.ContactMap`
:param sequence: Sequence.
:type sequence: :class:`~crops.elements.sequences.sequence`
:return: Contact prediction map with backmapped ids.
:rtype: :class:`~conkit.core.contactmap.ContactMap`
"""
if ((isinstance(cmap, ckc.ContactMap) is False) and
(isinstance(cmap, ContactMap) is False)):
logging.critical('First argument must be a Conkit ContactMap object.')
raise TypeError
if isinstance(sequence, pes.sequence) is False:
logging.critical('Second argument must be a CROPS sequence object.')
raise TypeError
conpredout = cmap.deepcopy()
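    # cropbackmap translates cropped residue numbers back to the original
    # numbering; the smaller backmapped index is always stored as res1.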
for n in range(len(cmap)):
c1 = sequence.cropbackmap[cmap[n].res1_seq]
c2 = sequence.cropbackmap[cmap[n].res2_seq]
if c1 < c2:
conpredout[n].res1_seq = c1
conpredout[n].res2_seq = c2
nid = c1, c2
else:
conpredout[n].res2_seq = c1
conpredout[n].res1_seq = c2
nid = c2, c1
conpredout[n].id = nid
return conpredout
def filter_contacts(cmap, threshold=0.2):
"""Remove low score contacts from contact prediction list.
:param cmap: Contact prediction map.
:type cmap: :class:`~conkit.core.contactmap.ContactMap`
:param threshold: Threshold, defaults to 0.2.
:type threshold: float, optional
"""
cmap.sort('raw_score', reverse=True, inplace=True)
cnt = 0
for contact in cmap:
if contact.raw_score < threshold:
break
else:
cnt = cnt+1
    return cmap[:cnt]
def map_intersection(conpredmap, strconarray):
"""Generate a Contact map with the common contacts of conpredmap and strconarray.
:param conpredmap: Conkit Contact Map (contact prediction).
:type conpredmap: :class:`~conkit.core.contactmap.ContactMap`
:param strconarray: Numpy-read contact list (generated from pdb structure).
:type strconarray: :class:`~numpy.ndarray`
:return: Intersection map.
:rtype: :class:`~conkit.core.contactmap.ContactMap`
"""
newmap = ContactMap(id=strconarray.id)
ncst = 1 if len(strconarray.shape) == 1 else strconarray.shape[0]
for ic2 in conpredmap:
if ncst == 1:
if (int(ic2.res1_seq) == int(strconarray[0]) and
int(ic2.res2_seq) == int(strconarray[1])):
try:
newmap.add(ic2)
except Exception:
pass
elif (int(ic2.res2_seq) == int(strconarray[0]) and
int(ic2.res1_seq) == int(strconarray[1])):
try:
newmap.add(ic2)
except Exception:
pass
elif ncst > 1:
for ic1 in range(ncst):
if (int(ic2.res1_seq) == int(strconarray[ic1][0]) and
int(ic2.res2_seq) == int(strconarray[ic1][1])):
try:
newmap.add(ic2)
except Exception:
pass
elif (int(ic2.res2_seq) == int(strconarray[ic1][0]) and
int(ic2.res1_seq) == int(strconarray[ic1][1])):
try:
newmap.add(ic2)
except Exception:
pass
newmap.sort("raw_score", reverse=True, inplace=True)
return newmap
class contact_atlas:
"""
A :class:`~pisacov.core.contacts.contact_atlas` object containing information from
sequences, contact prediction maps and structure contacts.
The :class:`~pisacov.core.contacts.contact_atlas` class represents a data structure to hold
contact maps, matched and unmatched with structure contacts and sequence.
:param name: Atlas identifier, defaults to None.
:type name: str, optional
:param sequence: A sequence object, defaults to None.
:type sequence: :class:`~crops.elements.sequences.sequence`, optional
:param dimer_interface: An interface object, defaults to None.
:type dimer_interface: :class:`~pisacov.core.interfaces.interface`, optional
:param conpredmap: A Contact prediction map, defaults to None.
:type conpredmap: :class:`~conkit.core.contactmap.ContactMap`, optional
:param conpredtype: Source of contact prediction list (one of 'psicov', 'deepmetapsicov', 'ccmpred'), defaults to None.
:type conpredtype: str, optional
:ivar name: Atlas identifier.
:vartype name: str
:ivar sequence: A sequence object.
:vartype sequence: :class:`~crops.elements.sequences.sequence`
:ivar interface: An interface object.
:vartype interface: :class:`~pisacov.core.interfaces.interface`
:ivar conpred_raw: A Contact prediction map, as originally parsed.
:vartype conpred_raw: :class:`~conkit.core.contactmap.ContactMap`
:ivar conpred: A Contact prediction map, after processing.
:vartype conpred: :class:`~conkit.core.contactmap.ContactMap`
:ivar conpred_source: Source of contact prediction list (one of 'psicov', 'deepmetapsicov', 'ccmpred').
:vartype conpred_source: str
:ivar conkitmatch: Dictionary of prediction-structure matched contact maps.
:vartype conkitmatch: dict [str : :class:`~conkit.core.contactmap.ContactMap`]
:ivar ckplotmatch: Dictionary of prediction-structure matched contact maps (for plotting).
:vartype ckplotmatch: dict [str : :class:`~conkit.core.contactmap.ContactMap`]
:ivar tp: Dictionary of number of true positives.
:vartype tp: dict [str : int]
    :ivar tn: Dictionary of number of true negatives.
    :vartype tn: dict [str : int]
    :ivar fp: Dictionary of number of false positives.
    :vartype fp: dict [str : int]
:ivar fn: Dictionary of number of false negatives.
:vartype fn: dict [str : int]
:ivar npotential: Number of potential contacts.
:vartype npotential: int
:example:
>>> from pisacov.core import contacts as pcc
>>> from pisacov.core import interfaces as pci
>>> from crops.iomod import parsers as cps
>>> from conkit import io as ckio
>>> myseq = cps.parseseqfile('7M6C.fasta') # Parse sequence with CROPS
>>> myseq['7m6c']
Multiple sequence object: (id=7m6c, sequences = {'1': Sequence object >7M6C_1|Chain A (seq=MRTLWIMAVL[...]KPLCKKADPC, type=Undefined, length=138)})
>>> myseq['7m6c'].imer['1'].seqs['conkit'] = ckio.read('7M6C.fasta', 'fasta') # Add to sequence the Conkit-parsed version for later use
>>> myseq['7m6c'].imer['1'].seqs
{'mainseq': 'MRTLWIMAVLLVGVEGSLVELGKMILQETGKNPVTSYGAYGCNCGVLGRGKPKDATDRCCSVHKCCYKKMTGCNPKKDRYSYSWKDKTIVCDENNPCLKELCECDKAVAICLRENLDTYNEKYKKYYKKPLCKKADPC',
'conkit': Sequence(id="7M6C_1|Chain A|Basic phospholipase A2|Bothrops atrox (8725)" seq="MRTLW...KADPC" seq_len=138)}
>>> myinterfacelist = []
>>> myinterfacelist.append(pci.interface(name='1')) # Create new interface and add chains
>>> myinterfacelist[0].chains.append(pci.chain_info(dimer_id='A',
pisa_id='1',
crystal_id='A',
seqid='1',
biotype='Protein'))
>>> myinterfacelist[0].chains.append(pci.chain_info(dimer_id='B',
pisa_id='1',
crystal_id='A',
seqid='1',
biotype='Protein'))
>>> myinterfacelist[0].stable = False # Define the stability of interface
>>> myinterfacelist[0]
Interface object 1 (chains=A,A, type=Protein-Protein, stable=False)
>>> myinterfacelist[0].chains[0]
Chain Info object: (Dimer ID = A, PISA xml ID = 1, Monomer ID in assymmetric unit = A, Sequence ID = 1, Biotype = Protein)
>>> inputmap = ckio.read('7m6c.interface.1.pdb', 'pdb') # Parse interface dimer structure
>>> myinterfacelist[0].structure = []
>>> for m in range(len(inputmap)): # Add interface structure to interface object
myinterfacelist[0].structure.append(inputmap[m].as_contactmap())
myinterfacelist[0].structure[m].id = inputmap[m].id
>>> myinterfacelist[0].structure
[ContactMap(id="A", ncontacts=572),
ContactMap(id="AB", ncontacts=41),
ContactMap(id="BA", ncontacts=41),
ContactMap(id="B", ncontacts=572)]
>>> myconpredmap = ckio.read('7m6c.psicov', 'psicov') # Parse contact prediction list
>>> myatlas = pcc.contact_atlas(name='7M6C_1', # Create atlas
sequence=myseq['7m6c'].imer['1'],
dimer_interface=myinterfacelist[0],
conpredmap=myconpredmap,
conpredtype='psicov')
>>> myatlas
Contact Atlas object 7M6C_1 (Interface=Interface object 1 (chains=A,A, type=Protein-Protein, stable=False), Contact Prediction=ContactMap(id="map_1", ncontacts=8298))
>>> myatlas.set_conpred_seq() # Assign sequence to contact prediction list
>>> myatlas.remove_neighbours()
>>> myatlas.remove_intra()
>>> myatlas.make_match(filterout=0.2, tpmap=False) # Match Structure and Prediction
>>> myatlas
Contact Atlas object 7M6C_1 (Interface=Interface object 1 (chains=A,A, type=Protein-Protein, stable=False), Contact Prediction=ContactMap(id="map_1", ncontacts=8092), True Positives=0)
>>> myatlas.tp
>>> myatlas.fp
>>> myatlas.fn
>>> myatlas.plotmap('myoutpath/plotfile.png')
"""
_kind = 'Contact Atlas'
__slots__ = ['name', 'interface', 'sequence',
'conpred_raw', 'conpred', 'conpred_source',
'conkitmatch', 'ckplotmatch',
'tp', 'tn', 'fp', 'fn', 'npotential']
def __init__(self, name=None, sequence=None, dimer_interface=None,
conpredmap=None, conpredtype=None):
self.name = name
self.interface = dimer_interface
self.conpred_raw = conpredmap
if conpredmap is not None:
self.conpred = self.conpred_raw.deepcopy()
else:
self.conpred = None
self.conpred_source = conpredtype
self.sequence = sequence
self.conkitmatch = {}
self.ckplotmatch = {}
self.tp = {}
self.fp = {}
self.tn = {}
self.fn = {}
self.npotential = None
# Set sequence values and Number of total potential contacts
if sequence is not None:
lseq = sequence.length()
self.npotential = (lseq**2 - lseq) / 2
self.npotential = int(self.npotential / 2)
def __repr__(self):
string = (self._kind+" object "+self.name +
" (Interface=" + str(self.interface) + ", " +
"Contact Prediction=" + str(self.conpred))
if 'raw' in self.tp:
string += ", True Positives=" + str(self.tp['raw'])
string += ")"
return string
def copy(self):
return copy.copy(self)
def deepcopy(self):
return copy.deepcopy(self)
def remove_neighbors(self, mindist=2):
md = mindist
return self.remove_neighbours(mindist=md)
def remove_neighbours(self, mindist=2):
"""Return :class:`~conkit.core.contactmap.ContactMap` without neighbouring pairs.
:param mindist: Minimum allowed distance, defaults to 2.
:type mindist: int, optional
"""
lseq = self.sequence.length()
self.npotential = lseq**2 - lseq
for n in range(1, mindist):
self.npotential -= 2*(lseq - n)
self.npotential = int(self.npotential / 2)
self.conpred.remove_neighbors(min_distance=mindist, inplace=True)
def remove_intra(self):
"""Remove intramolecular contacts from intermolecular :class:`~conkit.core.contactmap.ContactMap`."""
for m in [0, 3]:
intra = self.interface.structure[m]
for contact1 in intra:
c1 = contact1.id
# c1 = str(contact1.id)[1:-1].split(', ')
for contact2 in reversed(self.conpred):
c2 = contact2.id
# c2 = str(contact2.id)[1:-1].split(', ')
if ((c1[0] == c2[0] and c1[1] == c2[1]) or
(c1[1] == c2[0] and c1[0] == c2[1])):
self.conpred.remove(contact2.id) # CHECK THAT REMOVAL INSIDE LOOP IS OK
def set_sequence(self, sequence):
"""
Set Atlas sequence.
This is the correct way to update the sequence.
Storing directly in :attr:`~pisacov.core.contacts.contact_atlas.sequence` will have sequence-dependent values not updated.
:param sequence: A sequence object.
:type sequence: :class:`~crops.elements.sequences.sequence`
"""
        self.sequence = sequence
        lseq = sequence.length()
        self.npotential = (lseq**2 - lseq) / 2
        self.npotential = int(self.npotential / 2)
def set_cropmap(self):
"""Renumber :class:`~conkit.core.contactmap.ContactMap` according to :class:`~crops.elements.sequences.sequence`."""
self.conpred = backmapping(self.conpred, self.sequence)
def set_conpred_seq(self, sequence=None):
"""Set contact prediction sequence.
:param sequence: Conkit type sequence, defaults to self.sequence.seqs['conkit'].
:type mindist: :class:`~conkit.core.sequence.Sequence`, optional
"""
seq_in = self.sequence.seqs['conkit'] if sequence is None else sequence
self.conpred.sequence = seq_in
self.conpred.set_sequence_register()
def make_match(self, filterout=None, tpmap=False):
"""Match Structure and contact prediction maps.
:param filterout: Threshold score below which contacts are filtered out, defaults to None.
:type filterout: float, optional
:param tpmap: If True, only consider conpred's TPs, defaults to False.
:type tpmap: bool, optional
"""
if tpmap is False:
self.conkitmatch['raw'] = self.conpred.deepcopy()
else:
self.conkitmatch['raw'] = map_intersection(self.conpred,
self.interface.contactmap)
if self.conpred_source == 'psicov':
rscmin = 0.0
rscmax = 0.0
for contact in self.conkitmatch['raw']:
if contact.raw_score < rscmin:
rscmin = contact.raw_score
if contact.raw_score > rscmax:
rscmax = contact.raw_score
if filterout is not None:
self.conkitmatch['raw'] = filter_contacts(self.conkitmatch['raw'].deepcopy(),
threshold=filterout)
else:
pass
if self.conpred_source == 'psicov':
psicovmodes = PSICOV_modes()
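            # PSICOV raw scores can be negative, so build rescaled copies:
            # 'shifted' (minimum raised to zero), 'norm' (shifted, then divided
            # by the maximum raw score) and 'abs' (absolute values).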
for pm in psicovmodes:
self.conkitmatch[pm] = self.conkitmatch['raw'].deepcopy()
for contact in self.conkitmatch['shifted']:
contact.raw_score -= rscmin
for contact in self.conkitmatch['norm']:
contact.raw_score -= rscmin
contact.raw_score /= rscmax
for contact in self.conkitmatch['abs']:
contact.raw_score = abs(contact.raw_score)
if filterout is not None:
for pm in psicovmodes:
self.conkitmatch[pm] = filter_contacts(self.conkitmatch[pm].deepcopy(),
threshold=filterout)
self.tp['raw'] = 0
self.fp['raw'] = 0
self.tn['raw'] = 0
self.fn['raw'] = 0
if self.conpred_source == 'psicov':
for pm in psicovmodes:
self.tp[pm] = 0
self.fp[pm] = 0
self.tn[pm] = 0
self.fn[pm] = 0
structuremap = self.interface.structure[1].deepcopy()
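        # structure[1] is the inter-chain (AB) map used as the matching reference.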
for altsc, cmap in self.conkitmatch.items():
logging.info('Structure: ' + str(structuremap) +
', Conpred: ' + str(cmap) +
', Source: ' + self.conpred_source +
', Mode: ' + altsc)
if len(cmap) > 0 and len(structuremap) > 0:
self.conkitmatch[altsc] = cmap.deepcopy()
self.conkitmatch[altsc] = cmap.match_naive(structuremap,
add_false_negatives=True,
inplace=False,
match_other=True)
for contact in self.conkitmatch[altsc]:
if contact.true_positive:
self.tp[altsc] += 1
elif contact.false_positive:
self.fp[altsc] += 1
elif contact.false_negative:
self.fn[altsc] += 1
elif contact.true_negative:
logging.warning('True negatives appearing in conkit match.')
else:
logging.warning('Contact ' + str(contact.id) + ' not evaluated.')
else:
logging.info('Contact map contains no contacts.')
self.tn[altsc] = (self.npotential -
self.tp[altsc] - self.fp[altsc] - self.fn[altsc])
def plot_map(self, outpath, mode='raw'):
"""Plot matched contact map.
:param outpath: Path to output file.
:type outpath: str
:param mode: Mode, if any, defaults to 'raw'.
:type mode: str, optional
"""
try:
fig = ckplot.ContactMapFigure(self.conkitmatch[mode],
# fig = ckplot.ContactMapFigure(self.ckplotmatch[mode],
reference=self.interface.structure[1],
legend=True,
lim=(1, self.sequence.full_length()))
fig.savefig(outpath, overwrite=True)
plt.close(fig.fig)
except Exception:
logging.warning('Something went wrong with ConKit ' +
'and Contact Plot was not produced.')
def plot_map_alt(self, outpath, mode='raw', plot_type='png', ncontacts=None):
"""Plot matched contact map.
:param outpath: Path to output file.
:type outpath: str
:param mode: Mode, if any, defaults to 'raw'.
:type mode: str, optional
:param plot_type: Plot either as a 'png' image, 'eps' vector image or 'dat' in raw grace format, defaults to 'png'.
:type plot_type: str, optional
:param ncontacts: Number of contacts plotted as a function of L, defaults to None (all contacts).
:type ncontacts: int or float, optional
"""
import matplotlib.pyplot as plt
if ncontacts is not None:
nc = round(self.sequence.length()*ncontacts)
else:
nc = len(self.conkitmatch[mode])
fpx = []
fpy = []
tpx = []
tpy = []
fnx = []
fny = []
        title = (self.name + ', Interface ' + str(self.interface.name) +
                 ', Chains ' + str(self.interface.chains[0].crystal_id) +
                 str(self.interface.chains[1].crystal_id) +
                 ', ' + str(self.conpred_source))
if self.conpred_source == 'psicov':
title += ' (' + mode + ')'
n = 0
for contact in self.conkitmatch[mode]:
c1 = contact.id[0]
c2 = contact.id[1]
if contact.true_positive and n < nc:
n += 1
tpx.append(c1)
tpx.append(c2)
tpy.append(c2)
tpy.append(c1)
            elif contact.false_positive and n < nc:
n += 1
fpx.append(c1)
fpy.append(c2)
fpx.append(c2)
fpy.append(c1)
else:
fnx.append(c1)
fny.append(c2)
fnx.append(c2)
fny.append(c1)
if plot_type == 'png':
fig, ax = plt.subplots(dpi=141)
elif plot_type == 'eps':
fig, ax = plt.subplots(dpi=1200)
elif plot_type == 'dat':
xdat = [tpx, fpx, fnx]
ydat = [tpy, fpy, fny]
with open(outpath, 'w') as fout:
for p in range(3):
fout.write('@target G0.S' + str(p) + '\n@type xy\n')
for n in range(len(xdat[p])):
fout.write(str(xdat[p][n]) + ' ' + str(ydat[p][n]) + '\n')
fout.write('&\n')
return
ax.set_title(title, y=1.08)
vmin = 1
vmax = self.sequence.length()
ax.axis([vmin, vmax, vmin, vmax])
ax.set_xlim(vmin - 0.5, vmax + 0.5)
ax.set_ylim(vmin - 0.5, vmax + 0.5)
ax.set_xlabel('Residues from Chain 1')
ax.set_ylabel('Residues from Chain 2')
s = ((ax.get_window_extent().width / (vmax-vmin+1.) * 50./fig.dpi) ** 2)
ax.scatter(tpx, tpy, s=s, marker='o', linewidth=0, c='k', label='Matched (TP)')
        ax.scatter(fpx, fpy, s=s, marker='o', linewidth=0, c='r', label='Unmatched (FP)')
ax.scatter(fnx, fny, s=s, marker='o', linewidth=0, c='lightgrey', label='Structure (FN)')
ax.legend(numpoints=1,
fontsize=10,
bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),
loc=3,
ncol=3,
mode="expand",
borderaxespad=0.0)
if plot_type == 'png' or plot_type == 'eps':
fig.savefig(outpath, format=plot_type, overwrite=True)
```
#### File: pisacov/core/__init__.py
```python
from pisacov import __prog__, __description__, __version__
from pisacov import __author__, __date__, __copyright__
def _psicov_modes():
"""Return a list with PSICOV modes used.
:return: Psicov Modes
:rtype: list [str]
"""
return ['norm', 'abs', 'shifted']
```
#### File: pisacov/core/scores.py
```python
from pisacov import __prog__, __description__, __version__
from pisacov import __author__, __date__, __copyright__
from pisacov.iomod import _conf_ops as pco
from pisacov.core import contacts as pcc
from pisacov.core import _psicov_modes as PSICOV_modes
import logging
from numpy import sqrt
def _scorenames(crop=False):
"""Return scorenames.
:param crop: Include tag for cropped sequence, defaults to False
:type crop: bool, optional
:return: scorenames
:rtype: dict [str: list [str]]
"""
croptag = 'fullseq' if crop is False else 'cropseq'
names = pco._sourcenames()
shortnames = pco._sourcenames(short=True)
scorenames = {}
mainnameraw = ['Nconpred', 'Nconused',
'ACCScoreRaw', 'AVScoreRaw',
'TP', 'PREC', 'COVER', 'MCC', 'JACCARD']
mainnamepsi = ['Nconpred', 'Nconused',
'ACCScoreRaw', 'AVScoreRaw',
'ACCScoreNorm', 'AVScoreNorm',
'ACCScoreAbs', 'AVScoreAbs',
'ACCScoreShift', 'AVScoreShift',
'TP', 'PREC', 'COVER', 'MCC', 'JACCARD']
for i in range(len(names)):
if names[i] not in scorenames:
scorenames[names[i]] = []
lnames = mainnameraw if names[i] != 'psicov' else mainnamepsi
for mn in lnames:
scorenames[names[i]].append(mn + '_' +
croptag + '_' +
shortnames[i])
return scorenames
def accscore(inatlas, alt=None):
"""Return the cumulative score of the True Positive contacts.
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: Cumulative value
:rtype: float
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical('First argument of accscore must be a Contact Atlas.')
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
inmap = inatlas.conkitmatch[alt]
acc = 0.0
ntp = float(inatlas.tp[alt])
for contact in inmap:
if contact.true_positive is True:
acc += contact.raw_score
return acc
def avscore(inatlas, alt=None):
"""Return the average score of the True Positive contacts.
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: Average value
:rtype: float
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical('First argument of avscore must be a Contact Atlas.')
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
ntp = float(inatlas.tp[alt])
av = accscore(inatlas, alt) / ntp
return av
def n_tps(inatlas, alt=None):
"""Return the number of True Positive contacts.
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: Number of true positives.
:rtype: int
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical("Argument 'inatlas' must be a Contact Atlas.")
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
return inatlas.tp[alt]
def n_fps(inatlas, alt=None):
"""Return the number of False Positive contacts.
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: Number of false positives.
:rtype: int
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical("Argument 'inatlas' must be a Contact Atlas.")
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
return inatlas.fp[alt]
def n_tns(inatlas, alt=None):
"""Return the number of True Negative contacts.
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: Number of true negatives.
:rtype: int
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical("Argument 'inatlas' must be a Contact Atlas.")
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
return inatlas.tn[alt]
def n_fns(inatlas, alt=None):
"""
Return the number of False Negative contacts.
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: Number of false negatives.
:rtype: int
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical("Argument 'inatlas' must be a Contact Atlas.")
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
return inatlas.fn[alt]
def mcc(inatlas, alt=None):
"""
    Return the Matthews Correlation Coefficient of a given matched map.
    MCC = (TP*TN-FP*FN) / sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: MCC
:rtype: float
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical('Argument must be a Contact Atlas.')
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
ntp = float(inatlas.tp[alt])
nfp = float(inatlas.fp[alt])
nfn = float(inatlas.fn[alt])
ntn = float(inatlas.tn[alt])
mcc = ntp*ntn - nfp*nfn
denom = (ntp+nfp)
denom *= (ntp+nfn)
denom *= (ntn+nfp)
denom *= (ntn+nfn)
if denom == 0:
mcc = 'NaN'
else:
mcc /= sqrt(denom)
return mcc
def precision(inatlas, alt=None):
"""
Return the precision of the contact map.
Prec = TP / (TP+FP)
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: Precision.
:rtype: float
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical('Argument must be a Contact Atlas.')
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
ntp = float(inatlas.tp[alt])
nfp = float(inatlas.fp[alt])
if (ntp + nfp) == 0:
p = 'NaN'
else:
p = ntp / (ntp + nfp)
return p
def coverage(inatlas, alt=None):
"""
Return the coverage of the contact map.
Also known as True Positive Rate, Recall and Sensitivity.
Cover = TP / (TP+FN)
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: Coverage.
:rtype: float
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical('Argument must be a Contact Atlas.')
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
ntp = float(inatlas.tp[alt])
nfn = float(inatlas.fn[alt])
if (ntp + nfn) == 0:
c = 'NaN'
else:
c = ntp / (ntp + nfn)
return c
def jaccard(inatlas, alt=None):
"""
Return the Jaccard Index of a given matched map.
Jaccard = TP / (TP+FP+FN).
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: Jaccard Index
:rtype: float
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical('Argument must be a Contact Atlas.')
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
ntp = float(inatlas.tp[alt])
nfp = float(inatlas.fp[alt])
nfn = float(inatlas.fn[alt])
if (ntp + nfp + nfn) == 0:
jacc = 'NaN'
else:
jacc = ntp / (ntp + nfp + nfn)
return jacc
def accuracy(inatlas, alt=None):
"""
Return the Accuracy of a given matched map.
Accuracy = (TP+TN) / (TP+TN+FP+FN).
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: Accuracy Index
:rtype: float
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical('Argument must be a Contact Atlas.')
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
ntp = float(inatlas.tp[alt])
nfp = float(inatlas.fp[alt])
nfn = float(inatlas.fn[alt])
ntn = float(inatlas.tn[alt])
if (ntp + ntn + nfp + nfn) == 0:
accindex = 'NaN'
else:
        accindex = (ntp + ntn) / (ntp + ntn + nfp + nfn)
return accindex
def fn_rate(inatlas, alt=None):
"""
Return the False Negative Rate of a given matched map.
    FNR = FN / (TN+FN).
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: False Negative Rate.
:rtype: float
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical('Argument must be a Contact Atlas.')
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
nfn = float(inatlas.fn[alt])
ntn = float(inatlas.tn[alt])
if (ntn + nfn) == 0:
fnr = 'NaN'
else:
fnr = nfn / (ntn + nfn)
return fnr
def specificity(inatlas, alt=None):
"""
Return the False Negative Rate of a given matched map.
Also known as True Negative Rate.
SPC = TN / (TN+FP).
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: Specificity.
:rtype: float
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical('Argument must be a Contact Atlas.')
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
nfp = float(inatlas.fp[alt])
ntn = float(inatlas.tn[alt])
if (ntn + nfp) == 0:
spc = 'NaN'
else:
spc = ntn / (ntn + nfp)
return spc
def fp_rate(inatlas, alt=None):
"""
Return the False Positive Rate of a given matched map.
FPR = 1 - SPC = FP / (TN+FP).
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param alt: Alternative scoring (abs, shifted, norm), defaults to None.
:type alt: str, optional
:return: False Positive Rate.
:rtype: float
"""
if isinstance(inatlas, pcc.contact_atlas) is False:
logging.critical('Argument must be a Contact Atlas.')
raise TypeError
if (alt is not None and alt != 'raw' and alt != 'abs'
and alt != 'shifted' and alt != 'norm'):
logging.critical("Argument must be one of 'abs', 'shifted' or 'norm'.")
raise TypeError
if alt is None:
alt = 'raw'
nfp = float(inatlas.fp[alt])
ntn = float(inatlas.tn[alt])
if (ntn + nfp) == 0:
fpr = 'NaN'
else:
fpr = nfp / (ntn + nfp)
return fpr
def list_scores(inatlas, tag=None):
"""
Return a list containing all the results in order.
:param inatlas: Contact atlas.
:type inatlas: :class:`~pisacov.core.contacts.contact_atlas`
:param tag: None ('psicov'), or 'ccmpred', or 'deepmetapsicov', defaults to None.
:type tag: str, optional
:return: Results list.
:rtype: list
"""
values = []
psicovmodes = PSICOV_modes()
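    # Values are appended in the same order as the column names built by _scorenames().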
values.append(str(inatlas.conpred_raw.ncontacts))
values.append(str(inatlas.conkitmatch['raw'].ncontacts))
acc = accscore(inatlas)
values.append(str(acc))
if inatlas.tp['raw'] == 0:
values.append(str(0.0))
else:
values.append(str(acc/inatlas.tp['raw']))
if tag == 'psicov':
for m in psicovmodes:
acc = accscore(inatlas, alt=m)
values.append(str(acc))
if inatlas.tp[m] == 0:
values.append(str(0.0))
else:
values.append(str(acc/inatlas.tp[m]))
values.append(str(inatlas.tp['raw']))
values.append(str(precision(inatlas)))
values.append(str(coverage(inatlas)))
values.append(str(mcc(inatlas)))
values.append(str(jaccard(inatlas)))
return values
```
#### File: pisacov/iomod/_conf_ops.py
```python
from pisacov import __prog__, __description__, __version__
from pisacov import __author__, __date__, __copyright__
from pisacov.iomod import conf as pconf
from pisacov.iomod.paths import check_path
import logging
_surl = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/flatfiles/csv/pdb_chain_uniprot.csv.gz'
_uniurl = 'https://www.uniprot.org/uniprot/'
def _default_values(key):
defaultvals = {}
defaultvals['HHBLITS_PARAMETERS'] = [3, 0.001, 'inf', 50, 99]
defaultvals['UNICLUST_FASTA_PATH'] = None
defaultvals['NEIGHBOURS_MINDISTANCE'] = 2
defaultvals['REMOVE_INTRA_CONTACTS'] = True
return defaultvals[key]
def _default_keys():
defaultkeys = ['SIFTS_PATH', 'PISA_PATH', 'DMP_PATH', 'HHBLITS_PATH',
'HHBLITS_DATABASE_NAME', 'HHBLITS_DATABASE_DIR', 'HHBLITS_PARAMETERS',
'UNICLUST_FASTA_PATH', 'NEIGHBOURS_MINDISTANCE', 'REMOVE_INTRA_CONTACTS']
return defaultkeys
def _check_input(val, key):
compmsg = {'SIFTS_PATH': "SIFTS_PATH file not found.",
'PISA_PATH': "'PISA_PATH file not found.",
'HHBLITS_PATH': "HHBLITS_PATH file not found.",
'HHBLITS_DATABASE_NAME': "HHBLITS_DATABASE_DIR should be a string.",
'HHBLITS_DATABASE_DIR': "HHBLITS_DATABASE_DIR directory not found.",
'DMP_PATH': "DMP_PATH file not found.",
'HHBLITS_PARAMETERS': "Format should be [int, float, int or 'inf', int, int].",
'UNICLUST_FASTA_PATH': "UNICLUST_FASTA_PATH file not found.",
'NEIGHBOURS_MINDISTANCE': "NEIGHBOURS_MINDISTANCE should be an integer.",
'REMOVE_INTRA_CONTACTS': "REMOVE_INTRA_CONTACTS should be a boolean."}
errormsg = compmsg[key] + ' Please, update the configuration file using pisaconf.'
if (key == 'SIFTS_PATH' or key == 'PISA_PATH' or
key == 'HHBLITS_PATH' or key == 'DMP_PATH'):
try:
val = check_path(val, 'file')
except Exception:
logging.critical(errormsg)
raise NameError()
if (key == 'UNICLUST_FASTA_PATH'):
if val == "" or val is None:
val = None
else:
try:
val = check_path(val, 'file')
except Exception:
logging.critical(errormsg)
raise NameError()
elif (key == 'HHBLITS_DATABASE_DIR'):
try:
val = check_path(val, 'dir')
except Exception:
logging.critical(errormsg)
raise NameError()
elif (key == 'HHBLITS_DATABASE_NAME'):
if isinstance(val, str) is False:
try:
val = str(val)
except Exception:
logging.critical(errormsg)
raise ValueError()
else:
pass
elif (key == 'HHBLITS_PARAMETERS'):
if val is None or val == "":
val = _default_values(key)
else:
if isinstance(val, list) is False:
logging.critical('HHBLITS_PARAMETERS is not a python list.')
raise ValueError()
else:
for n in range(len(val)):
if n == 1:
if isinstance(val[n], float) is False:
try:
val[n] = float(val[n])
except Exception:
logging.critical(errormsg)
raise TypeError
else:
if n == 2 and val[n] == 'inf':
pass
else:
if isinstance(val[n], int) is False:
try:
val[n] = int(val[n])
except Exception:
logging.critical(errormsg)
raise TypeError
elif (key == 'NEIGHBOURS_MINDISTANCE'):
if val is None or val == "":
val = _default_values(key)
else:
            if isinstance(val, int) is False:
try:
val = int(val)
except Exception:
logging.critical(errormsg)
raise TypeError
elif (key == 'REMOVE_INTRA_CONTACTS'):
if val is None:
val = _default_values(key)
elif isinstance(val, str) is True:
if val == "":
val = _default_values(key)
elif val.lower() == "true":
val = True
elif val.lower() == "false":
val = False
else:
logging.critical(errormsg)
raise TypeError
elif isinstance(val, bool) is True:
pass
else:
logging.critical(errormsg)
raise TypeError
return val
def _parse_conf():
trymsg = ' Please, update the configuration file using pisaconf.'
compmsg = 'One or more of the requested parameters not found. '
try:
configfile = {'SIFTS_PATH': pconf.SIFTS_PATH,
'PISA_PATH': pconf.PISA_PATH,
'HHBLITS_PATH': pconf.HHBLITS_PATH,
'HHBLITS_DATABASE_NAME': pconf.HHBLITS_DATABASE_NAME,
'HHBLITS_DATABASE_DIR': pconf.HHBLITS_DATABASE_DIR,
'DMP_PATH': pconf.DMP_PATH}
except Exception:
logging.critical(compmsg + trymsg)
raise ValueError
try:
configfile['UNICLUST_FASTA_PATH'] = pconf.UNICLUST_FASTA_PATH
except Exception:
configfile['UNICLUST_FASTA_PATH'] = None
try:
configfile['HHBLITS_PARAMETERS'] = pconf.HHBLITS_PARAMETERS
except Exception:
configfile['HHBLITS_PARAMETERS'] = None
try:
configfile['NEIGHBOURS_MINDISTANCE'] = pconf.NEIGHBOURS_MINDISTANCE
configfile['REMOVE_INTRA_CONTACTS'] = False
except Exception:
configfile['NEIGHBOURS_MINDISTANCE'] = None
configfile['REMOVE_INTRA_CONTACTS'] = True
config = {}
for keystr, value in configfile.items():
config[keystr] = _check_input(value, keystr)
return config
def _initialise_inputs():
outvalues = _parse_conf()
outvalues['INSEQ'] = None
outvalues['INSTR'] = None
outvalues['ALTDB'] = None
outvalues['OUTROOT'] = None
outvalues['OUTCSVPATH'] = None
outvalues['UPTHRESHOLD'] = None
return outvalues
def _check_hhparams(paramlist):
"""Return a list with the validated HHBLITS input arguments.
:param paramlist: User-provided list of HHBLITS arguments.
:type paramlist: list of str
:raises ValueError: Arguments are not valid.
:return: Complete and checked list of HHBLITS parameters
:rtype: list of (int, float, str)
"""
if (paramlist == 'dmp' or paramlist == [3, 0.001, 'inf', 50, 99] or
paramlist == ['3', '0.001', 'inf', '50', '99']):
outparams = ['3', '0.001', 'inf', '50', '99']
elif (paramlist == 'hhblits' or paramlist == [2, 0.001, 1000, 0, 90] or
paramlist == ['2', '0.001', '1000', '0', '90']):
outparams = ['2', '0.001', '1000', '0', '90']
else:
try:
int(float(paramlist[0]))
float(paramlist[1])
if paramlist[2] != 'inf':
int(float(paramlist[2]))
            float(paramlist[3])
            float(paramlist[4])
except Exception:
logging.critical('One or more of HHblits arguments given are not valid')
raise ValueError
        outparams=[str(int(float(paramlist[0]))), str(float(paramlist[1])),
                   paramlist[2], str(float(paramlist[3])), str(float(paramlist[4]))]
if paramlist[2] != 'inf':
            outparams[2] = str(int(float(paramlist[2])))
return outparams
def _check_uniprot(inuniprot):
"""Return Uniprot segment threshold value and Uniprot database path.
:param inuniprot: Initial argument for Uniprot threshold
:type inuniprot: str
:raises ValueError: Argument is not valid.
:return: Threshold value and database path.
:rtype: float, str
"""
try:
float(inuniprot)
except Exception:
logging.critical('Uniprot threshold given not valid.')
raise ValueError
try:
dbpath = check_path(pconf.UNICLUST_FASTA_PATH, 'file')
except Exception:
logging.warning('Uniprot database file does not exist. Switching to server-only.')
dbpath = 'server-only'
return float(inuniprot), dbpath
def _sourcenames(short=False):
"""Return a list with the source names.
:param short: True for shorter names, defaults to False
:type short: bool, optional
:return: Source names.
:rtype: dict [list [str]]
"""
if short is False:
sources = ["psicov", "ccmpred", "deepmetapsicov"]
else:
sources = ["psicov", "ccmpred", "dmp"]
return sources
def _sources():
"""Return the subdir name and extension of each of the contact prediction types.
:return: Contact prediction types and location.
:rtype: dict [list [str]]
"""
sources = _sourcenames()
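    # Parallel lists: subdirectory, file suffix, conkit parser format and score
    # threshold for each contact-prediction source.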
confiledir = ["deepmetapsicov", "deepmetapsicov", "deepmetapsicov"]
confilesuffix = ["psicov", "ccmpred", "deepmetapsicov.con"]
conkittype = ["psicov", "ccmpred", "psicov"]
threshold = [0.2, 0.1, 0.1]
outsinfo = {}
for n in range(len(sources)):
outsinfo[sources[n]] = [confiledir[n], confilesuffix[n],
conkittype[n], threshold[n]]
return outsinfo
```
#### File: pisacov/iomod/paths.py
```python
from pisacov import __prog__, __description__, __version__
from pisacov import __author__, __date__, __copyright__
from pisacov.iomod import conf as pcnf
import os
import argparse
import glob
import logging
def check_path(path, typeofpath=None):
"""Return full path. If typeofpath is given, path is checked.
:param path: Input (local) path.
:type path: str
:param typeofpath: The type of path, 'dir', 'file' or 'either', defaults to None.
:type typeofpath: str, optional
:raises ValueError: When given typeofpath is none of 'dir', 'file' or 'either'.
:raises argparse: If wrong path or pathtype given.
:return: Absolute path.
:rtype: str
"""
pathok = False
if typeofpath == 'dir' or typeofpath == 'either':
path = os.path.abspath(path)
if os.path.isdir(os.path.join(path, '')) is True:
path = os.path.abspath(os.path.join(path, ''))
pathok = True
else:
if os.path.isfile(path) is True:
pathok = True
elif typeofpath == 'file':
path = os.path.abspath(path)
if os.path.isfile(path) is True:
pathok = True
elif typeofpath is None:
path = os.path.abspath(path)
pathok = True
else:
logging.critical("Input string 'typeofpath' should be either 'dir' or 'file'.")
raise ValueError
if pathok is True:
return path
else:
logging.critical(f"readable_dir:{path} is not a valid path")
raise argparse.ArgumentTypeError()
def check_wildcard(path):
"""Return list of several files full path.
:param path: Path containing wildcard '*'.
:type path: str
:return: List of absolute paths.
:rtype: list [str]
"""
abspath = check_path(path)
return glob.glob(abspath)
def mdir(dirpath):
"""Create directory recursively if not existent.
:param dirpath: Path to directory to be created.
:type dirpath: str
"""
if not os.path.exists(dirpath):
os.makedirs(dirpath)
return
def output_dir(root, baseid):
"""Return path to output directory.
:param baseid: PDB ID or other id to identify the molecule.
:type baseid: str
:return: Path to output directory.
:rtype: str
"""
if not isinstance(baseid, str):
try:
str(baseid)
except Exception:
logging.critical('Not a valid argument. String expected.')
raise ValueError
outpath = os.path.join(root, str(baseid).lower(), "")
return outpath
def mkdirout(root, baseid=None):
"""Check or create output directories.
:param root: Path to output directory.
:type root: str
:param baseid: PDB ID of molecule been analised, defaults to None.
:type baseid: str
:raises OSError: If the system does not allow to create the directories.
:return: Dictionary containing absolute paths to output directories.
:rtype: dict of str
"""
outpaths = {}
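    # For each required directory: reuse it if it already exists, otherwise create it.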
try:
outpaths['root'] = check_path(root, 'dir')
except Exception:
try:
os.mkdir(os.path.join(root, ""))
except Exception:
errormsg = ("Unable to create output directory " +
str(os.path.join(root, "")))
logging.critical(errormsg)
raise OSError
outpaths['root'] = check_path(root, 'dir')
if baseid is not None:
try:
outpaths['pdbid'] = check_path(os.path.join(outpaths['root'],
baseid, ""), 'dir')
except Exception:
try:
os.mkdir(os.path.join(outpaths['root'], baseid, ""))
except Exception:
errormsg = ("Unable to create output directory " +
str(os.path.join(outpaths['root'], baseid, "")))
logging.critical(errormsg)
raise OSError
outpaths['pdbid'] = check_path(os.path.join(outpaths['root'], baseid, ""), 'dir')
try:
outpaths['pisa'] = check_path(os.path.join(outpaths['pdbid'], 'pisa_interfaces', ""), 'dir')
except Exception:
try:
os.mkdir(os.path.join(outpaths['pdbid'], 'pisa_interfaces', ""))
except Exception:
errormsg = ("Unable to create output directory " +
str(os.path.join(outpaths['pdbid'],
'pisa_interfaces', "")))
logging.critical(errormsg)
raise OSError
outpaths['pisa'] = check_path(os.path.join(outpaths['pdbid'],'pisa_interfaces', ""), 'dir')
try:
outpaths['pisacov'] = check_path(os.path.join(outpaths['pdbid'], 'pisacov', ""), 'dir')
except Exception:
try:
os.mkdir(os.path.join(outpaths['pdbid'], 'pisacov', ""))
except Exception:
errormsg = ("Unable to create output directory " +
str(os.path.join(outpaths['pdbid'], 'pisacov', "")))
logging.critical(errormsg)
raise OSError
outpaths['pisacov'] = check_path(os.path.join(outpaths['pdbid'], 'pisacov', ""), 'dir')
try:
outpaths['dmp'] = check_path(os.path.join(outpaths['pdbid'], 'dmp_predictions', ""), 'dir')
except Exception:
try:
os.mkdir(os.path.join(outpaths['pdbid'], 'dmp_predictions', ""))
except Exception:
errormsg = ("Unable to create output directory " +
str(os.path.join(outpaths['pdbid'], 'dmp_predictions', "")))
logging.critical(errormsg)
raise OSError
outpaths['dmp'] = check_path(os.path.join(outpaths['pdbid'], 'dmp_predictions', ""), 'dir')
return outpaths
```
#### File: pisacov/sys/dmp.py
```python
from pisacov import __prog__, __description__, __version__
from pisacov import __author__, __date__, __copyright__
from pisacov.iomod.conf import DMP_PATH
from pisacov.iomod import paths as ppaths
import os
import logging
def rundmp(spath, msapath, outdir):
"""Run DeepMetaPSICOV to produce contact prediction lists.
    :param spath: Input sequence filepath.
    :type spath: str
    :param msapath: Input MSA filepath.
    :type msapath: str
    :param outdir: Output directory.
    :type outdir: str
"""
dmp_exec = '"'+ppaths.check_path(DMP_PATH, 'file')+'"'
try:
os.system(dmp_exec + ' -i ' + spath + ' -a ' + msapath)
except Exception:
logging.critical(' An error occurred while executing DeepMetaPSICOV.')
raise OSError
``` |
{
"source": "JJavierga/lanenet-lane-detection",
"score": 3
} |
#### File: lanenet-lane-detection/mytools/bin2seg.py
```python
import cv2
import numpy as np
class bin2seg():
def __init__(self):
self.img=0
[self.fils, self.cols]=[0,0]
self.img_embedding=0
self.used=0
self.threshold=255
def reuse(self,img,threshold):
# padding
[orig_fils,orig_cols]=img.shape
border = np.zeros((orig_fils+2,orig_cols+2))
#print(img.shape)
#print(border.shape)
#print(border[1:-2,1:-2].shape)
#print(border[1:-1,1:-1].shape)
border[1:-1,1:-1]=img
self.img=border
[self.fils, self.cols]=self.img.shape
self.used=np.zeros((self.fils,self.cols))
self.img_embedding=self.used
self.threshold=threshold
def __del__(self):
print('Object erased')
def extend_colour(self,pos_f, pos_c, count):
"""
:input:
"""
pendiente_f = [pos_f]
pendiente_c = [pos_c]
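        # Iterative flood fill: take the front pixel, queue any unvisited
        # 8-connected neighbours at or above the threshold and colour them
        # with this component's label (count*30).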
while pendiente_f:
pos_f = pendiente_f[0]
pos_c = pendiente_c[0]
if (pos_f - 1 >= 0) and (pos_f + 1 <= self.fils - 1) and (pos_c - 1 >= 0) and (pos_c + 1 <= self.cols - 1):
for i in range(-1,2):
for j in range(-1,2):
new_f = pos_f + i
new_c = pos_c + j
#print(new_f)
#print(new_c)
if(self.img[new_f, new_c]>=self.threshold and self.used[new_f, new_c]==False):
#print('G')
pendiente_f.append(new_f)
pendiente_c.append(new_c)
self.used[new_f, new_c]=True
self.img_embedding[new_f, new_c]= count*30
pendiente_f.pop(0)
pendiente_c.pop(0)
def colour_lines(self):
"""
:input: image main path, images path new images path
"""
""" This should be done by calling programm
complete_path=ops.join(folder_path, subimg_path)
#print(complete_path)
img=cv2.imread(complete_path,cv2.IMREAD_GRAYSCALE)
"""
count=1
for col in range(self.cols):
for fil in reversed(range(self.fils)):
if self.img[fil,col]>=self.threshold and self.used[fil,col]==False:
#print('F')
self.extend_colour(fil, col, count)
count+=1
#print(new_img_path)
#print(folder_path)
#print(subimg_path)
""" This should be done by calling programm
cv2.imwrite(new_img_path,img_embedding)
"""
return self.img_embedding
```
#### File: lanenet-lane-detection/mytools/Data_augmentation.py
```python
import matplotlib as plt
import argparse
import os.path as ops
from get_files import file_searcher
import os
from os import walk
import cv2
### Don't use /home/, use instead /home
def init_args():
"""
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i','--image_path', type=str, help='The image path or the src image save dir')
return parser.parse_args()
if __name__ == '__main__':
"""
test code
"""
# init args
args = init_args()
Searcher=file_searcher(args.image_path)
file_list=Searcher.get_files()
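    # Augment every image with eight 45-degree rotations; each rotation is saved
    # as-is (suffix _<angle>_0) and horizontally flipped (suffix _<angle>_1).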
for i in range(8):
angle=i*45
name0='_'+str(angle)+'_0'
name1='_'+str(angle)+'_1'
for file_name in file_list:
print(file_name)
comp_path=ops.join(args.image_path, file_name)
ending=file_name[-4:]
img=cv2.imread(comp_path)
#print(ops.join(args.image_path,file_name))
newname0=file_name[0:-4]+name0+ending
#print(file_name[0:-4]+name0+ending)
rotation_matrix = cv2.getRotationMatrix2D((img.shape[0] / 2, img.shape[1] / 2), angle, 1)
img2 = cv2.warpAffine(img, rotation_matrix, (img.shape[0], img.shape[1]))
cv2.imwrite(ops.join(args.image_path,newname0), img2)
#print(file_name[0:-5]+name1+ending)
newname1=file_name[0:-4]+name1+ending
img2=cv2.flip(img2, 1) # Horizontally
cv2.imwrite(ops.join(args.image_path,newname1), img2)
```
#### File: lanenet-lane-detection/mytools/renameall.py
```python
import cv2
import matplotlib as plt
import argparse
import os.path as ops
import os
from os import walk
import numpy as np
def init_args():
"""
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i','--image_path', type=str, help='The image path or the src image save dir')
return parser.parse_args()
if __name__ == '__main__':
"""
test code
"""
# init args
args = init_args()
for f in os.listdir(args.image_path):
if f.endswith('.png'):
#newname=f[:-4]+'.png'
#print(newname)
img=cv2.imread(ops.join(args.image_path,f),cv2.IMREAD_GRAYSCALE)
#print(img.shape)
#cv2.imwrite(ops.join(args.image_path,f),img)
#os.remove(ops.join(args.image_path,f))
#os.rename(ops.join(args.image_path,f),ops.join(args.image_path,newname))
```
#### File: lanenet-lane-detection/mytools/train_file.py
```python
import os
import argparse
from os.path import join
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-p','--folders_path', type=str, help='Path to gt, binary and instance folders')
return parser.parse_args()
if __name__ == '__main__':
"""
    Builds train.txt listing matching gt_image, gt_binary_image and gt_instance_image paths
"""
args = get_args()
with open(join(args.folders_path,'train.txt'),'w') as f:
gt_binary_folder=join(args.folders_path,'gt_binary_image')
gt_instance_folder=join(args.folders_path,'gt_instance_image')
gt_image_folder=join(args.folders_path,'gt_image')
for file_name in os.listdir(gt_image_folder):
binary_file_path=join(gt_binary_folder,file_name)
instance_file_path=join(gt_instance_folder,file_name)
image_file_path=join(gt_image_folder,file_name)
info = '{:s} {:s} {:s}'.format(image_file_path, binary_file_path, instance_file_path)
f.write(info + '\n')
``` |
{
"source": "jjaviergalvez/CarND-Term3-Quizes",
"score": 4
} |
#### File: CarND-Term3-Quizes/search/value.py
```python
grid = [[0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0]]
goal = [len(grid)-1, len(grid[0])-1]
cost = 1 # the cost associated with moving from a cell to an adjacent one
delta = [[-1, 0 ], # go up
[ 0, -1], # go left
[ 1, 0 ], # go down
[ 0, 1 ]] # go right
delta_name = ['^', '<', 'v', '>']
def compute_value(grid,goal,cost):
# ----------------------------------------
# insert code below
# ----------------------------------------
# make sure your function returns a grid of values as
# demonstrated in the previous video.
value = [[99 for col in range(len(grid[0]))] for row in range(len(grid))]
closed = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]
# initialization
value[goal[0]][goal[1]] = 0
closed[goal[0]][goal[1]] = 1
lst = [goal]
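    # Breadth-first expansion from the goal: lst acts as a growing FIFO queue,
    # so every reachable free cell receives its minimum cost-to-go.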
for e in lst:
x = e[0]
y = e[1]
step = value[x][y] + cost
for i in range(len(delta)):
x2 = x + delta[i][0]
y2 = y + delta[i][1]
if x2 >= 0 and x2 < len(grid) and y2 >=0 and y2 < len(grid[0]):
if closed[x2][y2] == 0 and grid[x2][y2] == 0:
value[x2][y2] = step
lst.append([x2,y2])
closed[x2][y2] = 1
return value
result = compute_value(grid, goal, cost)
for row in result:
print(row)
``` |
{
"source": "jjaw/888-RaBBit",
"score": 3
} |
#### File: jjaw/888-RaBBit/bit_rabbit_generation_script.py
```python
from PIL import Image
import csv
# library to convert rgb code to name
from webcolors import rgb_to_name
import webcolors
# library to work with arrays
# https://numpy.org/
import numpy as np
import pandas as pd
# library to interact with the operating system
import os
# library to generate random integer values
from random import seed
from random import randint
from numpy.lib.shape_base import dstack
# library to deepcopy
import copy
# custom modules
import lib.coordinates as coordinates
from lib.randomizer import randomize_by_pixel
from lib.rabbit_construction import create_rabbit
import lib.trackers
# gets path to be used in image creation mechanism, using os
#
# original code, doesn't work
# dirname = os.path.dirname(__file__)
dirname = os.path.dirname(os.path.abspath(__file__))
# sets final image dimensions as 336x336 pixels
# the original 24x24 pixel image will be expanded to these dimensions
dimensions = 336, 336
# tells how many times to iterate through the following mechanism
# which equals the number of rabbits
# e.g.
# for x in range(0, 201)
# would generate 201 rabbits numbered 0-200
tracker = {}
def render_random_image(seed_num, n):
# set seed number
seed(seed_num)
# dictionary to track attributes of each rabbit
rabbit_attr = {}
# dictionary to track how many times each attribute appeared
total_attr = lib.trackers.total_attr
for x in range(1, 1+n):
# initialize attribute tracker for each rabbits
# tracking background color, skin color, and total accessories
rabbit_attr[x] = {
"bg_rgb": "",
"bg_color": "",
"sk_rgb": "",
"sk_color": "",
"acc": []
}
# background color
# randomize bg color
r, g, b = randint(133, 255), randint(133, 255), randint(133, 255)
bg = (r, g, b)
rabbit_attr[x]["bg_rgb"] = bg
try:
rabbit_attr[x]["bg_color"] = rgb_to_name(bg, spec='css3')
except:
rabbit_attr[x]["bg_color"] = "None"
# outline color
ol = (19, 0, 0)
# special attributes R colors
# r1, r2, r3 = (0,0,0), (0,0,0), (0,0,0)
# randomize skin color
r, g, b = randint(0, 255), randint(0, 255), randint(0, 255)
sk = (r, g, b)
rabbit_attr[x]["sk_rgb"] = sk
try:
rabbit_attr[x]["sk_color"] = rgb_to_name(sk, spec='css3')
except:
rabbit_attr[x]["sk_color"] = "None"
# create_rabbit module creates a basic rabbit canvas for us
basic_rabbit = create_rabbit(bg, ol, sk)
# Creates a deep copy of the canvas in order to add accessories
pixels = copy.deepcopy(basic_rabbit)
# 1. check for pixelized skin or background
#
# skin: 35.00%
# background: 13.33%
if randint(0, 10000)<= 3500:
#randomizer for skin by pixel
randomize_by_pixel(pixels, sk)
rabbit_attr[x]["sk_color"] = "pixelized"
# counter to track attribute count of the total collection
total_attr["sk_pixel"] += 1
else:
total_attr["sk_solid"] += 1
if randint(0, 10000) <= 1333:
#randomizer for bg by pixel
randomize_by_pixel(pixels, bg)
rabbit_attr[x]["bg_color"] = "pixelized"
total_attr["bg_pixel"] += 1
else:
total_attr["bg_solid"] += 1
# 2. check for mouth accessories
#
# Total: 55.55%
# weed: 33.33% * 55.55%
# cig: 15.00% * 55.5%
# candy: 51.67% * 55.55%
mouth_prob = 5555
mouth_attr = False
if randint(0, 10000) <= mouth_prob:
mouth_attr = True
if mouth_attr:
mouth_accessory = randint(0, 10000)
if mouth_accessory <= 3333:
# draw weed
coordinates.weed_draw(pixels)
rabbit_attr[x]["acc"].append("weed")
total_attr["weed"] += 1
mouth_attr = False
elif mouth_accessory >= 8500:
# draw cig
burn_color = coordinates.cig_draw(pixels)
rabbit_attr[x]["acc"].append("cig"+ burn_color)
# tracking cig by burn color
if burn_color == "_red_burn":
total_attr["cig_red"] += 1
else:
total_attr["cig_blue"] +=1
mouth_attr = False
else:
# draw candy
candy_color = coordinates.candy_draw(pixels)
rabbit_attr[x]["acc"].append("candy" + candy_color)
# tracking cig by burn color
if candy_color == "_red":
total_attr["candy_red"] += 1
else:
total_attr["candy_green"] +=1
mouth_attr = False
else:
print("No mouth accessory")
# 3. check for robot arm
#
# Robot Arm: 15.88%
# Requires special interaction with the back acccessories:
# Will render robot arm after the back accessories check
robot_arm_prob = 1588
robot_arm_attr = False
# robot_drawn starts as TRUE, shows that it has already been drawn.
# only when robot_drawn == False and robot_arm_attr == True will the robot_arm be drawn
robot_drawn = True
if randint(0, 10000) <= robot_arm_prob:
# Variable to track whether model has robot arm
robot_arm_attr = True
# Variable to track whether the robot arm has been drawn
robot_drawn = False
# 4. check for jetpack or angel
# Total: 18.88%
# Angel_Wing: 76.67% * 18.88%
# Jet_Pack: 23.33% * 18.88%
back_prob = 1888
jetpack_prob = 2333
back_attr = False
if randint(0, 10000) <= back_prob:
back_attr = True
if back_attr:
# Check which back item will be drawn
# Need special logic for interaction with the robot arm accessory
back_accessory = randint(0, 10000)
if back_accessory <= jetpack_prob and robot_arm_attr:
coordinates.robot_arm_draw(pixels)
coordinates.jetpack_draw(pixels)
rabbit_attr[x]["acc"].append("robot_arm")
rabbit_attr[x]["acc"].append("jetpack")
total_attr["robot_arm"] += 1
total_attr["jetpack"] += 1
# set back attributes back to false after it's drawn
back_attr = False
# set robot arm attribute back to false after it's drawn
robot_arm_attr = False
# robot has been drawn
robot_drawn = True
elif back_accessory > jetpack_prob and robot_arm_attr:
coordinates.angel_draw(pixels)
coordinates.robot_arm_draw(pixels)
rabbit_attr[x]["acc"].append("angel")
rabbit_attr[x]["acc"].append("robot_arm")
total_attr["angel"] += 1
total_attr["robot_arm"] += 1
back_attr = False
robot_arm_attr = False
robot_drawn = True
elif back_accessory <= jetpack_prob:
coordinates.jetpack_draw(pixels)
rabbit_attr[x]["acc"].append("jetpack")
total_attr["jetpack"] += 1
back_attr = False
elif back_accessory > jetpack_prob:
coordinates.angel_draw(pixels)
rabbit_attr[x]["acc"].append("angel")
total_attr["angel"] += 1
back_attr = False
else:
print("No back accessory")
# check whether the robot arm has been drawn
if robot_arm_attr and not robot_drawn:
coordinates.robot_arm_draw(pixels)
rabbit_attr[x]["acc"].append("robot_arm")
total_attr["robot_arm"] += 1
# set arm attribute back after it's drawn
robot_arm_attr = False
# reset arm drawn tracker after it's drawn
robot_drawn = False
else:
print("No robot arm accessory")
# 5. check for whisker
#
# Whisker: 8.88%
#
whisker_prob = 888
whisker_attr = False
if randint(0, 10000) < whisker_prob:
whisker_attr = True
if whisker_attr:
coordinates.whisker_draw(pixels)
rabbit_attr[x]["acc"].append("whisker")
total_attr["whisker"] += 1
whisker_attr = False
else:
print("Not a wise rabbit")
# 6. check for goggle
#
# Goggle: 31.37
#
goggle_prob = 3137
goggle_attr = False
if randint(0, 10000) <= goggle_prob:
goggle_attr = True
if goggle_attr:
lens_color = coordinates.goggle_draw(pixels)
# tracking goggle by lens color
if lens_color == "_cyan_lens":
total_attr["goggle_cyan"] += 1
elif lens_color == "_yellow_lens":
total_attr["goggle_yellow"] += 1
elif lens_color == "_magenta_lens":
total_attr["goggle_magenta"] += 1
else:
total_attr["goggle_white"] += 1
rabbit_attr[x]["acc"].append("goggle" + lens_color)
goggle_attr = False
else:
print("Does not have a goggle")
# 7. check for diamond
#
# Diamond: 2.88%
#
diamond_prob = 288
diamond_attr = False
if randint(0, 10000) <= diamond_prob:
diamond_attr = True
if diamond_attr:
coordinates.diamond_draw(pixels)
rabbit_attr[x]["acc"].append("diamond")
total_attr["diamond"] += 1
diamond_attr = False
else:
print("Paper Hand")
# sort rabbit personal attributes
rabbit_attr[x]["acc"].sort
print("Assembling... number " + str(x))
# convert the pixels into an array using numpy
array = np.array(pixels, dtype=np.uint8)
# use PIL to create an image from the new array of pixels
new_image = Image.fromarray(array)
new_image = new_image.resize(dimensions, resample=0)
imgname = dirname + '/rabbit_images/' + (str(x)) + '.png'
new_image.save(imgname)
# dump total stats
(pd.DataFrame.from_dict(data=total_attr, orient='index')
.to_csv('total_stats.csv', mode="w"))
# dump rabbit individual stat
(pd.DataFrame.from_dict(data=rabbit_attr, orient='index')
.to_csv('rabbit_stats.csv', mode="w"))
# render_random_image(int seed_x, int num)
# seed: for random.seed()
# num: number of images to generate
seed_num = 8888888888 # An auspicious beginning... says the rabbit
num = 317 # This is a sideway carrot
render_random_image(seed_num, num)
``` |
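One pattern worth noting: every trait roll above uses `randint(0, 10000) <= threshold` to approximate a percentage. A small helper sketch of that idiom (not part of the original script; the thresholds stay as in the code above):
```python
from random import randint

def chance(percent):
    """Roughly a percent% chance of True, mirroring the
    randint(0, 10000) <= threshold rolls used throughout the script."""
    return randint(0, 10000) <= int(percent * 100)

# e.g. the 55.55% mouth-accessory roll is equivalent to: if chance(55.55): ...
```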
{
"source": "j-jaworski/reach",
"score": 2
} |
#### File: airflow/dags/policy.py
```python
import datetime
import os
from collections import namedtuple
from airflow import DAG
import airflow.configuration as conf
import airflow.utils.dates
from reach.airflow.tasks import es_index_epmc_metadata
from reach.airflow.tasks import es_index_fulltext_docs
from reach.airflow.tasks import es_index_fuzzy_matched
from reach.airflow.tasks import fuzzy_match_refs
from reach.airflow.tasks.spider_operator import SpiderOperator
from reach.airflow.tasks.extract_refs_operator import ExtractRefsOperator
from reach.airflow.tasks.parse_pdf_operator import ParsePdfOperator
ORGANISATIONS = [
'who_iris',
'nice',
'gov_uk',
'msf',
'unicef',
'parliament',
]
DEFAULT_ARGS = {
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(2),
'retries': 0,
'retry_delay': datetime.timedelta(minutes=5),
}
ItemLimits = namedtuple('ItemLimits', ('spiders', 'index'))
#
# Configuration & paths
#
def verify_s3_prefix():
reach_s3_prefix = conf.get("core", "reach_s3_prefix")
assert reach_s3_prefix.startswith('s3://')
assert not reach_s3_prefix.endswith('/')
verify_s3_prefix()
def get_es_hosts():
result = conf.get("core", "elasticsearch_hosts")
assert result
return [x.strip() for x in result.split(',')]
def to_s3_output(dag, *args):
""" Returns the S3 URL for any output file for the DAG. """
components, suffix = args[:-1], args[-1]
path = '/'.join(components)
slug = '-'.join(components)
return (
'{{ conf.get("core", "reach_s3_prefix") }}'
'/output/%s/%s/%s%s'
) % (dag.dag_id, path, slug, suffix)
def to_s3_output_dir(dag, *args):
""" Returns the S3 URL for any output directory for the DAG. """
path = '/'.join(args)
slug = '-'.join(args)
return (
'{{ conf.get("core", "reach_s3_prefix") }}'
'/output/%s/%s/%s'
) % (dag.dag_id, path, slug)
def to_s3_model(*args):
""" Returns the S3 URL for to a model, rooted under
${REACH_S3_PREFIX}/models/"""
return (
'{{ conf.get("core", "reach_s3_prefix") }}'
'/models/%s'
) % '/'.join(args)
def create_org_pipeline(dag, organisation, item_limits, spider_years):
""" Creates all tasks tied to a single organisation::
Spider -> ParsePdf
\-> ExtractRefs -> FuzzyMatchRefs
\-> ESIndexFulltextdocs [TODO: -> FullTextMatchRefs]
"""
spider = SpiderOperator(
task_id='Spider.%s' % organisation,
organisation=organisation,
dst_s3_dir=to_s3_output_dir(
dag, 'spider', organisation),
item_years=spider_years,
item_max=item_limits.spiders,
dag=dag)
parsePdf = ParsePdfOperator(
task_id='ParsePdf.%s' % organisation,
organisation=organisation,
src_s3_dir=spider.dst_s3_dir,
dst_s3_key=to_s3_output(
dag, 'parsed-pdfs', organisation, '.json.gz'),
dag=dag)
esIndexFullTexts = es_index_fulltext_docs.ESIndexFulltextDocs(
task_id="ESIndexFulltextDocs.%s" % organisation,
src_s3_key=parsePdf.dst_s3_key,
organisation=organisation,
es_host='elasticsearch',
item_limits=item_limits.index,
es_index='-'.join([dag.dag_id, 'docs']),
dag=dag
)
parser_model = to_s3_model(
'reference_parser_models',
'reference_parser_pipeline.pkl')
extractRefs = ExtractRefsOperator(
task_id='ExtractRefs.%s' % organisation,
model_path=parser_model,
src_s3_key=parsePdf.dst_s3_key,
dst_s3_key=to_s3_output(
dag, 'extracted-refs', organisation, '.json.gz'),
dag=dag)
fuzzyMatchRefs = fuzzy_match_refs.FuzzyMatchRefsOperator(
task_id='FuzzyMatchRefs.%s' % organisation,
es_hosts=get_es_hosts(),
src_s3_key=extractRefs.dst_s3_key,
dst_s3_key=to_s3_output(
dag, 'fuzzy-matched-refs', organisation, '.json.gz'),
es_index='-'.join([dag.dag_id, 'epmc', 'metadata']),
dag=dag,
)
esIndexFuzzyMatched = es_index_fuzzy_matched.ESIndexFuzzyMatchedCitations(
task_id="ESIndexFuzzyMatchedCitations.%s" % organisation,
src_s3_key=fuzzyMatchRefs.dst_s3_key,
organisation=organisation,
es_host='elasticsearch',
item_limits=item_limits.index,
es_index='-'.join([dag.dag_id, 'citations']),
dag=dag
)
parsePdf >> esIndexFullTexts
spider >> parsePdf >> extractRefs >> fuzzyMatchRefs >> esIndexFuzzyMatched
return fuzzyMatchRefs
def create_dag(dag_id, default_args, spider_years,
item_limits):
"""
Creates a DAG.
Args:
dag_id: identifier for the DAG
default_args: default args for the DAG
spider_years: years passed to the spiders
item_limits: ItemLimits capping spidered and indexed items
"""
dag = DAG(
dag_id=dag_id,
default_args=default_args,
schedule_interval='0 0 * * 0,3'
)
epmc_metadata_key = '/'.join([
'{{ conf.get("core", "openresearch_s3_prefix") }}',
'output', 'open-research', 'epmc-metadata', 'epmc-metadata.json.gz'
])
esIndexPublications = es_index_epmc_metadata.ESIndexEPMCMetadata(
task_id='ESIndexEPMCMetadata',
src_s3_key=epmc_metadata_key,
es_host='elasticsearch',
max_epmc_metadata=item_limits.index,
es_index='-'.join([dag_id, 'epmc', 'metadata']),
dag=dag
)
for organisation in ORGANISATIONS:
fuzzyMatchRefs = create_org_pipeline(
dag,
organisation,
item_limits,
spider_years,
)
esIndexPublications >> fuzzyMatchRefs
return dag
test_dag = create_dag(
'policy-test',
DEFAULT_ARGS,
[2018],
ItemLimits(10, 500),
)
policy_dag = create_dag(
'policy',
DEFAULT_ARGS,
list(range(2012, datetime.datetime.now().year + 1)),
ItemLimits(None, None),
)
```
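As a quick sanity check on the path helpers above, here is what they expand to (before Jinja fills in the S3 prefix), assuming the `policy` DAG:
```python
# to_s3_output(dag, 'parsed-pdfs', 'nice', '.json.gz') ->
#   '{{ conf.get("core", "reach_s3_prefix") }}/output/policy/parsed-pdfs/nice/parsed-pdfs-nice.json.gz'
# to_s3_model('reference_parser_models', 'reference_parser_pipeline.pkl') ->
#   '{{ conf.get("core", "reach_s3_prefix") }}/models/reference_parser_models/reference_parser_pipeline.pkl'
```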
#### File: reach/airflow/safe_import.py
```python
from contextlib import contextmanager
from threading import Lock
# Not a re-entrant lock b/c we believe imports of this sort should
# only happen once from the calling thread.
SAFE_IMPORT_LOCK = Lock()
@contextmanager
def safe_import():
"""
Context manager for ensuring that only one thread is importing
at a time. If two threads enter this context, the second will fail
with an exception so that we can't get caught in an import lock.
"""
acquired = SAFE_IMPORT_LOCK.acquire(blocking=False)
try:
if not acquired:
# NB: we could, instead, just wait here. But the invariant
# we're expecting is that, thanks to how the celery executor
# works, only one call to execute() should happen at a time,
# because only one thread should ever be running.
raise Exception('Multiple imports attempted at once!')
yield
finally:
if acquired:
SAFE_IMPORT_LOCK.release()
```
#### File: airflow/tasks/parse_pdf_operator.py
```python
import os
import logging
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from reach.airflow.safe_import import safe_import
from reach.sentry import report_exception
logger = logging.getLogger(__name__)
class ParsePdfOperator(BaseOperator):
"""
Parses the scraped PDFs of an organisation and writes the parsed output to S3.
Args:
organisation: The organisation to pull documents from.
"""
template_fields = (
'dst_s3_key',
'src_s3_dir',
)
KEYWORD_SEARCH_CONTEXT = 2
@apply_defaults
def __init__(self, organisation, src_s3_dir, dst_s3_key, *args, **kwargs):
super(ParsePdfOperator, self).__init__(*args, **kwargs)
self.organisation = organisation
self.src_s3_dir = src_s3_dir
self.dst_s3_key = dst_s3_key
@report_exception
def execute(self, context):
with safe_import():
import reach.pdf_parser.main as pdf_parser_main
os.environ.setdefault(
'SCRAPY_SETTINGS_MODULE',
'scraper.wsf_scraping.settings'
)
if not self.src_s3_dir.startswith('s3://'):
raise ValueError
if not self.dst_s3_key.startswith('s3://'):
raise ValueError
input_uri = 'manifest' + self.src_s3_dir
pdf_parser_main.parse_all_pdf(
self.organisation,
input_uri,
self.dst_s3_key,
)
```
#### File: reach/elastic/import_sections_from_s3.py
```python
import tempfile
import csv
import json
import random
import boto3
from urllib.parse import urlparse
from argparse import ArgumentParser
from elasticsearch import Elasticsearch
from . import common
parser = ArgumentParser()
parser.add_argument('file_url')
parser.add_argument('-s', '--size',
default=1024,
type=int,
help=('The megabytes to pull. Defaults to 1024. '
'A negative value will pull the entire dataset'))
parser.add_argument('-H', '--host',
default='127.0.0.1',
help='Address of the Elasticsearch server')
parser.add_argument('-P', '--port',
default='9200',
help='Port of the Elasticsearch server')
parser.add_argument('-C', '--clean', dest='clean', action='store_true',
help='Clean the elasticsearch database before import')
def write_to_es(es, item):
""" Writes the given csv line to elasticsearch.
Args:
es: a living connection to elacticsearch
item: an dict from a json line.
"""
if item["sections"]:
print(item['hash'])
section = item['sections']['Reference']
body = json.dumps({
'section': section,
'uri': item['uri'],
'hash': item['hash']
})
es.index(
index='datalabs-sections',
ignore=400,
body=body,
doc_type='section'
)
def clean_es(es):
""" Empty the elasticsearch database.
Args:
es: a living connection to elasticsearch
"""
common.recreate_index(es, 'datalabs-sections')
def import_data(file_url, es, size):
""" Import data from a given file in elasticsearch.
Args:
file_url: a file url
es: a living connection to elasticsearch
size: the size of the data to pull in bytes
"""
def yield_from_s3(s3_url, size):
s3 = boto3.resource('s3')
parsed_url = urlparse(s3_url)
print('Getting %s from %s bucket' % (parsed_url.path, parsed_url.netloc))
s3_file = s3.Object(bucket_name=parsed_url.netloc, key=parsed_url.path[1:])
with tempfile.TemporaryFile(mode='r+b') as tf:
rows = s3_file.get(Range='bytes=0-%d' % size)
for data in rows['Body']:
tf.write(data)
print('Got the fileobj')
tf.seek(0)
with open(tf.fileno(), mode='r', closefd=False) as json_file:
for line in json_file:
yield line
def yield_from_local(file_url, size):
with open(file_url) as f:
for line in f:
yield line
yield_lines = yield_from_s3 if file_url.startswith('s3://') else yield_from_local
for line in yield_lines(file_url, size):
item = json.loads(line)
write_to_es(es, item)
if __name__ == '__main__':
args = parser.parse_args()
es = Elasticsearch([{'host': args.host, 'port': args.port}])
if args.clean:
clean_es(es)
size = args.size * 1024 ** 2  # convert megabytes to bytes
import_data(args.file_url, es, size)
```
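A minimal programmatic equivalent of the CLI above (host, port and S3 URL are placeholders; the module path follows the file layout shown in the header):
```python
from elasticsearch import Elasticsearch

from reach.elastic.import_sections_from_s3 import clean_es, import_data

es = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
clean_es(es)                                                        # optional, mirrors the -C flag
import_data('s3://my-bucket/sections.json.gz', es, 64 * 1024 ** 2)  # pull ~64 MiB
```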
#### File: reach/pdf_parser/pdf_parse.py
```python
import errno
import io
import logging
import math
import os
import subprocess
import tempfile
import lxml.etree
from lxml.etree import XMLSyntaxError
from .objects.PdfObjects import PdfFile, PdfLine, PdfPage
from .tools.extraction import (_find_elements, _flatten_text,
_flatten_fontspec)
MAX_HTML_SIZE = 64 * 1024 * 1024
ERR_PDF2HTML_NONZERO_EXIT = 'pdf2html failed'
ERR_NO_FILE = 'pdf2html produced no output'
ERR_EMPTY_FILE = 'html file was empty'
ERR_FILE_TOO_LARGE = 'html file too large'
ERR_XML_SYNTAX = 'xml file has some syntax error'
BASE_FONT_SIZE = -10
logger = logging.getLogger(__name__)
def parse_pdf_document(document):
""" Parses a file using pdftohtml, returning a
PdfFile object, easier to analyse.
Args:
document: file object, pointing to a named file
"""
with tempfile.NamedTemporaryFile(suffix='.xml', mode='w+b') as tf:
# Run pdftohtml on the document, and output an xml formatted document
cmd = [
'pdftohtml',
'-i',
'-xml',
'-zoom',
'1.5',
document.name,
tf.name
]
try:
subprocess.check_call(
cmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
except subprocess.CalledProcessError as e:
logger.warning(
"The pdf [%s] could not be converted: %r",
document.name,
e.stderr,
)
return None, None, [ERR_PDF2HTML_NONZERO_EXIT]
try:
# Try to get file stats in order to check both its existence
# and if it has some content
st = os.stat(tf.name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return None, None, [ERR_NO_FILE]
if st.st_size == 0:
return None, None, [ERR_EMPTY_FILE]
if st.st_size > MAX_HTML_SIZE:
# Files this large are usually unparseable and blow out our
# memory usage. Skip them.
logger.warning(
'oversized-pdf file: name=%s size=%d max-size=%d',
tf.name, st.st_size, MAX_HTML_SIZE
)
return None, None, [ERR_FILE_TOO_LARGE]
try:
tree = lxml.etree.parse(io.BytesIO(tf.read()))
except XMLSyntaxError:
return None, None, [ERR_XML_SYNTAX]
file_pages = []
full_text = '\n'.join([_flatten_text(text) for text in tree.xpath('//text')])
pages = tree.xpath('page')
for page_num, page in enumerate(pages):
lines = page.xpath('text')
page_lines = []
# Create a mapping dict to allow font family and size lookups
fontspec = _flatten_fontspec(page.xpath('//fontspec'))
for line in lines:
family = fontspec[line.get('font')]['family']
size = int(fontspec[line.get('font')]['size'])
text = _flatten_text(line)
pdf_line = None
pdf_line = PdfLine(
size,
False,
text,
page_num,
family
)
if pdf_line:
page_lines.append(pdf_line)
file_pages.append(PdfPage(page_lines, page_num))
pdf_file = PdfFile(file_pages)
return pdf_file, full_text, None
def grab_section(pdf_file, keyword):
"""Given a pdf parsed file object (PdfFile) and a keyword corresponding to
a title, returns the matching section of the pdf text.
"""
result = ''
text = ''
elements = _find_elements(pdf_file, keyword)
for start_title, end_title in elements:
if not end_title:
end_page = len(pdf_file.pages)
else:
end_page = end_title.page_number + 1
for page_number in range(start_title.page_number, end_page):
if pdf_file.get_page(page_number).get_page_text(True):
text += pdf_file.get_page(page_number).get_page_text()
if end_title and (start_title.page_number != end_title.page_number):
result += text[
text.find(start_title.text):text.find(end_title.text)
]
else:
result += text[text.find(start_title.text):]
text = ''
return result
```
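A minimal usage sketch of the two entry points above (the PDF file name is a placeholder; assumes `pdftohtml` is installed):
```python
from reach.pdf_parser.pdf_parse import parse_pdf_document, grab_section

with open('some_report.pdf', 'rb') as f:  # placeholder file
    pdf_file, full_text, errors = parse_pdf_document(f)

if errors is None:
    references = grab_section(pdf_file, 'References')
    print(references[:200])
else:
    print('parse failed:', errors)
```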
#### File: pdf_parser/tests/test_pdf_objects.py
```python
import json
import unittest
from reach.pdf_parser.objects.PdfObjects import PdfFile
from reach.pdf_parser.pdf_parse import parse_pdf_document, grab_section
from reach.scraper.tests.common import (TEST_PDF, TEST_PDF_MULTIPAGE,
TEST_PDF_PAGE_NUMBER)
"""Test file content (html transcription):
<h1>Test</h1>
Test
<b>Test bold</b>
<h1>References</h1>
<ol>
<li>Test</li>
<li>Test</li>
<li>Test</li>
</ol>"""
JSON_PDF = json.dumps({
'pages': [
{
'lines': [
{
'size': 17,
'bold': True,
'text': 'Page 1 - Title 1',
'page_number': 1,
'font_face': 'Times',
}
],
'number': 1
},
{
'lines': [
{
'size': 17,
'bold': True,
'text': 'Page 2 - Title 2',
'page_number': 2,
'font_face': 'Times',
},
{
'size': 12,
'bold': False,
'text': 'Page 2 - Text 1',
'page_number': 2,
'font_face': 'Times',
},
],
'number': 2
},
],
'has_bold': True
})
class TestPdfObjects(unittest.TestCase):
def setUp(self):
self.test_file = open(TEST_PDF, 'rb')
self.pdf_file_object, _, _ = parse_pdf_document(self.test_file)
def tearDown(self):
self.test_file.close()
def test_mean(self):
font_mean = self.pdf_file_object.get_mean_font_size()
self.assertTrue(font_mean in range(18, 22))
def test_upper_mean(self):
upper_mean = self.pdf_file_object.get_upper_mean_font_size()
self.assertEqual(upper_mean, 22)
def test_list_by_size(self):
list_fonts = self.pdf_file_object.get_font_size_list()
for i in list_fonts:
self.assertTrue(i in [16, 22])
def test_bold(self):
list_bold_lines = self.pdf_file_object.get_bold_lines()
for line in list_bold_lines:
self.assertEqual(line.text, 'Test bold')
def test_page_text(self):
page_text = self.pdf_file_object.get_page(0).get_page_text(
ignore_page_numbers=True
)
self.assertTrue(len(page_text) > 0)
def test_lines_by_keyword(self):
keyword = 'References'
keyword_lines = self.pdf_file_object.get_lines_by_keyword(keyword)
self.assertEqual(len(keyword_lines), 1)
self.assertEqual('References' in keyword_lines[0], True)
def test_lines_by_keywords(self):
keywords = ['bold', 'test', 'machine']
keyword_lines = self.pdf_file_object.get_lines_by_keywords(keywords)
#self.assertTrue('bold' in keyword_lines.keys())
#self.assertEqual(len(keyword_lines['bold']), 1)
self.assertTrue('test' in keyword_lines.keys())
self.assertTrue(len(keyword_lines['test']) in [5, 6])
#self.assertEqual('bold' in keyword_lines['bold'][0], True)
def test_lines_by_keywords_and_context(self):
keywords = ['bold', 'test', 'machine']
keyword_lines = self.pdf_file_object.get_lines_by_keywords(keywords, 2)
#self.assertTrue('bold' in keyword_lines.keys())
#self.assertEqual(len(keyword_lines['bold']), 5)
self.assertTrue('test' in keyword_lines.keys())
#self.assertTrue(len(keyword_lines['test']) in [22, 24])
def test_from_json(self):
pdf_file = PdfFile()
pdf_file.from_json(JSON_PDF)
self.assertTrue(len(pdf_file.pages) == 2)
def test_to_json(self):
pdf_file = PdfFile()
pdf_file.from_json(JSON_PDF)
pdf_export = pdf_file.to_json()
self.assertEqual(pdf_export, JSON_PDF)
class TestPdfObjectsMultipage(unittest.TestCase):
"""
Tests against a multi-page pdf
"""
def setUp(self):
self.test_file = open(TEST_PDF_MULTIPAGE, 'rb')
self.pdf_file_object, self.full_text, _ = parse_pdf_document(self.test_file)
def tearDown(self):
self.test_file.close()
def test_parse_pdf_document_pages(self):
pages = self.pdf_file_object.pages
self.assertEqual(len(pages), 2)
self.assertEqual(len(pages[0].lines), 5)
self.assertEqual(len(pages[1].lines), 5)
self.assertEqual(pages[0].lines[0].text, "Test Page 1")
self.assertEqual(pages[0].lines[4].text, "Partly italic line.")
self.assertEqual(pages[1].lines[0].text, "Test Page 2")
self.assertEqual(pages[0].lines[4].text, "Partly italic line.")
def test_parse_pdf_document_fulltext(self):
full_text_lines = self.full_text.split('\n')
self.assertIsInstance(full_text_lines, list)
self.assertEqual(len(full_text_lines), 10)
self.assertEqual(full_text_lines[0], 'Test Page 1')
self.assertEqual(full_text_lines[5], 'Test Page 2')
class TestPdfObjectsPageNumber(unittest.TestCase):
"""
Provides a test case for the issue described in
https://github.com/wellcometrust/reach/issues/258
"""
def setUp(self):
self.test_file = open(TEST_PDF_PAGE_NUMBER, 'rb')
self.pdf_file_object, _, _ = parse_pdf_document(self.test_file)
# Cycle through the pdf document, and flatten
# into a single string
pages = []
for num in range(0, 2):
page = self.pdf_file_object.get_page(num)
text = page.get_page_text()
pages.append(text)
full_text = "".join(pages)
# Split string apart again based on line endings
self.lines = full_text.split("\n")
def tearDown(self):
self.test_file.close()
def test_page_numbers_on_separate_lines(self):
"""
Check that the page numbers end up on their own lines
"""
self.assertEqual(self.lines[4], '99')
self.assertEqual(self.lines[8], '99')
```
#### File: pdf_parser/tools/dbTools.py
```python
import psycopg2
import psycopg2.extras
import logging
from datetime import datetime
class DatabaseConnector:
def __init__(self, dsn):
"""Initialise an instance of the DatabaseConnector class and create:
- self.logger: a logger to log errors
- self.connection: the connection to the PostgreSQL database
- self.cursor: the cursor to execute requests on the database
Args:
dsn: data source name / libpq connection string
"""
self.logger = logging.getLogger(__name__)
self.connection = psycopg2.connect(dsn)
self.cursor = self.connection.cursor(
cursor_factory=psycopg2.extras.NamedTupleCursor
)
def __del__(self):
"""Commit all changes and close the database connection on the deletion
of this instance from memory.
"""
try:
self.connection.commit()
self.cursor.close()
self.connection.close()
except AttributeError:
if hasattr(self, 'logger'):
# __del__ can be called before self.logger is created.
self.logger.warning(
'Tried to close a non-existent connection.'
)
def _execute(self, query, params=()):
"""Try to execute the SQL query passed by the query parameter, with the
arguments passed by the tuple argument params.
"""
try:
self.cursor.execute(query, params)
self.connection.commit()
except psycopg2.Error as e:
self.logger.error(
'An exception had been encountered when executing %s',
query,
)
raise
def reset_scraped(self):
"""Set all the publications `scrape_again` attribute to 1, forcing the
web scraper to download and analyse them again.
"""
self._execute(
"UPDATE publication SET scrape_again = %s",
('True',)
)
def clear_db(self):
"""Remove all the publications from the database."""
self._execute(
"DELETE FROM publication"
)
def get_scraping_info(self, file_hash):
"""Check if an publication had already been scraped by looking for its
file hash into the database. If the publication exists, returns its id
and its `scrape_again` value
"""
self._execute(
"""
SELECT id, scrape_again
FROM publication
WHERE file_hash = %s;
""",
(file_hash,)
)
result = self.cursor.fetchone()
return result
def get_publications(self, offset=0, limit=-1):
"""Return a list of publications. By default, returns every
publications. This method accepts start and end arguments to
paginate results.
"""
if limit > 0:
self._execute(
"""
SELECT title, file_hash, url
FROM publication LIMIT %s OFFSET %s
""",
(limit, offset,)
)
else:
self._execute("SELECT title, file_hash, url FROM publication")
result = []
for publication in self.cursor.fetchall():
result.append(publication)
return result
def insert_full_publication(self, publication, id_provider):
"""Insert an entire publication in the database and return its id."""
self._execute(
"""
INSERT INTO publication(title, url, pdf_name, file_hash,
authors, pub_year,
id_provider, datetime_creation)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
RETURNING id;
""",
(publication['title'], publication['uri'], publication['pdf'],
publication['hash'], publication.get('authors'),
publication.get('year'), id_provider, datetime.now(),)
)
return self.cursor.fetchone().id
def update_full_publication(self, publication):
self._execute(
"""
UPDATE publication
SET
title = %s,
url = %s,
pdf_name = %s,
file_hash = %s,
authors = %s,
pub_year = %s,
scrape_again = %s
WHERE id = %s;
""",
(publication['title'], publication['uri'], publication['pdf'],
publication['hash'], publication.get('authors'),
publication.get('year'), False, publication['id'],)
)
def insert_joints_and_text(self, table, items, id_publication):
"""Create both a name table and a joint table between a publication and
another item with a 0-n or 1-n cardinality, containing a text element.
"""
if not items:
return
for name, text_value in items.items():
db_name_id = self.get_or_create_name(name, table)
self._execute(
"""
INSERT
INTO publications_{table}s(
id_publication,
id_{table},
text_content
)
VALUES(%s, %s, %s);
""".format(table=table),
(id_publication, db_name_id, text_value,)
)
def insert_joints(self, table, items, id_publication):
"""Create both a name table and a joint table between a publication and
another item with a 0-n or 1-n cardinality.
"""
if not items:
return
for name in items:
db_name_id = self.get_or_create_name(name, table)
self._execute(
"""
INSERT
INTO publications_{table}s(
id_publication,
id_{table}
)
VALUES(%s, %s);
""".format(table=table),
(id_publication, db_name_id,)
)
def get_or_create_name(self, name, table):
"""Insert a `name` like element in a table if it doesn't exists and
returns its ID. If it already exists, just returns the ID.
"""
self._execute(
'SELECT id FROM {table} WHERE name = %s'.format(table=table),
(name,)
)
item = self.cursor.fetchone()
if item:
return item.id
else:
self._execute(
"""
INSERT INTO {table}(name)
VALUES(%s)
RETURNING id;
""".format(table=table),
(name,)
)
return self.cursor.fetchone().id
def insert_publication(self, file_hash, url):
"""Try to insert an publication, composed by its file hash and url,
into the database.
"""
if len(url) >= 255:
self.logger.warning(
f'Publication url ({url}) is too long ({len(url)}/255).'
)
url = url[:255]
self._execute(
"INSERT INTO publication (file_hash, url) VALUES (%s, %s)",
(file_hash, url)
)
def get_finished_crawls(self):
self._execute("SELECT * FROM spiders WHERE status = %s", ('finished',))
result = []
for publication in self.cursor:
result.append(publication)
return result
def insert_spider(self, name, uuid):
self._execute(
"INSERT INTO spiders (name, uuid, status) VALUES (%s, %s, %s)",
(name[:255], uuid, 'running')
)
def close_spider(self, uuid):
self._execute(
"""
UPDATE spiders
SET status = %s, end_time = CURRENT_TIMESTAMP
WHERE uuid = %s;
""",
('finished', uuid)
)
```
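A minimal usage sketch of `DatabaseConnector` (the DSN, hash and URL are placeholders, and the module path is assumed from the file layout):
```python
from reach.pdf_parser.tools.dbTools import DatabaseConnector

db = DatabaseConnector('dbname=wsf user=postgres host=localhost')  # placeholder DSN
file_hash = 'd41d8cd98f00b204e9800998ecf8427e'                     # placeholder md5
if db.get_scraping_info(file_hash) is None:
    db.insert_publication(file_hash, 'https://example.org/report.pdf')
for title, pdf_hash, url in db.get_publications():
    print(title, url)
```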
#### File: scraper/wsf_scraping/contracts.py
```python
from scrapy.contracts import Contract
class AjaxContract(Contract):
"""Add headers to a contract so that it becomes an ajax request."""
name = "ajax"
def adjust_request_args(self, kwargs):
headers = {
'X-Requested-With': 'XMLHttpRequest',
'referer': 'https://www.nice.org.uk/guidance/published'
}
kwargs['headers'] = headers
return kwargs
```
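Scrapy looks contracts up by their `name`, so a spider callback opts into these headers from its check docstring. A hypothetical callback illustrating the `@ajax` tag (body omitted):
```python
def parse_published(self, response):
    """ Parse the ajax-loaded guidance listing.
    @ajax
    @url https://www.nice.org.uk/guidance/published
    @returns requests 1
    """
```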
#### File: scraper/wsf_scraping/pipelines.py
```python
import os
import logging
from urllib.parse import urlparse
from scrapy.utils.project import get_project_settings
from .file_system import S3FileSystem, LocalFileSystem, get_file_hash
from scrapy.exceptions import DropItem
class WsfScrapingPipeline(object):
def __init__(self, organisation):
"""Initialise the pipeline, giving it access to the settings, keywords
and creating the folder in which to store pdfs if stored locally.
"""
self.settings = get_project_settings()
uri = self.settings['FEED_URI'].replace('%(name)s', organisation)
# Initialize logging
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
self.logger.info(
'Pipeline initialized FEED_CONFIG=%s',
self.settings.get('FEED_CONFIG'),
)
self.setup_storage(uri, organisation)
self.manifest = self.storage.get_manifest()
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.spider.name)
def setup_storage(self, url, organisation):
"""Take the output url and set the right feed storage for the pdf.
Sets the storage system in use for the pipeline in the following:
* S3FileSystem: Store the pdfs in Amazon S3.
* LocalFileSystem: Store the pdfs in a local directory.
Args:
- url: A string representing the location to store the pdf files.
"""
parsed_url = urlparse(url)
scheme = parsed_url.scheme
if scheme == 'manifests3':
self.logger.debug('Using S3 File system')
self.storage = S3FileSystem(
parsed_url.path,
organisation,
parsed_url.netloc
)
else:
self.logger.debug('Using Local File System')
self.storage = LocalFileSystem(parsed_url.path, organisation)
def is_in_manifest(self, hash):
"""Check if a file hash is in the current manifest.
Args:
- hash: A hex string representing the md5 digest of a file.
Returns:
- True if the file hash is in the manifest, else False.
"""
if hash[:2] in self.manifest:
if hash in self.manifest[hash[:2]]:
return True
return False
def process_item(self, item, spider):
"""Process items sent by the spider.
The returned items will then be sent to the FeedStorage, where they
are stored in a temporary file and processed at the end of the process.
Args:
- item: The item returned by the spider.
- spider: The spider from which the item is coming.
Raises:
- DropItem: If the pdf couldn't be saved, we want to drop the item.
Returns:
- item: The processed item, to be used in a feed storage.
"""
if not item['pdf']:
raise DropItem(
'Empty filename, could not parse the pdf.'
)
item['hash'] = get_file_hash(item['pdf'])
in_manifest = self.is_in_manifest(item['hash'])
if not in_manifest:
path = os.path.join(
'pdf',
item['hash'][:2],
)
with open(item['pdf'], 'rb') as pdf:
self.storage.save(pdf, path, item['hash'] + '.pdf')
# Remove the file to save storage
os.unlink(item['pdf'])
return item
```
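For reference, `is_in_manifest` expects the manifest to bucket hashes by their first two characters, roughly like the sketch below (the value type depends on the storage backend and is shown here only as a placeholder):
```python
manifest = {
    'd4': {'d41d8cd98f00b204e9800998ecf8427e': 'pdf/d4/d41d8cd98f00b204e9800998ecf8427e.pdf'},
}
```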
#### File: wsf_scraping/spiders/msf_spider.py
```python
import scrapy
from .base_spider import BaseSpider
class MsfSpider(BaseSpider):
name = 'msf'
custom_settings = {
'JOBDIR': BaseSpider.jobdir(name)
}
def start_requests(self):
"""Set up the initial request to the website to scrape."""
urls = [
'https://www.msf.org.uk/activity-reports',
'https://www.msf.org.uk/reports',
]
for url in urls:
self.logger.info('Initial url: %s', url)
yield scrapy.Request(
url=url,
errback=self.on_error,
dont_filter=True,
callback=self.parse,
)
def parse(self, response):
""" Parse both reports and activity-reports pages.
@url https://www.msf.org.uk/activity-reports
@returns items 0 0
@returns requests 10
"""
doc_links = response.css('.field-items a::attr(href)').extract()
for url in doc_links:
yield scrapy.Request(
url=response.urljoin(url),
errback=self.on_error,
callback=self.save_pdf
)
```
#### File: wsf_scraping/spiders/parliament_spider.py
```python
import scrapy
from scrapy.http import Request
from .base_spider import BaseSpider
class ParliamentSpider(BaseSpider):
name = 'parliament'
custom_settings = {
'JOBDIR': BaseSpider.jobdir(name),
'ROBOTSTXT_OBEY': False
}
def start_requests(self):
"""This sets up the initial urls."""
query_list = [
"Parameters.Fields.all=",
"Parameters.Fields.all-target=",
"Parameters.Fields.phrase=",
"Parameters.Fields.phrase-target=",
"Parameters.Fields.any=",
"Parameters.Fields.any-target=",
"Parameters.Fields.exclude=",
"Parameters.Fields.exclude-target=",
"Parameters.Fields.type=Bills",
"Parameters.Fields.type=Select+Committee+reports",
"Parameters.Fields.type=Select+Committee+written+evidence",
"Parameters.Fields.type=Debates",
"Parameters.Fields.type=Research+briefings",
"Parameters.Fields.member=",
"Parameters.Fields.subject=",
"Parameters.Fields.reference=",
"When%3A=date",
"Parameters.Fields.date=01%2F01%2F1980",
"Parameters.Fields.date=04%2F10%2F2018",
"Parameters.PageSize=100"
]
base_url = 'http://search-material.parliament.uk/search'
query_params = '&'.join(query_list)
url = base_url + '?' + query_params
self.logger.info('Initial url: %s', url)
yield scrapy.Request(
url=url,
callback=self.parse,
errback=self.on_error,
dont_filter=True,
)
def parse(self, response):
"""Parse the articles listing page and go to the next one."""
for li in response.css('#results li'):
# direct pdfs links ends with pdf
link = li.css('h4 a::attr(href)').extract_first().strip()
meta = li.css('.resultdetails::text').extract()
meta = [m.strip() for m in meta]
# The date is always in format `dd Mmm YYYY`
title = li.css('h4 a::text').extract_first().strip()
year = meta[0][-4:]
types = meta[1]
yield Request(
url=response.urljoin(link),
meta={
'title': title,
'year': year,
'types': types
},
callback=self.parse_others,
errback=self.on_error,
)
next = response.css('.next a::attr(href)').extract_first()
if next:
yield Request(
url=response.urljoin(next),
callback=self.parse,
errback=self.on_error,
)
def parse_others(self, response):
"""Try to retrieve a pdf from a depth 2 page. If no pdf is found
at this point, we stop looking this way.
"""
# Some of the parliament's pdf are categorised as octetstream
is_pdf = self._check_headers(
response.headers
) or self._check_headers(
response.headers,
b'application/octet-stream'
)
if is_pdf:
yield Request(
url=response.urljoin(response.request.url),
meta={
'title': response.meta.get('title'),
'year': response.meta.get('year'),
'types': response.meta.get('types'),
},
callback=self.save_pdf,
errback=self.on_error,
)
elif self._check_headers(
response.headers,
b'text/html',
):
for href in response.css('a::attr(href)').extract():
if href.endswith('pdf'):
yield Request(
url=response.urljoin(href),
meta={
'title': response.meta.get('title'),
'year': response.meta.get('year'),
'types': response.meta.get('types'),
},
callback=self.save_pdf,
errback=self.on_error,
)
yield
```
#### File: web/tests/test_template.py
```python
from reach.web.views import template
def test_to_template_names():
cases = [
('/', ('index.html',)),
('/foo', ('foo.html', 'foo/index.html')),
('/foo.html', ('foo.html', 'foo/index.html')),
('/foo/gar', ('foo/gar.html', 'foo/gar/index.html')),
('/_macros.html', tuple()),
]
for path, expected in cases:
assert expected == template.to_template_names(path)
```
#### File: web/views/search.py
```python
import json
import math
from elasticsearch import ConnectionError, NotFoundError
import falcon
from reach.web.views import template
def _get_pages(current_page, last_page):
"""Return a list of pages to be used in the rendered template from the
last page number."""
pages = []
if current_page > 3:
pages.append(1)
if current_page > 4:
pages.append('...')
pages.extend(
range(
max(current_page - 2, 1), min(current_page + 3, last_page)
)
)
if current_page < last_page - 3:
pages.append('...')
if last_page not in pages:
pages.append(last_page)
return pages
def _search_es(es, params, explain=False):
"""Run a search on the elasticsearch database.
Args:
es: An Elasticsearch active connection.
params: The request's parameters. Should include 'term' and at
least a field ([text|title|organisation]).
explain: A boolean to enable|disable elasticsearch's explain.
Returns:
True|False: The search success status
es.search()|str: A dict containing the result of the search if it
succeeded or a string explaining why it failed
"""
try:
fields = params.get('fields', []).split(',')
page = params.get('page', 1)
size = params.get('size', 50)
es.cluster.health(wait_for_status='yellow')
es_body = {
'from': (page - 1) * size,
'size': size,
'query': {
'multi_match': {
'query': params.get('term'),
'type': "best_fields",
'fields': ['.'.join(['doc', f]) for f in fields]
}
}
}
return True, es.search(
index='policy-test-docs',
body=json.dumps(es_body),
explain=explain
)
except ConnectionError:
message = 'Could not join the elasticsearch server.'
raise falcon.HTTPServiceUnavailable(description=message)
except NotFoundError:
message = 'No results found.'
return False, {'message': message}
except Exception as e:
raise falcon.HTTPError(description=str(e))
class FulltextApi:
"""Let you search for terms in publications fulltexts. Returns a json.
Args:
es: An elasticsearch connection
es_explain: A boolean to enable|disable elasticsearch's explain.
"""
def __init__(self, es, es_explain):
self.es = es
self.es_explain = es_explain
def on_get(self, req, resp):
"""Returns the result of a search on the elasticsearch cluster.
Args:
req: The request passed to this controller
resp: The response object to be returned
"""
if req.params:
status, response = _search_es(self.es, req.params, self.es_explain)
if status:
response['status'] = 'success'
resp.body = json.dumps(response)
else:
resp.body = json.dumps({
'status': 'error',
'message': response
})
else:
resp.body = json.dumps({
'status': 'error',
'message': "The request doesn't contain any parameters"
})
resp.status = falcon.HTTP_400
class FulltextPage(template.TemplateResource):
"""Let you search for terms in publications fulltexts. Returns a web page.
Args:
es: An elasticsearch connection
es_explain: A boolean to enable|disable elasticsearch's explain.
"""
def __init__(self, template_dir, es, es_explain, context=None):
self.es = es
self.es_explain = es_explain
super(FulltextPage, self).__init__(template_dir, context)
def on_get(self, req, resp):
if req.params:
params = {
"term": req.params.get('term', ''), # es returns none on empty
"fields": "text,organisation", # search_es is expects a str
"page": int(req.params.get('page', 1)),
"size": int(req.params.get('size', 50)),
}
status, response = _search_es(self.es, params, True)
self.context['es_response'] = response
self.context['es_status'] = status
if (not status) or (response.get('message')):
self.context.update(params)
super(FulltextPage, self).render_template(
resp,
'/results/policy-docs',
)
return
self.context['pages'] = _get_pages(
params['page'],
math.ceil(
float(response['hits']['total']['value']) / params['size'])
)
self.context.update(params)
super(FulltextPage, self).render_template(
resp,
'/results/policy-docs',
)
else:
super(FulltextPage, self).on_get(req, resp)
``` |
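A hand-worked example of the pagination helper (module path assumed from the file layout):
```python
from reach.web.views.search import _get_pages

print(_get_pages(5, 20))  # [1, '...', 3, 4, 5, 6, 7, '...', 20]
print(_get_pages(1, 3))   # [1, 2, 3]
```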
{
"source": "jjayala1/adventofCode2020",
"score": 3
} |
#### File: jjayala1/adventofCode2020/day10a.py
```python
def clean_data():
f = open('day10.txt','r')
jolts = sorted([int(x.replace('\n','')) for x in f.readlines()])
jolts = [0, *jolts, jolts[-1] + 3 ]
return jolts
def sort_adapters(jolts):
dif = [(jolts[i+1]-jolts[i]) for i in range(len(jolts)-1) ]
return dif.count(1) * dif.count(3)
def count_arrangements(jolts):
paths = [0 for x in range(len(jolts)-1)]
paths.append(1)
for i in range(len(jolts)-1,-1,-1):
for j in range(1,4):
if i+j < len(jolts) and jolts[i+j] - jolts[i] <= 3:
paths[i] += paths[i+j]
#print(f'Connector: {jolts[i]} -- Iteration: {i} -- Paths: {paths} -- TotPaths: {paths[i]} ')
return paths[0]
def count_arrangements1(jolts):
paths = [0 for x in range(1,len(jolts))]
paths.insert(0,1)
for i in range(len(jolts)):
for j in range(1,4):
if i-j >= 0 and jolts[i] - jolts[i-j] <= 3:
paths[i] += paths[i-j]
#print(f'Connector: {jolts[i]} -- Iteration: {i} -- Paths: {paths} -- TotPaths: {paths[i]} ')
return paths[-1]
jolts = clean_data()
result = sort_adapters(jolts)
print(f'Part 1: {result}')
arrangements = count_arrangements(jolts)
print(f'Part 2a descending: {arrangements}')
arrangements1 = count_arrangements1(jolts)
print(f'Part 2b ascending : {arrangements1}')
```
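A quick hand check of both helpers on the small example from the puzzle statement (the list is shown already padded with 0 and max+3, exactly as clean_data produces it):
```python
sample = [0, 1, 4, 5, 6, 7, 10, 11, 12, 15, 16, 19, 22]
print(sort_adapters(sample))       # 35 -> 7 one-jolt gaps * 5 three-jolt gaps
print(count_arrangements(sample))  # 8 distinct adapter arrangements
```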
#### File: jjayala1/adventofCode2020/day10b.py
```python
def clean_data():
f = open('day10b.txt','r')
data = [int(x.replace('\n','')) for x in f.readlines()]
data.sort()
return data
def part2(data):
data.sort()
print(data)
ans = {}
ans[0] = 1
for a in data:
ans[a] = ans.get(a-1,0) + ans.get(a-2,0) + ans.get(a-3,0)
print(a,ans[a],ans.get(a-1,0),ans.get(a-2,0),ans.get(a-3,0))
print(ans,ans[data[-1]])
return ans[data[-1]]
data = clean_data()
result = part2(data)
print(f'Part 2: {result}')
```
#### File: jjayala1/adventofCode2020/day4B.py
```python
valid_passport_data = {
'ecl', 'pid', 'eyr', 'hcl',
'byr', 'iyr', 'hgt',
}
count_required_data = len(valid_passport_data)
def ecl_rule(value):
return value in {'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'}
def pid_rule(value):
try:
int(value)
isnumber = True
except:
isnumber = False
return (len(value) == 9) and isnumber
def eyr_rule(value):
try:
year = int(value)
except:
return False
return year >= 2020 and year <= 2030
def hcl_rule(value):
try:
int(value[1:], 16)
except:
return False
return value[0] == '#' and len(value) == 7
def byr_rule(value):
try:
year = int(value)
except:
return False
return year >= 1920 and year <= 2002
def iyr_rule(value):
try:
year = int(value)
except:
return False
return year >= 2010 and year <= 2020
def hgt_rule(value):
units = value[-2:]
try:
height = float(value[:-2])
except:
return False
if units == 'cm':
return height >= 150.00 and height <= 193.00
if units == 'in':
return height >= 59.00 and height <= 76.00
return False
validation_rules = {
'ecl': ecl_rule,
'pid': pid_rule,
'eyr': eyr_rule,
'hcl': hcl_rule,
'byr': byr_rule,
'hgt': hgt_rule,
'iyr': iyr_rule
}
def load_passports(path):
with open(path, 'r') as file:
lines = file.readlines()
passports = []
passport_data = {}
for line in lines:
if ':' in line:
key_value_list = line.split()
for key_value in key_value_list:
key, value = key_value.split(':')
passport_data[key] = value
else:
passports.append(passport_data)
passport_data = {}
passports.append(passport_data)
return passports
def validate_passport(passport):
keys = set(passport)
common_data = valid_passport_data.intersection(keys)
if len(common_data) == count_required_data:
is_valid = True
for key in keys:
if key == 'cid':
continue
try:
value = passport[key]
is_valid = (is_valid and validation_rules[key](value))
except Exception as e:
print(f'ValidationError: {e}')
return int(is_valid)
return 0
# === MAIN ===
valid_passports_count = 0
passports = load_passports('day4.txt')
for p in passports:
valid_passports_count += validate_passport(p)
print(valid_passports_count)
```
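The individual field rules above can be sanity-checked against the examples given in the puzzle text:
```python
assert byr_rule('2002') and not byr_rule('2003')
assert hgt_rule('60in') and hgt_rule('190cm')
assert not hgt_rule('190in') and not hgt_rule('190')
assert hcl_rule('#123abc') and not hcl_rule('#123abz') and not hcl_rule('123abc')
assert ecl_rule('brn') and not ecl_rule('wat')
assert pid_rule('000000001') and not pid_rule('0123456789')
print('all field rules behave as expected')
```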
#### File: jjayala1/adventofCode2020/day6.py
```python
def clean_groups():
groups = []
f = open('day6.txt','r')
data =f.readlines()
group = ''
for d in data:
d = d[0:len(d)-1]
group += ' ' + d
if d == '':
groups.append(group)
group = ''
return groups
def count_uinique_answers_part1(groups):
total_count = 0
for g in groups:
g = g.replace(' ','')
answers = list(g)
answers.sort()
answers = set(answers)
total_count += len(answers)
return total_count
def count_uinique_answers_part2(groups):
total_count = 0
for g in groups:
answers = g.split()
total_count += find_duplicates(answers)
#print(answers, total_count)
return total_count
def find_duplicates(group):
if len(group) == 1:
return len(list(group[0]))
intersect = group[0]
for i,d in enumerate(group):
intersect = set(group[i]).intersection(intersect)
#print(intersect)
return len(intersect)
groups = clean_groups()
total1 = count_uinique_answers_part1(groups)
total2 = count_uinique_answers_part2(groups)
print(f'Part 1: {total1}')
print(f'Part 2: {total2}')
```
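A quick check of the intersection logic on toy groups:
```python
print(find_duplicates(['abc', 'bcd', 'bde']))  # 1 -> only 'b' appears on every line
print(find_duplicates(['xyz']))                # 3 -> a single-person group counts all answers
```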
#### File: jjayala1/adventofCode2020/day8.py
```python
def clean_data():
f = open('day8.txt','r')
data = f.readlines()
for i,d in enumerate(data):
for r in (('\n',''),('bags',''),('bag',''),('.','')):
d = d.replace(*r)
data[i] = d
return data
def get_acum(data):
acum = 0
loop = []
j = 0
for i,d in enumerate(data):
op, value = data[j].split()
loop.append(j)
acum += int(value) if op == 'acc' else 0
j += int(value) if op == 'jmp' else 1
if j in loop:
return acum,j
if j == len(data):
return acum,j
return acum,j
changed = []
def change_op(data):
global changed
datan = data.copy()
change = 0
for i,d in enumerate(data):
op, value = d.split()
if op == 'jmp' and change == 0 and i not in changed:
datan[i] = 'nop ' + value
changed.append(i)
change = 1
#print(f'changing {i}')
if op == 'nop' and change == 0 and i not in changed:
datan[i] = 'jmp ' + value
changed.append(i)
change = 1
if change == 0:
exit()
acum,j = get_acum(datan)
if j != len(data):
return change_op(data)
else:
return(acum,j)
data = clean_data()
acum = get_acum(data)
acum2 = change_op(data)
print(f'Part 1: {acum[0]}')
print(f'Part 2: {acum2[0]}')
``` |
{
"source": "jjayala1/sprint",
"score": 2
} |
#### File: jjayala1/sprint/modal1.py
```python
def view(day, progress):
linksP = [[60,'<NAME>','https://www.linkedin.com/posts/rodneydaut_failuretosuccess-linkedin30daysprint-activity-6886695645353127936-sRQx'],[61,'<NAME>','https://www.linkedin.com/posts/roseyhwang_coaching-business-entrepreneurship-activity-6886698097083224064-Lvdi']]
linksC = [[60,'<NAME>','https://www.linkedin.com/posts/rodneydaut_failuretosuccess-linkedin30daysprint-activity-6886695645353127936-sRQx'],[61,'<NAME>','https://www.linkedin.com/posts/roseyhwang_coaching-business-entrepreneurship-activity-6886698097083224064-Lvdi']]
view = {
"type": "modal",
"title": {
"type": "plain_text",
"text": f"Posts Day {day} {progress}",
"emoji": True
},
"submit": {
"type": "plain_text",
"text": "Submit",
"emoji": True
},
"close": {
"type": "plain_text",
"text": "Cancel",
"emoji": True
},
"blocks": [
{
"type": "divider"
},
{
"type": "context",
"elements": [
{
"type": "image",
"image_url": "https://api.slack.com/img/blocks/bkb_template_images/highpriority.png",
"alt_text": "Pending"
},
{
"type": "mrkdwn",
"text": "*Pending*"
}
]
}
]
}
for i,l in enumerate(linksP):
view['blocks'].extend([{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f":white_check_mark: <{l[2]}|{l[1]}>"
},
},
{
"type": "section",
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Commented",
"emoji": True
},
"value": "Commented",
"action_id": f"{l[0]}"
},
},
])
view['blocks'].extend([{
"type": "divider"
},
{
"type": "context",
"elements": [
{
"type": "image",
"image_url": "https://api.slack.com/img/blocks/bkb_template_images/mediumpriority.png",
"alt_text": "Completed"
},
{
"type": "mrkdwn",
"text": "*Completed*"
}
]
}])
for i,l in enumerate(linksC):
view['blocks'].append({
"type": "section",
"text": {
"type": "mrkdwn",
"text": f":white_check_mark: <{l[2]}|{l[1]}>"
},
})
return view
print(view(1,'asasa'))
``` |
{
"source": "jjaycaneza/school_test",
"score": 2
} |
#### File: report/courses_report/courses_report.py
```python
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def get_courses_information():
coursesData = frappe.db.sql("""select * from `tabCourses` """,as_dict=True)
jsonData = []
for i in range(len(coursesData)):
print(coursesData[i])
jsonData.append({
'courseid': coursesData[i].name,
'coursename': coursesData[i].coursename
})
return jsonData
def execute(filters=None):
coursesData = get_courses_information()
columns = [
{
'fieldname': 'courseid',
'label': ('Course ID'),
'fieldtype': 'Data',
'options': ''
},
{
'fieldname': 'coursename',
'label': ('Course Name'),
'fieldtype': 'Data',
'options': ''
}
]
data = coursesData
return columns, data
```
#### File: report/student_information_report_form/student_information_report_form.py
```python
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
@frappe.whitelist()
def get_student_information():
studentData = frappe.db.sql("""select * from `tabStudent Information` """,as_dict=True)
jsonData = []
for i in range(len(studentData)):
print(studentData[i])
jsonData.append({
'studentid': studentData[i].name,
'fullname': studentData[i].fullname,
'age': studentData[i].age
})
return jsonData
def execute(filters=None):
studentData = get_student_information()
columns = [
{
'fieldname': 'studentid',
'label': ('Student ID'),
'fieldtype': 'Data',
'options': ''
},
{
'fieldname': 'fullname',
'label': ('<NAME>'),
'fieldtype': 'Data',
'options': ''
},
{
'fieldname': 'age',
'label': ('Age'),
'fieldtype': 'Int',
'options': ''
}
]
data = studentData
return columns, data
``` |
{
"source": "jjayd/project",
"score": 3
} |
#### File: project/clustering/k_means.py
```python
import numpy as np
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.manifold import MDS
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import *
import os
from math import log10
from scipy.sparse import csr_matrix
from gensim.models import KeyedVectors
path = os.path.abspath(os.path.dirname(__file__))
allwords=[]
model = KeyedVectors.load_word2vec_format('../wiki.ko.vec')
def elbow_plot(data, maxK=10, seed_centroids=None):
"""
parameters:
- data: pandas DataFrame (data to be fitted)
- maxK (default = 10): integer (maximum number of clusters with which to run k-means)
- seed_centroids (default = None ): float (initial value of centroids for k-means)
"""
sse = {}
for k in range(1, maxK):
print("k: ", k)
if seed_centroids is not None:
seeds = seed_centroids.head(k)
kmeans = KMeans(n_clusters=k, max_iter=500, n_init=100, random_state=0, init=np.reshape(seeds, (k,1))).fit(data)
data["clusters"] = kmeans.labels_
else:
kmeans = KMeans(n_clusters=k, max_iter=300, n_init=100, random_state=0).fit(data)
data["clusters"] = kmeans.labels_
# Inertia: Sum of distances of samples to their closest cluster center
sse[k] = kmeans.inertia_
plt.figure()
plt.plot(list(sse.keys()), list(sse.values()))
#plt.show()
plt.savefig(os.path.join(path,'elbow.png'))
return
def f(t,d):
return d.count(t)
def tf(t,d):
return 0.5+0.5*f(t,d)/max([f(w,d) for w in d])
def idf(t,D):
numerator = len(D)
denominator = 1+len([True for d in D if t in d])
return log10(numerator/denominator)
def tfidf(t,d,D):
return tf(t,d)*idf(t,D)
def tokenizer(d):
return d.split()
def tfidfScorer(D):
tokenized_D = [tokenizer(d) for d in D]
global allwords
for d in tokenized_D:
for t in d:
allwords.append(t)
allwords=list(set(allwords))
allwords.sort()
# print(allwords)
result = []
for d in tokenized_D:
tmp=[]
for k in allwords:
if k in d:
tmp.append([k,tfidf(k,d,tokenized_D)])
else:
tmp.append([k,0])
result.append(tmp)
# result.append([(t,tfidf(t,d,tokenized_D)) for t in allwords])
return result
#Instead of TfIdf Vectorizer
def applysim(m):
# model = KeyedVectors.load_word2vec_format('../wiki.ko.vec')
global model
# return m
global allwords
length = len(allwords)
matrix = [[0]*length for i in range(length)]
for i in range(length):
for j in range(i+1,length):
try:
sim = model.wv.similarity(allwords[i],allwords[j])
except:
sim=0
if sim>0.6:
matrix[i][j]=sim
matrix[j][i]=sim
# print(matrix)
# print()
print("finish sim")
for i in range(len(m)):
for j in range(1,length):
for k in range(j+1,length):
if matrix[j][k]!=0:
# print(j,k)
# print(allwords[j],allwords[k],matrix[j][k])
# print(m[i][j],m[i][k])
if m[i][j]==0:
m[i][j]=m[i][k]*matrix[j][k]
elif m[i][k]==0:
m[i][k]=m[i][j]*matrix[j][k]
return m
def weighted_tfidf(data):
wtfidf=[]
f = open('../subtime.txt','r')
time=[]
while True:
line = f.readline()
if not line: break
time.append(int(line)/100000)
for i,doc in enumerate(tfidfScorer(data)):
tmp=[]
tmp.append(time[i])
for j in doc:
tmp.append(j[1])
wtfidf.append(tmp)
wtfidf=applysim(wtfidf)
wtfidf=np.asarray(wtfidf)
return csr_matrix(wtfidf)
# Tokenizer to return stemmed words, we use this
def tokenize_and_stem(text_file):
# declaring stemmer and stopwords language
stemmer = SnowballStemmer("english")
f = open('../outtfdf.txt')
sw=[]
while True:
w=f.readline()
if not w: break
sw.append(w[:-1])
stop_words=sw
# stop_words = set(stopwords.words('english'))
words = word_tokenize(text_file)
filtered = [w for w in words if w not in stop_words]
stems = [stemmer.stem(t) for t in filtered]
# print(stems)
# print("gggggggg\n\n\n")
return stems
def main():
# data = pd.read_csv('../wordslist.txt',names=['text'])
data = pd.read_csv('../keywordslist.txt',names=['text'])
# text data in dataframe and removing stops words
f = open('../outtfdf.txt')
sw=[]
while True:
w = f.readline()
if not w: break
sw.append(w[:-1])
stop_words = sw
data['text'] = data['text'].apply(lambda x: ' '.join([word for word in str(x).split() if word not in stop_words]))
# Using TFIDF vectorizer to convert convert words to Vector Space
tfidf_vectorizer = TfidfVectorizer(max_features=200000,
use_idf=True,
# stop_words='korean',
tokenizer=tokenize_and_stem)
# Fit the vectorizer to text data
tfidf_matrix = tfidf_vectorizer.fit_transform(data['text'])
# tfidf_matrix = weighted_tfidf(data['text'])
rows,cols=tfidf_matrix.nonzero()
# terms = tfidf_vectorizer.get_feature_names()
# print(tfidf_matrix.shape)
# Kmeans++
# km = SpectralClustering(n_clusters=3,affinity="precomputed",n_neighbors=9)
maxK=50
sse={}
for k in range(15,maxK):
km = KMeans(n_clusters=k,init='k-means++',max_iter=50, n_init=1,verbose=0,random_state=3425)
km.fit(tfidf_matrix)
        labels = km.labels_
print(k,km.labels_)
sse[k]=km.inertia_
print()
plt.figure()
plt.plot(list(sse.keys()),list(sse.values()))
plt.savefig(os.path.join(path,'kmeans_elbow.png'))
return
km = KMeans(n_clusters=16, init='k-means++', max_iter=10, n_init=1, verbose=0, random_state=3425)
km.fit(tfidf_matrix)
labels = km.labels_
print(labels)
# return
clusters = labels.tolist()
# Calculating the distance measure derived from cosine similarity
distance = 1 - cosine_similarity(tfidf_matrix)
# Dimensionality reduction using Multidimensional scaling (MDS)
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=1)
pos = mds.fit_transform(distance)
xs, ys = pos[:, 0], pos[:, 1]
# Saving cluster visualization after mutidimensional scaling
    for x, y in zip(xs, ys):
plt.scatter(x, y)
plt.title('MDS output of News Headlines')
    plt.savefig(os.path.join(path, 'results', 'MDS.png'))
return
# Creating dataframe containing reduced dimensions, identified labels and text data for plotting KMeans output
df = pd.DataFrame(dict(label=clusters, data=data['text'], x=xs, y=ys))
    df.to_csv(os.path.join(path, 'results', 'kmeans_clustered_DF.txt'), sep=',')
label_color_map = {0: 'red',
1: 'blue',
2: 'green',
3: 'pink',
4: 'purple',
5: 'yellow',
6: 'orange',
7: 'grey',
8: 'black',
9: 'ivory',
10: 'pink',
11: 'black',
12: 'teal',
13: 'navy',
14: 'brown',
15: 'burgundy',
16: 'black'
}
    csv = open(os.path.join(path, 'results', 'kmeans_clustered_output.txt'), 'w')
csv.write('Cluster Headline\n')
fig, ax = plt.subplots(figsize=(9, 9))
for index, row in df.iterrows():
cluster = row['label']
label_color = label_color_map[row['label']]
label_text = row['data']
ax.plot(row['x'], row['y'], marker='o', ms=12,c='white')
row = str(cluster) + ',' + label_text + '\n'
csv.write(row)
# ax.legend(numpoints=1)
for i in range(len(df)):
ax.text(df.loc[i]['x'], df.loc[i]['y'], df.loc[i]['label'], size=8)
plt.title('News Headlines using KMeans Clustering')
    plt.savefig(os.path.join(path, 'results', 'kmeans.png'))
if __name__ == '__main__':
main()
```
#### File: jjayd/project/mytfdf.py
```python
import io
import re
def f(t, d):
# d: document
return d.count(t)
def tf(t,d):
return 0.5 + 0.5*f(t,d)/max([f(w,d) for w in d])
def df(t,D):
numerator = len(D)
denominator = 1+ len([True for d in D if t in d])
return (denominator/numerator)
def tfdf(t,d,D):
return tf(t,d)*df(t,D)
def tokenizer(d):
return d.split()
def tfdfScorer(D):
tokenized_D = [tokenizer(d) for d in D]
result=[]
for d in tokenized_D:
result.append([(t,tfdf(t,d,tokenized_D)) for t in d])
return result
if __name__ == '__main__':
r=[]
day = []
company = []
listwords = []
X = []
for i in range(1,11,1):
tmp = 'text/out'+str(i)+'.txt'
r.append(io.open(tmp,mode='r',encoding='utf-8'))
for i in range(1,11,1):
day.append(r[i-1].readline())
company.append(r[i-1].readline())
line = r[i-1].readline()
listwords.append(line)
corpus = listwords
for i, doc in enumerate(tfdfScorer(corpus)):
# print('===== document[%d] ====='%i)
doc = list(set(doc))
doc.sort(key = lambda element : element[1])
for a in doc:
if a[1]>0.4:
X.append(a[0])
X = list(set(X))
for i in X:
print(i)
```
#### File: ko_self_attention_character_base/self-attention_ko_experiment/eval.py
```python
import tensorflow as tf
import numpy as np
import os
import re
import data_helpers
# Parameters
# ==================================================
# Data loading params
tf.flags.DEFINE_string("test_dir", "data/test.sent_data.txt", "Path of test data")
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 60, "Batch Size")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("visualize", True, "Save the html visualization code")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
def eval():
with tf.device('/cpu:0'):
x_text, y = data_helpers.load_data_and_labels(FLAGS.test_dir)
# Map data into vocabulary
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab")
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_eval = np.array(list(vocab_processor.transform(x_text)))
y_eval = np.argmax(y, axis=1)
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_text = graph.get_operation_by_name("input_text").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
A = graph.get_operation_by_name("self-attention/attention").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# Generate batches for one epoch
batches = data_helpers.batch_iter(list(zip(x_eval, x_text)), FLAGS.batch_size, 1, shuffle=False)
if FLAGS.visualize:
f = open('visualize.html', 'w', encoding='utf-8')
f.write('<html style="margin:0;padding:0;"><body style="margin:0;padding:0;">\n')
# Collect the predictions here
all_predictions = []
tokenizer = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+", re.UNICODE)
for batch in batches:
x_batch, text_batch = zip(*batch)
batch_predictions, attention = sess.run([predictions, A], {input_text: x_batch})
all_predictions = np.concatenate([all_predictions, batch_predictions])
if FLAGS.visualize:
f.write('<div style="margin:25px;">\n')
for k in range(len(attention)):
f.write('<p style="margin:10px;">\n')
ww = data_helpers.my_func(text_batch[k])
result = np.mean(attention[k], axis=0)[:len(ww)]
for j in range(len(result)):
alpha = "{:.7f}".format(result[j])
alpha = round(float(alpha) * 15, 4)
w = ww[j][0]
try:
                                f.write(f'<span style="background-color:rgba(255,0,0,{alpha})">{w}</span>')
except Exception as ex:
                                print(f'alpha: {alpha} _____ w:{w} ')
print(ex)
f.write('</p>\n')
f.write('</div>\n')
if FLAGS.visualize:
f.write('</body></html>')
f.close()
correct_predictions = float(sum(all_predictions == y_eval))
print("\nTotal number of test examples: {}".format(len(y_eval)))
print("Accuracy: {:g}\n".format(correct_predictions / float(len(y_eval))))
def main(_):
eval()
if __name__ == "__main__":
tf.app.run()
``` |
{
"source": "jjayp4rk/test-sm",
"score": 2
} |
#### File: src/ml_pipeline/prepare.py
```python
from airflow.models import Variable
from time import gmtime, strftime
import boto3
def start(bucket, keys, file_paths):
timestamp_prefix = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
Variable.set("timestamp", timestamp_prefix)
s3 = boto3.client('s3')
input_key = keys[0]
input_file = file_paths[0]
preproc_key = keys[1]
preproc_file = file_paths[1]
s3.upload_file(Filename=input_file, Bucket=bucket, Key=input_key)
s3.upload_file(Filename=preproc_file, Bucket=bucket, Key=preproc_key)
``` |
{
"source": "jJayyyyyyy/USTC-2018-Smester-1",
"score": 3
} |
#### File: Lab/lab03/dt_id3_test.py
```python
from math import log
def get_cnt_dict(dataset, index=-1):
'''
    When index = -1 (the last column), counts how many times each class label occurs.
    For any other index, counts how many times each value of that feature column occurs.
'''
cnt_dict = {}
for record in dataset:
key = record[index]
if key not in cnt_dict:
cnt_dict[key] = 0
cnt_dict[key] += 1
# print(cnt_dict)
return cnt_dict
def get_cnt_dict_cont(dataset, index):
    # NOTE: unfinished helper in the source; the loop reads each record but does not
    # yet accumulate per-value counts, so the returned dict stays empty.
    tmp_dataset = sorted(dataset, key=lambda record: record[index])
    cnt_dict = {}
    size = len(tmp_dataset)
    if size < 1:
        return cnt_dict
    for record in tmp_dataset:
        key = record[index]
        y = record[-1]
    return cnt_dict
def get_max_y(y_list):
'''
    Pick the label by majority vote (the most frequent label in y_list wins).
'''
y_cnt_dict = {}
for key in y_list:
if key not in y_cnt_dict:
y_cnt_dict[key] = 0
y_cnt_dict[key] += 1
max_val = 0
max_key = 0
for key, val in y_cnt_dict.items():
if val > max_val:
max_val = val
max_key = key
max_y = max_key
return max_y
def get_new_dataset_cont(dataset, id_x, threshold):
new_dataset_less = []
new_dataset_greater = []
for record in dataset:
if record[id_x] <= threshold:
new_dataset_less.append(record[:id_x] + record[id_x+1:])
else:
new_dataset_greater.append(record[:id_x] + record[id_x+1:])
return new_dataset_less, new_dataset_greater
class Dataset(object):
def __init__(self, option):
if option == 'iris':
self.gen_iris_dataset()
def gen_MLA_dataset(self):
self.size = 5
self.dataset = [[1, 100, 1], [1, 100, 1], [1, 200, -1], [0, 100, -1], [0, 100, -1]]
# self.dataset = [[1, 1, 1], [1, 1, 1], [1, 0, -1], [0, 1, -1], [0, 1, -1]]
def gen_iris_dataset(self):
from sklearn import datasets
iris = datasets.load_iris()
self.x_name_list = iris.feature_names
self.y_name_list = iris.target_names
self.size = len(iris.data)
x_list = iris.data
y_list = iris.target
dataset = []
for i in range(self.size):
x = x_list[i].tolist()
record = [ x for x in iris.data[i] ]
y = int(y_list[i])
record.append(y)
dataset.append(record)
self.dataset = dataset
def get_index_list(self, shuffle=True, seed=None):
import numpy as np
index_list = np.arange(self.size)
if shuffle:
if seed:
np.random.seed(seed)
np.random.shuffle(index_list)
return index_list
def get_train_and_test_data(self, train_ratio=0.8, seed=0):
size = self.size
train_size = int(size * train_ratio)
test_size = size - train_size
index_list = self.get_index_list(seed=seed)
train_index = index_list[:train_size]
test_index = index_list[train_size:]
dataset = self.dataset
train_set = []
for i in train_index:
train_set.append(dataset[i])
test_set = []
for i in test_index:
test_set.append(dataset[i])
self.train_set = train_set
self.test_set = test_set
return train_set, test_set
class DecisionTree(object):
def __init__(self):
pass
def get_HD(self, dataset):
'''
        Compute the empirical entropy H(D):
        H(D) = - Σ [ pi · log2(pi) ]
        where pi = |Ci| / |D| is the fraction of records with class i
'''
total_cnt = len(dataset)
y_cnt_dict = get_cnt_dict(dataset, -1)
HD = 0
for y, cnt in y_cnt_dict.items():
p = cnt / total_cnt
HD -= p * log(p, 2)
return HD
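    # Worked example (illustrative): for dataset = [[5.1, 0], [4.9, 0], [6.0, 1]]
    # the class counts are {0: 2, 1: 1}, so
    # H(D) = -(2/3)*log2(2/3) - (1/3)*log2(1/3) ≈ 0.918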
def get_HD_A_cont(self, id_x, dataset):
'''
        Compute the empirical conditional entropy H(D|A) for a continuous attribute:
        candidate thresholds are midpoints between consecutive sorted values where the
        class label changes, and the threshold that minimizes H(D|A) is returned with it.
'''
total_cnt = len(dataset)
dataset = sorted(dataset, key=lambda record : record[id_x])
# print(dataset)
min_HD_A_cont = 1e10
best_threshold = 0
y_list = [record[-1] for record in dataset]
# print(y_list)
for i in range(1, len(y_list)):
pre_x = dataset[i-1][id_x]
now_x = dataset[i][id_x]
pre_y = dataset[i-1][-1]
now_y = dataset[i][-1]
if pre_y != now_y:
new_dataset1 = dataset[:i]
HD1 = self.get_HD(new_dataset1)
p1 = i / total_cnt
new_dataset2 = dataset[i:]
HD2 = self.get_HD(new_dataset2)
p2 = 1 - p1
threshold = (pre_x + now_x) / 2
HD_A_cont = p1 * HD1 + p2 * HD2
# print()
# print('pre_y:', pre_y)
# print('now_y:', now_y)
# print('new_dataset1: ', new_dataset1)
# print('new_dataset2: ', new_dataset2)
# print('p1: ', p1)
# print('p2: ', p2)
# print('threshold: ', threshold)
# print('HD1: ', HD1)
# print('HD2: ', HD2)
# print('HD_A_cont: ', HD_A_cont)
if HD_A_cont < min_HD_A_cont:
min_HD_A_cont = HD_A_cont
best_threshold = threshold
# print()
# print('min_HD_A_cont ', min_HD_A_cont)
# print('best_threshold ', best_threshold)
# print('\n')
return min_HD_A_cont, best_threshold
def get_best_feature_index_cont(self, dataset):
HD = self.get_HD(dataset)
feature_num = len(dataset[0]) - 1
max_info_gain = 0
best_feature_index = 0
best_threshold = 0
for feature_index in range(feature_num):
# find min_HD_A_cont
# so that info_gain_cont is max
HD_A_cont, threshold = self.get_HD_A_cont(feature_index, dataset)
info_gain = HD - HD_A_cont
# print(HD_A_cont, threshold, info_gain)
if info_gain > max_info_gain:
max_info_gain = info_gain
best_feature_index = feature_index
best_threshold = threshold
# print('HD:', HD)
# print('HD_A_cont:', HD_A_cont)
# print('info_gain ', info_gain)
# print('max_info_gain ', max_info_gain)
# print('best_threshold ', best_threshold)
# print('\n\n')
return best_feature_index, best_threshold
def create_tree_cont(self, dataset, x_name_list):
feature_name = [name for name in x_name_list]
y_list = [ record[-1] for record in dataset ]
if len(y_list) == 0:
return 0
y0 = y_list[0]
if y_list.count(y0) == len(y_list):
            # only one class remains; use it as the leaf label
y = y0
return y
record0 = dataset[0]
if len(record0) == 1:
            # only one attribute left, cannot split further; pick the leaf label by majority vote
y = get_max_y(y_list)
return y
index, threshold = self.get_best_feature_index_cont(dataset)
name = feature_name[index] + '#' + str(threshold)
tree = {name: {}}
# feature_value1 = set([record[index] for record in dataset])
feature_value1 = '<= ' + str(threshold)
feature_value2 = '> ' + str(threshold)
del( feature_name[index] )
sub_feature_name1 = [ name for name in feature_name]
sub_feature_name2 = [ name for name in feature_name]
new_dataset1, new_dataset2 = get_new_dataset_cont(dataset, index, threshold)
sub_tree1 = self.create_tree_cont(new_dataset1, sub_feature_name1)
tree[name][0] = sub_tree1
sub_tree2 = self.create_tree_cont(new_dataset2, sub_feature_name2)
tree[name][1] = sub_tree2
return tree
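    # Illustrative shape of the returned tree (threshold values are made up):
    #   {'petal length (cm)#2.45': {0: 0, 1: {'petal width (cm)#1.75': {0: 1, 1: 2}}}}
    # Keys are "feature name#threshold"; branch 0 holds the subtree for values <= threshold,
    # branch 1 for values > threshold, and leaves are integer class labels.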
def get_accuracy(self, tree, test_set):
get_index = {'sepal length (cm)':0, 'sepal width (cm)':1, 'petal length (cm)':2, 'petal width (cm)':3}
cnt_all = len(test_set)
cnt_yes = 0
for ix in range(cnt_all):
sub_tree = tree
test_sample = test_set[ix]
# print(test_sample)
for i in range(10):
key = list(sub_tree.keys())[0]
# print(key)
pos = key.find('#')
name = key[:pos]
threshold = float(key[pos+1:])
# print('name:', name)
# print('threshold:', threshold)
feature_index = get_index[name]
x = test_sample[feature_index]
# print('x:', x)
val_list = sub_tree[key]
if x <= threshold:
val = val_list[0]
else:
val = val_list[1]
if isinstance(val, int):
# print('test_y:', test_sample[-1])
# print('pred_y:', val)
if val == test_sample[-1]:
cnt_yes += 1
# print('index', i)
break
else:
sub_tree = val
        print('Accuracy: %.2f%%' % (100*cnt_yes/cnt_all))
if __name__ == '__main__':
ds = Dataset(option='iris')
train_set, test_set = ds.get_train_and_test_data(train_ratio=0.8, seed=2)
x_name_list = ds.x_name_list
dt = DecisionTree()
tree = dt.create_tree_cont(train_set, x_name_list)
print('id3 decision tree:\n', tree, '\n\n') # https://jsonformatter.curiousconcept.com/
from treeplotter import *
createPlot(tree)
dt.get_accuracy(tree, test_set)
```
#### File: Lab/lab03/dt_sklearn.py
```python
import numpy as np
class Dataset(object):
def __init__(self, train_x, train_y, test_x, test_y):
self.train_x = train_x
self.train_y = train_y
self.test_x = test_x
self.test_y = test_y
self.pred_y = None
class FeatureEngineer(object):
def __init__(self, iris):
self.x = iris.data[:]
self.y = iris.target[:]
def standardize_with_sklearn(self):
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
std_x = ss.fit_transform(self.x)
self.std_x = std_x
return self.std_x
def get_index_list(self, shuffle=True,seed=None):
index_list = np.arange(self.x.shape[0])
if shuffle:
if seed:
np.random.seed(seed)
np.random.shuffle(index_list)
# print(index_list)
return index_list
def get_train_and_test_data(self, train_ratio=0.8, shuffle=True, seed=0):
x = self.x
y = self.y
size = self.x.shape[0]
train_size = int(size * train_ratio)
test_size = size - train_size
index_list = self.get_index_list(shuffle=shuffle, seed=seed)
train_index = index_list[:train_size]
test_index = index_list[train_size:]
dataset = Dataset(x[train_index], y[train_index], x[test_index], y[test_index])
return dataset
def test_iris():
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
iris = datasets.load_iris()
fe = FeatureEngineer(iris)
fe.standardize_with_sklearn()
dataset = fe.get_train_and_test_data(train_ratio=0.70)
dt = DecisionTreeClassifier(criterion='entropy')
dt.fit(dataset.train_x, dataset.train_y)
pred_y = dt.predict(dataset.test_x)
dataset.pred_y = pred_y
from sklearn.metrics import accuracy_score
print("准确率: ", accuracy_score(dataset.test_y, dataset.pred_y))
if __name__ == '__main__':
test_iris()
```
#### File: Lab/lab03/gen_iris_dataset.py
```python
class Dataset(object):
def __init__(self, option):
if option == 'iris':
self.gen_iris_dataset()
def gen_MLA_dataset(self):
self.size = 5
self.dataset = [[1, 100, 1], [1, 100, 1], [1, 200, -1], [0, 100, -1], [0, 100, -1]]
# self.dataset = [[1, 1, 1], [1, 1, 1], [1, 0, -1], [0, 1, -1], [0, 1, -1]]
def gen_iris_dataset(self):
from sklearn import datasets
iris = datasets.load_iris()
self.x_name_list = iris.feature_names
self.y_name_list = iris.target_names
self.size = len(iris.data)
x_list = iris.data
y_list = iris.target
dataset = []
for i in range(self.size):
x = x_list[i].tolist()
record = [ x for x in iris.data[i] ]
y = int(y_list[i])
record.append(y)
dataset.append(record)
self.dataset = dataset
def get_index_list(self, shuffle=True, seed=None):
import numpy as np
index_list = np.arange(self.size)
if shuffle:
if seed:
np.random.seed(seed)
np.random.shuffle(index_list)
return index_list
def get_train_and_test_data(self, train_ratio=0.8, seed=0):
size = self.size
train_size = int(size * train_ratio)
test_size = size - train_size
index_list = self.get_index_list(seed=seed)
train_index = index_list[:train_size]
test_index = index_list[train_size:]
dataset = self.dataset
train_set = []
for i in train_index:
train_set.append(dataset[i])
test_set = []
for i in test_index:
test_set.append(dataset[i])
self.train_set = train_set
self.test_set = test_set
return train_set, test_set
if __name__ == '__main__':
ds = Dataset(option='iris')
train_set, test_set = ds.get_train_and_test_data(train_ratio=0.8, seed=2)
``` |
{
"source": "jJayyyyyyy/USTC-2018-Summer-Course",
"score": 3
} |
#### File: DailyTest/PyMySQL/testdb.py
```python
import mysql.connector, logging, datetime
def doSQL(sql, args=''):
    res = None
    conn = None
    cursor = None
    try:
        conn = mysql.connector.connect(user='xxx', password='<PASSWORD>', database='zzz')
        cursor = conn.cursor()
        cursor.execute(sql)
        if args == 'retrieve':
            res = cursor.fetchall()
        conn.commit()
    except Exception as e:
        logging.exception(e)
    finally:
        # guard the cleanup so a failed connect cannot raise NameError here
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
    return res
def initdb():
sql = 'drop table if exists menu'
doSQL(sql)
sql = 'create table menu (menu_id int primary key not null, menu_name varchar(40) not null, menu_date date not null);'
doSQL(sql)
sql = "insert into menu values(1, 'hello', '20180825')"
doSQL(sql)
# CRUD
def create():
print('\n---------before insert:---------')
retrieve()
sql = "insert into menu values (2, 'zzz', '20180827')"
doSQL(sql);
print('\n---------after insert:---------')
retrieve()
print('\n\n')
def retrieve():
sql = 'select * from menu'
res = doSQL(sql, 'retrieve')
for record in res:
for column in record:
print(str(column), end='\t\t')
print()
def update():
print('\n---------before update:---------')
retrieve()
sql = "update menu set menu_date = '20180829' where menu_id = 2"
doSQL(sql)
print('\n---------after update:---------')
retrieve()
print('\n\n')
def delete():
print('\n---------before delete:---------')
retrieve()
sql = "delete from menu where menu_id = 2"
doSQL(sql)
print('\n---------after delete:---------')
retrieve()
print('\n\n')
if __name__ == '__main__':
initdb()
create()
update()
delete()
``` |
{
"source": "jjbandit/game",
"score": 3
} |
#### File: game/scripts/gdb_pretty_printers.py
```python
import gdb;
from pprint import pprint;
class v3_printer:
def __init__(self, val):
self.val = val
def to_string(self):
return self.val['E']
class v4_printer:
def __init__(self, val):
self.val = val
def to_string(self):
return self.val['E']
class counted_string_printer:
def __init__(self, val):
self.val = val
def to_string(self):
return self.val['Start'].string('ascii', 'strict', int(self.val['Count']))
class xml_property_printer:
def __init__(self, val):
self.val = val
def to_string(self):
name = counted_string_printer(self.val['Name']).to_string()
value = counted_string_printer(self.val['Value']).to_string()
if name and value:
return "%s=\"%s\"" % (name, value)
elif name:
return "%s" % (name)
else:
return "Error printing xml_property, Name field not set, Value set to : \"%s\"" % (value)
class xml_token_printer:
def __init__(self, val):
self.val = val
def to_string(self):
type_string = str(self.val['Type']).split('_')[1]
value_string = xml_property_printer(self.val['Property']).to_string()
if (type_string == "Boolean" or
type_string == "Float" or
type_string == "Int" or
type_string == "Property"):
return "%s" % (value_string)
elif type_string == "Open":
return "<%s>" % (value_string)
elif type_string == "Close":
return "</%s>" % (value_string)
elif type_string == "Unknown":
return "Zero Token"
else:
return "Invalid type string encountered in pretty printer {}".format(type_string)
def bonsai_pretty_printers():
pp = gdb.printing.RegexpCollectionPrettyPrinter("bonsai_printers")
pp.add_printer('counted_string' , '^counted_string$' , counted_string_printer)
pp.add_printer('xml_token_printer' , '^xml_token$' , xml_token_printer)
pp.add_printer('xml_property_printer' , '^xml_property$' , xml_property_printer)
pp.add_printer('v4' , '^v4$' , v4_printer)
pp.add_printer('v3' , '^v3$' , v3_printer)
return pp
gdb.printing.register_pretty_printer(
gdb.current_objfile(),
bonsai_pretty_printers())
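# Illustrative usage (hypothetical variable name; the script path is the one in this
# repo's file header):
#   (gdb) source scripts/gdb_pretty_printers.py
#   (gdb) print MyString    # a counted_string value now renders via counted_string_printer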
``` |
{
"source": "JJBarata/jogos",
"score": 4
} |
#### File: JJBarata/jogos/main.py
```python
import forca
import adivinhacao
def escolhe_jogo():
print('*********************************')
    print('****** Choose your game ******!')
print('*********************************')
    print('Choose the game you want to play: (1) Forca (hangman) - (2) Adivinhação (guessing)')
    jogo = int(input('Enter the game number: '))
if jogo == 1:
        print('Playing Forca')
forca.jogar()
elif jogo == 2:
        print('Playing Adivinhação')
adivinhacao.jogar()
if __name__ == '__main__':
escolhe_jogo()
``` |
{
"source": "JJBarata/py-social-network",
"score": 2
} |
#### File: py-social-network/dwitter/views.py
```python
from django.shortcuts import render
from .models import Profile
def dashboard(request):
return render(request, 'base.html')
def profile_list(request):
profiles = Profile.objects.exclude(user=request.user)
return render(request, 'dwitter/profile_list.html', {'profiles': profiles})
def profile(request, pk):
profile = Profile.objects.get(pk=pk)
return render(request, 'dwitter/profile.html', {'profile': profile})
``` |
{
"source": "jjberg83/oving_10",
"score": 3
} |
#### File: jjberg83/oving_10/test_quiz.py
```python
import unittest
from quiz import *
class TestMultipleChoice(unittest.TestCase):
def test_answer_check(self):
liste_med_alle_instansene = read_the_document()
# Question 1
self.assertFalse(liste_med_alle_instansene[0].answer_check(1))
self.assertFalse(liste_med_alle_instansene[0].answer_check(2))
self.assertFalse(liste_med_alle_instansene[0].answer_check(4))
self.assertTrue(liste_med_alle_instansene[0].answer_check(3))
# Question 2
self.assertFalse(liste_med_alle_instansene[1].answer_check(1))
self.assertFalse(liste_med_alle_instansene[1].answer_check(3))
self.assertFalse(liste_med_alle_instansene[1].answer_check(4))
self.assertTrue(liste_med_alle_instansene[1].answer_check(2))
# Question 3
self.assertFalse(liste_med_alle_instansene[2].answer_check(2))
self.assertFalse(liste_med_alle_instansene[2].answer_check(3))
self.assertFalse(liste_med_alle_instansene[2].answer_check(4))
self.assertTrue(liste_med_alle_instansene[2].answer_check(1))
# Question 4
self.assertFalse(liste_med_alle_instansene[3].answer_check(1))
self.assertFalse(liste_med_alle_instansene[3].answer_check(2))
self.assertFalse(liste_med_alle_instansene[3].answer_check(4))
self.assertTrue(liste_med_alle_instansene[3].answer_check(3))
# Question 5
self.assertFalse(liste_med_alle_instansene[4].answer_check(1))
self.assertFalse(liste_med_alle_instansene[4].answer_check(3))
self.assertFalse(liste_med_alle_instansene[4].answer_check(4))
self.assertTrue(liste_med_alle_instansene[4].answer_check(2))
# Question 6
self.assertFalse(liste_med_alle_instansene[5].answer_check(1))
self.assertFalse(liste_med_alle_instansene[5].answer_check(3))
self.assertFalse(liste_med_alle_instansene[5].answer_check(4))
self.assertFalse(liste_med_alle_instansene[5].answer_check(5))
self.assertTrue(liste_med_alle_instansene[5].answer_check(2))
# Question 7
self.assertFalse(liste_med_alle_instansene[6].answer_check(1))
self.assertFalse(liste_med_alle_instansene[6].answer_check(2))
self.assertFalse(liste_med_alle_instansene[6].answer_check(3))
self.assertFalse(liste_med_alle_instansene[6].answer_check(5))
self.assertTrue(liste_med_alle_instansene[6].answer_check(4))
# Question 8
self.assertFalse(liste_med_alle_instansene[7].answer_check(1))
self.assertTrue(liste_med_alle_instansene[7].answer_check(2))
def test_korrekt_svar_tekst(self):
liste_med_alle_instansene = read_the_document()
# Question 1
self.assertFalse(liste_med_alle_instansene[0].korrekt_svar_tekst()=='RAM')
self.assertFalse(liste_med_alle_instansene[0].korrekt_svar_tekst()=='Harddisk')
self.assertFalse(liste_med_alle_instansene[0].korrekt_svar_tekst()=='Sekundærlager')
self.assertTrue(liste_med_alle_instansene[0].korrekt_svar_tekst()=='CPU')
# Question 2
self.assertFalse(liste_med_alle_instansene[1].korrekt_svar_tekst()=='bit')
self.assertFalse(liste_med_alle_instansene[1].korrekt_svar_tekst()=='bryter')
self.assertFalse(liste_med_alle_instansene[1].korrekt_svar_tekst()=='transistor')
self.assertTrue(liste_med_alle_instansene[1].korrekt_svar_tekst()=='byte')
# Question 3
self.assertFalse(liste_med_alle_instansene[2].korrekt_svar_tekst()=='instruksjoner')
self.assertFalse(liste_med_alle_instansene[2].korrekt_svar_tekst()=='variabler')
self.assertFalse(liste_med_alle_instansene[2].korrekt_svar_tekst()=='CPU-er')
self.assertTrue(liste_med_alle_instansene[2].korrekt_svar_tekst()=='bits')
# Question 4
self.assertFalse(liste_med_alle_instansene[3].korrekt_svar_tekst()=='heksadesimal')
self.assertFalse(liste_med_alle_instansene[3].korrekt_svar_tekst()=='oktal')
self.assertFalse(liste_med_alle_instansene[3].korrekt_svar_tekst()=='desimal')
self.assertTrue(liste_med_alle_instansene[3].korrekt_svar_tekst()=='binær')
# Question 5
self.assertFalse(liste_med_alle_instansene[4].korrekt_svar_tekst()=='38')
self.assertFalse(liste_med_alle_instansene[4].korrekt_svar_tekst()=='173')
self.assertFalse(liste_med_alle_instansene[4].korrekt_svar_tekst()=='1011001')
self.assertTrue(liste_med_alle_instansene[4].korrekt_svar_tekst()=='89')
# Question 6
self.assertFalse(liste_med_alle_instansene[5].korrekt_svar_tekst()=='1011')
self.assertFalse(liste_med_alle_instansene[5].korrekt_svar_tekst()=='11011')
self.assertFalse(liste_med_alle_instansene[5].korrekt_svar_tekst()=='101111')
self.assertFalse(liste_med_alle_instansene[5].korrekt_svar_tekst()=='111011')
self.assertTrue(liste_med_alle_instansene[5].korrekt_svar_tekst()=='10111')
# Question 7
self.assertFalse(liste_med_alle_instansene[6].korrekt_svar_tekst()=='assembly')
self.assertFalse(liste_med_alle_instansene[6].korrekt_svar_tekst()=='c')
self.assertFalse(liste_med_alle_instansene[6].korrekt_svar_tekst()=='java')
self.assertFalse(liste_med_alle_instansene[6].korrekt_svar_tekst()=='python')
self.assertTrue(liste_med_alle_instansene[6].korrekt_svar_tekst()=='maskinkode')
# Question 8
self.assertFalse(liste_med_alle_instansene[7].korrekt_svar_tekst()=='kompilert')
self.assertTrue(liste_med_alle_instansene[7].korrekt_svar_tekst()=='tolket')
if __name__ == '__main__':
unittest.main()
# def read_the_document():
# question_list = list()
# with open('sporsmaalsfil.txt', 'r', encoding='UTF8') as fila:
# for linje in fila:
# linje_liste = linje.replace(':', ',').strip('\n').split(',')
# sporsmaal = linje_liste.pop(0)
# rett_svar = int(linje_liste.pop(0))
# alternativer = [x.strip(' []') for x in linje_liste]
# ny_instans = MultipleChoice(sporsmaal, rett_svar, alternativer)
# question_list.append(ny_instans)
# return question_list
# class MultipleChoice:
# #Constructor
# def __init__(self, question, correct_answer, alternatives):
# self.question = question
# self.correct_answer = correct_answer
# self.alternatives = alternatives
# def answer_check(self, entered_integer):
# if entered_integer == self.correct_answer+1:
# return True
# else:
# return False
# def korrekt_svar_tekst(self):
# return self.alternatives[self.correct_answer]
# def __str__(self):
# return f'{self.question}\n' + '\n'.join(f'{indeks} - {element}' for indeks, element in enumerate(self.alternatives,1))+ '\n'
# if __name__ == '__main__':
# liste_med_alle_instansene = read_the_document()
# sum_spiller1 = 0
# sum_spiller2 = 0
# for sporsmaal in liste_med_alle_instansene:
# print(sporsmaal)
# svar_spiller1 = int(input("Velg et svaralternativ for spiller 1: "))
# svar_spiller2 = int(input("Velg et svaralternativ for spiller 2: "))
# print(f'Korrekt svar: {sporsmaal.korrekt_svar_tekst()}')
# if sporsmaal.answer_check(svar_spiller1):
# sum_spiller1 += 1
# print('Spiller 1: Korrekt')
# else:
# print('Spiller 1: Feil')
# if sporsmaal.answer_check(svar_spiller2):
# sum_spiller2 += 1
# print('Spiller 2: Korrekt')
# else:
# print('Spiller 2: Feil')
# print(f'Sum spiller 1: {sum_spiller1}')
# print(f'Sum spiller 2: {sum_spiller2}')
``` |
{
"source": "jjberg83/python_eksperimenter",
"score": 3
} |
#### File: jjberg83/python_eksperimenter/lambda.py
```python
def originalFunc(tall):
return lambda a : a * tall
myDoubler = originalFunc(2)
print(myDoubler(10))
```
#### File: jjberg83/python_eksperimenter/testing_av_not.py
```python
def not_does_this(x):
return not x
my_str = 'halo'
print(not_does_this(1))
print(not_does_this(len(my_str) % 2))
``` |
{
"source": "jjbernard/BeagleAD",
"score": 3
} |
#### File: BeagleAD/tests/test_loader.py
```python
import unittest
from dataload import createTSDataLoader
class DataLoadTests(unittest.TestCase):
def setUp(self):
self.train_size = 0.8
self.bs = 64
self.w = 20
self.p_w = 2
self.data_file = 'data.csv'
self.train_dl, self.valid_dl = createTSDataLoader(filename='data.csv',
train_size=self.train_size,
bs = self.bs,
w = self.w,
                                                          p_w = self.p_w)
def tearDown(self):
pass
    def test_load(self):
pass
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jjbernard/Pytorch-FastAI-Tests",
"score": 3
} |
#### File: Pytorch-FastAI-Tests/60MinBlitz/classifier.py
```python
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def main(PATH, classes, device):
print("Creating transforms for image dataset")
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
print("Creating dataset object")
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
print("Creating dataloader with a batch size of 4")
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
print(f"Classes are {classes}")
# dataiter = iter(trainloader)
# images, labels = dataiter.next()
#
# imshow(torchvision.utils.make_grid(images))
# print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
print("Create network")
net = Net()
net.to(device)
print("Define loss (cross entropy) and optimizer")
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.004, momentum=0.9)
print("iterate over number of epochs and dataset")
for epoch in range(2):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
inputs, labels = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
output = net(inputs)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 2000 == 1999:
print(f"[{epoch + 1, i + 1}] loss: {running_loss / 2000}")
running_loss = 0.0
print("finished training")
print("save the model")
torch.save(net.state_dict(), PATH)
print()
return testloader
def imshow(img):
img = img / 2 + 0.5 # denormalize...
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
def inference(testloader, PATH, classes):
correct = 0
total = 0
net = Net()
net.load_state_dict(torch.load(PATH))
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
#print(f"output size: {outputs.size()}")
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f"Accuracy of the network on the 10000 test images: {100 * correct / total:.2f}%")
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
c = (predicted == labels).squeeze()
for i in range(4):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print(f"Accuracy of {classes[i]}: {100 * class_correct[i] / class_total[i]} %")
if __name__ == '__main__':
PATH = './cifar_net.pth'
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print()
print(f"Device is: {device}")
tloader = main(PATH, classes, device)
print()
print("Inference time...")
inference(tloader, PATH, classes)
```
#### File: Pytorch-FastAI-Tests/60MinBlitz/getting_started.py
```python
import torch
import numpy as np
def main():
print("Create a 5x3 tensor with data from the memory space that it has been created from")
x = torch.empty(5, 3)
print(x)
print("Create a 5x3 tensor with random data")
x = torch.rand(5, 3)
print(x)
print("Create a zero filled matrix of data type long")
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
print("Create a tensor directly from data")
x = torch.tensor([5.5, 3])
print(x)
print("Create a tensor based on an existing tensor")
print("Create a tensor filled with ones")
x = torch.ones(5, 3, dtype=torch.double)
print(x)
print("Create a tensor that has the same shape as before but filled with random data and adjusted data type")
x = torch.randn_like(x, dtype=torch.float)
print(x)
print("Show the size")
print(x.size())
print()
print("---------------------------------------------------------------------------------------------")
print()
print("OPERATIONS")
print("Create a random tensor of the same size as before and add it to our previous tensor")
y = torch.rand(5, 3)
print(y)
print()
print(x + y)
print("another way to add tensors")
print(torch.add(x, y))
print("providing an output tensor as argument")
result = torch.empty(5, 3)
print("non initialized empty tensor")
print(result)
torch.add(x, y, out=result)
print("same tensor now used as the output for the addition operation")
print(result)
print("in place operation with .add_()")
y.add_(x)
print(y)
print("using standard numpy indexing: x[:, 1] -> all rows and second column")
print(x[:, 1])
print("To resize, use .view and -1 will imply that torch will find automatically the other dimensions")
x = torch.randn(4, 4)
print(x)
y = x.view(16)
print(y)
z = x.view(-1, 8)
print("Printing the sizes of the tensors with the different resizes")
print(x.size(), y.size(), z.size())
print("For a one element tensor, use .item to get the item as a Python object")
x = torch.randn(1)
print(x)
print(x.item())
print()
print("---------------------------------------------------------------------------------------------")
print()
print("numpy bridge")
a = torch.ones(5)
print(a)
b = a.numpy()
print(b)
print("If we do operations on the tensor, it will change the value of the numpy array automatically")
a.add_(1)
print(a)
print(b)
print("we can create tensors from numpy arrays")
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
print(b)
if __name__ == '__main__':
main()
``` |
{
"source": "jjbeto/pycalc-micro",
"score": 2
} |
#### File: pycalc-micro/evaluate/main.py
```python
import re
from flask import Flask, request
from evaluate.eval_operation import evaluate
from shared.error_handler import initialize_error_handlers
from shared.error_model import TextPlainRequestError, RequestBodyMandatoryError
_REGEX_EXTRACT_BODY = re.compile(r'^b\'(.*)?\'$')
def create_app(test_config=None):
"""
Create an app by initializing components.
"""
application = Flask(__name__)
application.config.from_mapping(
# a default secret that should be overridden by instance config
SECRET_KEY="master-key",
)
if test_config:
application.config.update(test_config)
initialize_error_handlers(application)
@application.route('/', methods=['POST'])
def post_evaluate():
if request.content_type != 'text/plain':
raise TextPlainRequestError()
body = extract_body(request.get_data())
evaluation = evaluate(body)
return str(evaluation), 200, {'Content-Type': 'text/plain; charset=utf-8'}
return application
def extract_body(data):
raw_body = str(data)
groups = _REGEX_EXTRACT_BODY.match(raw_body).groups()
if len(groups):
return groups[0]
else:
raise RequestBodyMandatoryError()
if __name__ == '__main__':
app = create_app()
app.run(host='0.0.0.0')
```
#### File: pycalc-micro/shared/communicator.py
```python
import msgpack
import requests
from shared.error_model import InvalidOperationError, MsgpackInvalidError
from shared.obj_factory import check_type
_LOCATIONS = {
'evaluate': 'http://localhost:5000/',
'add': 'http://localhost:5001/',
'sub': 'http://localhost:5002/',
'mul': 'http://localhost:5003/',
'div': 'http://localhost:5004/',
'pow': 'http://localhost:5005/',
}
def remote_call(json):
"""
Execute a remote call to evaluate an operation_type
"""
operation_type = json.get('t')
if not operation_type or operation_type.lower() not in _LOCATIONS.keys():
raise InvalidOperationError()
response = requests.post(_LOCATIONS[operation_type.lower()],
data=msgpack.packb(json, raw=False),
headers={'Content-Type': 'application/msgpack'})
    return msgpack.unpackb(response.content, raw=False)  # unpackb: response.content is bytes, not a stream
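# Illustrative node format handled by remote_call / get_evaluation (the same payload
# shape exercised by this project's unit tests; the numbers are made up):
#   {'t': 'add', 'v': {'l': {'t': 'num', 'v': 1}, 'r': {'t': 'num', 'v': 2}}}
# 't' names the operation ('num' marks a literal) and 'v' holds either the number
# or the left/right operand nodes.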
def get_evaluation(json):
"""
    Evaluate a single operand node: return it directly if it is a 'num' node,
    otherwise dispatch it to the matching remote service
    :param json: the node to evaluate
"""
if isinstance(json, dict):
return json if check_type(json, 'num') else remote_call(json)
else:
raise InvalidOperationError(f'Invalid node: {json}')
def extract_body(data):
"""
Extracts the message from data
"""
try:
return msgpack.unpackb(data, raw=False)
except ValueError:
raise MsgpackInvalidError()
```
#### File: pycalc-micro/shared/error_model.py
```python
class ApiError(Exception):
"""
Base exception with a status code and message
"""
def __init__(self, status_code, message):
self.__status_code = status_code
self.__message = message
@property
def status_code(self):
return str(self.__status_code)
@property
def message(self):
return str(self.__message)
class TextPlainRequestError(ApiError):
"""
406 Invalid request type, only text/plain is accepted
"""
def __init__(self):
super().__init__(406, 'Invalid request type, only text/plain is accepted')
class MsgpackRequestError(ApiError):
"""
406 Invalid request type, only application/msgpack is accepted
"""
def __init__(self):
super().__init__(406, 'Invalid request type, only application/msgpack is accepted')
class MsgpackInvalidError(ApiError):
"""
    400 Bad Request: not possible to decode message
"""
def __init__(self):
super().__init__(400, 'Bad Request: not possible to decode message')
class BadRequestError(ApiError):
"""
    400 Bad Request: message is not correct
"""
def __init__(self):
super().__init__(400, 'Bad Request: message is not correct')
class RequestBodyMandatoryError(ApiError):
"""
400 Bad Request - request body is mandatory
"""
def __init__(self):
        super().__init__(400, 'Bad Request: request body is mandatory')
class InvalidOperationError(ApiError):
"""
500 Internal Error - operation is invalid
"""
def __init__(self, msg=None):
        super().__init__(500, f'Internal Error: {msg}' if msg else 'Internal Error: operation is invalid')
```
#### File: tests/unit/test_div.py
```python
import msgpack
import pytest
from div.div_operation import div
from div.main import create_app
@pytest.fixture
def app():
app = create_app({
'TESTING': True,
})
yield app
@pytest.fixture
def client(app):
return app.test_client()
def test_post_div(client):
json = {'t': 'div',
'v': {
'l': {'t': 'num', 'v': 2},
'r': {'t': 'num', 'v': 2}
}}
response = client.post("/", data=msgpack.packb(json), content_type='application/msgpack')
assert response.status_code == 200
result = msgpack.unpackb(response.data, raw=False)
assert result.get('v') == 1
def test_div():
json = {'t': 'div',
'v': {
'l': {'t': 'num', 'v': 2},
'r': {'t': 'num', 'v': 2}
}}
response = div(json)
assert response.get('t') == 'num'
assert response.get('v') == 1
def test_add_div_levels():
json = {'t': 'div',
'v':
{
'l': {'t': 'num', 'v': 2},
'r':
{
't': 'div',
'v':
{
'l': {'t': 'num', 'v': 4},
'r': {'t': 'num', 'v': 2}
}
}
}}
response = div(json)
assert response.get('t') == 'num'
assert response.get('v') == 1
``` |
{
"source": "jjbiggins/FuncNotify",
"score": 3
} |
#### File: FuncNotify/FuncNotify/__main__.py
```python
import argparse
import subprocess
import ast
import re
from FuncNotify.api import time_func
NO_ARGS_ERROR = "No arguments remaining to be executed, confirm " \
"the desired execution is in the proper format"
COLLECTION_ARGS_ERROR = "Collection Argument not closed properly, " \
"check for hanging identifiers"
class ParseKwargs(argparse.Action):
"""Parses for the format `arg=val` parses for either strings or bools exclusively,
no ints. If it paressess a val that doesn't have a `=`, it's added to the unparse.
Wrapped to nobody can access this as it's exclusive to parsing. Used by argparse
`__call__()` parses args into their proper formats, whether it's a kwarg, collection or a command"""
translation_dict = {"true" : True,
"True" : True,
"false": False,
"False": False}
""" Translates string bools to bools"""
class CollectionParse:
"""Class used to parse string representations of collections
        Does so safely with ast.literal_eval and ensures that all collections
        are closed"""
CloseList = {'[': ']',
'{': '}',
'(': ')',
'"': '"',
"'": "'"}
PairTypes = {*CloseList.keys(), *CloseList.values()}
def __init__(self, key: str):
self.collection_str_list = []
self.remaining_pair_type = []
self.key=key
def build(self, value: str, PairTypes=PairTypes, CloseList=CloseList):
"""Ensures that all characters that start something terminate,
Lowkey a compiler check, ensures the collection is written properly
and all opens are closed.
Args:
value (str): new string input
"""
for pair_type in filter(lambda x: x in PairTypes, value):
if self.remaining_pair_type and pair_type == CloseList.get(self.remaining_pair_type[-1]):
self.remaining_pair_type.pop()
else:
self.remaining_pair_type.append(pair_type)
self.collection_str_list.append(value)
return not self.remaining_pair_type
def eval_(self):
"""Evaluates the collecte list and returns the collection
Raises:
Exception: Depending on problems with the string, \
like compile errors
"""
return ast.literal_eval("".join(self.collection_str_list))
def __bool__(self):
"""Saw in video, faster than bool(list) apparently"""
return not not self.remaining_pair_type
def add_kwarg(self, namespace, key, value):
"""Adds args to args.kwargs so it can be passed into the decorator"""
if key in getattr(namespace, self.dest): # If key in list
if isinstance(getattr(namespace, self.dest)[key], list):
getattr(namespace, self.dest)[key].append(value) # Adds to list
else:
getattr(namespace, self.dest)[key] = [getattr(namespace, self.dest)[key], value] # creates list
else:
getattr(namespace, self.dest)[key] = value
def add_collection_parse(self, namespace, CollectParseObj, value):
"""Adds value to CollectParseObj, if it is a proper collection,
it will evaluate and add it to the kwargs
"""
if CollectParseObj.build(value):
self.add_kwarg(namespace, CollectParseObj.key, CollectParseObj.eval_())
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, dict())
setattr(namespace, "_unrecognized_args", list())
CollectParseObj=ParseKwargs.CollectionParse("")
for value in values:
if not CollectParseObj and re.match('^[a-zA-Z0-9_]+(=[^\n\t\r]+)$', value):
key, value = value.split('=')
if value[0] in ["[", "{", "("]:
# Occurs only if the first char is a collection object
CollectParseObj=ParseKwargs.CollectionParse(key)
self.add_collection_parse(namespace, CollectParseObj, value)
else:
value = ParseKwargs.translation_dict.get(value, value) # Ensures bools are properly parsed
self.add_kwarg(namespace, key, value)
elif CollectParseObj: # Count repeats of the first guy and subtract count of the other guy
self.add_collection_parse(namespace, CollectParseObj, value)
else:
getattr(namespace, "_unrecognized_args").append(value)
if CollectParseObj:
raise argparse.ArgumentError(COLLECTION_ARGS_ERROR)
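# Illustrative behaviour of ParseKwargs (not part of the original module): given
#   parser.add_argument('-k', '--kwargs', nargs='*', action=ParseKwargs)
# the tokens `-k a=hello flag=True "nums=[1, 2, 3]"` produce
#   args.kwargs == {'a': 'hello', 'flag': True, 'nums': [1, 2, 3]}
# while tokens without an `=` are routed back into the remaining arguments, i.e.
# treated as the command that main() will later execute.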
def main():
"""Parses the commandline input for the executable command and kwargs
    separated by the indicator `-k` or `--kwargs` and `arg=val` syntax.
One can pass collections as strings as kwargs but must wrap the object in
double quotes.
Uses `subprocess.run()` to execute the command input.
Raises:
argparse.ArgumentError: If either a collection isn't closed \
or no command is left after parsing, ArgumentError will be raised
"""
parser = argparse.ArgumentParser(
description="FuncNotify - Be notified securely when your function/script completes. " \
"Store all your variables in a `.env` file and let us do the work for you " \
"To input arguments, use --kwargs followed by `arg=value`")
parser.add_argument('-k', '--kwargs', nargs='*', action=ParseKwargs,
help="Input as many kwargs as needed to specify exactly how you want to be notified " \
"Make sure all inputs are in the format `var`=`value` which all values " \
"will be interpreted as strings. To input sets, dicts or lists, it's best wrap the " \
"entire collection in double quotes and strings within in single quotes")
args, remaining_args = parser.parse_known_args()
kwargs = {**args.kwargs} if args.kwargs else {}
# Runs the remaining_args as if they were ran in the terminal
def sub_run(): return subprocess.run(remaining_args, check=True)
    sub_run.__name__ = " ".join(remaining_args)  # set the function name to the command for clearer notifications
# Faster than using decorator, also using wacky lambda to raise Exceptions properly
time_func(sub_run, **kwargs)() if remaining_args else (lambda: (_ for _ in ()).throw(
argparse.ArgumentError(NO_ARGS_ERROR)))()
if __name__ == "__main__":
"""When `FuncNotify` is called from the CLI, we execute main()"""
main()
```
#### File: FuncNotify/FuncNotify/NotifyMethods.py
```python
import os
import time
import traceback
import inspect
import logging
import logging.handlers
import socket
import collections
from abc import ABCMeta, abstractmethod
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
class FactoryRegistry(ABCMeta):
_REGISTRY = {}
def __new__(cls, clsname, bases, attrs):
newclass = super().__new__(cls, clsname, bases, attrs)
        if not inspect.isabstract(newclass):  # Keep abstract classes out of the registry
cls._REGISTRY[newclass.__name__.replace("Method", "")] = newclass
return newclass
@classmethod
def get_cls_registry(cls)->dict:
"""Registers every class created in a dictionary, creating automated
        factory methods
Returns:
dict: Takes a string of type (Class name without method) and returns NotifyObj
"""
return dict(cls._REGISTRY)
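# Illustrative registry behaviour (hypothetical subclass name): a concrete subclass
# such as `class EmailMethod(NotifyMethods): ...` is picked up by the metaclass, so
# FactoryRegistry.get_cls_registry()["Email"] would map to EmailMethod; the key is
# the class name with "Method" stripped, and abstract classes are never registered.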
class NotifyMethods(metaclass=FactoryRegistry):
"""Abstract class for the methods of notifying the user, \
handles the messages and logger for error checking
"""
    # Tracking and testing: kept in case one needs to check which functions ran
_buffer = collections.deque([], maxlen=5) # Tracks last five for error checking,
__slots__ = ("__environ_dict", "_error")
logger=None
log_method_dict={}
_messageDict = {"Start": ["Function: `{0}` called...",
"Machine Name: {machine}",
"Start Time: {1}"],
"End": ["Function: `{0}` completed",
"Machine Name: {machine}",
"Finish Time: {1}",
"Total Time: {2:.2f}"],
"Error": ["Function: `{0}` failed due to a {1}",
"Exception Reason: {2}"
"Fail Time Stamp: {3}",
"Machine Name: {machine}",
"Fail Traceback: {4}"],
"Custom": ["{0}"],
}
def __init__(self, environ: dict=None, mute: bool=False, use_log: bool=False, *args, **kwargs):
self.__environ_dict = environ if isinstance(environ, dict) else {}
NotifyMethods.set_mute(mute)
try:
NotifyMethods.logger_init(self.__environ_dict, self.__environ_dict, use_log, *args, **kwargs)
# Why do I have to declare the __environ_dict twice? I have no idea TODO someone smarter help
self._set_credentials(*args, **kwargs)
self._error=None # Always default to notify user
except Exception as ex:
NotifyMethods.log(status="ERROR", METHOD=self.__class__.__name__,
message="[CREDENTIALS] Connection to setting up notifications \
                                  interrupted, double check env variables")
NotifyMethods.log(status="ERROR", METHOD=self.__class__.__name__,
message=f"[CREDENTIALS] {ex}")
self._error=CredentialError(self, ex) # If error with credentials
NotifyMethods._add_buffer(self)
@property
def environ_dict(self):
"""Wanted to hide environment variables but still be able to test
Returns:
bool: Whether environ_dict contains anything"""
return not not self.__environ_dict
def _type_or_env(self, val, env_variable: str, type_: type=str)->str:
"""Checks if inputted value is of the type `type_`, default to string, otherwise \
        searches environment for that variable. If not found, doesn't notify users
Args:
val (any): Input, should always be a string but if not will search environment
type_ (type): the type too coompare to
env_variable (str): environment variable name
Returns:
type_: important information used by apis
Raises:
KeyError: Raises if environment variable not found in name, this will set `self._error` \
to that exception so it can be accessed
"""
return val if isinstance(val, type_) else self.__environ_dict[env_variable]
@classmethod
def _add_buffer(cls, NotifyObject):
"""Adds each object to a pseudo cyclical buffer that holds 5 objects that
can be checked when you grab the buffer
"""
if isinstance(NotifyObject._error, Exception):
NotifyObject=NotifyObject._error
cls._buffer.append(NotifyObject)
@classmethod
def get_buffer(cls):
"""Buffer holding previous NotifyMethods to be able to interact with
Returns:
deque: Holds last 5 objects
"""
return cls._buffer
@classmethod
def set_mute(cls, mute: bool=False):
"""Mutes the send of messages for the entire class
Args:
mute (bool, optional): whether to enable/disable messages for a period of time. Defaults to False.
"""
cls._mute = mute if isinstance(mute, bool) else False
@classmethod
def logger_init(cls, environ: dict, log: bool=False, buffer: int=65536, logger_path: str=None, *args, **kwargs):
"""Initializes a logger to tract messages sent and errors (not errors outside of FuncNotify) that arise from sending the message.
Args:
environ (dict): current environment variables
            log (bool, optional): Whether to log the files. Defaults to False.
buffer (int, optional): Size of each log file. Defaults to 65536 (2**16).
logger_path (str, optional): path to logger. Defaults to None.
"""
if (environ.get("LOG") or log or logger_path) and cls.logger is None: # Uses existing logger if it existss
if logger_path:
path=logger_path
else:
path = environ.get("LOGGER_PATH", "")
path = path if path else os.getcwd() # If env variable but not defined is empty sets path to cwd
if not os.path.isdir(os.path.join(path, "logs")):
os.mkdir("logs")
import __main__ # Necessary for naming, setting up print formatting
logger_name = __main__.__file__.split('/')[-1].split('.')[0]
cls.logger = logging.getLogger(logger_name)
cls.logger.setLevel(logging.DEBUG)
logger_console_format = "[%(levelname)s]: %(message)s"
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.WARNING)
console_handler.setFormatter(logging.Formatter(logger_console_format))
cls.logger.addHandler(console_handler)
logger_file_format = "[%(levelname)s] - %(asctime)s - %(name)s - : %(message)s in %(pathname)s:%(lineno)d"
file_handler = logging.handlers.RotatingFileHandler(filename=f"{path}/logs/{logger_name}.log",
maxBytes=int(environ.get("FILE_SIZE", buffer)),
backupCount=1)
file_handler.setLevel(logging.WARNING)
file_handler.setFormatter(logging.Formatter(logger_file_format))
cls.logger.addHandler(file_handler)
# Dictionary houses all logging methods
logger_strings = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL", "OFF"]
logger_levels = range(logging.DEBUG, logging.CRITICAL + 11, 10)
logger_funcs = [cls.logger.debug, cls.logger.info, cls.logger.warning, cls.logger.error, cls.logger.critical]
cls.log_method_dict = dict(zip(logger_strings, logger_funcs))
cls.log_level_dict = dict(zip(logger_strings, logger_levels))
elif not (environ.get("LOG") or log or logger_path) and environ:
cls.logger_off()
# Logger suite, functions that control logging functinos that run
@classmethod
def set_logger(cls, level: int=None, level_string: str=None):
"""Determines whether the loggger should pay atention to. The default \
level is `Warning` and calling this function will set it to `Debug`.
Args:
level (int, optional): level to set log to level. Mututally exclusive with level_string.
Defaults to logging.DEBUG.
level_string (str, optional): str representation to set log level to. \
Must be all capitalized letters. Mututally exclusive with level.
Defaults to "DEBUG".
"""
if cls.logger is None:
NotifyMethods.logger_init(log=True)
if level is not None and level_string is not None:
raise ValueError("`level` and `level_string` are mutually exclusive variables")
else:
lvl = max(level if isinstance(level, int) else -1, cls.log_level_dict.get(level_string, -1))
lvl = lvl if lvl != -1 else logging.DEBUG
cls.logger.setLevel(lvl)
@classmethod
def logger_off(cls):
"""Turn off logger by setting the logger value so high nothing triggers it
"""
cls.set_logger(logging.CRITICAL+1)
@classmethod
def _format_log(cls, status: str, METHOD: str, message: str, *args, **kwargs):
ret_messsage = f"[{METHOD=}] Message = {message}"
return ret_messsage, {'exc_info': status>logging.INFO}
@classmethod
def log(cls, status: str="DEBUG", *args, **kwargs):
"""Logs the current event, one can pass multiple argugments too
Args:
status (str, optional): logging level. Defaults to "DEBUG".
"""
if cls.logger:
log_message, kwdict = cls._format_log(cls.log_level_dict.get(status, logging.ERROR), *args, **kwargs)
cls.log_method_dict.get(status,
lambda *args, **kwargs: [
cls.logger.error(*args, **kwargs),
cls.logger.error("Logger method not found, using [ERROR]"),]
)(log_message, **kwdict)
@abstractmethod
def _set_credentials(self, *args, **kwargs)->None:
"""Sets up object with environment variables
"""
pass
@abstractmethod
def _send_message(self, message: str)->None:
"""Interacts with the respective platforms apis, the prior 3 all call this functioon to send the message
"""
pass
# Suite of funcitons sends and formats messages for each different method. These guys help format each message for each of the instances
def send_start_MSG(self, func):
self._send_MSG_base(formatList=[func.__name__, time.strftime(DATE_FORMAT, time.localtime())], machine=socket.gethostname(),
type_="Start")
def send_end_MSG(self, func, diff: float):
self._send_MSG_base(formatList=[func.__name__, time.strftime(DATE_FORMAT, time.localtime()), diff], machine=socket.gethostname(),
type_="End")
def send_error_MSG(self, func, ex: Exception):
self._send_MSG_base(formatList=[func.__name__, type(ex), str(ex), time.strftime(DATE_FORMAT, time.localtime()), traceback.format_exc()], machine=socket.gethostname(),
type_="Error")
def send_custom_MSG(self, MSG: str):
"""Send custom messages, kind of an easter egg and will require a bit of custom code ot set up
Args:
MSG (str): Any valid string
"""
self._send_MSG_base(formatList=[MSG], type_="Custom")
def _format_message(self, formatList: list, type_: str="Error", *args, **kwargs):
return '\n'.join(NotifyMethods._messageDict[type_]).format(*formatList, *args, **kwargs) + self._addon(type_=type_)
def _addon(self, type_: str="Error")->str:
"""Pseudo-abstsract method, sometimess will add emojis and other fun messages
that are platform specific. Not necessary to implement but you can for personalization!
"""
return ""
def _send_MSG_base(self, *args, **kwargs)->None:
"""All functions begin by calling send_MSG_base and depending on the status of that functioon, it'll be sent or
an error will be logged if the initial credentials aren't valid
Args:
MSG (str): Current MSG to be sent.
"""
MSG = self._format_message(*args, **kwargs)
if not NotifyMethods._mute:
if self._error:
NotifyMethods.log(status="ERROR", METHOD=self.__class__.__name__,
message=f"[ERROR] {self._error} \n[Message] {MSG}")
return
try:
self._send_message(MSG)
NotifyMethods.log(status="DEBUG", METHOD=self.__class__.__name__,
message=MSG)
except Exception as ex:
self._error=MessageSendError(self, ex)
NotifyMethods.log(status="ERROR", METHOD=self.__class__.__name__,
message=f"[Error] {self._error} \n[Message] {MSG}")
else:
NotifyMethods.log(status="INFO", METHOD=self.__class__.__name__,
message=f"[Message] {MSG} \n[Muted] True")
class CredentialError(Exception):
"""Errrors occuring while setting up the credentials"""
__slots__=("NotifyObject", "error")
def __init__(self, NotifyObject: NotifyMethods, error: Exception):
self.NotifyObject=NotifyObject
"""NotifyMethods object where something went wrong"""
self.error=error
"""The Error with the NotifyMethods object"""
super().__init__(self.__str__())
def __str__(self):
return f"The following exception occurred with the credentials of using {self.NotifyObject.__class__.__name__} \n" \
f"[Error] {self.error} \n" \
f"[Fix] Check all credentials are strings and are accurate, check the type hints, and env variables"
class MessageSendError(Exception):
"""Errors that occur when sending the message and are caught then"""
__slots__=("NotifyObject", "error")
def __init__(self, NotifyObject: NotifyMethods, error: Exception):
self.NotifyObject=NotifyObject
""""NotifyMethods object where something went wrong"""
self.error=error
"""The Error with the NotifyMethods object"""
super().__init__(self.__str__())
def __str__(self):
return f"The following exception occurred while sending the messagge with the method {self.NotifyObject.__class__.__name__} \n"\
f"[Error] {self.error} \n" \
f"[Fix] This is an error with the respective platform's API, ensure the credentials for are valid and you have access," \
f"check env variables, and ensure that all the types are correct. This is likely an issue with your implementation."
``` |
{
"source": "jjbiggins/stumpy",
"score": 2
} |
#### File: stumpy/tests/test_aamp_mmotifs.py
```python
import numpy as np
import numpy.testing as npt
import naive
import pytest
from stumpy.aamp_mmotifs import aamp_mmotifs
from stumpy import config
test_data = [
np.array(
[
[5.2, 0.1, 3.5, 3.4, 7.1, 9.8, 3.7, 5.0, 2.1, 4.3, 7.5, 6.8, 8.0, 8.1, 1.2],
[
7.3,
3.2,
5.0,
9.1,
8.2,
7.3,
4.8,
8.2,
10.0,
0.0,
4.1,
3.2,
2.3,
0.1,
1.4,
],
[6.2, 7.6, 7.6, 8.4, 1.1, 5.9, 9.2, 8.5, 9.3, 4.6, 3.5, 0.0, 3.1, 5.3, 0.9],
[
0.1,
1.3,
3.0,
2.1,
6.2,
1.3,
9.5,
10.0,
1.8,
2.0,
2.1,
5.2,
1.3,
0.5,
4.3,
],
]
)
]
def test_aamp_mmotifs_default_parameters():
motif_distances_ref = np.array(
[[0.0, 0.06315749, 0.25275899, 0.34087884, 0.3452315]]
)
motif_indices_ref = np.array([[19, 77, 63, 52, 71]])
motif_subspaces_ref = [np.array([2])]
motif_mdls_ref = [
np.array([411.60964047, 423.69925001, 449.11032383, 476.95855027, 506.62406252])
]
np.random.seed(0)
T = np.random.rand(500).reshape(5, 100)
m = 5
excl_zone = int(np.ceil(m / config.STUMPY_EXCL_ZONE_DENOM))
P, I = naive.maamp(T, m, excl_zone)
(
motif_distances_cmp,
motif_indices_cmp,
motif_subspaces_cmp,
motif_mdls_cmp,
) = aamp_mmotifs(T, P, I)
npt.assert_array_almost_equal(motif_distances_ref, motif_distances_cmp)
npt.assert_array_almost_equal(motif_indices_ref, motif_indices_cmp)
npt.assert_array_almost_equal(motif_subspaces_ref, motif_subspaces_cmp)
npt.assert_array_almost_equal(motif_mdls_ref, motif_mdls_cmp)
@pytest.mark.parametrize("T", test_data)
def test_aamp_mmotifs_max_distance(T):
motif_distances_ref = np.array(
[[0.0, 1.41421356, 4.46430286, 6.85346628, 8.207923, 8.50529247]]
)
motif_indices_ref = np.array([[2, 9, 0, 11, 7, 5]])
motif_subspaces_ref = [np.array([3])]
motif_mdls_ref = [np.array([244.0, 260.67970001, 279.86313714, 281.35940001])]
m = 4
excl_zone = int(np.ceil(m / config.STUMPY_EXCL_ZONE_DENOM))
P, I = naive.maamp(T, m, excl_zone)
(
motif_distances_cmp,
motif_indices_cmp,
motif_subspaces_cmp,
motif_mdls_cmp,
) = aamp_mmotifs(T, P, I, max_distance=np.inf)
npt.assert_array_almost_equal(motif_distances_ref, motif_distances_cmp)
npt.assert_array_almost_equal(motif_indices_ref, motif_indices_cmp)
npt.assert_array_almost_equal(motif_subspaces_ref, motif_subspaces_cmp)
npt.assert_array_almost_equal(motif_mdls_ref, motif_mdls_cmp)
@pytest.mark.parametrize("T", test_data)
def test_aamp_mmotifs_max_distance_max_matches_none(T):
motif_distances_ref = np.array(
[[0.0, 1.41421356, 4.46430286, 6.85346628, 8.207923, 8.50529247]]
)
motif_indices_ref = np.array([[2, 9, 0, 11, 7, 5]])
motif_subspaces_ref = [np.array([3])]
motif_mdls_ref = [np.array([244.0, 260.67970001, 279.86313714, 281.35940001])]
m = 4
excl_zone = int(np.ceil(m / config.STUMPY_EXCL_ZONE_DENOM))
P, I = naive.maamp(T, m, excl_zone)
(
motif_distances_cmp,
motif_indices_cmp,
motif_subspaces_cmp,
motif_mdls_cmp,
) = aamp_mmotifs(T, P, I, max_distance=np.inf, max_matches=None)
npt.assert_array_almost_equal(motif_distances_ref, motif_distances_cmp)
npt.assert_array_almost_equal(motif_indices_ref, motif_indices_cmp)
npt.assert_array_almost_equal(motif_subspaces_ref, motif_subspaces_cmp)
npt.assert_array_almost_equal(motif_mdls_ref, motif_mdls_cmp)
@pytest.mark.parametrize("T", test_data)
def test_aamp_mmotifs_max_motifs_1_max_matches_2_k_1(T):
motif_distances_ref = np.array([[0.0, 2.87778559]])
motif_indices_ref = np.array([[0, 5]])
motif_subspaces_ref = [np.array([2, 1])]
motif_mdls_ref = [np.array([244.0, 260.67970001, 279.86313714, 281.35940001])]
m = 4
excl_zone = int(np.ceil(m / config.STUMPY_EXCL_ZONE_DENOM))
P, I = naive.maamp(T, m, excl_zone)
(
motif_distances_cmp,
motif_indices_cmp,
motif_subspaces_cmp,
motif_mdls_cmp,
) = aamp_mmotifs(T, P, I, max_distance=np.inf, max_matches=2, k=1)
npt.assert_array_almost_equal(motif_distances_ref, motif_distances_cmp)
npt.assert_array_almost_equal(motif_indices_ref, motif_indices_cmp)
npt.assert_array_almost_equal(motif_subspaces_ref, motif_subspaces_cmp)
npt.assert_array_almost_equal(motif_mdls_ref, motif_mdls_cmp)
@pytest.mark.parametrize("T", test_data)
def test_aamp_mmotifs_more_motif_pairs_cutoffs_3(T):
motif_distances_ref = np.array([[0.0, 1.41421356], [0.0, 2.06639783]])
motif_indices_ref = np.array([[2, 9], [0, 5]])
motif_subspaces_ref = [np.array([3]), np.array([2])]
motif_mdls_ref = [
np.array([244.0, 260.67970001, 279.86313714, 281.35940001]),
np.array([254.33985, 260.67970001, 279.86313714, 291.20703549]),
]
m = 4
excl_zone = int(np.ceil(m / config.STUMPY_EXCL_ZONE_DENOM))
P, I = naive.maamp(T, m, excl_zone)
(
motif_distances_cmp,
motif_indices_cmp,
motif_subspaces_cmp,
motif_mdls_cmp,
) = aamp_mmotifs(
T, P, I, max_distance=np.inf, cutoffs=3, max_matches=2, max_motifs=10
)
npt.assert_array_almost_equal(motif_distances_ref, motif_distances_cmp)
npt.assert_array_almost_equal(motif_indices_ref, motif_indices_cmp)
npt.assert_array_almost_equal(motif_subspaces_ref, motif_subspaces_cmp)
npt.assert_array_almost_equal(motif_mdls_ref, motif_mdls_cmp)
@pytest.mark.parametrize("T", test_data)
def test_aamp_mmotifs_more_motif_pairs_cutoffs_as_list(T):
motif_distances_ref = np.array([[0.0, 1.41421356]])
motif_indices_ref = np.array([[2, 9]])
motif_subspaces_ref = [np.array([3])]
motif_mdls_ref = [np.array([244.0, 260.67970001, 279.86313714, 281.35940001])]
m = 4
cutoffs = [2, 3, 4, 5]
excl_zone = int(np.ceil(m / config.STUMPY_EXCL_ZONE_DENOM))
P, I = naive.maamp(T, m, excl_zone)
(
motif_distances_cmp,
motif_indices_cmp,
motif_subspaces_cmp,
motif_mdls_cmp,
) = aamp_mmotifs(
T, P, I, max_distance=np.inf, cutoffs=cutoffs, max_matches=2, max_motifs=10
)
npt.assert_array_almost_equal(motif_distances_ref, motif_distances_cmp)
npt.assert_array_almost_equal(motif_indices_ref, motif_indices_cmp)
npt.assert_array_almost_equal(motif_subspaces_ref, motif_subspaces_cmp)
npt.assert_array_almost_equal(motif_mdls_ref, motif_mdls_cmp)
@pytest.mark.parametrize("T", test_data)
def test_aamp_mmotifs_two_motif_pairs_max_motifs_2(T):
motif_distances_ref = np.array([[0.0, 1.41421356], [0.0, 2.06639783]])
motif_indices_ref = np.array([[2, 9], [0, 5]])
motif_subspaces_ref = [np.array([3]), np.array([2])]
motif_mdls_ref = [
np.array([244.0, 260.67970001, 279.86313714, 281.35940001]),
np.array([254.33985, 260.67970001, 279.86313714, 291.20703549]),
]
m = 4
excl_zone = int(np.ceil(m / config.STUMPY_EXCL_ZONE_DENOM))
P, I = naive.maamp(T, m, excl_zone)
(
motif_distances_cmp,
motif_indices_cmp,
motif_subspaces_cmp,
motif_mdls_cmp,
) = aamp_mmotifs(
T,
P,
I,
max_distance=np.inf,
cutoffs=np.inf,
max_matches=2,
max_motifs=2,
)
npt.assert_array_almost_equal(motif_distances_ref, motif_distances_cmp)
npt.assert_array_almost_equal(motif_indices_ref, motif_indices_cmp)
npt.assert_array_almost_equal(motif_subspaces_ref, motif_subspaces_cmp)
npt.assert_array_almost_equal(motif_mdls_ref, motif_mdls_cmp)
```
#### File: stumpy/tests/test_core.py
```python
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.spatial.distance import cdist
from stumpy import core, config
import pytest
import os
import math
import naive
def naive_rolling_window_dot_product(Q, T):
window = len(Q)
result = np.zeros(len(T) - window + 1)
for i in range(len(result)):
result[i] = np.dot(T[i : i + window], Q)
return result
def naive_compute_mean_std(T, m):
n = T.shape[0]
M_T = np.zeros(n - m + 1, dtype=float)
Σ_T = np.zeros(n - m + 1, dtype=float)
for i in range(n - m + 1):
Q = T[i : i + m].copy()
Q[np.isinf(Q)] = np.nan
M_T[i] = np.mean(Q)
Σ_T[i] = np.nanstd(Q)
M_T[np.isnan(M_T)] = np.inf
Σ_T[np.isnan(Σ_T)] = 0
return M_T, Σ_T
def naive_compute_mean_std_multidimensional(T, m):
n = T.shape[1]
nrows, ncols = T.shape
cumsum_T = np.empty((nrows, ncols + 1))
np.cumsum(T, axis=1, out=cumsum_T[:, 1:]) # store output in cumsum_T[1:]
cumsum_T[:, 0] = 0
cumsum_T_squared = np.empty((nrows, ncols + 1))
np.cumsum(np.square(T), axis=1, out=cumsum_T_squared[:, 1:])
cumsum_T_squared[:, 0] = 0
subseq_sum_T = cumsum_T[:, m:] - cumsum_T[:, : n - m + 1]
subseq_sum_T_squared = cumsum_T_squared[:, m:] - cumsum_T_squared[:, : n - m + 1]
M_T = subseq_sum_T / m
Σ_T = np.abs((subseq_sum_T_squared / m) - np.square(M_T))
Σ_T = np.sqrt(Σ_T)
return M_T, Σ_T
def naive_idx_to_mp(I, T, m, normalize=True):
I = I.astype(np.int64)
T = T.copy()
T_isfinite = np.isfinite(T)
T_subseqs_isfinite = np.all(core.rolling_window(T_isfinite, m), axis=1)
T[~T_isfinite] = 0.0
T_subseqs = core.rolling_window(T, m)
nn_subseqs = T_subseqs[I]
if normalize:
P = naive.distance(
naive.z_norm(T_subseqs, axis=1), naive.z_norm(nn_subseqs, axis=1), axis=1
)
else:
P = naive.distance(T_subseqs, nn_subseqs, axis=1)
P[~T_subseqs_isfinite] = np.inf
P[I < 0] = np.inf
return P
def split(node, out):
mid = len(node) // 2
out.append(node[mid])
return node[:mid], node[mid + 1 :]
def naive_bsf_indices(n):
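# Reference implementation: the breadth-first (level-order) sequence of
# binary-search midpoints over range(n), compared against core._bfs_indices
# in test_bsf_indices below.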
a = np.arange(n)
nodes = [a.tolist()]
out = []
while nodes:
tmp = []
for node in nodes:
for n in split(node, out):
if n:
tmp.append(n)
nodes = tmp
return np.array(out)
test_data = [
(np.array([-1, 1, 2], dtype=np.float64), np.array(range(5), dtype=np.float64)),
(
np.array([9, 8100, -60], dtype=np.float64),
np.array([584, -11, 23, 79, 1001], dtype=np.float64),
),
(np.random.uniform(-1000, 1000, [8]), np.random.uniform(-1000, 1000, [64])),
]
n = [9, 10, 16]
def test_check_bad_dtype():
for dtype in [np.int32, np.int64, np.float32]:
with pytest.raises(TypeError):
core.check_dtype(np.random.rand(10).astype(dtype))
def test_check_dtype_float64():
assert core.check_dtype(np.random.rand(10))
def test_get_max_window_size():
for n in range(3, 10):
ref_max_m = (
int(
n
- math.floor(
(n + (config.STUMPY_EXCL_ZONE_DENOM - 1))
// (config.STUMPY_EXCL_ZONE_DENOM + 1)
)
)
- 1
)
cmp_max_m = core.get_max_window_size(n)
assert ref_max_m == cmp_max_m
def test_check_window_size():
for m in range(-1, 3):
with pytest.raises(ValueError):
core.check_window_size(m)
def test_check_max_window_size():
for m in range(4, 7):
with pytest.raises(ValueError):
core.check_window_size(m, max_size=3)
@pytest.mark.parametrize("Q, T", test_data)
def test_njit_sliding_dot_product(Q, T):
ref_mp = naive_rolling_window_dot_product(Q, T)
comp_mp = core._sliding_dot_product(Q, T)
npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.parametrize("Q, T", test_data)
def test_sliding_dot_product(Q, T):
ref_mp = naive_rolling_window_dot_product(Q, T)
comp_mp = core.sliding_dot_product(Q, T)
npt.assert_almost_equal(ref_mp, comp_mp)
def test_welford_nanvar():
T = np.random.rand(64)
m = 10
ref_var = np.nanvar(T)
comp_var = core.welford_nanvar(T)
npt.assert_almost_equal(ref_var, comp_var)
ref_var = np.nanvar(core.rolling_window(T, m), axis=1)
comp_var = core.welford_nanvar(T, m)
npt.assert_almost_equal(ref_var, comp_var)
def test_welford_nanvar_catastrophic_cancellation():
T = np.array([4.0, 7.0, 13.0, 16.0, 10.0]) + 10**8
m = 4
ref_var = np.nanvar(core.rolling_window(T, m), axis=1)
comp_var = core.welford_nanvar(T, m)
npt.assert_almost_equal(ref_var, comp_var)
def test_welford_nanvar_nan():
T = np.random.rand(64)
m = 10
T[1] = np.nan
T[10] = np.nan
T[13:18] = np.nan
ref_var = np.nanvar(T)
comp_var = core.welford_nanvar(T)
npt.assert_almost_equal(ref_var, comp_var)
ref_var = np.nanvar(core.rolling_window(T, m), axis=1)
comp_var = core.welford_nanvar(T, m)
npt.assert_almost_equal(ref_var, comp_var)
def test_welford_nanstd():
T = np.random.rand(64)
m = 10
ref_var = np.nanstd(T)
comp_var = core.welford_nanstd(T)
npt.assert_almost_equal(ref_var, comp_var)
ref_var = np.nanstd(core.rolling_window(T, m), axis=1)
comp_var = core.welford_nanstd(T, m)
npt.assert_almost_equal(ref_var, comp_var)
def test_rolling_nanmin_1d():
T = np.random.rand(64)
for m in range(1, 12):
ref_min = np.nanmin(T)
comp_min = core._rolling_nanmin_1d(T)
npt.assert_almost_equal(ref_min, comp_min)
ref_min = np.nanmin(T)
comp_min = core._rolling_nanmin_1d(T)
npt.assert_almost_equal(ref_min, comp_min)
def test_rolling_nanmin():
T = np.random.rand(64)
for m in range(1, 12):
ref_min = np.nanmin(core.rolling_window(T, m), axis=1)
comp_min = core.rolling_nanmin(T, m)
npt.assert_almost_equal(ref_min, comp_min)
ref_min = np.nanmin(core.rolling_window(T, m), axis=1)
comp_min = core.rolling_nanmin(T, m)
npt.assert_almost_equal(ref_min, comp_min)
def test_rolling_nanmax_1d():
T = np.random.rand(64)
for m in range(1, 12):
ref_max = np.nanmax(T)
comp_max = core._rolling_nanmax_1d(T)
npt.assert_almost_equal(ref_max, comp_max)
ref_max = np.nanmax(T)
comp_max = core._rolling_nanmax_1d(T)
npt.assert_almost_equal(ref_max, comp_max)
def test_rolling_nanmax():
T = np.random.rand(64)
for m in range(1, 12):
ref_max = np.nanmax(core.rolling_window(T, m), axis=1)
comp_max = core.rolling_nanmax(T, m)
npt.assert_almost_equal(ref_max, comp_max)
ref_max = np.nanmax(core.rolling_window(T, m), axis=1)
comp_max = core.rolling_nanmax(T, m)
npt.assert_almost_equal(ref_max, comp_max)
@pytest.mark.parametrize("Q, T", test_data)
def test_compute_mean_std(Q, T):
m = Q.shape[0]
ref_μ_Q, ref_σ_Q = naive_compute_mean_std(Q, m)
ref_M_T, ref_Σ_T = naive_compute_mean_std(T, m)
comp_μ_Q, comp_σ_Q = core.compute_mean_std(Q, m)
comp_M_T, comp_Σ_T = core.compute_mean_std(T, m)
npt.assert_almost_equal(ref_μ_Q, comp_μ_Q)
npt.assert_almost_equal(ref_σ_Q, comp_σ_Q)
npt.assert_almost_equal(ref_M_T, comp_M_T)
npt.assert_almost_equal(ref_Σ_T, comp_Σ_T)
@pytest.mark.parametrize("Q, T", test_data)
def test_compute_mean_std_chunked(Q, T):
m = Q.shape[0]
config.STUMPY_MEAN_STD_NUM_CHUNKS = 2
ref_μ_Q, ref_σ_Q = naive_compute_mean_std(Q, m)
ref_M_T, ref_Σ_T = naive_compute_mean_std(T, m)
comp_μ_Q, comp_σ_Q = core.compute_mean_std(Q, m)
comp_M_T, comp_Σ_T = core.compute_mean_std(T, m)
config.STUMPY_MEAN_STD_NUM_CHUNKS = 1
npt.assert_almost_equal(ref_μ_Q, comp_μ_Q)
npt.assert_almost_equal(ref_σ_Q, comp_σ_Q)
npt.assert_almost_equal(ref_M_T, comp_M_T)
npt.assert_almost_equal(ref_Σ_T, comp_Σ_T)
@pytest.mark.parametrize("Q, T", test_data)
def test_compute_mean_std_chunked_many(Q, T):
m = Q.shape[0]
config.STUMPY_MEAN_STD_NUM_CHUNKS = 128
ref_μ_Q, ref_σ_Q = naive_compute_mean_std(Q, m)
ref_M_T, ref_Σ_T = naive_compute_mean_std(T, m)
comp_μ_Q, comp_σ_Q = core.compute_mean_std(Q, m)
comp_M_T, comp_Σ_T = core.compute_mean_std(T, m)
config.STUMPY_MEAN_STD_NUM_CHUNKS = 1
npt.assert_almost_equal(ref_μ_Q, comp_μ_Q)
npt.assert_almost_equal(ref_σ_Q, comp_σ_Q)
npt.assert_almost_equal(ref_M_T, comp_M_T)
npt.assert_almost_equal(ref_Σ_T, comp_Σ_T)
@pytest.mark.parametrize("Q, T", test_data)
def test_compute_mean_std_multidimensional(Q, T):
m = Q.shape[0]
Q = np.array([Q, np.random.uniform(-1000, 1000, [Q.shape[0]])])
T = np.array([T, T, np.random.uniform(-1000, 1000, [T.shape[0]])])
ref_μ_Q, ref_σ_Q = naive_compute_mean_std_multidimensional(Q, m)
ref_M_T, ref_Σ_T = naive_compute_mean_std_multidimensional(T, m)
comp_μ_Q, comp_σ_Q = core.compute_mean_std(Q, m)
comp_M_T, comp_Σ_T = core.compute_mean_std(T, m)
npt.assert_almost_equal(ref_μ_Q, comp_μ_Q)
npt.assert_almost_equal(ref_σ_Q, comp_σ_Q)
npt.assert_almost_equal(ref_M_T, comp_M_T)
npt.assert_almost_equal(ref_Σ_T, comp_Σ_T)
@pytest.mark.parametrize("Q, T", test_data)
def test_compute_mean_std_multidimensional_chunked(Q, T):
m = Q.shape[0]
Q = np.array([Q, np.random.uniform(-1000, 1000, [Q.shape[0]])])
T = np.array([T, T, np.random.uniform(-1000, 1000, [T.shape[0]])])
config.STUMPY_MEAN_STD_NUM_CHUNKS = 2
ref_μ_Q, ref_σ_Q = naive_compute_mean_std_multidimensional(Q, m)
ref_M_T, ref_Σ_T = naive_compute_mean_std_multidimensional(T, m)
comp_μ_Q, comp_σ_Q = core.compute_mean_std(Q, m)
comp_M_T, comp_Σ_T = core.compute_mean_std(T, m)
config.STUMPY_MEAN_STD_NUM_CHUNKS = 1
npt.assert_almost_equal(ref_μ_Q, comp_μ_Q)
npt.assert_almost_equal(ref_σ_Q, comp_σ_Q)
npt.assert_almost_equal(ref_M_T, comp_M_T)
npt.assert_almost_equal(ref_Σ_T, comp_Σ_T)
@pytest.mark.parametrize("Q, T", test_data)
def test_compute_mean_std_multidimensional_chunked_many(Q, T):
m = Q.shape[0]
Q = np.array([Q, np.random.uniform(-1000, 1000, [Q.shape[0]])])
T = np.array([T, T, np.random.uniform(-1000, 1000, [T.shape[0]])])
config.STUMPY_MEAN_STD_NUM_CHUNKS = 128
ref_μ_Q, ref_σ_Q = naive_compute_mean_std_multidimensional(Q, m)
ref_M_T, ref_Σ_T = naive_compute_mean_std_multidimensional(T, m)
comp_μ_Q, comp_σ_Q = core.compute_mean_std(Q, m)
comp_M_T, comp_Σ_T = core.compute_mean_std(T, m)
config.STUMPY_MEAN_STD_NUM_CHUNKS = 1
npt.assert_almost_equal(ref_μ_Q, comp_μ_Q)
npt.assert_almost_equal(ref_σ_Q, comp_σ_Q)
npt.assert_almost_equal(ref_M_T, comp_M_T)
npt.assert_almost_equal(ref_Σ_T, comp_Σ_T)
@pytest.mark.parametrize("Q, T", test_data)
def test_calculate_squared_distance_profile(Q, T):
m = Q.shape[0]
ref = (
np.linalg.norm(
core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1
)
** 2
)
QT = core.sliding_dot_product(Q, T)
μ_Q, σ_Q = core.compute_mean_std(Q, m)
M_T, Σ_T = core.compute_mean_std(T, m)
comp = core._calculate_squared_distance_profile(
m, QT, μ_Q.item(0), σ_Q.item(0), M_T, Σ_T
)
npt.assert_almost_equal(ref, comp)
@pytest.mark.parametrize("Q, T", test_data)
def test_calculate_distance_profile(Q, T):
m = Q.shape[0]
ref = np.linalg.norm(
core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1
)
QT = core.sliding_dot_product(Q, T)
μ_Q, σ_Q = core.compute_mean_std(Q, m)
M_T, Σ_T = core.compute_mean_std(T, m)
comp = core.calculate_distance_profile(m, QT, μ_Q.item(0), σ_Q.item(0), M_T, Σ_T)
npt.assert_almost_equal(ref, comp)
@pytest.mark.parametrize("Q, T", test_data)
def test_mueen_calculate_distance_profile(Q, T):
m = Q.shape[0]
ref = np.linalg.norm(
core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1
)
comp = core.mueen_calculate_distance_profile(Q, T)
npt.assert_almost_equal(ref, comp)
@pytest.mark.parametrize("Q, T", test_data)
def test_mass(Q, T):
Q = Q.copy()
T = T.copy()
m = Q.shape[0]
ref = np.linalg.norm(
core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1
)
comp = core.mass(Q, T)
npt.assert_almost_equal(ref, comp)
@pytest.mark.parametrize("Q, T", test_data)
def test_mass_Q_nan(Q, T):
Q = Q.copy()
Q[1] = np.nan
T = T.copy()
m = Q.shape[0]
ref = np.linalg.norm(
core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1
)
ref[np.isnan(ref)] = np.inf
comp = core.mass(Q, T)
npt.assert_almost_equal(ref, comp)
@pytest.mark.parametrize("Q, T", test_data)
def test_mass_Q_inf(Q, T):
Q = Q.copy()
Q[1] = np.inf
T = T.copy()
m = Q.shape[0]
ref = np.linalg.norm(
core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1
)
ref[np.isnan(ref)] = np.inf
comp = core.mass(Q, T)
npt.assert_almost_equal(ref, comp)
T[1] = 1e10
@pytest.mark.parametrize("Q, T", test_data)
def test_mass_T_nan(Q, T):
Q = Q.copy()
T = T.copy()
T[1] = np.nan
m = Q.shape[0]
ref = np.linalg.norm(
core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1
)
ref[np.isnan(ref)] = np.inf
comp = core.mass(Q, T)
npt.assert_almost_equal(ref, comp)
@pytest.mark.parametrize("Q, T", test_data)
def test_mass_T_inf(Q, T):
Q = Q.copy()
T = T.copy()
T[1] = np.inf
m = Q.shape[0]
ref = np.linalg.norm(
core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1
)
ref[np.isnan(ref)] = np.inf
comp = core.mass(Q, T)
npt.assert_almost_equal(ref, comp)
T[1] = 1e10
@pytest.mark.parametrize("Q, T", test_data)
def test_p_norm_distance_profile(Q, T):
Q = Q.copy()
T = T.copy()
m = Q.shape[0]
for p in [1.0, 1.5, 2.0]:
ref = cdist(
core.rolling_window(Q, m),
core.rolling_window(T, m),
metric="minkowski",
p=p,
).flatten()
ref = np.power(ref, p)
cmp = core._p_norm_distance_profile(Q, T, p)
npt.assert_almost_equal(ref, cmp)
@pytest.mark.parametrize("Q, T", test_data)
def test_mass_absolute(Q, T):
Q = Q.copy()
T = T.copy()
m = Q.shape[0]
for p in [1.0, 2.0, 3.0]:
ref = np.linalg.norm(core.rolling_window(T, m) - Q, axis=1, ord=p)
comp = core.mass_absolute(Q, T, p=p)
npt.assert_almost_equal(ref, comp)
@pytest.mark.parametrize("Q, T", test_data)
def test_mass_absolute_Q_nan(Q, T):
Q = Q.copy()
Q[1] = np.nan
T = T.copy()
m = Q.shape[0]
ref = np.linalg.norm(core.rolling_window(T, m) - Q, axis=1)
ref[np.isnan(ref)] = np.inf
comp = core.mass_absolute(Q, T)
npt.assert_almost_equal(ref, comp)
@pytest.mark.parametrize("Q, T", test_data)
def test_mass_absolute_Q_inf(Q, T):
Q = Q.copy()
Q[1] = np.inf
T = T.copy()
m = Q.shape[0]
ref = np.linalg.norm(core.rolling_window(T, m) - Q, axis=1)
ref[np.isnan(ref)] = np.inf
comp = core.mass_absolute(Q, T)
npt.assert_almost_equal(ref, comp)
@pytest.mark.parametrize("Q, T", test_data)
def test_mass_absolute_T_nan(Q, T):
Q = Q.copy()
T = T.copy()
T[1] = np.nan
m = Q.shape[0]
ref = np.linalg.norm(core.rolling_window(T, m) - Q, axis=1)
ref[np.isnan(ref)] = np.inf
comp = core.mass_absolute(Q, T)
npt.assert_almost_equal(ref, comp)
@pytest.mark.parametrize("Q, T", test_data)
def test_mass_absolute_T_inf(Q, T):
Q = Q.copy()
T = T.copy()
T[1] = np.inf
m = Q.shape[0]
ref = np.linalg.norm(core.rolling_window(T, m) - Q, axis=1)
ref[np.isnan(ref)] = np.inf
comp = core.mass_absolute(Q, T)
npt.assert_almost_equal(ref, comp)
def test_mass_absolute_sqrt_input_negative():
Q = np.array(
[
-13.09,
-14.1,
-15.08,
-16.31,
-17.13,
-17.5,
-18.07,
-18.07,
-17.48,
-16.24,
-14.88,
-13.56,
-12.65,
-11.93,
-11.48,
-11.06,
-10.83,
-10.67,
-10.59,
-10.81,
-10.92,
-11.15,
-11.37,
-11.53,
-11.19,
-11.08,
-10.48,
-10.14,
-9.92,
-9.99,
-10.11,
-9.92,
-9.7,
-9.47,
-9.06,
-9.01,
-8.79,
-8.67,
-8.33,
-8.0,
-8.26,
-8.0,
-7.54,
-7.32,
-7.13,
-7.24,
-7.43,
-7.93,
-8.8,
-9.71,
]
)
ref = 0.0
comp = core.mass_absolute(Q, Q)
npt.assert_almost_equal(ref, comp)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_mass_distance_matrix(T_A, T_B):
m = 3
ref_distance_matrix = naive.distance_matrix(T_A, T_B, m)
k = T_A.shape[0] - m + 1
l = T_B.shape[0] - m + 1
comp_distance_matrix = np.full((k, l), np.inf)
core._mass_distance_matrix(T_A, T_B, m, comp_distance_matrix)
npt.assert_almost_equal(ref_distance_matrix, comp_distance_matrix)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_mass_absolute_distance_matrix(T_A, T_B):
m = 3
ref_distance_matrix = cdist(
core.rolling_window(T_A, m), core.rolling_window(T_B, m)
)
k = T_A.shape[0] - m + 1
l = T_B.shape[0] - m + 1
comp_distance_matrix = np.full((k, l), np.inf)
core._mass_absolute_distance_matrix(T_A, T_B, m, comp_distance_matrix)
npt.assert_almost_equal(ref_distance_matrix, comp_distance_matrix)
def test_apply_exclusion_zone():
T = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float64)
ref = np.empty(T.shape, dtype=np.float64)
comp = np.empty(T.shape, dtype=np.float64)
exclusion_zone = 2
for i in range(T.shape[0]):
ref[:] = T[:]
naive.apply_exclusion_zone(ref, i, exclusion_zone, np.inf)
comp[:] = T[:]
core.apply_exclusion_zone(comp, i, exclusion_zone, np.inf)
naive.replace_inf(ref)
naive.replace_inf(comp)
npt.assert_array_equal(ref, comp)
def test_apply_exclusion_zone_int():
T = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.int64)
ref = np.empty(T.shape, dtype=np.int64)
comp = np.empty(T.shape, dtype=np.int64)
exclusion_zone = 2
for i in range(T.shape[0]):
ref[:] = T[:]
naive.apply_exclusion_zone(ref, i, exclusion_zone, -1)
comp[:] = T[:]
core.apply_exclusion_zone(comp, i, exclusion_zone, -1)
naive.replace_inf(ref)
naive.replace_inf(comp)
npt.assert_array_equal(ref, comp)
def test_apply_exclusion_zone_bool():
T = np.ones(10, dtype=bool)
ref = np.empty(T.shape, dtype=bool)
comp = np.empty(T.shape, dtype=bool)
exclusion_zone = 2
for i in range(T.shape[0]):
ref[:] = T[:]
naive.apply_exclusion_zone(ref, i, exclusion_zone, False)
comp[:] = T[:]
core.apply_exclusion_zone(comp, i, exclusion_zone, False)
naive.replace_inf(ref)
naive.replace_inf(comp)
npt.assert_array_equal(ref, comp)
def test_apply_exclusion_zone_multidimensional():
T = np.array(
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]],
dtype=np.float64,
)
ref = np.empty(T.shape, dtype=np.float64)
comp = np.empty(T.shape, dtype=np.float64)
exclusion_zone = 2
for i in range(T.shape[1]):
ref[:, :] = T[:, :]
naive.apply_exclusion_zone(ref, i, exclusion_zone, np.inf)
comp[:, :] = T[:, :]
core.apply_exclusion_zone(comp, i, exclusion_zone, np.inf)
naive.replace_inf(ref)
naive.replace_inf(comp)
npt.assert_array_equal(ref, comp)
def test_preprocess():
T = np.array([0, np.nan, 2, 3, 4, 5, 6, 7, np.inf, 9])
m = 3
ref_T = np.array([0, 0, 2, 3, 4, 5, 6, 7, 0, 9], dtype=float)
ref_M, ref_Σ = naive_compute_mean_std(T, m)
comp_T, comp_M, comp_Σ = core.preprocess(T, m)
npt.assert_almost_equal(ref_T, comp_T)
npt.assert_almost_equal(ref_M, comp_M)
npt.assert_almost_equal(ref_Σ, comp_Σ)
T = pd.Series(T)
comp_T, comp_M, comp_Σ = core.preprocess(T, m)
npt.assert_almost_equal(ref_T, comp_T)
npt.assert_almost_equal(ref_M, comp_M)
npt.assert_almost_equal(ref_Σ, comp_Σ)
def test_preprocess_non_normalized():
T = np.array([0, np.nan, 2, 3, 4, 5, 6, 7, np.inf, 9])
m = 3
ref_T_subseq_isfinite = np.full(T.shape[0] - m + 1, False, dtype=bool)
for i in range(T.shape[0] - m + 1):
if np.all(np.isfinite(T[i : i + m])):
ref_T_subseq_isfinite[i] = True
ref_T = np.array([0, 0, 2, 3, 4, 5, 6, 7, 0, 9], dtype=float)
comp_T, comp_T_subseq_isfinite = core.preprocess_non_normalized(T, m)
npt.assert_almost_equal(ref_T, comp_T)
npt.assert_almost_equal(ref_T_subseq_isfinite, comp_T_subseq_isfinite)
T = pd.Series(T)
comp_T, comp_T_subseq_isfinite = core.preprocess_non_normalized(T, m)
npt.assert_almost_equal(ref_T, comp_T)
npt.assert_almost_equal(ref_T_subseq_isfinite, comp_T_subseq_isfinite)
def test_preprocess_diagonal():
T = np.array([0, np.nan, 2, 3, 4, 5, 6, 7, np.inf, 9])
m = 3
ref_T = np.array([0, 0, 2, 3, 4, 5, 6, 7, 0, 9], dtype=float)
ref_M, ref_Σ = naive_compute_mean_std(ref_T, m)
ref_Σ_inverse = 1.0 / ref_Σ
ref_M_m_1, _ = naive_compute_mean_std(ref_T, m - 1)
(
comp_T,
comp_M,
comp_Σ_inverse,
comp_M_m_1,
comp_T_subseq_isfinite,
comp_T_subseq_isconstant,
) = core.preprocess_diagonal(T, m)
npt.assert_almost_equal(ref_T, comp_T)
npt.assert_almost_equal(ref_M, comp_M)
npt.assert_almost_equal(ref_Σ_inverse, comp_Σ_inverse)
npt.assert_almost_equal(ref_M_m_1, comp_M_m_1)
T = pd.Series(T)
(
comp_T,
comp_M,
comp_Σ_inverse,
comp_M_m_1,
comp_T_subseq_isfinite,
comp_T_subseq_isconstant,
) = core.preprocess_diagonal(T, m)
npt.assert_almost_equal(ref_T, comp_T)
npt.assert_almost_equal(ref_M, comp_M)
npt.assert_almost_equal(ref_Σ_inverse, comp_Σ_inverse)
npt.assert_almost_equal(ref_M_m_1, comp_M_m_1)
def test_replace_distance():
right = np.random.rand(30).reshape(5, 6)
left = right.copy()
np.fill_diagonal(right, config.STUMPY_MAX_DISTANCE - 1e-9)
np.fill_diagonal(left, np.inf)
core.replace_distance(right, config.STUMPY_MAX_DISTANCE, np.inf, 1e-6)
npt.assert_almost_equal(right, left)
def test_array_to_temp_file():
left = np.random.rand()
fname = core.array_to_temp_file(left)
right = np.load(fname, allow_pickle=False)
os.remove(fname)
npt.assert_almost_equal(left, right)
def test_count_diagonal_ndist():
for n_A in range(10, 15):
for n_B in range(10, 15):
for m in range(3, 6):
diags = np.random.permutation(
range(-(n_A - m + 1) + 1, n_B - m + 1)
).astype(np.int64)
ones_matrix = np.ones((n_A - m + 1, n_B - m + 1), dtype=np.int64)
ref_ndist_counts = np.empty(len(diags))
for i, diag in enumerate(diags):
ref_ndist_counts[i] = ones_matrix.diagonal(offset=diag).sum()
comp_ndist_counts = core._count_diagonal_ndist(diags, m, n_A, n_B)
npt.assert_almost_equal(ref_ndist_counts, comp_ndist_counts)
def test_get_array_ranges():
x = np.array([3, 9, 2, 1, 5, 4, 7, 7, 8, 6], dtype=np.int64)
for n_chunks in range(2, 5):
ref = naive.get_array_ranges(x, n_chunks, False)
cmp = core._get_array_ranges(x, n_chunks, False)
npt.assert_almost_equal(ref, cmp)
def test_get_array_ranges_exhausted():
x = np.array([3, 3, 3, 11, 11, 11], dtype=np.int64)
n_chunks = 6
ref = naive.get_array_ranges(x, n_chunks, False)
cmp = core._get_array_ranges(x, n_chunks, False)
npt.assert_almost_equal(ref, cmp)
def test_get_array_ranges_exhausted_truncated():
x = np.array([3, 3, 3, 11, 11, 11], dtype=np.int64)
n_chunks = 6
ref = naive.get_array_ranges(x, n_chunks, True)
cmp = core._get_array_ranges(x, n_chunks, True)
npt.assert_almost_equal(ref, cmp)
def test_get_array_ranges_empty_array():
x = np.array([], dtype=np.int64)
n_chunks = 6
ref = naive.get_array_ranges(x, n_chunks, False)
cmp = core._get_array_ranges(x, n_chunks, False)
npt.assert_almost_equal(ref, cmp)
def test_get_ranges():
ref = np.array([[0, 3], [3, 6]])
size = 6
n_chunks = 2
cmp = core._get_ranges(size, n_chunks, False)
npt.assert_almost_equal(ref, cmp)
def test_get_ranges_exhausted():
ref = np.array([[0, 1], [1, 2], [2, 3], [3, 3], [3, 4], [4, 5], [5, 6], [6, 6]])
size = 6
n_chunks = 8
cmp = core._get_ranges(size, n_chunks, False)
npt.assert_almost_equal(ref, cmp)
def test_get_ranges_exhausted_truncated():
ref = np.array([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
size = 6
n_chunks = 8
cmp = core._get_ranges(size, n_chunks, True)
npt.assert_almost_equal(ref, cmp)
def test_get_ranges_zero_size():
ref = np.empty((0, 2))
size = 0
n_chunks = 8
cmp = core._get_ranges(size, n_chunks, True)
npt.assert_almost_equal(ref, cmp)
def test_rolling_isfinite():
a = np.arange(12).astype(np.float64)
w = 3
a[1] = np.nan
a[5] = np.nan
a[9] = np.nan
ref = np.all(core.rolling_window(np.isfinite(a), w), axis=1)
comp = core.rolling_isfinite(a, w)
npt.assert_almost_equal(ref, comp)
def test_compare_parameters():
assert (
core._compare_parameters(core.rolling_window, core.z_norm, exclude=[]) is False
)
def test_jagged_list_to_array():
arr = [np.array([0, 1]), np.array([0]), np.array([0, 1, 2, 3])]
left = np.array([[0, 1, -1, -1], [0, -1, -1, -1], [0, 1, 2, 3]], dtype="int64")
right = core._jagged_list_to_array(arr, fill_value=-1, dtype="int64")
npt.assert_array_equal(left, right)
left = np.array(
[[0, 1, np.nan, np.nan], [0, np.nan, np.nan, np.nan], [0, 1, 2, 3]],
dtype="float64",
)
right = core._jagged_list_to_array(arr, fill_value=np.nan, dtype="float64")
npt.assert_array_equal(left, right)
def test_jagged_list_to_array_empty():
arr = []
left = np.array([[]], dtype="int64")
right = core._jagged_list_to_array(arr, fill_value=-1, dtype="int64")
npt.assert_array_equal(left, right)
left = np.array([[]], dtype="float64")
right = core._jagged_list_to_array(arr, fill_value=np.nan, dtype="float64")
npt.assert_array_equal(left, right)
def test_get_mask_slices():
bool_lst = [False, True]
mask_cases = [
[x, y, z, w]
for x in bool_lst
for y in bool_lst
for z in bool_lst
for w in bool_lst
]
for mask in mask_cases:
ref_slices = naive._get_mask_slices(mask)
comp_slices = core._get_mask_slices(mask)
npt.assert_array_equal(ref_slices, comp_slices)
def test_idx_to_mp():
n = 64
m = 5
T = np.random.rand(n)
# T[1] = np.nan
# T[8] = np.inf
# T[:] = 1.0
I = np.random.randint(0, n - m + 1, n - m + 1)
ref_mp = naive_idx_to_mp(I, T, m)
cmp_mp = core._idx_to_mp(I, T, m)
npt.assert_almost_equal(ref_mp, cmp_mp)
ref_mp = naive_idx_to_mp(I, T, m, normalize=False)
cmp_mp = core._idx_to_mp(I, T, m, normalize=False)
npt.assert_almost_equal(ref_mp, cmp_mp)
def test_total_diagonal_ndists():
tile_height = 9
tile_width = 11
for tile_lower_diag in range(-tile_height - 2, tile_width + 2):
for tile_upper_diag in range(tile_lower_diag, tile_width + 2):
assert naive._total_diagonal_ndists(
tile_lower_diag, tile_upper_diag, tile_height, tile_width
) == core._total_diagonal_ndists(
tile_lower_diag, tile_upper_diag, tile_height, tile_width
)
tile_height = 11
tile_width = 9
for tile_lower_diag in range(-tile_height - 2, tile_width + 2):
for tile_upper_diag in range(tile_lower_diag, tile_width + 2):
assert naive._total_diagonal_ndists(
tile_lower_diag, tile_upper_diag, tile_height, tile_width
) == core._total_diagonal_ndists(
tile_lower_diag, tile_upper_diag, tile_height, tile_width
)
@pytest.mark.parametrize("n", n)
def test_bsf_indices(n):
ref_bsf_indices = naive_bsf_indices(n)
cmp_bsf_indices = np.array(list(core._bfs_indices(n)))
npt.assert_almost_equal(ref_bsf_indices, cmp_bsf_indices)
```
#### File: stumpy/tests/test_gpu_stump.py
```python
import numpy as np
import numpy.testing as npt
import pandas as pd
from stumpy import gpu_stump
from stumpy import config
from numba import cuda
try:
from numba.errors import NumbaPerformanceWarning
except ModuleNotFoundError:
from numba.core.errors import NumbaPerformanceWarning
import pytest
import naive
config.THREADS_PER_BLOCK = 10
if not cuda.is_available(): # pragma: no cover
pytest.skip("Skipping Tests No GPUs Available", allow_module_level=True)
test_data = [
(
np.array([9, 8100, -60, 7], dtype=np.float64),
np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
),
(
np.random.uniform(-1000, 1000, [8]).astype(np.float64),
np.random.uniform(-1000, 1000, [64]).astype(np.float64),
),
]
window_size = [8, 16, 32]
substitution_locations = [(slice(0, 0), 0, -1, slice(1, 3), [0, 3])]
substitution_values = [np.nan, np.inf]
def test_gpu_stump_int_input():
with pytest.raises(TypeError):
gpu_stump(np.arange(10), 5, ignore_trivial=True)
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_gpu_stump_self_join(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.stamp(T_B, m, exclusion_zone=zone)
comp_mp = gpu_stump(T_B, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
comp_mp = gpu_stump(pd.Series(T_B), m, ignore_trivial=True)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("m", window_size)
def test_gpu_stump_self_join_larger_window(T_A, T_B, m):
if len(T_B) > m:
zone = int(np.ceil(m / 4))
ref_mp = naive.stamp(T_B, m, exclusion_zone=zone)
comp_mp = gpu_stump(T_B, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_stump(
# pd.Series(T_B),
# m,
# ignore_trivial=True,
# )
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_gpu_stump_A_B_join(T_A, T_B):
m = 3
ref_mp = naive.stamp(T_B, m, T_B=T_A)
comp_mp = gpu_stump(T_B, m, T_A, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_stump(pd.Series(T_A), m, pd.Series(T_B), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_parallel_gpu_stump_self_join(T_A, T_B):
device_ids = [device.id for device in cuda.list_devices()]
if len(T_B) > 10:
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.stamp(T_B, m, exclusion_zone=zone)
comp_mp = gpu_stump(
T_B,
m,
ignore_trivial=True,
device_id=device_ids,
)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_stump(
# pd.Series(T_B),
# m,
# ignore_trivial=True,
# device_id=device_ids,
# )
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_parallel_gpu_stump_A_B_join(T_A, T_B):
device_ids = [device.id for device in cuda.list_devices()]
if len(T_B) > 10:
m = 3
ref_mp = naive.stamp(T_B, m, T_B=T_A)
comp_mp = gpu_stump(
T_B,
m,
T_A,
ignore_trivial=False,
device_id=device_ids,
)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_stump(
# pd.Series(T_B),
# m,
# pd.Series(T_A),
# ignore_trivial=False,
# device_id=device_ids,
# )
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
def test_gpu_stump_constant_subsequence_self_join():
T_A = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(5, dtype=np.float64)))
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.stamp(T_A, m, exclusion_zone=zone)
comp_mp = gpu_stump(T_A, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# comp_mp = gpu_stump(pd.Series(T_A), m, ignore_trivial=True)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
def test_gpu_stump_one_constant_subsequence_A_B_join():
T_A = np.random.rand(20)
T_B = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(5, dtype=np.float64)))
m = 3
ref_mp = naive.stamp(T_B, m, T_B=T_A)
comp_mp = gpu_stump(T_B, m, T_A, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# comp_mp = gpu_stump(pd.Series(T_A), m, pd.Series(T_B), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# Swap inputs
ref_mp = naive.stamp(T_A, m, T_B=T_B)
comp_mp = gpu_stump(T_A, m, T_B, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# comp_mp = gpu_stump(pd.Series(T_A), m, pd.Series(T_B), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
def test_gpu_stump_two_constant_subsequences_A_B_join():
T_A = np.array([0, 0, 0, 0, 0, 1], dtype=np.float64)
T_B = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(5, dtype=np.float64)))
m = 3
ref_mp = naive.stamp(T_B, m, T_B=T_A)
comp_mp = gpu_stump(T_B, m, T_A, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# comp_mp = gpu_stump(pd.Series(T_B), m, pd.Series(T_A), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# Swap inputs
ref_mp = naive.stamp(T_A, m, T_B=T_B)
comp_mp = gpu_stump(T_A, m, T_B, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# comp_mp = gpu_stump(pd.Series(T_A), m, pd.Series(T_B), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
def test_gpu_stump_identical_subsequence_self_join():
identical = np.random.rand(8)
T_A = np.random.rand(20)
T_A[1 : 1 + identical.shape[0]] = identical
T_A[11 : 11 + identical.shape[0]] = identical
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.stamp(T_A, m, exclusion_zone=zone)
comp_mp = gpu_stump(T_A, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(
ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
) # ignore indices
# comp_mp = gpu_stump(pd.Series(T_A), m, ignore_trivial=True)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(
# ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
# ) # ignore indices
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
def test_gpu_stump_identical_subsequence_A_B_join():
identical = np.random.rand(8)
T_A = np.random.rand(20)
T_B = np.random.rand(20)
T_A[1 : 1 + identical.shape[0]] = identical
T_B[11 : 11 + identical.shape[0]] = identical
m = 3
ref_mp = naive.stamp(T_B, m, T_B=T_A)
comp_mp = gpu_stump(T_B, m, T_A, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(
ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
) # ignore indices
# comp_mp = gpu_stump(pd.Series(T_B), m, pd.Series(T_A), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(
# ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
# ) # ignore indices
# Swap inputs
ref_mp = naive.stamp(T_A, m, T_B=T_B)
comp_mp = gpu_stump(T_A, m, T_B, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(
ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
) # ignore indices
# comp_mp = gpu_stump(pd.Series(T_A), m, pd.Series(T_B), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(
# ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
# ) # ignore indices
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("substitute_B", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_gpu_stump_nan_inf_self_join(T_A, T_B, substitute_B, substitution_locations):
m = 3
stop = 16
T_B_sub = T_B.copy()[:stop]
for substitution_location_B in substitution_locations:
T_B_sub[:] = T_B[:stop]
T_B_sub[substitution_location_B] = substitute_B
zone = int(np.ceil(m / 4))
ref_mp = naive.stamp(T_B_sub, m, exclusion_zone=zone)
comp_mp = gpu_stump(T_B_sub, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_stump(pd.Series(T_B_sub), m, ignore_trivial=True)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("substitute_A", substitution_values)
@pytest.mark.parametrize("substitute_B", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_gpu_stump_nan_inf_A_B_join(
T_A, T_B, substitute_A, substitute_B, substitution_locations
):
m = 3
stop = 16
T_A_sub = T_A.copy()
T_B_sub = T_B.copy()[:stop]
for substitution_location_B in substitution_locations:
for substitution_location_A in substitution_locations:
T_A_sub[:] = T_A
T_B_sub[:] = T_B[:stop]
T_A_sub[substitution_location_A] = substitute_A
T_B_sub[substitution_location_B] = substitute_B
ref_mp = naive.stamp(T_B_sub, m, T_B=T_A_sub)
comp_mp = gpu_stump(T_B_sub, m, T_A_sub, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_stump(
# pd.Series(T_B_sub), m, pd.Series(T_A_sub), ignore_trivial=False
# )
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.filterwarnings("ignore", category=NumbaPerformanceWarning)
def test_gpu_stump_nan_zero_mean_self_join():
T = np.array([-1, 0, 1, np.inf, 1, 0, -1])
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.stamp(T, m, exclusion_zone=zone)
comp_mp = gpu_stump(T, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
``` |
{
"source": "jjbits/AlgorithmicBits",
"score": 3
} |
#### File: AlgorithmicBits/matrix/matrix.py
```python
import numpy as np
# INPUT: This implementation only accepts square matrices whose
#        dimension stays even at every level of recursion
#        (i.e. a power of two). The caller is expected to pad
#        the matrices accordingly before feeding in the data.
# OUTPUT: The multiplied (product) matrix.
# Complexity bound:
# Each call of strassen_multiplication performs 7 recursive
# multiplications on half-sized matrices plus O(N^2) additions,
# giving the recurrence T(N) = 7*T(N/2) + O(N^2).
# By the master theorem this solves to O(N^log2(7)),
# i.e. roughly O(N^2.81).
def strassen_multiplication(a, b, n):
n1, n2 = a.shape
if n != n1 or n != n2:
print("Only square matrices are acceptted. Please pad your matrices.")
return 0
elif n%2 != 0:
print("Only even matrices are acceptted. Please pad your matrices.")
return 0
if n == 2:
m1 = (a[0][0] + a[1][1]) * (b[0][0] + b[1][1])
m2 = (a[1][0] + a[1][1]) * b[0][0]
m3 = a[0][0] * (b[0][1] - b[1][1])
m4 = a[1][1] * (b[1][0] - b[0][0])
m5 = (a[0][0] + a[0][1]) * b[1][1]
m6 = (a[1][0] - a[0][0]) * (b[0][0] + b[0][1])
m7 = (a[0][1] - a[1][1]) * (b[1][0] + b[1][1])
c11 = m1 + m4 - m5 + m7
c12 = m3 + m5
c21 = m2 + m4
c22 = m1 - m2 + m3 + m6
a = np.stack((c11, c21), axis=0)
b = np.stack((c12, c22), axis=0)
c = np.stack((a, b), axis=1)
return c
a11 = a[0:n//2, 0:n//2]
a12 = a[0:n//2, n//2:n]
a21 = a[n//2:n, 0:n//2]
a22 = a[n//2:n, n//2:n]
b11 = b[0:n//2, 0:n//2]
b12 = b[0:n//2, n//2:n]
b21 = b[n//2:n, 0:n//2]
b22 = b[n//2:n, n//2:n]
m1 = strassen_multiplication(a11 + a22, b11 + b22, n//2)
m2 = strassen_multiplication(a21 + a22, b11, n//2)
m3 = strassen_multiplication(a11, b12 - b22, n//2)
m4 = strassen_multiplication(a22, b21 - b11, n//2)
m5 = strassen_multiplication(a11 + a12, b22, n//2)
m6 = strassen_multiplication(a21 - a11, b11 + b12, n//2)
m7 = strassen_multiplication(a12 - a22, b21 + b22, n//2)
c11 = m1 + m4 - m5 + m7
c12 = m3 + m5
c21 = m2 + m4
c22 = m1 - m2 + m3 + m6
a = np.concatenate((c11, c12), axis=1)
b = np.concatenate((c21, c22), axis=1)
c = np.concatenate((a, b), axis=0)
return c
n = 8
a = np.zeros((n, n))
k = 0
for i in range(n):
for j in range(n):
a[i, j] = k
k += 1
b = np.zeros((n, n))
for i in range(n):
for j in range(n):
b[i, j] = k
k += 1
c = a.dot(b)
print(a)
print(b)
print(c)
c = strassen_multiplication(a, b, n)
print("Result:")
print(c)
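# Hedged helper sketch (not part of the original file): strassen_multiplication
# assumes the dimension stays even at every recursion level, i.e. a power of two,
# so inputs of other sizes can be zero-padded first. The helper name below is
# illustrative.
def pad_to_power_of_two(m):
    """Zero-pad a square matrix up to the next power-of-two dimension."""
    size = m.shape[0]
    target = 1 << (size - 1).bit_length()  # next power of two >= size
    target = max(target, 2)                # base case needs at least a 2x2 matrix
    padded = np.zeros((target, target))
    padded[:size, :size] = m
    return padded
# Example (illustrative): multiply two 3x3 matrices by padding to 4x4 and
# reading back the top-left 3x3 block of the result.
# d = np.arange(9, dtype=float).reshape(3, 3)
# e = np.arange(9, 18, dtype=float).reshape(3, 3)
# f = strassen_multiplication(pad_to_power_of_two(d), pad_to_power_of_two(e), 4)[:3, :3]
# assert np.allclose(f, d.dot(e))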
```
#### File: AlgorithmicBits/search/findinversion.py
```python
def count_split_inv(a):
n = len(a)
left_a = a[0:n//2]
right_a = a[n//2:]
n_left = len(left_a)
n_right = len(right_a)
out_list = []
i = 0
j = 0
k = 0
count = 0
for i in range(n):
if j == n_left and k < n_right:
while(k < n_right):
out_list.append(right_a[k])
k += 1
return count, out_list
elif j < n_left and k == n_right:
while(j < n_left):
out_list.append(left_a[j])
j += 1
return count, out_list
else:
if left_a[j] < right_a[k]:
out_list.append(left_a[j])
j += 1
else:
out_list.append(right_a[k])
count += (n//2 - j)
k += 1
return count, out_list
def count_inv(a):
n = len(a)
if n <= 1: return 0, a
a_left = a[0 : n//2]
a_right = a[n//2:]
x, a_left = count_inv(a_left)
y, a_right = count_inv(a_right)
z, a = count_split_inv(a_left+a_right)
return (x + y + z), a
test_list = [7, 3, 5, 1, 9, 2]
#test_list = []
#test_list = [1]
#test_list = [1, 2]
#test_list = [2, 1]
#test_list = [3, 2, 1]
#test_list = [6, 7, 5]
count, a = count_inv(test_list)
print("count", count)
print("a", a)
``` |
{
"source": "J-J-B-J/Lazy-Susan-Emulator",
"score": 4
} |
#### File: Lazy-Susan-Emulator/classes_and_functions/get_input.py
```python
raw_input = ""
test = False
def get(prompt=""):
"""Replaces the input function, to allow for input from a test file."""
if not test: # If this is the main program, just run the normal input
# function
return_value = str(input(prompt))
else: # If in testing mode, return the input from the test program
return_value = str(raw_input)
return_value = return_value.title()
if "Cancel" in return_value:
return None
return return_value
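# Example behaviour (assumed from the code above): with test=False this acts like
# input() but title-cases the result; any response containing "Cancel" returns
# None so callers can treat the prompt as aborted.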
```
#### File: Lazy-Susan-Emulator/txt_managers/item_manager.py
```python
class ItemManager:
"""A class to manage items.txt."""
def __init__(self):
self.items_file = 'txt_files/items.txt'
def clear_items(self):
with open(self.items_file, 'w') as File:
File.write("Items:\n")
print("Items Cleared!")
def recover_items(self):
try:
with open(self.items_file, 'r') as file: # Try to read the file
read_data = file.readlines()
except FileNotFoundError: # If the file doesn't exist
with open(self.items_file, 'w') as file: # Create the file
file.write("Items:\n")
# Write this line at the start
with open(self.items_file, 'r') as file:
read_data = file.readlines()
read_data = read_data[1:] # Remove first line of file, which is
# "Items:"
if not read_data:
print("No Data Found.")
return None
else:
new_read_data = [] # New read data gets set to the old read
# data, but
# without the newlines at the end of each line.
for line in read_data:
new_read_data.append(line.rstrip("\n"))
read_data = new_read_data
print("Data Found!")
item_data = []
for line in read_data: # Save the old data to this program
item_data.append((str(line[4:]), int(line[:3])))
print("Item data saved!")
return item_data
def add_item(self, item, position):
position = str(position)
for _ in range(0, 3 - len(position)):
position = f"0{position}" # Make positions 3-digit numbers.
# E.g. 12 becomes 012
position += " " # Add a space at the end of position
text = position + item + "\n"
try:
with open(self.items_file, 'a') as file:
file.write(text)
except FileNotFoundError:
with open(self.items_file, 'w') as file:
file.write("Items:\n{text}")
def remove_item(self, item):
newlines = ""
try:
with open(self.items_file, 'r') as file:
old_lines = file.readlines()
except FileNotFoundError:
with open(self.items_file, 'w') as file:
file.write("Items:\n")
print("File Not Found.")
return
for line in old_lines[1:]:
if item not in line:
newlines += line  # lines from readlines() already end with "\n"
with open(self.items_file, 'w') as file:
file.write("Items:\n")
with open(self.items_file, 'a') as file:
file.write(newlines)
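# Hedged usage sketch (not part of the original file; paths are relative to the
# project root, as assumed by self.items_file above):
#   manager = ItemManager()
#   manager.add_item("Salt", 45)       # stored in items.txt as "045 Salt"
#   manager.recover_items()            # -> [("Salt", 45)]
#   manager.remove_item("Salt")
#   manager.clear_items()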
``` |
{
"source": "JJBong/marl",
"score": 3
} |
#### File: marl/network/vdn.py
```python
import torch.nn as nn
import torch
class VDNNet(nn.Module):
def __init__(self):
super(VDNNet, self).__init__()
@staticmethod
def forward(q_values):
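# q_values: per-agent Q-values of shape (batch, n_agents); VDN's joint Q_tot is
# simply their sum over the agent dimension (dim=1).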
return torch.sum(q_values, dim=1)
```
#### File: marl/runner/runner.py
```python
from common.replay_memory import ReplayMemoryForMLP, ReplayMemoryForRNN, init_hidden
from agent.agents import Agents
class Runner:
def __init__(self, env, args):
self.env = env
self.args = args
assert self.args.base_net in ['mlp', 'rnn']
if self.args.base_net == 'mlp':
self.replay_memory = ReplayMemoryForMLP(self.args)
else:
self.replay_memory = ReplayMemoryForRNN(self.args)
self.training_steps = self.args.training_steps
self.playing_steps = self.args.playing_steps
self.agents = Agents(self.args)
def run(self):
step = 0
while step < self.training_steps:
state, observations = self.env.reset()
done = False
if self.args.base_net == 'rnn':
h_out = init_hidden(self.args)
while not done:
if self.args.base_net == 'rnn':
h_in = h_out
actions, h_out = self.agents.choose_action(observations, h_in)
else:
actions = self.agents.choose_action(observations)
next_state, next_observations, reward, done = self.env.step(actions)
# print('step: {0}, state: {1}, actions: {2}, reward: {3}'.format(step, state, actions, reward))
done_mask = 0.0 if done else 1.0
if self.args.base_net == 'rnn':
self.replay_memory.put(
[state, observations, actions, reward, next_state, next_observations, h_in, h_out, done_mask]
)
else:
self.replay_memory.put(
[state, observations, actions, reward, next_state, next_observations, done_mask]
)
if self.replay_memory.size() >= self.args.batch_size:
batch = {}
if self.args.base_net == 'rnn':
s, o, a, r, s_prime, o_prime, hidden_in, hidden_out, done_mask = self.replay_memory.sample(
self.args.batch_size
)
batch['hidden_in'] = hidden_in
batch['hidden_out'] = hidden_out
else:
s, o, a, r, s_prime, o_prime, done_mask = self.replay_memory.sample(self.args.batch_size)
batch['state'] = s
batch['observation'] = o
batch['action'] = a
batch['reward'] = r
batch['next_state'] = s_prime
batch['next_observation'] = o_prime
batch['done_mask'] = done_mask
loss = self.agents.train(batch, step)
if step % self.args.print_interval == 0:
print("step: {0}, loss: {1}".format(step, loss))
state = next_state
observations = next_observations
step += 1
if done:
break
self.agents.save_model()
def play(self):
self.agents.load_model()
q_value_list, iteration, selected_q_value_list, q_value_list_0, q_value_list_1, q_value_list_2, \
iteration_0, iteration_1, iteration_2 = None, None, None, None, None, None, None, None, None
if self.args.env_name == 'one_step_payoff_matrix':
q_value_list = [[0. for _ in range(self.args.n_actions)] for _ in range(self.args.n_actions)]
iteration = [[0 for _ in range(self.args.n_actions)] for _ in range(self.args.n_actions)]
elif self.args.env_name == 'two_step_payoff_matrix':
q_value_list_0 = [[0. for _ in range(self.args.n_actions)] for _ in range(self.args.n_actions)]
iteration_0 = [[0 for _ in range(self.args.n_actions)] for _ in range(self.args.n_actions)]
q_value_list_1 = [[0. for _ in range(self.args.n_actions)] for _ in range(self.args.n_actions)]
iteration_1 = [[0 for _ in range(self.args.n_actions)] for _ in range(self.args.n_actions)]
q_value_list_2 = [[0. for _ in range(self.args.n_actions)] for _ in range(self.args.n_actions)]
iteration_2 = [[0 for _ in range(self.args.n_actions)] for _ in range(self.args.n_actions)]
else:
raise Exception("Wrong env name.")
step = 0
while step < self.playing_steps:
state, observations = self.env.reset()
done = False
if self.args.base_net == 'rnn':
h_out = init_hidden(self.args)
state_num = 0
while not done:
if self.args.base_net == 'rnn':
h_in = h_out
actions, h_out, q_total_evals = self.agents.choose_action(observations, h_in=h_in, state=state)
else:
actions, q_total_evals = self.agents.choose_action(observations, state=state)
next_state, next_observations, reward, done = self.env.step(actions)
state = next_state
observations = next_observations
if self.args.env_name == 'one_step_payoff_matrix':
q_value_list[actions[0]][actions[1]] += q_total_evals
iteration[actions[0]][actions[1]] += 1
elif self.args.env_name == 'two_step_payoff_matrix':
if state_num == 0:
if actions[0] == 0:
state_num = 1
if actions[0] == 1:
state_num = 2
q_value_list_0[actions[0]][actions[1]] += q_total_evals
iteration_0[actions[0]][actions[1]] += 1
else:
if state_num == 1:
q_value_list_1[actions[0]][actions[1]] += q_total_evals
iteration_1[actions[0]][actions[1]] += 1
elif state_num == 2:
q_value_list_2[actions[0]][actions[1]] += q_total_evals
iteration_2[actions[0]][actions[1]] += 1
step += 1
if done:
break
if self.args.env_name == 'one_step_payoff_matrix':
for i in range(self.args.n_actions):
for j in range(self.args.n_actions):
q_value_list[i][j] /= iteration[i][j]
print(q_value_list)
elif self.args.env_name == 'two_step_payoff_matrix':
for i in range(self.args.n_actions):
for j in range(self.args.n_actions):
q_value_list_0[i][j] /= iteration_0[i][j]
q_value_list_1[i][j] /= iteration_1[i][j]
q_value_list_2[i][j] /= iteration_2[i][j]
print(q_value_list_0)
print(q_value_list_1)
print(q_value_list_2)
``` |
{
"source": "jjbrophy47/tree_deletion",
"score": 3
} |
#### File: tree_deletion/dare/_classes.py
```python
import numbers
import numpy as np
from ._manager import _DataManager
from ._config import _Config
from ._splitter import _Splitter
from ._remover import _Remover
from ._simulator import _Simulator
from ._tree import _Tree
from ._tree import _TreeBuilder
MAX_DEPTH_LIMIT = 1000
MAX_INT = 2147483647
class Forest(object):
"""
DaRE forest, a random forests model that can efficiently
remove training data AFTER training.
Parameters:
-----------
topd: int (default=0)
Number of random-node layers, starting from the top.
k: int (default=25)
Number of candidate thresholds per feature to consider
through uniform sampling.
n_estimators: int (default=100)
Number of trees in the forest.
max_features: int, float, or str (default='sqrt')
If int, then max_features at each split.
If float, then max_features=int(max_features * n_features) at each split.
If None or 'sqrt', then max_features=sqrt(n_features).
max_depth: int (default=10)
The maximum depth of a tree; if None, the depth limit is effectively removed.
criterion: str (default='gini')
Splitting criterion to use.
min_samples_split: int (default=2)
The minimum number of samples needed to make a split when building a tree.
min_samples_leaf: int (default=1)
The minimum number of samples needed to make a leaf.
random_state: int (default=None)
Random state for reproducibility.
verbose: int (default=0)
Verbosity level.
"""
def __init__(self,
topd=0,
k=25,
n_estimators=100,
max_features='sqrt',
max_depth=10,
criterion='gini',
min_samples_split=2,
min_samples_leaf=1,
random_state=None,
verbose=0):
self.topd = topd
self.k = k
self.n_estimators = n_estimators
self.max_features = max_features
self.max_depth = max_depth
self.criterion = criterion
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.random_state = random_state
self.verbose = verbose
def __str__(self):
s = 'Forest:'
s += '\ntopd={}'.format(self.topd)
s += '\nk={}'.format(self.k)
s += '\nn_estimators={}'.format(self.n_estimators)
s += '\nmax_features={}'.format(self.max_features)
s += '\nmax_depth={}'.format(self.max_depth)
s += '\ncriterion={}'.format(self.criterion)
s += '\nmin_samples_split={}'.format(self.min_samples_split)
s += '\nmin_samples_leaf={}'.format(self.min_samples_leaf)
s += '\nrandom_state={}'.format(self.random_state)
s += '\nverbose={}'.format(self.verbose)
return s
def fit(self, X, y):
"""
Build DaRE forest.
"""
assert X.ndim == 2
assert y.ndim == 1
self.n_samples_ = X.shape[0]
self.n_features_ = X.shape[1]
X, y = check_data(X, y)
# set random state
self.random_state_ = get_random_int(self.random_state)
# set max. features
self.max_features_ = check_max_features(self.max_features, self.n_features_)
# set max_depth
self.max_depth_ = MAX_DEPTH_LIMIT if not self.max_depth else self.max_depth
# set top d
self.topd_ = min(self.topd, self.max_depth_ + 1)
# make sure k is positive
assert self.k > 0
# one central location for the data
self.manager_ = _DataManager(X, y)
# build forest
self.trees_ = []
for i in range(self.n_estimators):
# print('\n\nTree {:,}'.format(i))
# build tree
tree = Tree(topd=self.topd_,
k=self.k,
max_depth=self.max_depth_,
criterion=self.criterion,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
random_state=self.random_state_ + i,
verbose=self.verbose)
tree = tree.fit(X, y, max_features=self.max_features_, manager=self.manager_)
# add to forest
self.trees_.append(tree)
return self
def predict(self, X):
"""
Classify samples one by one and return the set of labels.
"""
y_proba = self.predict_proba(X)
y_pred = np.argmax(y_proba, axis=1)
return y_pred
def predict_proba(self, X):
"""
Classify samples one by one and return class probabilities.
"""
assert X.ndim == 2
X = check_data(X)
# sum all predictions instead of storing them
forest_preds = np.zeros(X.shape[0])
for i, tree in enumerate(self.trees_):
forest_preds += tree.predict_proba(X)[:, 1]
y_mean = (forest_preds / len(self.trees_)).reshape(-1, 1)
y_proba = np.hstack([1 - y_mean, y_mean])
return y_proba
def delete(self, remove_indices):
"""
Removes instances from the database and updates the model.
"""
# copy indices to an int array
if isinstance(remove_indices, int):
remove_indices = [remove_indices]
if not (isinstance(remove_indices, np.ndarray) and remove_indices.dtype == np.int32):
remove_indices = np.array(remove_indices, dtype=np.int32)
remove_indices = np.unique(remove_indices).astype(np.int32)
# update trees
for i in range(len(self.trees_)):
self.trees_[i].delete(remove_indices)
# remove data from the database
self.manager_.remove_data(remove_indices)
def sim_delete(self, remove_index):
"""
Simulate the deletion of a SINGLE example.
Returns the number of samples that needs to be retrained
if this example were to be deleted.
"""
# change `remove_index` into the right data type
if not isinstance(remove_index, np.int64):
remove_index = np.int64(remove_index)
# simulate a deletion for each tree
n_samples_to_retrain = 0
for i in range(len(self.trees_)):
n_samples_to_retrain += self.trees_[i].sim_delete(remove_index)
return n_samples_to_retrain
def get_delete_metrics(self):
"""
Retrieve deletion statistics.
"""
types_list, depths_list, costs_list = [], [], []
# get metrics for each tree
for tree in self.trees_:
types, depths, costs = tree.get_delete_metrics()
types_list.append(types)
depths_list.append(depths)
costs_list.append(costs)
# process metrics from all trees
types = np.concatenate(types_list)
depths = np.concatenate(depths_list)
costs = np.concatenate(costs_list)
return types, depths, costs
def get_node_statistics(self):
"""
Return average no. random, greedy, and total node counts over all trees.
"""
n_nodes_list, n_random_nodes_list, n_greedy_nodes_list = [], [], []
# get metrics for each tree
for tree in self.trees_:
n_nodes, n_random_nodes, n_greedy_nodes = tree.get_node_statistics()
n_nodes_list.append(n_nodes)
n_random_nodes_list.append(n_random_nodes)
n_greedy_nodes_list.append(n_greedy_nodes)
# take the avg. of the counts
avg_n_nodes = np.mean(n_nodes_list)
avg_n_random_nodes = np.mean(n_random_nodes_list)
avg_n_greedy_nodes = np.mean(n_greedy_nodes_list)
return avg_n_nodes, avg_n_random_nodes, avg_n_greedy_nodes
def clear_delete_metrics(self):
"""
Clear deletion statistics for all trees.
"""
for tree in self.trees_:
tree.clear_delete_metrics()
def get_memory_usage(self):
"""
Return total memory (in bytes) used by the forest.
"""
structure_memory = 0
decision_stats_memory = 0
leaf_stats_memory = 0
# add up memory used by each tree
for tree in self.trees_:
struc_mem, decision_mem, leaf_mem = tree.get_memory_usage()
structure_memory += struc_mem
decision_stats_memory += decision_mem
leaf_stats_memory += leaf_mem
return structure_memory, decision_stats_memory, leaf_stats_memory
def get_params(self, deep=False):
"""
Returns the parameters of this model as a dictionary.
"""
d = {}
d['topd'] = self.topd
d['k'] = self.k
d['n_estimators'] = self.n_estimators
d['max_features'] = self.max_features
d['max_depth'] = self.max_depth
d['criterion'] = self.criterion
d['min_samples_split'] = self.min_samples_split
d['min_samples_leaf'] = self.min_samples_leaf
d['random_state'] = self.random_state
d['verbose'] = self.verbose
if deep:
d['trees'] = {}
for i, tree in enumerate(self.trees_):
d['trees'][i] = tree.get_params(deep=deep)
return d
def set_params(self, **params):
"""
Set the parameters of this model.
"""
for key, value in params.items():
setattr(self, key, value)
return self
class Tree(object):
"""
Dare tree, a decision tree that can efficiently
remove training data AFTER training.
Parameters:
-----------
topd: int (default=0)
Number of random-node layers, starting from the top.
k: int (default=25)
No. candidate thresholds to consider through uniform sampling.
max_depth: int (default=10)
The maximum depth of the tree; if None, the depth limit is effectively removed.
criterion: str (default='gini')
Splitting criterion to use.
min_samples_split: int (default=2)
The minimum number of samples needed to make a split when building the tree.
min_samples_leaf: int (default=1)
The minimum number of samples needed to make a leaf.
random_state: int (default=None)
Random state for reproducibility.
verbose: int (default=0)
Verbosity level.
"""
def __init__(self,
topd=0,
k=25,
max_depth=10,
criterion='gini',
min_samples_split=2,
min_samples_leaf=1,
random_state=None,
verbose=0):
self.topd = topd
self.k = k
self.max_depth = max_depth
self.criterion = criterion
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.random_state = random_state
self.verbose = verbose
def __str__(self):
s = 'Tree:'
s += '\ntopd={}'.format(self.topd)
s += '\nk={}'.format(self.k)
s += '\nmax_depth={}'.format(self.max_depth)
s += '\ncriterion={}'.format(self.criterion)
s += '\nmin_samples_split={}'.format(self.min_samples_split)
s += '\nmin_samples_leaf={}'.format(self.min_samples_leaf)
s += '\nrandom_state={}'.format(self.random_state)
s += '\nverbose={}'.format(self.verbose)
return s
def fit(self, X, y, max_features=None, manager=None):
"""
Build DaRE tree.
"""
assert X.ndim == 2
assert y.ndim == 1
# set random state
self.random_state_ = check_random_state(self.random_state)
# configure data manager
if max_features is not None:
assert manager is not None
self.max_features_ = max_features
self.manager_ = manager
self.single_tree_ = False
else:
X, y = check_data(X, y)
self.max_features_ = X.shape[1]
self.manager_ = _DataManager(X, y)
self.single_tree_ = True
# set hyperparameters
self.max_depth_ = MAX_DEPTH_LIMIT if not self.max_depth else self.max_depth
self.topd_ = min(self.topd, self.max_depth_ + 1)
self.use_gini_ = True if self.criterion == 'gini' else False
# make sure k is positive
assert self.k > 0, 'k must be greater than zero!'
# create tree objects
self.tree_ = _Tree()
self.config_ = _Config(self.min_samples_split,
self.min_samples_leaf,
self.max_depth_,
self.topd_,
self.k,
self.max_features_,
self.use_gini_,
self.random_state_)
self.splitter_ = _Splitter(self.config_)
self.tree_builder_ = _TreeBuilder(self.manager_,
self.splitter_,
self.config_)
self.remover_ = _Remover(self.manager_,
self.tree_builder_,
self.config_)
self.simulator_ = _Simulator(self.manager_,
self.config_)
self.tree_builder_.build(self.tree_)
return self
def predict(self, X):
"""
Classify samples one by one and return the set of labels.
"""
y_proba = self.predict_proba(X)
y_pred = np.argmax(y_proba, axis=1)
return y_pred
def predict_proba(self, X):
"""
Classify samples one by one and return class probabilities.
"""
assert X.ndim == 2
X = check_data(X)
y_pos = self.tree_.predict(X).reshape(-1, 1)
y_proba = np.hstack([1 - y_pos, y_pos])
return y_proba
def delete(self, remove_indices):
"""
Removes instances from the database and updates the model.
"""
# copy remove indices to int array
if self.single_tree_:
if isinstance(remove_indices, int):
remove_indices = [remove_indices]
if not (isinstance(remove_indices, np.ndarray) and remove_indices.dtype == np.int32):
remove_indices = np.array(remove_indices, dtype=np.int32)
remove_indices = np.unique(remove_indices).astype(np.int32)
# update model
rc = self.remover_.remove(self.tree_, remove_indices)
if rc == -1:
exit('Removal index invalid!')
# remove data from the database
if self.single_tree_:
self.manager_.remove_data(remove_indices)
def sim_delete(self, remove_index):
"""
Simulate the deletion of a SINGLE example; returns the number of samples
that would need to be retrained if this example were deleted.
"""
# change `remove_index` into the right data type
if self.single_tree_:
if not isinstance(remove_index, np.int64):
remove_index = np.int64(remove_index)
# update model
n_samples_to_retrain = self.simulator_.sim_delete(self.tree_, remove_index)
if n_samples_to_retrain == -1:
exit('Removal index invalid!')
return n_samples_to_retrain
def get_delete_metrics(self):
"""
Retrieve deletion statistics.
"""
remove_types = np.array(self.remover_.remove_types, dtype=np.int32)
remove_depths = np.array(self.remover_.remove_depths, dtype=np.int32)
remove_costs = np.array(self.remover_.remove_costs, dtype=np.int32)
return remove_types, remove_depths, remove_costs
def clear_delete_metrics(self):
"""
Clear deletion statistics.
"""
self.remover_.clear_metrics()
def get_node_statistics(self):
"""
Returns the no. total nodes, no. random nodes, and no. greedy nodes.
"""
n_nodes = self.tree_.get_node_count()
n_random_nodes = self.tree_.get_random_node_count(self.topd_)
n_greedy_nodes = self.tree_.get_greedy_node_count(self.topd_)
return n_nodes, n_random_nodes, n_greedy_nodes
def set_sim_mode(self, sim_mode=False):
"""
Turns simulation mode on/off.
"""
self.tree_builder_.set_sim_mode(sim_mode)
def get_memory_usage(self):
"""
Return total memory (in bytes) used by the tree.
"""
structure_memory = self.tree_.get_structure_memory()
decision_stats_memory = self.tree_.get_decision_stats_memory()
leaf_stats_memory = self.tree_.get_leaf_stats_memory()
return structure_memory, decision_stats_memory, leaf_stats_memory
def get_params(self, deep=False):
"""
Returns the parameters of this model as a dictionary.
"""
d = {}
d['topd'] = self.topd
d['k'] = self.k
d['max_depth'] = self.max_depth
d['criterion'] = self.criterion
d['min_samples_split'] = self.min_samples_split
d['min_samples_leaf'] = self.min_samples_leaf
d['random_state'] = self.random_state
d['verbose'] = self.verbose
return d
def set_params(self, **params):
"""
Set the parameters of this model.
"""
for key, value in params.items():
setattr(self, key, value)
return self
# ========================================================================
# Validation Methods
# ========================================================================
def get_random_int(seed):
"""
Get a random number from the whole range of large integer values.
"""
np.random.seed(seed)
return np.random.randint(MAX_INT)
# https://github.com/scikit-learn/scikit-learn/blob/\
# 95d4f0841d57e8b5f6b2a570312e9d832e69debc/sklearn/utils/validation.py#L800
def check_random_state(seed):
"""
Turn seed into a np.random.RandomState instance.
Parameters
----------
seed : None | int | instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
elif isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
elif isinstance(seed, np.random.RandomState):
return seed
else:
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def check_max_features(max_features, n_features):
"""
Takes an int, float, or str.
-Int > 0: Returns min(`max_features`, `n_features`)
-Float in range (0.0, 1.0]: Returns that fraction of `n_features`.
-[-1, None, 'sqrt']: Returns sqrt(`n_features`).
Returns a valid number for the max. features, or throws an error.
"""
assert n_features > 0
result = None
# return square root of no. features
if max_features in [-1, None, 'sqrt']:
result = int(np.sqrt(n_features))
# may be an int, float, or string representation of an int or float
else:
temp_max_features = None
# try converting to an int
try:
temp_max_features = int(max_features)
# try converting to a float
except ValueError:
try:
temp_max_features = float(max_features)
except ValueError:
pass
if isinstance(temp_max_features, int):
assert temp_max_features > 0
result = min(n_features, temp_max_features)
elif isinstance(temp_max_features, float):
assert temp_max_features > 0 and temp_max_features <= 1.0
result = int(temp_max_features * n_features)
else:
raise ValueError('max_features {} unknown!'.format(max_features))
return result
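# Illustrative examples of the mapping above (assuming n_features=100):
#   check_max_features('sqrt', 100) -> 10   (int(sqrt(100)))
#   check_max_features(25, 100)     -> 25   (min(n_features, 25))
#   check_max_features(0.5, 100)    -> 50   (int(0.5 * 100))
#   check_max_features('0.5', 100)  -> 50   (string parsed as a float)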
def check_data(X, y=None):
"""
Makes sure the data is float (np.float32)
and the labels are integer (np.int32).
"""
result = None
if X.dtype != np.float32:
X = X.astype(np.float32)
if y is not None:
if y.dtype != np.int32:
y = y.astype(np.int32)
result = X, y
else:
result = X
return result
```
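A minimal usage sketch of the `Forest` API defined above (fit, delete, metrics, prediction). The synthetic data and hyperparameter values are illustrative only, and it assumes the compiled `dare` package is importable, as in the experiment scripts below.
```python
import numpy as np
import dare  # assumes the Cython extensions have been built (see setup.py below)

# small synthetic binary-classification dataset (labels must be 0/1)
rng = np.random.default_rng(0)
X = rng.random((1000, 20)).astype(np.float32)
y = (rng.random(1000) < 0.5).astype(np.int32)

# train a DaRE forest, then unlearn a few training examples after the fact
forest = dare.Forest(n_estimators=10, max_depth=5, k=10, topd=0, random_state=1)
forest = forest.fit(X, y)
forest.delete([0, 1, 2])  # remove three training instances post-hoc

# deletion statistics (aggregated over trees) and class probabilities
types, depths, costs = forest.get_delete_metrics()
proba = forest.predict_proba(X[:5])  # shape (5, 2)
print(types.shape, proba.shape)
```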
#### File: tree_deletion/dare/setup.py
```python
import os
import numpy
from numpy.distutils.misc_util import Configuration
from numpy.distutils.core import setup
from Cython.Build import cythonize
def configuration(parent_package='', top_path=None):
config = Configuration('dare', parent_name=parent_package, top_path=top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_config",
sources=["_config.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
config.add_extension("_manager",
sources=["_manager.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
config.add_extension("_tree",
sources=["_tree.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
config.add_extension("_splitter",
sources=["_splitter.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
config.add_extension("_remover",
sources=["_remover.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
config.add_extension("_simulator",
sources=["_simulator.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
config.add_extension("_utils",
sources=["_utils.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
config.add_extension("_argsort",
sources=["_argsort.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
config.ext_modules = cythonize(
config.ext_modules,
compiler_directives={'language_level': 3},
annotate=True
)
return config
if __name__ == "__main__":
setup(**configuration(top_path='').todict())
```
#### File: scripts/experiments/delete.py
```python
import os
import sys
import time
import argparse
import resource
from datetime import datetime
from collections import defaultdict
import numpy as np
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../../')
sys.path.insert(0, here + '/../')
import dare
from utility import data_util
from utility import exp_util
from utility import print_util
def count_depths(types, depths):
"""
Compress the information about deletion types and depths into counts.
"""
# get list of deletion types
r = {k: defaultdict(int) for k in set(types)}
# count no. deletions at each depth for each deletion type
for t, d in zip(types, depths):
r[t][d] += 1
# convert defaultdicts to regular dicts
for k in r.keys():
r[k] = dict(r[k])
return r
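# Illustrative input/output (hypothetical values):
#   types  = [0, 1, 1, 0]
#   depths = [2, 3, 3, 2]
#   count_depths(types, depths) -> {0: {2: 2}, 1: {3: 2}}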
def count_costs(types, depths, costs):
"""
For retrains (types = 1), compute the total cost
for each depth.
"""
# only use indices where a retrain occurred
retrain_indices = np.where(types == 1)[0]
# get list of all retrain depths
r = {d: 0 for d in set(depths[retrain_indices])}
# compute total cost for each depth
for d, c in zip(depths[retrain_indices], costs[retrain_indices]):
r[d] += c
return r
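# Illustrative input/output (hypothetical values); inputs must be NumPy arrays
# because of the np.where/fancy-indexing calls above:
#   types  = np.array([1, 1, 0, 1])
#   depths = np.array([2, 2, 1, 3])
#   costs  = np.array([10, 5, 7, 4])
#   count_costs(types, depths, costs) -> {2: 15, 3: 4}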
def get_model(args):
"""
Return model.
"""
model = dare.Forest(max_depth=args.max_depth,
criterion=args.criterion,
topd=args.topd,
k=args.k,
n_estimators=args.n_estimators,
max_features=args.max_features,
verbose=args.verbose,
random_state=args.rs)
return model
def get_naive(args):
"""
Return naive model.
"""
model = dare.Forest(max_depth=args.max_depth,
criterion=args.criterion,
topd=0,
k=args.k,
n_estimators=args.n_estimators,
max_features=args.max_features,
verbose=args.verbose,
random_state=args.rs)
return model
def train_naive(args, X_train, y_train, X_test, y_test, rng, logger=None):
"""
Compute the time it takes to delete a specified number of
samples from a naive model sequentially.
"""
# initial naive training time
model = get_naive(args)
start = time.time()
model = model.fit(X_train, y_train)
before_train_time = time.time() - start
logger.info('\n[{}] before train time: {:.3f}s'.format('naive', before_train_time))
# predictive performance of the naive model
auc, acc, ap = exp_util.performance(model, X_test, y_test, logger=logger, name='naive')
# naive train after deleting data
delete_indices = rng.choice(np.arange(X_train.shape[0]), size=args.n_delete, replace=False)
new_X_train = np.delete(X_train, delete_indices, axis=0)
new_y_train = np.delete(y_train, delete_indices)
# after training time
model = get_naive(args)
start = time.time()
model = model.fit(new_X_train, new_y_train)
after_train_time = time.time() - start
logger.info('[{}] after train time: {:.3f}s'.format('naive', after_train_time))
# interpolate sequential updates
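# e.g., before=12s, after=10s, n_delete=100 -> naive is charged (12 + 10) / 2 * 100 = 1,100s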
total_time = ((before_train_time + after_train_time) / 2) * args.n_delete
initial_utility = auc, acc, ap
return total_time, initial_utility
def get_delete_index(model, X_train, y_train, indices, rng):
"""
Randomly select a subset of samples, simulate deleting each one,
then pick the sample that causes the largest no. samples to be retrained.
"""
start = time.time()
# randomly samples a subset of indices to simulate deleting
subsample_indices = rng.choice(indices, size=args.subsample_size, replace=False)
# return the only sample if the subset size is 1
if args.subsample_size == 1:
return subsample_indices[0], time.time() - start
# simulate deleting samples
best_ndx = -1
best_score = -1
for j, subsample_ndx in enumerate(subsample_indices):
# simulate deletion
sample_cost = model.sim_delete(subsample_ndx)
# save best sample
if sample_cost > best_score:
best_ndx = subsample_ndx
best_score = sample_cost
# record search time
search_time = time.time() - start
return best_ndx, search_time
def experiment(args, logger, out_dir, seed):
"""
Delete as many samples in the time it takes the naive
approach to delete one sample.
"""
# random number generator
rng = np.random.default_rng(args.rs)
# get data
X_train, X_test, y_train, y_test = data_util.get_data(args.dataset, data_dir=args.data_dir)
# dataset statistics
logger.info('\ntrain instances: {:,}'.format(X_train.shape[0]))
logger.info('test instances: {:,}'.format(X_test.shape[0]))
logger.info('features: {:,}'.format(X_train.shape[1]))
# experiment settings
logger.info('\nrandom state: {}'.format(seed))
logger.info('criterion: {}'.format(args.criterion))
logger.info('n_estimators: {}'.format(args.n_estimators))
logger.info('max_depth: {}'.format(args.max_depth))
logger.info('topd: {}'.format(args.topd))
logger.info('k: {}'.format(args.k))
logger.info('subsample_size: {}'.format(args.subsample_size))
logger.info('n_delete: {}'.format(args.n_delete))
# train a naive model, before and after deleting 1 sample
naive_avg_delete_time, naive_utility = train_naive(args, X_train, y_train, X_test, y_test, rng, logger=logger)
# begin experiment
begin = time.time()
# amount of time given to delete as many samples as possible
allotted_time = naive_avg_delete_time
# result containers
total_delete_time = 0
delete_types_list = []
delete_depths_list = []
delete_costs_list = []
# train target model
model = get_model(args)
start = time.time()
model = model.fit(X_train, y_train)
train_time = time.time() - start
logger.info('[{}] train time: {:.3f}s'.format('model', train_time))
# evaluate predictive performance between naive and the model
naive_auc, naive_acc, naive_ap = naive_utility
model_auc, model_acc, model_ap = exp_util.performance(model, X_test, y_test, logger=logger, name='model')
# available indices
indices = np.arange(len(X_train))
# find the most damaging samples heuristically
progress_str = '[{}] sample {}, sample_cost: {:,}, search time: {:.3f}s, allotted: {:.3f}s, cum time: {:.3f}s'
logger.info('\nDelete samples:')
n_deleted = 0
while allotted_time > 0 and time.time() - begin <= args.time_limit:
# adversarially select a sample out of a subset of candidate samples
delete_ndx, search_time = get_delete_index(model, X_train, y_train, indices, rng)
# delete the adversarially selected sample
start = time.time()
model.delete(delete_ndx)
delete_time = time.time() - start
# get deletion statistics
delete_types, delete_depths, delete_costs = model.get_delete_metrics()
delete_types_list.append(delete_types)
delete_depths_list.append(delete_depths)
delete_costs_list.append(delete_costs)
sample_cost = np.sum(delete_costs) # sum over all trees
model.clear_delete_metrics()
# update counters
allotted_time -= delete_time # available time
total_delete_time += delete_time # total deletion time
cum_time = time.time() - begin # total time
n_deleted += 1
# progress update
logger.info(progress_str.format(n_deleted, delete_ndx, sample_cost, search_time, allotted_time, cum_time))
# remove the chosen ndx from the list of available indices
indices = np.setdiff1d(indices, [delete_ndx])
# estimate how many additional updates would finish in the remaining time
if allotted_time > 0:
average_delete_time = total_delete_time / n_deleted
n_deleted += int(allotted_time / average_delete_time)
# get model statistics
n_nodes_avg, n_random_nodes_avg, n_greedy_nodes_avg = model.get_node_statistics()
delete_types = np.concatenate(delete_types_list)
delete_depths = np.concatenate(delete_depths_list)
delete_costs = np.concatenate(delete_costs_list)
# save model results
result = model.get_params()
result['naive_auc'] = naive_auc
result['naive_acc'] = naive_acc
result['naive_ap'] = naive_ap
result['naive_avg_delete_time'] = naive_avg_delete_time
result['naive_n_deleted'] = args.n_delete
result['model_n_deleted'] = n_deleted
result['model_train_%_deleted'] = n_deleted / len(X_train)
result['model_delete_depths'] = count_depths(delete_types, delete_depths)
result['model_delete_costs'] = count_costs(delete_types, delete_depths, delete_costs)
result['model_auc'] = model_auc
result['model_acc'] = model_acc
result['model_ap'] = model_ap
result['model_n_nodes_avg'] = n_nodes_avg
result['model_n_random_nodes_avg'] = n_random_nodes_avg
result['model_n_greedy_nodes_avg'] = n_greedy_nodes_avg
result['max_rss'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
logger.info('\nResults:\n{}'.format(result))
np.save(os.path.join(out_dir, 'results.npy'), result)
return result
def main(args):
# assertions
assert args.criterion in ['gini', 'entropy']
# create output dir
out_dir = os.path.join(args.out_dir,
args.dataset,
args.criterion,
'rs_{}'.format(args.rs),
'topd_{}'.format(args.topd),
'k_{}'.format(args.k),
'sub_{}'.format(args.subsample_size))
# create output directory and clear previous contents
os.makedirs(out_dir, exist_ok=True)
print_util.clear_dir(out_dir)
# skip experiment if results already exist
if args.append_results and os.path.exists(os.path.join(out_dir, 'results.npy')):
print('results exist: {}'.format(out_dir))
return
# create logger
log_fp = os.path.join(out_dir, 'log.txt')
logger = print_util.get_logger(log_fp)
logger.info(args)
logger.info(datetime.now())
# run experiment
experiment(args, logger, out_dir, seed=args.rs)
# remove logger
print_util.remove_logger(logger)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# I/O settings
parser.add_argument('--out_dir', type=str, default='output/delete/', help='output directory.')
parser.add_argument('--data_dir', type=str, default='data', help='data directory.')
parser.add_argument('--dataset', default='surgical', help='dataset to use for the experiment.')
parser.add_argument('--append_results', action='store_true', default=False, help='add results.')
# experiment settings
parser.add_argument('--rs', type=int, default=1, help='seed to enhance reproducibility.')
parser.add_argument('--n_delete', type=int, default=1, help='number of instances for naive to delete.')
parser.add_argument('--time_limit', type=int, default=72000, help='seconds given for the entire experiment.')
# tree hyperparameters
parser.add_argument('--criterion', type=str, default='gini', help='gini or entropy.')
parser.add_argument('--n_estimators', type=int, default=100, help='no. trees in the forest.')
parser.add_argument('--max_depth', type=int, default=10, help='max. depth of the tree.')
parser.add_argument('--max_features', type=str, default='sqrt', help='maximum no. features to sample.')
# DARE parameters
parser.add_argument('--topd', type=int, default=0, help='no. top layers to be random.')
parser.add_argument('--k', type=int, default=10, help='no. thresholds to sample.')
# adversary settings
parser.add_argument('--subsample_size', type=int, default=1, help='number of samples to test at a time.')
# display settings
parser.add_argument('--verbose', type=int, default=1, help='verbosity level.')
args = parser.parse_args()
main(args)
```
#### File: scripts/experiments/memory.py
```python
import os
import sys
import time
import pickle
import argparse
import resource
from datetime import datetime
import numpy as np
from sklearn.ensemble import RandomForestClassifier
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../../')
sys.path.insert(0, here + '/../')
import dare
from utility import data_util
from utility import exp_util
from utility import print_util
def get_model(args, model, n_estimators, max_depth, topd=0, k=5):
"""
Return the appropriate model.
"""
if 'dare' in model:
model = dare.Forest(criterion=args.criterion,
max_depth=max_depth,
n_estimators=n_estimators,
max_features=args.max_features,
topd=topd,
k=k,
verbose=args.verbose,
random_state=args.rs)
elif 'sklearn' in model:
bootstrap = True if 'bootstrap' in model else False
model = RandomForestClassifier(n_estimators=n_estimators,
max_depth=max_depth,
max_features=args.max_features,
criterion=args.criterion,
random_state=args.rs,
bootstrap=bootstrap)
else:
raise ValueError('model {} unknown!'.format(args.model))
return model
def experiment(args, out_dir, logger):
# stat timer
begin = time.time()
# obtain data
X_train, X_test, y_train, y_test = data_util.get_data(args.dataset, data_dir=args.data_dir)
# compute data size
data_mem = X_train.nbytes + y_train.nbytes
# dataset statistics
logger.info('\ntrain instances: {:,}'.format(X_train.shape[0]))
logger.info('test instances: {:,}'.format(X_test.shape[0]))
logger.info('attributes: {:,}'.format(X_train.shape[1]))
logger.info('data size: {:,} bytes'.format(data_mem))
# get hyperparameters
params = exp_util.get_params(dataset=args.dataset, criterion=args.criterion)
n_estimators = params[0]
max_depth = params[1]
k = params[2]
topd_list = params[3:]
tol_list = [0.0, 0.1, 0.25, 0.5, 1.0]
assert len(topd_list) == len(tol_list)
# create result object
result = {}
result['n_estimators'] = n_estimators
result['max_depth'] = max_depth
result['criterion'] = args.criterion
result['max_features'] = args.max_features
result['model'] = args.model
# SKLearn RF
if 'sklearn' in args.model:
clf = get_model(args, model='sklearn', n_estimators=n_estimators, max_depth=max_depth)
# train
start = time.time()
model = clf.fit(X_train, y_train)
train_time = time.time() - start
# get memory usage
structure_memory = sys.getsizeof(pickle.dumps(model))
decision_stats_memory = -1
leaf_stats_memory = -1
logger.info('\n[SKLearn] train: {:.3f}s, structure: {:,} bytes'.format(train_time, structure_memory))
# DARE model
else:
# extract topd info
dare_ndx = int(args.model.split('_')[1])
tol = tol_list[dare_ndx]
topd = topd_list[dare_ndx]
# get model
clf = get_model(args, model='dare', n_estimators=n_estimators, max_depth=max_depth, k=k, topd=topd)
# train
start = time.time()
model = clf.fit(X_train, y_train)
train_time = time.time() - start
# get memory usage
structure_memory, decision_stats_memory, leaf_stats_memory = model.get_memory_usage()
s = '\n[DARE (tol={:.2f}%, topd={:,}, k={:,})] train: {:.3f}s'
s += ', structure: {:,} bytes, decision stats.: {:,} bytes, leaf stats.: {:,} bytes'
logger.info(s.format(tol, topd, k, train_time, structure_memory, decision_stats_memory, leaf_stats_memory))
# get node statistics
n_nodes, n_random, n_greedy = model.get_node_statistics()
logger.info('\nno. nodes: {:,}, no. random: {:,}, no. greedy: {:,}'.format(n_nodes, n_random, n_greedy))
# add to results
result['data_mem'] = data_mem
result['structure_mem'] = structure_memory
result['decision_stats_mem'] = decision_stats_memory
result['leaf_stats_mem'] = leaf_stats_memory
result['model_mem'] = structure_memory + decision_stats_memory + leaf_stats_memory
result['train_time'] = train_time
result['n_nodes'] = n_nodes
result['n_random'] = n_random
result['n_greedy'] = n_greedy
# save results
result['max_rss'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
np.save(os.path.join(out_dir, 'results.npy'), result)
logger.info('\ntotal time: {:.3f}s'.format(time.time() - begin))
logger.info('max_rss: {:,}'.format(result['max_rss']))
logger.info('\nresults:\n{}'.format(result))
def main(args):
# create output dir
out_dir = os.path.join(args.out_dir,
args.dataset,
args.criterion,
args.model,
'rs_{}'.format(args.rs))
# create output directory and clear any previous contents
os.makedirs(out_dir, exist_ok=True)
print_util.clear_dir(out_dir)
# create logger
logger = print_util.get_logger(os.path.join(out_dir, 'log.txt'))
logger.info(args)
logger.info('\ntimestamp: {}'.format(datetime.now()))
# run experiment
experiment(args, out_dir, logger)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# I/O settings
parser.add_argument('--data_dir', type=str, default='data', help='data directory.')
parser.add_argument('--out_dir', type=str, default='output/memory/', help='output directory.')
parser.add_argument('--dataset', default='surgical', help='dataset to use for the experiment.')
# experiment settings
parser.add_argument('--model', type=str, default='dare_0', help='model to test.')
parser.add_argument('--rs', type=int, default=1, help='random state.')
parser.add_argument('--criterion', type=str, default='gini', help='splitting criterion.')
# tree/forest hyperparameters
parser.add_argument('--max_features', type=str, default='sqrt', help='maximum no. features to sample.')
# display settings
parser.add_argument('--verbose', type=int, default=2, help='verbosity level.')
args = parser.parse_args()
main(args)
```
#### File: scripts/experiments/performance.py
```python
import os
import sys
import time
import argparse
import resource
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../../')
sys.path.insert(0, here + '/../')
import dare
from utility import data_util
from utility import exp_util
from utility import print_util
def _get_model(args):
"""
Return the appropriate model.
"""
if args.model in ['dare']:
model = dare.Forest(criterion=args.criterion,
max_depth=args.max_depth,
n_estimators=args.n_estimators,
max_features=args.max_features,
topd=args.topd,
k=args.k,
verbose=args.verbose,
random_state=args.rs)
elif args.model == 'extra_trees':
model = ExtraTreesClassifier(n_estimators=args.n_estimators,
max_depth=args.max_depth,
max_features=args.max_features,
criterion=args.criterion,
random_state=args.rs)
elif args.model == 'extra_trees_k1':
model = ExtraTreesClassifier(n_estimators=args.n_estimators,
max_depth=args.max_depth,
max_features=1,
criterion=args.criterion,
random_state=args.rs)
elif args.model == 'sklearn':
model = RandomForestClassifier(n_estimators=args.n_estimators,
max_depth=args.max_depth,
max_features=args.max_features,
criterion=args.criterion,
random_state=args.rs,
bootstrap=args.bootstrap)
else:
raise ValueError('model {} unknown!'.format(args.model))
return model
def _get_model_dict(args, params):
"""
Return the appropriate model.
"""
if args.model == 'dare':
model = dare.Forest(criterion=args.criterion,
max_depth=params['max_depth'],
n_estimators=params['n_estimators'],
max_features=args.max_features,
topd=args.topd,
k=params['k'],
verbose=args.verbose,
random_state=args.rs)
elif args.model == 'extra_trees':
model = ExtraTreesClassifier(n_estimators=params['n_estimators'],
max_depth=params['max_depth'],
max_features=args.max_features,
criterion=args.criterion,
random_state=args.rs)
elif args.model == 'extra_trees_k1':
model = ExtraTreesClassifier(n_estimators=params['n_estimators'],
max_depth=params['max_depth'],
max_features=1,
criterion=args.criterion,
random_state=args.rs)
elif args.model == 'sklearn':
model = RandomForestClassifier(n_estimators=params['n_estimators'],
max_depth=params['max_depth'],
max_features=args.max_features,
criterion=args.criterion,
random_state=args.rs,
bootstrap=args.bootstrap)
else:
raise ValueError('model {} unknown!'.format(args.model))
return model
def _get_best_params(gs, param_grid, keys, logger, tol=1e-3):
"""
Chooses the set of hyperparameters whose `mean_fit_score` is within
`tol` of the best `mean_fit_score` and has the lowest `mean_fit_time`.
"""
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100)
cols = ['mean_fit_time', 'mean_test_score', 'rank_test_score']
cols += ['param_{}'.format(param) for param in keys]
df = pd.DataFrame(gs.cv_results_)
logger.info('gridsearch results:')
logger.info(df[cols].sort_values('rank_test_score'))
# filter the parameters with the highest performances
logger.info('tolerance: {}'.format(tol))
df = df[df['mean_test_score'].max() - df['mean_test_score'] <= tol]
best_df = df.sort_values('mean_fit_time').reset_index().loc[0]
best_ndx = best_df['index']
best_params = best_df['params']
logger.info('best_index: {}, best_params: {}'.format(best_ndx, best_params))
return best_params
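# Example of the selection rule (hypothetical numbers): with tol=1e-3,
# mean_test_score = [0.9400, 0.9398, 0.9310] and mean_fit_time = [30, 12, 5],
# the first two settings fall within 1e-3 of the best score, so the 12-second
# setting (fastest among them) is returned.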
def performance(args, out_dir, logger):
begin = time.time()
# obtain data
X_train, X_test, y_train, y_test = data_util.get_data(args.dataset, data_dir=args.data_dir)
# dataset statistics
logger.info('train instances: {:,}'.format(X_train.shape[0]))
logger.info('test instances: {:,}'.format(X_test.shape[0]))
logger.info('attributes: {:,}'.format(X_train.shape[1]))
logger.info('split criterion: {}'.format(args.criterion))
# tune on a fraction of the training data
if not args.no_tune:
if args.tune_frac < 1.0:
sss = StratifiedShuffleSplit(n_splits=1, test_size=2,
train_size=args.tune_frac,
random_state=args.rs)
tune_indices, _ = list(sss.split(X_train, y_train))[0]
X_train_sub, y_train_sub = X_train[tune_indices], y_train[tune_indices]
logger.info('tune instances: {:,}'.format(X_train_sub.shape[0]))
else:
X_train_sub, y_train_sub = X_train, y_train
else:
X_train_sub, y_train_sub = X_train, y_train
# hyperparameter values
n_estimators = [10, 50, 100, 250]
max_depth = [1, 3, 5, 10, 20]
# set hyperparameter grid
param_grid = {'max_depth': max_depth,
'n_estimators': n_estimators}
# add additional parameter for DaRE
if args.model == 'dare':
param_grid['k'] = [5, 10, 25, 50]
# get hyperparameter names
keys = list(param_grid.keys())
# test model
logger.info('\n{}'.format(args.model.capitalize()))
start = time.time()
model = _get_model(args)
# tune hyperparameters
if not args.no_tune:
logger.info('param_grid: {}'.format(param_grid))
# cross-validation
skf = StratifiedKFold(n_splits=args.cv, shuffle=True, random_state=args.rs)
gs = GridSearchCV(model, param_grid, scoring=args.scoring,
cv=skf, verbose=args.verbose, refit=False)
gs = gs.fit(X_train_sub, y_train_sub)
best_params = _get_best_params(gs, param_grid, keys, logger, args.tol)
model = _get_model_dict(args, best_params)
# record time it takes to tune the model
tune_time = time.time() - start
# train best model
start = time.time()
model = model.fit(X_train, y_train)
train_time = time.time() - start
logger.info('train time: {:.3f}s'.format(train_time))
n_nodes, n_random, n_greedy = model.trees_[0].get_node_statistics()
print('[Tree 0] no. nodes: {:,}, no. random: {:,}, no. greedy: {:,}'.format(n_nodes, n_random, n_greedy))
print('[Tree 0] memory usage: {:,} bytes'.format(sum(model.trees_[0].get_memory_usage())))
print('[Forest] memory usage: {:,} bytes'.format(sum(model.get_memory_usage())))
print('max_rss: {:,}'.format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
exit(0)
# evaluate
auc, acc, ap = exp_util.performance(model, X_test, y_test, name=args.model, logger=logger)
# save results
result = model.get_params()
result['model'] = args.model
result['bootstrap'] = args.bootstrap
result['auc'] = auc
result['acc'] = acc
result['ap'] = ap
result['train_time'] = train_time
result['tune_train_time'] = tune_time + train_time
result['max_rss'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
np.save(os.path.join(out_dir, 'results.npy'), result)
logger.info('total time: {:.3f}s'.format(time.time() - begin))
logger.info('max_rss: {:,}'.format(result['max_rss']))
def main(args):
# create output dir
out_dir = os.path.join(args.out_dir, args.dataset, args.criterion)
# add tuning to filepath
if args.no_tune:
out_dir = os.path.join(out_dir, 'no_tune', 'rs_{}'.format(args.rs))
else:
out_dir = os.path.join(out_dir, 'tuned', 'rs_{}'.format(args.rs))
# create filename
if args.model == 'sklearn':
out_dir = os.path.join(out_dir, args.model)
if args.bootstrap:
out_dir = os.path.join(out_dir, 'bootstrap')
elif args.model == 'dare':
assert args.topd == 0
out_dir = os.path.join(out_dir, args.model)
elif args.model in ['extra_trees', 'extra_trees_k1', 'borat']:
out_dir = os.path.join(out_dir, args.model)
else:
raise ValueError('model {} unknown!'.format(args.model))
# create output directory and clear any previous contents
os.makedirs(out_dir, exist_ok=True)
print_util.clear_dir(out_dir)
# create logger
logger = print_util.get_logger(os.path.join(out_dir, 'log.txt'))
logger.info(args)
logger.info(datetime.now())
# write everything printed to stdout to this log file
logfile, stdout, stderr = print_util.stdout_stderr_to_log(os.path.join(out_dir, 'log+.txt'))
# run experiment
performance(args, out_dir, logger)
# restore original stdout and stderr settings
print_util.reset_stdout_stderr(logfile, stdout, stderr)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# I/O settings
parser.add_argument('--data_dir', type=str, default='data', help='data directory.')
parser.add_argument('--out_dir', type=str, default='output/performance/', help='output directory.')
parser.add_argument('--dataset', default='surgical', help='dataset to use for the experiment.')
# experiment settings
parser.add_argument('--rs', type=int, default=1, help='random state.')
parser.add_argument('--model', type=str, default='dare', help='type of model.')
parser.add_argument('--criterion', type=str, default='gini', help='splitting criterion.')
parser.add_argument('--topd', type=int, default=0, help='0 for exact, 1000 for random.')
parser.add_argument('--k', type=int, default=25, help='no. of candidate thresholds to sample.')
parser.add_argument('--bootstrap', action='store_true', default=False, help='use bootstrapping with sklearn.')
# tuning settings
parser.add_argument('--no_tune', action='store_true', default=False, help='do not tune.')
parser.add_argument('--tune_frac', type=float, default=1.0, help='fraction of training to use for tuning.')
parser.add_argument('--cv', type=int, default=5, help='number of cross-validation folds for tuning.')
parser.add_argument('--scoring', type=str, default='roc_auc', help='metric for tuning.')
parser.add_argument('--tol', type=float, default=1e-3, help='allowable accuracy difference from the best.')
# tree/forest hyperparameters
parser.add_argument('--n_estimators', type=int, default=100, help='number of trees in the forest.')
parser.add_argument('--max_features', type=str, default='sqrt', help='maximum no. features to sample.')
parser.add_argument('--max_depth', type=int, default=20, help='maximum depth of the tree.')
# display settings
parser.add_argument('--verbose', type=int, default=2, help='verbosity level.')
args = parser.parse_args()
main(args)
```
#### File: scripts/experiments/topd_tuning.py
```python
import os
import sys
import time
import argparse
import resource
from datetime import datetime
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../../')
sys.path.insert(0, here + '/../')
import dare
from utility import data_util
from utility import print_util
def _get_model(args, topd=0):
"""
Return model with the specified `topd`.
"""
model = dare.Forest(max_depth=args.max_depth,
criterion=args.criterion,
topd=topd,
k=args.k,
n_estimators=args.n_estimators,
max_features=args.max_features,
verbose=args.verbose,
random_state=args.rs)
return model
def performance(args, out_dir, logger):
begin = time.time()
# obtain data
X_train, X_test, y_train, y_test = data_util.get_data(args.dataset, data_dir=args.data_dir)
# dataset statistics
logger.info('\nno. train instances: {:,}'.format(X_train.shape[0]))
logger.info('no. test instances: {:,}'.format(X_test.shape[0]))
logger.info('no. features: {:,}'.format(X_train.shape[1]))
logger.info('split criterion: {}'.format(args.criterion))
logger.info('scoring: {}'.format(args.scoring))
# tune on a fraction of the training data
if args.tune_frac < 1.0:
sss = StratifiedShuffleSplit(n_splits=1, test_size=2,
train_size=args.tune_frac,
random_state=args.rs)
tune_indices, _ = list(sss.split(X_train, y_train))[0]
X_train_sub, y_train_sub = X_train[tune_indices], y_train[tune_indices]
logger.info('tune instances: {:,}'.format(X_train_sub.shape[0]))
else:
X_train_sub, y_train_sub = X_train, y_train
skf = StratifiedKFold(n_splits=args.cv, shuffle=True, random_state=args.rs)
# train exact model
start = time.time()
model = _get_model(args, topd=0)
exact_score = cross_val_score(model, X_train_sub, y_train_sub, scoring=args.scoring, cv=skf).mean()
logger.info('\n[topd=0] CV score: {:.5f}, time: {:.3f}s'.format(exact_score, time.time() - start))
# train models with topd > 0 and track the largest acceptable topd per tolerance
s = '[topd={}] CV score: {:.5f}, CV diff: {:.5f}, time: {:.3f}s'
scores = {}
best_scores = {tol: 0 for tol in args.tol}
for topd in range(1, args.max_depth + 1):
start = time.time()
# obtain score for this topd
model = _get_model(args, topd=topd)
score = cross_val_score(model, X_train_sub, y_train_sub, scoring=args.scoring, cv=skf).mean()
score_diff = exact_score - score
scores[topd] = score
end = time.time() - start
logger.info(s.format(topd, score, score_diff, end))
# update best score for each tolerance
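# A tolerance's best topd only advances while every shallower topd has also stayed
# within that tolerance, i.e. best_scores[tol] ends up as the largest depth d such
# that topd = 1..d are all within tol of the exact (topd=0) CV score.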
for tol in args.tol:
if best_scores[tol] == topd - 1 and score_diff <= tol:
best_scores[tol] = topd
total_time = time.time() - begin
logger.info('{}, total time: {:.3f}s'.format(best_scores, total_time))
logger.info('max_rss: {:,}'.format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
np.save(os.path.join(out_dir, 'results.npy'), best_scores)
def main(args):
# create output dir
out_dir = os.path.join(args.out_dir,
args.dataset,
args.criterion,
'rs_{}'.format(args.rs))
# create output directory and clear previous contents
os.makedirs(out_dir, exist_ok=True)
print_util.clear_dir(out_dir)
# create logger
logger = print_util.get_logger(os.path.join(out_dir, 'log.txt'))
logger.info(args)
logger.info('timestamp: {}'.format(datetime.now()))
# run experiment
performance(args, out_dir, logger)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# I/O settings
parser.add_argument('--data_dir', type=str, default='data', help='data directory.')
parser.add_argument('--out_dir', type=str, default='output/topd_tuning/', help='output directory.')
parser.add_argument('--dataset', default='surgical', help='dataset to use for the experiment.')
# experiment settings
parser.add_argument('--rs', type=int, default=1, help='random state.')
parser.add_argument('--cv', type=int, default=5, help='number of cross-validation folds for tuning.')
parser.add_argument('--scoring', type=str, default='roc_auc', help='metric for tuning.')
parser.add_argument('--tune_frac', type=float, default=1.0, help='fraction of training to use for tuning.')
parser.add_argument('--tol', type=float, nargs='+', default=[0.001, 0.0025, 0.005, 0.01], help='allowable metric difference.')
# tree/forest hyperparameters
parser.add_argument('--n_estimators', type=int, default=100, help='number of trees in the forest.')
parser.add_argument('--max_features', type=str, default='sqrt', help='maximum features to sample.')
parser.add_argument('--max_depth', type=int, default=1, help='maximum depth of the tree.')
parser.add_argument('--k', type=int, default=10, help='no. thresholds to sample for greedy nodes.')
parser.add_argument('--criterion', type=str, default='gini', help='splitting criterion.')
# display settings
parser.add_argument('--verbose', type=int, default=2, help='verbosity level.')
args = parser.parse_args()
main(args)
```
#### File: scripts/postprocess/delete.py
```python
import os
import sys
import argparse
from datetime import datetime
from itertools import product
import numpy as np
import pandas as pd
from scipy.stats import sem
from tqdm import tqdm
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../')
from utility import print_util
def get_result(template, in_dir):
"""
Obtain the results for this baseline method.
"""
result = template.copy()
fp = os.path.join(in_dir, 'results.npy')
if not os.path.exists(fp):
result = None
else:
d = np.load(fp, allow_pickle=True)[()]
result.update(d)
return result
def process_utility(gf):
"""
Processes utility differences BEFORE deletion,
and averages the results over different random states.
"""
result = {}
model_acc_list = []
model_auc_list = []
model_ap_list = []
acc_diff_list = []
auc_diff_list = []
ap_diff_list = []
model_delete_time_list = []
for row in gf.itertuples(index=False):
# extract model predictive performance
model_acc_list.append(row.model_acc)
model_auc_list.append(row.model_auc)
model_ap_list.append(row.model_ap)
# compare model predictive performance to naive
acc_diff_list.append(row.naive_acc - row.model_acc)
auc_diff_list.append(row.naive_auc - row.model_auc)
ap_diff_list.append(row.naive_ap - row.model_ap)
# record avg. deletion time for the model
model_delete_time_list.append(row.naive_avg_delete_time / row.model_n_deleted)
# compute mean and sem for predictive performances
result['model_acc_mean'] = np.mean(model_acc_list)
result['model_auc_mean'] = np.mean(model_auc_list)
result['model_ap_mean'] = np.mean(model_ap_list)
result['model_acc_sem'] = sem(model_acc_list)
result['model_auc_sem'] = sem(model_auc_list)
result['model_ap_sem'] = sem(model_ap_list)
result['model_delete_time_mean'] = np.mean(model_delete_time_list)
result['model_delete_time_sem'] = sem(model_delete_time_list)
# compute mean and sem for predictive performance differences
result['acc_diff_mean'] = np.mean(acc_diff_list)
result['auc_diff_mean'] = np.mean(auc_diff_list)
result['ap_diff_mean'] = np.mean(ap_diff_list)
result['acc_diff_sem'] = sem(acc_diff_list)
result['auc_diff_sem'] = sem(auc_diff_list)
result['ap_diff_sem'] = sem(ap_diff_list)
return result
def process_retrains(gf, max_depth=20):
"""
Averages no. retrains and retrain costs over multiple runs for each depth.
"""
n_retrains = np.zeros(shape=(len(gf), max_depth))
retrain_costs = np.zeros(shape=(len(gf), max_depth))
i = 0
for row in gf.itertuples(index=False):
for j in range(max_depth):
# add deletions to this depth
if 1 in row.model_delete_depths and j in row.model_delete_depths[1]:
n_retrains[i][j] = row.model_delete_depths[1][j]
if j in row.model_delete_costs:
retrain_costs[i][j] = row.model_delete_costs[j]
# compute average no. retrains and retrain costs for each depth
n_retrains_mean = np.mean(n_retrains, axis=0)
retrain_costs_mean = np.mean(retrain_costs, axis=0)
# build results
n_retrains_result = {k: v for k, v in zip(range(n_retrains_mean.shape[0]), n_retrains_mean)}
retrain_costs_result = {k: v for k, v in zip(range(retrain_costs_mean.shape[0]), retrain_costs_mean)}
return n_retrains_result, retrain_costs_result
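# Illustrative averaging over two runs, e.g. process_retrains(gf, max_depth=3),
# with hypothetical per-run values:
#   run 1: model_delete_depths = {1: {0: 4}}, model_delete_costs = {0: 100}
#   run 2: model_delete_depths = {1: {0: 2}}, model_delete_costs = {0: 60}
#   -> n_retrains_result    = {0: 3.0, 1: 0.0, 2: 0.0}
#      retrain_costs_result = {0: 80.0, 1: 0.0, 2: 0.0}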
def process_results(df):
"""
Processes utility differences, retrains, and averages the results
over different random states.
"""
setting_cols = ['dataset', 'criterion', 'n_estimators', 'max_depth',
'topd', 'k', 'subsample_size']
keep_cols = ['naive_avg_delete_time',
'naive_n_deleted',
'model_n_deleted',
'model_train_%_deleted',
'model_n_nodes_avg',
'model_n_random_nodes_avg',
'model_n_greedy_nodes_avg']
# result containers
main_result_list = []
n_retrain_result_list = []
retrain_cost_result_list = []
# loop through each experiment setting
i = 0
for tup, gf in tqdm(df.groupby(setting_cols)):
# create main result
main_result = {k: v for k, v in zip(setting_cols, tup)}
main_result['id'] = i
main_result.update(process_utility(gf))
for c in keep_cols:
main_result[c] = gf[c].mean()
main_result['{}_std'.format(c)] = gf[c].std()
main_result_list.append(main_result)
# process retrain results
n_retrain_result, retrain_cost_result = process_retrains(gf)
# create no. retrain result
n_retrain_result['id'] = i
n_retrain_result_list.append(n_retrain_result)
# create retrain cost result
retrain_cost_result['id'] = i
retrain_cost_result_list.append(retrain_cost_result)
i += 1
# compile results
main_df = pd.DataFrame(main_result_list)
n_retrain_df = pd.DataFrame(n_retrain_result_list)
retrain_cost_df = pd.DataFrame(retrain_cost_result_list)
return main_df, n_retrain_df, retrain_cost_df
def create_csv(args, out_dir, logger):
logger.info('\nGathering results...')
experiment_settings = list(product(*[args.dataset, args.criterion, args.rs,
args.topd, args.k, args.subsample_size]))
# cedar_settings = list(product(*[args.epsilon, args.lmbda]))
results = []
for dataset, criterion, rs, topd, k, sub_size in tqdm(experiment_settings):
template = {'dataset': dataset,
'criterion': criterion,
'rs': rs,
'topd': topd,
'k': k,
'subsample_size': sub_size}
experiment_dir = os.path.join(args.in_dir,
dataset,
criterion,
'rs_{}'.format(rs),
'topd_{}'.format(topd),
'k_{}'.format(k),
'sub_{}'.format(sub_size))
# skip empty experiments
if not os.path.exists(experiment_dir):
continue
# add results to result dict
result = get_result(template, experiment_dir)
if result is not None:
results.append(result)
# display more columns
pd.set_option('display.max_columns', 100)
pd.set_option('display.width', 180)
# collect raw results
df = pd.DataFrame(results)
logger.info('\nRaw results:\n{}'.format(df))
# process results
logger.info('\nProcessing results...')
main_df, n_retrain_df, retrain_cost_df = process_results(df)
logger.info('\nProcessed results:\n{}'.format(main_df))
logger.info('\nNo. retrain results:\n{}'.format(n_retrain_df))
logger.info('\nRetrain cost results:\n{}'.format(retrain_cost_df))
# create filepaths
main_fp = os.path.join(out_dir, 'results.csv')
n_retrain_fp = os.path.join(out_dir, 'n_retrain.csv')
retrain_cost_fp = os.path.join(out_dir, 'retrain_cost.csv')
# save processed results
main_df.to_csv(main_fp, index=None)
n_retrain_df.to_csv(n_retrain_fp, index=None)
retrain_cost_df.to_csv(retrain_cost_fp, index=None)
def main(args):
out_dir = os.path.join(args.out_dir)
# create logger
os.makedirs(out_dir, exist_ok=True)
logger = print_util.get_logger(os.path.join(out_dir, 'log.txt'))
logger.info(args)
logger.info(datetime.now())
create_csv(args, out_dir, logger)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# I/O settings
parser.add_argument('--in_dir', type=str, default='output', help='input directory.')
parser.add_argument('--out_dir', type=str, default='output/delete/csv/', help='output directory.')
# experiment settings
parser.add_argument('--dataset', type=str, nargs='+',
default=['surgical', 'vaccine', 'adult', 'bank_marketing', 'flight_delays', 'diabetes',
'census', 'credit_card', 'no_show', 'olympics', 'twitter', 'synthetic',
'higgs', 'ctr'], help='dataset.')
parser.add_argument('--criterion', type=str, nargs='+', default=['gini', 'entropy'], help='criterion.')
parser.add_argument('--rs', type=int, nargs='+', default=[1, 2, 3, 4, 5], help='random state.')
parser.add_argument('--subsample_size', type=int, nargs='+', default=[1, 1000], help='subsampling size.')
# hyperparameter settings
parser.add_argument('--topd', type=int, nargs='+', default=list(range(21)), help='top d.')
parser.add_argument('--k', type=int, nargs='+', default=[1, 5, 10, 25, 50, 100], help='no. thresholds.')
args = parser.parse_args()
main(args)
```
#### File: scripts/tests/split_score.py
```python
import os
import sys
import time
import argparse
from datetime import datetime
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../../')
sys.path.insert(0, here + '/../')
from utility import data_util
from utility import print_util
class Threshold:
def __init__(self, v, n, n_pos, n_left, n_left_pos):
self.v = v
self.n = n
self.n_pos = n_pos
self.n_left = n_left
self.n_left_pos = n_left_pos
def get_thresholds(x_arr, y_arr):
"""
Find all candidate threshold values and return a list of Threshold objects.
"""
# sort values and labels
indices = np.argsort(x_arr)
x = x_arr[indices]
y = y_arr[indices]
# get unique values
vals = np.unique(x)
# compute node statistics
n = len(x)
n_pos = np.sum(y)
# return variable
thresholds = []
# find each valid threshold between every adjacent pair of attribute values
for i in tqdm(range(1, vals.shape[0])):
v1 = vals[i-1]
v2 = vals[i]
        v1_indices = np.where(x == v1)[0]
        v2_indices = np.where(x == v2)[0]
        v1_ratio = np.sum(y[v1_indices]) / len(v1_indices)
        v2_ratio = np.sum(y[v2_indices]) / len(v2_indices)
valid = (v1_ratio != v2_ratio) or (v1_ratio > 0 and v2_ratio < 1.0)
if valid:
left_indices = np.where(x <= v1)[0]
n_left = len(left_indices)
n_left_pos = np.sum(y[left_indices])
T = Threshold(v=v1, n=n, n_pos=n_pos, n_left=n_left, n_left_pos=n_left_pos)
thresholds.append(T)
return thresholds
def compute_scores(C):
"""
Compute split criterion for each valid threshold.
"""
results = []
# compute score for each threshold
for T in tqdm(C):
score = compute_gini_index(T)
results.append((T, score))
return results
def compute_gini_index(T):
"""
Compute Gini index for this threshold.
"""
# get statistics to compute Gini index
n = T.n
n_pos = T.n_pos
n_left = T.n_left
n_left_pos = T.n_left_pos
n_right = n - n_left
n_right_pos = n_pos - n_left_pos
    left_weighted_index = 0.0
    right_weighted_index = 0.0
    if n_left > 0:
weight = n_left / n
pos_prob = n_left_pos / n_left
neg_prob = 1 - pos_prob
index = 1 - (pos_prob * pos_prob) - (neg_prob * neg_prob)
left_weighted_index = weight * index
if n_right > 0:
weight = n_right / n
pos_prob = n_right_pos / n_right
neg_prob = 1 - pos_prob
index = 1 - (pos_prob * pos_prob) - (neg_prob * neg_prob)
right_weighted_index = weight * index
return left_weighted_index + right_weighted_index
def main(args):
# create output directory
out_dir = os.path.join(args.out_dir, args.dataset)
os.makedirs(out_dir, exist_ok=True)
# create logger
logger_fp = os.path.join(out_dir, 'log.txt')
logger = print_util.get_logger(logger_fp)
logger.info('{}'.format(args))
logger.info('\ntimestamp: {}'.format(datetime.now()))
# get dataset
X_train, X_test, y_train, y_test = data_util.get_data(args.dataset, args.data_dir)
logger.info('X_train.shape: {}'.format(X_train.shape))
# collect top threshold scores
top_scores = []
# get best threshold(s) for each feature
for i in range(X_train.shape[1]):
vals = np.unique(X_train[:, i])
C = get_thresholds(X_train[:, i], y_train)
S = compute_scores(C)
logger.info('\n[FEATURE {}] no. unique: {:,}, no. valid thresholds: {:,}'.format(i, len(vals), len(C)))
# sort thresholds based on score
S = sorted(S, key=lambda x: x[1])
# display split score for each threshold
for T, s in S[:args.k]:
logger.info(' threshold value: {:.5f}, score: {:.5f}'.format(T.v, s))
top_scores.append(s)
# plot distribution of top threshold scores
ax = sns.distplot(top_scores, rug=True, hist=False)
ax.set_title('{}: Scores for Top {} Threshold(s) / Feature'.format(args.dataset.title(), args.k))
ax.set_xlabel('Gini index')
ax.set_ylabel('Density')
plt.savefig(os.path.join(out_dir, 'k_{}.pdf'.format(args.k)), bbox_inches='tight')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# I/O settings
parser.add_argument('--data_dir', type=str, default='data', help='data directory.')
parser.add_argument('--out_dir', type=str, default='output/split_score/', help='output directory.')
parser.add_argument('--dataset', type=str, default='surgical', help='dataset to use for the experiment.')
parser.add_argument('--k', type=int, default=1, help='no. top thresholds to analyze.')
args = parser.parse_args()
main(args)
``` |
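A quick worked example of the weighted Gini computation above; this is a hedged sketch that assumes `Threshold` and `compute_gini_index` from this script are in scope, and the split statistics are made up.
```python
# Hypothetical split: 10 samples (4 positive); 6 go left (1 positive),
# 4 go right (3 positive).
T = Threshold(v=0.5, n=10, n_pos=4, n_left=6, n_left_pos=1)
# Weighted Gini index:
#   left:  (6/10) * (1 - (1/6)**2 - (5/6)**2) ~= 0.1667
#   right: (4/10) * (1 - (3/4)**2 - (1/4)**2)  = 0.1500
score = compute_gini_index(T)
print('gini: {:.4f}'.format(score))  # ~0.3167
```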
{
"source": "jjbrophy47/tree_influence",
"score": 3
} |
#### File: scripts/config/exp_args.py
```python
import configargparse
def get_general_args(cmd=None):
"""
Create an ArgParser object and add general arguments to it.
Return ArgParser object.
"""
if cmd is None:
cmd = configargparse.ArgParser(config_file_parser_class=configargparse.YAMLConfigFileParser)
cmd.add('--data_dir', type=str, default='data/')
cmd.add('--dataset', type=str, default='surgical')
cmd.add('--tree_type', type=str, default='lgb')
return cmd
def get_explainer_args(cmd=None):
"""
Add arguments used by the explainers.
Input
cmd: ArgParser, object to add commandline arguments to.
Return ArgParser object.
"""
if cmd is None:
cmd = configargparse.ArgParser(config_file_parser_class=configargparse.YAMLConfigFileParser)
cmd.add('--method', type=str, default='random')
cmd.add('--leaf_inf_update_set', type=int, default=-1) # LeafInfluence
    cmd.add('--leaf_inf_atol', type=float, default=1e-5)  # LeafInfluence
cmd.add('--input_sim_measure', type=str, default='euclidean') # InputSim
cmd.add('--tree_sim_measure', type=str, default='dot_prod') # TreeSim
cmd.add('--tree_kernel', type=str, default='lpw') # Trex, TreeSim
cmd.add('--trex_target', type=str, default='actual') # Trex
cmd.add('--trex_lmbd', type=float, default=0.003) # Trex
    cmd.add('--trex_n_epoch', type=int, default=3000)  # Trex
cmd.add('--dshap_trunc_frac', type=float, default=0.25) # DShap
cmd.add('--dshap_check_every', type=int, default=100) # DShap
cmd.add('--subsample_sub_frac', type=float, default=0.7) # SubSample
cmd.add('--subsample_n_iter', type=int, default=4000) # SubSample
cmd.add('--n_jobs', type=int, default=-1) # LOO, DShap, SubSample, LeafInf, LeafRefit
cmd.add('--random_state', type=int, default=1) # DShap, LOO, Minority, Random, SubSample, Target, Trex
return cmd
# Single test example experiments
def get_influence_args():
"""
Add arguments specific to the "Influence" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--out_dir', type=str, default='output/influence/')
cmd.add('--n_test', type=int, default=100)
return cmd
def get_influenceLE_args():
"""
Add arguments specific to the "InfluenceLE" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--out_dir', type=str, default='output/influenceLE/')
cmd.add('--n_test', type=int, default=100)
return cmd
def get_remove_args():
"""
Add arguments specific to the "Remove" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--in_dir', type=str, default='output/influence/')
cmd.add('--out_dir', type=str, default='output/remove/')
cmd.add('--n_test', type=int, default=100)
cmd.add('--remove_frac', type=float, nargs='+', default=[0.0, 0.001, 0.005, 0.01, 0.015, 0.02])
return cmd
def get_label_args():
"""
Add arguments specific to the "Label" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--in_dir', type=str, default='output/influence/')
cmd.add('--out_dir', type=str, default='output/label/')
cmd.add('--n_test', type=int, default=100)
cmd.add('--edit_frac', type=float, nargs='+', default=[0.0, 0.001, 0.005, 0.01, 0.015, 0.02])
return cmd
def get_poison_args():
"""
Add arguments specific to the "Poison" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--in_dir', type=str, default='output/influence/')
cmd.add('--out_dir', type=str, default='output/poison/')
cmd.add('--n_test', type=int, default=100)
cmd.add('--poison_frac', type=float, nargs='+', default=[0.0, 0.001, 0.005, 0.01, 0.015, 0.02])
return cmd
def get_counterfactual_args():
"""
Add arguments specific to the "Counterfactual" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--in_dir', type=str, default='output/influence/')
cmd.add('--out_dir', type=str, default='output/counterfactual/')
cmd.add('--n_test', type=int, default=100)
cmd.add('--remove_frac', type=float, nargs='+', default=[0.0, 0.001, 0.005, 0.01, 0.015, 0.02])
cmd.add('--step_size', type=int, default=10)
return cmd
def get_resources_args():
"""
Add arguments specific to the "Resources" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--out_dir', type=str, default='output/resources/')
cmd.add('--n_repeat', type=int, default=5)
cmd.add('--seed', type=int, default=-1)
return cmd
def get_structure_args():
"""
Add arguments specific to the "Structure" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--in_dir', type=str, default='output/influence/')
cmd.add('--out_dir', type=str, default='output/structure/')
cmd.add('--n_test', type=int, default=100)
cmd.add('--remove_frac', type=float, nargs='+', default=[0.0, 0.001, 0.005, 0.01, 0.015, 0.02])
cmd.add('--n_remove', type=float, nargs='+', default=[1, 10, 100])
return cmd
def get_reinfluence_args():
"""
Add arguments specific to the "Reinfluence" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--out_dir', type=str, default='output/reinfluence/')
cmd.add('--n_test', type=int, default=100)
cmd.add('--remove_frac', type=float, default=0.02)
cmd.add('--strategy', type=str, default='reestimate')
cmd.add('--n_early_stop', type=int, default=0)
return cmd
def get_label_edit_args():
"""
Add arguments specific to the "Label Edit" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--in_dir', type=str, default='output/influence/')
cmd.add('--out_dir', type=str, default='output/label_edit/')
cmd.add('--n_test', type=int, default=100)
cmd.add('--remove_frac', type=float, nargs='+', default=[0.0, 0.001, 0.005, 0.01, 0.015, 0.02])
cmd.add('--step_size', type=int, default=10)
return cmd
def get_targeted_edit_args():
"""
Add arguments specific to the "Targeted Edit" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--in_dir', type=str, default='output/influence/')
cmd.add('--out_dir', type=str, default='output/targeted_edit/')
cmd.add('--n_test', type=int, default=100)
cmd.add('--edit_frac', type=float, nargs='+',
default=[0.0, 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.014, 0.016, 0.018, 0.02])
return cmd
# Set of test examples experiments
def get_influence_set_args():
"""
Add arguments specific to the "Influence Set" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--out_dir', type=str, default='output/influence_set/')
cmd.add('--val_frac', type=float, default=0.1)
return cmd
def get_remove_set_args():
"""
Add arguments specific to the "Remove Set" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--in_dir', type=str, default='output/influence_set/')
cmd.add('--out_dir', type=str, default='output/remove_set/')
cmd.add('--val_frac', type=float, default=0.1)
cmd.add('--remove_frac', type=float, nargs='+',
default=[0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5])
return cmd
def get_label_set_args():
"""
Add arguments specific to the "Label Set" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--in_dir', type=str, default='output/influence_set/')
cmd.add('--out_dir', type=str, default='output/label_set/')
cmd.add('--val_frac', type=float, default=0.1)
cmd.add('--edit_frac', type=float, nargs='+',
default=[0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5])
return cmd
def get_poison_set_args():
"""
Add arguments specific to the "Poison Set" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--in_dir', type=str, default='output/influence_set/')
cmd.add('--out_dir', type=str, default='output/poison_set/')
cmd.add('--val_frac', type=float, default=0.1)
cmd.add('--poison_frac', type=float, nargs='+',
default=[0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5])
return cmd
def get_noise_set_args():
"""
Add arguments specific to the "Noise Set" experiment.
Return ArgParser object.
"""
cmd = get_general_args()
cmd = get_explainer_args(cmd)
cmd.add('--out_dir', type=str, default='output/noise_set/')
cmd.add('--strategy', type=str, default='test_sum')
cmd.add('--noise_frac', type=float, default=0.4)
cmd.add('--val_frac', type=float, default=0.1)
cmd.add('--check_frac', type=float, nargs='+', default=[0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3])
return cmd
```
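A minimal sketch of how these parser builders compose; it assumes the `config` package is importable the same way the experiment scripts below import it, and the flag values are placeholders.
```python
from config import exp_args
# 'Remove' experiment parser = general args + explainer args + experiment-specific args.
cmd = exp_args.get_remove_args()
args = cmd.parse_args(['--dataset', 'adult',
                       '--tree_type', 'lgb',
                       '--method', 'boostin',
                       '--remove_frac', '0.0', '0.01', '0.02'])
print(args.dataset, args.method, args.remove_frac)  # adult boostin [0.0, 0.01, 0.02]
```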
#### File: scripts/config/status_args.py
```python
from . import rank_args
from . import exp_args
def get_experiments_args():
"""
Add arguments specific to the "Experiments" status script.
Return ArgParser object.
"""
cmd = rank_args.get_general_args()
cmd = exp_args.get_explainer_args(cmd)
cmd.add('--method_list', type=str, nargs='+', default=['random', 'target', 'leaf_sim', 'boostin',
'leaf_infSP', 'trex', 'subsample', 'loo', 'leaf_inf',
'leaf_refit', 'boostinW1', 'boostinW2'])
cmd.add('--exp', type=str, default='influence/')
cmd.add('--in_dir', type=str, default='results/temp_influence/')
cmd.add('--out_dir', type=str, default='output/status/')
# single-test experiment args
cmd.add('--n_test', type=int, default=100)
cmd.add('--remove_frac', type=float, nargs='+', default=[0.0, 0.001, 0.005, 0.01, 0.015, 0.02])
cmd.add('--edit_frac', type=float, nargs='+', default=[0.0, 0.001, 0.005, 0.01, 0.015, 0.02])
cmd.add('--poison_frac', type=float, nargs='+', default=[0.0, 0.001, 0.005, 0.01, 0.015, 0.02])
cmd.add('--targeted_edit_frac', type=float, nargs='+',
default=[0.0, 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.014, 0.016, 0.018, 0.02])
# multi-test experiment args
cmd.add('--val_frac', type=float, default=0.1)
cmd.add('--remove_frac_set', type=float, nargs='+',
default=[0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5])
cmd.add('--edit_frac_set', type=float, nargs='+',
default=[0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5])
cmd.add('--poison_frac_set', type=float, nargs='+',
default=[0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5])
# noise only
cmd.add('--noise_frac', type=float, default=0.4)
cmd.add('--check_frac', type=float, nargs='+', default=[0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3])
    cmd.add('--agg_type', type=str, nargs='+', default=['self', 'test_sum'])
# other
cmd.add('--status_type', type=str, default='time')
return cmd
```
#### File: experiments/multi_test/influence_set.py
```python
import os
import sys
import time
import joblib
import argparse
import resource
from datetime import datetime
import numpy as np
from sklearn.base import clone
from sklearn.model_selection import train_test_split
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../../../') # tree_influence
sys.path.insert(0, here + '/../../') # config
sys.path.insert(0, here + '/../') # util
import tree_influence
import util
from config import exp_args
from single_test.influence import get_special_case_tol
def experiment(args, logger, params, out_dir):
# initialize experiment
begin = time.time()
rng = np.random.default_rng(args.random_state)
result = {}
# data
X_train, X_test, y_train, y_test, objective = util.get_data(args.data_dir, args.dataset)
# use a fraction of the test data for validation
stratify = None if objective == 'regression' else y_test
test_indices = np.arange(y_test.shape[0])
X_val, X_test, y_val, y_test, val_idxs, test_idxs = train_test_split(X_test, y_test, test_indices,
test_size=1.0 - args.val_frac,
stratify=stratify,
random_state=args.random_state)
# display dataset statistics
logger.info(f'\nno. train: {X_train.shape[0]:,}')
logger.info(f'no. val.: {X_val.shape[0]:,}')
logger.info(f'no. test: {X_test.shape[0]:,}')
logger.info(f'no. features: {X_train.shape[1]:,}\n')
# train tree-ensemble
hp = util.get_hyperparams(tree_type=args.tree_type, dataset=args.dataset)
tree = util.get_model(tree_type=args.tree_type, objective=objective, random_state=args.random_state)
tree.set_params(**hp)
tree = tree.fit(X_train, y_train)
res_clean = util.eval_pred(objective, tree, X_test, y_test, logger, prefix='Test (clean)')
# fit explainer
start = time.time()
explainer = tree_influence.TreeExplainer(args.method, params, logger).fit(tree, X_train, y_train)
fit_time = time.time() - start - explainer.parse_time_
logger.info(f'\n[INFO] explainer fit time: {fit_time:.5f}s\n')
# compute influence
start2 = time.time()
# aggregate local influences
    local_influence = explainer.get_local_influence(X_val, y_val)  # shape=(no. train, no. val)
influence = np.sum(local_influence, axis=1) # shape=(no. train,)
inf_time = time.time() - start2
logger.info(f'[INFO] influence time: {inf_time:.5f}s\n')
total_time = time.time() - begin
logger.info(f'\n[INFO] total time: {total_time:.3f}s')
# save results
result['influence'] = influence
result['val_idxs'] = val_idxs
result['test_idxs'] = test_idxs
result['max_rss_MB'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1e6 # MB if OSX, GB if Linux
result['fit_time'] = fit_time
result['inf_time'] = inf_time
result['total_time'] = total_time
result['tree_params'] = tree.get_params()
logger.info('\nResults:\n{}'.format(result))
logger.info('\nsaving results to {}...'.format(os.path.join(out_dir, 'results.npy')))
np.save(os.path.join(out_dir, 'results.npy'), result)
def main(args):
# get unique hash for this experiment setting
exp_dict = {'val_frac': args.val_frac}
exp_hash = util.dict_to_hash(exp_dict)
# get unique hash for the explainer
args.leaf_inf_atol = get_special_case_tol(args.dataset, args.tree_type, args.method, args.leaf_inf_atol)
params, hash_str = util.explainer_params_to_dict(args.method, vars(args))
# create output dir
out_dir = os.path.join(args.out_dir,
args.dataset,
args.tree_type,
f'exp_{exp_hash}',
f'{args.method}_{hash_str}')
# create output directory and clear previous contents
os.makedirs(out_dir, exist_ok=True)
util.clear_dir(out_dir)
logger = util.get_logger(os.path.join(out_dir, 'log.txt'))
logger.info(args)
logger.info(f'\ntimestamp: {datetime.now()}')
experiment(args, logger, params, out_dir)
# clean up
util.remove_logger(logger)
if __name__ == '__main__':
main(exp_args.get_influence_set_args().parse_args())
```
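The aggregation step above reduces a (no. train, no. val) influence matrix to one score per training example; a hedged numpy sketch with made-up values:
```python
import numpy as np
rng = np.random.default_rng(0)
local_influence = rng.normal(size=(5, 3))      # shape=(no. train, no. val)
influence = np.sum(local_influence, axis=1)    # shape=(no. train,)
ranking = np.argsort(np.abs(influence))[::-1]  # most to least influential
print(influence.shape, ranking)
```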
#### File: experiments/unused/compress.py
```python
import os
import sys
import time
import joblib
import argparse
import resource
from datetime import datetime
import numpy as np
from sklearn.base import clone
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../../')
import intent
import util
from influence import select_elements
def get_pred(objective, model, X, y):
"""
Return predictions of shape=(X.shape[0], no. class).
"""
if objective == 'regression':
pred = model.predict(X)
elif objective == 'binary':
pred = model.predict_proba(X)[:, 1]
elif objective == 'multiclass':
pred = model.predict_proba(X)
else:
raise ValueError(f'objective {objective} unknown!')
return pred
def get_ranking(inf_obj, method, params, agg, tree, X_train, y_train, X, y, logger=None):
"""
Return influence values.
"""
# fit explainer
start = time.time()
explainer = intent.TreeExplainer(method, params, logger).fit(tree, X_train, y_train)
fit_time = time.time() - start - explainer.parse_time_
if logger:
logger.info(f'\n[INFO] explainer fit time: {fit_time:.5f}s')
    # compute influence
start2 = time.time()
if inf_obj == 'local':
influence = explainer.get_local_influence(X, y)
if agg == 'sum':
influence = np.sum(influence, axis=1) # shape=(no. train,)
elif agg == 'abs_sum':
influence = np.sum(np.abs(influence), axis=1) # shape=(no. train,)
elif agg == 'mean':
influence = np.mean(influence, axis=1) # shape=(no. train,)
else:
assert agg == 'abs_mean'
influence = np.mean(np.abs(influence), axis=1) # shape=(no. train,)
else:
assert inf_obj == 'global'
influence = explainer.get_global_influence()
inf_time = time.time() - start2
if logger:
logger.info(f'[INFO] explainer influence time: {inf_time:.5f}s')
ranking = np.argsort(np.abs(influence))
return ranking
def remove_and_evaluate(args, objective, params, tree, X_train, y_train, X_test, y_test, test_idxs, logger):
# initial predictions
pred = get_pred(objective, tree, X_test, y_test)
# get list of remove fractions
remove_frac_arr = np.linspace(0, args.remove_frac, args.n_ckpt + 1)
n_remove = int(args.remove_frac * X_train.shape[0] / args.n_ckpt)
# # result container
# result = {}
# result['remove_frac'] = remove_frac_arr
# result['loss'] = np.full(remove_frac_arr.shape[0], np.nan, dtype=np.float32)
# result['pred'] = []
new_X_train = X_train.copy()
new_y_train = y_train.copy()
new_tree = clone(tree).fit(new_X_train, new_y_train)
for i in range(1, args.n_ckpt):
ranking = get_ranking(args.inf_obj, args.method, params, args.agg, new_tree,
new_X_train, new_y_train, X_test[test_idxs], y_test[test_idxs], logger=None)
new_X_train = np.delete(new_X_train, ranking[:n_remove], axis=0)
new_y_train = np.delete(new_y_train, ranking[:n_remove])
if objective == 'binary' and len(np.unique(new_y_train)) == 1:
logger.info('Only samples from one class remain!')
break
elif objective == 'multiclass' and len(np.unique(new_y_train)) < len(np.unique(y_train)):
logger.info('At least 1 sample is not present for all classes!')
break
else:
new_tree = clone(tree).fit(new_X_train, new_y_train)
new_pred = get_pred(objective, new_tree, X_test, y_test)
diff_pred = np.abs(pred - new_pred)
diff_max = np.max(diff_pred)
diff_min = np.min(diff_pred)
diff_avg = np.mean(diff_pred)
diff_median = np.median(diff_pred)
diff_std = np.std(diff_pred)
diff_n_delta = len(np.where(diff_pred > args.delta)[0])
logger.info(f"[{i:>5}: {remove_frac_arr[i] * 100:>5.2f}%] "
f"max.: {diff_max:>5.3f},\tno. > {args.delta}: {diff_n_delta:>10,},\t"
f"min.: {diff_min:>5.3f},\t"
f"avg.: {diff_avg:>5.3f},\tmedian: {diff_median:>5.3f},\t"
f"s.d.: {diff_std:>5.3f}")
# return result
def experiment(args, logger, params, out_dir):
# initialize experiment
begin = time.time()
rng = np.random.default_rng(args.random_state)
result = {}
# data
X_train, X_test, y_train, y_test, objective = util.get_data(args.data_dir, args.dataset)
logger.info(f'\nno. train: {X_train.shape[0]:,}')
logger.info(f'no. test: {X_test.shape[0]:,}')
logger.info(f'no. features: {X_train.shape[1]:,}\n')
# train tree-ensemble
hp = util.get_hyperparams(tree_type=args.tree_type, dataset=args.dataset)
tree = util.get_model(tree_type=args.tree_type, objective=objective, random_state=args.random_state)
tree.set_params(**hp)
tree = tree.fit(X_train, y_train)
util.eval_pred(objective, tree, X_test, y_test, logger, prefix='Test')
# randomly select test instances to compute influence values for
avail_idxs = np.arange(X_test.shape[0])
n_test = min(args.n_test, len(avail_idxs))
test_idxs = select_elements(avail_idxs, rng, n=n_test)
# ranking = get_ranking(args.inf_obj, args.method, params, agg, tree,
# X_train, y_train, X_test[test_idxs], y_test[test_idxs], logger=logger)
# # fit explainer
# start = time.time()
# explainer = intent.TreeExplainer(args.method, params, logger).fit(tree, X_train, y_train)
# fit_time = time.time() - start - explainer.parse_time_
# logger.info(f'\n[INFO] explainer fit time: {fit_time:.5f}s')
# # compute influence
# start2 = time.time()
# if args.inf_obj == 'local':
# influence = explainer.get_local_influence(X_test[test_idxs], y_test[test_idxs])
# if args.agg == 'sum':
# influence = np.sum(influence, axis=1) # shape=(no. train,)
# elif args.agg == 'abs_sum':
# influence = np.sum(np.abs(influence), axis=1) # shape=(no. train,)
# elif args.agg == 'mean':
# influence = np.mean(influence, axis=1) # shape=(no. train,)
# else:
# assert args.agg == 'abs_mean'
# influence = np.mean(np.abs(influence), axis=1) # shape=(no. train,)
# else:
# assert args.inf_obj == 'global'
# influence = explainer.get_global_influence()
# inf_time = time.time() - start2
# logger.info(f'[INFO] explainer influence time: {inf_time:.5f}s')
# logger.info(f'[INFO] total time: {time.time() - begin:.5f}s')
# # get ranking
# ranking = np.argsort(np.abs(influence)) # least to most influential, shape=(no. train,)
remove_and_evaluate(args, objective, params, tree, X_train, y_train, X_test, y_test, test_idxs, logger)
    # combine results from each test example
    # (disabled: remove_and_evaluate currently only logs prediction differences
    # and does not return a res_list to combine)
    # result['remove_frac'] = res_list[0]['remove_frac']  # shape=(no. ckpts,)
    # result['loss'] = np.vstack([res['loss'] for res in res_list])  # shape=(no. test, no. ckpts)
    # result['pred'] = [res['pred'] for res in res_list]  # shape=(no. test, no. completed ckpts, no class)
# store ALL train and test predictions
if objective == 'regression':
y_train_pred = tree.predict(X_train).reshape(-1, 1)
y_test_pred = tree.predict(X_test).reshape(-1, 1)
elif objective == 'binary':
y_train_pred = tree.predict_proba(X_train)[:, 1].reshape(-1, 1)
y_test_pred = tree.predict_proba(X_test)[:, 1].reshape(-1, 1)
else:
assert objective == 'multiclass'
y_train_pred = tree.predict_proba(X_train)
y_test_pred = tree.predict_proba(X_test)
# save results
    # influence, ranking, fit_time, inf_time, and n_jobs are only defined in the
    # commented-out blocks above, so they are not saved here
    # result['influence'] = influence
    # result['ranking'] = ranking
    result['test_idxs'] = test_idxs
    result['y_train_pred'] = y_train_pred
    result['y_test_pred'] = y_test_pred
    result['max_rss_MB'] = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1e6  # MB if OSX, GB if Linux
    # result['fit_time'] = fit_time
    # result['inf_time'] = inf_time
    result['total_time'] = time.time() - begin
    result['tree_params'] = tree.get_params()
    # result['n_jobs'] = n_jobs
logger.info('\nResults:\n{}'.format(result))
logger.info('\nsaving results to {}...'.format(os.path.join(out_dir, 'results.npy')))
np.save(os.path.join(out_dir, 'results.npy'), result)
def main(args):
# get unique hash for this experiment setting
exp_dict = {'inf_obj': args.inf_obj, 'n_test': args.n_test,
'remove_frac': args.remove_frac, 'n_ckpt': args.n_ckpt}
exp_hash = util.dict_to_hash(exp_dict)
# get unique hash for the explainer
params, hash_str = util.explainer_params_to_dict(args.method, vars(args))
# special cases
if args.method == 'leaf_influence':
if args.dataset == 'flight_delays':
params['atol'] = 1e-1
# create output dir
out_dir = os.path.join(args.out_dir,
args.dataset,
args.tree_type,
f'exp_{exp_hash}',
f'{args.method}_{hash_str}')
# create output directory and clear previous contents
os.makedirs(out_dir, exist_ok=True)
util.clear_dir(out_dir)
logger = util.get_logger(os.path.join(out_dir, 'log.txt'))
logger.info(args)
logger.info(f'\ntimestamp: {datetime.now()}')
experiment(args, logger, params, out_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# I/O settings
parser.add_argument('--data_dir', type=str, default='data/')
parser.add_argument('--out_dir', type=str, default='output/compress/')
# Experiment settings
parser.add_argument('--dataset', type=str, default='surgical')
parser.add_argument('--tree_type', type=str, default='lgb')
parser.add_argument('--inf_obj', type=str, default='local')
parser.add_argument('--n_test', type=int, default=100) # local
parser.add_argument('--remove_frac', type=float, default=0.95)
parser.add_argument('--n_ckpt', type=int, default=95)
parser.add_argument('--delta', type=float, default=0.1)
parser.add_argument('--agg', type=str, default='abs_sum')
# Explainer settings
parser.add_argument('--method', type=str, default='random')
parser.add_argument('--leaf_scale', type=float, default=-1.0) # BoostIn
parser.add_argument('--local_op', type=str, default='normal') # BoostIn
parser.add_argument('--update_set', type=int, default=0) # LeafInfluence
parser.add_argument('--similarity', type=str, default='dot_prod') # Similarity
parser.add_argument('--kernel', type=str, default='lpw') # Trex & similarity
parser.add_argument('--target', type=str, default='actual') # Trex
parser.add_argument('--lmbd', type=float, default=0.003) # Trex
    parser.add_argument('--n_epoch', type=int, default=3000)  # Trex
parser.add_argument('--trunc_frac', type=float, default=0.25) # DShap
parser.add_argument('--check_every', type=int, default=100) # DShap
parser.add_argument('--n_jobs', type=int, default=-1) # LOO and DShap
parser.add_argument('--random_state', type=int, default=1) # Trex, DShap, random
parser.add_argument('--global_op', type=str, default='self') # Trex, loo, DShap
args = parser.parse_args()
main(args)
```
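The per-checkpoint log lines in `remove_and_evaluate` summarize how much test predictions move after data removal; a hedged sketch of that summary with hypothetical values:
```python
import numpy as np
delta = 0.1
pred = np.array([0.90, 0.40, 0.55, 0.05])      # original test predictions
new_pred = np.array([0.70, 0.42, 0.58, 0.06])  # predictions after removal
diff = np.abs(pred - new_pred)
print(np.max(diff), np.mean(diff), len(np.where(diff > delta)[0]))  # max ~0.2, mean ~0.065, 1 example > delta
```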
#### File: scripts/postprocess/leaf_analysis.py
```python
import os
import sys
import time
import hashlib
import argparse
import resource
from datetime import datetime
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import sem
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import log_loss
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../')
sys.path.insert(0, here + '/../../')
import intent
import util
from experiments import util as exp_util
def experiment(args, logger, out_dir):
rng = np.random.default_rng(args.random_state)
# initialize experiment
begin = time.time()
# get dataset
X_train, X_test, y_train, y_test, objective = exp_util.get_data(args.data_dir, args.dataset)
# get results
inf_results = util.get_results(args, args.in_dir, logger)
inf_results = util.filter_results(inf_results, args.skip)
color, line, label = util.get_plot_dicts()
assert objective == 'binary'
# extract test examples
_, res = inf_results[0]
test_idxs = res['test_idxs']
test_proba = res['y_test_pred'][:, 0]
test_pred = np.where(test_proba <= 0.5, 0, 1)
y_test = y_test[test_idxs]
test_proba = test_proba[test_idxs]
test_pred = test_pred[test_idxs]
# no. correct/incorrect predictions
acc = accuracy_score(y_test, test_pred)
auc = roc_auc_score(y_test, test_proba)
logloss = log_loss(y_test, test_proba)
logger.info(f'\nAcc.: {acc:.3f}, AUC: {auc:.3f}, log loss: {logloss:.3f}')
# no. pos.
pos = np.where(y_test == 1)[0]
acc = accuracy_score(y_test[pos], test_pred[pos])
logloss = log_loss(y_test[pos], test_proba[pos], labels=[0, 1])
logger.info(f'No. pos.: {len(pos):,}, acc.: {acc:.3f}, log loss: {logloss:.3f}')
# no neg.
neg = np.where(y_test == 0)[0]
acc = accuracy_score(y_test[neg], test_pred[neg])
logloss = log_loss(y_test[neg], test_proba[neg], labels=[0, 1])
logger.info(f'No. neg.: {len(neg):,}, acc.: {acc:.3f}, log loss: {logloss:.3f}')
    # plot the arrived-at leaf weights for one or more test examples
logger.info('\nLeaf Analysis')
hp = exp_util.get_hyperparams(tree_type=args.tree_type, dataset=args.dataset)
tree = exp_util.get_model(tree_type=args.tree_type, objective=objective, random_state=args.random_state)
tree.set_params(**hp)
tree = tree.fit(X_train, y_train)
explainer = intent.TreeExplainer('boostin', {}, logger).fit(tree, X_train, y_train)
test_leaves = explainer.model_.apply(X_test) # shape=(no. test, no. boost, no. class)
test_weights = explainer._get_leaf_weights(test_leaves)[test_idxs] # shape=(no. test, no. boost, no. class)
# pick test examples
fig, axs = plt.subplots(3, 3, figsize=(16, 12), sharey=False)
axs = axs.flatten()
avail_idxs = pos if args.test == 'pos' else neg
for i, selected_idx in enumerate(rng.choice(len(avail_idxs), size=9, replace=False)):
test_idx = avail_idxs[selected_idx]
logger.info(f'[No. {i:,}, Test {test_idx}]')
test_weight = test_weights[test_idx].flatten() # flatten across boosts/classes
ax = axs[i]
sns.barplot(x=np.arange(len(test_weight)), y=test_weight, ax=ax)
ax.set_xticklabels(ax.get_xticklabels(), fontsize=1)
ax.set_title(f'Test No. {test_idx}, pred.: {test_proba[test_idx]:.3f}, target: {y_test[test_idx]}')
if i in [0, 3, 6]:
ax.set_ylabel('Leaf weight (1 / no. train at that leaf)')
if i in [6, 7, 8]:
ax.set_xlabel('Tree index')
if args.scale == 'log':
ax.set_yscale('log')
plt_dir = os.path.join(args.out_dir, args.inf_obj, args.dataset)
suffix = ''
os.makedirs(plt_dir, exist_ok=True)
fp = os.path.join(plt_dir, f'{args.test}{args.scale}')
logger.info(f'\nsaving plots to {fp + suffix + ".pdf"}...')
plt.tight_layout()
plt.savefig(fp + suffix + '.pdf', bbox_inches='tight')
def main(args):
# get method params and unique settings hash
_, hash_str = exp_util.explainer_params_to_dict(args.method, vars(args))
# create output dir
out_dir = os.path.join(args.out_dir)
# create output directory and clear previous contents
os.makedirs(out_dir, exist_ok=True)
logger = exp_util.get_logger(os.path.join(out_dir, 'log.txt'))
logger.info(args)
logger.info(datetime.now())
experiment(args, logger, out_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# I/O settings
parser.add_argument('--data_dir', type=str, default='data/')
parser.add_argument('--in_dir', type=str, default='/Volumes/30/intent/temp_influence/')
parser.add_argument('--out_dir', type=str, default='output/plot/leaf_analysis/')
# Data settings
parser.add_argument('--dataset', type=str, default='surgical')
# Tree-ensemble settings
parser.add_argument('--tree_type', type=str, default='lgb')
# Method settings
parser.add_argument('--method', type=str, nargs='+',
default=['random', 'target', 'boostin', 'trex', 'similarity',
'leaf_influence', 'loo', 'dshap'])
parser.add_argument('--skip', type=str, nargs='+',
default=['minority', 'loss', 'boostin_9e', 'boostin_08'])
parser.add_argument('--use_leaf', type=int, nargs='+', default=[1, 0]) # BoostIn
parser.add_argument('--local_op', type=str, nargs='+', default=['normal', 'sign', 'sim']) # BoostIn
parser.add_argument('--update_set', type=int, nargs='+', default=[-1, 0]) # LeafInfluence
parser.add_argument('--similarity', type=str, nargs='+', default=['dot_prod']) # Similarity
parser.add_argument('--kernel', type=str, nargs='+', default=['lpw']) # Trex & Similarity
parser.add_argument('--target', type=str, nargs='+', default=['actual']) # Trex
parser.add_argument('--lmbd', type=float, nargs='+', default=[0.003]) # Trex
parser.add_argument('--n_epoch', type=str, nargs='+', default=[3000]) # Trex
parser.add_argument('--trunc_frac', type=float, nargs='+', default=[0.25]) # DShap
parser.add_argument('--check_every', type=int, nargs='+', default=[100]) # DShap
parser.add_argument('--global_op', type=str, nargs='+', default=['self', 'expected']) # TREX, LOO, DShap
parser.add_argument('--n_jobs', type=int, default=-1) # LOO and DShap
parser.add_argument('--random_state', type=int, default=1) # Trex, DShap, random
# Experiment settings
parser.add_argument('--inf_obj', type=str, default='local')
parser.add_argument('--test', type=str, default='pos')
parser.add_argument('--scale', type=str, default='')
args = parser.parse_args()
main(args)
```
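The "leaf weight" plotted above is simply one over the number of training examples that reach the same leaf as the test example; a hedged toy sketch with made-up values:
```python
import numpy as np
train_leaves = np.array([0, 0, 1, 1, 1, 2])  # leaf index per training example (one tree)
test_leaf = 1                                # leaf the test example lands in
leaf_counts = np.bincount(train_leaves)      # [2, 3, 1]
weight = 1.0 / leaf_counts[test_leaf]        # 1/3
print(weight)
```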
#### File: scripts/postprocess/remove.py
```python
import os
import sys
import argparse
from datetime import datetime
from itertools import product
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import sem
from tqdm import tqdm
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../')
import util
from experiments import util as exp_util
from config import post_args
def process(args, exp_hash, out_dir, logger):
color, line, label, marker = util.get_plot_dicts(markers=True)
n_test = None
# get dataset
X_train, X_test, y_train, y_test, objective = exp_util.get_data(args.data_dir, args.dataset)
# get results
exp_dir = os.path.join(args.in_dir,
args.dataset,
args.tree_type,
f'exp_{exp_hash}')
results = util.get_results(args, exp_dir, logger)
results = util.filter_results(results, args.skip)
results_dict = {label[method]: (method, res) for method, res in results}
order = ['BoostIn', 'LeafInfSP', 'TREX', 'TreeSim', 'LeafRefit',
'LeafInfluence', 'SubSample', 'LOO', 'Random', 'RandomSL']
util.plot_settings(fontsize=23)
fig, ax = plt.subplots()
for i, key in enumerate(order):
if key not in results_dict:
continue
method, res = results_dict[key]
# sanity check
if i == 0:
n_test = res['loss'].shape[0]
else:
temp = res['loss'].shape[0]
assert n_test == temp, f'Inconsistent no. test: {temp:,} != {n_test:,}'
# plot loss
x = res['remove_frac'] * 100
y = res['loss'].mean(axis=0)
y_err = sem(res['loss'], axis=0)
y_err = y_err if args.std_err else None
ax.errorbar(x, y, yerr=y_err, label=label[method], color=color[method],
linestyle=line[method], marker=marker[method], alpha=0.75)
ax.set_xlabel('Train data removed (%)')
ax.set_ylabel(f'Average test loss')
if args.legend:
ax.legend(fontsize=10)
plt.tight_layout()
plt.savefig(os.path.join(out_dir, f'{args.dataset}.pdf'), bbox_inches='tight')
logger.info(f'\nSaving results to {out_dir}/...')
def main(args):
exp_dict = {'n_test': args.n_test, 'remove_frac': args.remove_frac}
exp_hash = exp_util.dict_to_hash(exp_dict)
out_dir = os.path.join(args.out_dir, args.tree_type, f'exp_{exp_hash}', 'postprocess')
log_dir = os.path.join(out_dir, 'logs')
# create logger
os.makedirs(out_dir, exist_ok=True)
os.makedirs(log_dir, exist_ok=True)
logger = exp_util.get_logger(os.path.join(log_dir, f'{args.dataset}.txt'))
logger.info(args)
logger.info(datetime.now())
process(args, exp_hash, out_dir, logger)
if __name__ == '__main__':
main(post_args.get_remove_args().parse_args())
```
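Each curve above is the mean loss over test examples at every removal checkpoint, with a standard-error band; a hedged sketch of that reduction with made-up numbers:
```python
import numpy as np
from scipy.stats import sem
loss = np.array([[0.30, 0.35, 0.50],   # shape=(no. test, no. checkpoints)
                 [0.25, 0.40, 0.55],
                 [0.28, 0.33, 0.61]])
y = loss.mean(axis=0)      # mean loss per checkpoint
y_err = sem(loss, axis=0)  # standard error per checkpoint
print(y, y_err)
```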
#### File: scripts/rank/poison.py
```python
import os
import sys
import time
import argparse
from datetime import datetime
from itertools import product
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import sem
from scipy.stats import gmean
from tqdm import tqdm
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../')
from postprocess import util as pp_util
from experiments import util as exp_util
from config import rank_args
from remove import get_mean_df
def process(args, exp_hash, out_dir, logger):
begin = time.time()
color, line, label = pp_util.get_plot_dicts()
df_list = []
df_li_list = []
df_time_list = []
df_mem_list = []
for tree_type in args.tree_type:
in_dir = os.path.join(args.in_dir,
tree_type,
f'exp_{exp_hash}',
'summary')
# get resource usage
ckpt_dir = os.path.join(in_dir, f'ckpt_{args.ckpt[0]}')
fp_time = os.path.join(ckpt_dir, 'runtime.csv')
fp_mem = os.path.join(ckpt_dir, 'mem.csv')
assert os.path.exists(fp_time), f'{fp_time} does not exist!'
assert os.path.exists(fp_mem), f'{fp_mem} does not exist!'
df_time_list.append(pd.read_csv(fp_time))
df_mem_list.append(pd.read_csv(fp_mem))
# get loss
for ckpt in args.ckpt:
ckpt_dir = os.path.join(in_dir, f'ckpt_{ckpt}')
fp = os.path.join(ckpt_dir, 'loss_rank.csv')
fp_li = os.path.join(ckpt_dir, 'loss_rank_li.csv')
assert os.path.exists(fp), f'{fp} does not exist!'
assert os.path.exists(fp_li), f'{fp_li} does not exist!'
df_list.append(pd.read_csv(fp))
df_li_list.append(pd.read_csv(fp_li))
df_all = pd.concat(df_list)
df_li_all = pd.concat(df_li_list)
df_time_all = pd.concat(df_time_list)
df_mem_all = pd.concat(df_mem_list)
# average ranks among different checkpoints and/or tree types
group_cols = ['dataset']
df_all = df_all.groupby(group_cols).mean().reset_index()
df_li_all = df_li_all.groupby(group_cols).mean().reset_index()
df_time_all = df_time_all.groupby(group_cols).mean().reset_index()
df_mem_all = df_mem_all.groupby(group_cols).mean().reset_index()
# compute average ranks
skip_cols = ['dataset', 'tree_type', 'poison_frac']
df = get_mean_df(df_all, skip_cols=skip_cols, sort='ascending')
df_li = get_mean_df(df_li_all, skip_cols=skip_cols, sort='ascending')
logger.info(f'\nLoss:\n{df}')
logger.info(f'\nLoss (li):\n{df_li}')
# # combine dataframes
# index = df_li.index
# df = df_li.reset_index().merge(df.reset_index(), on='index', how='left')
# means_df = df[['index', 'mean_x', 'mean_y']].copy()
# sems_df = df[['index', 'sem_x', 'sem_y']].copy()
# # rename and clean up
# means_df.index = means_df['index']
# sems_df.index = means_df['index']
# del means_df['index']
# del sems_df['index']
# means_df.columns = ['Subgroup A', 'All datasets']
# sems_df.columns = ['Subgroup A', 'All datasets']
# print(means_df)
# print(sems_df)
# # plot
# fig, ax = plt.subplots(figsize=(4, 4))
# means_df.plot.bar(yerr=sems_df, ax=ax, rot=45,
# title=f'Loss ({len(means_df)} datasets)', capsize=3,
# ylabel='Avg. rank', xlabel='Method')
# plot
n_datasets = len(df_all['dataset'].unique())
n_li_datasets = len(df_li_all['dataset'].unique())
label_dict = {'LeafInfluence': 'LeafInf.', 'SubSample': 'SubS.', 'Target': 'RandomSL'}
df = df.rename(columns={'mean': 'All datasets'}, index=label_dict)
df_li = df_li.rename(columns={'mean': 'SDS'}, index=label_dict)
# reorder methods
order = ['BoostIn', 'LeafInfSP', 'TreeSim', 'TREX', 'SubS.', 'LOO', 'RandomSL', 'Random']
order_li = ['LeafRefit', 'LeafInf.', 'BoostIn', 'LeafInfSP', 'TreeSim', 'TREX', 'SubS.', 'LOO',
'RandomSL', 'Random']
# order_li = ['BoostIn', 'LeafInfSP', 'TreeSim', 'TREX', 'SubS.', 'LOO', 'RandomSL', 'Random',
# 'LeafRefit', 'LeafInf.']
df = df.reindex(order)
df_li = df_li.reindex(order_li)
labels = [c if i % 2 != 0 else f'\n{c}' for i, c in enumerate(df.index)]
labels_li = [c if i % 2 != 0 else f'\n{c}' for i, c in enumerate(df_li.index)]
pp_util.plot_settings(fontsize=28)
width = 22
height = pp_util.get_height(width, subplots=(1, 2))
fig, axs = plt.subplots(1, 2, figsize=(width, height), gridspec_kw={'width_ratios': [6, 8]})
ax = axs[0]
df.plot(kind='bar', y='All datasets', yerr='sem', ax=ax, title=None, capsize=3,
ylabel='Average rank', xlabel=None, legend=True, color='#3e9ccf')
ax.set_xticklabels(labels, rotation=0)
ax = axs[1]
df_li.plot(kind='bar', y='SDS', yerr='sem', ax=ax, title=None, capsize=3,
ylabel=None, xlabel=None, legend=True, color='#ff7600')
ax.set_xticklabels(labels_li, rotation=0)
ax.axvline(1.5, color='gray', linestyle='--')
# ax.axvline(7.5, color='gray', linestyle='--')
logger.info(f'\nSaving results to {out_dir}/...')
plt.tight_layout()
plt.savefig(os.path.join(out_dir, 'roar.pdf'), bbox_inches='tight')
df.to_csv(os.path.join(out_dir, 'loss_rank.csv'))
df_li.to_csv(os.path.join(out_dir, 'loss_rank_li.csv'))
logger.info(f'\nTotal time: {time.time() - begin:.3f}s')
def main(args):
exp_dict = {'n_test': args.n_test, 'poison_frac': args.poison_frac}
exp_hash = exp_util.dict_to_hash(exp_dict)
assert len(args.tree_type) > 0
out_dir = os.path.join(args.in_dir,
'rank',
f'exp_{exp_hash}',
f'+'.join(args.tree_type))
# create logger
os.makedirs(out_dir, exist_ok=True)
logger = exp_util.get_logger(os.path.join(out_dir, 'log.txt'))
logger.info(args)
logger.info(f'\ntimestamp: {datetime.now()}')
process(args, exp_hash, out_dir, logger)
if __name__ == '__main__':
main(rank_args.get_poison_args().parse_args())
```
#### File: scripts/summarize/correlation.py
```python
import os
import sys
import time
import tqdm
import hashlib
import argparse
import resource
import seaborn as sns
from datetime import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.base import clone
from sklearn.metrics import r2_score
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from scipy.stats import sem
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../')
from postprocess import util as pp_util
from experiments import util as exp_util
from config import summ_args
def reorder_heatmap(arr, idxs):
"""
Reorder heatmap based on the given indices.
Input
arr: 2d Array.
idxs: 1d array of indices.
Return
2d array with relevant positions swapped.
"""
assert arr.ndim == 2
assert idxs.ndim == 1 and len(idxs) == len(arr)
idxs = idxs.copy()
    # make sure idxs start at 0 and increase by 1
if np.min(idxs) > 0 or np.max(idxs) >= len(idxs):
sorted_positions = np.argsort(idxs)
new_idx_vals = np.arange(len(idxs))
for pos, new_idx_val in zip(sorted_positions, new_idx_vals):
idxs[pos] = new_idx_val
# build pairwise dictionary
pw = {}
for i in range(len(arr)):
for j in range(len(arr)):
pw[f'{i}_{j}'] = arr[i, j]
# rebuild heatmap with specified ordering
res = np.zeros(arr.shape)
for i1, i2 in zip(np.arange(len(arr)), idxs):
for j1, j2 in zip(np.arange(len(arr)), idxs):
res[i1, j1] = pw[f'{i2}_{j2}']
return res
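# Minimal usage sketch (hypothetical values): move the method at original
# index 2 to the front of a 3x3 correlation heatmap, followed by 0 and 1.
# _mat = np.array([[1.0, 0.2, 0.5],
#                  [0.2, 1.0, 0.3],
#                  [0.5, 0.3, 1.0]])
# _mat_reordered = reorder_heatmap(_mat, np.array([2, 0, 1]))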
def experiment(args, logger, out_dir):
begin = time.time()
p_mean_list = []
s_mean_list = []
j_mean_list = []
idx_dict = None
# hardcode classification and regression datasets
if args.out_sub_dir == 'classification':
args.dataset_list = ['adult', 'bank_marketing', 'bean', 'compas', 'credit_card',
'diabetes', 'flight_delays', 'german_credit', 'htru2', 'no_show',
'spambase', 'surgical', 'twitter', 'vaccine']
elif args.out_sub_dir == 'regression':
args.dataset_list = ['concrete', 'energy', 'life', 'naval', 'obesity',
'power', 'protein', 'wine']
# get correlation results
n_finish = 0
for tree_type in args.tree_type_list:
logger.info(f'\n{tree_type}')
for dataset in args.dataset_list:
logger.info(f'{dataset}')
res_dir = os.path.join(args.in_dir, tree_type, dataset)
if args.in_sub_dir is not None:
res_dir = os.path.join(res_dir, args.in_sub_dir)
fp = os.path.join(res_dir, 'results.npy')
if not os.path.exists(fp):
logger.info(f'skipping {fp}...')
continue
res = np.load(fp, allow_pickle=True)[()]
p_mean_list.append(res['p_mean_mat'])
s_mean_list.append(res['s_mean_mat'])
j_mean_list.append(res['j_mean_mat'])
# sanity check
if idx_dict is None:
idx_dict = res['idx_dict']
else:
assert idx_dict == res['idx_dict']
label_dict = {'target': 'RandomSL', 'leaf_sim': 'TreeSim', 'boostin': 'BoostIn',
'trex': 'TREX', 'leaf_infSP': 'LeafInfSP', 'loo': 'LOO',
'subsample': 'SubSample', 'leaf_inf': 'LeafInfluence', 'leaf_refit': 'LeafRefit'}
inv_idx_dict = {v: k for k, v in idx_dict.items()}
idxs = np.array([k for k, v in inv_idx_dict.items() if v in args.method_list], dtype=np.int32)
names = [inv_idx_dict[i] for i in idxs]
n_method = len(names)
p_mean = np.dstack(p_mean_list).mean(axis=2)
s_mean = np.dstack(s_mean_list).mean(axis=2)
j_mean = np.dstack(j_mean_list).mean(axis=2)
p_mean = p_mean[np.ix_(idxs, idxs)]
s_mean = s_mean[np.ix_(idxs, idxs)]
j_mean = j_mean[np.ix_(idxs, idxs)]
# reorder heatmap
order = ['boostin', 'leaf_infSP', 'trex', 'leaf_sim', 'subsample', 'loo']
if args.out_sub_dir == 'li':
order = ['boostin', 'leaf_infSP', 'leaf_sim', 'trex', 'subsample', 'loo', 'leaf_refit', 'leaf_inf']
new_idxs = np.array([idx_dict[m] for m in order if m in args.method_list], dtype=np.int32)
p_mean = reorder_heatmap(p_mean, new_idxs)
s_mean = reorder_heatmap(s_mean, new_idxs)
j_mean = reorder_heatmap(j_mean, new_idxs)
p_mean_df = pd.DataFrame(p_mean, columns=order, index=order)
s_mean_df = pd.DataFrame(s_mean, columns=order, index=order)
j_mean_df = pd.DataFrame(j_mean, columns=order, index=order)
logger.info(f'\nPearson results:\n{p_mean_df}')
logger.info(f'\nSpearman results:\n{s_mean_df}')
logger.info(f'\nJaccard (10%) results:\n{j_mean_df}')
logger.info(f'\nSaving results to {out_dir}...')
p_mean_df.to_csv(os.path.join(out_dir, 'pearson.csv'))
s_mean_df.to_csv(os.path.join(out_dir, 'spearman.csv'))
j_mean_df.to_csv(os.path.join(out_dir, 'jaccard_10.csv'))
# plot correlations
cmap = 'Oranges' if args.out_sub_dir == 'li' else 'Blues'
    fontsize = 15
pp_util.plot_settings(fontsize=fontsize)
# mask = None
mask = np.triu(np.ones_like(p_mean, dtype=bool)) # uncomment for mask
labels = [label_dict[name] for name in order]
labels_x = [c if i % 2 != 0 else f'\n{c}' for i, c in enumerate(labels)]
suffix = '_li' if args.out_sub_dir == 'li' else ''
fig, ax = plt.subplots()
sns.heatmap(p_mean, xticklabels=labels, yticklabels=labels, ax=ax,
cmap='Greens', mask=mask, fmt='.2f', cbar=True, annot=False)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha='right')
plt.savefig(os.path.join(out_dir, f'pearson{suffix}.pdf'), bbox_inches='tight')
fig, ax = plt.subplots()
sns.heatmap(s_mean, xticklabels=labels, yticklabels=labels, ax=ax,
cmap=cmap, mask=mask, fmt='.2f', annot=False)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha='right')
plt.savefig(os.path.join(out_dir, f'spearman{suffix}.pdf'), bbox_inches='tight')
fig, ax = plt.subplots()
sns.heatmap(j_mean, xticklabels=labels, yticklabels=labels, ax=ax,
cmap='Blues', mask=mask, fmt='.2f', annot=False)
ax.set_title('Jaccard (first 10% of sorted)')
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha='right')
plt.savefig(os.path.join(out_dir, f'jaccard_10{suffix}.pdf'), bbox_inches='tight')
logger.info(f'\nTotal time: {time.time() - begin:.3f}s')
def main(args):
# create output dir
assert len(args.tree_type_list) > 0
out_dir = os.path.join(args.out_dir,
'summary',
'+'.join(args.tree_type_list))
if args.out_sub_dir is not None:
out_dir = os.path.join(out_dir, args.out_sub_dir)
# create output directory and clear previous contents
os.makedirs(out_dir, exist_ok=True)
logger = exp_util.get_logger(os.path.join(out_dir, 'log.txt'))
logger.info(args)
logger.info(f'\ntimestamp: {datetime.now()}')
experiment(args, logger, out_dir)
if __name__ == '__main__':
main(summ_args.get_correlation_args().parse_args())
```
#### File: scripts/summarize/remove.py
```python
import os
import sys
import time
import argparse
from datetime import datetime
from itertools import product
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import sem
from scipy.stats import gmean
from tqdm import tqdm
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../')
from postprocess import util as pp_util
from experiments import util as exp_util
from config import summ_args
def get_relative_df(df, ref_col, skip_cols=[], remove_cols=[]):
"""
Compute relative values based on a reference column.
Input
df: pd.DataFrame, input dataframe values.
ref_col: str, reference column.
skip_cols: list, columns to skip.
remove_cols: list, columns to remove from return dataframe.
Return
- New dataframe with relative values.
"""
result_df = df.copy()
cols = [c for c in df.columns if c not in skip_cols]
result_df.loc[:, cols] = result_df.loc[:, cols].div(result_df[ref_col], axis=0)
# remove columns
drop_cols = [c for c in result_df.columns if c in remove_cols]
result_df = result_df.drop(columns=drop_cols)
return result_df
def get_rank_df(df, skip_cols=[], remove_cols=[], ascending=False):
"""
Rank values in dataframe.
Input
df: pd.DataFrame, input dataframe values.
skip_cols: list, columns to skip.
remove_cols: list, columns to remove from return dataframe (skipped too).
ascending: bool, if True, rank 1 has lowest value.
Return df with values replaced by rankings.
"""
result_df = df.copy()
cols = [c for c in df.columns if c not in skip_cols + remove_cols]
df = df[cols]
# drop rows in which all values are nan
df = df.dropna(axis=0, how='all')
result_df = result_df.dropna(axis=0, how='all', subset=cols)
if ascending:
df = df.fillna(1e300) # missing values get last place ranking
vals = df.values
ranks = vals.argsort(axis=1).argsort(axis=1) + 1
else:
df = df.fillna(-1e300)
vals = df.values
ranks = np.flip(vals.argsort(axis=1), axis=1).argsort(axis=1) + 1
for i, col in enumerate(cols):
result_df[col] = ranks[:, i]
drop_cols = [c for c in result_df.columns if c in remove_cols]
result_df = result_df.drop(columns=drop_cols)
return result_df
def process(args, exp_hash, out_dir, logger):
begin = time.time()
color, line, label = pp_util.get_plot_dicts()
n_test = None
rows = []
logger.info('')
for dataset in args.dataset_list:
logger.info(f'{dataset}')
exp_dir = os.path.join(args.in_dir,
dataset,
args.tree_type,
f'exp_{exp_hash}')
res_list = pp_util.get_results(args, exp_dir, logger, progress_bar=False)
res_list = pp_util.filter_results(res_list, args.skip)
row = {'dataset': dataset, 'tree_type': args.tree_type}
for j, (method, res) in enumerate(res_list):
# sanity check
if j == 0:
n_test = res['loss'].shape[0]
else:
temp = res['loss'].shape[0]
assert n_test == temp, f'Inconsistent no. test: {temp:,} != {n_test:,}'
loss_mean = res['loss'].mean(axis=0)[args.ckpt]
row['remove_frac'] = res['remove_frac'][args.ckpt]
row[f'{label[method]}'] = loss_mean
rows.append(row)
df = pd.DataFrame(rows)
# drop rows with missing values
skip_cols = ['dataset', 'tree_type', 'remove_frac']
remove_cols = ['LeafInfluence', 'LeafRefit']
cols = [x for x in df.columns if x not in skip_cols + remove_cols]
df = df.dropna(subset=cols)
logger.info(f'\nLoss:\n{df}')
# compute relative performance and rankings
skip_cols = ['dataset', 'tree_type', 'remove_frac']
ref_col = 'Random'
# relative performance
df_rel = get_relative_df(df, ref_col=ref_col, skip_cols=skip_cols, remove_cols=[ref_col])
logger.info(f'\nLoss (relative increase):\n{df_rel}')
# rank
rank_df = get_rank_df(df, skip_cols=skip_cols, remove_cols=['LeafInfluence', 'LeafRefit'] + [ref_col])
rank_li_df = get_rank_df(df[~pd.isna(df['LeafInfluence'])], skip_cols=skip_cols, remove_cols=[ref_col])
logger.info(f'\nLoss ranking:\n{rank_df}')
logger.info(f'\nLoss ranking (w/ leafinf):\n{rank_li_df}')
logger.info(f'\nSaving results to {out_dir}...')
df.to_csv(os.path.join(out_dir, 'loss.csv'), index=None)
df_rel.to_csv(os.path.join(out_dir, 'loss_rel.csv'), index=None)
rank_df.to_csv(os.path.join(out_dir, 'loss_rank.csv'), index=None)
rank_li_df.to_csv(os.path.join(out_dir, 'loss_rank_li.csv'), index=None)
logger.info(f'\nTotal time: {time.time() - begin:.3f}s')
def main(args):
exp_dict = {'n_test': args.n_test, 'remove_frac': args.remove_frac}
exp_hash = exp_util.dict_to_hash(exp_dict)
out_dir = os.path.join(args.out_dir,
args.tree_type,
f'exp_{exp_hash}',
'summary',
f'ckpt_{args.ckpt}')
# create logger
os.makedirs(out_dir, exist_ok=True)
logger = exp_util.get_logger(os.path.join(out_dir, 'log.txt'))
logger.info(args)
logger.info(datetime.now())
process(args, exp_hash, out_dir, logger)
if __name__ == '__main__':
main(summ_args.get_remove_args().parse_args())
```
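A hedged toy demonstration of the two helpers above; it assumes `get_relative_df` and `get_rank_df` are in scope (e.g., imported from this module), and the loss values are made up.
```python
import pandas as pd
df = pd.DataFrame({'dataset': ['a', 'b'],
                   'Random': [1.0, 2.0],
                   'BoostIn': [0.5, 1.0],
                   'LOO': [0.8, 2.5]})
rel_df = get_relative_df(df, ref_col='Random', skip_cols=['dataset'], remove_cols=['Random'])
# BoostIn: [0.5, 0.5], LOO: [0.8, 1.25]  (loss relative to Random)
rank_df = get_rank_df(df, skip_cols=['dataset'], remove_cols=['Random'])
# default ascending=False, so rank 1 goes to the largest loss:
# BoostIn: [2, 2], LOO: [1, 1]
```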
#### File: influence_boosting/ut/loss_ut.py
```python
import numpy as np
import tensorflow as tf
from ..loss import CrossEntropyLoss, BinaryCrossEntropyLoss
def _test_loss(sample_shape_fn):
our_loss_fn = CrossEntropyLoss()
single_vector_inputs = len(sample_shape_fn()) == 1 and sample_shape_fn() == sample_shape_fn()
length = [None] if not single_vector_inputs else [sample_shape_fn()[0]]
targets_ph = tf.placeholder(tf.float64, length * len(sample_shape_fn()))
logits_ph = tf.placeholder(tf.float64, length * len(sample_shape_fn()))
loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets_ph, logits=logits_ph)
grad = tf.gradients(loss, logits_ph)
if single_vector_inputs:
ihvp = tf.reshape(tf.matrix_solve(tf.hessians(loss, logits_ph)[0] + tf.eye(length[0], dtype=tf.float64),
tf.reshape(grad, (-1,1))), (-1,))
session = tf.Session()
    for _ in range(1000):
shape = sample_shape_fn()
targets = np.random.rand(*shape)
targets = targets ** 2
targets /= np.sum(targets, axis=-1, keepdims=True)
logits = np.random.rand(*shape)
our_loss = our_loss_fn(targets, logits)
our_grad = our_loss_fn.gradient(targets, logits)
if single_vector_inputs:
our_ihvp = our_loss_fn.ihvp(targets, logits, l2_reg=1)
feed_dict = {targets_ph: targets, logits_ph: logits}
true_loss = session.run(loss, feed_dict=feed_dict)
true_gradient = session.run(grad, feed_dict=feed_dict)
if single_vector_inputs:
true_ihvp = session.run(ihvp, feed_dict=feed_dict)
assert np.allclose(our_loss, true_loss) and np.allclose(our_grad, true_gradient)
if single_vector_inputs:
assert np.allclose(our_ihvp, true_ihvp)
def test_batch_vector_loss():
def generate_2d_array_shape():
return np.random.randint(2, 1000), np.random.randint(2, 20)
_test_loss(generate_2d_array_shape)
def test_single_vector_loss():
def generate_1d_array_shape():
return (np.random.randint(2, 20),)
_test_loss(generate_1d_array_shape)
def test_constant_length_single_vector():
# TF can compute hessians only with constant-shape inputs
length = np.random.randint(1, 10)
def fixed_1d_array_shape():
return (length,)
_test_loss(fixed_1d_array_shape)
def test_log_loss():
our_loss_fn = BinaryCrossEntropyLoss()
target_ph = tf.placeholder(tf.float64, (100,))
logits_ph = tf.placeholder(tf.float64, (100,))
loss_vector = -target_ph * tf.log(tf.sigmoid(logits_ph)) - (1 - target_ph) * tf.log(1 - tf.sigmoid(logits_ph))
loss = tf.reduce_sum(loss_vector)
grad = tf.gradients(loss, logits_ph)
seconders = tf.diag_part(tf.hessians(loss, logits_ph)[0])
s = tf.Session()
    for _ in range(100):
random_targets = (np.random.rand(100) > 0.5).astype(int)
random_logits = np.random.rand(100) * 2
fd = {target_ph: random_targets, logits_ph: random_logits}
true_losses = s.run(loss_vector, fd)
true_grad = s.run(grad, fd)
true_seconders = s.run(seconders, fd)
our_losses = our_loss_fn(random_targets, random_logits)
our_grad = our_loss_fn.gradient(random_targets, random_logits)
our_seconders = our_loss_fn.hessian(random_targets, random_logits)
assert np.allclose(true_losses, our_losses), (true_losses, our_losses)
assert np.allclose(true_grad, our_grad)
assert np.allclose(true_seconders, our_seconders), (true_seconders, our_seconders)
if __name__ == '__main__':
test_log_loss()
test_batch_vector_loss()
test_single_vector_loss()
test_constant_length_single_vector()
```
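The log-loss test above checks `BinaryCrossEntropyLoss` against TensorFlow; the closed forms being verified can be sketched directly in numpy (hedged, standalone):
```python
import numpy as np
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))
y = np.array([1.0, 0.0, 1.0])   # targets in {0, 1}
z = np.array([0.5, -0.2, 2.0])  # raw logits
p = sigmoid(z)
loss = -y * np.log(p) - (1 - y) * np.log(1 - p)  # per-example log loss
grad = p - y                                     # d loss / d logit
hess = p * (1 - p)                               # diagonal of the Hessian
print(loss, grad, hess)
```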
#### File: scripts/test/test_leaf_refit.py
```python
import os
import sys
import shutil
import argparse
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.stats import pearsonr
from scipy.stats import spearmanr
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../../')
import test_util
from tree_influence.explainers import LeafRefit
from tree_influence.explainers.parsers.util import LogisticLoss
from influence_boosting.influence.leaf_refit import CBOneStepLeafRefitEnsemble
from test_util import _get_model
from test_util import _get_test_data
from test_parser import compare_predictions
def get_cb_influence_original_method(model, X_train, y_train, X_test, y_test, kwargs):
"""
Compute influence values using the original source.
"""
update_set = kwargs['update_set']
k = update_set
if k == -1:
update_set = 'AllPoints'
elif k == 0:
update_set = 'SinglePoint'
else:
update_set = 'TopKLeaves'
# save CatBoost model
temp_dir = os.path.join('.catboost_info', 'leaf_refit')
temp_fp = os.path.join(temp_dir, 'cb.json')
os.makedirs(temp_dir, exist_ok=True)
model.save_model(temp_fp, format='json')
# initialize Leaf Influence
explainer = CBOneStepLeafRefitEnsemble(temp_fp,
X_train,
y_train,
k=k,
learning_rate=model.learning_rate_,
update_set=update_set)
buf = deepcopy(explainer)
influence = np.zeros((X_train.shape[0], X_test.shape[0]), dtype=np.float32)
loss_fn = LogisticLoss()
# compute influence for each training instance
for train_idx in tqdm(range(X_train.shape[0])):
explainer.fit(removed_point_idx=train_idx, destination_model=buf)
original_loss = loss_fn(y_test, explainer(X_test), raw=True)
new_loss = loss_fn(y_test, buf(X_test), raw=True)
influence[train_idx, :] = new_loss - original_loss
# clean up
shutil.rmtree('.catboost_info')
return influence
def test_local_influence_binary_original_vs_adapted(args, kwargs, n=10, show_plot=False):
print(f'\n***** test_local_influence_binary_original_vs_adapted *****')
args.model_type = 'binary'
X_train, X_test, y_train, y_test = _get_test_data(args, n_class=2)
test_ids = np.array([0])
X_test, y_test = X_train[test_ids], y_train[test_ids]
tree = _get_model(args)
tree = tree.fit(X_train, y_train)
explainer = LeafRefit(**kwargs).fit(tree, X_train, y_train)
# compute influences, shape=(no. train, no. test)
influences1 = explainer.get_local_influence(X_train[test_ids], y_train[test_ids])
print('finished influence 1...')
influences2 = get_cb_influence_original_method(tree, X_train, y_train, X_test, y_test, kwargs)
print('finished influence 2...')
for i, test_idx in enumerate(test_ids):
# influence #1
influence = influences1[:, i]
s_ids = np.argsort(np.abs(influence))[::-1]
test_pred = tree.predict_proba(X_train[[test_idx]])[0]
test_label = y_train[test_idx]
print(f'\nexplain y_train {test_idx}, pred: {test_pred}, target: {test_label}\n')
print('sorted indices (head):', s_ids[:n])
print('y_train (head, sorted):', y_train[s_ids][:n])
print('influence (head, sorted):', influence[s_ids][:n])
# influence #2
influence = influences2[:, i]
s_ids = np.argsort(np.abs(influence))[::-1]
test_pred = tree.predict_proba(X_train[[test_idx]])[0]
test_label = y_train[test_idx]
print(f'\nexplain y_train {test_idx}, pred: {test_pred}, target: {test_label}\n')
print('sorted indices (head):', s_ids[:n])
print('y_train (head, sorted):', y_train[s_ids][:n])
print('influence (head, sorted):', influence[s_ids][:n])
p1 = influences1[:, 0]
p2 = influences2[:, 0]
spearman = spearmanr(p1, p2)[0]
pearson = pearsonr(p1, p2)[0]
status = compare_predictions(p1, p2)
print('\nspearmanr:', spearman)
print('pearsonr:', pearson)
if show_plot:
plt.scatter(p1, p2)
plt.show()
print(f'\n{status}')
def main(args):
# explainer arguments
kwargs = {'update_set': args.update_set, 'n_jobs': args.n_jobs}
kwargs2 = {'update_set': args.update_set, 'atol': args.atol, 'n_jobs': args.n_jobs}
# tests
test_util.test_local_influence_regression(args, LeafRefit, 'LeafRefit', kwargs)
test_util.test_local_influence_binary(args, LeafRefit, 'LeafRefit', kwargs)
test_util.test_local_influence_multiclass(args, LeafRefit, 'LeafRefit', kwargs2)
if args.tree_type == 'cb':
test_local_influence_binary_original_vs_adapted(args, kwargs)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data settings
parser.add_argument('--n_train', type=int, default=100)
parser.add_argument('--n_test', type=int, default=100)
parser.add_argument('--n_local', type=int, default=2)
parser.add_argument('--n_class', type=int, default=3)
parser.add_argument('--n_feat', type=int, default=10)
# tree-ensemble settings
parser.add_argument('--n_tree', type=int, default=100)
parser.add_argument('--n_leaf', type=int, default=31)
parser.add_argument('--max_depth', type=int, default=7)
parser.add_argument('--tree_type', type=str, default='lgb')
parser.add_argument('--model_type', type=str, default='dummy')
parser.add_argument('--rs', type=int, default=1)
# explainer settings
parser.add_argument('--update_set', type=int, default=-1)
parser.add_argument('--atol', type=float, default=1e-2)
parser.add_argument('--n_jobs', type=int, default=1)
args = parser.parse_args()
main(args)
```
#### File: tree_influence/explainers/dshap.py
```python
import time
import joblib
import numpy as np
from sklearn.base import clone
from .base import Explainer
from .parsers import util
class DShap(Explainer):
"""
Explainer that approx. data Shapley values using
the TMC-Shapley algorithm.
Local-Influence Semantics
- Inf.(x_i, x_t) = Avg. L(y_i, f_{w/o x_i}(x_t)) - L(y_i, f(x_t))
over all possible permutations of the training data.
- Pos. value means a decrease in test loss (a.k.a. proponent, helpful).
- Neg. value means an increase in test loss (a.k.a. opponent, harmful).
Reference
- https://github.com/amiratag/DataShapley
Paper
- http://proceedings.mlr.press/v97/ghorbani19c.html
Note
- Supports both GBDTs and RFs.
- No validation set, we are computing loss on training or ONE test example;
thus, there is no average loss score and use of a `tolerance` parameter
for early truncation.
* However, we can use a hard truncation limit via `trunc_frac`.
"""
def __init__(self, trunc_frac=0.25, n_jobs=1,
check_every=100, random_state=1, logger=None):
"""
Input
trunc_frac: float, fraction of instances to compute marginals for per iter.
n_jobs: int, no. iterations / processes to run in parallel.
check_every: int, no. iterations to run between checking convergence.
random_state: int, random seed to enhance reproducibility.
logger: object, If not None, output to logger.
"""
self.trunc_frac = trunc_frac
self.n_jobs = n_jobs
self.check_every = check_every
self.random_state = random_state
self.logger = logger
def fit(self, model, X, y):
"""
- Convert model to internal standardized tree structures.
- Perform any initialization necessary for the chosen method.
Input
model: tree ensemble.
X: 2d array of train data.
y: 1d array of train targets.
"""
super().fit(model, X, y)
X, y = util.check_data(X, y, objective=self.model_.objective)
self.original_model_ = model
self.objective_ = self.model_.objective
self.n_class_ = self.model_.n_class_
self.X_train_ = X.copy()
self.y_train_ = y.copy()
self.loss_fn_ = util.get_loss_fn(self.objective_, self.n_class_, self.model_.factor)
self.random_loss_ = self._get_random_loss()
return self
def get_local_influence(self, X, y):
"""
- Compute influence of each training instance on the test loss.
Input
X: 2d array of test examples.
y: 1d array of test targets.
Return
- 2d array of shape=(no. train, X.shape[0]).
* Arrays are returned in the same order as the training data.
"""
X, y = util.check_data(X, y, objective=self.model_.objective)
return self._run_tmc_shapley(X_test=X, y_test=y, inf='local')
# private
def _run_tmc_shapley(self, X_test=None, y_test=None, batch=False, inf='global', stability_tol=0.1):
"""
- Run the TMC-Shapley algorithm until marginal contributions converge.
Return
- 2d array of average marginals, shape=(no. train, 1 or X_test.shape[0]).
* Arrays are returned in the same order as the training data.
"""
# extract parameters
original_model = self.original_model_
X_train = self.X_train_
y_train = self.y_train_
loss_fn = self.loss_fn_
random_loss = self.random_loss_
truncation_frac = self.trunc_frac
objective = self.objective_
n_class = self.n_class_
random_state = self.random_state
check_every = self.check_every
# select no. processes to run in parallel
if self.n_jobs == -1:
n_jobs = joblib.cpu_count()
else:
assert self.n_jobs >= 1
n_jobs = min(self.n_jobs, joblib.cpu_count())
start = time.time()
if self.logger:
self.logger.info('\n[INFO] computing approx. data Shapley values...')
self.logger.info(f'[INFO] no. cpus: {n_jobs:,}...')
# run TMC-Shapley alg. until convergence
with joblib.Parallel(n_jobs=n_jobs) as parallel:
# result container
if inf == 'local':
marginals = np.zeros((0, self.X_train_.shape[0], X_test.shape[0]), dtype=util.dtype_t)
result = np.zeros((self.X_train_.shape[0], X_test.shape[0]), dtype=util.dtype_t)
stable = np.zeros(X_test.shape[0], dtype=util.dtype_t)
else:
assert inf == 'global'
marginals = np.zeros((0, self.X_train_.shape[0], 1), dtype=util.dtype_t) # shape=(no. train, 1)
result = np.zeros((self.X_train_.shape[0], 1), dtype=util.dtype_t)
stable = np.zeros(1, dtype=util.dtype_t)
iteration = 0
while True:
# shape=(check_every, no. train, 1 or no. test)
results = parallel(joblib.delayed(_run_iteration)
(original_model, X_train, y_train, loss_fn,
random_loss, truncation_frac, objective, n_class,
random_state, iteration, i, X_test, y_test,
batch, inf) for i in range(check_every))
iteration += check_every
# synchronization barrier
marginals = np.vstack([marginals, results]) # shape=(check_every + (1), no. train, 1 or X.shape[0])
# check convergence
# - add up all marginals using axis=0, then divide by their iteration
# - diff. between last `check_every` runs and last run, divide by last run, average over all points
errors = np.zeros(marginals.shape[2], dtype=util.dtype_t) # shape=(X.shape[0],)
for i in range(marginals.shape[2]):
divisor = np.arange(1, iteration + 1)[-check_every:].reshape(-1, 1) # shape=(check_every, 1)
v = (np.cumsum(marginals[:, :, i], axis=0)[-check_every:] / divisor) # (check_every, no. train)
errors[i] = np.max(np.mean(np.abs(v - v[-1:]) / (np.abs(v[-1:]) + 1e-12), axis=1))
if self.logger:
cum_time = time.time() - start
self.logger.info(f'[INFO] Iter. {iteration:,}, stability: {errors}, cum. time: {cum_time:.3f}s')
# save last cum. sum of marginals without saving entire history
marginals = np.cumsum(marginals, axis=0)[-1:]
# marginals have converged
idxs = np.where(errors < stability_tol)[0] # shape=(1 or X_test.shape[0],)
if len(idxs) > 0:
stable[idxs] = 1.0
# update results
influence = marginals[-1] / iteration
result[:, idxs] = influence[:, idxs] # shape=(len(idxs), 1 or X_test.shape[0])
if np.all(stable):
break
return result
def _get_random_loss(self):
"""
Return the loss resulting from a random guess; a single float that depends only on the objective.
"""
if self.model_.objective == 'regression':
loss = 0
elif self.model_.objective == 'binary':
loss = -np.log(0.5)
else:
assert self.model_.objective == 'multiclass'
loss = -np.log(1.0 / self.model_.n_class_)
return loss
def _run_iteration(original_model, X_train, y_train, loss_fn, random_loss,
truncation_frac, objective, n_class, finished_iterations,
cur_iter, random_state, X_test=None, y_test=None, batch=False, inf='global'):
"""
- Run one iteration of the TMC-Shapley algorithm.
Return
- 1d array of marginals, shape=(no. train, 1) if global influence,
otherwise shape=(no. train, X_test.shape[0]).
Note
- Parallelizable method.
"""
rng = np.random.default_rng(random_state + finished_iterations + cur_iter)
# get order of training examples to add
train_idxs = rng.permutation(y_train.shape[0]) # shape=(no. train,)
train_idxs = train_idxs[:int(len(train_idxs) * truncation_frac)] # truncate examples
# result container
if inf == 'local':
marginals = np.zeros((X_train.shape[0], X_test.shape[0]), dtype=util.dtype_t)
else: # global influence
marginals = np.zeros((X_train.shape[0], 1), dtype=util.dtype_t) # shape=(no. train, 1)
# empty containers
X_batch = np.zeros((0,) + (X_train.shape[1],), dtype=util.dtype_t) # shape=(0, no. feature)
y_batch = np.zeros(0, dtype=np.int32) # shape=(0,)
old_loss = random_loss # tracker
old_model = None
# add training examples one at a time to measure the effect of each one
for train_idx in train_idxs:
# add example to batch of examples
X_batch = np.vstack([X_batch, X_train[train_idx].reshape(1, -1)])
y_batch = np.concatenate([y_batch, y_train[train_idx].reshape(1)])
# skip batches that do not have enough examples
if objective == 'regression' and X_batch.shape[0] < 2:
continue
elif objective == 'binary' and len(np.unique(y_batch)) < 2:
continue
elif objective == 'multiclass' and len(np.unique(y_batch)) < n_class:
continue
# train and score
model = clone(original_model).fit(X_batch, y_batch)
# local influence
if inf == 'local':
loss = _get_loss(loss_fn, model, objective, X=X_test, y=y_test) # shape=(X_test.shape[0],)
marginals[train_idx, :] = old_loss - loss # loss(x_t) w/o x_i - loss(x_t) w/ x_i
old_loss = loss
# global influence
elif inf == 'global' and X_test is not None and batch:
loss = _get_loss(loss_fn, model, objective, X=X_test, y=y_test, batch=batch)
marginals[train_idx, 0] = old_loss - loss # loss(X_test) w/o x_i - loss(X_test) w/ x_i
old_loss = loss
# self influence
else:
assert inf == 'global' and not batch
X_temp = X_train[[train_idx]]
y_temp = y_train[[train_idx]]
if old_model is None:
old_loss = random_loss
else:
old_loss = _get_loss(loss_fn, old_model, objective, X=X_temp, y=y_temp)
loss = _get_loss(loss_fn, model, objective, X=X_temp, y=y_temp)[0]
marginals[train_idx, 0] = old_loss - loss # loss(x_i) w/o x_i - loss(x_i) w/ x_i
old_model = model
return marginals
def _get_loss(loss_fn, model, objective, X, y, batch=False):
"""
Return
- 1d array of individual losses of shape=(X.shape[0],).
Note
- Parallelizable method.
"""
if objective == 'regression':
y_pred = model.predict(X) # shape=(X.shape[0])
elif objective == 'binary':
y_pred = model.predict_proba(X)[:, 1] # 1d array of pos. probabilities
else:
assert objective == 'multiclass'
y_pred = model.predict_proba(X) # shape=(X.shape[0], no. class)
result = loss_fn(y, y_pred, raw=False, batch=batch) # shape=(X.shape[0],) or single float
return result
```
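A minimal usage sketch for the `DShap` explainer above. The dataset, the model class, and the assumption that `DShap` is importable from `tree_influence.explainers` (as `LeafRefit` is in the test script earlier) are illustrative, not taken from the source:
```python
# Hypothetical usage sketch; dataset and model choice are assumptions.
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from tree_influence.explainers import DShap  # assumed export, mirroring LeafRefit above

rng = np.random.default_rng(0)
X_train = rng.normal(size=(200, 5))
y_train = (X_train[:, 0] + rng.normal(scale=0.5, size=200) > 0).astype(int)
X_test, y_test = X_train[:3], y_train[:3]

model = GradientBoostingClassifier(n_estimators=50).fit(X_train, y_train)

# fit() parses the ensemble; get_local_influence() runs TMC-Shapley until the
# marginals stabilize and returns shape=(no. train, no. test).
explainer = DShap(trunc_frac=0.25, n_jobs=1, check_every=100, random_state=1)
explainer.fit(model, X_train, y_train)
influence = explainer.get_local_influence(X_test, y_test)
print(influence.shape)  # (200, 3)
```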
#### File: tree_influence/explainers/leaf_sim.py
```python
import numpy as np
from .base import Explainer
from .parsers import util
class LeafSim(Explainer):
"""
Explainer that returns higher influence for train examples with
the same target and larger similarity in the
"weighted leaf path" tree-kernel space.
Local-Influence Semantics
- More positive values are assigned to train examples with
higher loss AND are in the same leaf as the test example.
Note
- Supports GBDTs and RFs.
- More efficient version of the TreeSim explainer using the
'weighted leaf path' tree kernel.
"""
def __init__(self, logger=None):
"""
Input
logger: object, If not None, output to logger.
"""
self.logger = logger
def fit(self, model, X, y):
"""
- Convert model to internal standardized tree structure.
- Precompute gradients and leaf indices for each x in X.
Input
model: tree ensemble.
X: 2d array of train examples.
y: 1d array of train targets.
"""
super().fit(model, X, y)
X, y = util.check_data(X, y, objective=self.model_.objective)
self.original_model_ = model
self.y_train_ = y.copy()
self.objective_ = self.model_.objective
self.n_train_ = X.shape[0]
self.n_boost_ = self.model_.n_boost_
self.n_class_ = self.model_.n_class_
self.model_.update_node_count(X)
self.train_leaves_ = self.model_.apply(X) # shape=(X.shape[0], no. boost, no. class)
self.leaf_counts_ = self.model_.get_leaf_counts() # shape=(no. boost, no. class)
self.leaf_weights_ = self.model_.get_leaf_weights(-2) # shape=(total no. leaves,)
return self
def get_local_influence(self, X, y):
"""
- Computes effect of each train example on the loss of the test example.
Input
X: 2d array of test data.
y: 1d array of test targets.
Return
- 2d array of shape=(no. train, X.shape[0]).
* Array is returned in the same order as the training data.
Note
- A train example is attributed influence on the test loss ONLY if it
lands in the same leaf(s) as the test example.
"""
X, y = util.check_data(X, y, objective=self.model_.objective)
# result container, shape=(no. train, X.shape[0])
influence = np.zeros((self.n_train_, X.shape[0]), dtype=util.dtype_t)
# get leaf indices each example arrives in
train_leaves = self.train_leaves_ # shape=(no. train, no. boost, no. class)
train_weights = self._get_leaf_weights(train_leaves) # shape=(no.train, no boost, no. class)
test_leaves = self.model_.apply(X) # shape=(X.shape[0], no. boost, no. class)
# compute attributions for each test example
for i in range(X.shape[0]):
mask = np.where(train_leaves == test_leaves[i], 1, 0) # shape=(no. train, no. boost, no. class)
weighted_mask = train_weights * mask # shape=(no. train, no. boost, no. class)
sim = np.sum(weighted_mask, axis=(1, 2)) # shape=(no. train,)
# determine if each train example helps or hurts test loss
if self.objective_ in ['binary', 'multiclass']:
sgn = np.where(self.y_train_ == y[i], 1.0, -1.0) # shape=(no. train,)
else: # if train and test targets both on same side of the prediction, then pos. influence
assert self.objective_ == 'regression'
pred = self.original_model_.predict(X[[i]])
test_sgn = 1.0 if pred >= y[i] else -1.0
train_sgn = np.where(self.y_train_ >= pred, 1.0, -1.0) # shape=(no. train,)
sgn = np.where(train_sgn != test_sgn, 1.0, -1.0)
influence[:, i] = sim * sgn
return influence
def _get_leaf_weights(self, leaf_idxs):
"""
Retrieve leaf weights given the leaf indices.
Input
leaf_idxs: Leaf indices, shape=(no. examples, no. boost, no. class)
Return
- 3d array of shape=(no. examples, no. boost, no. class)
"""
leaf_counts = self.leaf_counts_ # shape=(no. boost, no. class)
leaf_weights = self.leaf_weights_ # shape=(no. leaves across all trees,)
# result container
weights = np.zeros(leaf_idxs.shape, dtype=util.dtype_t) # shape=(no. examples, no. boost, no. class)
n_prev_leaves = 0
for b_idx in range(self.n_boost_):
for c_idx in range(self.n_class_):
leaf_count = leaf_counts[b_idx, c_idx]
weights[:, b_idx, c_idx] = leaf_weights[n_prev_leaves:][leaf_idxs[:, b_idx, c_idx]]
n_prev_leaves += leaf_count
return weights
```
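The core of `LeafSim` is the "weighted leaf path" similarity: a train example contributes only in trees where it falls in the same leaf as the test example, weighted per leaf. A small numpy sketch of that kernel (all shapes and values below are fabricated for illustration):
```python
# Toy illustration of the leaf-match similarity used above; arrays are made up.
import numpy as np

n_train, n_boost, n_class = 4, 3, 1
# leaf index each example lands in, per boosting iteration and class
train_leaves = np.array([[[0], [2], [1]],
                         [[0], [1], [1]],
                         [[1], [2], [0]],
                         [[0], [2], [1]]])          # shape=(n_train, n_boost, n_class)
test_leaves = np.array([[0], [2], [1]])             # shape=(n_boost, n_class)
train_weights = np.full((n_train, n_boost, n_class), 0.5)  # per-leaf weights (toy)

mask = np.where(train_leaves == test_leaves, 1, 0)  # 1 where train and test share a leaf
sim = np.sum(train_weights * mask, axis=(1, 2))     # shape=(n_train,)
print(sim)  # [1.5 1.  0.5 1.5]: examples 0 and 3 match every tree -> largest similarity
```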
#### File: tree_influence/explainers/loo.py
```python
import time
import joblib
import numpy as np
from sklearn.base import clone
from .base import Explainer
from .parsers import util
class LOO(Explainer):
"""
Leave-one-out influence explainer. Retrains the model
for each train example to get change in loss.
Local-Influence Semantics
- Inf.(x_i, x_t) := L(y_t, f_{w/o x_i}(x_t)) - L(y_t, f(x_t))
- Pos. value means removing x_i increases loss (adding x_i decreases loss, helpful).
- Neg. value means removing x_i decreases loss (adding x_i increases loss, harmful).
Note
- Supports both GBDTs and RFs.
- Supports parallelization.
"""
def __init__(self, n_jobs=-1, logger=None):
"""
Input
n_jobs: int, No. processes to run in parallel.
-1 means use the no. of available CPU cores.
logger: object, If not None, output to logger.
"""
self.n_jobs = n_jobs
self.logger = logger
def fit(self, model, X, y):
"""
- Fit one model for each training example,
with that training example removed.
Note
- Very memory intensive to save all models,
may have to switch to a streaming approach.
Input
model: tree ensemble.
X: training data.
y: training targets.
"""
super().fit(model, X, y)
X, y = util.check_data(X, y, objective=self.model_.objective)
self.n_class_ = self.model_.n_class_
self.loss_fn_ = util.get_loss_fn(self.model_.objective, self.model_.n_class_, self.model_.factor)
self.X_train_ = X.copy()
self.y_train_ = y.copy()
self.objective_ = self.model_.objective
# select no. processes to run in parallel
if self.n_jobs == -1:
n_jobs = joblib.cpu_count()
else:
assert self.n_jobs >= 1
n_jobs = min(self.n_jobs, joblib.cpu_count())
self.n_jobs_ = n_jobs
self.original_model_ = model
return self
def get_local_influence(self, X, y, verbose=1):
"""
- Compute influence of each training instance on each test loss.
Input
X: 2d array of test data.
y: 1d array of test targets
Return
- 2d array of shape=(no. train, X.shape[0]).
* Arrays are returned in the same order as the training data.
"""
X, y = util.check_data(X, y, objective=self.model_.objective)
return self._run_loo(X_test=X, y_test=y, inf='local')
# private
def _run_loo(self, X_test=None, y_test=None, batch=False, inf='global'):
"""
- Retrain model for each train example and measure change in train/test loss.
Return
- 2d array of average marginals, shape=(no. train, 1 or X_test.shape[0]).
* Arrays are returned in the same order as the training data.
"""
X_train = self.X_train_
y_train = self.y_train_
loss_fn = self.loss_fn_
n_jobs = self.n_jobs_
original_model = self.original_model_
objective = self.objective_
start = time.time()
if self.logger:
self.logger.info('\n[INFO] computing LOO values...')
self.logger.info(f'[INFO] no. cpus: {n_jobs:,}...')
# fit each model in parallel
with joblib.Parallel(n_jobs=n_jobs) as parallel:
# result container
if inf == 'local':
original_loss = _get_loss(loss_fn, original_model, objective, X=X_test, y=y_test) # (X_test.shape[0],)
influence = np.zeros((0, X_test.shape[0]), dtype=util.dtype_t)
elif inf == 'global' and X_test is not None and batch: # global expected influence
original_loss = _get_loss(loss_fn, original_model, objective, X=X_test, y=y_test, batch=batch) # float
influence = np.zeros((0, 1), dtype=util.dtype_t)
else:
assert inf == 'global' and not batch
original_loss = _get_loss(loss_fn, original_model, objective, X=X_train, y=y_train) # (no. train,)
influence = np.zeros((0, 1), dtype=util.dtype_t)
# trackers
fits_completed = 0
fits_remaining = X_train.shape[0]
# get number of fits to perform for this iteration
while fits_remaining > 0:
n = min(100, fits_remaining)
results = parallel(joblib.delayed(_run_iteration)
(original_model, X_train, y_train, train_idx, X_test, y_test,
loss_fn, objective, original_loss,
batch, inf) for train_idx in range(fits_completed,
fits_completed + n))
# synchronization barrier
results = np.vstack(results) # shape=(n, 1 or X_test.shape[0])
influence = np.vstack([influence, results])
fits_completed += n
fits_remaining -= n
if self.logger:
cum_time = time.time() - start
self.logger.info(f'[INFO] fits: {fits_completed:,} / {X_train.shape[0]:,}'
f', cum. time: {cum_time:.3f}s')
return influence
def _run_iteration(model, X_train, y_train, train_idx, X_test, y_test,
loss_fn, objective, original_loss, batch, inf):
"""
Fit model after leaving out the specified `train_idx` train example.
Return
- 1d array of shape=(X_test.shape[0],) or single float.
Note
- Parallelizable method.
"""
new_X = np.delete(X_train, train_idx, axis=0)
new_y = np.delete(y_train, train_idx)
new_model = clone(model).fit(new_X, new_y)
start = time.time()
if inf == 'local':
loss = _get_loss(loss_fn, new_model, objective, X=X_test, y=y_test) # shape=(X_test.shape[0],)
influence = loss - original_loss
elif inf == 'global' and X_test is not None and batch:
loss = _get_loss(loss_fn, new_model, objective, X=X_test, y=y_test, batch=True) # single float
influence = np.array([loss - original_loss])
else:
assert inf == 'global' and not batch
X_temp = X_train[[train_idx]]
y_temp = y_train[[train_idx]]
loss = _get_loss(loss_fn, new_model, objective, X=X_temp, y=y_temp) # shape=(1,)
influence = loss - original_loss[train_idx]
inf_time = time.time() - start
return influence
def _get_loss(loss_fn, model, objective, X, y, batch=False):
"""
Return
- 1d array of individual losses of shape=(X.shape[0],),
unless batch=True, then return a single float.
Note
- Parallelizable method.
"""
if objective == 'regression':
y_pred = model.predict(X) # shape=(X.shape[0])
elif objective == 'binary':
y_pred = model.predict_proba(X)[:, 1] # 1d array of pos. probabilities, shape=(X.shape[0],)
else:
assert objective == 'multiclass'
y_pred = model.predict_proba(X) # shape=(X.shape[0], no. class)
result = loss_fn(y, y_pred, raw=False, batch=batch) # shape=(X.shape[0],) or single float
return result
```
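The LOO semantics above can be spelled out directly: retrain without one example and compare test losses. A compact sketch of that definition under an assumed scikit-learn model and synthetic data (this bypasses the class internals on purpose):
```python
# Direct leave-one-out influence for a single train example; purely illustrative.
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import log_loss

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 4))
y = (X[:, 0] > 0).astype(int)
X_test, y_test = X[:5], y[:5]

full = GradientBoostingClassifier(n_estimators=25).fit(X, y)
base_loss = log_loss(y_test, full.predict_proba(X_test)[:, 1], labels=[0, 1])

train_idx = 0
reduced = GradientBoostingClassifier(n_estimators=25).fit(np.delete(X, train_idx, axis=0),
                                                          np.delete(y, train_idx))
loo_loss = log_loss(y_test, reduced.predict_proba(X_test)[:, 1], labels=[0, 1])

influence = loo_loss - base_loss  # > 0: removing the example increases loss (it was helpful)
print(influence)
```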
#### File: tree_influence/explainers/loss.py
```python
from .base import Explainer
from .parsers import util
class Loss(Explainer):
"""
Explainer that returns higher influence
for train examples with larger loss.
Global-Influence Semantics
- More positive values are assigned to train examples with higher loss.
Note
- Supports GBDTs and RFs.
"""
def __init__(self, logger=None):
"""
Input
logger: object, If not None, output to logger.
"""
self.logger = logger
def fit(self, model, X, y):
"""
Input
model: tree ensemble.
X: 2d array of train examples.
y: 1d array of train targets.
"""
super().fit(model, X, y)
X, y = util.check_data(X, y, objective=self.model_.objective)
self.X_train_ = X.copy()
self.y_train_ = y.copy()
self.original_model_ = model
self.n_class_ = self.model_.n_class_
self.objective_ = self.model_.objective
self.loss_fn_ = util.get_loss_fn(self.objective_, self.n_class_, self.model_.factor)
return self
def get_self_influence(self, X, y, batch_size=None):
"""
Input
X: 2d array of test data.
y: 1d array of test targets.
batch_size: Unused, exists for compatibility.
Return
- 1d array of shape=(no. train,).
* Arrays are returned in the same order as the training data.
"""
return self._get_loss(self.loss_fn_, self.original_model_, self.objective_,
self.X_train_, self.y_train_)
def get_local_influence(self, X, y):
"""
- Compute influence of each training instance on the test loss.
Input
X: 2d array of test examples.
y: 1d array of test targets.
* Could be the actual label or the predicted label depending on the explainer.
Return
- 2d array of shape=(no. train, X.shape[0]).
* Arrays are returned in the same order as the training data.
"""
raise ValueError('get_local_influence not implemented for Loss explainer.')
# private
def _get_loss(self, loss_fn, model, objective, X, y, batch=False):
"""
Return
- 1d array of individual losses of shape=(X.shape[0],),
unless batch=True, then return a single float.
Note
- Parallelizable method.
"""
if objective == 'regression':
y_pred = model.predict(X) # shape=(X.shape[0])
elif objective == 'binary':
y_pred = model.predict_proba(X)[:, 1] # 1d array of pos. probabilities
else:
assert objective == 'multiclass'
y_pred = model.predict_proba(X) # shape=(X.shape[0], no. class)
result = loss_fn(y, y_pred, raw=False, batch=batch) # shape=(X.shape[0],) or single float
return result
```
#### File: tree_influence/explainers/subsample.py
```python
import time
import joblib
import numpy as np
from sklearn.base import clone
from .base import Explainer
from .parsers import util
n_influence = 0
influence_time = 0
class SubSample(Explainer):
"""
Explainer that approximates data Shapley values. Trains many models on different
subsets of the data to obtain expected marginal influence values.
Local-Influence Semantics (i.e. influence)
- Inf.(x_i, x_t) := E[L(y_t, f_{w/o x_i}(x_t))] - E[L(y_t, f(x_t))]
- Pos. value means removing x_i increases loss (adding x_i decreases loss, helpful).
- Neg. value means removing x_i decreases loss (adding x_i increases loss, harmful).
Note
- Supports both GBDTs and RFs.
- Supports parallelization.
"""
def __init__(self, sub_frac=0.7, n_iter=4000, n_jobs=1, random_state=1, logger=None):
"""
Input
sub_frac: float, Fraction of train data to use for training.
n_iter: int, No. sub-models to train.
n_jobs: int, No. processes to run in parallel.
-1 means use the no. of available CPU cores.
random_state: int, Seed for reproducibility.
logger: object, If not None, output to logger.
"""
self.sub_frac = sub_frac
self.n_iter = n_iter
self.n_jobs = n_jobs
self.random_state = random_state
self.logger = logger
def fit(self, model, X, y):
"""
- Setup.
Input
model: tree ensemble.
X: training data.
y: training targets.
"""
super().fit(model, X, y)
X, y = util.check_data(X, y, objective=self.model_.objective)
self.n_class_ = self.model_.n_class_
self.loss_fn_ = util.get_loss_fn(self.model_.objective, self.model_.n_class_, self.model_.factor)
self.X_train_ = X.copy()
self.y_train_ = y.copy()
self.objective_ = self.model_.objective
# select no. processes to run in parallel
if self.n_jobs == -1:
n_jobs = joblib.cpu_count()
else:
assert self.n_jobs >= 1
n_jobs = min(self.n_jobs, joblib.cpu_count())
self.n_jobs_ = n_jobs
self.original_model_ = model
return self
def get_local_influence(self, X, y, verbose=1):
"""
- Compute influence of each training instance on each test loss.
Input
X: 2d array of test data.
y: 1d array of test targets
Return
- 2d array of shape=(no. train, X.shape[0]).
* Arrays are returned in the same order as the training data.
"""
X, y = util.check_data(X, y, objective=self.model_.objective)
return self._run_subsample(X_test=X, y_test=y)
# private
def _run_subsample(self, X_test=None, y_test=None):
"""
- Train multiple models on different training subsets
and measure expected change in train/test loss.
Return
- 2d array of average marginals, shape=(no. train, 1 or X_test.shape[0]).
* Arrays are returned in the same order as the training data.
"""
X_train = self.X_train_
y_train = self.y_train_
loss_fn = self.loss_fn_
n_jobs = self.n_jobs_
original_model = self.original_model_
objective = self.objective_
n_iter = self.n_iter
sub_frac = self.sub_frac
random_state = self.random_state
start = time.time()
if self.logger:
self.logger.info('\n[INFO] computing influence values...')
self.logger.info(f'[INFO] no. cpus: {n_jobs:,}...')
# fit each model in parallel
with joblib.Parallel(n_jobs=n_jobs) as parallel:
# result containers
in_loss = np.zeros((X_train.shape[0], X_test.shape[0]), dtype=util.dtype_t)
out_loss = np.zeros((X_train.shape[0], X_test.shape[0]), dtype=util.dtype_t)
in_count = np.zeros(X_train.shape[0], dtype=np.int32)
out_count = np.zeros(X_train.shape[0], dtype=np.int32)
# trackers
fits_completed = 0
fits_remaining = n_iter
# get number of fits to perform for this iteration
while fits_remaining > 0:
n = min(100, fits_remaining)
results = parallel(joblib.delayed(_run_iteration)
(original_model, X_train, y_train, X_test, y_test,
loss_fn, objective, sub_frac,
random_state + i) for i in range(fits_completed,
fits_completed + n))
# synchronization barrier
for losses, in_idxs, _ in results: # third element is the per-fit loss-evaluation time
out_idxs = np.setdiff1d(np.arange(X_train.shape[0]), in_idxs)
for test_idx, loss in enumerate(losses):
in_loss[in_idxs, test_idx] += loss
out_loss[out_idxs, test_idx] += loss
in_count[in_idxs] += 1
out_count[out_idxs] += 1
fits_completed += n
fits_remaining -= n
if self.logger:
cum_time = time.time() - start
self.logger.info(f'[INFO] fits: {fits_completed:>7,} / {n_iter:,}'
f', cum. time: {cum_time:.3f}s')
# compute difference in expected losses
influence = (out_loss / out_count.reshape(-1, 1)) - (in_loss / in_count.reshape(-1, 1))
return influence
def _run_iteration(model, X_train, y_train, X_test, y_test, loss_fn, objective, sub_frac, seed):
"""
Fit a model on a random subset of the training data and compute per-example test losses.
Return
- Tuple of (1d loss array of shape=(X_test.shape[0],), indices of the subset used, loss-evaluation time).
Note
- Parallelizable method.
"""
rng = np.random.default_rng(seed)
idxs = rng.choice(X_train.shape[0], size=int(X_train.shape[0] * sub_frac), replace=False)
new_X_train = X_train[idxs].copy()
new_y_train = y_train[idxs].copy()
new_model = clone(model).fit(new_X_train, new_y_train)
start = time.time()
loss = _get_loss(loss_fn, new_model, objective, X=X_test, y=y_test) # shape=(X_test.shape[0],)
inf_time = time.time() - start
print(inf_time)
global n_influence
global influence_time
influence_time += inf_time
n_influence += 1
if n_influence == 5:
print(f'\n{influence_time / 5.0}\n')
exit(0)
return loss, idxs, inf_time
def _get_loss(loss_fn, model, objective, X, y, batch=False):
"""
Return
- 1d array of individual losses of shape=(X.shape[0],),
unless batch=True, then return a single float.
Note
- Parallelizable method.
"""
if objective == 'regression':
y_pred = model.predict(X) # shape=(X.shape[0])
elif objective == 'binary':
y_pred = model.predict_proba(X)[:, 1] # 1d array of pos. probabilities, shape=(X.shape[0],)
else:
assert objective == 'multiclass'
y_pred = model.predict_proba(X) # shape=(X.shape[0], no. class)
result = loss_fn(y, y_pred, raw=False, batch=batch) # shape=(X.shape[0],) or single float
return result
```
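The estimator above reduces to a difference of conditional averages: the mean test loss over sub-models trained without x_i minus the mean over sub-models trained with it. A tiny numpy sketch of that bookkeeping (the losses and subset memberships below are fabricated):
```python
# Toy in/out accumulation mirroring the influence computation above; data are made up.
import numpy as np

n_train = 5
losses = np.array([0.9, 0.4, 0.5, 0.7])  # one test loss per sub-model
subsets = [np.array([0, 1, 2]), np.array([2, 3, 4]),
           np.array([0, 3, 4]), np.array([1, 2, 4])]  # training subset per sub-model

in_loss = np.zeros(n_train); out_loss = np.zeros(n_train)
in_count = np.zeros(n_train); out_count = np.zeros(n_train)
for loss, in_idxs in zip(losses, subsets):
    out_idxs = np.setdiff1d(np.arange(n_train), in_idxs)
    in_loss[in_idxs] += loss;   in_count[in_idxs] += 1
    out_loss[out_idxs] += loss; out_count[out_idxs] += 1

# > 0 means the example tends to lower the expected test loss when included
influence = out_loss / out_count - in_loss / in_count
print(influence)
```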
#### File: tree_influence/explainers/target.py
```python
import numpy as np
from .base import Explainer
from .parsers import util
class Target(Explainer):
"""
Explainer that randomly returns higher influence
for train examples with similar targets to the test examples.
Local-Influence Semantics
- More positive values are assigned to train examples
with similar targets to the test examples.
Note
- Supports GBDTs and RFs.
"""
def __init__(self, random_state=1, logger=None):
"""
Input
random_state: int, random seed to enhance reproducibility.
logger: object, If not None, output to logger.
"""
self.random_state = random_state
self.logger = logger
def fit(self, model, X, y):
"""
Input
model: tree ensemble.
X: 2d array of train examples.
y: 1d array of train targets.
"""
super().fit(model, X, y)
X, y = util.check_data(X, y, objective=self.model_.objective)
self.X_train_ = X.copy()
self.y_train_ = y.copy()
self.n_class_ = self.model_.n_class_
self.objective_ = self.model_.objective
return self
def get_local_influence(self, X, y):
"""
Input
X: 2d array of test data.
y: 1d array of test targets.
Return
- 2d array of shape=(no. train, X.shape[0]).
* Array is returned in the same order as the training data.
"""
X, y = util.check_data(X, y, objective=self.model_.objective)
influence = np.zeros((self.X_train_.shape[0], X.shape[0]), dtype=util.dtype_t)
for i in range(X.shape[0]):
influence[:, i] = self._get_influence(target=y[i], seed=i + 1)
return influence
# private
def _get_influence(self, target, seed=1):
"""
Input
seed: seeds the random number generator.
Return
- 1d array of shape=(no. train,).
* Arrays are returned in the same order as the training data.
"""
rng = np.random.default_rng(self.random_state + seed)
influence = np.zeros(self.X_train_.shape[0], dtype=util.dtype_t)
if self.objective_ in ['binary', 'multiclass']:
target_idxs = np.where(self.y_train_ == target)[0]
non_target_idxs = np.where(self.y_train_ != target)[0]
influence[target_idxs] = rng.uniform(0.0, 1.0, size=len(target_idxs))
influence[non_target_idxs] = rng.uniform(-1.0, 0.0, size=len(non_target_idxs))
# assigns more positive values to examples with targets close to the test target
else:
assert self.objective_ == 'regression'
diffs = np.abs(self.y_train_ - target)
influence[:] = (1.0 / diffs) + rng.normal(0, np.std(diffs))
return influence
``` |
{
"source": "jjbrosnan/deephaven-core",
"score": 3
} |
#### File: server/tests/test_partitioned_table.py
```python
import unittest
from deephaven.filters import Filter
from deephaven import read_csv, DHError, new_table
from tests.testbase import BaseTestCase
class PartitionedTableTestCase(BaseTestCase):
def setUp(self):
self.test_table = read_csv("tests/data/test_table.csv")
self.partitioned_table = self.test_table.partition_by(by=["c", "e"])
def tearDown(self):
self.partitioned_table = None
self.test_table = None
def test_table(self):
self.assertIsNotNone(self.partitioned_table.table)
def test_key_columns(self):
self.assertEqual(self.partitioned_table.key_columns, ["c", "e"])
def test_constituent_column(self):
self.assertEqual(self.partitioned_table.constituent_column, "__CONSTITUENT__")
def test_unique_keys(self):
self.assertTrue(self.partitioned_table.unique_keys)
def test_constituent_change_permitted(self):
self.assertFalse(self.partitioned_table.constituent_changes_permitted)
def test_constituent_table_columns(self):
self.assertEqual(self.test_table.columns, self.partitioned_table.constituent_table_columns)
def test_merge(self):
t = self.partitioned_table.merge()
self.assert_table_equals(t, self.test_table)
def test_filter(self):
conditions = ["c < 0", "e > 0"]
filters = Filter.from_(conditions)
pt = self.partitioned_table.filter(filters)
self.assertIsNotNone(pt)
filters = ["c < 0", "e > 0"]
pt = self.partitioned_table.filter(filters)
self.assertIsNotNone(pt)
with self.assertRaises(DHError) as cm:
conditions = ["a > 100", "b < 1000"]
filters = Filter.from_(conditions)
pt = self.partitioned_table.filter(filters)
self.assertIn("RuntimeError", str(cm.exception))
def test_sort(self):
new_pt = self.partitioned_table.sort(order_by=["c"])
self.assertIsNotNone(new_pt)
with self.assertRaises(DHError) as cm:
new_pt = self.partitioned_table.sort(order_by=["a", "b"])
self.assertIn("NoSuchColumnException", str(cm.exception))
with self.assertRaises(DHError) as cm:
new_pt = self.partitioned_table.sort(order_by=self.partitioned_table.constituent_column)
self.assertIn("Unsupported sort on constituent column", str(cm.exception))
def test_get_constituent(self):
keys = [917, 167]
self.assertIsNotNone(self.partitioned_table.get_constituent(keys))
from deephaven.column import string_col, int_col, double_col
houses = new_table([
string_col("HomeType", ["Colonial", "Contemporary", "Contemporary", "Condo", "Colonial", "Apartment"]),
int_col("HouseNumber", [1, 3, 4, 15, 4, 9]),
string_col("StreetName", ["Test Drive", "Test Drive", "Test Drive", "Deephaven Road", "Community Circle",
"Community Circle"]),
int_col("SquareFeet", [2251, 1914, 4266, 1280, 3433, 981]),
int_col("Price", [450000, 400000, 1250000, 300000, 600000, 275000]),
double_col("LotSizeAcres", [0.41, 0.26, 1.88, 0.11, 0.95, 0.10])
])
houses_by_type = houses.partition_by("HomeType")
colonial_homes = houses_by_type.get_constituent("Colonial")
self.assertIsNotNone(colonial_homes)
def test_constituents(self):
constituent_tables = self.partitioned_table.constituent_tables
self.assertGreater(len(constituent_tables), 0)
if __name__ == '__main__':
unittest.main()
``` |
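A condensed sketch of the two-key lookup pattern the test above relies on; the CSV path and key values are the test's own fixtures, and the constituent is returned only if a partition with exactly those key values exists:
```python
# Minimal sketch of partitioning on two key columns and fetching one constituent.
from deephaven import read_csv

source = read_csv("tests/data/test_table.csv")
by_key = source.partition_by(by=["c", "e"])

# keys are matched positionally against the key columns ["c", "e"]
constituent = by_key.get_constituent([917, 167])
merged_back = by_key.merge()  # recombines all constituents into one table
```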
{
"source": "jjbskir/connexion",
"score": 2
} |
#### File: connexion/tests/test_cli.py
```python
import logging
from click.testing import CliRunner
import connexion
import pytest
from conftest import FIXTURES_FOLDER
from connexion.cli import main
from connexion.exceptions import ResolverError
from mock import MagicMock
@pytest.fixture()
def mock_app_run(monkeypatch):
test_server = MagicMock(wraps=connexion.FlaskApp(__name__))
test_server.run = MagicMock(return_value=True)
test_app = MagicMock(return_value=test_server)
monkeypatch.setattr('connexion.cli.connexion.FlaskApp', test_app)
return test_app
@pytest.fixture()
def expected_arguments():
"""
Default argument values used by the cli to call `connexion.App`.
"""
return {
"swagger_json": True,
"swagger_ui": True,
"swagger_path": None,
"swagger_url": None,
"auth_all_paths": False,
"debug": False
}
@pytest.fixture()
def spec_file():
return str(FIXTURES_FOLDER / 'simple/swagger.yaml')
def test_print_version():
runner = CliRunner()
result = runner.invoke(main, ['--version'], catch_exceptions=False)
assert "Connexion {}".format(connexion.__version__) in result.output
def test_run_missing_spec():
runner = CliRunner()
result = runner.invoke(main, ['run'], catch_exceptions=False)
assert "Missing argument" in result.output
def test_run_simple_spec(mock_app_run, spec_file):
default_port = 5000
runner = CliRunner()
runner.invoke(main, ['run', spec_file], catch_exceptions=False)
app_instance = mock_app_run()
app_instance.run.assert_called_with(
port=default_port,
host=None,
server=None,
debug=False)
def test_run_spec_with_host(mock_app_run, spec_file):
default_port = 5000
runner = CliRunner()
runner.invoke(main, ['run', spec_file, '--host', 'custom.host'], catch_exceptions=False)
app_instance = mock_app_run()
app_instance.run.assert_called_with(
port=default_port,
host='custom.host',
server=None,
debug=False)
def test_run_no_options_all_default(mock_app_run, expected_arguments, spec_file):
runner = CliRunner()
runner.invoke(main, ['run', spec_file], catch_exceptions=False)
mock_app_run.assert_called_with('connexion.cli', **expected_arguments)
def test_run_using_option_hide_spec(mock_app_run, expected_arguments,
spec_file):
runner = CliRunner()
runner.invoke(main, ['run', spec_file, '--hide-spec'],
catch_exceptions=False)
expected_arguments['swagger_json'] = False
mock_app_run.assert_called_with('connexion.cli', **expected_arguments)
def test_run_using_option_hide_console_ui(mock_app_run, expected_arguments,
spec_file):
runner = CliRunner()
runner.invoke(main, ['run', spec_file, '--hide-console-ui'],
catch_exceptions=False)
expected_arguments['swagger_ui'] = False
mock_app_run.assert_called_with('connexion.cli', **expected_arguments)
def test_run_using_option_console_ui_from(mock_app_run, expected_arguments,
spec_file):
user_path = '/some/path/here'
runner = CliRunner()
runner.invoke(main, ['run', spec_file, '--console-ui-from', user_path],
catch_exceptions=False)
expected_arguments['swagger_path'] = user_path
mock_app_run.assert_called_with('connexion.cli', **expected_arguments)
def test_run_using_option_console_ui_url(mock_app_run, expected_arguments,
spec_file):
user_url = '/console_ui_test'
runner = CliRunner()
runner.invoke(main, ['run', spec_file, '--console-ui-url', user_url],
catch_exceptions=False)
expected_arguments['swagger_url'] = user_url
mock_app_run.assert_called_with('connexion.cli', **expected_arguments)
def test_run_using_option_auth_all_paths(mock_app_run, expected_arguments,
spec_file):
runner = CliRunner()
runner.invoke(main, ['run', spec_file, '--auth-all-paths'],
catch_exceptions=False)
expected_arguments['auth_all_paths'] = True
mock_app_run.assert_called_with('connexion.cli', **expected_arguments)
def test_run_in_debug_mode(mock_app_run, expected_arguments, spec_file,
monkeypatch):
logging_config = MagicMock(name='connexion.cli.logging.basicConfig')
monkeypatch.setattr('connexion.cli.logging.basicConfig',
logging_config)
runner = CliRunner()
runner.invoke(main, ['run', spec_file, '-d'], catch_exceptions=False)
logging_config.assert_called_with(level=logging.DEBUG)
expected_arguments['debug'] = True
mock_app_run.assert_called_with('connexion.cli', **expected_arguments)
def test_run_in_very_verbose_mode(mock_app_run, expected_arguments, spec_file,
monkeypatch):
logging_config = MagicMock(name='connexion.cli.logging.basicConfig')
monkeypatch.setattr('connexion.cli.logging.basicConfig',
logging_config)
runner = CliRunner()
runner.invoke(main, ['run', spec_file, '-vv'], catch_exceptions=False)
logging_config.assert_called_with(level=logging.DEBUG)
expected_arguments['debug'] = True
mock_app_run.assert_called_with('connexion.cli', **expected_arguments)
def test_run_in_verbose_mode(mock_app_run, expected_arguments, spec_file,
monkeypatch):
logging_config = MagicMock(name='connexion.cli.logging.basicConfig')
monkeypatch.setattr('connexion.cli.logging.basicConfig',
logging_config)
runner = CliRunner()
runner.invoke(main, ['run', spec_file, '-v'], catch_exceptions=False)
logging_config.assert_called_with(level=logging.INFO)
expected_arguments['debug'] = False
mock_app_run.assert_called_with('connexion.cli', **expected_arguments)
def test_run_using_option_base_path(mock_app_run, expected_arguments,
spec_file):
runner = CliRunner()
runner.invoke(main, ['run', spec_file, '--base-path', '/foo'],
catch_exceptions=False)
expected_arguments = dict(base_path='/foo',
resolver_error=None,
validate_responses=False,
strict_validation=False)
mock_app_run().add_api.assert_called_with(spec_file, **expected_arguments)
def test_run_unimplemented_operations_and_stub(mock_app_run):
runner = CliRunner()
spec_file = str(FIXTURES_FOLDER / 'missing_implementation/swagger.yaml')
with pytest.raises(AttributeError):
runner.invoke(main, ['run', spec_file], catch_exceptions=False)
# yet can be run with --stub option
result = runner.invoke(main, ['run', spec_file, '--stub'], catch_exceptions=False)
assert result.exit_code == 0
spec_file = str(FIXTURES_FOLDER / 'module_does_not_exist/swagger.yaml')
with pytest.raises(ImportError):
runner.invoke(main, ['run', spec_file], catch_exceptions=False)
# yet can be run with --stub option
result = runner.invoke(main, ['run', spec_file, '--stub'], catch_exceptions=False)
assert result.exit_code == 0
def test_run_unimplemented_operations_and_mock(mock_app_run):
runner = CliRunner()
spec_file = str(FIXTURES_FOLDER / 'missing_implementation/swagger.yaml')
with pytest.raises(AttributeError):
runner.invoke(main, ['run', spec_file], catch_exceptions=False)
# yet can be run with --mock option
result = runner.invoke(main, ['run', spec_file, '--mock=all'], catch_exceptions=False)
assert result.exit_code == 0
def test_run_with_wsgi_containers(mock_app_run, spec_file):
runner = CliRunner()
# missing gevent
result = runner.invoke(main,
['run', spec_file, '-w', 'gevent'],
catch_exceptions=False)
assert 'gevent library is not installed' in result.output
assert result.exit_code == 1
# missing tornado
result = runner.invoke(main,
['run', spec_file, '-w', 'tornado'],
catch_exceptions=False)
assert 'tornado library is not installed' in result.output
assert result.exit_code == 1
# using flask
result = runner.invoke(main,
['run', spec_file, '-w', 'flask'],
catch_exceptions=False)
assert result.exit_code == 0
```
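For orientation, the CLI exercised above is a thin wrapper over `connexion.FlaskApp`. A minimal programmatic sketch of the equivalent setup, mirroring the arguments the CLI passes in the tests above; the spec filename is a placeholder and must point to a spec with resolvable operationIds:
```python
# Rough programmatic equivalent of `connexion run swagger.yaml`; 'swagger.yaml' is a placeholder.
import connexion

app = connexion.FlaskApp(__name__, swagger_json=True, swagger_ui=True,
                         swagger_path=None, swagger_url=None,
                         auth_all_paths=False, debug=False)
app.add_api('swagger.yaml', strict_validation=False, validate_responses=False)
app.run(port=5000, host=None, server=None, debug=False)
```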
#### File: connexion/tests/test_validation.py
```python
import json
import flask
from connexion.apis.flask_api import FlaskApi
from connexion.decorators.validation import ParameterValidator
# we are using "mock" module here for Py 2.7 support
from mock import MagicMock
def test_parameter_validator(monkeypatch):
request = MagicMock(name='request')
request.args = {}
request.headers = {}
request.params = {}
app = MagicMock(name='app')
app.response_class = flask.Response
monkeypatch.setattr('flask.request', request)
monkeypatch.setattr('flask.current_app', app)
def orig_handler(*args, **kwargs):
return 'OK'
params = [{'name': 'p1', 'in': 'path', 'type': 'integer', 'required': True},
{'name': 'h1', 'in': 'header', 'type': 'string', 'enum': ['a', 'b']},
{'name': 'q1', 'in': 'query', 'type': 'integer', 'maximum': 3},
{'name': 'a1', 'in': 'query', 'type': 'array', 'minItems': 2, 'maxItems': 3,
'items': {'type': 'integer', 'minimum': 0}}]
validator = ParameterValidator(params, FlaskApi)
handler = validator(orig_handler)
kwargs = {'query': {}, 'headers': {}}
request = MagicMock(path_params={}, **kwargs)
assert json.loads(handler(request).data.decode())['detail'] == "Missing path parameter 'p1'"
request = MagicMock(path_params={'p1': '123'}, **kwargs)
assert handler(request) == 'OK'
request = MagicMock(path_params={'p1': ''}, **kwargs)
assert json.loads(handler(request).data.decode())['detail'] == "Wrong type, expected 'integer' for path parameter 'p1'"
request = MagicMock(path_params={'p1': 'foo'}, **kwargs)
assert json.loads(handler(request).data.decode())['detail'] == "Wrong type, expected 'integer' for path parameter 'p1'"
request = MagicMock(path_params={'p1': '1.2'}, **kwargs)
assert json.loads(handler(request).data.decode())['detail'] == "Wrong type, expected 'integer' for path parameter 'p1'"
request = MagicMock(path_params={'p1': 1}, query={'q1': '4'}, headers={})
assert json.loads(handler(request).data.decode())['detail'].startswith('4 is greater than the maximum of 3')
request = MagicMock(path_params={'p1': 1}, query={'q1': '3'}, headers={})
assert handler(request) == 'OK'
request = MagicMock(path_params={'p1': 1}, query={'a1': "1,2"}, headers={})
assert handler(request) == "OK"
request = MagicMock(path_params={'p1': 1}, query={'a1': "1,a"}, headers={})
assert json.loads(handler(request).data.decode())['detail'].startswith("'a' is not of type 'integer'")
request = MagicMock(path_params={'p1': 1}, query={'a1': "1,-1"}, headers={})
assert json.loads(handler(request).data.decode())['detail'].startswith("-1 is less than the minimum of 0")
request = MagicMock(path_params={'p1': 1}, query={'a1': "1"}, headers={})
assert json.loads(handler(request).data.decode())['detail'].startswith("[1] is too short")
request = MagicMock(path_params={'p1': 1}, query={'a1': "1,2,3,4"}, headers={})
assert json.loads(handler(request).data.decode())['detail'].startswith("[1, 2, 3, 4] is too long")
request = MagicMock(path_params={'p1': '123'}, query={}, headers={'h1': 'a'})
assert handler(request) == 'OK'
request = MagicMock(path_params={'p1': '123'}, query={}, headers={'h1': 'x'})
assert json.loads(handler(request).data.decode())['detail'].startswith("'x' is not one of ['a', 'b']")
``` |
{
"source": "jjbuchanan/gp_blendclass_singleband",
"score": 3
} |
#### File: gp_blendclass_singleband/image_simulation/coadd_simulation.py
```python
import numpy as np
import pandas as pd
from collections import namedtuple
import galsim
from galsim import degrees, arcsec
from flux_conversion import npho_dark_sky
from galaxy_drawing import select_gals, build_galaxy_composite
def draw_exposure(gal_composite_drawer, mean_npho_sky, seed, params):
sky_conditions = params.kolmogorov_fwhm
npho_sky = mean_npho_sky
# Introduce some variability between exposures
rng = np.random.default_rng()
sky_conditions *= (1 + params.kolmogorov_variability)**rng.standard_normal()
npho_sky *= (1 + params.skycount_variability)**rng.standard_normal()
# Sky background, including random per-pixel fluctuations
image = galsim.ImageF(params.ngrid_x, params.ngrid_y, init_value=npho_sky)
seed += 1
sky_noise = galsim.PoissonNoise(rng=galsim.BaseDeviate(seed))
image.addNoise(sky_noise)
image *= params.gain
# Digitize
image = galsim.ImageI(image)
# Record the mean sky level actually applied to this image
npho_sky = np.mean(image.array)
# Convolve the galaxy light profile with the atmospheric PSF
psf = galsim.Kolmogorov(fwhm=sky_conditions)
smeared_gal_drawer = galsim.Convolve(psf, gal_composite_drawer)
# Simulate telescope dither, which (after final image corrections) is roughly
# equivalent to a random sub-pixel-scale shift of the image coordinates.
offset = rng.uniform(low=-1, high=1, size=2)
# Compute the final image pixel values
seed += 2
image = smeared_gal_drawer.drawImage(
image=image, add_to_image=True,
scale=params.scale, gain=params.gain, offset=offset,
method='phot', rng=galsim.BaseDeviate(seed-1),
sensor=galsim.SiliconSensor(name='lsst_itl_32',
rng=galsim.BaseDeviate(seed))
)
pixels = image.array.astype(float)
return pixels, offset, seed, npho_sky
def dither_correction(pixels, offset):
dx, dy = offset
sx, sy = int(np.sign(dx)), int(np.sign(dy))
dx, dy = abs(dx), abs(dy)
'''To invert the dither we need to roll the image array in the
opposite direction of the original offset.
In the convention used by both galsim and matplotlib, the
x direction corresponds to numpy array axis 0, and y to 1.'''
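# Worked example (illustrative numbers, not from the source): for offset = (0.4, 0.25)
# the four bilinear weights below are 0.6*0.75 = 0.45, 0.25*0.6 = 0.15,
# 0.4*0.75 = 0.30 and 0.4*0.25 = 0.10, which sum to 1 so total flux is preserved.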
translated_pixels = (1-dx)*(1-dy)*pixels
translated_pixels += dy*(1-dx)*np.roll(pixels, -sy, axis=0)
translated_pixels += dx*(1-dy)*np.roll(pixels, -sx, axis=1)
translated_pixels += dx*dy*np.roll(np.roll(pixels, -sy, axis=0), -sx, axis=1)
return translated_pixels
# Simulate multiple, coadded exposures
def draw_coadd(band_idx, band, gal_composite_drawer, params, verbose=False):
rng = np.random.default_rng()
seed = rng.integers(100000000)
coadded_image_pixels = np.zeros((params.ngrid_x, params.ngrid_y), dtype=int)
mean_npho_sky = npho_dark_sky(band, params.exposure_time, params.area, params.scale)
sum_applied_npho_sky = 0
if verbose:
print('Exposure', end='')
for exposure in range(params.n_exposures):
if verbose:
print('', exposure+1, end='')
if (exposure+1)%10 == 0:
print()
# Draw this single exposure
pixels, offset, seed, applied_npho_sky = draw_exposure(gal_composite_drawer[band_idx],
mean_npho_sky, seed, params)
sum_applied_npho_sky += applied_npho_sky
# Coadd
translated_pixels = dither_correction(pixels, offset)
coadded_image_pixels += translated_pixels.round().astype(int)
return coadded_image_pixels, sum_applied_npho_sky
def draw_postage_stamp(window, params, image_center=None, label=None):
if image_center is None:
# Pick some RA,Dec center for the scene
# # Random center
# center_ra = rng.uniform(window.ra.min()+p.image_halfwidth/degrees,
# window.ra.max()-p.image_halfwidth/degrees)*degrees
# center_dec = rng.uniform(window.dec.min()+p.image_halfwidth/degrees,
# window.dec.max()-p.image_halfwidth/degrees)*degrees
# Center of window
center_ra = (window.ra.min() + (window.ra.max()-window.ra.min())/2)*degrees
center_dec = (window.dec.min() + (window.dec.max()-window.dec.min())/2)*degrees
image_center = galsim.CelestialCoord(center_ra, center_dec)
# Given the image center and width, select all the galaxies
# from the source table inside that box.
gals = select_gals(image_center, params.image_halfwidth, window)
# Work out how to draw each galaxy in relation to the image center
gal_composite_drawer, true_pos, npho = build_galaxy_composite(gals,
image_center, params)
# Simulate a coadded image in each band
for band_idx, band in enumerate(params.filters):
coadded_image_pixels, mean_npho_sky = draw_coadd(band_idx, band,
gal_composite_drawer, params)
# Save the image pixel data
fname = f'pixelCounts_{band}-band'
if label is not None:
fname += '_' + label
np.savetxt(fname + '.csv', coadded_image_pixels, delimiter=",")
# Save other image data
pdict = params._asdict()
for pname in ['image_halfwidth', 'area']:
del(pdict[pname])
pdict['mean_npho_sky'] = mean_npho_sky
imdata = pd.Series(pdict)
fname = 'imdata'
if label is not None:
fname += '_' + label
imdata.to_csv(fname + '.csv')
# Some drawn galaxies have centers that lie outside the image frame,
# so select just those that lie inside.
# Exclude a 1-pixel band around the edge of the image.
in_image_x_filter = (true_pos.x >= 1) & (true_pos.x < params.ngrid_x - 1)
in_image_y_filter = (true_pos.y >= 1) & (true_pos.y < params.ngrid_y - 1)
# Give columns more informative names
npho = npho.rename(columns={c: 'meanNphoPerExposure_'+c for c in npho.columns})
true_pos = true_pos.rename(columns={c: 'pixIdx_'+c for c in true_pos.columns})
gals = gals.rename(columns={'shear_1': 'gamma1', 'shear_2': 'gamma2', 'convergence': 'kappa'})
gals = gals.rename(columns={f'mag_{band}_noMW': f'mag_{band}' for band in params.filters})
gals = gals[['gamma1', 'gamma2', 'kappa', 'redshift'] +
[f'mag_{band}' for band in params.filters]]
# Merge galaxy truth information
gals = gals.reset_index(drop=True) # Use same index as other tables
galinfo = pd.concat([gals, true_pos, npho], axis=1)
galinfo_in_image = galinfo[in_image_x_filter & in_image_y_filter]
# Save galaxy truth information
fname = 'galinfo'
if label is not None:
fname += '_' + label
galinfo_in_image.to_csv(fname + '.csv', index=False)
def draw_large_scene(window, params, image_center=None, label=None):
if label is not None:
print('Drawing', label)
if image_center is None:
# Pick some RA,Dec center for the scene
# Center of window
center_ra = (window.ra.min() + (window.ra.max()-window.ra.min())/2)*degrees
center_dec = (window.dec.min() + (window.dec.max()-window.dec.min())/2)*degrees
image_center = galsim.CelestialCoord(center_ra, center_dec)
# # Random center
# center_ra = rng.uniform(window.ra.min()+p.image_halfwidth/degrees,
# window.ra.max()-p.image_halfwidth/degrees)*degrees
# center_dec = rng.uniform(window.dec.min()+p.image_halfwidth/degrees,
# window.dec.max()-p.image_halfwidth/degrees)*degrees
# Given the image center and width, select all the galaxies from the source
# table inside that box.
gals = select_gals(image_center, params.image_halfwidth, window)
# Work out how to draw each galaxy in relation to the image center
gal_composite_drawer, true_pos, npho = build_galaxy_composite(gals,
image_center, params, verbose=True)
# Save all galaxy info
fname = f'{params.ngrid_x}x{params.ngrid_y}_gal_info.csv'
if label is not None:
fname = f'gal_info_{label}.csv'
gals.to_csv(fname)
# Save true galaxy positions, in image pixel grid coordinates
fname = f'{params.ngrid_x}x{params.ngrid_y}_true_pos.csv'
if label is not None:
fname = f'true_pos_{label}.csv'
true_pos.to_csv(fname)
# Save mean total npho per exposure, in each filter band, for each galaxy
fname = f'{params.ngrid_x}x{params.ngrid_y}_true_npho_allbands.csv'
if label is not None:
fname = f'true_npho_allbands_{label}.csv'
npho.to_csv(fname)
# Simulate a coadded image in each filter band
for band_idx, band in enumerate(params.filters):
print(band, 'band')
coadded_image_pixels, mean_npho_sky = draw_coadd(band_idx, band,
gal_composite_drawer, params, verbose=True)
# Save image pixel values
fname = f'{params.ngrid_x}x{params.ngrid_y}-pix_{band}-band.csv'
if label is not None:
fname = f'{band}-band_{label}.csv'
np.savetxt(fname, coadded_image_pixels, delimiter=',')
def draw_specific_tiles(draw_method, window, params, image_indices):
dec_span_degrees = window['dec'].max() - window['dec'].min()
ra_span_degrees_raw = window['ra'].max() - window['ra'].min()
'''Convert RA span into gnomonic (TAN) span.
For a fixed raw RA span, the gnomonic span decreases as Dec approaches +/-90 degrees.
Thus to define a gnomonic span with the widest possible validity
(not going beyond the bounds of the dataset),
use the most extreme Dec value attested in the dataset.'''
ra_span_degrees_min = ra_span_degrees_raw * np.cos(np.abs(window['dec']).max() * np.pi/180)
dec_span_arcsec = dec_span_degrees*degrees / arcsec
ra_span_arcsec_min = ra_span_degrees_min*degrees / arcsec
dec_span_pixels = dec_span_arcsec / params.scale
ra_span_pixels_min = ra_span_arcsec_min / params.scale
# Without checking I don't know which of RA vs. Dec corresponds to x vs. y,
# but for square images, that difference doesn't matter here.
dec_span_tiles = dec_span_pixels / params.ngrid_y
ra_span_tiles_min = ra_span_pixels_min / params.ngrid_x
print('ra_span_degrees_min =', ra_span_degrees_min)
print('dec_span_degrees =', dec_span_degrees)
# For every desired image, draw a coadded exposure at that location.
for dec_tile_idx, ra_tile_idx in image_indices:
print('dec_tile_idx, ra_tile_idx =', dec_tile_idx, ra_tile_idx)
# Specify the RA,Dec corresponding to the selected image location
center_ra = (window['ra'].min() +
ra_span_degrees_min * (ra_tile_idx/ra_span_tiles_min))*degrees
center_dec = (window['dec'].min() +
dec_span_degrees * (dec_tile_idx/dec_span_tiles))*degrees
print('center_ra =', center_ra)
print('center_dec =', center_dec)
image_center = galsim.CelestialCoord(center_ra, center_dec)
# Draw the image at that location
imlabel = f'image-decidx_{dec_tile_idx}-raidx_{ra_tile_idx}'
draw_method(window, params, image_center, label=imlabel)
print('image indices drawn:', image_indices)
def draw_random_tiles(draw_method, window, params, n_images, ignore_indices=None):
    '''
    For any given image drawing method, pick out multiple nonoverlapping image
    regions to draw.
    Parameters:
    draw_method - A function that takes window, params, image_center, label as
    input and draws an image
    window - Galaxy table with rectangular boundaries in RA--Dec space
    params - Image drawing parameters ('Params' namedtuple)
    n_images - Number of distinct images to draw
    ignore_indices - Optional set of (dec_tile_idx, ra_tile_idx) tuples that
    have already been drawn and should not be selected again
    '''
dec_span_degrees = window['dec'].max() - window['dec'].min()
ra_span_degrees_raw = window['ra'].max() - window['ra'].min()
'''Convert RA span into gnomonic (TAN) span.
For a fixed raw RA span, the gnomonic span decreases as Dec approaches +/-90 degrees.
Thus to define a gnomonic span with the widest possible validity
(not going beyond the bounds of the dataset),
use the most extreme Dec value attested in the dataset.'''
ra_span_degrees_min = ra_span_degrees_raw * np.cos(np.abs(window['dec']).max() * np.pi/180)
dec_span_arcsec = dec_span_degrees*degrees / arcsec
ra_span_arcsec_min = ra_span_degrees_min*degrees / arcsec
dec_span_pixels = dec_span_arcsec / params.scale
ra_span_pixels_min = ra_span_arcsec_min / params.scale
    # It is not verified here whether RA or Dec corresponds to the image x or y
    # axis, but for square images that distinction does not matter here.
dec_span_tiles = dec_span_pixels / params.ngrid_y
ra_span_tiles_min = ra_span_pixels_min / params.ngrid_x
print('ra_span_degrees_min =', ra_span_degrees_min)
print('dec_span_degrees =', dec_span_degrees)
# For every desired image, pick out a random unique location in the sky,
# and draw a coadded exposure at that location.
rng = np.random.default_rng()
selected_indices = set() if ignore_indices is None else ignore_indices
report_interval = max(1, n_images//100)
for i in range(n_images):
# Pick out at random an image location that hasn't yet been selected
dec_tile_idx = rng.integers(low=1, high=dec_span_tiles)
ra_tile_idx = rng.integers(low=1, high=ra_span_tiles_min)
while (dec_tile_idx, ra_tile_idx) in selected_indices:
dec_tile_idx = rng.integers(low=1, high=dec_span_tiles)
ra_tile_idx = rng.integers(low=1, high=ra_span_tiles_min)
# Remember locations that have already been selected, to avoid repeats
print('dec_tile_idx, ra_tile_idx =', dec_tile_idx, ra_tile_idx)
selected_indices.add((dec_tile_idx, ra_tile_idx))
# Specify the RA,Dec corresponding to the selected image location
center_ra = (window['ra'].min() +
ra_span_degrees_min * (ra_tile_idx/ra_span_tiles_min))*degrees
center_dec = (window['dec'].min() +
dec_span_degrees * (dec_tile_idx/dec_span_tiles))*degrees
print('center_ra =', center_ra)
print('center_dec =', center_dec)
image_center = galsim.CelestialCoord(center_ra, center_dec)
# Draw the image at that location
draw_method(window, params, image_center, label='image-'+str(i))
if (i+1)%report_interval == 0:
print(i+1, 'images drawn')
print('selected_indices:', selected_indices)
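# Example usage sketch (hypothetical argument values; assumes `window` and
# `params` are constructed as in main() below):
#   draw_random_tiles(draw_large_scene, window, params, n_images=3)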
# Thin wrapper for draw_random_tiles
def draw_random_postage_stamps(window, params, n_images):
draw_random_tiles(draw_postage_stamp, window, params, n_images)
# Thin wrapper for draw_random_tiles
def draw_random_scenes(window, params, n_images, ignore_indices):
draw_random_tiles(draw_large_scene, window, params, n_images, ignore_indices)
# Thin wrapper for draw_specific_tiles
def draw_specific_scenes(window, params, image_indices):
draw_specific_tiles(draw_large_scene, window, params, image_indices)
paramnames = ['filters', 'n_exposures', 'gain',
'exposure_time', 'area', 'scale', 'kolmogorov_fwhm',
'kolmogorov_variability', 'skycount_variability', 'ngrid_x', 'ngrid_y',
'image_halfwidth']
Params = namedtuple('Params', paramnames, defaults=[None]*len(paramnames))
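# Illustrative example: Params(filters=['i'], scale=0.2) leaves every other
# field at its default of None, e.g. its .gain attribute is None.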
def main():
# Read galaxy table from parquet file
window = pd.read_parquet('window.parquet')
params = dict()
# params.filters = ['u','g','r','i','z','y']
params['filters'] = ['i']
# params['n_exposures'] = 100
params['n_exposures'] = 1
params['gain'] = 1
params['exposure_time'] = 30 # s
d_eff = 642.3 # cm
params['area'] = np.pi * (d_eff/2)**2
params['scale'] = 0.2 # arcsec/pixel
params['kolmogorov_fwhm'] = 0.7
# Fractional variability from exposure to exposure
params['kolmogorov_variability'] = 0.1
params['skycount_variability'] = 0.05
# Large scene
params['ngrid_x'] = 2048
params['ngrid_y'] = 2048
# Select large enough region to cover pixel grid and then some
params['image_halfwidth'] = 224*arcsec
# # Postage stamp
# params['ngrid_x'] = 32
# params['ngrid_y'] = 32
# # Select large enough region to cover pixel grid and then some
# params['image_halfwidth'] = 3.5*arcsec
# Using a namedtuple because it's just nicer to call p.name instead of p['name'],
# and also because (named)tuples are immutable.
params_ = Params(**params)
# draw_large_scene(window, params_)
# N_IMAGES = 1
# draw_random_postage_stamps(window, params_, N_IMAGES)
ignore_indices = {(6, 2), (1, 2), (5, 5), (2, 4), (6, 5), (6, 1), (6, 4),
(4, 5), (3, 6), (4, 1), (5, 6), (2, 1), (3, 3), (1, 4), (1, 5)}
# draw_random_scenes(window, params_, N_IMAGES, ignore_indices)
image_indices = ignore_indices
draw_specific_scenes(window, params_, image_indices)
if __name__ == "__main__":
main()
``` |
{
"source": "jjbuchan/rackspace-monitoring-agent-plugins-contrib",
"score": 2
} |
#### File: jjbuchan/rackspace-monitoring-agent-plugins-contrib/mongodb_stats.py
```python
import sys
try:
from pymongo import MongoClient as Client
except ImportError:
from pymongo import Connection as Client
from pymongo.errors import ConnectionFailure, AutoReconnect
def mongodb_stats(host, port):
try:
c = Client(host, port)
    except (ConnectionFailure, AutoReconnect):
return None
else:
return c.test.command("serverStatus")
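# Example invocation of this plugin (hypothetical host/port values):
#   python mongodb_stats.py localhost 27017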
def main():
if len(sys.argv) != 3:
print "Usage: %s <host> <port>" % sys.argv[0]
sys.exit(0)
host = sys.argv[1]
port = int(sys.argv[2])
s = mongodb_stats(host, port)
if not s:
print "status err unable to generate statistics"
sys.exit(0)
print "status ok mongodb statistics generated"
print "metric uptime float", s['uptime']
print "metric conn_available int", s['connections']['available']
print "metric conn_current int", s['connections']['current']
print "metric conn_percent float", float(s['connections']['current']
/ s['connections']['available'])
print "metric mem_mapped int", s['mem']['mapped']
print "metric index_hits int", s['indexCounters']['hits']
print "metric index_misses int", s['indexCounters']['misses']
try:
print "metric index_percent int", float(s['indexCounters']['hits']
/ s['indexCounters']['accesses'])
except ZeroDivisionError:
print "metric index_percent int 0"
if 'repl' in s:
print "metric is_replicating string true"
print "metric is_master string", s['repl']['ismaster']
print "metric is_secondary string", s['repl']['secondary']
else:
print "metric is_replicating string false"
if __name__ == '__main__':
main()
``` |
{
"source": "jjbuck/amazon-sagemaker-examples",
"score": 2
} |
#### File: amazon-sagemaker-examples/ground_truth_labeling_jobs/sagemaker-ground-truth-semantic-segmentation-mask-index-correction.py
```python
'''
pip install -r requirements.txt
'''
# Usage
'''
python sem_seg_fix.py --output-manifest output.manifest
--save-location path/to/local/save/directory --fixed-manifest-location
fixed.manifest
`output.manifest` is an output manifest from the Amazon SageMaker
Ground Truth semantic segmentation labeling job with a single
annotator with flawed annotations (i.e. incorrect label indices in
the pngs). The script will read the entries in this manifest, download
the resulting annotations into memory, fix the annotations using the
label map in the manifest, save those results into
`path/to/local/save/directory`, and also save a modified version of
the output manifest with the location of the saved results in
`fixed.manifest`. The entries in `fixed.manifest` will contain a
`'fixed-label-ref'` field with the absolute (local) location of the
fixed annotations and an `'original-label-ref'` field with the
original s3 URI of the label. The corrected annotations will be saved
in `path/to/local/save/directory/` +
`s3/key/of/original/annotation.png`, that is, the absolute s3 key (without the
bucket name) of the original, flawed annotation.
'''
import argparse
import base64
from collections import defaultdict
from io import BytesIO
import json
from pathlib import Path
from urllib.parse import urlparse
import boto3
import numpy as np
from PIL import Image
s3 = boto3.resource('s3')
try:
next(iter(s3.buckets.all()))
except Exception as e:
raise Exception(
'Could not access your s3 resources. '
'Please verify that your AWS credentials are correctly configured and '
'try again.'
) from e
class SemSegParser(object):
def __init__(self, annotation, label_categories):
self._annotation = annotation
image_bytes = base64.b64decode(self._annotation)
img = np.asarray(Image.open(BytesIO(image_bytes)).convert("RGBA"))
self.hex_colors = defaultdict(str)
self._img_array, self._class_names, self._label_map = \
self.get_class_masks(img, label_categories)
def get_class_masks(self, img, label_categories):
img_no_alpha = img[:, :, 0:3]
masks = []
class_names = []
rgb_label_maps = self.initialize_label_map()
for idx_str, category_info in label_categories.items():
i = int(idx_str)
class_name = category_info['class-name']
class_names.append(class_name)
class_hex_color = category_info['hex-color']
self.hex_colors[class_name] = class_hex_color
class_rgb_color = self.hex_to_rgb(class_hex_color)
rgb_label_maps.append(class_rgb_color)
class_mask = np.all(img_no_alpha == class_rgb_color, axis=-1)
class_mask = class_mask * (i+1)
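            # Offsetting the label by one keeps index 0 free for the
            # background, which remains 0 when the per-class masks are summed.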
masks.append(class_mask)
masks = np.array(masks)
masks = masks.sum(axis=0)
return masks.astype(np.uint8), class_names, rgb_label_maps
# Set background to white
def initialize_label_map(self):
return [(255, 255, 255)]
@property
def class_names(self):
return self._class_names
@property
def img_array(self):
return self._img_array
@staticmethod
def hex_to_rgb(hexcode):
h = hexcode.lstrip('#')
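        # Illustrative example: '#2c7fb8' -> (44, 127, 184)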
return tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))
@property
def img_w_palette(self):
im = Image.fromarray(np.uint8(self._img_array))
num_classes = len(self._label_map)
palette = self._label_map + [(255, 255, 255) for i
in range(256 - num_classes)]
palette = [item for rgb in palette for item in rgb] + ([255, ])
im.putpalette(palette)
return im
def get_bucket_and_key(s3uri):
"""Get the bucket name and key associated with an s3 object.
Args:
s3uri (str): The s3 uri.
Return:
bucket_name and key strings.
"""
url = urlparse(s3uri)
bucket_name = url.netloc
key = url.path.lstrip('/')
return bucket_name, key
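# Illustrative example (hypothetical URI): 's3://my-bucket/labels/ann.png'
# yields ('my-bucket', 'labels/ann.png').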
def get_object_bytes(s3_obj):
"""Get bytes for an object stored in s3.
Arg:
s3_obj (boto3.resources.factory.s3.ObjectSummary): object for thing in s3
"""
body = s3_obj.get()['Body']
return body.read()
def get_metadata(entry):
return next(iter(
v for k, v in entry.items() if ('-metadata' in k)
))
def fix_annotations(output_manifest, save_location, fixed_manifest_location):
manifest = []
with open(output_manifest, 'r') as f:
for line in f:
manifest.append(json.loads(line))
if save_location.endswith('/'):
save_location = save_location[:-1]
fixed_manifest = []
for entry in manifest:
metadata = get_metadata(entry)
job_name = metadata['job-name'].replace('labeling-job/', '')
label_key = job_name + '-ref'
label_uri = entry[label_key]
bucket_name, key = get_bucket_and_key(label_uri)
bucket = s3.Bucket(bucket_name)
annotation_bytes = get_object_bytes(bucket.Object(key))
annotation_b64 = base64.b64encode(annotation_bytes)
color_map = metadata['internal-color-map']
parser = SemSegParser(annotation_b64, color_map)
save_dir = Path(save_location)
save_path = Path.joinpath(save_dir, key)
try:
save_path.parent.mkdir(parents=True)
except FileExistsError as e:
raise FileExistsError(
'File already exists at save location for fixed annotation. '
'Please change save-location and try again.'
)
parser.img_w_palette.save(save_path)
entry['original-label-ref'] = entry[label_key]
entry['fixed-label-ref'] = str(save_path.absolute())
fixed_manifest.append(entry)
Path(fixed_manifest_location).parent.mkdir(exist_ok=True, parents=True)
with open(fixed_manifest_location, 'w') as f:
for line in fixed_manifest:
f.write(json.dumps(line) + '\n')
def parse_args():
parser = argparse.ArgumentParser(
description=(
'Correct semantic segmentation masks from Amazon SageMaker Ground '
'Truth.'
)
)
parser.add_argument(
'--output-manifest', type=str, required=True,
help=(
'Path to the output manifest from your labeling job.'
)
)
parser.add_argument(
'--save-location', type=str, default='./output',
help=(
'Local directory to save corrected annotations.'
)
)
parser.add_argument(
'--fixed-manifest-location', type=str, default='fixed.manifest',
help=(
'Location to save manifest with fixed annotation information.'
)
)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
print(
'Fixing annotations from {} and saving to {}...'.format(
args.output_manifest, args.save_location,
)
)
fix_annotations(
args.output_manifest, args.save_location, args.fixed_manifest_location,
)
print('Done.')
``` |
{
"source": "JJBUP/yolov3_pytorch",
"score": 3
} |
#### File: code/utils/datasets.py
```python
import glob
import torch
from torch.utils.data import Dataset
import torch.nn.functional as F
import numpy as np
from PIL import Image
# Reads image paths at detection (inference) time
class ImageFolder(Dataset):
def __init__(self, folder_path, transform=None):
self.files = sorted(glob.glob("%s/*.*" % folder_path))
        self.transform = transform  # data augmentation / preprocessing transform
def __getitem__(self, index):
img_path = self.files[index % len(self.files)]
img = np.array(
Image.open(img_path).convert('RGB'),
dtype=np.uint8)
        # Label placeholder (no ground-truth boxes are needed at inference time)
boxes = np.zeros((1, 5))
# Apply transforms
if self.transform:
img, _ = self.transform((img, boxes))
return img_path, img
def __len__(self):
return len(self.files)
# Dataset used during training
class ListDataSet(Dataset):
    def __init__(self, labels_file, input_shape, transform=None):
self.input_shape = input_shape # (w h)
self.transform = transform
super(ListDataSet, self).__init__()
with open(labels_file, 'r') as f:
self.lines = [line.strip() for line in f.readlines()]
def __getitem__(self, index):
        # Read one annotation line
line = self.lines[index % len(self.lines)]
line_splited = line.split(" ")
img_path = line_splited[0]
boxes = np.array([box.split(",") for box in line_splited[1:]], dtype=np.float32)
        pil_img = Image.open(img_path).convert('RGB')  # ensure a 3-channel RGB image
img = np.array(pil_img, dtype=np.uint8)
        # Fall back to the raw boxes if no transform is provided
        bb_targets = boxes
        if self.transform:
            img, bb_targets = self.transform((img, boxes))
        return img, bb_targets
def __len__(self):
return len(self.lines)
def collate_fn(self, batch):
imgs, bb_targets = list(zip(*batch))
        # Resize the padded square images to the model input size and stack them into a batch
imgs = torch.stack([resize(img, self.input_shape[0]) for img in imgs])
        # Record which image in the batch each set of boxes belongs to
for i, boxes in enumerate(bb_targets):
boxes[:, 0] = i
        # Concatenate all targets in this batch into one (num_boxes, 6) tensor; the batch index set above identifies the source image
bb_targets = torch.cat(bb_targets, dim=0)
# imgs (batchsize,3,416,416)
# bb_targets (num,6)
return imgs, bb_targets
def resize(image, size):
image = F.interpolate(image.unsqueeze(0), size=size, mode="nearest").squeeze(0)
return image
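# Illustrative example: resize(img, 416) maps a (3, H, W) image tensor to
# (3, 416, 416) via nearest-neighbour interpolation.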
if __name__ == '__main__':
lable = "/Users/weimingan/work/python_code/myyolo/yolov3_pytorch/code/voc2007_train.txt"
dataset = ListDataSet(labels_file=lable, input_shape=[416, 416])
dataset.__getitem__(5)
``` |
{
"source": "JJC1138/aws-lambda-runtime-interface-emulator",
"score": 2
} |
#### File: integration/testdata/main.py
```python
import time
import os
def sleep_handler(event, context):
time.sleep(5)
return "I was sleeping"
def exception_handler(event, context):
raise Exception("Raising an exception")
def success_handler(event, context):
print("Printing data to console")
return "My lambda ran succesfully"
def check_env_var_handler(event, context):
return os.environ.get("MyEnv")
def assert_env_var_is_overwritten(event, context):
print(os.environ.get("AWS_LAMBDA_FUNCTION_NAME"))
if os.environ.get("AWS_LAMBDA_FUNCTION_NAME") == "test_function":
raise("Function name was not overwritten")
else:
return "My lambda ran succesfully"
def assert_lambda_arn_in_context(event, context):
if context.invoked_function_arn == f"arn:aws:lambda:us-east-1:012345678912:function:{os.environ.get('AWS_LAMBDA_FUNCTION_NAME', 'test_function')}":
return "My lambda ran succesfully"
else:
raise("Function Arn was not there")
``` |
{
"source": "JJC1138/conan-ue4cli",
"score": 2
} |
#### File: conan_ue4cli/common/DelegateManager.py
```python
import os
from os.path import exists, join
from .ConanTools import ConanTools
class DelegateManager(object):
'''
Manages delegates, which are used to provide package-specific custom functionality for wrapper packages
'''
def __init__(self, delegatesDir):
# Read the contents of the default (no-op) delegate class for generated packages
self.delegatesDir = delegatesDir
self.defaultDelegate = ConanTools.load(join(self.delegatesDir, '__default.py'))
def getDelegateClass(self, libName):
'''
Retrieves the delegate class code for the specified package (if one exists),
or else returns the default (no-op) delegate class
'''
delegateFile = join(self.delegatesDir, '{}.py'.format(libName))
if exists(delegateFile):
return ConanTools.load(delegateFile)
return self.defaultDelegate
```
#### File: conan_ue4cli/common/ExecutableResolver.py
```python
from os.path import exists, join
class ExecutableResolver(object):
'''
Provides functionality for resolving executable files given search paths and library names
'''
def __init__(self, platform, searchPaths):
'''
Creates a new executable resolver for the specified platform and executable search paths
'''
self.platform = platform
self.searchPaths = searchPaths
def resolve(self, executableName):
'''
Attempts to resolve the path to the executable file for the specified name
'''
# Determine the appropriate filename suffix for the target platform
suffix = '.exe' if self.platform == 'Windows' else ''
# Iterate through each of our search paths and attempt to find the executable file
for searchDir in self.searchPaths:
resolved = join(searchDir, executableName + suffix)
if exists(resolved):
return resolved
# Failed to resolve the executable file
return None
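# Illustrative usage (hypothetical search path): on Windows,
# ExecutableResolver('Windows', ['C:/tools/bin']).resolve('cmake') returns the
# path to 'cmake.exe' in that directory if the file exists, otherwise None.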
```
#### File: conan_ue4cli/common/RecipeCache.py
```python
from .PluginConfiguration import PluginConfiguration
from .ConanTools import ConanTools
import os, shutil
# The URL from which we retrieve the zip file containing the latest recipe data
RECIPE_ZIP_URL = 'https://github.com/adamrehn/ue4-conan-recipes/archive/master.zip'
# The root directory name of the files in the recipe data zip file
ZIP_ROOT_DIR = 'ue4-conan-recipes-master'
class RecipeCache(object):
'''
Provides functionality for managing the conan-ue4cli recipe cache
'''
@staticmethod
def getCacheDirectory():
'''
Returns the path to the recipe cache directory
'''
return os.path.join(PluginConfiguration.getConfigDirectory(), 'recipes')
@staticmethod
def updateCache():
'''
Updates the contents of the recipe cache with the latest recipes from our repo
'''
# Remove the cache directory if it already exists
cacheDir = RecipeCache.getCacheDirectory()
if os.path.exists(cacheDir):
shutil.rmtree(cacheDir)
# Download and extract the latest recipes from our GitHub repository
parentDir = os.path.dirname(cacheDir)
ConanTools.get(RECIPE_ZIP_URL, destination=parentDir)
shutil.move(os.path.join(parentDir, ZIP_ROOT_DIR), cacheDir)
```
#### File: conan_ue4cli/common/RecipeManagement.py
```python
from os.path import basename, dirname, join
from pkg_resources import parse_version
from .Utility import Utility
import glob, re
class RecipeManagement(object):
'''
Provides functionality for managing Conan recipes
'''
@staticmethod
def getLatestVersion(name, user, channel):
'''
Determines the latest available version of the specified Conan package
'''
# Retrieve the list of available versions of the specified package that are in Conan's local cache
found = Utility.getJSON(['conan', 'search', '{}/*@{}/{}'.format(name, user, channel)], ['--json', '{}'])
recipes = [instance['items'] for instance in found['results'] if instance['remote'] is None]
# Verify that at least one version was found
if len(recipes) == 0:
raise RuntimeError('could not find the package "{}" in the local Conan cache!'.format(name))
# Extract the list of version numbers and return the highest available version
references = [RecipeManagement.parseReference(recipe['recipe']['id']) for recipe in recipes[0]]
versions = sorted([parse_version(reference['version']) for reference in references])
return str(versions[-1])
@staticmethod
def listRecipesInDir(directory):
'''
Retrieves the list of available package recipes contained in a directory.
Return value is a list of tuples containing (package, version).
'''
recipes = glob.glob(join(directory, '*', '*', 'conanfile.py'))
return list([
(basename(dirname(dirname(recipe))),basename(dirname(recipe)))
for recipe in recipes
])
@staticmethod
def parseReference(reference):
'''
Parses a fully-qualified Conan package reference into its constituent components
'''
match = re.match('(.+)/(.+)@(.+)/(.+)', reference)
return {
'name': match.group(1),
'version': match.group(2),
'user': match.group(3),
'channel': match.group(4)
}
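    # Illustrative example (hypothetical reference): "zlib/1.2.11@adamrehn/4.27"
    # parses to name "zlib", version "1.2.11", user "adamrehn", channel "4.27".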
```
#### File: boilerplate_templates/common/conanfile.py
```python
from conans import ConanFile
import os
class ${MODULE}Conan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "json"
def _requireUnreal(self, dependency):
'''
Adds a package as a dependency, filling in the Unreal Engine version string.
Call this with a package reference that uses a placeholder for the channel, e.g.:
self._requireUnreal("my-package/1.0.0@adamrehn/{}")
'''
self.requires(dependency.format(os.environ["UNREAL_ENGINE_VERSION"]))
def requirements(self):
# TODO:
# LIST YOUR DEPENDENCIES HERE, USING THE `self._requireUnreal()` METHOD.
# MAKE SURE YOU USE A PLACEHOLDER `{}` FOR THE PACKAGE CHANNEL, e.g.:
# self._requireUnreal("my-package/1.0.0@adamrehn/{}")
# TODO:
# REMOVE THIS `pass` STATEMENT ONCE YOU'VE ADDED AT LEAST ONE DEPENDENCY,
# SINCE IT'S ONLY HERE BECAUSE PYTHON FORBIDS EMPTY FUNCTION BODIES.
pass
```
#### File: data/delegates/OpenSSL.py
```python
class PackageDelegate(object):
@staticmethod
def post_requirements(conanfile):
'''
Called at the end of the Conan recipe requirements() method
'''
pass
@staticmethod
def post_build(conanfile):
'''
Called at the end of the Conan recipe build() method
'''
# Isolate our imports from the outer Conan recipe
from os.path import exists, join
import shutil
# Under Windows, CMake expects the filenames libeay32.lib and ssleay32.lib,
# but the UE4-bundled versions don't have the "32" suffix, so we need to
# duplicate the .lib files to ensure CMake can consume the OpenSSL package
libDir = join(conanfile.package_folder, "lib")
if exists(join(libDir, "ssleay.lib")) and not exists(join(libDir, "ssleay32.lib")):
shutil.copy2(join(libDir, "libeay.lib"), join(libDir, "libeay32.lib"))
shutil.copy2(join(libDir, "ssleay.lib"), join(libDir, "ssleay32.lib"))
@staticmethod
def post_info(conanfile):
'''
Called at the end of the Conan recipe package_info() method
'''
pass
```
#### File: packages/libcxx/libcxx.py
```python
import os
class LibCxx(object):
@staticmethod
def set_vars(conanFile):
"""
This is a no-op provided for backwards compatibility with recipes written for older versions of conan-ue4cli
"""
LibCxx._show_deprecation_notice()
@staticmethod
def fix_autotools(autotools):
"""
This is a no-op provided for backwards compatibility with recipes written for older versions of conan-ue4cli
"""
LibCxx._show_deprecation_notice()
@staticmethod
def _show_deprecation_notice():
print("Warning: the libcxx package has been deprecated and will be removed in a future version of conan-ue4cli.")
```
#### File: packages/ue4lib/ue4lib.py
```python
from ue4cli import UnrealManagerFactory, PrintingFormat
class UE4Lib():
def __init__(self, libName):
"""
Queries ue4cli to retrieve the details for the specified library
"""
self.unreal = UnrealManagerFactory.create()
self.engineRoot = self.unreal.getEngineRoot()
self.details = self.unreal.getThirdpartyLibs([libName], includePlatformDefaults = False)
def __repr__(self):
return repr(self.details)
def includedirs(self):
"""
Returns the header include directories for this library
"""
return self.details.resolveRoot(self.details.includeDirs, self.engineRoot)
def libdirs(self):
"""
Returns the library linker directories for this library
"""
return self.details.resolveRoot(self.details.linkDirs, self.engineRoot)
def libs(self):
"""
Returns the list of library files for this library
"""
return self.details.resolveRoot(self.details.libs, self.engineRoot)
def systemlibs(self):
"""
Returns the list of system library files for this library
"""
return self.details.systemLibs
def defines(self):
"""
Returns the preprocessor definitions for this library
"""
return self.details.resolveRoot(self.details.definitions, self.engineRoot)
def cxxflags(self):
"""
Returns the compiler flags for this library
"""
return self.details.resolveRoot(self.details.cxxFlags, self.engineRoot)
def ldflags(self):
"""
Returns the linker flags for this library
"""
return self.details.resolveRoot(self.details.ldFlags, self.engineRoot)
def combined_compiler_flags(self):
"""
Returns the combined compiler flags (defines + includedirs + cxxflags) for this library as a single string
"""
return self.details.getCompilerFlags(self.engineRoot, PrintingFormat.singleLine())
def combined_linker_flags(self):
"""
Returns the combined linker flags (libdirs + libs + ldflags) for this library as a single string
"""
return self.details.getLinkerFlags(self.engineRoot, PrintingFormat.singleLine())
``` |
{
"source": "jjc2718/generic-expression-patterns",
"score": 3
} |
#### File: generic-expression-patterns/figure_generation/figure_generation.py
```python
from IPython.display import Image, display, SVG
import svgutils.transform as sg
import numpy as np
from lxml import etree
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Directory of output figures
local_directory = "/home/alexandra/Documents/Data/Generic_expression_patterns/"
output_directory = "output/"
os.makedirs(output_directory, exist_ok=True)
# ## Function to plot
def make_figure_panel(filename, scale_x_input, scale_y_input, x_loc, y_loc):
panel = sg.fromfile(filename)
panel_size = (
np.round(float(panel.root.attrib["width"][:-2]) * 1.33, 0),
np.round(float(panel.root.attrib["height"][:-2]) * 1.33, 0),
)
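    # The 1.33 factor appears to convert the SVG size from points to pixels
    # (96/72 ~= 1.33); this is an inference about intent, not documented here.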
scale_x = scale_x_input
scale_y = scale_y_input
print(f"original: {panel_size}")
print(f"scaled:{(panel_size[0]*scale_x,panel_size[1]*scale_y)}")
panel = panel.getroot()
panel.scale_xy(x=scale_x, y=scale_y)
panel.moveto(x_loc, y_loc)
return panel
# ## Figure 1
# Create panels for figure 1
panel_1a = make_figure_panel(
"fig1A.svg", scale_x_input=2.5, scale_y_input=2.5, x_loc=30, y_loc=10
)
panel_1b = make_figure_panel(
"../human_general_analysis/logs/NN_2500_30/tybalt_2layer_30latent_hist.svg",
scale_x_input=0.8,
scale_y_input=0.8,
x_loc=600,
y_loc=10,
)
panel_1c = make_figure_panel(
"fig1C.svg", scale_x_input=4.2, scale_y_input=4.2, x_loc=30, y_loc=250
)
panel_1dleft = make_figure_panel(
os.path.join(local_directory, "template_volcano_DEG_SRP061689.svg"),
scale_x_input=0.6,
scale_y_input=0.6,
x_loc=30,
y_loc=700,
)
panel_1dright = make_figure_panel(
os.path.join(local_directory, "simulated_volcano_DEG_SRP061689.svg"),
scale_x_input=0.65,
scale_y_input=0.65,
x_loc=300,
y_loc=685,
)
panel_1a_label = sg.TextElement(10, 20, "A", size=18, weight="bold", font="Verdana")
panel_1b_label = sg.TextElement(580, 20, "B", size=18, weight="bold", font="Verdana")
panel_1c_label = sg.TextElement(10, 260, "C", size=18, weight="bold", font="Verdana")
panel_1d_label = sg.TextElement(10, 710, "D", size=18, weight="bold", font="Verdana")
figure_1 = sg.SVGFigure("1000", "1000")
figure_1.append(
[
etree.Element("rect", {"width": "100%", "height": "100%", "fill": "white"}),
panel_1a,
panel_1b,
panel_1c,
panel_1dleft,
panel_1dright,
panel_1a_label,
panel_1b_label,
panel_1c_label,
panel_1d_label,
]
)
display(SVG(figure_1.to_str()))
# save generated SVG files
figure_1.save("output/figure_1.svg")
# ## Figure 2
# Create panels for figure 2
panel_2a = make_figure_panel(
"fig2A.svg", scale_x_input=2, scale_y_input=2, x_loc=30, y_loc=10
)
panel_2b = make_figure_panel(
"../human_cancer_analysis/gene_ranking_logFC.svg",
scale_x_input=0.6,
scale_y_input=0.6,
x_loc=30,
y_loc=300,
)
panel_2c = make_figure_panel(
"../human_general_analysis/gene_ranking_log2FoldChange.svg",
scale_x_input=0.6,
scale_y_input=0.6,
x_loc=300,
y_loc=300,
)
panel_2d = make_figure_panel(
"../pseudomonas_analysis/gene_ranking_logFC.svg",
scale_x_input=0.6,
scale_y_input=0.6,
x_loc=600,
y_loc=300,
)
panel_2e = make_figure_panel(
"../compare_experiments/concordance_between_same_recount2_templates.svg",
scale_x_input=0.57,
scale_y_input=0.57,
x_loc=30,
y_loc=600,
)
panel_2f = make_figure_panel(
"../compare_experiments/concordance_between_diff_recount2_templates.svg",
scale_x_input=0.57,
scale_y_input=0.57,
x_loc=300,
y_loc=600,
)
panel_2a_label = sg.TextElement(10, 20, "A", size=18, weight="bold", font="Verdana")
panel_2b_label = sg.TextElement(10, 300, "B", size=18, weight="bold", font="Verdana")
panel_2c_label = sg.TextElement(300, 300, "C", size=18, weight="bold", font="Verdana")
panel_2d_label = sg.TextElement(600, 300, "D", size=18, weight="bold", font="Verdana")
panel_2e_label = sg.TextElement(10, 600, "E", size=18, weight="bold", font="Verdana")
panel_2f_label = sg.TextElement(300, 600, "F", size=18, weight="bold", font="Verdana")
figure_2 = sg.SVGFigure("1000", "1000")
figure_2.append(
[
etree.Element("rect", {"width": "100%", "height": "100%", "fill": "white"}),
panel_2a,
panel_2b,
panel_2c,
panel_2d,
panel_2e,
panel_2f,
panel_2a_label,
panel_2b_label,
panel_2c_label,
panel_2d_label,
panel_2e_label,
panel_2f_label,
]
)
display(SVG(figure_2.to_str()))
# save generated SVG files
figure_2.save("output/figure_2.svg")
# ## Figure 3
# Create panels for figure 3
panel_3a = make_figure_panel(
"fig2D.svg", scale_x_input=2, scale_y_input=2, x_loc=30, y_loc=10
)
panel_3b = make_figure_panel(
"../human_cancer_analysis/pathway_ranking_padj.svg",
scale_x_input=1,
scale_y_input=1,
x_loc=500,
y_loc=30,
)
panel_3c = make_figure_panel(
os.path.join(local_directory, "fig3C.svg"),
scale_x_input=2,
scale_y_input=2,
x_loc=30,
y_loc=300,
)
panel_3d = make_figure_panel(
"../other_enrichment_methods/enrichment_paired_plot_rnaseq.svg",
scale_x_input=0.5,
scale_y_input=0.5,
x_loc=500,
y_loc=320,
)
panel_3e = make_figure_panel(
"../other_enrichment_methods/enrichment_paired_plot_array.svg",
scale_x_input=0.5,
scale_y_input=0.5,
x_loc=1000,
y_loc=320,
)
panel_3a_label = sg.TextElement(10, 20, "A", size=18, weight="bold", font="Verdana")
panel_3b_label = sg.TextElement(500, 20, "B", size=18, weight="bold", font="Verdana")
panel_3c_label = sg.TextElement(10, 300, "C", size=18, weight="bold", font="Verdana")
panel_3d_label = sg.TextElement(500, 300, "D", size=18, weight="bold", font="Verdana")
panel_3e_label = sg.TextElement(1000, 300, "E", size=18, weight="bold", font="Verdana")
figure_3 = sg.SVGFigure("1500", "800")
figure_3.append(
[
etree.Element("rect", {"width": "100%", "height": "100%", "fill": "white"}),
panel_3a,
panel_3b,
panel_3c,
panel_3d,
panel_3e,
panel_3a_label,
panel_3b_label,
panel_3c_label,
panel_3d_label,
panel_3e_label,
]
)
display(SVG(figure_3.to_str()))
# save generated SVG files
figure_3.save("output/figure_3.svg")
# ## Figure 4
# Create panels for figure 4
panel_4a = make_figure_panel(
"../LV_analysis/nonzero_LV_coverage.svg",
scale_x_input=0.8,
scale_y_input=0.8,
x_loc=30,
y_loc=10,
)
panel_4b = make_figure_panel(
"../LV_analysis/highweight_LV_coverage.svg",
scale_x_input=0.8,
scale_y_input=0.8,
x_loc=350,
y_loc=10,
)
panel_4c = make_figure_panel(
"../LV_analysis/weight_dist_LV61.svg",
scale_x_input=0.8,
scale_y_input=0.8,
x_loc=700,
y_loc=10,
)
## TO DO
# Add network results when ready
panel_4a_label = sg.TextElement(10, 20, "A", size=18, weight="bold", font="Verdana")
panel_4b_label = sg.TextElement(350, 20, "B", size=18, weight="bold", font="Verdana")
panel_4c_label = sg.TextElement(700, 20, "C", size=18, weight="bold", font="Verdana")
figure_4 = sg.SVGFigure("1200", "500")
figure_4.append(
[
etree.Element("rect", {"width": "100%", "height": "100%", "fill": "white"}),
panel_4a,
panel_4b,
panel_4c,
panel_4a_label,
panel_4b_label,
panel_4c_label,
]
)
display(SVG(figure_4.to_str()))
# save generated SVG files
figure_4.save("output/figure_4.svg")
# ## Figure 5
# Create panels for figure 5
panel_5a = make_figure_panel(
os.path.join(local_directory, "cbrAB_simpler_arg_model.svg"),
scale_x_input=2,
scale_y_input=2,
x_loc=30,
y_loc=20,
)
panel_5b = make_figure_panel(
os.path.join(local_directory, "template_zscore_volcano_ArgR_E-GEOD-33245.svg"),
scale_x_input=0.8,
scale_y_input=0.8,
x_loc=300,
y_loc=10,
)
panel_5c = make_figure_panel(
os.path.join(local_directory, "template_traditional_volcano_ArgR_E-GEOD-33245.svg"),
scale_x_input=0.8,
scale_y_input=0.8,
x_loc=700,
y_loc=10,
)
panel_5d = make_figure_panel(
os.path.join(local_directory, "2.17.21_arg_growth.svg"),
scale_x_input=0.5,
scale_y_input=0.5,
x_loc=300,
y_loc=300,
)
panel_5a_label = sg.TextElement(10, 20, "A", size=18, weight="bold", font="Verdana")
panel_5b_label = sg.TextElement(300, 20, "B", size=18, weight="bold", font="Verdana")
panel_5c_label = sg.TextElement(700, 20, "C", size=18, weight="bold", font="Verdana")
panel_5d_label = sg.TextElement(300, 300, "D", size=18, weight="bold", font="Verdana")
figure_5 = sg.SVGFigure("1200", "800")
figure_5.append(
[
etree.Element("rect", {"width": "100%", "height": "100%", "fill": "white"}),
panel_5a,
panel_5b,
panel_5c,
panel_5d,
panel_5a_label,
panel_5b_label,
panel_5c_label,
panel_5d_label,
]
)
display(SVG(figure_5.to_str()))
# save generated SVG files
figure_5.save("output/figure_5.svg")
# ## Supplement 1
# Create panels for Supplement 1
panel_S1a = make_figure_panel(
"../human_general_analysis/gene_ranking_log2FoldChange.svg",
scale_x_input=0.8,
scale_y_input=0.8,
x_loc=30,
y_loc=20,
)
panel_S1b = make_figure_panel(
"../human_cancer_analysis/gene_ranking_logFC.svg",
scale_x_input=0.8,
scale_y_input=0.8,
x_loc=400,
y_loc=10,
)
panel_S1c = make_figure_panel(
"../human_general_analysis/pathway_ranking_padj.svg",
scale_x_input=0.8,
scale_y_input=0.8,
x_loc=30,
y_loc=400,
)
panel_S1d = make_figure_panel(
"../human_cancer_analysis/pathway_ranking_padj.svg",
scale_x_input=0.8,
scale_y_input=0.8,
x_loc=400,
y_loc=400,
)
panel_S1a_label = sg.TextElement(10, 20, "A", size=18, weight="bold", font="Verdana")
panel_S1b_label = sg.TextElement(400, 20, "B", size=18, weight="bold", font="Verdana")
panel_S1c_label = sg.TextElement(10, 400, "C", size=18, weight="bold", font="Verdana")
panel_S1d_label = sg.TextElement(400, 400, "D", size=18, weight="bold", font="Verdana")
figure_S1 = sg.SVGFigure("800", "800")
figure_S1.append(
[
etree.Element("rect", {"width": "100%", "height": "100%", "fill": "white"}),
panel_S1a,
panel_S1b,
panel_S1c,
panel_S1d,
panel_S1a_label,
panel_S1b_label,
panel_S1c_label,
panel_S1d_label,
]
)
display(SVG(figure_S1.to_str()))
# save generated SVG files
figure_S1.save("output/figure_S1.svg")
# ## Output png version
# !inkscape --export-png=output/figure_1.png output/figure_1.svg
# !inkscape --export-png=output/figure_2.png output/figure_2.svg
# !inkscape --export-png=output/figure_3.png output/figure_3.svg
# !inkscape --export-png=output/figure_4.png output/figure_4.svg
# !inkscape --export-png=output/figure_5.png output/figure_5.svg
# !inkscape --export-png=output/figure_S1.png output/figure_S1.svg
```
#### File: generic-expression-patterns/generic_expression_patterns_modules/lv.py
```python
from glob import glob
import pandas as pd
import seaborn as sns
def get_generic_specific_genes(summary_data, generic_threshold):
"""
This function returns a dictionary of generic genes and other
(non-generic) genes, based on the statistics contained within the
summary dataframes
Here genes are determined as generic based on their
ranking across multiple simulated experiments (i.e. generic genes
are those that are high ranked = genes were found to be consistently
changed across multiple simulated experiments. All other genes
are 'other'
Arguments
---------
summary_data: df
Dataframe containing gene summary statistics
generic_threshold: int (0,100)
Threshold to use to define generic genes. Based on
Percentile (simulated) column
"""
print(summary_data.shape)
# Generic genes
ls_generic_genes = list(
(
summary_data[summary_data["Percentile (simulated)"] >= generic_threshold]
.set_index("Gene ID")
.index
)
)
print(f"No. of generic genes: {len(ls_generic_genes)}")
# Other (non-generic) genes
ls_other_genes = list(
(
summary_data[summary_data["Percentile (simulated)"] < generic_threshold]
.set_index("Gene ID")
.index
)
)
print(f"No. of other genes: {len(ls_other_genes)}")
# Create dictionary
dict_genes = {
"generic": ls_generic_genes,
"other": ls_other_genes,
}
return dict_genes
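# Illustrative usage (hypothetical threshold): with generic_threshold=80, the
# returned dict's 'generic' list holds genes whose 'Percentile (simulated)'
# value is at least 80; everything else falls under 'other'.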
def process_generic_specific_gene_lists(dict_genes, LV_matrix):
"""
This function returns the dictionary of generic genes and specific genes
that were included in the multiPLIER or eADAGE analyses. We want to make
sure that our gene lists obtained from SOPHIE vs multiPLIER or eADAGE
are consistent. This will prevent indexing by a gene that doesn't
exist and resulting in NA values downstream.
Arguments
---------
dict_genes: dict
Dictionary mapping gene ids to label="generic", "other"
LV_matrix: df
Dataframe containing contribution of gene to LV (gene x LV matrix)
"""
model_genes = list(LV_matrix.index)
processed_dict_genes = {}
for gene_label, ls_genes in dict_genes.items():
ls_genes_processed = list(set(model_genes).intersection(ls_genes))
processed_dict_genes[gene_label] = ls_genes_processed
return processed_dict_genes
def get_nonzero_LV_coverage(dict_genes, LV_matrix):
"""
This function counts the number of LVs that each
gene is present in (i.e. has a nonzero contribution).
This function returns a dictionary [gene id]: number of LVs
Arguments
---------
dict_genes: dict
Dictionary mapping gene ids to label="generic", "other"
LV_matrix: df
Dataframe containing contribution of gene to LV (gene x LV matrix)
"""
dict_nonzero_coverage = {}
for gene_label, ls_genes in dict_genes.items():
LV_series = (LV_matrix.loc[ls_genes] != 0).sum(axis=1)
dict_nonzero_coverage[gene_label] = LV_series
return dict_nonzero_coverage
def get_highweight_LV_coverage(dict_genes, LV_matrix, quantile=0.9):
"""
    This function counts the number of LVs to which each gene contributes
    strongly (i.e. has a large negative or positive weight).
This function returns a dictionary [gene id]: number of LVs
Note: Using the quantile means that each LV has the same number
of high weight values. Also here we are using a quantile cutoff
since our distribution is not normal (exponential PDF)
Arguments
---------
dict_genes: dict
Dictionary mapping gene ids to label="generic", "other"
LV_matrix: df
Dataframe containing contribution of gene to LV (gene x LV matrix)
quantile: float(0,1)
Quantile to use to threshold weights. Default set to 90th quantile.
"""
thresholds_per_LV = LV_matrix.quantile(quantile)
# Manually checked that genes selected as high weight
# are above threshold using below print statements
# print(thresholds_per_LV)
# print(LV_matrix)
# print(
# LV_matrix.loc[
# (LV_matrix.abs() > thresholds_per_LV)["Node2"].values, "Node2"
# ]
# )
dict_highweight_coverage = {}
for gene_label, ls_genes in dict_genes.items():
LV_series = (LV_matrix.abs() > thresholds_per_LV).sum(axis=1)[ls_genes]
dict_highweight_coverage[gene_label] = LV_series
return dict_highweight_coverage
def get_highweight_LV_coverage_pseudomonas(dict_genes, LV_matrix):
"""
    This function counts the number of LVs to which each gene contributes
    strongly (i.e. has a large negative or positive weight).
The high weight genes are determined based on the eADAGE paper
(https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5532071/).
Though the method is described in an earlier paper
(https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5700673/).
In this paper genes are considered high weight if their weight
is at least 2.5 standard deviations from the mean since weights
are normally distributed.
This function returns a dictionary [gene id]: number of LVs
Arguments
---------
dict_genes: dict
Dictionary mapping gene ids to label="generic", "other"
LV_matrix: df
Dataframe containing contribution of gene to LV (gene x LV matrix)
"""
eADAGE_std_cutoff = 2.5
mean_per_LV = LV_matrix.mean()
std_per_LV = LV_matrix.std() * eADAGE_std_cutoff
upper_threshold = mean_per_LV + std_per_LV
lower_threshold = mean_per_LV - std_per_LV
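    # Illustrative example: for an LV with mean weight 0 and standard deviation
    # 0.1, genes with weight above 0.25 or below -0.25 count as high weight.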
# Manually checked that genes selected as high weight
# are above threshold using below print statements
# print(upper_threshold)
# print(lower_threshold)
# print(LV_matrix.head(10))
# print((LV_matrix > upper_threshold).head(10)
# print((LV_matrix > upper_threshold).loc["PA0007"].sum())
# print((LV_matrix > upper_threshold).sum(axis=1).head(10))
dict_highweight_coverage = {}
for gene_label, ls_genes in dict_genes.items():
HW_pos = (LV_matrix > upper_threshold).sum(axis=1)[ls_genes]
HW_neg = (LV_matrix < lower_threshold).sum(axis=1)[ls_genes]
LV_series = HW_pos.add(HW_neg)
dict_highweight_coverage[gene_label] = LV_series
return dict_highweight_coverage
def assemble_coverage_df(dict_genes, nonzero_dict, highweight_dict):
"""
This function assembles the coverage dfs into
one df to be used for plotting
Arguments
---------
dict_genes: dict
Dictionary mapping gene ids to label="generic", "other"
nonzero_dict: dict
Dictionary mapping [gene type]: number of LVs present
highweight_dict: dict
Dictionary mapping [gene type]: number of LVs gene is highweight in
"""
all_coverage = []
for gene_label in dict_genes.keys():
merged_df = pd.DataFrame(
nonzero_dict[gene_label], columns=["nonzero LV coverage"]
).merge(
pd.DataFrame(
highweight_dict[gene_label], columns=["highweight LV coverage"]
),
left_index=True,
right_index=True,
)
merged_df["gene type"] = gene_label
all_coverage.append(merged_df)
all_coverage_df = pd.concat(all_coverage)
return all_coverage_df
def get_prop_highweight_generic_genes(dict_genes, LV_matrix, quantile=0.9):
"""
This function returns a dictionary mapping
[LV id]: proportion of high weight generic genes
Arguments
---------
Arguments
---------
dict_genes: dict
Dictionary mapping gene ids to label="generic", "other"
LV_matrix: df
Dataframe containing contribution of gene to LV (gene x LV matrix)
quantile: float(0,1)
Quantile to use to threshold weights. Default set to 90th quantile.
"""
prop_highweight_generic_dict = {}
generic_gene_ids = dict_genes["generic"]
thresholds_per_LV = LV_matrix.quantile(quantile)
# print(thresholds_per_LV)
num_highweight_genes = (LV_matrix.abs() > thresholds_per_LV).sum()[0]
# Manually checks
# Note: all LV have the same number of total high weight genes since
# we used quantile here
# print((LV_matrix.abs() > thresholds_per_LV).sum())
# print(num_highweight_genes)
for LV_id in LV_matrix.columns:
# print(thresholds_per_LV[LV_id])
highweight_genes_per_LV = list(
LV_matrix[(LV_matrix.abs() > thresholds_per_LV)[LV_id] == True].index
)
# print(LV_matrix.abs()[LV_id])
# print((LV_matrix.abs() > thresholds_per_LV)[LV_id])
# print(highweight_genes_per_LV)
# break
num_highweight_generic_genes = len(
set(generic_gene_ids).intersection(highweight_genes_per_LV)
)
prop_highweight_generic_genes = (
num_highweight_generic_genes / num_highweight_genes
)
prop_highweight_generic_dict[LV_id] = prop_highweight_generic_genes
return prop_highweight_generic_dict
def get_prop_highweight_generic_genes_pseudomonas(dict_genes, LV_matrix):
"""
This function returns a dictionary mapping
[LV id]: proportion of high weight generic genes
Arguments
---------
dict_genes: dict
Dictionary mapping gene ids to label="generic", "other"
LV_matrix: df
Dataframe containing contribution of gene to LV (gene x LV matrix)
"""
eADAGE_std_cutoff = 2.5
prop_highweight_generic_dict = {}
generic_gene_ids = dict_genes["generic"]
mean_per_LV = LV_matrix.mean()
std_per_LV = LV_matrix.std() * eADAGE_std_cutoff
upper_threshold = mean_per_LV + std_per_LV
lower_threshold = mean_per_LV - std_per_LV
num_highweight_pos_genes = (LV_matrix > upper_threshold).sum()
num_highweight_neg_genes = (LV_matrix < lower_threshold).sum()
num_highweight_genes = num_highweight_pos_genes.add(num_highweight_neg_genes)
# print((LV_matrix > upper_threshold).sum())
# print((LV_matrix < lower_threshold).sum())
# print(num_highweight_genes)
for LV_id in LV_matrix.columns:
# print(LV_matrix[LV_id])
# print(upper_threshold[LV_id])
# print(lower_threshold[LV_id])
pos_highweight_genes_per_LV = list(
LV_matrix[(LV_matrix > upper_threshold)[LV_id] == True].index
)
neg_highweight_genes_per_LV = list(
LV_matrix[(LV_matrix < lower_threshold)[LV_id] == True].index
)
highweight_genes_per_LV = (
pos_highweight_genes_per_LV + neg_highweight_genes_per_LV
)
# print(pos_highweight_genes_per_LV)
# print(neg_highweight_genes_per_LV)
# print(highweight_genes_per_LV)
# print(num_highweight_genes[LV_id])
num_highweight_generic_genes = len(
set(generic_gene_ids).intersection(highweight_genes_per_LV)
)
prop_highweight_generic_genes = (
num_highweight_generic_genes / num_highweight_genes[LV_id]
)
prop_highweight_generic_dict[LV_id] = prop_highweight_generic_genes
return prop_highweight_generic_dict
def create_LV_df(
prop_highweight_generic_dict,
multiplier_model_summary,
proportion_generic,
out_filename,
):
"""
This function creates and saves dataframe that contains the metadata
associated with the LV that is contributed most by generic genes
Note: This is only used for multiPLIER model, where we have
information of LV and pathways associations.
Arguments
---------
prop_highweight_generic_dict: dict
Dictionary mapping LV_id: proportion of generic genes that are high weight
multiplier_model_summary: df
Dataframe containing summary statistics for which pathways LV are significantly associated
proportion_generic: float
Threshold for the proportion of high weight genes to be generic in a LV
"""
generic_LV = []
for k, v in prop_highweight_generic_dict.items():
if v > proportion_generic:
print(k, v)
generic_LV.append(k)
if len(generic_LV) > 0:
LV_ids = [int(i.replace("LV", "")) for i in generic_LV]
LV_df = multiplier_model_summary[
multiplier_model_summary["LV index"].isin(LV_ids)
]
LV_df.to_csv(out_filename, sep="\t")
else:
print("No LVs with high proportion of generic genes")
def plot_dist_weights(
LV_id, LV_matrix, shared_genes, num_genes, gene_id_mapping, out_filename
):
"""
This function creates a distribution of weights for selected
`LV_id`. This allows us to explore the contribution of genes
to this LV
Arguments
----------
LV_id: str
identifier for LV
LV_matrix: df
gene x LV matrix with weight values
shared_genes: list
list of genes that are shared by the multiPLIER or eADAGE analysis
(so they have LV weight information) and SOPHIE analysis (so they have
generic label)
num_genes: int
Number of genes to display
gene_id_mapping: df
dataframe containing mapping between genes and "generic" or "other"
label
out_filename: str
file to save plot to
"""
    # Name the index so reset_index() below yields a "geneID" column
    LV_matrix.index.rename("geneID", inplace=True)
    # Get the num_genes genes with the largest absolute weights
top_genes = list(LV_matrix.loc[shared_genes, LV_id].abs().nlargest(num_genes).index)
weight_df = LV_matrix.loc[top_genes].reset_index()
print(weight_df[LV_id])
# Add label for if generic or not
gene_ids = list(weight_df["geneID"].values)
weight_df["gene type"] = list(gene_id_mapping.loc[gene_ids, "gene type"].values)
fig = sns.barplot(
data=weight_df,
x=LV_id,
y="geneID",
hue="gene type",
hue_order=["generic", "other"],
dodge=False,
palette=["#2c7fb8", "lightgrey"],
)
fig.set_xlabel("Weight", fontsize=14, fontname="Verdana")
fig.set_ylabel("Gene", fontsize=14, fontname="Verdana")
fig.set_title(f"Weight distribution for {LV_id}", fontsize=14, fontname="Verdana")
fig.figure.savefig(
out_filename,
format="svg",
bbox_inches="tight",
transparent=True,
pad_inches=0,
dpi=300,
)
def plot_dist_weights_pseudomonas(
LV_id, LV_matrix, shared_genes, num_genes, gene_id_mapping, out_filename
):
"""
This function creates a distribution of weights for selected
`LV_id`. This allows us to explore the contribution of genes
to this LV.
Here we are looking at only those HW genes identified using
2.5 standard deviation from the mean weight at the `LV_id`
Arguments
----------
LV_id: str
identifier for LV
LV_matrix: df
gene x LV matrix with weight values
shared_genes: list
list of genes that are shared by the multiPLIER or eADAGE analysis
(so they have LV weight information) and SOPHIE analysis (so they have
generic label)
num_genes: int
Number of genes to display
gene_id_mapping: df
dataframe containing mapping between genes and "generic" or "other"
label
out_filename: str
file to save plot to
"""
# Get weight for LV_id
LV_id_weight = LV_matrix[LV_id]
# Calculate thresholds
eADAGE_std_cutoff = 2.5
mean_weight = LV_id_weight.mean()
std_weight = LV_id_weight.std() * eADAGE_std_cutoff
upper_threshold = mean_weight + std_weight
lower_threshold = mean_weight - std_weight
# Get high weight genes
HW_pos_genes = list(LV_id_weight[(LV_id_weight > upper_threshold).values].index)
HW_neg_genes = list(LV_id_weight[(LV_id_weight < lower_threshold).values].index)
HW_genes = HW_pos_genes + HW_neg_genes
# Sort HW genes by abs weight
sorted_HW_genes = list(
LV_id_weight[HW_genes].abs().sort_values(ascending=False).index
)[0:num_genes]
# Get gene with num_gene top weights
LV_matrix.index.rename("geneID", inplace=True)
weight_df = LV_matrix.loc[sorted_HW_genes, LV_id].reset_index()
print(weight_df)
# Add label for if generic or not
gene_ids = list(weight_df["geneID"].values)
weight_df["gene type"] = list(gene_id_mapping.loc[gene_ids, "gene type"].values)
fig = sns.barplot(
data=weight_df,
x=LV_id,
y="geneID",
hue="gene type",
hue_order=["generic", "other"],
dodge=False,
palette=["#2c7fb8", "lightgrey"],
)
fig.set_xlabel("Weight", fontsize=14, fontname="Verdana")
fig.set_ylabel("Gene", fontsize=14, fontname="Verdana")
fig.set_title(f"Weight distribution for {LV_id}", fontsize=14, fontname="Verdana")
fig.figure.savefig(
out_filename,
format="svg",
bbox_inches="tight",
transparent=True,
pad_inches=0,
dpi=300,
)
``` |
{
"source": "jjc2718/mpmp",
"score": 3
} |
#### File: 00_download_data/nbconverted/1B_preprocess_27k_methylation_data.py
```python
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import mpmp.config as cfg
import mpmp.utilities.tcga_utilities as tu
# ### Read TCGA Barcode Curation Information
#
# Extract information from TCGA barcodes - `cancer-type` and `sample-type`. See https://github.com/cognoma/cancer-data for more details
# In[2]:
(cancer_types_df,
cancertype_codes_dict,
sample_types_df,
sampletype_codes_dict) = tu.get_tcga_barcode_info()
cancer_types_df.head(2)
# In[3]:
sample_types_df.head(2)
# ### Load and process methylation data
# In[4]:
# first load manifest file, this tells us the filenames of the raw data files
manifest_df = pd.read_csv(os.path.join(cfg.data_dir, 'manifest.tsv'),
sep='\t', index_col=0)
manifest_df.head(2)
# In[5]:
tcga_methylation_df = (
pd.read_csv(os.path.join(cfg.raw_data_dir, manifest_df.loc['methylation_27k'].filename),
index_col=0, sep='\t')
.transpose()
)
tcga_methylation_df.index.rename('sample_id', inplace=True)
print(tcga_methylation_df.shape)
tcga_methylation_df.iloc[:5, :5]
# In[6]:
# how many missing values does each probe (column) have?
# TODO could move analyses to a separate script (so preprocessing is more
# straightforward/less cluttered)
probe_na = tcga_methylation_df.isna().sum()
print(probe_na.shape)
probe_na.sort_values(ascending=False).head()
# In[7]:
# how many missing values does each sample (row) have?
sample_na = tcga_methylation_df.transpose().isna().sum()
print(sample_na.shape)
sample_na.sort_values(ascending=False).head()
# In[8]:
# how many probes (columns) have missing values?
sns.set({'figure.figsize': (15, 5)})
fig, axarr = plt.subplots(1, 2)
# plot distribution of all NA sample probes, with log scale y axis
probe_na.plot.hist(bins=20, ax=axarr[0])
axarr[0].set_yscale('log')
axarr[0].set_xlabel('Number of NA samples')
axarr[0].set_ylabel('Probe count')
axarr[0].set_title('Methylation NA count per probe, all values')
# hard to see distribution of few NA sample probes, so filter and plot
# without log scale
#
# this plot answers the question: how many additional features/predictors
# would we gain by imputing values for probes with only very few NAs?
probe_na_low = probe_na[probe_na.values < 10]
probe_na_low.plot.hist(bins=len(probe_na_low.unique()), ax=axarr[1])
axarr[1].set_xlabel('Number of NA samples')
axarr[1].set_xticks(range(10))
axarr[1].set_ylabel('Probe count')
axarr[1].set_title('Methylation NA count per probe, <10 NA values')
# In[9]:
# as an alternate approach to imputation, we could filter out "bad" samples,
# or samples with many NA probes
#
# we'll start by asking: how many samples (rows) have missing values?
sns.set({'figure.figsize': (15, 5)})
fig, axarr = plt.subplots(1, 2)
# plot distribution of all NA probe samples, with log scale y axis
sample_na.plot.hist(bins=20, ax=axarr[0])
axarr[0].set_yscale('log')
axarr[0].set_xlabel('Number of NA probes')
axarr[0].set_ylabel('Sample count')
axarr[0].set_title('Methylation NA count per sample, all values')
# let's look in more detail at the "bad" samples, defined as samples with NA
# counts over some min threshold
sample_na_high = sample_na[sample_na.values > 500]
sample_na_high.plot.hist(bins=20, ax=axarr[1])
axarr[1].set_xlabel('Number of NA probes')
axarr[1].set_ylabel('Sample count')
axarr[1].set_title('Methylation NA count per sample, >500 NA values')
# In[10]:
# now, the question we want to answer is: if we remove "bad" samples,
# how many more valid probes do we get?
#
# to explore this, we'll remove samples in descending order of their
# NA count, and see how many additional probes (predictors) this filtering
# gives us
def filter_bad_samples(methylation_df, bad_samples):
return (
methylation_df.copy()
.loc[~methylation_df.index.isin(bad_samples)]
.dropna(axis='columns')
)
def count_probes_for_range(sample_counts):
probe_sample_count = []
sample_sorted = sample_na.sort_values(ascending=False)
for filter_count in sample_counts:
bad_samples = sample_sorted.iloc[:filter_count].index.values
filtered_df = filter_bad_samples(tcga_methylation_df, bad_samples)
probe_sample_count.append((filter_count, len(filtered_df.columns)))
return list(zip(*probe_sample_count))
probe_counts_small = count_probes_for_range(range(20))
probe_counts_large = count_probes_for_range(range(0, 510, 10))
# In[11]:
sns.set({'figure.figsize': (12, 6)})
fig, axarr = plt.subplots(2, 1)
sns.lineplot(x=probe_counts_small[0], y=probe_counts_small[1], ax=axarr[0])
axarr[0].set_xticks(probe_counts_small[0])
axarr[0].set_xlabel('Number of samples removed')
axarr[0].set_ylabel('Number of valid probes')
axarr[0].set_title('Samples removed vs. valid probes, small range')
sns.lineplot(x=probe_counts_large[0], y=probe_counts_large[1], ax=axarr[1])
axarr[1].set_xlabel('Number of samples removed')
axarr[1].set_ylabel('Number of valid probes')
axarr[1].set_title('Samples removed vs. valid probes, large range')
plt.tight_layout()
# In[12]:
# remove 10 samples, then impute for probes with 1 or 2 NA values
n_filter = 10
n_impute = 5
samples_sorted = sample_na.sort_values(ascending=False)
output_dir = os.path.join(cfg.data_dir, 'methylation_preprocessed')
os.makedirs(output_dir, exist_ok=True)
def filter_na_samples(methylation_df, bad_samples):
# don't drop NA columns, we'll do that after imputation
return (
methylation_df.copy()
.loc[~methylation_df.index.isin(bad_samples)]
)
def impute_leq(methylation_df, n_na):
if n_na == 0:
return methylation_df
else:
return methylation_df.fillna(methylation_df.mean(), limit=n_na)
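# Note: fillna with limit=n_na imputes at most n_na missing values per probe
# (column) using that probe's mean; probes with more NAs keep some NaNs and are
# removed by the dropna(axis='columns') call below.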
# filter, impute, drop NA columns
print(tcga_methylation_df.shape)
samples_for_count = samples_sorted.iloc[:n_filter].index.values
tcga_methylation_df = filter_na_samples(tcga_methylation_df,
samples_for_count)
print(tcga_methylation_df.shape)
tcga_methylation_df = (
impute_leq(tcga_methylation_df, n_impute).dropna(axis='columns')
)
print(tcga_methylation_df.shape)
# update sample IDs to remove multiple samples measured on the same tumor
# and to map with the clinical information
tcga_methylation_df.index = tcga_methylation_df.index.str.slice(start=0, stop=15)
tcga_methylation_df = tcga_methylation_df.loc[~tcga_methylation_df.index.duplicated(), :]
print(tcga_methylation_df.shape)
filtered_file = os.path.join(output_dir,
'methylation_processed_n{}_i{}.tsv.gz'.format(n_filter, n_impute))
print(filtered_file)
# In[ ]:
tcga_methylation_df.to_csv(filtered_file, sep='\t', float_format='%.3g')
# ### Process TCGA cancer type and sample type info from barcodes
#
# See https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/tissue-source-site-codes for more details.
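# In[ ]:
# illustrative sketch (not part of the original pipeline): TCGA barcodes
# encode the tissue source site and sample type at fixed positions, which is
# what the sampletype_codes_dict / cancertype_codes_dict lookups below rely on
_example_id = 'TCGA-02-0001-01'        # project-TSS-participant-sample
print(_example_id.split('-')[1])       # '02' -> tissue source site, maps to a cancer type
print(_example_id.split('-')[3][:2])   # '01' -> sample type code (01 = primary tumor)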
# In[13]:
# get sample info and save to file
tcga_id = tu.get_and_save_sample_info(tcga_methylation_df,
sampletype_codes_dict,
cancertype_codes_dict,
training_data='me_27k')
print(tcga_id.shape)
tcga_id.head()
# In[14]:
# get cancer type counts and save to file
cancertype_count_df = (
pd.DataFrame(tcga_id.cancer_type.value_counts())
.reset_index()
.rename({'index': 'cancertype', 'cancer_type': 'n ='}, axis='columns')
)
file = os.path.join(cfg.sample_info_dir, 'tcga_me_27k_sample_counts.tsv')
cancertype_count_df.to_csv(file, sep='\t', index=False)
cancertype_count_df.head()
# ### Dimension reduction
#
# Compress the data using PCA with various dimensions, and save the results to tsv files.
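# In[ ]:
# minimal illustration (hedged: random data, not the real helper) of the
# compression that tu.compress_and_save_data presumably performs internally:
# standardize the features, then project onto the top n_pcs principal components
from sklearn.preprocessing import StandardScaler as _Scaler
from sklearn.decomposition import PCA as _PCA
_X = np.random.RandomState(0).randn(50, 200)   # 50 samples x 200 features
_X_pcs = _PCA(n_components=10, random_state=0).fit_transform(_Scaler().fit_transform(_X))
print(_X_pcs.shape)                            # (50, 10)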
# In[15]:
# take PCA + save to file, for equal comparison with the other data types
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
pca_dir = os.path.join(cfg.data_dir, 'me_compressed')
os.makedirs(pca_dir, exist_ok=True)
n_pcs_list = [100, 1000, 5000]
for n_pcs in n_pcs_list:
print(n_pcs)
tu.compress_and_save_data('me_27k',
tcga_methylation_df,
pca_dir,
n_pcs,
standardize_input=True,
verbose=True,
save_variance_explained=True)
# In[16]:
# plot PCA variance explained
sns.set({'figure.figsize': (15, 4)})
fig, axarr = plt.subplots(1, 3)
for ix, n_pcs in enumerate(n_pcs_list):
ve = np.loadtxt(
os.path.join(pca_dir, '{}_ve.tsv.gz'.format(
tu.get_compress_output_prefix('me_27k', n_pcs, cfg.default_seed, True)
))
)
sns.lineplot(x=range(len(ve)), y=np.cumsum(ve), ax=axarr[ix])
axarr[ix].set_title('{} PCs, variance explained: {:.4f}'.format(
n_pcs_list[ix], sum(ve, 0)))
axarr[ix].set_xlabel('# of PCs')
if ix == 0:
axarr[ix].set_ylabel('Cumulative variance explained')
plt.suptitle('27k methylation data, # PCs vs. variance explained')
plt.subplots_adjust(top=0.85)
```
#### File: 00_download_data/nbconverted/2_sample_intersection.py
```python
import os
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from venn import venn, pseudovenn, generate_petal_labels
import upsetplot as up
import mpmp.config as cfg
import mpmp.utilities.data_utilities as du
# In[2]:
# if True, save figures to ./images directory
SAVE_FIGS = True
# In[3]:
# map data types to readable names
# TODO: store this somewhere central
data_map = {
'expression': 'gene expression',
'me_27k': '27k methylation',
'me_450k': '450k methylation',
'rppa': 'RPPA',
'mirna': 'microRNA',
'mut_sigs': 'mut sigs',
}
# get sample list for each -omics data type
sample_lists = {}
for training_data, sample_info_file in cfg.sample_infos.items():
samples = pd.read_csv(sample_info_file, sep='\t', index_col=0).index
try:
sample_lists[data_map[training_data]] = set(samples)
except KeyError:
# bias-corrected results, ignore them here
import sys
print(training_data, file=sys.stderr)
continue
# In[4]:
# add mutation data to sample list
pancan_data = du.load_pancancer_data()
(sample_freeze_df,
mutation_df,
copy_loss_df,
copy_gain_df,
mut_burden_df) = pancan_data
print(sample_freeze_df.shape)
print(mutation_df.shape)
print(copy_loss_df.shape)
print(copy_gain_df.shape)
print(mut_burden_df.shape)
# In[5]:
# all these dfs contain the same samples, so just use one of the indexes
sample_lists['mutation'] = set(mutation_df.index)
# In[6]:
# counts per data type
print('\n'.join(['{}\t{}'.format(n, len(v)) for n, v in sample_lists.items()]))
# ### Count overlap between gene expression and mutation data
# We'll start by just counting the number of samples that have data for gene expression and mutations, corresponding to the first figure panel.
# In[7]:
def series_from_samples(samples, labels):
# use pyvenn to generate overlaps/labels from sample IDs
venn_labels = generate_petal_labels(samples)
# generate format upset plot package expects
df_ix = [[(i == '1') for i in list(b)] + [int(v)] for b, v in venn_labels.items()]
# generate dataframe from list
rename_map = {ix: labels[ix] for ix in range(len(labels))}
index_names = list(rename_map.values())
rename_map[len(labels)] = 'id'
df = (pd.DataFrame(df_ix)
.rename(columns=rename_map)
.set_index(index_names)
)
# and return as series
return df['id']
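# quick toy check (illustrative only, not TCGA data): the returned series is
# indexed by boolean membership flags and counts each venn region, which is
# the input format that upsetplot's plot() expects
_toy = series_from_samples([{'a', 'b', 'c'}, {'b', 'c', 'd'}], ['left', 'right'])
print(_toy)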
# In[8]:
# probably stick with venn diagram here
sns.set({'figure.figsize': (8, 10)})
sns.set_style('white')
# get only sample lists from gene expression and mutation
labels = ['gene expression', 'mutation']
label_map = {l: sample_lists[l] for l in labels}
venn(label_map)
plt.title('TCGA sample intersections, gene expression data', size=14)
# ### Count overlap between gene expression, methylation, and mutation datasets
# In[9]:
sns.set_style('white')
labels = ['gene expression', 'mutation', '27k methylation', '450k methylation']
samples = [sample_lists[l] for l in labels]
upset_series = series_from_samples(samples, labels)
upset_series[upset_series != 0].sort_values().head(20)
# In[10]:
subplots = up.plot(upset_series[upset_series != 0], element_size=60)
plt.title('TCGA sample intersections, expression/methylation datasets', size=13)
plt.ylabel('Intersection size', size=13)
plt.yticks(fontsize=13)
subplots['matrix'].set_yticklabels(labels=subplots['matrix'].get_yticklabels(), fontsize=12)
# we can clean up whitespace below in figure assembly script
if SAVE_FIGS:
images_dir = Path(cfg.images_dirs['data'])
images_dir.mkdir(exist_ok=True)
plt.savefig(images_dir / 'expression_me_overlap_upset.svg', bbox_inches='tight')
plt.savefig(images_dir / 'expression_me_overlap_upset.png',
dpi=300, bbox_inches='tight')
# ### Count overlap between all datasets
# In[11]:
sns.set_style('white')
labels = ['gene expression', 'mutation', '27k methylation', '450k methylation',
'RPPA', 'microRNA', 'mut sigs']
samples = [sample_lists[l] for l in labels]
upset_series = series_from_samples(samples, labels)
upset_series[upset_series >= 100].sort_values().head(20)
# In[12]:
subplots = up.plot(upset_series[upset_series >= 100], element_size=60)
plt.title('TCGA sample intersections, all datasets', size=13)
plt.ylabel('Intersection size', size=13)
plt.yticks(fontsize=13)
subplots['matrix'].set_yticklabels(labels=subplots['matrix'].get_yticklabels(), fontsize=12)
if SAVE_FIGS:
images_dir = Path(cfg.images_dirs['data'])
images_dir.mkdir(exist_ok=True)
plt.savefig(images_dir / 'all_overlap_upset.svg', bbox_inches='tight')
plt.savefig(images_dir / 'all_overlap_upset.png',
dpi=300, bbox_inches='tight')
# ### Cancer type analysis
#
# The sample counts by themselves aren't that informative. More specifically, we want to know which cancer types are getting dropped when we take the overlap between data types. That is, are there certain cancer types that are or are not generally in the overlap, or are the samples we filter out roughly uniformly distributed between cancer types?
# In[13]:
# get sample info (sample IDs and cancer types) for each data modality
# there are no samples for which we have mutation data and not other data types,
# so we don't need a separate sample info file for the mutation data
sample_info_dfs = {}
for data_type in cfg.sample_infos.keys():
sample_info_dfs[data_type] = pd.read_csv(cfg.sample_infos[data_type],
sep='\t', index_col=0)
print(
sample_info_dfs['expression'].shape,
sample_info_dfs['me_27k'].shape,
sample_info_dfs['me_450k'].shape,
sample_info_dfs['mut_sigs'].shape,
sample_info_dfs['rppa'].shape
)
sample_info_dfs['expression'].head()
# In[14]:
# the goal here is to examine how the proportion of cancer types changes when we add
# new data modalities
# here, we specify these manually since the order matters
# (i.e. order in data_types => order in which new data is "added")
data_types = ['expression', 'me_27k', 'me_450k', 'mut_sigs', 'mirna', 'rppa']
exp_cancer_types = (sample_info_dfs['expression']
.groupby('cancer_type')
.size()
.sort_values(ascending=False)
)
mutation_samples = sample_lists['mutation']
exp_mut_cancer_types = (sample_info_dfs['expression']
.loc[mutation_samples]
.groupby('cancer_type')
.size()
.sort_values(ascending=False)
)
diff_df = exp_cancer_types - exp_mut_cancer_types
# In[15]:
# check these are all in expression data (they should be)
cur_samples = set(mutation_samples)
diff_df = pd.DataFrame(diff_df, columns=['mutation'])
cur_counts = exp_mut_cancer_types
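# at each step, intersect the running sample set with the next data type's
# samples, count how many samples of each cancer type are lost relative to
# the previous step, and append that per-cancer-type drop as a new column of diff_df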
for ix, data_type in enumerate(data_types[1:], 1):
cur_samples = cur_samples.intersection(
sample_lists[data_map[data_type]]
)
print(data_type, len(cur_samples))
overlap_counts = (sample_info_dfs[data_type]
.loc[cur_samples]
.groupby('cancer_type')
.size()
.reindex(cur_counts.index)
.fillna(0)
.sort_values(ascending=False)
)
diff = (cur_counts - overlap_counts).astype(int)
cur_counts = overlap_counts
df = pd.DataFrame(diff, columns=[data_type])
diff_df = pd.concat((diff_df, df), axis=1)
diff_df = pd.concat((diff_df,
pd.DataFrame(
overlap_counts,
columns=['rppa_base']
)), axis=1).fillna(0).astype(int)
print(diff_df.shape)
diff_df.head(33)
# In[16]:
# make sure number of removed samples equals number of samples we started with
compare_df = pd.concat((
pd.DataFrame(
sample_info_dfs['expression'].groupby('cancer_type').size().fillna(0),
columns=['expression']
),
pd.DataFrame(
diff_df.sum(axis=1),
columns=['other']
)
), axis=1)
assert (compare_df.expression.values == compare_df.other.values).all()
# In[17]:
def flip(items, ncol):
# fill in legend by rows instead of columns
# https://stackoverflow.com/a/10101532
import itertools as it
return it.chain(*[items[i::ncol] for i in range(ncol)])
sns.set()
diff_df.T.plot.bar(stacked=True, figsize=(12, 6), linewidth=0)
h, l = plt.gca().get_legend_handles_labels()
plt.legend(flip(h, 8), flip(l, 8), bbox_to_anchor=(-0.025, -0.55),
loc='lower left', ncol=8, title='Cancer type')
plt.title('Samples dropped when taking data type overlap, by cancer type')
plt.ylabel('Sample count')
# In[18]:
# instead of plotting absolute number of each cancer type dropped at
# each step, plot the proportion of each cancer type (i.e. count divided
# by total sample count for that cancer type)
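# (equivalent to diff_df.div(diff_df.sum(axis=1), axis=0))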
diff_norm_df = diff_df / np.tile(diff_df.sum(axis=1).values, (diff_df.shape[1], 1)).T
diff_norm_df.head()
# In[19]:
sns.set()
diff_norm_df.T.plot.bar(stacked=True, figsize=(12, 6), linewidth=0)
h, l = plt.gca().get_legend_handles_labels()
plt.legend(flip(h, 8), flip(l, 8), bbox_to_anchor=(-0.025, -0.55),
loc='lower left', ncol=8, title='Cancer type')
plt.title('Proportion of samples dropped when taking data type overlap, by cancer type')
plt.ylabel('Proportion')
if SAVE_FIGS:
images_dir = Path(cfg.images_dirs['data'])
images_dir.mkdir(exist_ok=True)
plt.savefig(images_dir / 'cancer_type_proportions.svg', bbox_inches='tight')
plt.savefig(images_dir / 'cancer_type_proportions.png',
dpi=300, bbox_inches='tight')
```
#### File: 00_download_data/nbconverted/3_methylation_beta.py
```python
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from venn import venn
import mpmp.config as cfg
import mpmp.utilities.tcga_utilities as tu
# In[2]:
# if SAVE_RESULTS = True, overwrite existing data files generated by this script
# generating results can be slow, so for debugging/visualization without changing
# the data, it may make sense to set this to False
SAVE_RESULTS = False
# ### Load data
#
# Load methylation data (just 27K for now) and Illumina manifest data.
#
# The Illumina manifest maps probes to their probe type (type I or II), their chromosome/nearest gene, their functional classification (e.g. TSS, gene body, UTR, etc), and lots of other metadata about each probe.
# In[3]:
tcga_methylation_df = (
pd.read_csv(cfg.methylation_27k_data, index_col=0, sep='\t')
.transpose()
)
print(tcga_methylation_df.shape)
tcga_methylation_df.iloc[:5, :5]
# In[4]:
if os.path.isfile(cfg.methylation_manifest):
manifest_df = pd.read_csv(
cfg.methylation_manifest,
index_col=0
)
else:
# if manifest file hasn't already been downloaded, get it from Illumina website
print('loading manifest from URL')
manifest_df = pd.read_csv(
cfg.manifest_url,
        header=7,  # first 7 rows are file metadata; row 8 contains the column names
index_col=0
)
manifest_df.to_csv(cfg.methylation_manifest)
manifest_df.iloc[:5, :5]
# In[5]:
# look at overlap of probes in manifest and probes in TCGA dataset
# all probes in TCGA should be in manifest, but we just want to make sure
manifest_probes = manifest_df.index
tcga_probes = tcga_methylation_df.index
sns.set_style('white')
venn({'manifest': set(manifest_probes),
'tcga': set(tcga_probes)})
plt.title('Probe overlap between Illumina manifest and TCGA dataset')
# In[6]:
# get probe type (type I or type II) for TCGA probes from manifest
tcga_methylation_df = (tcga_methylation_df
.merge(manifest_df[['Infinium_Design_Type', 'CHR']],
left_index=True, right_index=True)
.astype({'CHR': 'str'})
.rename(columns={'Infinium_Design_Type': 'probe_type',
'CHR': 'chromosome'})
)
# check for NA rows, shouldn't be any
print('NA rows: {}'.format(
tcga_methylation_df.shape[0] - tcga_methylation_df.dropna().shape[0]))
tcga_methylation_df.iloc[:5, -5:]
# ### Probe filtering
#
# We want to try removing certain classes of methylation probes:
#
# * Probes on sex chromosomes (X or Y chromosome)
# * Cross-reactive probes (these were identified in [Chen et al. 2013](https://doi.org/10.4161/epi.23470), we downloaded them using the [maxprobes R package](https://github.com/markgene/maxprobes))
#
# Both of these probe classes could provide spurious signal for classification/regression modeling.
# In[7]:
cross_reactive_probes = pd.read_csv(cfg.cross_reactive_probe_list,
header=None,
index_col=0,
squeeze=True)
cross_reactive_probes.head()
# In[8]:
# look at overlap of cross-reactive probes and probes in TCGA dataset
sns.set_style('white')
venn({'cross-reactive': set(cross_reactive_probes.index),
'tcga': set(tcga_probes)})
plt.title('Probe overlap between cross-reactive probe list and TCGA dataset')
# In[9]:
print(tcga_methylation_df.shape)
tcga_methylation_df = (
tcga_methylation_df[(~tcga_methylation_df.chromosome.isin(['X', 'Y'])) &
(~tcga_methylation_df.index.isin(cross_reactive_probes.index))]
)
print(tcga_methylation_df.shape)
# In[10]:
if SAVE_RESULTS:
# save probe types
# BMIQ expects these to be labeled as 1/2 rather than I/II, so change them here
tcga_methylation_df.loc[tcga_methylation_df.probe_type == 'I', 'probe_type'] = 1
tcga_methylation_df.loc[tcga_methylation_df.probe_type == 'II', 'probe_type'] = 2
tcga_methylation_df['probe_type'].to_csv(
os.path.join(cfg.data_dir, 'methylation_27k_filtered_probe_types.txt')
)
# save methylation data without probe metadata
(tcga_methylation_df
.drop(columns=['chromosome', 'probe_type'])
.transpose()
.to_csv(os.path.join(cfg.data_dir, 'methylation_27k_filtered_probes.tsv'),
sep='\t')
)
# ### Normalize type I vs. type II beta values
#
# This happens on the sample level.
#
# At this point, you should run the `run_bmiq.R` script in the `wateRmelon` conda environment. This should generate the `methylation_27k_bmiq_normalized.tsv` output that we load below.
# In[11]:
tcga_norm_df = (
pd.read_csv(os.path.join(cfg.data_dir, 'methylation_27k_bmiq_normalized.tsv'),
sep='\t', index_col=0)
.merge(manifest_df[['Infinium_Design_Type', 'CHR']],
left_index=True, right_index=True)
.astype({'CHR': 'str'})
.rename(columns={'Infinium_Design_Type': 'probe_type',
'CHR': 'chromosome'})
#.transpose()
)
# R replaces hyphens in columns with dots, so we need to switch them back
tcga_norm_df.columns = tcga_norm_df.columns.str.replace('.', '-', regex=False)
print(tcga_norm_df.shape)
tcga_norm_df.iloc[:5, -5:]
# In[12]:
# there should be some NA samples, these are samples that the beta mixture
# model didn't converge correctly for
print(np.count_nonzero(tcga_norm_df.isna().sum(axis=0)))
# In[13]:
# we'll just drop the samples with poor convergence, there shouldn't be many of them
tcga_norm_df.dropna(axis='columns', inplace=True)
print(tcga_norm_df.shape)
if SAVE_RESULTS:
(tcga_norm_df
.drop(columns=['probe_type', 'chromosome'])
.transpose()
.to_csv(os.path.join(cfg.data_dir, 'methylation_27k_bmiq_normalized_nona.tsv'),
sep='\t'))
# In[14]:
from sklearn.decomposition import PCA
pca_dir = os.path.join(cfg.data_dir, 'me_compressed')
os.makedirs(pca_dir, exist_ok=True)
tcga_processed_df = (tcga_norm_df
.drop(columns=['probe_type', 'chromosome'])
.transpose()
)
n_pcs_list = [100, 1000, 5000]
var_exp_list = []
pca_file_string = 'me_27k_bmiq_pc{}.tsv.gz'
ve_file_string = 'me_27k_bmiq_pc{}_ve.tsv'
if SAVE_RESULTS:
for n_pcs in n_pcs_list:
pca = PCA(n_components=n_pcs, random_state=cfg.default_seed)
me_pca = pca.fit_transform(tcga_processed_df)
print(me_pca.shape)
var_exp_list.append(pca.explained_variance_ratio_)
me_pca = pd.DataFrame(me_pca, index=tcga_processed_df.index)
me_pca.to_csv(os.path.join(pca_dir, pca_file_string.format(n_pcs)),
sep='\t',
float_format='%.3g')
# save explained variance array to load when SAVE_RESULTS=False
np.savetxt(os.path.join(pca_dir, ve_file_string.format(n_pcs)),
pca.explained_variance_ratio_,
fmt='%.4f',
delimiter='\t')
else:
for n_pcs in n_pcs_list:
# load explained variance array from file, to plot it
var_exp_list.append(
np.loadtxt(os.path.join(pca_dir, ve_file_string.format(n_pcs)),
delimiter='\t')
)
# In[15]:
# plot PCA variance explained
# this requires the variance explained arrays to have been calculated and saved at least once
sns.set({'figure.figsize': (15, 4)})
fig, axarr = plt.subplots(1, 3)
for ix, ve in enumerate(var_exp_list):
sns.lineplot(x=range(len(ve)), y=np.cumsum(ve), ax=axarr[ix])
axarr[ix].set_title('{} PCs, variance explained: {:.4f}'.format(
n_pcs_list[ix], sum(ve, 0)))
axarr[ix].set_xlabel('# of PCs')
if ix == 0:
axarr[ix].set_ylabel('Cumulative variance explained')
plt.suptitle('BMIQ normalized 27k methylation data, # PCs vs. variance explained')
plt.subplots_adjust(top=0.85)
# ### Plot probe intensity distributions
#
# Split probes into type I and type II (based on Illumina manifest info), and plot beta value distributions for each probe type.
#
# In the first distribution plot, we'll take 100 individual probes of each probe type, and plot their beta distributions over all samples (resulting in 200 total distributions, 1 per probe).
#
# In the second plot, we'll take 100 TCGA samples (all samples would be too slow), and look at the beta distribution averaged over all probes of each type (2 total distributions averaging over many probes).
# In[16]:
# sample n_samples probes of each type
# using all the samples takes forever to run and produces a plot that's hard to read
def sample_probes(methylation_df, n_samples=100):
tI_probes = (
methylation_df[methylation_df.probe_type == 'I']
.sample(n=n_samples, random_state=cfg.default_seed)
)
tII_probes = (
methylation_df[methylation_df.probe_type == 'II']
.sample(n=n_samples, random_state=cfg.default_seed)
)
return tI_probes, tII_probes
tI_raw, tII_raw = sample_probes(tcga_methylation_df)
tI_norm, tII_norm = sample_probes(tcga_norm_df)
# In[17]:
sns.set_style('whitegrid')
sns.set({'figure.figsize': (15, 6)})
fig, axarr = plt.subplots(1, 2)
for ix, (probe, row) in enumerate(tI_raw.iterrows()):
betas = row.values[:-2].astype('float')
if ix == 0:
sns.kdeplot(x=betas, color='red', label='type I', ax=axarr[0])
else:
sns.kdeplot(x=betas, color='red', ax=axarr[0])
for ix, (probe, row) in enumerate(tII_raw.iterrows()):
betas = row.values[:-2].astype('float')
if ix == 0:
sns.kdeplot(x=betas, color='green', label='type II', ax=axarr[0])
else:
sns.kdeplot(x=betas, color='green', ax=axarr[0])
axarr[0].set_title(r'Distribution of $\beta$ values for individual probes, before correction')
axarr[0].set_xlabel(r'$\beta$')
axarr[0].legend()
for ix, (probe, row) in enumerate(tI_norm.iterrows()):
betas = row.values[:-2].astype('float')
if ix == 0:
sns.kdeplot(x=betas, color='red', label='type I', ax=axarr[1])
else:
sns.kdeplot(x=betas, color='red', ax=axarr[1])
for ix, (probe, row) in enumerate(tII_norm.iterrows()):
betas = row.values[:-2].astype('float')
if ix == 0:
sns.kdeplot(x=betas, color='green', label='type II', ax=axarr[1])
else:
sns.kdeplot(x=betas, color='green', ax=axarr[1])
axarr[1].set_title(r'Distribution of $\beta$ values for individual probes, after correction')
axarr[1].set_xlabel(r'$\beta$')
axarr[1].legend()
# In[18]:
# single distribution from all probes of each type
# first take all probes from the given number of random samples
def sample_and_aggregate_probes(methylation_df, n_samples=100):
sampled_df = (methylation_df
.drop(columns=['probe_type', 'chromosome'])
.sample(n=n_samples,
replace=False,
random_state=cfg.default_seed,
axis='columns')
)
sampled_df['probe_type'] = methylation_df.probe_type
# split into type I and type II probes
all_tI_probes = (
sampled_df[sampled_df.probe_type == 'I']
.drop(columns=['probe_type'])
.values.astype('float').flatten()
)
all_tII_probes = (
sampled_df[sampled_df.probe_type == 'II']
.drop(columns=['probe_type'])
.values.astype('float').flatten()
)
# there should be far more tII than tI probes, just check
assert len(all_tI_probes) < len(all_tII_probes)
# downsample number of type II probes to equal number of type I probes
sampled_tII_probes = np.random.choice(all_tII_probes,
size=all_tI_probes.shape[0],
replace=False)
return all_tI_probes, sampled_tII_probes
tI_raw, tII_raw = sample_and_aggregate_probes(tcga_methylation_df)
tI_norm, tII_norm = sample_and_aggregate_probes(tcga_norm_df)
# In[19]:
# now plot the aggregate distribution over all the probes
sns.set_style('whitegrid')
sns.set({'figure.figsize': (15, 6)})
fig, axarr = plt.subplots(1, 2)
sns.kdeplot(x=tI_raw, color='red', label='type I', ax=axarr[0])
sns.kdeplot(x=tII_raw, color='green', label='type II', ax=axarr[0])
axarr[0].set_title(r'Distribution of $\beta$ values, all probes combined, before correction')
axarr[0].set_xlabel(r'$\beta$')
axarr[0].legend()
sns.kdeplot(x=tI_norm, color='red', label='type I', ax=axarr[1])
sns.kdeplot(x=tII_norm, color='green', label='type II', ax=axarr[1])
axarr[1].set_title(r'Distribution of $\beta$ values, all probes combined, after correction')
axarr[1].set_xlabel(r'$\beta$')
axarr[1].legend()
```
#### File: 00_download_data/nbconverted/sample_random_genes.py
```python
import os
import numpy as np
import pandas as pd
import mpmp.config as cfg
import mpmp.utilities.data_utilities as du
# In[2]:
# this is the number of valid genes in the Vogelstein gene set
NUM_GENES = 85
# sample random genes from set of genes with every gene with >= NUM_CANCERS
# valid cancer types
#
# if we sampled them randomly from all genes, it's likely that many of them
# would end up with no valid cancer types (i.e. not enough mutations to train
# a classifier), so we add this criterion to make sure they have at least one
NUM_CANCERS = 1
# ### Load mutation and sample/cancer type info
# In[3]:
sample_info_df = du.load_sample_info('expression', verbose=True)
pancancer_data = du.load_pancancer_data(verbose=True)
mutation_df = pancancer_data[1]
mut_burden_df = pancancer_data[4]
print(sample_info_df.shape)
print(mutation_df.shape)
print(mut_burden_df.shape)
# In[4]:
# merge sample info and mutation burden info
hyper_filter = 5
print(mutation_df.shape)
mutations_df = (mutation_df
.merge(sample_info_df, how='inner', left_index=True, right_index=True)
.merge(mut_burden_df, how='inner', left_index=True, right_index=True)
)
# then filter to remove hyper-mutated samples
burden_filter = mutations_df['log10_mut'] < hyper_filter * mutations_df['log10_mut'].std()
mutations_df = mutations_df.loc[burden_filter, :]
# and get rid of unnecessary columns
mutations_df.drop(columns=['sample_type', 'id_for_stratification', 'log10_mut'],
inplace=True)
print(mutations_df.shape)
# ### Get number of mutations per gene, per cancer type
# In[5]:
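# sum_df: number of mutated samples per (cancer type, gene) pair
# count_df: total number of samples per cancer type (repeated across gene columns)
# ratio_df: proportion of samples carrying a mutation, per (cancer type, gene) pair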
sum_df = mutations_df.groupby('cancer_type').agg('sum')
count_df = mutations_df.groupby('cancer_type').agg('count')
ratio_df = sum_df / count_df
sum_df.iloc[:5, :5]
# In[6]:
SUM_THRESHOLD = 10
PROP_THRESHOLD = 0.1
sum_df = (sum_df > SUM_THRESHOLD)
ratio_df = (ratio_df > PROP_THRESHOLD)
valid_df = sum_df & ratio_df
print(sum_df.sum().sum())
print(ratio_df.sum().sum())
valid_df.iloc[:5, :5]
# ### Sample randomly from set of all valid genes
# In[7]:
valid_genes = valid_df.sum()[valid_df.sum() >= NUM_CANCERS]
print(valid_genes.head(10))
print(len(valid_genes))
# In[8]:
# sample randomly from valid genes and write to dataframe
sampled_genes = valid_genes.sample(n=NUM_GENES, random_state=cfg.default_seed)
print(sampled_genes.sort_values(ascending=False).head(20))
# In[9]:
# get oncogene/TSG status from Vogelstein gene list
# this is just used to decide whether to add copy number gains/losses in mutation labeling
vogelstein_df = du.load_vogelstein()
gene_to_class_map = dict(zip(vogelstein_df.gene, vogelstein_df.classification))
def get_class(gene):
# if genes aren't in other gene lists, mark as 'neither'
try:
return gene_to_class_map[gene]
except KeyError:
return 'neither'
random_classes = [get_class(gene) for gene in sampled_genes.index.values]
random_df = pd.DataFrame({
'gene': sampled_genes.index.values,
'classification': random_classes
}).set_index('gene')
random_df.head()
# In[10]:
random_df.to_csv(cfg.random_genes, sep='\t')
# ### Get top mutated genes
#
# Same methods as in https://github.com/greenelab/BioBombe/blob/master/9.tcga-classify/top-50-pancanatlas-mutations.ipynb (but we want more than 50 genes, since we want a gene set of the same size as Vogelstein)
# In[11]:
mutation_count_df = mutation_df.sum().sort_values(ascending=False)
mutation_count_df.head()
# In[12]:
top_genes = mutation_count_df[:NUM_GENES]
top_classes = [get_class(gene) for gene in top_genes.index.values]
top_df = pd.DataFrame({
'gene': top_genes.index.values,
'classification': top_classes
}).set_index('gene')
top_df.head()
# In[13]:
top_df.to_csv(cfg.top_genes, sep='\t')
```
#### File: 01_explore_data/nbconverted/explore_data.py
```python
import os
import sys
import numpy as np; np.random.seed(42)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import mpmp.config as cfg
import mpmp.utilities.data_utilities as du
# In[2]:
DATA_TYPE = 'mut_sigs'
# load gene/classification info and sample/cancer type info
print('Loading gene label data...', file=sys.stderr)
genes_df = du.load_vogelstein()
sample_info_df = du.load_sample_info(DATA_TYPE, verbose=True)
# load mutation info
# this returns a tuple of dataframes, unpack it below
pancancer_data = du.load_pancancer_data(verbose=True)
(sample_freeze_df,
mutation_df,
copy_loss_df,
copy_gain_df,
mut_burden_df) = pancancer_data
# In[3]:
# load relevant data
data_df = du.load_raw_data(DATA_TYPE, verbose=True)
# standardize columns of the data matrix, for data types that require it
if DATA_TYPE in cfg.standardize_data_types:
print('Standardizing columns of {} data...'.format(DATA_TYPE),
file=sys.stderr)
data_df[data_df.columns] = StandardScaler().fit_transform(data_df[data_df.columns])
print(data_df.shape)
data_df.iloc[:5, :5]
# First, let's look at the low-dimensional representation of the chosen data type.
#
# We'll choose a few cancer types that are similar to one another (LUSC/LUAD, LGG/GBM) and a few that should be dissimilar (BRCA, THCA).
# In[25]:
assert sample_info_df.index.equals(data_df.index)
# data_cancer_types = sorted(sample_info_df.cancer_type.unique())
data_cancer_types = ['LUAD', 'LUSC', 'THCA', 'LGG', 'GBM', 'BRCA']
data_types_df = (data_df
.merge(sample_info_df, left_index=True, right_index=True)
.query('cancer_type in @data_cancer_types')
.drop(columns=['sample_type', 'id_for_stratification'])
.reset_index()
)
print(data_types_df.cancer_type.unique())
data_types_df.iloc[:5, -5:]
# In[26]:
from sklearn.decomposition import PCA
from umap import UMAP
sns.set({'figure.figsize': (20, 8)})
fig, axarr = plt.subplots(1, 2)
pca = PCA(n_components=2)
X_proj_pca = pca.fit_transform(data_types_df.drop(columns=['sample_id', 'cancer_type']))
reducer = UMAP(n_components=2, random_state=42)
X_proj_umap = reducer.fit_transform(data_types_df.drop(columns=['sample_id', 'cancer_type']))
for i, cancer_type in enumerate(data_cancer_types):
ixs = data_types_df.index[data_types_df.cancer_type == cancer_type].tolist()
axarr[0].scatter(X_proj_pca[ixs, 0], X_proj_pca[ixs, 1], label=cancer_type, s=5)
axarr[1].scatter(X_proj_umap[ixs, 0], X_proj_umap[ixs, 1], label=cancer_type, s=5)
axarr[0].set_xlabel('PC1')
axarr[0].set_ylabel('PC2')
axarr[0].set_title('PCA projection of {} data, colored by cancer type'.format(DATA_TYPE))
axarr[0].legend()
axarr[1].set_xlabel('UMAP dimension 1')
axarr[1].set_ylabel('UMAP dimension 2')
axarr[1].set_title('UMAP projection of {} data, colored by cancer type'.format(DATA_TYPE))
axarr[1].legend()
# Now we want to dig a bit deeper into LGG and GBM, using expression and methylation data. It's fairly well-known that IDH1 mutation status defines distinct subtypes in both classes of brain tumors. We'll compare methylation and gene expression in IDH1-mutated vs. non-mutated samples, expecting to see a separation in our low dimensional representation.
#
# IDH1 plays a direct role in DNA methylation, so we anticipate that this separation between mutated and non-mutated samples will be slightly clearer in the methylation data.
# In[5]:
# load relevant data
rnaseq_df = du.load_raw_data('expression', verbose=True)
print('Standardizing columns of expression data...', file=sys.stderr)
rnaseq_df[rnaseq_df.columns] = StandardScaler().fit_transform(rnaseq_df[rnaseq_df.columns])
methylation_df = du.load_raw_data('me_27k', verbose=True)
print(methylation_df.shape)
methylation_df.iloc[:5, :5]
# In[6]:
from mpmp.utilities.tcga_utilities import process_y_matrix
def generate_labels(gene, classification):
# process the y matrix for the given gene or pathway
y_mutation_df = mutation_df.loc[:, gene]
# include copy number gains for oncogenes
# and copy number loss for tumor suppressor genes (TSG)
include_copy = True
if classification == "Oncogene":
y_copy_number_df = copy_gain_df.loc[:, gene]
elif classification == "TSG":
y_copy_number_df = copy_loss_df.loc[:, gene]
else:
y_copy_number_df = pd.DataFrame()
include_copy = False
# construct labels from mutation/CNV information, and filter for
# cancer types without an extreme label imbalance
y_df = process_y_matrix(
y_mutation=y_mutation_df,
y_copy=y_copy_number_df,
include_copy=include_copy,
gene=gene,
sample_freeze=sample_freeze_df,
mutation_burden=mut_burden_df,
filter_count=1,
filter_prop=0.01,
output_directory=None,
hyper_filter=5,
test=True # don't write filter info to file
)
return y_df
# In[7]:
gene = 'IDH1'
cancer_types = ['LGG', 'GBM']
classification = du.get_classification(gene, genes_df)
y_df = generate_labels(gene, classification)
y_df = y_df[y_df.DISEASE.isin(cancer_types)]
print(y_df.shape)
y_df.tail()
# In[8]:
# generate UMAP 2-dimensional representations of data
shuffle = False
def shuffle_cols(input_df):
# randomly permute genes of each sample in the rnaseq matrix
shuf_df = input_df.apply(lambda x:
np.random.permutation(x.tolist()),
axis=1)
# set up new dataframe
shuf_df = pd.DataFrame(shuf_df, columns=['col_list'])
shuf_df = pd.DataFrame(shuf_df.col_list.values.tolist(),
columns=input_df.columns,
index=input_df.index)
return shuf_df
# get samples that are present in all 3 datasets (expression, methylation, mutations)
ix_overlap = y_df.index.intersection(rnaseq_df.index).intersection(methylation_df.index)
y_mut_df = y_df.loc[ix_overlap, :]
rnaseq_mut_df = rnaseq_df.loc[ix_overlap, :]
me_mut_df = methylation_df.loc[ix_overlap, :]
if shuffle:
rnaseq_mut_df = shuffle_cols(rnaseq_mut_df)
me_mut_df = shuffle_cols(me_mut_df)
reducer = UMAP(n_components=2, random_state=42)
X_proj_rnaseq = reducer.fit_transform(rnaseq_mut_df)
X_proj_me = reducer.fit_transform(me_mut_df)
print(X_proj_rnaseq.shape)
print(X_proj_me.shape)
# In[9]:
gene_label = '{} mutant'.format(gene)
me_proj_df = pd.DataFrame({
'UMAP1': X_proj_me[:, 0],
'UMAP2': X_proj_me[:, 1],
'Cancer type': y_mut_df.DISEASE.values,
gene_label: y_mut_df.status.values.astype('bool')
})
rnaseq_proj_df = pd.DataFrame({
'UMAP1': X_proj_rnaseq[:, 0],
'UMAP2': X_proj_rnaseq[:, 1],
'Cancer type': y_mut_df.DISEASE.values,
gene_label: y_mut_df.status.values.astype('bool')
})
me_proj_df.head()
# In[10]:
sns.set({'figure.figsize': (20, 8)})
fig, axarr = plt.subplots(1, 2)
sns.scatterplot(x='UMAP1', y='UMAP2', data=me_proj_df, hue=gene_label,
style='Cancer type', ax=axarr[0])
axarr[0].set_xlabel('UMAP dimension 1')
axarr[0].set_ylabel('UMAP dimension 2')
axarr[0].set_title('UMAP projection of TCGA methylation data, colored by mutation status')
axarr[0].legend()
sns.scatterplot(x='UMAP1', y='UMAP2', data=rnaseq_proj_df, hue=gene_label,
style='Cancer type', ax=axarr[1])
axarr[1].set_xlabel('UMAP dimension 1')
axarr[1].set_ylabel('UMAP dimension 2')
axarr[1].set_title('UMAP projection of TCGA gene expression data, colored by mutation status')
axarr[1].legend()
# As expected, we can see that there's a nice separation between (most) IDH1 mutants and non-mutants in the methylation data. They separate to some degree in the gene expression data, but not quite as clearly.
#
# It's likely (although I haven't checked this yet) that the non-mutated samples in the IDH1-mutant methylation cluster are actually IDH2 mutants. IDH2 is thought to phenocopy IDH1 in gliomas, having a similar effect on methylation and gene expression as IDH1 when mutated.
```
#### File: mpmp/data_models/tcga_data_model.py
```python
import sys
import typing
from pathlib import Path
import numpy as np
import pandas as pd
import mpmp.config as cfg
import mpmp.utilities.data_utilities as du
from mpmp.utilities.tcga_utilities import (
process_y_matrix,
process_y_matrix_cancertype,
align_matrices,
filter_to_cross_data_samples,
)
class TCGADataModel():
"""
Class containing data necessary to run TCGA mutation prediction experiments.
Provides an interface to load and preprocess mutation data and training data
modalities, and to split data into train/test sets for each target gene.
"""
def __init__(self,
seed=cfg.default_seed,
subset_mad_genes=-1,
training_data='expression',
overlap_data_types=None,
load_compressed_data=False,
standardize_input=False,
n_dim=None,
sample_info_df=None,
verbose=False,
debug=False,
test=False):
"""
Initialize mutation prediction model/data
Arguments
---------
seed (int): seed for random number generator
subset_mad_genes (int): how many genes to keep (top by mean absolute deviation).
-1 doesn't do any filtering (all genes will be kept).
training_data (str): what data type to train the model on
overlap_data_types (list): what data types to use to determine sample set
load_compressed_data (bool): whether or not to use compressed data
n_dim (int): how many dimensions to use for compression algorithm
verbose (bool): whether or not to write verbose output
sample_info_df (pd.DataFrame): dataframe containing info about TCGA samples
debug (bool): if True, use a subset of expression data for quick debugging
test (bool): if True, don't save results to files
"""
# save relevant parameters
np.random.seed(seed)
self.seed = seed
self.subset_mad_genes = subset_mad_genes
self.compressed_data = load_compressed_data
self.overlap_data_types = overlap_data_types
self.n_dim = n_dim
self.verbose = verbose
self.debug = debug
self.test = test
# load and store data in memory
self._load_data(train_data_type=training_data,
compressed_data=load_compressed_data,
standardize_input=standardize_input,
n_dim=n_dim,
sample_info_df=sample_info_df,
debug=debug,
test=self.test)
def load_gene_set(self, gene_set='top_50'):
"""
Load gene set data from previous GitHub repos.
Arguments
---------
gene_set (str): which predefined gene set to use, or a list of gene names
to use a custom list.
Returns
-------
genes_df (pd.DataFrame): list of genes to run cross-validation experiments for,
contains gene names and oncogene/TSG classifications
"""
if self.verbose:
print('Loading gene label data...', file=sys.stderr)
if gene_set == 'top_50':
genes_df = du.load_top_genes()
elif gene_set == 'vogelstein':
genes_df = du.load_vogelstein()
elif gene_set == '50_random':
genes_df = du.load_random_genes()
else:
from mpmp.exceptions import GenesNotFoundError
            assert isinstance(gene_set, list)
genes_df = du.load_vogelstein()
# if all genes in gene_set are in vogelstein dataset, use it
if set(gene_set).issubset(set(genes_df.gene.values)):
genes_df = genes_df[genes_df.gene.isin(gene_set)]
# else if all genes in gene_set are in top50 dataset, use it
else:
genes_df = du.load_top_50()
if set(gene_set).issubset(set(genes_df.gene.values)):
genes_df = genes_df[genes_df.gene.isin(gene_set)]
else:
# else throw an error
raise GenesNotFoundError(
'Gene list was not a subset of Vogelstein or top50'
)
return genes_df
def process_data_for_cancer_type(self,
cancer_type,
cancer_type_dir):
"""
Prepare to run cancer type prediction experiments.
This has to be rerun to generate the labels for each cancer type.
Arguments
---------
cancer_type (str): cancer type to predict (one vs. rest binary)
cancer_type_dir (str): directory to write output to, if None don't
write output
"""
y_df_raw = self._generate_cancer_type_labels(cancer_type)
filtered_data = self._filter_data(
self.data_df,
y_df_raw
)
train_filtered_df, y_filtered_df, gene_features = filtered_data
train_filtered_df, y_filtered_df = filter_to_cross_data_samples(
train_filtered_df,
y_filtered_df,
data_types=self.overlap_data_types,
use_subsampled=(self.debug or self.test),
verbose=self.verbose
)
self.X_df = train_filtered_df
self.y_df = y_filtered_df
self.gene_features = gene_features
def process_data_for_gene(self,
gene,
classification,
gene_dir,
use_pancancer=False):
"""
Prepare to run mutation prediction experiments for a given gene.
Arguments
---------
gene (str): gene to run experiments for
        classification (str): 'Oncogene' or 'TSG'; most likely cancer function for
the given gene
gene_dir (str): directory to write output to, if None don't write output
use_pancancer (bool): whether or not to use pancancer data
"""
y_df_raw, valid_samples = self._generate_gene_labels(
gene, classification, gene_dir)
filtered_data = self._filter_data(
self.data_df,
y_df_raw,
add_cancertype_covariate=True
)
train_filtered_df, y_filtered_df, gene_features = filtered_data
# add non-gene features to data_types array if necessary
# this is used when building multi-omics models
if hasattr(self, 'data_types'):
# this has to have a different name than the general data_types
# array, since this preprocessing may happen multiple times (for
# each gene) in the same script call
self.gene_data_types = np.concatenate(
(self.data_types, np.array([cfg.NONGENE_FEATURE] *
np.count_nonzero(~gene_features)))
)
assert self.gene_data_types.shape[0] == gene_features.shape[0]
train_filtered_df, y_filtered_df = filter_to_cross_data_samples(
train_filtered_df,
y_filtered_df,
valid_samples=valid_samples,
data_types=self.overlap_data_types,
n_dim=self.n_dim,
use_subsampled=(self.debug or self.test),
verbose=self.verbose
)
self.X_df = train_filtered_df
self.y_df = y_filtered_df
self.gene_features = gene_features
assert np.count_nonzero(self.X_df.index.duplicated()) == 0
assert np.count_nonzero(self.y_df.index.duplicated()) == 0
def process_purity_data(self,
output_dir,
classify=False):
"""Prepare to run experiments predicting tumor purity.
Arguments
---------
output_dir (str): directory to write output to, if None don't write output
classify (bool): if True do classification, else regression
"""
y_df_raw = du.load_purity(self.mut_burden_df,
self.sample_info_df,
classify=classify,
verbose=self.verbose)
filtered_data = self._filter_data(
self.data_df,
y_df_raw,
add_cancertype_covariate=True
)
train_filtered_df, y_filtered_df, gene_features = filtered_data
train_filtered_df, y_filtered_df = filter_to_cross_data_samples(
train_filtered_df,
y_filtered_df,
data_types=self.overlap_data_types,
n_dim=self.n_dim,
use_subsampled=(self.debug or self.test),
verbose=self.verbose
)
# filter to samples in common between training data and tumor purity
self.X_df = train_filtered_df
self.y_df = y_filtered_df
self.gene_features = gene_features
assert np.count_nonzero(self.X_df.index.duplicated()) == 0
assert np.count_nonzero(self.y_df.index.duplicated()) == 0
def process_msi_data(self, cancer_type, output_dir):
"""Prepare to run experiments predicting microsatellite instability status.
Arguments
---------
output_dir (str): directory to write output to, if None don't write output
        cancer_type (str): cancer type to predict MSI status in, or 'pancancer'
"""
y_df_raw = du.load_msi(cancer_type,
self.mut_burden_df,
self.sample_info_df,
verbose=self.verbose)
filtered_data = self._filter_data(
self.data_df,
y_df_raw,
add_cancertype_covariate=(cancer_type == 'pancancer')
)
train_filtered_df, y_filtered_df, gene_features = filtered_data
train_filtered_df, y_filtered_df = filter_to_cross_data_samples(
train_filtered_df,
y_filtered_df,
data_types=self.overlap_data_types,
n_dim=self.n_dim,
use_subsampled=(self.debug or self.test),
verbose=self.verbose
)
        # filter to samples in common between training data and MSI labels
self.X_df = train_filtered_df
self.y_df = y_filtered_df
self.gene_features = gene_features
assert np.count_nonzero(self.X_df.index.duplicated()) == 0
assert np.count_nonzero(self.y_df.index.duplicated()) == 0
def process_survival_data(self,
output_dir,
cancer_type):
"""Prepare to run experiments predicting survival from omics data.
Arguments
---------
output_dir (str): directory to write output to, if None don't write output
"""
y_df_raw = du.load_survival_labels(cancer_type,
self.mut_burden_df,
self.sample_info_df,
verbose=self.verbose)
filtered_data = self._filter_data(
self.data_df,
y_df_raw,
# add cancer type covariate only in pan-cancer prediction case
add_cancertype_covariate=(cancer_type == 'pancancer'),
add_age_covariate=True
)
train_filtered_df, y_filtered_df, gene_features = filtered_data
train_filtered_df, y_filtered_df = filter_to_cross_data_samples(
train_filtered_df,
y_filtered_df,
data_types=self.overlap_data_types,
n_dim=self.n_dim,
use_subsampled=(self.debug or self.test),
verbose=self.verbose
)
        # filter to samples in common between training data and survival labels
self.X_df = train_filtered_df
self.y_df = y_filtered_df
self.gene_features = gene_features
assert np.count_nonzero(self.X_df.index.duplicated()) == 0
assert np.count_nonzero(self.y_df.index.duplicated()) == 0
def _load_data(self,
train_data_type,
compressed_data=False,
standardize_input=False,
n_dim=None,
sample_info_df=None,
debug=False,
test=False):
"""Load and store relevant data.
This data does not vary based on the gene/cancer type being considered
(i.e. it can be loaded only once when the class is instantiated).
Arguments:
----------
debug (bool): whether or not to subset data for faster debugging
test (bool): whether or not to subset columns in mutation data, for testing
"""
# load training data
if not isinstance(train_data_type, str):
# if a list of train data types is provided, we have to load each
# of them and concatenate columns
# n_dim should be a list here
self.data_df, self.data_types = du.load_multiple_data_types(
train_data_type,
n_dims=n_dim,
verbose=self.verbose)
elif compressed_data:
self.data_df = du.load_compressed_data(train_data_type,
n_dim=n_dim,
verbose=self.verbose,
standardize_input=standardize_input,
load_subset=(debug or test))
else:
self.data_df = du.load_raw_data(train_data_type,
verbose=self.verbose,
load_subset=(debug or test))
if sample_info_df is None:
self.sample_info_df = du.load_sample_info(train_data_type,
verbose=self.verbose)
else:
# sometimes we load sample info in the calling script as part of
# argument processing, etc
# in that case, we don't need to load it again
self.sample_info_df = sample_info_df
# load and unpack pancancer mutation/CNV/TMB data
# this data is described in more detail in the load_pancancer_data docstring
if test:
# for testing, just load a subset of pancancer data,
# this is much faster than loading mutation data for all genes
import mpmp.test_config as tcfg
pancan_data = du.load_pancancer_data(verbose=self.verbose,
test=True,
subset_columns=tcfg.test_genes)
else:
pancan_data = du.load_pancancer_data(verbose=self.verbose)
(self.sample_freeze_df,
self.mutation_df,
self.copy_loss_df,
self.copy_gain_df,
self.mut_burden_df) = pancan_data
def _generate_cancer_type_labels(self, cancer_type):
y_df, count_df = process_y_matrix_cancertype(
acronym=cancer_type,
sample_freeze=self.sample_freeze_df,
mutation_burden=self.mut_burden_df,
hyper_filter=5,
)
return y_df
def _generate_gene_labels(self, gene, classification, gene_dir):
# process the y matrix for the given gene or pathway
y_mutation_df = self.mutation_df.loc[:, gene]
# include copy number gains for oncogenes
# and copy number loss for tumor suppressor genes (TSG)
include_copy = True
if classification == "Oncogene":
y_copy_number_df = self.copy_gain_df.loc[:, gene]
elif classification == "TSG":
y_copy_number_df = self.copy_loss_df.loc[:, gene]
else:
y_copy_number_df = pd.DataFrame()
include_copy = False
# construct labels from mutation/CNV information, and filter for
# cancer types without an extreme label imbalance
y_df, valid_samples = process_y_matrix(
y_mutation=y_mutation_df,
y_copy=y_copy_number_df,
include_copy=include_copy,
gene=gene,
sample_freeze=self.sample_freeze_df,
mutation_burden=self.mut_burden_df,
filter_count=cfg.filter_count,
filter_prop=cfg.filter_prop,
output_directory=gene_dir,
hyper_filter=5,
test=self.test,
overlap_data_types=self.overlap_data_types
)
return y_df, valid_samples
def _filter_data(self,
data_df,
y_df,
add_cancertype_covariate=False,
add_age_covariate=False):
use_samples, data_df, y_df, gene_features = align_matrices(
x_file_or_df=data_df,
y=y_df,
add_cancertype_covariate=add_cancertype_covariate,
add_mutation_covariate=True,
add_age_covariate=add_age_covariate
)
return data_df, y_df, gene_features
```
#### File: mpmp/prediction/cross_validation.py
```python
import warnings
import contextlib
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, StratifiedKFold
import mpmp.config as cfg
from mpmp.exceptions import (
NoTrainSamplesError,
NoTestSamplesError,
OneClassError,
)
import mpmp.prediction.classification as clf
import mpmp.prediction.regression as reg
import mpmp.prediction.survival as surv
import mpmp.utilities.tcga_utilities as tu
def run_cv_stratified(data_model,
exp_string,
identifier,
training_data,
sample_info,
num_folds,
predictor='classify',
shuffle_labels=False,
standardize_columns=False,
output_preds=False,
output_survival_fn=False,
stratify=True,
results_dir=None):
"""
Run stratified cross-validation experiments for a given dataset, then write
the results to files in the results directory. If the relevant files already
exist, skip this experiment.
Arguments
---------
data_model (TCGADataModel): class containing preprocessed train/test data
exp_string (str): string describing the experiment being run
identifier (str): string describing the target value/environment
training_data (str): what type of data is being used to train model
sample_info (pd.DataFrame): df with TCGA sample information
num_folds (int): number of cross-validation folds to run
predictor (str): one of 'classify', 'regress', 'survival'
    output_survival_fn (bool): whether or not to write fitted survival functions to file
shuffle_labels (bool): whether or not to shuffle labels (negative control)
standardize_columns (bool): whether or not to standardize predictors
output_preds (bool): whether or not to write predictions to file
Returns
-------
results (dict): maps results metrics to values across CV folds
"""
if predictor == 'classify':
results = {
'{}_metrics'.format(exp_string): [],
'{}_auc'.format(exp_string): [],
'{}_aupr'.format(exp_string): [],
'{}_coef'.format(exp_string): [],
}
else:
results = {
'{}_metrics'.format(exp_string): [],
'{}_coef'.format(exp_string): [],
}
signal = 'shuffled' if shuffle_labels else 'signal'
if output_preds:
results['{}_preds'.format(exp_string)] = []
for fold_no in range(num_folds):
try:
with warnings.catch_warnings():
# sklearn warns us if one of the stratification classes has fewer
# members than num_folds: in our case that will be the 'other'
# class, and it's fine to distribute those unevenly. so here we
# can ignore that warning.
warnings.filterwarnings('ignore',
message='The least populated class in y')
X_train_raw_df, X_test_raw_df, _ = split_stratified(
data_model.X_df, sample_info, num_folds=num_folds,
fold_no=fold_no, seed=data_model.seed, stratify=stratify)
except ValueError:
if data_model.X_df.shape[0] == 0:
raise NoTrainSamplesError(
'No train samples found for identifier: {}'.format(
identifier)
                )
            else:
                # re-raise anything that isn't the "no train samples" case
                raise
y_train_df = data_model.y_df.reindex(X_train_raw_df.index)
y_test_df = data_model.y_df.reindex(X_test_raw_df.index)
# shuffle labels for train/test sets separately
# this ensures that overall label balance isn't affected
# (see https://github.com/greenelab/mpmp/issues/44)
if shuffle_labels:
# we set a temp seed here to make sure this shuffling order
# is the same for each gene between data types, otherwise
# it might be slightly different depending on the global state
with temp_seed(data_model.seed):
y_train_df.status = np.random.permutation(y_train_df.status.values)
y_test_df.status = np.random.permutation(y_test_df.status.values)
# choose single-omics or multi-omics preprocessing function based on
# data_model.gene_data_types class attribute
if hasattr(data_model, 'gene_data_types'):
X_train_df, X_test_df = tu.preprocess_multi_data(X_train_raw_df,
X_test_raw_df,
data_model.gene_features,
data_model.gene_data_types,
standardize_columns,
data_model.subset_mad_genes)
else:
X_train_df, X_test_df = tu.preprocess_data(X_train_raw_df,
X_test_raw_df,
data_model.gene_features,
standardize_columns,
data_model.subset_mad_genes)
models_list = {
'classify': clf.train_classifier,
'regress': reg.train_regressor,
'survival': surv.train_survival
}
train_model = models_list[predictor]
# save model results for survival prediction
if predictor == 'survival':
from functools import partial
train_model = partial(train_model, output_fn=output_survival_fn)
if predictor == 'survival' and cfg.survival_debug:
debug_info = {
'fold_no': fold_no,
'prefix': '{}/{}_{}'.format(
results_dir, identifier, predictor
),
'signal': signal
}
from functools import partial
# the non-survival model training functions don't take a debug_info
# parameter, so we do a partial function application to make all the
# model training functions take the same arguments
train_model = partial(train_model, debug_info=debug_info)
try:
model_results = train_model(
X_train=X_train_df,
X_test=X_test_df,
y_train=y_train_df,
alphas=cfg.alphas_map[predictor],
l1_ratios=cfg.l1_ratios_map[predictor],
seed=data_model.seed,
n_folds=cfg.folds,
max_iter=cfg.max_iter_map[predictor],
)
except ValueError as e:
if ('Only one class' in str(e)) or ('got 1 class' in str(e)):
raise OneClassError(
'Only one class present in test set for identifier: '
'{}'.format(identifier)
)
elif ('All samples are censored' in str(e)):
raise OneClassError(
'All samples are censored in test set for identifier:'
'{}'.format(identifier)
)
else:
# if not only one class error, just re-raise
raise e
(cv_pipeline,
y_pred_train,
y_pred_test,
y_cv_df) = model_results
# get coefficients
coef_df = extract_coefficients(
cv_pipeline=cv_pipeline,
feature_names=X_train_df.columns,
signal=signal,
seed=data_model.seed,
name=predictor
)
coef_df = coef_df.assign(identifier=identifier)
if isinstance(training_data, str):
coef_df = coef_df.assign(training_data=training_data)
else:
coef_df = coef_df.assign(training_data='.'.join(training_data))
coef_df = coef_df.assign(fold=fold_no)
results['{}_coef'.format(exp_string)].append(coef_df)
# get relevant metrics
if predictor == 'classify':
try:
metric_df, auc_df, aupr_df = clf.get_metrics(
y_train_df, y_test_df, y_cv_df, y_pred_train,
y_pred_test, identifier, training_data, signal,
data_model.seed, fold_no
)
except ValueError as e:
if 'Only one class' in str(e):
raise OneClassError(
'Only one class present in test set for identifier: '
'{}'.format(identifier)
)
else:
# if not only one class error, just re-raise
raise e
results['{}_metrics'.format(exp_string)].append(metric_df)
results['{}_auc'.format(exp_string)].append(auc_df)
results['{}_aupr'.format(exp_string)].append(aupr_df)
else:
if predictor == 'survival':
metric_df = surv.get_metrics(
cv_pipeline,
X_train_df,
X_test_df,
X_train_df,
y_train_df,
y_test_df,
y_cv_df,
identifier=identifier,
training_data=training_data,
signal=signal,
seed=data_model.seed,
fold_no=fold_no
)
else:
metric_df = reg.get_metrics(
y_train_df,
y_test_df,
y_cv_df,
y_pred_train,
y_pred_test,
identifier=identifier,
training_data=training_data,
signal=signal,
seed=data_model.seed,
fold_no=fold_no
)
results['{}_metrics'.format(exp_string)].append(metric_df)
if output_preds:
if predictor == 'survival':
raise NotImplementedError
            get_preds = clf.get_preds if predictor == 'classify' else reg.get_preds
results['{}_preds'.format(exp_string)].append(
get_preds(X_test_df, y_test_df, cv_pipeline, fold_no)
)
if output_survival_fn:
import pickle as pkl
if predictor != 'survival':
raise NotImplementedError
surv_fns = surv.get_survival_function(cv_pipeline, X_test_df)
fn_prefix = '{}/{}_{}'.format(results_dir, identifier, predictor)
fn_file = '{}_{}_fold{}_functions.pkl'.format(fn_prefix,
signal,
fold_no)
with open(fn_file, 'wb') as f:
pkl.dump(surv_fns, f)
return results
def split_stratified(data_df,
sample_info_df,
num_folds=4,
fold_no=1,
seed=cfg.default_seed,
stratify=True):
"""Split expression data into train and test sets.
The train and test sets will both contain data from all cancer types,
in roughly equal proportions.
Arguments
---------
data_df (pd.DataFrame): samples x features dataframe
sample_info_df (pd.DataFrame): maps samples to cancer types
num_folds (int): number of cross-validation folds
fold_no (int): cross-validation fold to hold out
seed (int): seed for deterministic splits
Returns
-------
train_df (pd.DataFrame): samples x features train data
test_df (pd.DataFrame): samples x features test data
"""
# subset sample info to samples in pre-filtered expression data
sample_info_df = sample_info_df.reindex(data_df.index)
# generate id for stratification
# this is a concatenation of cancer type and sample/tumor type, since we want
# to stratify by both
sample_info_df = sample_info_df.assign(
id_for_stratification = sample_info_df.cancer_type.str.cat(
sample_info_df.sample_type)
)
# recode stratification id if they are singletons or near-singletons,
# since these won't work with StratifiedKFold
stratify_counts = sample_info_df.id_for_stratification.value_counts().to_dict()
sample_info_df = sample_info_df.assign(
stratify_samples_count = sample_info_df.id_for_stratification
)
sample_info_df.stratify_samples_count = sample_info_df.stratify_samples_count.replace(
stratify_counts)
sample_info_df.loc[
sample_info_df.stratify_samples_count < num_folds, 'id_for_stratification'
] = 'other'
# now do stratified CV splitting and return the desired fold
if stratify: # TODO this is a mess, clean up
kf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=seed)
for fold, (train_ixs, test_ixs) in enumerate(
kf.split(data_df, sample_info_df.id_for_stratification)):
if fold == fold_no:
train_df = data_df.iloc[train_ixs]
test_df = data_df.iloc[test_ixs]
else:
kf = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
for fold, (train_ixs, test_ixs) in enumerate(kf.split(data_df)):
if fold == fold_no:
train_df = data_df.iloc[train_ixs]
test_df = data_df.iloc[test_ixs]
return train_df, test_df, sample_info_df
def extract_coefficients(cv_pipeline,
feature_names,
signal,
seed,
name='classify'):
"""
Pull out the coefficients from the trained models
Arguments
---------
cv_pipeline: the trained sklearn cross validation pipeline
feature_names: the column names of the x matrix used to train model (features)
    name: name of the final estimator step in the pipeline
        ('classify', 'regress', or 'survival')
signal: the signal of interest
seed: the seed used to compress the data
"""
final_pipeline = cv_pipeline.best_estimator_
final_classifier = final_pipeline.named_steps[name]
if name == 'survival':
weights = final_classifier.coef_.flatten()
else:
weights = final_classifier.coef_[0]
coef_df = pd.DataFrame.from_dict(
{"feature": feature_names, "weight": weights}
)
coef_df = (coef_df
.assign(abs=coef_df["weight"].abs())
.sort_values("abs", ascending=False)
.reset_index(drop=True)
.assign(signal=signal, seed=seed)
)
return coef_df
@contextlib.contextmanager
def temp_seed(cntxt_seed):
"""Set a temporary np.random seed in the resulting context.
This saves the global random number state and puts it back once the context
is closed. See https://stackoverflow.com/a/49557127 for more detail.
"""
state = np.random.get_state()
np.random.seed(cntxt_seed)
try:
yield
finally:
np.random.set_state(state)
```
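As a toy illustration of `split_stratified` above, the snippet below builds a small fake dataset with two cancer types and checks the resulting fold sizes; all sample names, cancer types, and values are invented for illustration.
```python
# assumes split_stratified (defined above) is in scope
import numpy as np
import pandas as pd

samples = ['S{}'.format(i) for i in range(16)]
data_df = pd.DataFrame(np.random.uniform(size=(16, 5)), index=samples)
sample_info_df = pd.DataFrame({
    'cancer_type': ['BRCA'] * 8 + ['LUAD'] * 8,
    'sample_type': ['Primary'] * 16,
}, index=samples)
# hold out fold 1 of 4; both cancer types appear in train and test
train_df, test_df, info_df = split_stratified(data_df, sample_info_df,
                                              num_folds=4, fold_no=1, seed=42)
assert train_df.shape[0] == 12 and test_df.shape[0] == 4
```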
#### File: mpmp/utilities/analysis_utilities.py
```python
import os
import sys
import glob
import pickle as pkl
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from scipy.stats import ttest_rel
def load_stratified_prediction_results(results_dir, experiment_descriptor):
"""Load results of stratified prediction experiments.
Arguments
---------
results_dir (str): directory to look in for results, subdirectories should
be experiments for individual genes or cancer types
experiment_descriptor (str): string describing this experiment, can be
useful to segment analyses involving multiple
experiments or results sets
Returns
-------
results_df (pd.DataFrame): results of classification experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for identifier in results_dir.iterdir():
identifier_dir = Path(results_dir, identifier)
if identifier_dir.is_file(): continue
for results_file in identifier_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
# skip compressed files here, use load_compressed* functions
# to load that data separately
if check_compressed_file(results_filename): continue
if ('classify' not in results_filename or
'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
id_results_df = pd.read_csv(results_file, sep='\t')
id_results_df['experiment'] = experiment_descriptor
results_df = pd.concat((results_df, id_results_df))
return results_df
def load_compressed_prediction_results(results_dir,
experiment_descriptor,
old_filenames=False):
"""Load results of compressed prediction experiments.
Arguments
---------
results_dir (str): directory to look in for results, subdirectories should
be experiments for individual genes or cancer types
experiment_descriptor (str): string describing this experiment, can be
useful to segment analyses involving multiple
experiments or results sets
old_filenames (bool): use old filename format
Returns
-------
results_df (pd.DataFrame): results of classification experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for identifier in results_dir.iterdir():
identifier_dir = Path(results_dir, identifier)
if identifier_dir.is_file(): continue
for results_file in identifier_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if not check_compressed_file(results_filename): continue
if ('classify' not in results_filename or
'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
if old_filenames:
try:
n_dims = int(results_filename.split('_')[-3].replace('n', ''))
except ValueError:
n_dims = int(results_filename.split('_')[-2].replace('n', ''))
else:
n_dims = int(results_filename.split('_')[-2].replace('n', ''))
id_results_df = pd.read_csv(results_file, sep='\t')
id_results_df['n_dims'] = n_dims
id_results_df['experiment'] = experiment_descriptor
results_df = pd.concat((results_df, id_results_df))
return results_df
def load_purity_results(results_dir, classify=True):
"""Load results of tumor purity experiments.
Arguments
---------
results_dir (str): directory containing results files
Returns
-------
results_df (pd.DataFrame): results of prediction experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for results_file in results_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if classify and ('classify' not in results_filename
or 'metrics' not in results_filename): continue
if not classify and ('regress' not in results_filename
or 'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
id_results_df = pd.read_csv(results_file, sep='\t')
if check_compressed_file(results_filename):
id_results_df.training_data += '_compressed'
results_df = pd.concat((results_df, id_results_df))
return results_df
def load_msi_results(results_dir):
"""Load results of microsatellite instability prediction experiments.
Arguments
---------
results_dir (str): directory containing results files
Returns
-------
results_df (pd.DataFrame): results of prediction experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for results_file in results_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if ('classify' not in results_filename
or 'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
id_results_df = pd.read_csv(results_file, sep='\t')
# TODO: n_dims?
results_df = pd.concat((results_df, id_results_df))
return results_df
def load_purity_by_cancer_type(results_dir, sample_info_df, classify=True):
"""Load results of tumor purity prediction, grouped by cancer type.
Assumes labels are binarized into above/below median.
Arguments
---------
results_dir (str): directory containing results files
sample_info_df (pd.DataFrame): contains cancer type info for samples
classify (bool): look for classification results if true
Returns
-------
results_df (pd.DataFrame): results of prediction experiments
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for results_file in results_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if classify and ('classify' not in results_filename
or 'preds' not in results_filename): continue
if not classify and ('regress' not in results_filename
or 'preds' not in results_filename): continue
if results_filename[0] == '.': continue
if check_compressed_file(results_filename):
training_data = '_'.join(results_filename.split('_')[:-5])
training_data += '_compressed'
signal = results_filename.split('_')[-5]
seed = int(results_filename.split('_')[-3].replace('s', ''))
else:
training_data = '_'.join(results_filename.split('_')[:-4])
signal = results_filename.split('_')[-4]
seed = int(results_filename.split('_')[-2].replace('s', ''))
id_results_df = pd.read_csv(results_file, sep='\t', index_col=0)
cancer_type_results_df = calculate_metrics_for_cancer_type(id_results_df,
training_data,
signal,
seed,
sample_info_df,
classify=classify)
results_df = pd.concat((results_df, cancer_type_results_df))
return results_df
def load_survival_curves(results_dir,
cancer_type,
signal='signal'):
samples, functions = [], []
path_name = os.path.join(
results_dir,
'{}_survival_{}_fold*_functions.pkl'.format(cancer_type, signal)
)
for fname in glob.glob(path_name):
with open(fname, 'rb') as f:
fns_dict = pkl.load(f)
samples += list(fns_dict['samples'])
functions += list(fns_dict['functions'])
return samples, functions
def calculate_metrics_for_cancer_type(id_results_df,
training_data,
signal,
seed,
sample_info_df,
classify=True):
cancer_type_results = []
for fold in id_results_df.fold_no.unique():
fold_df = (id_results_df[id_results_df.fold_no == fold]
.merge(sample_info_df, left_index=True, right_index=True)
.drop(columns=['sample_type', 'id_for_stratification'])
)
for cancer_type in fold_df.cancer_type.unique():
samples_df = fold_df[fold_df.cancer_type == cancer_type]
if classify:
from mpmp.prediction.classification import get_threshold_metrics
try:
with warnings.catch_warnings():
# get rid of ROC/PR sample imbalance warnings, we'll catch
# that case below
warnings.filterwarnings('ignore',
message='No negative samples')
warnings.filterwarnings('ignore',
message='No positive samples')
warnings.filterwarnings('ignore',
message='invalid value encountered')
                        threshold_metrics = get_threshold_metrics(
                            samples_df.true_class, samples_df.positive_prob)
                        aupr = threshold_metrics['aupr']
                        auroc = threshold_metrics['auroc']
except ValueError: # only one class in y_true
aupr = np.nan
auroc = np.nan
metric_names = ['aupr', 'auroc']
cancer_type_results.append((training_data, signal, seed,
fold, cancer_type, aupr, auroc))
else:
from mpmp.prediction.regression import get_continuous_metrics
metrics = get_continuous_metrics(samples_df.true_label,
samples_df.predicted_output)
rmse = metrics['rmse']
r2 = metrics['r2']
metric_names = ['rmse', 'r2']
cancer_type_results.append((training_data, signal, seed,
fold, cancer_type, rmse, r2))
return pd.DataFrame(cancer_type_results,
columns=['training_data', 'signal', 'seed',
'fold_no', 'cancer_type'] + metric_names)
def check_compressed_file(results_filename):
"""Check if results file is from compressed experiments."""
def string_is_int(s):
# https://stackoverflow.com/a/1267145
try:
int(s)
return True
except ValueError:
return False
# if a file uses compressed data, one component of the filename
# should have the format 'n{integer}'
for rs in results_filename.split('_'):
if rs.startswith('n') and string_is_int(rs.split('n')[1]):
return True
return False
def load_preds_to_matrix(preds_dir,
sample_info_df,
training_data='expression'):
"""Load model predictions into a heatmap/confusion matrix.
Arguments
---------
preds_dir (str): directory where preds files are located
sample_info_df (pd.DataFrame): dataframe containing sample information
training_data (str): type of training data to filter to, if None don't
filter
Returns
---------
    preds_df (pd.DataFrame): a cancer type x cancer type dataframe, index contains
target label and columns are true labels, cells contain
average positive class probability for a model trained on
the target label and evaluated on the true label (high
probability = model predicts column class when trained on
row class)
"""
preds_df = pd.DataFrame()
for identifier in Path(preds_dir).iterdir():
identifier_dir = Path(preds_dir, identifier)
if identifier_dir.is_file():
continue
for results_file in identifier_dir.iterdir():
if not results_file.is_file():
continue
results_filename = str(results_file.stem)
if 'preds' not in results_filename:
continue
if 'signal' not in results_filename:
continue
if (training_data is not None and
training_data not in results_filename):
continue
cancer_type_preds_df = (
pd.read_csv(results_file, sep='\t', index_col=0)
.merge(sample_info_df[['cancer_type']],
left_index=True, right_index=True)
.drop(columns=['fold_no', 'true_class'])
.groupby('cancer_type')
.mean()
.T
.rename(index={'positive_prob': results_filename.split('_')[0]})
)
preds_df = pd.concat((preds_df, cancer_type_preds_df))
return preds_df.sort_index()
def compare_results(condition_1_df,
condition_2_df=None,
identifier='gene',
metric='auroc',
correction=False,
correction_method='fdr_bh',
correction_alpha=0.05,
verbose=False):
"""Compare cross-validation results between two experimental conditions.
Main uses for this are comparing an experiment against its negative control
(shuffled labels), and for comparing two experimental "conditions" (e.g.
different models, different data types) against one another.
Arguments
---------
condition_1_df (pd.DataFrame): either a single dataframe to compare against
its negative control, or the first of 2
conditions to compare against each other
condition_2_df (pd.DataFrame): if provided, a second dataframe to compare
against condition_1_df
identifier (str): column to use as the sample identifier
metric (str): column to use as the evaluation metric
correction (bool): whether or not to use a multiple testing correction
correction_method (str): which method to use for multiple testing correction
(from options in statsmodels.stats.multitest)
correction_alpha (float): significance cutoff to use
verbose (bool): if True, print verbose output to stderr
Returns
-------
results_df (pd.DataFrame): identifiers and results of statistical test
"""
if condition_2_df is None:
results_df = compare_control(condition_1_df, identifier, metric, verbose)
else:
results_df = compare_experiment(condition_1_df, condition_2_df,
identifier, metric, verbose)
if correction:
from statsmodels.stats.multitest import multipletests
corr = multipletests(results_df['p_value'],
alpha=correction_alpha,
method=correction_method)
results_df = results_df.assign(corr_pval=corr[1], reject_null=corr[0])
return results_df
def compare_control(results_df,
identifier='gene',
metric='auroc',
verbose=False):
results = []
unique_identifiers = np.unique(results_df[identifier].values)
for id_str in unique_identifiers:
conditions = ((results_df[identifier] == id_str) &
(results_df.data_type == 'test') &
(results_df.signal == 'signal'))
signal_results = results_df[conditions][metric].values
signal_seeds = results_df[conditions]['seed'].values
signal_folds = results_df[conditions]['fold'].values
conditions = ((results_df[identifier] == id_str) &
(results_df.data_type == 'test') &
(results_df.signal == 'shuffled'))
shuffled_results = results_df[conditions][metric].values
shuffled_seeds = results_df[conditions]['seed'].values
shuffled_folds = results_df[conditions]['fold'].values
if signal_results.shape != shuffled_results.shape:
if verbose:
print('shapes unequal for {}, skipping'.format(id_str),
file=sys.stderr)
continue
if not (np.array_equal(np.unique(signal_seeds), np.unique(shuffled_seeds))
and np.array_equal(np.unique(signal_folds), np.unique(shuffled_folds))):
if verbose:
print('samples unequal for {}, skipping'.format(id_str),
file=sys.stderr)
continue
if (signal_results.size == 0) or (shuffled_results.size == 0):
if verbose:
print('size 0 results array for {}, skipping'.format(id_str),
file=sys.stderr)
continue
# make sure seeds and folds are in same order
# this is necessary for paired t-test
try:
assert np.array_equal(signal_seeds, shuffled_seeds)
assert np.array_equal(signal_folds, shuffled_folds)
except AssertionError:
print(id_str, file=sys.stderr)
print(signal_seeds, shuffled_seeds, file=sys.stderr)
print(signal_folds, shuffled_folds, file=sys.stderr)
if np.array_equal(signal_results, shuffled_results):
delta_mean = 0
p_value = 1.0
else:
delta_mean = np.mean(signal_results) - np.mean(shuffled_results)
p_value = ttest_rel(signal_results, shuffled_results)[1]
results.append([id_str, delta_mean, p_value])
return pd.DataFrame(results, columns=['identifier', 'delta_mean', 'p_value'])
def compare_control_ind(results_df,
identifier='gene',
metric='auroc',
verbose=False):
"""Compare signal vs. shuffled results for each seed/CV fold independently.
This allows customized statistical analysis after performing comparison
(as opposed to compare_control which automatically aggregates over
seeds/folds).
"""
results = []
unique_identifiers = np.unique(results_df[identifier].values)
for id_str in unique_identifiers:
conditions = ((results_df[identifier] == id_str) &
(results_df.data_type == 'test') &
(results_df.signal == 'signal'))
signal_results = results_df[conditions].copy()
conditions = ((results_df[identifier] == id_str) &
(results_df.data_type == 'test') &
(results_df.signal == 'shuffled'))
shuffled_results = results_df[conditions].copy()
if signal_results.shape != shuffled_results.shape:
if verbose:
print('shapes unequal for {}, skipping'.format(id_str),
file=sys.stderr)
continue
if (signal_results.size == 0) or (shuffled_results.size == 0):
if verbose:
print('size 0 results array for {}, skipping'.format(id_str),
file=sys.stderr)
continue
for seed in results_df.seed.unique():
for fold in results_df.fold.unique():
try:
signal_value = signal_results[(signal_results.seed == seed) &
(signal_results.fold == fold)][metric].values[0]
shuffled_value = shuffled_results[(shuffled_results.seed == seed) &
(shuffled_results.fold == fold)][metric].values[0]
delta = signal_value - shuffled_value
results.append([id_str, seed, fold, delta])
except IndexError:
# this seed/fold combo doesn't exist, just skip it
continue
return pd.DataFrame(results,
columns=['identifier', 'seed', 'fold',
'delta_{}'.format(metric)])
def compare_experiment(condition_1_df,
condition_2_df,
identifier='gene',
metric='auroc',
verbose=False):
results = []
condition_1_ids = np.unique(condition_1_df[identifier].values)
condition_2_ids = np.unique(condition_2_df[identifier].values)
unique_identifiers = list(set(condition_1_ids).intersection(condition_2_ids))
for id_str in unique_identifiers:
conditions = ((condition_1_df[identifier] == id_str) &
(condition_1_df.data_type == 'test') &
(condition_1_df.signal == 'signal'))
condition_1_results = condition_1_df[conditions][metric].values
condition_1_seeds = condition_1_df[conditions]['seed'].values
condition_1_folds = condition_1_df[conditions]['fold'].values
conditions = ((condition_2_df[identifier] == id_str) &
(condition_2_df.data_type == 'test') &
(condition_2_df.signal == 'signal'))
condition_2_results = condition_2_df[conditions][metric].values
condition_2_seeds = condition_2_df[conditions]['seed'].values
condition_2_folds = condition_2_df[conditions]['fold'].values
if condition_1_results.shape != condition_2_results.shape:
if verbose:
print('shapes unequal for {}, skipping'.format(id_str),
file=sys.stderr)
continue
if (condition_1_results.size == 0) or (condition_2_results.size == 0):
if verbose:
print('size 0 results array for {}, skipping'.format(id_str),
file=sys.stderr)
continue
# make sure seeds and folds are in same order
# this is necessary for paired t-test
try:
assert np.array_equal(condition_1_seeds, condition_2_seeds)
assert np.array_equal(condition_1_folds, condition_2_folds)
except AssertionError:
print(id_str, file=sys.stderr)
print(condition_1_seeds, condition_2_seeds, file=sys.stderr)
print(condition_1_folds, condition_2_folds, file=sys.stderr)
if np.array_equal(condition_2_results, condition_1_results):
delta_mean = 0
p_value = 1.0
else:
# note that a positive value = better performance in condition 2
delta_mean = np.mean(condition_2_results) - np.mean(condition_1_results)
p_value = ttest_rel(condition_2_results, condition_1_results)[1]
results.append([id_str, delta_mean, p_value])
return pd.DataFrame(results, columns=['identifier', 'delta_mean', 'p_value'])
def generate_nonzero_coefficients(results_dir):
"""Generate coefficients from mutation prediction model fits.
Loading all coefficients into memory at once is prohibitive, so we generate
them individually and analyze/summarize in analysis scripts.
Arguments
---------
results_dir (str): directory to look in for results, subdirectories should
be experiments for individual genes
Yields
------
identifier (str): identifier for given coefficients
    coefs (list): nonzero (feature, weight) pairs for each fold of CV, for the
                  given identifier
"""
coefs = {}
all_features = None
for gene_name in os.listdir(results_dir):
gene_dir = os.path.join(results_dir, gene_name)
if not os.path.isdir(gene_dir): continue
for coefs_file in os.listdir(gene_dir):
if coefs_file[0] == '.': continue
if 'signal' not in coefs_file: continue
if 'coefficients' not in coefs_file: continue
training_data = coefs_file.split('_')[1]
full_coefs_file = os.path.join(gene_dir, coefs_file)
coefs_df = pd.read_csv(full_coefs_file, sep='\t')
if all_features is None:
all_features = np.unique(coefs_df.feature.values)
identifier = '{}_{}'.format(gene_name, training_data)
coefs = process_coefs(coefs_df)
yield identifier, coefs
def generate_nz_coefs_msi(results_dir):
"""Generate coefficients from MSI model fits.
Arguments
---------
results_dir (str): directory to look in for results, subdirectories should
be experiments for individual genes
Yields
------
    identifier (str): identifier for given coefficients
    seed (int): random seed the given model was fit with
    coefs (list): nonzero (feature, weight) pairs for each fold of CV, for the
                  given identifier
"""
coefs = {}
all_features = None
for coefs_file in os.listdir(results_dir):
if coefs_file[0] == '.': continue
if 'signal' not in coefs_file: continue
if 'coefficients' not in coefs_file: continue
full_coefs_file = os.path.join(results_dir, coefs_file)
coefs_df = pd.read_csv(full_coefs_file, sep='\t')
seed = coefs_df.seed.values[0]
cancer_type = coefs_df.identifier.values[0]
training_data = coefs_df.training_data.values[0]
if all_features is None:
all_features = np.unique(coefs_df.feature.values)
identifier = '{}_{}'.format(cancer_type, training_data)
coefs = process_coefs(coefs_df)
yield identifier, seed, coefs
def process_coefs(coefs_df):
"""Process and return nonzero coefficients for a single identifier"""
id_coefs = []
for fold in np.sort(np.unique(coefs_df.fold.values)):
conditions = ((coefs_df.fold == fold) &
(coefs_df['abs'] > 0))
nz_coefs_df = coefs_df[conditions]
id_coefs.append(list(zip(nz_coefs_df.feature.values,
nz_coefs_df.weight.values)))
return id_coefs
def compare_all_data_types(results_df,
sig_alpha,
filter_genes=True,
compare_ind=False,
identifier='identifier',
metric='aupr'):
"""Run compare_results for each data type + assemble into dataframe.
Returns a dataframe with mean difference and statistical testing results
for each gene in each data type, typically aggregated across multiple
train/test folds and random seeds.
Arguments
---------
results_df (pd.DataFrame): dataframe with unprocessed results
sig_alpha (float): significance testing threshold
filter_genes (bool): whether to filter genes that are not present
in all data types or not
compare_ind (bool): whether to compare each fold to baseline independently,
or to aggregate and compare means (default aggregate)
identifier (str): name of distinguishing identifier (usually gene)
metric (str): performance metric
Returns
-------
all_results_df (pd.DataFrame): dataframe with processed results
"""
all_results_df = pd.DataFrame()
for training_data in results_df.training_data.unique():
data_df = results_df[results_df.training_data == training_data].copy()
# sorting is necessary for paired/repeated measures statistical tests
data_df.sort_values(by=['seed', 'fold'], inplace=True)
if compare_ind:
data_results_df = compare_control_ind(data_df,
identifier=identifier,
metric=metric,
verbose=True)
else:
data_results_df = compare_results(data_df,
identifier=identifier,
metric=metric,
correction=True,
correction_method='fdr_bh',
correction_alpha=sig_alpha,
verbose=True)
data_results_df['training_data'] = training_data
data_results_df.rename(columns={'identifier': 'gene'}, inplace=True)
all_results_df = pd.concat((all_results_df, data_results_df))
# now filter out genes that don't have comparisons for all data types
if filter_genes:
data_type_counts = all_results_df.groupby('gene').count().training_data
valid_genes = data_type_counts[data_type_counts == len(results_df.training_data.unique())].index
all_results_df = all_results_df[
all_results_df.gene.isin(valid_genes)
].copy()
if not compare_ind:
all_results_df['nlog10_p'] = -np.log10(all_results_df.corr_pval)
return all_results_df
def compare_data_types_and_dims(results_df,
sig_alpha,
filter_genes=True,
compare_ind=False,
identifier='identifier',
metric='aupr'):
"""Run compare_results for each data type and dimension.
Returns a dataframe with mean difference and statistical testing results
for each gene in each data type, typically aggregated across multiple
train/test folds and random seeds.
Arguments
---------
results_df (pd.DataFrame): dataframe with unprocessed results
sig_alpha (float): significance testing threshold
filter_genes (bool): whether to filter genes that are not present
in all data types or not
compare_ind (bool): whether to compare each fold to baseline independently,
or to aggregate and compare means (default aggregate)
identifier (str): name of distinguishing identifier (usually gene)
metric (str): performance metric
Returns
-------
all_results_df (pd.DataFrame): dataframe with processed results
"""
all_results_df = pd.DataFrame()
for training_data in results_df.training_data.unique():
for n_dims in results_df.n_dims.unique():
data_df = (
results_df[(results_df.training_data == training_data) &
(results_df.n_dims == n_dims)]
.drop(columns=['training_data'])
.copy()
)
# sorting is necessary for paired/repeated measures statistical tests
data_df.sort_values(by=['seed', 'fold'], inplace=True)
if compare_ind:
data_results_df = compare_control_ind(data_df,
identifier=identifier,
metric=metric,
verbose=True)
else:
data_results_df = compare_results(data_df,
identifier=identifier,
metric=metric,
correction=True,
correction_method='fdr_bh',
correction_alpha=sig_alpha,
verbose=True)
data_results_df['training_data'] = training_data
data_results_df['n_dims'] = n_dims
data_results_df.rename(columns={'identifier': 'gene'}, inplace=True)
all_results_df = pd.concat((all_results_df, data_results_df))
# now filter out genes that don't have comparisons for all data types
if filter_genes:
data_type_counts = all_results_df.groupby('gene').count().training_data
valid_genes = data_type_counts[data_type_counts == len(results_df.training_data.unique())].index
all_results_df = all_results_df[
all_results_df.gene.isin(valid_genes)
].copy()
if not compare_ind:
all_results_df['nlog10_p'] = -np.log10(all_results_df.corr_pval)
return all_results_df
```
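A hypothetical example of `compare_results` in its negative-control mode (no second condition); the column names follow `compare_control` above, and the metric values are invented for illustration.
```python
import pandas as pd
from mpmp.utilities.analysis_utilities import compare_results

toy_df = pd.DataFrame({
    'gene': ['TP53'] * 8,
    'data_type': ['test'] * 8,
    'signal': ['signal'] * 4 + ['shuffled'] * 4,
    'seed': [1, 1, 2, 2] * 2,
    'fold': [0, 1, 0, 1] * 2,
    'auroc': [0.90, 0.85, 0.88, 0.87, 0.52, 0.49, 0.51, 0.50],
})
# paired t-test of signal vs. shuffled AUROC for each gene
stats_df = compare_results(toy_df, identifier='gene', metric='auroc')
# stats_df columns: ['identifier', 'delta_mean', 'p_value']
```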
#### File: mpmp/utilities/survival_utilities.py
```python
from pathlib import Path
import pandas as pd
def load_survival_results(results_dir):
"""Load results of survival prediction experiments.
Arguments
---------
results_dir (str): directory to look in for results, subdirectories should
be experiments for individual genes or cancer types
Returns
-------
results_df (pd.DataFrame): summarizes experiment results
"""
results_df = pd.DataFrame()
results_dir = Path(results_dir)
for results_file in results_dir.iterdir():
if not results_file.is_file(): continue
results_filename = str(results_file.stem)
if ('survival' not in results_filename or
'metrics' not in results_filename): continue
if results_filename[0] == '.': continue
id_results_df = pd.read_csv(results_file, sep='\t')
results_df = pd.concat((results_df, id_results_df))
return results_df
```
#### File: mpmp/tests/test_model.py
```python
import pytest
import numpy as np
import pandas as pd
import mpmp.test_config as tcfg
from mpmp.data_models.tcga_data_model import TCGADataModel
from mpmp.prediction.cross_validation import run_cv_stratified
import mpmp.utilities.data_utilities as du
@pytest.fixture
def data_model(data_type):
"""Load data model and sample info data"""
# passing arguments to fixtures (like data_type here), then using them
# in tests isn't widely documented in pytest, but seems to work
# see, e.g. https://stackoverflow.com/a/60148972
tcga_data = TCGADataModel(training_data=data_type,
debug=True, test=True)
sample_info_df = du.load_sample_info(train_data_type=data_type)
return tcga_data, sample_info_df
@pytest.mark.parametrize('data_type', [tcfg.test_data_types[0]])
def test_simple(data_model):
"""Just test that the data model loads correctly with test option"""
assert data_model is not None
@pytest.mark.parametrize('data_type', tcfg.test_data_types)
@pytest.mark.parametrize('gene_info', tcfg.stratified_gene_info)
def test_stratified_classification(data_model, data_type, gene_info):
"""Regression test for prediction using stratified cross-validation"""
tcga_data, sample_info_df = data_model
gene, classification = gene_info
tcga_data.process_data_for_gene(gene,
classification,
gene_dir=None)
results = run_cv_stratified(tcga_data,
'gene',
gene,
data_type,
sample_info_df,
num_folds=4,
standardize_columns=True,
shuffle_labels=False)
metrics_df = pd.concat(results['gene_metrics'])
results_file = tcfg.test_stratified_results.format(data_type, gene)
old_results = np.loadtxt(results_file)
# make sure our results haven't changed; i.e. regression testing for model
# if a change to model output is intentional, the saved results can be
# regenerated by running mpmp/scripts/generate_test_data.py
assert np.allclose(metrics_df['auroc'].values, old_results)
```
#### File: mpmp/tests/test_multimodal_preprocessing.py
```python
import string
import itertools as it
import pytest
import numpy as np
import pandas as pd
import mpmp.config as cfg
import mpmp.utilities.tcga_utilities as tu
@pytest.fixture(scope='module')
def raw_data():
"""Generate a raw dataset"""
cols = list(string.ascii_lowercase)[:11]
X_train_raw_df = pd.DataFrame(np.random.uniform(size=(20, 11)), columns=cols)
X_test_raw_df = pd.DataFrame(np.random.uniform(size=(10, 11)), columns=cols)
data_types = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2,
cfg.NONGENE_FEATURE, cfg.NONGENE_FEATURE])
gene_features = np.array([True] * 9 + [False] * 2)
return (X_train_raw_df,
X_test_raw_df,
gene_features,
data_types)
@pytest.mark.parametrize('standardize_columns', [[False, False, False],
[True, True, True],
[True, False, True]])
@pytest.mark.parametrize('subset_mad_genes', [-1, 1, 2])
def test_preprocessing(raw_data, standardize_columns, subset_mad_genes):
(X_train_raw_df,
X_test_raw_df,
gene_features,
data_types) = raw_data
X_train_df, X_test_df = tu.preprocess_multi_data(X_train_raw_df,
X_test_raw_df,
gene_features,
data_types,
standardize_columns,
subset_mad_genes)
# make sure no samples were lost
assert X_train_df.shape[0] == X_train_raw_df.shape[0]
assert X_test_df.shape[0] == X_test_raw_df.shape[0]
# make sure no NaN values
assert X_train_df.isna().sum().sum() == 0
# make sure no non-gene features were lost, and make sure that
# each data type has the correct number of subset columns
if subset_mad_genes == -1:
# if no subsetting by MAD, number of features shouldn't change
assert (X_train_df.shape[1] == X_train_raw_df.shape[1])
else:
# if we do subset by MAD:
# total number of features = (number of data types * number of features to
# subset to) + number of non-gene features
assert (X_train_df.shape[1] ==
(subset_mad_genes * (np.unique(data_types).shape[0] - 1)) +
(np.count_nonzero(data_types == cfg.NONGENE_FEATURE)))
# make sure standardized columns were actually standardized
for ix, std_col in enumerate(standardize_columns):
if subset_mad_genes == -1:
data_types_filtered = data_types
else:
# here we have to reconstruct the data types of each column, since
# we filtered columns by MAD
#
# we can do that by adding each index in order * the number of
# top MAD columns we took, then adding the non-gene features to the end
data_types_filtered = sum(
[[ix] * subset_mad_genes
for ix in range(np.unique(data_types).shape[0] - 1)],
[]
)
data_types_filtered += [cfg.NONGENE_FEATURE] * np.count_nonzero(
data_types == cfg.NONGENE_FEATURE)
data_types_filtered = np.array(data_types_filtered)
valid_cols = (data_types_filtered == ix)
if std_col:
# if a column is standardized, it should be ~standard normal, with
# values above and below 0
assert X_train_df.loc[:, valid_cols].values.flatten().min() < 0
assert X_train_df.loc[:, valid_cols].values.flatten().max() > 0
else:
# if a column is not standardized, we sampled from a uniform (0, 1)
# so it should only have values above 0
assert X_train_df.loc[:, valid_cols].values.flatten().min() > 0
assert X_train_df.loc[:, valid_cols].values.flatten().max() > 0
``` |
{
"source": "jjc2718/netreg",
"score": 3
} |
#### File: netreg/tests/generate_fixtures.py
```python
import os
import numpy as np
import pandas as pd
import sys; sys.path.append('.')
import config as cfg
import simdata.simulate_loglinear as ll
from tcga_util import train_model, extract_coefficients
def generate_data(seed, params):
np.random.seed(seed)
# generate simulated data from a log-linear model
X, y, _, __ = ll.simulate_ll(params['n_train']+params['n_test'],
params['p'], 0, seed=seed)
train_ixs = ll.split_train_test(params['n_train']+params['n_test'],
params['n_train']/(params['n_train']+params['n_test']),
seed=seed)
X_train, X_test = X[train_ixs], X[~train_ixs]
y_train, y_test = y[train_ixs], y[~train_ixs]
# put things into dataframes
train_index = ['S{}'.format(i) for i in range(params['n_train'])]
test_index = ['S{}'.format(i) for i in range(
params['n_train'], params['n_train']+params['n_test'])]
columns = ['G{}'.format(j) for j in range(params['p'])]
X_train_df = pd.DataFrame(X_train, index=train_index, columns=columns)
X_test_df = pd.DataFrame(X_test, index=test_index, columns=columns)
y_train_df = pd.DataFrame(y_train, index=train_index, columns=['status'])
    y_test_df = pd.DataFrame(y_test, index=test_index, columns=['status'])
return X_train_df, X_test_df, y_train_df, y_test_df
def predict(X_train, X_test, y_train, y_test):
cv_pipeline, y_pred_train, y_pred_test, y_cv = train_model(
x_train=X_train,
x_test=X_test,
y_train=y_train,
alphas=cfg.alphas,
l1_ratios=cfg.l1_ratios,
n_folds=cfg.folds,
max_iter=cfg.max_iter
)
y_pred_train_df = pd.DataFrame(y_pred_train,
index=y_train.index,
columns=y_train.columns)
y_pred_test_df = pd.DataFrame(y_pred_test,
index=y_test.index,
columns=y_test.columns)
coef_df = extract_coefficients(
cv_pipeline=cv_pipeline,
feature_names=X_train.columns,
signal='signal',
z_dim=cfg.num_features_raw,
seed=cfg.default_seed,
algorithm='raw'
)
return y_pred_train_df, y_pred_test_df, coef_df
if __name__ == '__main__':
# generate data
X_train_df, X_test_df, y_train_df, y_test_df = generate_data(cfg.default_seed,
cfg.test_params)
# make predictions
y_pred_train_df, y_pred_test_df, coef_df = predict(
X_train_df, X_test_df, y_train_df, y_test_df)
# save results
if not os.path.exists(cfg.fixtures_dir):
os.makedirs(cfg.fixtures_dir)
y_pred_train_df.to_csv(
cfg.saved_results_train, sep="\t", index=False,
compression="gzip", float_format="%.5g"
)
y_pred_test_df.to_csv(
cfg.saved_results_test, sep="\t", index=False,
compression="gzip", float_format="%.5g"
)
coef_df.to_csv(
cfg.saved_coefs, sep="\t", index=False,
compression="gzip", float_format="%.5g"
)
```
#### File: netreg/tests/test_output_shapes.py
```python
import os
import pytest
import tempfile
import numpy as np
import pandas as pd
import sys; sys.path.append('.')
import config as cfg
from data_models import DataModel
@pytest.fixture
def shapes_test():
# n = number of samples
# p = number of features
# k = latent space dimension
# m = number of pathways for PLIER
np.random.seed(cfg.default_seed)
params = {
'n_train': 10,
'n_test': 5,
'p': 20,
'k': 5,
'm': 10
}
exp_data = {
'train': pd.DataFrame(
np.random.uniform(size=(params['n_train'], params['p'])),
index=['S{}'.format(i) for i in range(params['n_train'])],
columns=['G{}'.format(j) for j in range(params['p'])]),
'test': pd.DataFrame(
np.random.uniform(size=(params['n_test'], params['p'])),
index=['S{}'.format(i) for i in range(
params['n_train'], params['n_train']+params['n_test'])],
columns=['G{}'.format(j) for j in range(params['p'])])
}
return params, exp_data
def _generate_and_save_pathways(p, m):
"""Function to generate random pathways and save in a temporary file.
File is closed but not deleted, so code that calls this function
must delete the file after it is used.
"""
# it really doesn't matter exactly what the simulated pathways are,
# this is just used for testing that dimensions are correct
pathways = pd.DataFrame(np.random.randint(2, size=(p, m)),
index=['G{}'.format(j) for j in range(p)],
columns=['PW{}'.format(k) for k in range(m)])
tf = tempfile.NamedTemporaryFile(mode='w', delete=False)
filename = tf.name
pathways.to_csv(filename, sep='\t')
tf.close()
return filename
def test_pca_output(shapes_test):
"""Test dimensions of PCA output."""
params, exp_data = shapes_test
dm = DataModel(df=exp_data['train'], test_df=exp_data['test'])
dm.transform(how='zscore')
dm.pca(n_components=params['k'], transform_test_df=True)
assert dm.pca_df.shape == (params['n_train'], params['k'])
assert dm.pca_test_df.shape == (params['n_test'], params['k'])
assert dm.pca_weights.shape == (params['k'], params['p'])
def test_plier_output(shapes_test):
"""Test dimensions of PLIER output."""
params, exp_data = shapes_test
pathways_file = _generate_and_save_pathways(params['p'],
params['m'])
dm = DataModel(df=exp_data['train'], test_df=exp_data['test'])
dm.transform(how='zscore')
dm.plier(n_components=params['k'], pathways_file=pathways_file,
transform_test_df=True, skip_cache=True)
os.remove(pathways_file)
assert dm.plier_df.shape == (params['n_train'], params['k'])
assert dm.plier_test_df.shape == (params['n_test'], params['k'])
assert dm.plier_weights.shape == (params['k'], params['p'])
```
#### File: netreg/utilities/classify_sklearn.py
```python
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
cross_val_predict
)
def train_sklearn_model(X_train, X_test, y_train, alphas, l1_ratios,
seed=0, n_folds=5, max_iter=1000):
# Setup the classifier parameters
clf_parameters = {
"classify__loss": ["log"],
"classify__penalty": ["elasticnet"],
"classify__alpha": alphas,
"classify__l1_ratio": l1_ratios,
}
estimator = Pipeline(
steps=[
(
"classify",
SGDClassifier(
random_state=seed,
class_weight="balanced",
loss="log",
max_iter=max_iter,
tol=1e-3,
),
)
]
)
cv_pipeline = GridSearchCV(
estimator=estimator,
param_grid=clf_parameters,
n_jobs=-1,
cv=n_folds,
scoring="roc_auc",
iid=True,
return_train_score=True,
)
# Fit the model
cv_pipeline.fit(X=X_train, y=y_train)
# Obtain cross validation results
y_cv = cross_val_predict(
cv_pipeline.best_estimator_,
X=X_train,
y=y_train,
cv=n_folds,
method="decision_function",
)
# Get all performance results
y_pred_train = cv_pipeline.decision_function(X_train)
y_pred_test = cv_pipeline.decision_function(X_test)
y_pred_bn_train = cv_pipeline.predict(X_train)
y_pred_bn_test = cv_pipeline.predict(X_test)
return y_pred_train, y_pred_test, y_pred_bn_train, y_pred_bn_test
```
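A sketch of calling `train_sklearn_model` on random data, just to show the expected inputs and outputs; the hyperparameter grids are placeholders, and it assumes a scikit-learn version old enough to accept `GridSearchCV`'s `iid` argument (as used above).
```python
import numpy as np
# assumes the netreg repo root is on sys.path, as in the other modules
from utilities.classify_sklearn import train_sklearn_model

rng = np.random.RandomState(0)
X_train = rng.uniform(size=(40, 10))
X_test = rng.uniform(size=(20, 10))
y_train = np.tile([0, 1], 20)  # balanced binary labels

y_pred_train, y_pred_test, y_pred_bn_train, y_pred_bn_test = train_sklearn_model(
    X_train, X_test, y_train,
    alphas=[0.01, 0.1, 1.0],
    l1_ratios=[0.0, 0.5, 1.0],
    n_folds=3,
)
# decision-function scores for train/test, plus binarized class predictions
```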
#### File: netreg/utilities/jaccard_utilities.py
```python
import os
import itertools as it
import pandas as pd
def compute_jaccard(v1, v2):
v1, v2 = set(v1), set(v2)
intersection = v1.intersection(v2)
union = v1.union(v2)
return ((len(intersection) / len(union) if len(union) != 0 else 0),
len(intersection),
len(union))
def get_inter_method_similarity(sk_coefs_folds, torch_coefs_folds,
seeds, folds, signal='signal'):
inter_method_sims = []
for seed in seeds:
for fold in folds:
sk_coefs = sk_coefs_folds[signal][seed][fold][0]
sk_genes = sk_coefs_folds[signal][seed][fold][1]
sk_nz_coefs = (sk_coefs != 0)
sk_nz_genes = sk_genes[sk_nz_coefs]
torch_coefs = torch_coefs_folds[signal][seed][fold][0]
torch_genes = torch_coefs_folds[signal][seed][fold][1]
torch_nz_coefs = (torch_coefs != 0)
torch_nz_genes = torch_genes[torch_nz_coefs]
inter_method_sims.append(compute_jaccard(set(sk_nz_genes), set(torch_nz_genes))[0])
return inter_method_sims
def get_intra_method_similarity(sk_coefs_folds, torch_coefs_folds,
seeds, folds, signal='signal'):
intra_method_sims_sk = []
intra_method_sims_torch = []
for seed in seeds:
for f1, f2 in it.combinations(folds, 2):
# first for scikit-learn
sk_coefs_f1 = sk_coefs_folds[signal][seed][f1][0]
sk_genes_f1 = sk_coefs_folds[signal][seed][f1][1]
sk_coefs_f2 = sk_coefs_folds[signal][seed][f2][0]
sk_genes_f2 = sk_coefs_folds[signal][seed][f2][1]
sk_nz_coefs_f1 = (sk_coefs_f1 != 0)
sk_nz_genes_f1 = sk_genes_f1[sk_nz_coefs_f1]
sk_nz_coefs_f2 = (sk_coefs_f2 != 0)
sk_nz_genes_f2 = sk_genes_f2[sk_nz_coefs_f2]
intra_method_sims_sk.append(compute_jaccard(set(sk_nz_genes_f1), set(sk_nz_genes_f2))[0])
# then for torch
torch_coefs_f1 = torch_coefs_folds[signal][seed][f1][0]
torch_genes_f1 = torch_coefs_folds[signal][seed][f1][1]
torch_coefs_f2 = torch_coefs_folds[signal][seed][f2][0]
torch_genes_f2 = torch_coefs_folds[signal][seed][f2][1]
torch_nz_coefs_f1 = (torch_coefs_f1 != 0)
torch_nz_genes_f1 = torch_genes_f1[torch_nz_coefs_f1]
torch_nz_coefs_f2 = (torch_coefs_f2 != 0)
torch_nz_genes_f2 = torch_genes_f2[torch_nz_coefs_f2]
intra_method_sims_torch.append(compute_jaccard(set(torch_nz_genes_f1), set(torch_nz_genes_f2))[0])
return (intra_method_sims_sk, intra_method_sims_torch)
```
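A quick sanity check of `compute_jaccard`, which returns the Jaccard index together with the intersection and union sizes.
```python
# assumes the netreg repo root is on sys.path, as in the other modules
from utilities.jaccard_utilities import compute_jaccard

sim, n_inter, n_union = compute_jaccard(['G1', 'G2', 'G3'], ['G2', 'G3', 'G4'])
assert (sim, n_inter, n_union) == (0.5, 2, 4)
```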
#### File: netreg/utilities/latent_space_utilities.py
```python
import os
import glob
import numpy as np
import pandas as pd
def get_overlap_cols_from_plier(models_dirs):
# this implements approach 1 from 5.analyze_plier_compression.ipynb
genesets = []
for m_dir in models_dirs:
plier_pattern = os.path.join(m_dir,
'components_10',
'plier_*_weight_matrix.tsv.gz')
fnames = glob.glob(plier_pattern)
genesets.append(set(pd.read_csv(fnames[0], sep='\t', index_col=0).columns.values))
return sorted(list(genesets[0].intersection(*genesets[1:])))
def get_overlap_cols_from_files(f1, f2):
# this implements approach 2 from 5.analyze_plier_compression.ipynb
f1_names = set(pd.read_csv(f1, sep='\t', index_col=0).columns.values)
f2_names = set(pd.read_csv(f2, sep='\t', index_col=0).columns.values)
return sorted(list(f1_names.intersection(f2_names)))
def get_matrices_from_files(files, gene_subset, shuffled=False):
mtxs, filenames = [], []
if shuffled:
files = [f for f in files if 'shuffled' in f]
else:
files = [f for f in files if 'shuffled' not in f]
for f in files:
mtxs.append(pd.read_csv(f, sep='\t', header=0, index_col=0)[gene_subset])
filenames.append(f)
return (mtxs, files)
def calculate_avg_cca(z_dims, models_map, overlap=False, verbose=False):
import itertools
import utilities.cca_core as cca_core
algorithms = list(models_map.keys())
avg_cca_mtx = {z_dim: np.zeros((len(algorithms), len(algorithms))) for z_dim in z_dims}
for z_dim in z_dims:
for alg1, alg2 in itertools.combinations_with_replacement(algorithms, 2):
if verbose:
print('Comparing {} with {} for z={}...'.format(alg1, alg2, z_dim), end='')
i1, i2 = algorithms.index(alg1), algorithms.index(alg2)
cca_values = []
alg1_pattern = os.path.join(models_map[alg1],
'components_{}'.format(z_dim),
'{}_*_weight_matrix.tsv.gz'.format(alg1.split('_')[0]))
alg2_pattern = os.path.join(models_map[alg2],
'components_{}'.format(z_dim),
'{}_*_weight_matrix.tsv.gz'.format(alg2.split('_')[0]))
alg1_files = glob.glob(alg1_pattern)
alg2_files = glob.glob(alg2_pattern)
if overlap:
overlap_cols = get_overlap_cols_from_files(alg1_files[0],
alg2_files[0])
else:
overlap_cols = get_overlap_cols_from_plier(
list(set(models_map.values())))
(alg1_matrices, alg1_files) = get_matrices_from_files(alg1_files,
overlap_cols)
(alg2_matrices, alg2_files) = get_matrices_from_files(alg2_files,
overlap_cols)
for s1, s2 in itertools.product(range(len(alg1_matrices)),
range(len(alg2_matrices))):
cca_result = cca_core.robust_cca_similarity(alg1_matrices[s1],
alg2_matrices[s2],
verbose=False)
cca_values.append(np.mean(cca_result['mean']))
avg_cca_mtx[z_dim][i1, i2] = np.mean(cca_values)
avg_cca_mtx[z_dim][i2, i1] = avg_cca_mtx[z_dim][i1, i2]
if verbose:
print('done')
return avg_cca_mtx
```
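A hypothetical call to `get_overlap_cols_from_files`; the paths are made up, and each file is assumed to be a tab-separated weight matrix with gene identifiers as columns, matching how the loaders above read them.
```python
# assumes the netreg repo root is on sys.path, as in the other modules
from utilities.latent_space_utilities import get_overlap_cols_from_files

shared_genes = get_overlap_cols_from_files(
    'models_a/components_10/plier_1_weight_matrix.tsv.gz',
    'models_b/components_10/plier_2_weight_matrix.tsv.gz',
)
# sorted list of gene columns present in both weight matrices
```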
#### File: netreg/utilities/symbol_to_entrez_id.py
```python
import time
import mygene
import numpy as np
import pandas as pd
def filter_query_result(df, entrezgene=False):
"""Get the total number of result genes from a MyGene query."""
unmatched_genes = df.index.values
# first filter for notfound
try:
matched_df = df[df['notfound'].isnull()]
except KeyError:
matched_df = df
# then filter for not null entrezgene, if applicable, since some symbols
# may have a NaN entrezgene
# (why the DB distinguishes this case from symbols that are not found,
# I don't know)
if entrezgene:
try:
matched_df = matched_df[matched_df['entrezgene'].notnull()]
except KeyError:
# this shouldn't ever happen, but if it does nothing matches
matched_df = pd.DataFrame(columns=df.columns)
num_matched_genes = len(matched_df)
matched_genes = np.unique(matched_df.index.values)
unmatched_genes = list(set(unmatched_genes) - set(matched_genes))
return matched_df, matched_genes, unmatched_genes
def query_to_map(df_query, target_name, map_to_lists=False):
"""Convert results of a MyGene query to a dict."""
query_map = {}
if 'notfound' not in df_query:
df_query['notfound'] = np.nan
for query, row in df_query[df_query['notfound'].isnull()].iterrows():
if map_to_lists:
if query in query_map:
query_map[query].append(row[target_name])
else:
query_map[query] = [row[target_name]]
else:
query_map[query] = row[target_name]
return query_map
def invert_list_map(orig_map):
"""Invert a dict mapping keys to lists.
The result is a dict as follows:
For each key: [v1, v2, ..., vN] pair in the original dict, the inverted
dict will have elements
{v1: key, v2: key, ..., vN: key}.
Warning: may cause unexpected behavior on a dict containing non-list
values.
"""
inverse_map = {}
for k, v in orig_map.items():
for vi in v:
inverse_map[vi] = k
return inverse_map
def map_loc_genes(gene_list):
"""Map gene names beginning with 'LOC'.
See https://www.biostars.org/p/129299/ : these are genes with no
published symbol, and thus have the format 'LOC' + Entrez ID.
"""
gene_map = {}
unmatched = []
for gene in gene_list:
if gene.startswith('LOC'):
gene_map[gene] = gene.replace('LOC', '')
else:
unmatched.append(gene)
return gene_map, unmatched
def fill_na(symbols_map, symbols_list):
"""Fill symbol map with 'N/A' for unmapped symbols."""
filled_map = symbols_map.copy()
for s in symbols_list:
if s not in filled_map:
filled_map[s] = 'N/A'
return filled_map
def get_list_duplicates(in_list):
"""Identify duplicates in a list."""
seen = set()
duplicates = set()
for item in in_list:
if item in seen and item != 'N/A':
duplicates.add(item)
seen.add(item)
return list(duplicates)
def symbol_to_entrez_id(symbols_list, verbose=False, sleep_time=5):
"""Map a list of gene symbols to Entrez IDs.
Uses the MyGene API to query first for exact symbol/Entrez ID mappings,
then queries the same API for aliases of unmatched symbols and finds
mappings for the aliases.
Parameters
----------
symbols_list : list of str
List of symbols to map.
verbose : bool, default=False
Whether or not to print information about progress/output.
sleep_time : int, default=5
How many seconds to sleep between calls to the MyGene API.
Returns
-------
symbol_map : (dict of str: str)
Maps symbols to Entrez IDs. Unidentified symbols will map
to the string 'N/A'.
"""
mg = mygene.MyGeneInfo()
if verbose:
print('Querying for exact matches:')
# first query for exact matches
df_exact = mg.querymany(symbols_list,
scopes='symbol',
fields='entrezgene',
species='human',
verbose=False,
as_dataframe=True)
# some symbols may have higher confidence results for
# Ensembl keys, so sorting by entrezgene and dropping
# duplicates keeps the result with an Entrez id
try:
df_exact.sort_values(by='entrezgene', inplace=True)
df_exact = df_exact.loc[~df_exact.index.duplicated(keep='first')]
df_exact, matched, unmatched = filter_query_result(df_exact,
entrezgene=True)
symbol_map = query_to_map(df_exact, 'entrezgene')
except KeyError:
symbol_map = {}
if verbose:
print('-- Matched {} of {} genes'.format(
len(matched), len(matched) + len(unmatched)))
if len(unmatched) == 0:
return symbol_map
if verbose:
print('Trying to manually map unmapped genes:')
unmatched_before = len(unmatched)
loc_map, unmatched = map_loc_genes(unmatched)
symbol_map = {**symbol_map, **loc_map}
if verbose:
print('-- Matched {} of {} genes'.format(
unmatched_before - len(unmatched), unmatched_before))
if len(unmatched) == 0:
return symbol_map
time.sleep(sleep_time)
if verbose:
print('Querying MyGene for aliases of {} unmatched genes:'.format(
len(unmatched)))
# then query for aliases of unmatched symbols
df_alias = mg.querymany(unmatched,
scopes='alias',
fields='symbol',
species='human',
verbose=False,
as_dataframe=True)
# get rid of rows where the alias has already been matched
df_alias = df_alias.loc[~df_alias['symbol'].isin(matched)]
df_alias = df_alias.loc[~df_alias['_id'].isin(list(symbol_map.values()))]
# duplicates are sorted in order of MyGene confidence score,
# so keep the most confident and drop others
#
# TODO: maybe revisit this and try to keep genes that match
# with TCGA data?
df_alias = df_alias.loc[~df_alias.index.duplicated(keep='first')]
df_alias = df_alias.loc[~df_alias['symbol'].duplicated(keep='first')]
df_alias, matched, _ = filter_query_result(df_alias)
if verbose:
print('-- Found aliases for {} of {} genes'.format(
len(matched), len(unmatched)))
alias_map = query_to_map(df_alias, 'symbol', map_to_lists=True)
inverse_alias_map = invert_list_map(alias_map)
time.sleep(sleep_time)
if verbose:
print('Querying for alias entrez IDs:')
# and get entrez IDs of aliases
flat_aliases = [i for sl in alias_map.values() for i in sl]
df_inexact = mg.querymany(flat_aliases,
scopes='symbol',
fields='entrezgene',
species='human',
verbose=False,
as_dataframe=True)
try:
df_inexact, matched, unmatched = filter_query_result(df_inexact,
entrezgene=True)
inexact_map = query_to_map(df_inexact, 'entrezgene')
inexact_map = {inverse_alias_map[k]: v
for k, v in inexact_map.items()}
symbol_map = fill_na({**symbol_map, **inexact_map}, symbols_list)
except KeyError:
# keep symbol map the same if no entrez genes found
pass
if verbose:
print('-- Matched {} of {} genes'.format(
len(matched), len(matched) + len(unmatched)))
if verbose:
eids = list(symbol_map.values())
total_count = len(eids)
na_count = eids.count('N/A')
duplicates = get_list_duplicates(eids)
print('RESULTS: matched {} of {} genes ({} duplicate Entrez IDs)'.format(
total_count - na_count, total_count, len(duplicates)))
return symbol_map
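# Usage sketch (this queries the live MyGene.info service, so the mapped IDs
# below are illustrative rather than guaranteed output):
#
#     symbol_map = symbol_to_entrez_id(['TP53', 'BRCA1', 'NOTAREALGENE'])
#     # e.g. {'TP53': '7157', 'BRCA1': '672', 'NOTAREALGENE': 'N/A'}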
``` |
{
"source": "jjcao/capsule-network",
"score": 2
} |
#### File: jjcao/capsule-network/trainer.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
import os
from numpy import prod
from datetime import datetime
from model import CapsuleNetwork
from loss import CapsuleLoss
from time import time
SAVE_MODEL_PATH = 'checkpoints/'
if not os.path.exists(SAVE_MODEL_PATH):
os.mkdir(SAVE_MODEL_PATH)
class CapsNetTrainer:
"""
Wrapper object for handling training and evaluation
"""
def __init__(self, loaders, batch_size, learning_rate, num_routing=3, lr_decay=0.9, device=torch.device("cuda" if torch.cuda.is_available() else "cpu"), multi_gpu=(torch.cuda.device_count() > 1)):
self.device = device
self.multi_gpu = multi_gpu
self.loaders = loaders
img_shape = self.loaders['train'].dataset[0][0].numpy().shape
self.net = CapsuleNetwork(img_shape=img_shape, channels=256, primary_dim=8, num_classes=10, out_dim=16, num_routing=num_routing, device=self.device).to(self.device)
if self.multi_gpu:
self.net = nn.DataParallel(self.net)
self.criterion = CapsuleLoss(loss_lambda=0.5, recon_loss_scale=5e-4)
self.optimizer = optim.Adam(self.net.parameters(), lr=learning_rate)
self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=lr_decay)
print(8*'#', 'PyTorch Model built'.upper(), 8*'#')
print('Num params:', sum([prod(p.size()) for p in self.net.parameters()]))
def __repr__(self):
return repr(self.net)
def run(self, epochs, classes):
print(8*'#', 'Run started'.upper(), 8*'#')
eye = torch.eye(len(classes)).to(self.device)
for epoch in range(1, epochs+1):
for phase in ['train', 'test']:
print(f'{phase}ing...'.capitalize())
if phase == 'train':
self.net.train()
else:
self.net.eval()
t0 = time()
running_loss = 0.0
correct = 0; total = 0
for i, (images, labels) in enumerate(self.loaders[phase]):
t1 = time()
images, labels = images.to(self.device), labels.to(self.device)
# One-hot encode labels
labels = eye[labels]
self.optimizer.zero_grad()
outputs, reconstructions = self.net(images)
loss = self.criterion(outputs, labels, images, reconstructions)
if phase == 'train':
loss.backward()
self.optimizer.step()
running_loss += loss.item()
_, predicted = torch.max(outputs, 1)
_, labels = torch.max(labels, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
accuracy = float(correct) / float(total)
if phase == 'train':
print(f'Epoch {epoch}, Batch {i+1}, Loss {running_loss/(i+1)}',
f'Accuracy {accuracy} Time {round(time()-t1, 3)}s')
print(f'{phase.upper()} Epoch {epoch}, Loss {running_loss/(i+1)}',
f'Accuracy {accuracy} Time {round(time()-t0, 3)}s')
self.scheduler.step()
now = str(datetime.now()).replace(" ", "-")
error_rate = round((1-accuracy)*100, 2)
torch.save(self.net.state_dict(), os.path.join(SAVE_MODEL_PATH, f'{error_rate}_{now}.pth.tar'))
class_correct = list(0. for _ in classes)
class_total = list(0. for _ in classes)
for images, labels in self.loaders['test']:
images, labels = images.to(self.device), labels.to(self.device)
outputs, reconstructions = self.net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(labels.size(0)):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(len(classes)):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
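# Minimal sketch of driving CapsNetTrainer (the loader construction is an
# assumption for illustration and not part of this file):
#
#     loaders = {'train': train_loader, 'test': test_loader}  # torch DataLoaders
#     trainer = CapsNetTrainer(loaders, batch_size=128, learning_rate=1e-3)
#     trainer.run(epochs=10, classes=list(range(10)))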
``` |
{
"source": "jjcapellan/flask-examples-pyjwt",
"score": 2
} |
#### File: flask-examples-pyjwt/app/security.py
```python
import jwt
from functools import wraps
from flask import Blueprint, request, make_response, current_app
from werkzeug.security import check_password_hash
from app.db import users_table as db
from datetime import datetime, timedelta
bp = Blueprint('security', __name__)
@bp.route('/login',methods=(['POST']))
def login():
data = request.get_json()
username = data['username']
password = data['password']
error = None
try:
user = db[username]
except:
user = None
if user is None or not check_password_hash(user['password'], password):
error = 'Incorrect user or password'
if error is None:
payload = {
'iat': datetime.utcnow(), # Current time
'exp': datetime.utcnow() + timedelta(minutes=10), # Expiration time
'sub': user['name'],
'rol': user['rol']
}
return make_response(jwt.encode(payload, current_app.config['SECRET_KEY'],algorithm='HS256'), 200)
return make_response(error, 401)
def token_required(rol):
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
token = request.headers.get('Authorization')
if not token:
return make_response('Invalid credentials', 401)
try:
data = jwt.decode(token, current_app.config['SECRET_KEY'])
except:
return make_response('Invalid credentials', 401)
if rol != data['rol']:
return make_response('Invalid role', 403)
return f(*args, **kwargs)
return decorated
return decorator
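# Hypothetical use of token_required on a protected view in another module
# (the route and the 'admin' role name are assumptions for illustration):
#
#     from app.security import token_required
#
#     @bp.route('/admin/data')
#     @token_required('admin')
#     def admin_data():
#         return make_response('ok', 200)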
``` |
{
"source": "jjcapellan/flask-examples-sqlite-crud",
"score": 2
} |
#### File: flask-examples-sqlite-crud/app/__init__.py
```python
import os
from flask import Flask
from app.db import init_db
def create_app():
app = Flask(__name__, instance_relative_config=True)
app.config['DATABASE'] = os.path.join(app.instance_path, 'db.sqlite')
try:
os.makedirs(app.instance_path)
except OSError:
pass
with app.app_context():
init_db()
from . import crud
app.register_blueprint(crud.bp)
return app
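# Typical development usage of this factory (a sketch; assumes the package is
# importable as "app"):
#
#     from app import create_app
#     app = create_app()
#     app.run(debug=True)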
``` |
{
"source": "jjcbsn/mbed-cli",
"score": 2
} |
#### File: jjcbsn/mbed-cli/circle_tests.py
```python
from __future__ import print_function
import os
import sys
import subprocess
import shutil
import yaml
def rmtree_readonly(directory):
def remove_readonly(func, path, _):
os.chmod(path, stat.S_IWRITE)
func(path)
shutil.rmtree(directory, onerror=remove_readonly)
# Source tests from yaml config
tests = None
with open('.circleci/config.yml', 'r') as f:
data = yaml.safe_load(f)
# Read yaml tree
if sys.version_info[0] == 3:
tests = data['jobs']['py3']['steps']
else:
tests = data['jobs']['py2']['steps']
# Filter command list to only contain commands
tests = [item['run'] for item in list(filter(lambda x : type(x) is dict, tests))]
# ... and replace new lines with ampersands
tests = [item.replace('\n', ' && ') for item in tests]
# Exit if no tests are found
if tests == None:
sys.exit(1)
# Ignore all tests found before `pip install -e`
startIndex = -1
for cmd in tests:
if 'pip install -e' in cmd:
startIndex = tests.index(cmd) + 1
break
if startIndex == -1:
sys.exit(1)
# Delete `.test` directory if it exists
cwd = os.path.abspath(os.path.dirname(__file__))
if os.path.exists(os.path.join(cwd, '.tests')):
rmtree_readonly(os.path.join(cwd, '.tests'))
os.mkdir(os.path.join(cwd, '.tests'))
# Run tests
for cmd in tests[startIndex:]:
os.chdir(cwd)
print("\n----------\nEXEC: \"%s\" " % cmd)
proc = subprocess.Popen(cmd, shell=True)
proc.communicate()
if proc.returncode != 0:
        print("\n------------\nERROR: \"%s\"" % cmd)
sys.exit(1)
```
#### File: mbed-cli/test/util.py
```python
from __future__ import print_function
import contextlib
import subprocess
import pytest
import os
import re
import shutil
import stat
MBED_PATH = os.path.abspath(os.path.join('mbed', 'mbed.py'))
# Process execution
class ProcessException(Exception):
pass
def popen(command, stdin=None, **kwargs):
print(' '.join(command))
proc = subprocess.Popen(command, **kwargs)
if proc.wait() != 0:
raise ProcessException(proc.returncode)
def pquery(command, stdin=None, **kwargs):
print(' '.join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
stdout, _ = proc.communicate(stdin)
if proc.returncode != 0:
raise ProcessException(proc.returncode)
return stdout.decode("utf-8")
# Directory navigation
@contextlib.contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(newdir)
try:
yield
finally:
os.chdir(prevdir)
# Handling test environment
@pytest.fixture
def mbed(tmpdir):
tmpdir.chdir()
return MBED_PATH
# Higher level functions
def remove(path):
def remove_readonly(func, path, _):
os.chmod(path, stat.S_IWRITE)
func(path)
shutil.rmtree(path, onerror=remove_readonly)
def move(src, dst):
shutil.move(src, dst)
def copy(src, dst):
shutil.copytree(src, dst)
# Test specific utils
def mkgit(name):
os.mkdir(name)
with cd(name):
popen(['git', 'init'])
with open('test', 'w') as f:
f.write('hello')
popen(['git', 'add', 'test'])
popen(['git', 'commit', '-m', '"commit 1"'])
bare = os.path.abspath(name + '.git')
popen(['git', 'clone', '--bare', name, bare])
remove(name)
return os.path.abspath(bare).replace('\\', '/')
def mkhg(name):
os.mkdir(name+'.hg')
with cd(name+'.hg'):
popen(['hg', 'init'])
with open('test', 'w') as f:
f.write('hello')
popen(['hg', 'add', 'test'])
popen(['hg', 'commit', '-m', '"commit 1"'])
return os.path.abspath(name+'.hg').replace('\\', '/')
def assertls(mbed, dir, tree):
tree = ''.join(re.escape(l)+r'.*\n' for l in tree)
with cd(dir):
result = pquery(['python', mbed, 'ls'])
print(result)
assert re.match(tree, result, re.MULTILINE)
def scm(dir=None):
if not dir:
dir = os.getcwd()
if os.path.isdir(os.path.join(dir, '.git')):
return 'git'
elif os.path.isdir(os.path.join(dir, '.hg')):
return 'hg'
def mkcommit(dir=None, files=[]):
with cd(dir or os.getcwd()):
if scm() == 'git':
if files:
popen(['git', 'add'] + files)
popen(['git', 'commit', '-a', '-m', 'test commit'])
popen(['git', 'push', 'origin', 'master'])
elif scm() == 'hg':
if files:
popen(['hg', 'add'] + files)
popen(['hg', 'commit', '-m', 'test commit'])
popen(['hg', 'push'])
# Different repository structures
@pytest.fixture(params=['git1', 'hg1', 'alt1', 'alt2'])
def testrepos(mbed, request):
if request.param in ['git1', 'alt1']:
test1 = mkgit('test1')
popen(['git', 'clone', test1, 'test1'])
else:
test1 = mkhg('test1')
popen(['hg', 'clone', test1, 'test1'])
if request.param in ['git1', 'alt2']:
test2 = mkgit('test2')
popen(['git', 'clone', test2, 'test1/test2'])
else:
test2 = mkhg('test2')
popen(['hg', 'clone', test2, 'test1/test2'])
if request.param in ['git1', 'alt1']:
test3 = mkgit('test3')
popen(['git', 'clone', test3, 'test1/test2/test3'])
else:
test3 = mkhg('test3')
popen(['hg', 'clone', test3, 'test1/test2/test3'])
if request.param in ['git1', 'alt2']:
test4 = mkgit('test4')
popen(['git', 'clone', test4, 'test1/test2/test3/test4'])
else:
test4 = mkhg('test4')
popen(['hg', 'clone', test4, 'test1/test2/test3/test4'])
with cd('test1/test2/test3'):
with open('test4.lib', 'w') as f:
hash = 'none'
with cd('test4'):
if scm() == 'git':
hash = pquery(['git', 'rev-parse', 'HEAD'])
elif scm() == 'hg':
hash = pquery(['hg', 'id', '-i'])
f.write(test4 + '/#' + hash + '\n')
if scm() == 'git':
popen(['git', 'add', 'test4.lib'])
popen(['git', 'commit', '-m', 'test commit'])
popen(['git', 'push', 'origin', 'master'])
elif scm() == 'hg':
popen(['hg', 'add', 'test4.lib'])
popen(['hg', 'commit', '-m', 'test commit'])
popen(['hg', 'push'])
with cd('test1/test2'):
with open('test3.lib', 'w') as f:
with cd('test3'):
if scm() == 'git':
hash = pquery(['git', 'rev-parse', 'HEAD'])
elif scm() == 'hg':
hash = pquery(['hg', 'id', '-i'])
f.write(test3 + '/#' + hash + '\n')
if scm() == 'git':
popen(['git', 'add', 'test3.lib'])
popen(['git', 'commit', '-m', 'test commit'])
popen(['git', 'push', 'origin', 'master'])
elif scm() == 'hg':
popen(['hg', 'add', 'test3.lib'])
popen(['hg', 'commit', '-m', 'test commit'])
popen(['hg', 'push'])
with cd('test1'):
with open('test2.lib', 'w') as f:
with cd('test2'):
if scm() == 'git':
hash = pquery(['git', 'rev-parse', 'HEAD'])
elif scm() == 'hg':
hash = pquery(['hg', 'id', '-i'])
f.write(test2 + '/#' + hash + '\n')
if scm() == 'git':
popen(['git', 'add', 'test2.lib'])
popen(['git', 'commit', '-m', 'test commit'])
popen(['git', 'push', 'origin', 'master'])
elif scm() == 'hg':
popen(['hg', 'add', 'test2.lib'])
popen(['hg', 'commit', '-m', 'test commit'])
popen(['hg', 'push'])
return test1, test2, test3, test4
```
#### File: tools/bash_completion/generator.py
```python
from collections import defaultdict
import pystache
import re
import subprocess
# Top level --version is a pain to deal with so ignoring for now
# This one extracts single commands and the help txt
commandRegex = r"^\s+(?P<command>\w+)\s+(?P<helptxt>[a-zA-Z ]*)$"
# Why the hell do spaces get regexed in command1 ?
subcommandRegex = r"^\s+(?P<command1>-+[a-zA-Z_\-]+(?P<modifier1>\s+[A-Z_\-]+)?)"\
r"(?P<command2>,\s+-+[a-zA-Z_-]+(?P<modifier2>\s+[A-Z_-]+)?)?"\
r"\s+(?P<helptxt>.*)$"
def getHelpTxt(command=None):
if command:
p = subprocess.Popen(["mbed", command, "-h"], stdout=subprocess.PIPE)
else:
p = subprocess.Popen(["mbed", "-h"], stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def getTargetCode():
with open("templates/target.tmplt") as fp:
txt = fp.read()
return txt
def getToolchainCode():
with open("templates/toolchain.tmplt") as fp:
txt = fp.read()
return txt
def getSCMCode():
with open("templates/scm.tmplt") as fp:
txt = fp.read()
return txt
def getIDECode():
with open("templates/ide.tmplt") as fp:
txt = fp.read()
return txt
def getProtocolCode():
with open("templates/protocol.tmplt") as fp:
txt = fp.read()
return txt
def parseCommands():
commands = defaultdict(defaultdict)
commands["COMMAND"] = []
helpTxt = getHelpTxt()
# print helpTxt
for line in helpTxt.split('\n'):
match = re.search(commandRegex, line)
if match:
g = match.groupdict()
commands[g["command"]]["helptxt"] = g["helptxt"]
commands[g["command"]]["subcommands"] = []
# Subcommand mustache generation
commands[g["command"]]["DDASH_COMMANDS"] = []
commands[g["command"]]["DASH_COMMANDS"] = []
commands[g["command"]]["COMMAND"] = g["command"]
commands[g["command"]]["HAVE_PREV"] = {"PREV_CASE": []}
# Main function generation
commands["COMMAND"].append({"name": g["command"]})
for commandKey in commands:
# Skip
if commandKey == "COMMAND":
continue
helpTxt = getHelpTxt(commandKey)
for line in helpTxt.split('\n'):
match = re.search(subcommandRegex, line)
if match:
commandMatch = match.groupdict()
# Clean up the subcommands
command1 = commandMatch["command1"]
command2 = commandMatch["command2"]
if command1:
command1 = re.sub(",", "", command1)
                    command1 = command1.strip()
command1 = command1.split()[0]
if command2:
command2 = re.sub(",", "", command2)
                    command2 = command2.strip()
command2 = command2.split()[0]
# Not sure why the cleaning is even necessary,
# the regex looks correct
commandMatch["command1"] = command1
commandMatch["command2"] = command2
commands[commandKey]["subcommands"].append(commandMatch)
# Push format for mustache
if command1 and '--' in command1:
commands[commandKey]["DDASH_COMMANDS"].append(
{"name": command1})
if command2 and '--' in command2:
commands[commandKey]["DDASH_COMMANDS"].append(
{"name": command2})
if command1:
m = re.match("^-[a-zA-Z]{1,2}", command1)
if m:
commands[commandKey]["DASH_COMMANDS"].append(
{"name": command1})
else:
command1 = ""
if command2:
m = re.match("^-[a-zA-Z]{1,2}", command2)
if m:
commands[commandKey]["DASH_COMMANDS"].append(
{"name": command2})
else:
command2 = ""
# Adding the dependent command handlers
if "target" in command1 or "target" in command2:
commands[commandKey]["HAVE_PREV"]["PREV_CASE"].append({"case": "|".join(filter(None, [command1, command2])), "code": getTargetCode()})
if "toolchain" in command1 or "toolchain" in command2:
commands[commandKey]["HAVE_PREV"]["PREV_CASE"].append({"case": "|".join(filter(None, [command1, command2])), "code": getToolchainCode()})
if "--ide" in command1 or "--ide" in command2:
commands[commandKey]["HAVE_PREV"]["PREV_CASE"].append({"case": "|".join(filter(None, [command1, command2])), "code": getIDECode()})
if "scm" in command1 or "scm" in command2:
commands[commandKey]["HAVE_PREV"]["PREV_CASE"].append({"case": "|".join(filter(None, [command1, command2])), "code": getSCMCode()})
if "protocol" in command1 or "protocol" in command2:
commands[commandKey]["HAVE_PREV"]["PREV_CASE"].append({"case": "|".join(filter(None, [command1, command2])), "code": getProtocolCode()})
# Adding the dependent command handlers for target and toolchain
if "target" in commandKey:
commands[commandKey]["HAVE_PREV"]["PREV_CASE"].append({"case": commandKey, "code": getTargetCode()})
if "toolchain" in commandKey:
commands[commandKey]["HAVE_PREV"]["PREV_CASE"].append({"case": commandKey, "code": getToolchainCode()})
return commands
def generateMain(commands):
txt = []
with open("templates/mbed.tmplt") as fp:
tmplt = fp.read()
txt.append(pystache.render(tmplt, commands))
return txt
def generateCompleters(commands):
txt = []
renderer = pystache.Renderer(escape=lambda u: u)
with open("templates/command.tmplt") as fp:
tmplt = fp.read()
for commandKey in commands:
txt.append(renderer.render(tmplt, commands[commandKey]))
# if need to add hacks add them here
return txt
def generateBoilerPlate(_):
txt = []
with open("templates/boilerplate.tmplt") as fp:
txt.append(fp.read())
return txt
def generateScript(commands):
txt = []
txt.extend(generateBoilerPlate(commands))
txt.extend(generateCompleters(commands))
txt.extend(generateMain(commands))
with open("mbed-completion", "w") as fp:
for x in txt:
fp.write("%s\n" % x)
if __name__ == '__main__':
commands = parseCommands()
# At this point we have a list of all the commands and sub commands
# for each command create a Bash function
# register each subcommand
generateScript(commands)
``` |
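A quick sanity check of `commandRegex` above against a fabricated line of `mbed -h` output (the help text is made up for illustration):
```python
import re

commandRegex = r"^\s+(?P<command>\w+)\s+(?P<helptxt>[a-zA-Z ]*)$"
line = "    compile             Compile code using the mbed build tools"
m = re.search(commandRegex, line)
print(m.groupdict())
# {'command': 'compile', 'helptxt': 'Compile code using the mbed build tools'}
```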
{
"source": "jjccero/pbrl",
"score": 2
} |
#### File: algorithms/ppg/ppg.py
```python
from typing import Optional
import numpy as np
import torch
from torch.distributions import Normal, Categorical
from pbrl.algorithms.ppg.aux_buffer import AuxBuffer
from pbrl.algorithms.ppo import PPO, Policy
class PPG(PPO):
def __init__(
self,
policy: Optional[Policy] = None,
batch_size: int = 64,
chunk_len: Optional[int] = None,
eps: float = 0.2,
gamma: float = 0.99,
gae_lambda: float = 0.95,
lr: float = 5e-4,
grad_norm: float = 0.5,
entropy_coef: float = 0.0,
optimizer=torch.optim.Adam,
optimizer_aux=torch.optim.Adam,
aux_batch_size: int = 256,
n_pi: int = 32,
epoch_pi: int = 1,
epoch_vf: int = 1,
epoch_aux: int = 6,
beta_clone: float = 1.0,
lr_aux: float = 5e-4
):
super(PPG, self).__init__(
policy=policy,
batch_size=batch_size,
chunk_len=chunk_len,
eps=eps,
gamma=gamma,
gae_lambda=gae_lambda,
repeat=1,
lr=lr,
weight_decay=0.0,
grad_norm=grad_norm,
entropy_coef=entropy_coef,
adv_norm=True,
recompute_adv=False,
optimizer=optimizer
)
self.aux_batch_size = aux_batch_size
self.n_pi = n_pi
self.epoch_pi = epoch_pi
self.epoch_vf = epoch_vf
self.epoch_aux = epoch_aux
self.beta_clone = beta_clone
self.lr_aux = lr_aux
self.optimizer_aux = optimizer_aux(
(
{'params': self.policy.actor.parameters()},
{'params': self.policy.critic.parameters()}
),
lr=self.lr_aux
)
self.aux_buffer = AuxBuffer()
self.ks_vf = ['observations', 'returns']
self.ks_pi = ['observations', 'actions', 'advantages', 'log_probs_old']
self.ks_aux = ['observations', 'vtargs', 'dists_old']
if self.policy.rnn:
self.ks_vf.append('dones')
self.ks_pi.append('dones')
self.ks_aux.append('dones')
def train_vf(self, loss_info):
for mini_batch in self.buffer.generator(self.batch_size, self.chunk_len, self.ks_vf):
mini_batch['observations'] = self.policy.normalize_observations(mini_batch['observations'])
mini_batch = {k: self.policy.n2t(v) for k, v in mini_batch.items()}
observations = mini_batch['observations']
returns = mini_batch['returns']
dones = None
if self.policy.rnn:
dones = mini_batch['dones']
value_loss = self.critic_loss(observations, returns, dones)
self.optimizer.zero_grad()
value_loss.backward()
if self.grad_norm:
torch.nn.utils.clip_grad_norm_(self.policy.critic.parameters(), self.grad_norm)
self.optimizer.step()
loss_info['value'].append(value_loss.item())
def train_pi(self, loss_info):
for mini_batch in self.buffer.generator(self.batch_size, self.chunk_len, self.ks_pi):
mini_batch['observations'] = self.policy.normalize_observations(mini_batch['observations'])
mini_batch = {k: self.policy.n2t(v) for k, v in mini_batch.items()}
observations = mini_batch['observations']
actions = mini_batch['actions']
advantages = mini_batch['advantages']
log_probs_old = mini_batch['log_probs_old']
dones = None
if self.policy.rnn:
dones = mini_batch['dones']
policy_loss, entropy_loss = self.actor_loss(observations, actions, advantages, log_probs_old, dones)
loss = - policy_loss - self.entropy_coef * entropy_loss
self.optimizer.zero_grad()
loss.backward()
if self.grad_norm:
torch.nn.utils.clip_grad_norm_(self.policy.actor.parameters(), self.grad_norm)
self.optimizer.step()
loss_info['policy'].append(policy_loss.item())
loss_info['entropy'].append(entropy_loss.item())
def auxiliary_phase(self, loss_info):
for mini_batch in self.aux_buffer.generator(self.aux_batch_size, self.chunk_len, self.ks_aux):
mini_batch['observations'] = self.policy.normalize_observations(mini_batch['observations'])
mini_batch = {k: self.policy.n2t(v) for k, v in mini_batch.items()}
observations = mini_batch['observations']
vtargs = mini_batch['vtargs']
dists_old = mini_batch['dists_old']
if self.policy.actor.continuous:
dists_old = dists_old.permute(-1, *[i for i in range(len(dists_old.shape[:-1]))])
dists_old = Normal(
loc=dists_old[0],
scale=dists_old[1]
)
else:
dists_old = Categorical(logits=dists_old)
dones = None
if self.policy.rnn:
dones = mini_batch['dones']
dists, values, _ = self.policy.actor.aux(observations, dones=dones)
aux_loss = 0.5 * torch.square(values - vtargs).mean()
clone_loss = torch.distributions.kl_divergence(dists_old, dists).mean()
value_loss = self.critic_loss(observations, vtargs, dones)
self.optimizer_aux.zero_grad()
(aux_loss + self.beta_clone * clone_loss + value_loss).backward()
if self.grad_norm:
torch.nn.utils.clip_grad_norm_(self.policy.actor.parameters(), self.grad_norm)
torch.nn.utils.clip_grad_norm_(self.policy.critic.parameters(), self.grad_norm)
self.optimizer_aux.step()
loss_info['kl'].append(clone_loss.item())
loss_info['aux_pi'].append(aux_loss.item())
loss_info['aux_vf'].append(value_loss.item())
@torch.no_grad()
def compute_dists_old(self):
states_actor = None
for i in range(self.n_pi):
observations = self.policy.n2t(
self.policy.normalize_observations(self.aux_buffer.observations[i].swapaxes(0, 1))
)
dones = None
if self.policy.rnn:
dones = self.policy.n2t(self.aux_buffer.dones[i].swapaxes(0, 1))
dists, states_actor = self.policy.actor.forward(observations, states_actor, dones)
if isinstance(dists, Categorical):
dists = self.policy.t2n(dists.logits)
elif isinstance(dists, Normal):
dists = np.stack(
(self.policy.t2n(dists.loc), self.policy.t2n(dists.scale)),
axis=-1
)
dists = dists.swapaxes(0, 1)
self.aux_buffer.dists_old.append(dists)
def update(self):
loss_info = dict(value=[], policy=[], entropy=[])
# it is PPO
self.policy.critic.eval()
self.gae()
self.policy.actor.train()
self.policy.critic.train()
if self.epoch_pi == self.epoch_vf:
for i in range(self.epoch_pi):
self.train_pi_vf(loss_info)
else:
for i in range(self.epoch_vf):
self.train_vf(loss_info)
for i in range(self.epoch_pi):
self.train_pi(loss_info)
self.policy.actor.eval()
# auxiliary phase
if self.n_pi > 0 and self.epoch_aux > 0:
self.aux_buffer.append(
observations=self.buffer.observations,
dones=self.buffer.dones,
vtargs=self.buffer.returns
)
if (self.iteration + 1) % self.n_pi == 0:
loss_info.update(dict(kl=[], aux_pi=[], aux_vf=[]))
self.compute_dists_old()
self.policy.actor.train()
for i in range(self.epoch_aux):
self.auxiliary_phase(loss_info)
self.policy.actor.eval()
self.aux_buffer.clear()
# on-policy
self.buffer.clear()
return loss_info
``` |
{
"source": "jjccero/rl",
"score": 2
} |
#### File: examples/ppo/competitive_eval.py
```python
import gym
import torch
from pbrl.algorithms.ppo import Policy
from pbrl.competitive import Agent, CompetitiveEnv
class DemoCompetitiveEnv(CompetitiveEnv):
try:
import gym_compete
except ModuleNotFoundError:
        raise
def init(self, env_name, config_policy):
worker_ids = [4, 2]
for i, worker_id in enumerate(worker_ids):
filename_policy = 'result/{}-0-current/{}.pkl'.format(env_name, worker_id)
agent = Agent(
Policy(
observation_space=self.observation_space.spaces[i],
action_space=self.action_space.spaces[i],
**config_policy
)
)
agent.load_from_dir(filename_policy)
self.agents.append(agent)
self.indices.append([i])
def after_done(self):
print(self.infos)
def main():
config_policy = dict(
rnn='lstm',
hidden_sizes=[64, 64],
activation=torch.nn.ReLU,
obs_norm=True,
critic=False,
device=torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
)
env_name = 'sumo-humans-v0'
seed = 0
torch.manual_seed(seed)
env_test = DemoCompetitiveEnv(gym.make(env_name), config_policy=config_policy, env_name=env_name)
env_test.seed(seed)
env_test.reset()
while True:
_, _, done, info = env_test.step()
env_test.render()
if True in done:
print(info)
env_test.reset()
if __name__ == '__main__':
main()
``` |
{
"source": "jjccero/rliccd",
"score": 2
} |
#### File: examples/ppg/pbt_train.py
```python
import argparse
import time
from multiprocessing.connection import Connection
import gym
import numpy as np
import torch
from pbrl.algorithms.ppg import AuxActor, PPG
from pbrl.algorithms.ppo import Runner, Policy
from pbrl.common import Logger, update_dict
from pbrl.common.map import treemap, map_cpu
from pbrl.env import DummyVecEnv
from pbrl.pbt import PBT
def test(runner_test, policy, episode_num_test, info):
runner_test.reset()
eval_info = runner_test.run(policy=policy, episode_num=episode_num_test)
update_dict(info, eval_info, 'test/')
return np.mean(eval_info['reward'])
def worker_fn(
worker_num: int, worker_id: int, remote: Connection, remote_parent: Connection,
trainer_config: dict,
policy_config: dict,
env,
seed,
env_num,
env_num_test,
timestep: int,
ready_timestep: int,
log_interval,
buffer_size,
episode_num_test,
log_dir: str
):
remote_parent.close()
seed_worker = seed + worker_id
torch.manual_seed(seed_worker)
np.random.seed(seed_worker)
env_train = DummyVecEnv([lambda: gym.make(env) for _ in range(env_num)])
env_test = DummyVecEnv([lambda: gym.make(env) for _ in range(env_num_test)])
env_train.seed(seed_worker)
env_test.seed(seed_worker)
filename_log = '{}/{}'.format(log_dir, worker_id)
filename_policy = '{}/policy.pkl'.format(filename_log)
logger = Logger(filename_log)
# define policy
policy = Policy(
observation_space=env_train.observation_space,
action_space=env_train.action_space,
**policy_config
)
# define trainer for the task
trainer = PPG(
policy,
**trainer_config
)
# define train and test runner
runner_train = Runner(env_train)
runner_test = Runner(env_test)
info = dict()
test(runner_test, policy, episode_num_test, info)
logger.log(trainer.timestep, info)
while trainer.timestep < timestep:
trainer.learn(
timestep=ready_timestep,
runner_train=runner_train,
timestep_update=buffer_size,
logger=logger,
log_interval=log_interval
)
hyperparameter = dict(lr=trainer.lr)
update_dict(info, hyperparameter, 'hyperparameter/')
x = dict(
actor=treemap(map_cpu, policy.actor.state_dict()),
critic=treemap(map_cpu, policy.critic.state_dict()),
optimizer=treemap(map_cpu, trainer.optimizer.state_dict()),
optimizer_aux=treemap(map_cpu, trainer.optimizer_aux.state_dict()),
lr=trainer.lr,
rms_obs=policy.rms_obs,
rms_reward=policy.rms_reward
)
# evaluate
score = test(runner_test, policy, episode_num_test, info)
remote.send(('exploit', (trainer.iteration, score, x)))
exploit, y = remote.recv()
if exploit is not None:
policy.actor.load_state_dict(y['actor'])
policy.critic.load_state_dict(y['critic'])
trainer.optimizer.load_state_dict(y['optimizer'])
trainer.optimizer_aux.load_state_dict(y['optimizer_aux'])
for param in trainer.optimizer.param_groups:
param['lr'] = y['lr']
if policy.obs_norm:
policy.rms_obs.load(y['rms_obs'])
if policy.reward_norm:
policy.rms_reward.load(y['rms_reward'])
# log
logger.log(trainer.timestep, info)
# save
trainer.save(filename_policy)
remote.send(('close', None))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='Walker2d-v3')
parser.add_argument('--log_interval', type=int, default=10)
parser.add_argument('--worker_num', type=int, default=5)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--env_num', type=int, default=16)
parser.add_argument('--buffer_size', type=int, default=2048)
parser.add_argument('--env_num_test', type=int, default=2)
parser.add_argument('--episode_num_test', type=int, default=10)
parser.add_argument('--ready_timestep', type=int, default=40960)
parser.add_argument('--timestep', type=int, default=1024000)
args = parser.parse_args()
policy_config = dict(
rnn=None,
hidden_sizes=[64, 64],
activation=torch.nn.Tanh,
obs_norm=True,
reward_norm=True,
gamma=0.99,
device=torch.device('cuda:0'),
actor_type=AuxActor
)
trainer_config = dict(
batch_size=64,
chunk_len=None,
eps=0.2,
gamma=policy_config['gamma'],
gae_lambda=0.95,
lr=3e-4,
lr_aux=3e-4,
beta_clone=1.0,
epoch_aux=6,
epoch_pi=4,
epoch_vf=4,
n_pi=10
)
pbt = PBT(
worker_fn=worker_fn,
policy_config=policy_config,
trainer_config=trainer_config,
log_dir='result/{}/{}-{}'.format(args.env, args.seed, int(time.time())),
**vars(args)
)
pbt.seed(args.seed)
pbt.run()
if __name__ == '__main__':
main()
```
#### File: examples/ppo/train.py
```python
import argparse
import time
import gym
import numpy as np
import torch
from pbrl.algorithms.ppo import PPO, Runner, Policy
from pbrl.common import Logger
from pbrl.env import SubProcVecEnv, DummyVecEnv
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='Walker2d-v3')
parser.add_argument('--test_interval', type=int, default=10)
parser.add_argument('--log_interval', type=int, default=10)
parser.add_argument('--subproc', action='store_true')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--env_num', type=int, default=16)
parser.add_argument('--buffer_size', type=int, default=2048)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--chunk_len', type=int, default=None)
parser.add_argument('--rnn', type=str, default=None)
parser.add_argument('--env_num_test', type=int, default=2)
parser.add_argument('--episode_num_test', type=int, default=10)
parser.add_argument('--timestep', type=int, default=3000000)
parser.add_argument('--eps', type=float, default=0.2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--gae_lambda', type=float, default=0.95)
parser.add_argument('--vf_coef', type=float, default=1.0)
parser.add_argument('--entropy_coef', type=float, default=0.0)
parser.add_argument('--repeat', type=int, default=10)
parser.add_argument('--adv_norm', action='store_true')
parser.add_argument('--recompute_adv', action='store_true')
parser.add_argument('--obs_norm', action='store_true')
parser.add_argument('--reward_norm', action='store_true')
parser.add_argument('--lr_decay', action='store_true')
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--grad_norm', type=float, default=0.5)
parser.add_argument('--weight_decay', type=float, default=0.0)
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
filename_log = 'result/{}-{}-{}'.format(args.env, args.seed, int(time.time()))
filename_policy = '{}/policy.pkl'.format(filename_log)
logger = Logger(filename_log)
# define train and test environment
env_class = SubProcVecEnv if args.subproc else DummyVecEnv
env_train = env_class([lambda: gym.make(args.env) for _ in range(args.env_num)])
env_test = DummyVecEnv([lambda: gym.make(args.env) for _ in range(args.env_num_test)])
env_train.seed(args.seed)
env_test.seed(args.seed)
# define policy
policy = Policy(
observation_space=env_train.observation_space,
action_space=env_train.action_space,
rnn=args.rnn,
hidden_sizes=[64, 64],
activation=torch.nn.Tanh,
obs_norm=args.obs_norm,
reward_norm=args.reward_norm,
gamma=args.gamma,
device=torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
)
# define trainer for the task
trainer = PPO(
policy=policy,
batch_size=args.batch_size,
chunk_len=args.chunk_len,
eps=args.eps,
gamma=args.gamma,
gae_lambda=args.gae_lambda,
repeat=args.repeat,
lr=args.lr,
weight_decay=args.weight_decay,
grad_norm=args.grad_norm,
entropy_coef=args.entropy_coef,
vf_coef=args.vf_coef,
adv_norm=args.adv_norm,
recompute_adv=args.recompute_adv
)
# lr scheduler
if args.lr_decay:
total_update = args.timestep / args.buffer_size
trainer.scheduler = torch.optim.lr_scheduler.LambdaLR(
trainer.optimizer,
lambda update: 1.0 - update / total_update
)
if args.resume:
PPO.load(filename_policy, policy, trainer)
# define train and test runner
runner_train = Runner(env=env_train)
runner_test = Runner(env=env_test)
trainer.learn(
timestep=args.timestep,
runner_train=runner_train,
timestep_update=args.buffer_size,
logger=logger,
log_interval=args.log_interval,
runner_test=runner_test,
test_interval=args.test_interval,
episode_test=args.episode_num_test
)
# save result
trainer.save(filename_policy)
print(filename_policy)
if __name__ == '__main__':
main()
```
#### File: rliccd/examples/quick_start.py
```python
import os
import gym
import numpy as np
import torch
from pbrl.algorithms.dqn import DQN, Policy, Runner
from pbrl.common import Logger
from pbrl.env import DummyVecEnv
def main(env='CartPole-v0', seed=0):
# define train and test environment
env_train = DummyVecEnv([lambda: gym.make(env) for _ in range(10)])
env_test = DummyVecEnv([lambda: gym.make(env).unwrapped for _ in range(10)])
# define train and test runner
runner_train = Runner(env=env_train, start_timestep=10000, fill=True, epsilon=0.2)
runner_test = Runner(env=env_test)
env_train.seed(seed)
env_test.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
# define policy
policy = Policy(
observation_space=env_train.observation_space,
action_space=env_train.action_space,
hidden_sizes=[128, 128, 128],
activation=torch.nn.ReLU,
gamma=0.9,
device=torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
)
# define trainer for the task
DQN(
policy=policy,
buffer_size=20000,
batch_size=64,
target_freq=10,
gamma=0.9,
lr_critic=3e-4
).learn(
timestep=50000,
runner_train=runner_train,
timestep_update=10,
logger=Logger('result/quick_start'),
log_interval=500,
runner_test=runner_test,
test_interval=500,
episode_test=10
)
os.system('tensorboard --logdir result/quick_start')
if __name__ == '__main__':
main()
```
#### File: algorithms/td3/net.py
```python
from typing import Optional, List
import torch
import torch.nn as nn
from pbrl.policy.base import Mlp, Cnn, Rnn, Deterministic
class DeterministicActor(nn.Module):
def __init__(
self,
obs_dim: tuple,
action_dim: int,
hidden_sizes: List,
activation,
rnn: Optional[str]
):
super(DeterministicActor, self).__init__()
self.hidden_size = hidden_sizes[-1]
if len(obs_dim) == 3:
self.f = Cnn(obs_dim, hidden_sizes, activation)
else:
self.f = Mlp(obs_dim, hidden_sizes, activation)
self.rnn = rnn
if self.rnn:
self.f2 = Rnn(self.hidden_size, activation)
self.act = Deterministic(self.hidden_size, action_dim)
def forward(
self,
observations,
states: Optional[torch.Tensor] = None,
dones: Optional[torch.Tensor] = None
):
x = self.f(observations)
if self.rnn:
x, states = self.f2(x, states, dones)
actions = self.act(x).tanh()
return actions, states
class DoubleQ(nn.Module):
def __init__(
self,
obs_dim: tuple,
action_dim: int,
hidden_sizes: List[int],
activation
):
super(DoubleQ, self).__init__()
self.hidden_size = hidden_sizes[-1]
if len(obs_dim) == 1:
self.f1 = Mlp((obs_dim[0] + action_dim,), hidden_sizes, activation)
self.f2 = Mlp((obs_dim[0] + action_dim,), hidden_sizes, activation)
else:
raise NotImplementedError
self.q1 = Deterministic(self.hidden_size, 1)
self.q2 = Deterministic(self.hidden_size, 1)
def forward(self, observations, actions):
x = torch.cat((observations, actions), dim=1)
x1 = self.f1(x)
q1 = self.q1(x1).squeeze(-1)
x2 = self.f2(x)
q2 = self.q2(x2).squeeze(-1)
return q1, q2
def Q1(self, observations, actions):
x = torch.cat((observations, actions), dim=-1)
x1 = self.f1(x)
q1 = self.q1(x1).squeeze(-1)
return q1
```
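A shape-check sketch for the two networks above; the dimensions are arbitrary, and `DeterministicActor`/`DoubleQ` are the classes just defined:
```python
import torch
import torch.nn as nn

obs_dim, action_dim, batch = (17,), 6, 32   # arbitrary sizes for a smoke test
actor = DeterministicActor(obs_dim, action_dim, hidden_sizes=[64, 64], activation=nn.ReLU, rnn=None)
critic = DoubleQ(obs_dim, action_dim, hidden_sizes=[64, 64], activation=nn.ReLU)

obs = torch.randn(batch, *obs_dim)
actions, _ = actor(obs)           # (32, 6), squashed into [-1, 1] by tanh
q1, q2 = critic(obs, actions)     # each (32,)
print(actions.shape, q1.shape, q2.shape)
```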
#### File: pbrl/env/dummy.py
```python
import numpy as np
from pbrl.env.env import VectorEnv, reset_after_done
class DummyVecEnv(VectorEnv):
def __init__(self, make_env):
self.envs = [env_fn() for env_fn in make_env]
env = self.envs[0]
super(DummyVecEnv, self).__init__(len(make_env), env.observation_space, env.action_space)
def reset(self):
observations = np.asarray([env.reset() for env in self.envs])
return observations
def step(self, actions):
results = [reset_after_done(env, action) for env, action in zip(self.envs, actions)]
observations, rewards, dones, infos = map(np.asarray, zip(*results))
return observations, rewards, dones, infos
def render(self):
for env in self.envs:
env.render()
def seed(self, seed):
for i, env in enumerate(self.envs):
env.seed(seed + i)
def close(self):
super(DummyVecEnv, self).close()
for env in self.envs:
env.close()
```
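A minimal instantiation sketch for `DummyVecEnv` above, assuming the classic pre-0.26 gym reset/step API that this code targets:
```python
import gym
import numpy as np

envs = DummyVecEnv([lambda: gym.make('CartPole-v1') for _ in range(4)])
envs.seed(0)
obs = envs.reset()                       # stacked observations, shape (4, 4) for CartPole
actions = np.random.randint(2, size=4)   # one discrete action per sub-environment
obs, rewards, dones, infos = envs.step(actions)
envs.close()
```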
#### File: pbrl/env/subproc.py
```python
import multiprocessing
from multiprocessing.connection import Connection
import numpy as np
from pbrl.common.pickle import CloudpickleWrapper
from pbrl.env.env import VectorEnv, reset_after_done
RESET = 0
STEP = 1
SPACE = 2
RENDER = 3
SEED = 4
CLOSE = 5
def worker_fn(env_fns: CloudpickleWrapper, remote: Connection, remote_parent: Connection):
remote_parent.close()
envs = [env_fn() for env_fn in env_fns.x]
while True:
cmd, data = remote.recv()
if cmd == STEP:
remote.send([reset_after_done(env, action) for env, action in zip(envs, data)])
elif cmd == RENDER:
for env in envs:
env.render()
elif cmd == RESET:
remote.send([env.reset() for env in envs])
elif cmd == SPACE:
remote.send((envs[0].observation_space, envs[0].action_space))
elif cmd == SEED:
for i, env in enumerate(envs):
env.seed(data + i)
elif cmd == CLOSE:
for env in envs:
env.close()
break
def flatten(x):
return [x__ for x_ in x for x__ in x_]
class SubProcVecEnv(VectorEnv):
def __init__(self, make_env, worker_num=4):
self.remotes = []
self.ps = []
self.worker_num = worker_num
self.env_nums = []
ctx = multiprocessing.get_context('spawn')
for env_fns in np.array_split(make_env, self.worker_num):
remote, remote_worker = ctx.Pipe()
p = ctx.Process(
target=worker_fn,
args=(
CloudpickleWrapper(env_fns),
remote_worker,
remote
),
daemon=False
)
p.start()
self.ps.append(p)
self.remotes.append(remote)
remote_worker.close()
self.env_nums.append(len(env_fns))
self.remotes[0].send((SPACE, None))
observation_space, action_space = self.remotes[0].recv()
super(SubProcVecEnv, self).__init__(len(make_env), observation_space, action_space)
def reset(self):
for remote in self.remotes:
remote.send((RESET, None))
observations = flatten([remote.recv() for remote in self.remotes])
observations = np.asarray(observations)
return observations
def step(self, actions):
actions = np.array_split(actions, self.worker_num)
for remote, action in zip(self.remotes, actions):
remote.send((STEP, action))
results = flatten([remote.recv() for remote in self.remotes])
observations, rewards, dones, infos = map(np.asarray, zip(*results))
return observations, rewards, dones, infos
def render(self):
for remote in self.remotes:
remote.send((RENDER, None))
def seed(self, seed):
index = 0
for remote, env_num in zip(self.remotes, self.env_nums):
remote.send((SEED, seed + index))
index += env_num
def close(self):
super(SubProcVecEnv, self).close()
for remote in self.remotes:
remote.send((CLOSE, None))
for p in self.ps:
p.join()
```
#### File: pbrl/policy/base.py
```python
import torch
import torch.nn as nn
from torch.distributions import Normal, Categorical
def init_weights(module: nn.Module, gain=1.414):
for m in module.modules():
if isinstance(m, (nn.Linear, nn.Conv2d)):
torch.nn.init.zeros_(m.bias)
torch.nn.init.orthogonal_(m.weight, gain)
if isinstance(m, (nn.GRU, nn.LSTM)):
torch.nn.init.zeros_(m.bias_ih_l0)
torch.nn.init.zeros_(m.bias_hh_l0)
torch.nn.init.orthogonal_(m.weight_ih_l0)
torch.nn.init.orthogonal_(m.weight_hh_l0)
class Mlp(nn.Module):
def __init__(self, input_dim, hidden_sizes, activation):
super(Mlp, self).__init__()
self.flat = len(input_dim) == 2
last_size = input_dim[0] * input_dim[1] if self.flat else input_dim[0]
mlp = []
for hidden_size in hidden_sizes:
mlp.append(nn.Linear(last_size, hidden_size))
mlp.append(activation())
last_size = hidden_size
self.mlp = nn.Sequential(*mlp)
def forward(self, x):
if self.flat:
x = torch.flatten(x, -2)
x = self.mlp(x)
return x
class Cnn(nn.Module):
def __init__(self, shape, hidden_sizes, activation):
super(Cnn, self).__init__()
h, w, in_channels = shape
cnn = []
mlp_idx = 0
for conv in hidden_sizes:
if isinstance(conv, tuple):
out_channels, kernel_size, pool_size = conv
cnn.append(nn.Conv2d(in_channels, out_channels, (kernel_size, kernel_size)))
cnn.append(nn.MaxPool2d(pool_size))
cnn.append(activation())
h = (h - kernel_size + 1) // pool_size
w = (w - kernel_size + 1) // pool_size
in_channels = out_channels
else:
break
mlp_idx += 1
self.cnn = nn.Sequential(*cnn)
self.mlp = Mlp(
(h * w * in_channels,),
hidden_sizes[mlp_idx:],
activation
)
self.activation = activation()
def forward(self, x):
x = x.transpose(-1, -3)
if len(x.shape) == 5:
l, b = x.shape[:2]
x = x.flatten(0, 1)
x = self.cnn(x)
x = x.flatten(1)
x = x.unflatten(0, (l, b))
else:
x = self.cnn(x)
x = x.flatten(1)
x = self.activation(self.mlp(x))
return x
class Rnn(nn.Module):
def __init__(self, hidden_size, activation, rnn='lstm'):
super(Rnn, self).__init__()
if rnn == 'lstm':
self.rnn = nn.LSTM(hidden_size, hidden_size)
elif rnn == 'gru':
self.rnn = nn.GRU(hidden_size, hidden_size)
else:
raise NotImplementedError
self.activation = activation()
def forward(self, x, states, dones):
if len(x.shape) == 3:
# reshape to (chunk_len, batch_size, ...)
x = x.transpose(0, 1)
chunk_len = x.shape[0]
xs = []
for step in range(chunk_len):
x_ = x[step:step + 1, :, :]
x_, states = self.rnn(x_, states)
done = dones[:, step]
if isinstance(states, tuple):
for states_ in states:
states_[:, done, :] = 0.
else:
states[:, done, :] = 0.
xs.append(x_)
# reshape to (1, batch_size, chunk_len, ...)
x = torch.stack(xs, dim=2)
# reshape to (batch_size, chunk_len, ...)
x = x.squeeze(0)
else:
# reshape to (1, batch_size, ...)
x = x.unsqueeze(0)
x, states = self.rnn(x, states)
# reshape to (batch_size, ...)
x = x.squeeze(0)
x = self.activation(x)
return x, states
class Discrete(nn.Module):
def __init__(self, hidden_size, action_dim):
super(Discrete, self).__init__()
self.logits = nn.Linear(hidden_size, action_dim)
def forward(self, x):
logits = self.logits(x)
return Categorical(logits=logits)
class Continuous(nn.Module):
def __init__(self, hidden_size, action_dim):
super(Continuous, self).__init__()
self.mean = nn.Linear(hidden_size, action_dim)
self.logstd = nn.Parameter(torch.zeros(action_dim))
def forward(self, x):
mean = self.mean(x)
std = self.logstd.exp().expand_as(mean)
return Normal(mean, std)
class Deterministic(nn.Module):
def __init__(self, hidden_size, output_dim):
super(Deterministic, self).__init__()
self.x = nn.Linear(hidden_size, output_dim)
def forward(self, x):
return self.x(x)
```
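A small sketch composing the building blocks above into a Gaussian policy head; the sizes are arbitrary, and `Mlp`, `Continuous` and `init_weights` are the definitions just shown:
```python
import torch
import torch.nn as nn

body = Mlp((8,), [64, 64], nn.Tanh)    # 8-dim observation -> 64-dim feature
head = Continuous(64, 2)               # 2-dim continuous action
init_weights(body)
init_weights(head, gain=0.01)          # smaller gain for the output layer

obs = torch.randn(16, 8)
dist = head(body(obs))                 # torch.distributions.Normal over a batch of 16
actions = dist.sample()                # (16, 2)
log_probs = dist.log_prob(actions).sum(-1)
```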
#### File: pbrl/policy/wrapper.py
```python
import numpy as np
class TanhWrapper:
def __init__(self, low, high):
self.low = low
self.high = high
def __call__(self, x):
return 0.5 * (self.high - self.low) * np.tanh(x) + 0.5 * (self.low + self.high)
class ClipWrapper:
def __init__(self, low, high):
self.low = low
self.high = high
def __call__(self, x):
return 0.5 * (self.high - self.low) * np.clip(x, -1.0, 1.0) + 0.5 * (self.low + self.high)
``` |
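The wrappers above squash a raw network output into the action-space box; a quick numeric check (the bounds are arbitrary):
```python
import numpy as np

wrap = TanhWrapper(low=np.array([-2.0]), high=np.array([2.0]))
print(wrap(np.array([0.0])))    # [0.]  -- centre of the box
print(wrap(np.array([10.0])))   # ~[2.] -- saturates at the upper bound
```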
{
"source": "jjccero/rl",
"score": 2
} |
#### File: algorithms/td3/buffer.py
```python
import numpy as np
from gym.spaces import Space
class ReplayBuffer:
def __init__(
self,
buffer_size: int,
observation_space: Space,
action_space: Space
):
self.buffer_size = buffer_size
self.ptr = 0
self.len = 0
self.observations = np.zeros((buffer_size, *observation_space.shape), dtype=float)
self.actions = np.zeros((buffer_size, *action_space.shape), dtype=float)
self.observations_next = np.zeros((buffer_size, *observation_space.shape), dtype=float)
self.rewards = np.zeros(buffer_size)
self.dones = np.zeros(buffer_size, dtype=float)
def append(
self,
observations: np.ndarray,
actions: np.ndarray,
observations_next: np.ndarray,
rewards: np.ndarray,
dones: np.ndarray
):
env_num = observations.shape[0]
for i in range(env_num):
index = (self.ptr + i) % self.buffer_size
self.observations[index] = observations[i]
self.actions[index] = actions[i]
self.observations_next[index] = observations_next[i]
self.rewards[index] = rewards[i]
self.dones[index] = dones[i]
self.ptr = (self.ptr + env_num) % self.buffer_size
self.len = min(self.len + env_num, self.buffer_size)
def sample(self, batch_size: int):
index = np.random.randint(self.len, size=batch_size)
return (
self.observations[index],
self.actions[index],
self.observations_next[index],
self.rewards[index],
self.dones[index]
)
def clear(self):
self.ptr = 0
self.len = 0
```
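A quick exercise of `ReplayBuffer` above with toy Box spaces; all shapes and sizes are arbitrary:
```python
import numpy as np
from gym.spaces import Box

obs_space = Box(low=-1.0, high=1.0, shape=(3,))
act_space = Box(low=-1.0, high=1.0, shape=(1,))
buffer = ReplayBuffer(buffer_size=1000, observation_space=obs_space, action_space=act_space)

# one transition from 4 parallel environments
buffer.append(
    observations=np.random.randn(4, 3),
    actions=np.random.randn(4, 1),
    observations_next=np.random.randn(4, 3),
    rewards=np.zeros(4),
    dones=np.zeros(4),
)
obs, act, obs_next, rew, done = buffer.sample(batch_size=2)
print(obs.shape, act.shape)  # (2, 3) (2, 1)
```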
#### File: pbrl/competitive/multi.py
```python
import numpy as np
from pbrl.env.dummy import DummyVecEnv
def transpose(xs):
return tuple(np.asarray(x) for x in zip(*xs))
def reset_after_done(env, action):
obs, reward, done, info = env.step(action)
if True in done:
obs = env.reset()
return obs, reward, done, info
class MultiDummyEnv(DummyVecEnv):
def reset(self):
observations = transpose([env.reset() for env in self.envs])
return observations
def step(self, actions):
actions = transpose(actions)
results = [reset_after_done(env, action) for env, action in zip(self.envs, actions)]
observations, rewards, dones, infos = zip(*results)
return list(map(transpose, (observations, rewards, dones, infos)))
```
#### File: pbrl/competitive/runner.py
```python
import time
from typing import List
import numpy as np
from pbrl.algorithms.ppo.policy import Policy
class MultiPolicyRunner:
def __init__(
self,
env,
policy_num,
episode_num,
render=None
):
self.env = env
self.env_num = env.env_num
self.policy_num = policy_num
self.episode_num = episode_num
self.observations = None
self.states_actor = None
self.episode_rewards = np.zeros((self.policy_num, self.env_num))
self.render = render
def reset(self):
self.observations = self.env.reset()
self.states_actor = tuple(None for _ in range(self.policy_num))
self.episode_rewards[:, :] = 0.0
if self.render is not None:
self.env.render()
time.sleep(self.render)
def run(self, policies: List[Policy]):
timestep = 0
episode = 0
episode_rewards = tuple([] for _ in range(self.policy_num))
episode_infos = tuple([] for _ in range(self.policy_num))
while True:
observations = tuple(map(Policy.normalize_observations, policies, self.observations))
actions, log_probs, self.states_actor = zip(
*map(Policy.step, policies, observations, self.states_actor)
)
actions_ = tuple(map(Policy.wrap_actions, policies, actions))
self.observations, rewards, dones, infos = self.env.step(actions_)
timestep += self.env_num
self.episode_rewards += rewards
if self.render is not None:
self.env.render()
time.sleep(self.render)
for i in range(self.env_num):
if dones[0][i]:
episode += 1
for index in range(self.policy_num):
states_actor = self.states_actor[index]
policy = policies[index]
if policy.rnn:
if isinstance(states_actor, tuple):
# lstm
for states_ in states_actor:
states_[:, i, :] = 0.
else:
# gru
states_actor[:, i, :] = 0.
episode_rewards[index].append(self.episode_rewards[index, i])
episode_infos[index].append(infos[index][i])
self.episode_rewards[index, i] = 0.0
if episode > self.episode_num:
break
return dict(
episode=episode,
timestep=timestep,
reward=episode_rewards,
info=episode_infos
)
```
#### File: pbrl/policy/policy.py
```python
from typing import Callable, Optional, Tuple, Any, List, Type
import numpy as np
import torch
from gym.spaces import Box, Discrete, Space
from pbrl.common.rms import RunningMeanStd
from pbrl.policy.wrapper import TanhWrapper, ClipWrapper
def get_action_wrapper(action_space, clip_fn: str) -> Optional[Callable[[np.ndarray], np.ndarray]]:
action_wrapper = None
if isinstance(action_space, Box):
low = action_space.low
high = action_space.high
if clip_fn == 'tanh':
return TanhWrapper(low, high)
elif clip_fn == 'clip':
return ClipWrapper(low, high)
else:
raise NotImplementedError
return action_wrapper
class BasePolicy:
def __init__(
self,
observation_space: Space,
action_space: Space,
hidden_sizes: List[int],
activation: Type[torch.nn.Module],
rnn: Optional[str],
clip_fn: str,
obs_norm: bool,
reward_norm: bool,
gamma: float,
obs_clip: float,
reward_clip: float,
device: torch.device
):
self.observation_space = observation_space
self.action_space = action_space
self.hidden_sizes = hidden_sizes
self.activation = activation
if rnn is not None:
rnn = rnn.lower()
assert rnn in ('lstm', 'gru')
self.rnn = rnn
self.device = device
self.obs_norm = obs_norm
self.rms_obs = RunningMeanStd(
np.zeros(observation_space.shape, dtype=np.float64),
np.ones(observation_space.shape, dtype=np.float64)
) if self.obs_norm else None
self.gamma = gamma
self.obs_clip = obs_clip
self.reward_norm = reward_norm
self.rms_reward = RunningMeanStd(0.0, 1.0) if self.reward_norm else None
self.reward_clip = reward_clip
self.action_wrapper = get_action_wrapper(action_space, clip_fn)
self.actor: Optional[torch.nn.Module] = None
self.critic: Optional[torch.nn.Module] = None
def step(
self,
observations: np.ndarray,
states_actor
):
raise NotImplementedError
def act(
self,
observations: np.ndarray,
states_actor
) -> Tuple[np.ndarray, Any]:
raise NotImplementedError
@staticmethod
def t2n(t: torch.Tensor) -> np.ndarray:
return t.detach().cpu().numpy()
def n2t(self, n: np.ndarray) -> torch.Tensor:
if n.dtype == np.float64:
n = n.astype(np.float32)
return torch.from_numpy(n).to(self.device)
def normalize_observations(self, observations: np.ndarray, update=False):
if self.obs_norm:
if update:
self.rms_obs.update(observations)
observations = (observations - self.rms_obs.mean) / np.sqrt(self.rms_obs.var + self.rms_obs.eps)
observations = np.clip(observations, -self.obs_clip, self.obs_clip)
return observations
def normalize_rewards(
self,
rewards: np.ndarray,
update=False,
returns: np.ndarray = None,
dones: np.ndarray = None
):
if self.reward_norm:
if update:
returns[:] = returns * self.gamma + rewards
self.rms_reward.update(returns)
returns[dones] = 0.0
rewards = rewards / np.sqrt(self.rms_reward.var + self.rms_reward.eps)
rewards = np.clip(rewards, -self.reward_clip, self.reward_clip)
return rewards
def wrap_actions(self, actions: np.ndarray):
if self.action_wrapper:
return self.action_wrapper(actions)
else:
return actions
def reset_state(self, states_actor, i):
if self.rnn == 'lstm':
for states_ in states_actor:
states_[:, i, :] = 0.
elif self.rnn == 'gru':
states_actor[:, i, :] = 0.
def random_action(
self,
env_num: int
):
if isinstance(self.action_space, Box):
return np.random.uniform(-1.0, 1.0, size=(env_num, *self.action_space.shape))
elif isinstance(self.action_space, Discrete):
return np.random.randint(self.action_space.n, size=env_num)
else:
raise NotImplementedError
``` |
{
"source": "jjcc/OpenData",
"score": 2
} |
#### File: opendatatools/amac/amac_interface.py
```python
import datetime
from .amac_agent import AMACAgent
amac_agent = AMACAgent()
def set_proxies(proxies):
amac_agent.set_proxies(proxies)
def get_company_list():
return amac_agent.get_company_list()
def get_company_detail(company_id):
return amac_agent.get_company_detail(company_id)
def get_fund_list():
return amac_agent.get_fund_list()
def get_fund_detail(fund_id):
return amac_agent.get_fund_detail(fund_id)
```
#### File: opendatatools/aqi2/aqi2_interface.py
```python
from .aqi2_agent import AQIStudyAgent
aqistudy_agent = AQIStudyAgent()
def set_proxies(proxies):
aqistudy_agent.set_proxies(proxies)
def get_city_list():
return aqistudy_agent.get_city_list()
def get_hist_daily_aqi(city, begindate, enddate):
return aqistudy_agent.get_hist_daily_aqi(city, begindate, enddate)
def get_daily_hour_aqi(city, date):
return aqistudy_agent.get_daily_hour_aqi(city, date)
def get_aqi_map(type, timepoint):
return aqistudy_agent.get_api_map(type, timepoint)
```
#### File: opendatatools/hedgefund/simu_agent.py
```python
from opendatatools.common import RestAgent, md5
from progressbar import ProgressBar
import json
import pandas as pd
import io
import hashlib
import time
index_map = {
'Barclay_Hedge_Fund_Index' : 'ghsndx',
'Convertible_Arbitrage_Index' : 'ghsca',
'Distressed_Securities_Index' : 'ghsds',
'Emerging_Markets_Index' : 'ghsem',
'Equity_Long_Bias_Index' : 'ghselb',
'Equity_Long_Short_Index' : 'ghsels',
'Equity_Market_Neutral_Index' : 'ghsemn',
'European_Equities_Index' : 'ghsee',
'Event_Driven_Index' : 'ghsed',
'Fixed_Income_Arbitrage_Index' : 'ghsfia',
'Fund_of_Funds_Index' : 'ghsfof',
'Global_Macro_Index' : 'ghsmc',
'Healthcare_&_Biotechnology_Index': 'ghsbio',
'Merger_Arbitrage_Index' : 'ghsma',
'Multi_Strategy_Index' : 'ghsms',
'Pacific_Rim_Equities_Index' : 'ghspre',
'Technology_Index' : 'ghstec',
}
class SimuAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
self.user_info = None
self.df_fundlist = None
self.cookies = None
def login(self, username, password):
url = 'https://passport.simuwang.com/index.php?m=Passport&c=auth&a=login&type=login&name=%s&pass=%s&reme=1&rn=1' % (username, password)
self.add_headers({'Referer': 'https://dc.simuwang.com/'})
response = self.do_request(url)
if response is None:
return None, '登录失败'
jsonobj = json.loads(response)
suc = jsonobj['suc']
msg = jsonobj['msg']
if suc != 1:
return None, msg
self.cookies = self.get_cookies()
self.user_info = jsonobj['data']
return self.user_info, msg
def prepare_cookies(self, url):
response = self.do_request(url, None)
if response is not None:
cookies = self.get_cookies()
return cookies
else:
return None
def _get_rz_token(self, time):
mk = time * 158995555893
mtoken = md5(md5(str(mk))) + '.' + str(time)
return mtoken
def _get_fund_list_page(self, page_no):
url = 'https://dc.simuwang.com/ranking/get?page=%s&condition=fund_type:1,6,4,3,8,2;ret:9;rating_year:1;istiered:0;company_type:1;sort_name:profit_col2;sort_asc:desc;keyword:' % page_no
response = self.do_request(url)
if response is None:
return None, '获取数据失败', None
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['msg']
if code != 1000:
return None, msg, None
df = pd.DataFrame(jsonobj['data'])
pageinfo = jsonobj['pager']
return df, '', pageinfo
def load_data(self):
page_no = 1
df_list = []
df, msg, pageinfo = self._get_fund_list_page(page_no)
if df is None:
return None, msg
df_list.append(df)
page_count = pageinfo['pagecount']
process_bar = ProgressBar().start(max_value=page_count)
page_no = page_no + 1
while page_no <= page_count:
df, msg, pageinfo = self._get_fund_list_page(page_no)
if df is None:
return None, msg
df_list.append(df)
process_bar.update(page_no)
page_no = page_no + 1
self.df_fundlist = pd.concat(df_list)
return self.df_fundlist, ''
def get_fund_list(self):
if self.df_fundlist is None:
return None, '请先加载数据 load_data'
return self.df_fundlist, ''
def _get_sign(self, url, params):
str = url
for k,v in params.items():
str = str + k + params[k]
sha1 = hashlib.sha1()
sha1.update(str.encode('utf8'))
sign = sha1.hexdigest()
return sign
def _get_token(self, fund_id):
sign = self._get_sign('https://dc.simuwang.com/Api/getToken', {'id' : fund_id})
url = 'https://dc.simuwang.com/Api/getToken?id=%s&sign=%s' % (fund_id, sign)
self.add_headers({'Referer': 'https://dc.simuwang.com/'})
response = self.do_request(url)
if response is None:
return None, '获取数据失败'
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['message']
if code != 1000 :
return code, msg
self.cookies.update(self.get_cookies())
salt = jsonobj['data']
muid = self.user_info['userid']
#str = 'id%smuid%spage%s%s' % (fund_id, muid, page_no, salt)
str = '%s%s' % (fund_id, salt)
sha1 = hashlib.sha1()
sha1.update(str.encode('utf8'))
token = sha1.hexdigest()
return token, ''
def _get_fund_nav_page(self, fund_id, page_no):
muid = self.user_info['userid']
token, msg = self._get_token(fund_id)
if token is None:
return None, '获取token失败: ' + msg, ''
url = 'https://dc.simuwang.com/fund/getNavList.html'
self.add_headers({'Referer': 'https://dc.simuwang.com/product/%s.html' % fund_id})
data = {
'id' : fund_id,
'muid' : muid,
'page' : str(page_no),
'token': token,
}
response = self.do_request(url, param=data, cookies=self.cookies, encoding="utf8")
if response is None:
return None, '获取数据失败', ''
jsonobj = json.loads(response)
code = jsonobj['code']
msg = jsonobj['msg']
if code != 1000 :
return code, msg, ''
df = pd.DataFrame(jsonobj['data'])
pageinfo = jsonobj['pager']
return df, '', pageinfo
def _bit_encrypt(self, str, key):
cryText = ''
keyLen = len(key)
strLen = len(str)
for i in range(strLen):
k = i % keyLen
cryText = cryText + chr(ord(str[i]) - k)
return cryText
def _bit_encrypt2(self, str, key):
cryText = ''
keyLen = len(key)
strLen = len(str)
for i in range(strLen):
k = i % keyLen
cryText = cryText + chr(ord(str[i]) ^ ord(key[k]))
return cryText
def _decrypt_data(self, str, func, key):
# return self._bit_encrypt(str, 'cd0a8bee4c6b2f8a91ad5538dde2eb34')
# return self._bit_encrypt(str, '937ab03370497f2b4e8d0599ad25c44c')
# return self._bit_encrypt(str, '083975ce19392492bbccff21a52f1ace')
return func(str, key)
def _get_decrypt_info(self, fund_id):
url = 'https://dc.simuwang.com/product/%s.html' % fund_id
response = self.do_request(url, param=None, cookies=self.cookies, encoding="utf8")
if response is None:
return None, '获取数据失败', ''
if "String.fromCharCode(str.charCodeAt(i) - k)" in response:
decrypt_func = self._bit_encrypt
else:
decrypt_func = self._bit_encrypt2
if response.find("return xOrEncrypt(str, ")> 0:
tag = "return xOrEncrypt(str, "
else:
tag = "return bitEncrypt(str, "
pos = response.index(tag) + len(tag) + 1
key = response[pos:pos+32]
return decrypt_func, key
def get_fund_nav(self, fund_id, time_elapse = 0):
if self.user_info is None:
            return None, 'Please log in first'
page_no = 1
df_list = []
df, msg, pageinfo = self._get_fund_nav_page(fund_id, page_no)
if df is None:
return None, msg
df_list.append(df)
page_count = pageinfo['pagecount']
page_no = page_no + 1
while page_no <= page_count:
try_times = 1
while try_times <= 3:
df, msg, pageinfo = self._get_fund_nav_page(fund_id, page_no)
if df is None:
                    if try_times >= 3:
return None, msg
else:
try_times = try_times + 1
continue
else:
df_list.append(df)
break
page_no = page_no + 1
if time_elapse > 0:
time.sleep(time_elapse)
df_nav = pd.concat(df_list)
df_nav.drop('c', axis=1, inplace=True)
df_nav.rename(columns={'d': 'date', 'n': 'nav', 'cn' : 'accu_nav', 'cnw' : 'accu_nav_w'}, inplace=True)
        # This site has far too many little gotchas
func, key = self._get_decrypt_info(fund_id)
df_nav['nav'] = df_nav['nav'].apply(lambda x : self._decrypt_data(x, func, key))
df_nav['accu_nav'] = df_nav['accu_nav'].apply(lambda x : self._decrypt_data(x, func, key))
df_nav['accu_nav_w'] = df_nav['accu_nav_w'].apply(lambda x : self._decrypt_data(x, func, key))
#df_nav['nav'] = df_nav['nav'] - df_nav.index * 0.01 - 0.01
#df_nav['accu_nav'] = df_nav['accu_nav'].apply(lambda x: float(x) - 0.01)
#df_nav['accu_nav_w'] = df_nav['accu_nav_w'].apply(lambda x: float(x) - 0.02)
return df_nav, ''
class BarclayAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
self.add_headers({'Referer': 'https://www.barclayhedge.com/research/indices/ghs/Equity_Long_Short_Index.html'})
self.add_headers({'Content - Type': 'application / x - www - form - urlencoded'})
def get_data(self, index):
prog_cod = index_map[index]
url = "https://www.barclayhedge.com/cgi-bin/barclay_stats/ghsndx.cgi"
param = {
'dump': 'excel',
'prog_cod': prog_cod,
}
response = self.do_request(url, param=param, method='POST', type='binary')
if response is not None:
excel = pd.ExcelFile(io.BytesIO(response))
df = excel.parse('Sheet1').dropna(how='all').copy().reset_index().drop(0)
df.columns = ['year', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'YTD']
df = df.set_index('year')
return df, ''
        return None, "Failed to fetch data"
``` |
{
"source": "jjccrraa/lottoProject",
"score": 3
} |
#### File: jjccrraa/lottoProject/bot.py
```python
import json
import requests
FACEBOOK_GRAPH_URL = 'https://graph.facebook.com/v2.6/me/'
class Bot(object):
def __init__(self, access_token, api_url=FACEBOOK_GRAPH_URL):
self.access_token = access_token
self.api_url = api_url
def send_text_message(self, psid, message, messaging_type="RESPONSE"):
headers = {
'Content-Type': 'application/json'
}
data = {
'messaging_type': messaging_type,
'recipient': {'id': psid},
'message': {'text': message}
}
params = {'access_token': self.access_token}
        url = self.api_url + 'messages'
        response = requests.post(url, headers=headers, params=params, data=json.dumps(data))
print(response.content)
bot = Bot('<KEY>')
#bot.send_text_message(2045925488848914, 'Test 1')
```
#### File: site-packages/soupsieve/__init__.py
```python
from __future__ import unicode_literals
from .__meta__ import __version__, __version_info__ # noqa: F401
from . import css_parser as cp
from . import css_match as cm
from . import css_types as ct
from .util import DEBUG, _QUIRKS # noqa: F401
__all__ = (
    'SoupSieve', 'compile', 'purge', 'DEBUG', "_QUIRKS",
'comments', 'icomments', 'closest', 'select', 'select_one',
'iselect', 'match', 'filter'
)
SoupSieve = cm.SoupSieve
def compile(pattern, namespaces=None, flags=0): # noqa: A001
"""Compile CSS pattern."""
if namespaces is None:
namespaces = ct.Namespaces()
if not isinstance(namespaces, ct.Namespaces):
namespaces = ct.Namespaces(**(namespaces))
if isinstance(pattern, SoupSieve):
if flags != pattern.flags:
raise ValueError("Cannot change flags of a pattern")
elif namespaces != pattern.namespaces:
raise ValueError("Cannot change namespaces of a pattern")
return pattern
return cp._cached_css_compile(pattern, namespaces, flags)
def purge():
"""Purge cached patterns."""
cp._purge_cache()
def closest(select, tag, namespaces=None, flags=0):
"""Match closest ancestor."""
return compile(select, namespaces, flags).closest(tag)
def match(select, tag, namespaces=None, flags=0):
"""Match node."""
return compile(select, namespaces, flags).match(tag)
def filter(select, iterable, namespaces=None, flags=0): # noqa: A001
"""Filter list of nodes."""
return compile(select, namespaces, flags).filter(iterable)
def comments(tag, limit=0, flags=0):
"""Get comments only."""
return list(icomments(tag, limit, flags))
def icomments(tag, limit=0, flags=0):
"""Iterate comments only."""
for comment in cm.CommentsMatch(tag).get_comments(limit):
yield comment
def select_one(select, tag, namespaces=None, flags=0):
"""Select a single tag."""
return compile(select, namespaces, flags).select_one(tag)
def select(select, tag, namespaces=None, limit=0, flags=0):
"""Select the specified tags."""
return compile(select, namespaces, flags).select(tag, limit)
def iselect(select, tag, namespaces=None, limit=0, flags=0):
"""Iterate the specified tags."""
for el in compile(select, namespaces, flags).iselect(tag, limit):
yield el
``` |
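A short usage sketch of the helpers above with BeautifulSoup (the HTML snippet is made up):
```python
from bs4 import BeautifulSoup
import soupsieve as sv

soup = BeautifulSoup('<div><p class="a">one</p><p>two</p></div>', 'html.parser')
print(sv.select('p.a', soup))        # [<p class="a">one</p>]
print(sv.match('div > p', soup.p))   # True
pattern = sv.compile('p')            # precompiled pattern, reusable across documents
print(len(pattern.select(soup)))     # 2
```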
{
"source": "jjchacona/spark-snowflake",
"score": 2
} |
#### File: test/python/unittest.py
```python
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
from pyspark import SparkConf, SparkContext
from pyspark.sql.functions import udf
import json
sc = SparkContext("local", "Simple App")
spark = SQLContext(sc)
spark_conf = SparkConf().setMaster('local').setAppName('mruifirstsparkapp')
# Below options are only used by external stage
# sc._jsc.hadoopConfiguration().set("fs.s3n.awsAccessKeyId", "<YOUR_AWS_KEY>")
# sc._jsc.hadoopConfiguration().set("fs.s3n.awsSecretAccessKey", "<YOUR_AWS_SECRET>")
# Utility function to get configration info from snowflake.travis.json
def getConfig():
# with open('/Users/mrui/spark/spark-snowflake/snowflake.travis.json') as f:
with open('../../../snowflake.travis.json') as f:
snowflake_travis_json = json.load(f)
sfOptions = {}
sfOptions["sfCompress"] = snowflake_travis_json["common"]["sfCompress"]
sfOptions["sfSSL"] = snowflake_travis_json["common"]["sfSSL"]
sfOptions["dbtable"] = snowflake_travis_json["common"]["dbtable"]
sfOptions["runOption"] = snowflake_travis_json["common"]["runOption"]
sfOptions["sfTimeZone"] = snowflake_travis_json["common"]["sfTimeZone"]
sfOptions["sfDatabase"] = snowflake_travis_json["common"]["sfDatabase"]
sfOptions["sfSchema"] = snowflake_travis_json["common"]["sfSchema"]
sfOptions["sfWarehouse"] = snowflake_travis_json["common"]["sfWarehouse"]
sfOptions["sfUser"] = snowflake_travis_json["common"]["sfUser"]
sfOptions["pem_private_key"] = snowflake_travis_json["common"]["pem_private_key"]
sfOptions["useProxy"] = snowflake_travis_json["common"]["useProxy"]
# Use COPY UNLOAD or SELECT
sfOptions["use_copy_unload"] = "false"
for item in snowflake_travis_json["account_info"] :
if item["name"] == 'aws' :
sfOptions["sfURL"] = item["config"]["sfURL"]
data = item["config"]["sfURL"].split(".")
sfOptions["sfAccount"] = data[0]
return sfOptions
# UDF for testing
def squared(s):
return s * s
sfOptions = getConfig()
SNOWFLAKE_SOURCE_NAME = "net.snowflake.spark.snowflake"
spark.udf.register("squaredWithPython", squared)
tmpdf = spark.read.format(SNOWFLAKE_SOURCE_NAME) \
.options(**sfOptions) \
.option("dbtable", "TEST_TABLE_LARGE_RESULT_2518893414841183039") \
.load()
tmpdf.createOrReplaceTempView("mrui_large_table")
df = spark.sql("select int_c, c_string, squaredWithPython(int_c) from mrui_large_table")
df.show(100)
``` |
{
"source": "jjcherry56/bsg_fakeram",
"score": 3
} |
#### File: scripts/utils/generate_lef.py
```python
import os
import sys
import math
################################################################################
# GENERATE LEF VIEW
#
# Generate a .lef file based on the given SRAM.
################################################################################
def generate_lef( mem ):
# File pointer
fid = open(os.sep.join([mem.results_dir, mem.name + '.lef']), 'w')
# Memory parameters
name = mem.name
depth = mem.depth
bits = mem.width_in_bits
w = mem.width_um
h = mem.height_um
num_rwport = mem.rw_ports
addr_width = math.ceil(math.log2(mem.depth))
# Process parameters
min_pin_width = mem.process.pinWidth_um
min_pin_pitch = mem.process.pinPitch_um
metalPrefix = mem.process.metalPrefix
flip = mem.process.flipPins.lower() == 'true'
# Offset from bottom edge to first pin
x_offset = 10 * min_pin_pitch ;# arbitrary offset (looks decent)
y_offset = 10 * min_pin_pitch ;# arbitrary offset (looks decent)
#########################################
# Calculate the pin spacing (pitch)
#########################################
number_of_pins = 3*bits + addr_width + 3
number_of_tracks_available = math.floor((h - 2*y_offset) / min_pin_pitch)
number_of_spare_tracks = number_of_tracks_available - number_of_pins
if number_of_spare_tracks < 0:
print("Error: not enough tracks (num pins: %d, available tracks: %d)." % (number_of_pins, number_of_tracks_available))
sys.exit(1)
track_count = 1
while number_of_spare_tracks > 0:
track_count += 1
number_of_spare_tracks = number_of_tracks_available - number_of_pins*track_count
track_count -= 1
pin_pitch = min_pin_pitch * track_count
group_pitch = math.floor((number_of_tracks_available - number_of_pins*track_count) / 4)*mem.process.pinPitch_um
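    # Worked example with hypothetical numbers: h = 100 um and min_pin_pitch = 0.1 um
    # (so y_offset = 1.0 um) give floor(98.0 / 0.1) = 980 available tracks; with 200
    # pins the loop above settles on track_count = 4, i.e. pin_pitch = 0.4 um, and the
    # 180 leftover tracks are split across the 4 gaps between pin groups
    # (group_pitch = floor(180 / 4) * 0.1 = 4.5 um).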
#########################################
# LEF HEADER
#########################################
fid.write('VERSION 5.7 ;\n')
fid.write('BUSBITCHARS "[]" ;\n')
fid.write('MACRO %s\n' % (name))
fid.write(' FOREIGN %s 0 0 ;\n' % (name))
fid.write(' SYMMETRY X Y R90 ;\n')
fid.write(' SIZE %.3f BY %.3f ;\n' % (w,h))
fid.write(' CLASS BLOCK ;\n')
########################################
# LEF SIGNAL PINS
########################################
y_step = y_offset
for i in range(int(bits)) :
y_step = lef_add_pin( fid, mem, 'w_mask_in[%d]'%i, True, y_step, pin_pitch )
y_step += group_pitch-pin_pitch
for i in range(int(bits)) :
y_step = lef_add_pin( fid, mem, 'rd_out[%d]'%i, False, y_step, pin_pitch )
y_step += group_pitch-pin_pitch
for i in range(int(bits)) :
y_step = lef_add_pin( fid, mem, 'wd_in[%d]'%i, True, y_step, pin_pitch )
y_step += group_pitch-pin_pitch
for i in range(int(addr_width)) :
y_step = lef_add_pin( fid, mem, 'addr_in[%d]'%i, True, y_step, pin_pitch )
y_step += group_pitch-pin_pitch
y_step = lef_add_pin( fid, mem, 'we_in', True, y_step, pin_pitch )
y_step = lef_add_pin( fid, mem, 'ce_in', True, y_step, pin_pitch )
y_step = lef_add_pin( fid, mem, 'clk', True, y_step, pin_pitch )
########################################
# Create VDD/VSS Strapes
########################################
supply_pin_width = min_pin_width*4
supply_pin_half_width = supply_pin_width/2
supply_pin_pitch = min_pin_pitch*8
supply_pin_layer = '%s4' % metalPrefix
# Vertical straps
if flip:
x_step = x_offset
fid.write(' PIN VSS\n')
fid.write(' DIRECTION INOUT ;\n')
fid.write(' USE GROUND ;\n')
fid.write(' PORT\n')
fid.write(' LAYER %s ;\n' % supply_pin_layer)
while x_step <= w - x_offset:
fid.write(' RECT %.3f %.3f %.3f %.3f ;\n' % (x_step-supply_pin_half_width, y_offset, x_step+supply_pin_half_width, h-y_offset))
x_step += supply_pin_pitch*2
fid.write(' END\n')
fid.write(' END VSS\n')
x_step = x_offset + supply_pin_pitch
fid.write(' PIN VDD\n')
fid.write(' DIRECTION INOUT ;\n')
fid.write(' USE POWER ;\n')
fid.write(' PORT\n')
fid.write(' LAYER %s ;\n' % supply_pin_layer)
while x_step <= w - x_offset:
fid.write(' RECT %.3f %.3f %.3f %.3f ;\n' % (x_step-supply_pin_half_width, y_offset, x_step+supply_pin_half_width, h-y_offset))
x_step += supply_pin_pitch*2
fid.write(' END\n')
fid.write(' END VDD\n')
# Horizontal straps
else:
y_step = y_offset
fid.write(' PIN VSS\n')
fid.write(' DIRECTION INOUT ;\n')
fid.write(' USE GROUND ;\n')
fid.write(' PORT\n')
fid.write(' LAYER %s ;\n' % supply_pin_layer)
while y_step <= h - y_offset:
fid.write(' RECT %.3f %.3f %.3f %.3f ;\n' % (x_offset, y_step-supply_pin_half_width, w-x_offset, y_step+supply_pin_half_width))
y_step += supply_pin_pitch*2
fid.write(' END\n')
fid.write(' END VSS\n')
y_step = y_offset + supply_pin_pitch
fid.write(' PIN VDD\n')
fid.write(' DIRECTION INOUT ;\n')
fid.write(' USE POWER ;\n')
fid.write(' PORT\n')
fid.write(' LAYER %s ;\n' % supply_pin_layer)
while y_step <= h - y_offset:
fid.write(' RECT %.3f %.3f %.3f %.3f ;\n' % (x_offset, y_step-supply_pin_half_width, w-x_offset, y_step+supply_pin_half_width))
y_step += supply_pin_pitch*2
fid.write(' END\n')
fid.write(' END VDD\n')
########################################
# Create obstructions
########################################
fid.write(' OBS\n')
################
# Layer 1
################
# No pins (full rect)
fid.write(' LAYER %s1 ;\n' % metalPrefix)
fid.write(' RECT 0 0 %.3f %.3f ;\n' % (w,h))
################
# Layer 2
################
# No pins (full rect)
fid.write(' LAYER %s2 ;\n' % metalPrefix)
fid.write(' RECT 0 0 %.3f %.3f ;\n' % (w,h))
################
# Layer 3
################
fid.write(' LAYER %s3 ;\n' % metalPrefix)
# Flipped therefore pins on M3
if flip:
# Rect from top to bottom, just right of pins to right edge
fid.write(' RECT %.3f 0 %.3f %.3f ;\n' % (min_pin_width,w,h))
# Walk through same calculation as pins and draw from bottom of the
# current pin to the top of last pin (start with bottom edge)
prev_y = 0
y_step = y_offset
for i in range(int(bits)) :
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,y_step-min_pin_width/2))
prev_y = y_step+min_pin_width/2
y_step += pin_pitch
y_step += group_pitch-pin_pitch
for i in range(int(bits)) :
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,y_step-min_pin_width/2))
prev_y = y_step+min_pin_width/2
y_step += pin_pitch
y_step += group_pitch-pin_pitch
for i in range(int(bits)) :
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,y_step-min_pin_width/2))
prev_y = y_step+min_pin_width/2
y_step += pin_pitch
y_step += group_pitch-pin_pitch
for i in range(int(addr_width)) :
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,y_step-min_pin_width/2))
prev_y = y_step+min_pin_width/2
y_step += pin_pitch
y_step += group_pitch-pin_pitch
for i in range(3):
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,y_step-min_pin_width/2))
prev_y = y_step+min_pin_width/2
y_step += pin_pitch
        # Final shape from top of last pin to top edge
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,h))
# Not flipped therefore no pins on M3 (Full rect)
else:
fid.write(' RECT 0 0 %.3f %.3f ;\n' % (w,h))
################
# Layer 4
################
fid.write(' LAYER %s4 ;\n' % metalPrefix)
# Flipped therefore only vertical pg straps
if flip:
# Block under and above the vertical power straps (full width)
fid.write(' RECT 0 0 %.3f %.3f ;\n' % (w, y_offset))
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (h-y_offset,w,h))
# Walk through the same calculations to create pg straps and create obs
# from the left of the current strap to the right of the previous strap
# (start with the left edge)
prev_x = 0
x_step = x_offset
while x_step <= w - x_offset:
fid.write(' RECT %.3f %.3f %.3f %.3f ;\n' % (prev_x,y_offset,x_step-supply_pin_half_width,h-y_offset))
prev_x = x_step+supply_pin_half_width
x_step += supply_pin_pitch
# Create a block from the right of the last strap to the right edge
fid.write(' RECT %.3f %.3f %.3f %.3f ;\n' % (prev_x,y_offset,w,h-y_offset))
# Not flipped therefore pins on M4 and horizontal pg straps
else:
# Block from right of pins to left of straps and a block to the right
# of the straps (full height)
fid.write(' RECT %.3f 0 %.3f %.3f ;\n' % (min_pin_width, x_offset, h))
fid.write(' RECT %.3f 0 %.3f %.3f ;\n' % (w-x_offset, w, h))
# Walk through the same calculations to create pg straps and create obs
# from the bottom of the current strap to the top of the previous strap
# (start with the bottom edge)
prev_y = 0
y_step = y_offset
while y_step <= h - y_offset:
fid.write(' RECT %.3f %.3f %.3f %.3f ;\n' % (x_offset, prev_y, w-x_offset, y_step-supply_pin_half_width))
prev_y = y_step+supply_pin_half_width
y_step += supply_pin_pitch
# Create a block from the top of the last strap to the top edge
fid.write(' RECT %.3f %.3f %.3f %.3f ;\n' % (x_offset, prev_y, w-x_offset, h))
# Walk through same calculation as pins and draw from bottom of the
# current pin to the top of last pin (start with bottom edge)
prev_y = 0
y_step = y_offset
for i in range(int(bits)) :
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,y_step-min_pin_width/2))
prev_y = y_step+min_pin_width/2
y_step += pin_pitch
y_step += group_pitch-pin_pitch
for i in range(int(bits)) :
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,y_step-min_pin_width/2))
prev_y = y_step+min_pin_width/2
y_step += pin_pitch
y_step += group_pitch-pin_pitch
for i in range(int(bits)) :
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,y_step-min_pin_width/2))
prev_y = y_step+min_pin_width/2
y_step += pin_pitch
y_step += group_pitch-pin_pitch
for i in range(int(addr_width)) :
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,y_step-min_pin_width/2))
prev_y = y_step+min_pin_width/2
y_step += pin_pitch
y_step += group_pitch-pin_pitch
for i in range(3):
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,y_step-min_pin_width/2))
prev_y = y_step+min_pin_width/2
y_step += pin_pitch
        # Final shape from top of last pin to top edge
fid.write(' RECT 0 %.3f %.3f %.3f ;\n' % (prev_y,min_pin_width,h))
# Overlap layer (full rect)
fid.write(' LAYER OVERLAP ;\n')
fid.write(' RECT 0 0 %.3f %.3f ;\n' % (w,h))
# Finish up LEF file
fid.write(' END\n')
fid.write('END %s\n' % name)
fid.write('\n')
fid.write('END LIBRARY\n')
fid.close()
#
# Helper function that adds a signal pin
#
def lef_add_pin( fid, mem, pin_name, is_input, y, pitch ):
layer = mem.process.metalPrefix + ('3' if mem.process.flipPins.lower() == 'true' else '4')
pw = mem.process.pinWidth_um
hpw = (mem.process.pinWidth_um/2.0) ;# half pin width
fid.write(' PIN %s\n' % pin_name)
fid.write(' DIRECTION %s ;\n' % ('INPUT' if is_input else 'OUTPUT'))
fid.write(' USE SIGNAL ;\n')
fid.write(' SHAPE ABUTMENT ;\n')
fid.write(' PORT\n')
fid.write(' LAYER %s ;\n' % layer)
fid.write(' RECT %.3f %.3f %.3f %.3f ;\n' % (0, y-hpw, pw, y+hpw))
fid.write(' END\n')
fid.write(' END %s\n' % pin_name)
return y + pitch
``` |
{
"source": "jjcherry56/OpenROAD-flow-scripts",
"score": 2
} |
#### File: flow/util/genMetrics.py
```python
import os
from sys import exit
from datetime import datetime, timedelta
from collections import defaultdict
from uuid import uuid4 as uuid
from subprocess import check_output, call, STDOUT
import argparse
import json
import pandas as pd
import re
# make sure the working dir is flow/
os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)) , '..'))
# Parse and validate arguments
# =============================================================================
def parse_args():
parser = argparse.ArgumentParser(
description='Generates metadata from OpenROAD flow')
parser.add_argument('--flowPath',
'-f',
required=False,
default='./',
help='Path to the flow directory')
parser.add_argument('--design',
'-d',
required=False,
default='all_designs',
help='Design Name for metrics')
parser.add_argument('--flowVariant',
'-v',
required=False,
default='base',
help='FLOW_VARIANT for the design')
parser.add_argument('--platform',
'-p',
required=False,
default='nangate45',
help='Design Platform')
parser.add_argument('--comment',
'-c',
required=False,
default='',
help='Additional comments to embed')
parser.add_argument('--output',
'-o',
required=False,
default='metadata.json',
help='Output file')
parser.add_argument('--hier',
'-x',
action='store_true',
help='Hierarchical JSON')
args = parser.parse_args()
if not os.path.isdir(args.flowPath):
print('[ERROR] flowPath does not exist')
print('Path: ' + args.flowPath)
exit(1)
return args
# Functions
# =============================================================================
# Main function to do specific extraction of patterns from a file
# This function will look for a regular expression 'pattern' in a 'file', and
# set the key, 'jsonTag', to the value found. The specific 'occurrence' selects
# which occurrence it uses (default -1, i.e., last). If pattern not found, it
# will print an error and set the value to N/A. If a 'defaultNotFound' is set,
# it will use that instead. If count is set to True, it will return the count
# of the pattern.
def extractTagFromFile(jsonTag, jsonFile, pattern, file, count=False,
occurrence=-1, defaultNotFound='N/A', t=str,
required=True):
if jsonTag in jsonFile:
print('[WARN] Overwriting Tag', jsonTag)
# Open file
try:
searchFilePath = os.path.join(args.flowPath, file)
with open(searchFilePath) as f:
content = f.read()
parsedMetrics = re.findall(pattern, content, re.M)
patternNotFound = (len(parsedMetrics) < abs(occurrence))
if patternNotFound and not required:
jsonFile[jsonTag] = defaultNotFound
return
if parsedMetrics:
if count:
# Return the count
jsonFile[jsonTag] = len(parsedMetrics)
else:
# Note: This gets the specified occurrence
value = parsedMetrics[occurrence]
value = value.strip()
try:
jsonFile[jsonTag] = float(value)
except BaseException:
jsonFile[jsonTag] = str(value)
else:
# Only print a warning if the defaultNotFound is not set
print('[WARN] Tag {} not found in {}.'.format(jsonTag,
searchFilePath),
'Will use {}.'.format(defaultNotFound))
jsonFile[jsonTag] = defaultNotFound
except IOError:
print('[ERROR] Failed to open file:', searchFilePath)
jsonFile[jsonTag] = 'ERR'
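# Hypothetical usage sketch (tag, pattern and path are illustrative only):
#
#   metrics = {}
#   extractTagFromFile('detailedroute__route__wirelength', metrics,
#                      'Total wire length = +(\S+) um.',
#                      'logs/nangate45/gcd/base/5_2_TritonRoute.log')
#   # metrics['detailedroute__route__wirelength'] -> float if found, else 'N/A'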
def extractGnuTime(prefix, jsonFile, file):
extractTagFromFile(
prefix +
'__runtime__total',
jsonFile,
'^Elapsed time: (\S+)\[h:\]min:sec.*',
file)
extractTagFromFile(
prefix +
'__cpu__total',
jsonFile,
'^Elapsed time:.*CPU time: user (\S+) .*',
file)
extractTagFromFile(
prefix +
'__mem__peak',
jsonFile,
'^Elapsed time:.*Peak memory: (\S+)KB.',
file)
#
# Extract Clock Latency, Skew numbers
# Need to extract these from native json
#
def get_skew_latency(file_name):
f = None
try:
f = open(file_name, 'r')
except IOError:
print('[WARN] Failed to open file:', file_name)
return ('ERR', 'ERR', 'ERR')
lines = f.readlines()
f.close()
latency_section = False
latency_max = latency_min = skew = 0.0
worst_latency_max = worst_latency_min = worst_skew = 0.0
for line in lines:
if len(line.split()) < 1:
continue
if line.startswith('Latency'):
latency_section = True
continue
if latency_section and len(line.split()) == 1:
latency_max = float(line.split()[0])
continue
if latency_section and len(line.split()) > 2:
latency_min = float(line.split()[0])
skew = float(line.split()[2])
if skew > worst_skew:
worst_skew = skew
worst_latency_max = latency_max
worst_latency_min = latency_min
latency_section = False
return(worst_latency_max, worst_latency_min, worst_skew)
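# Note on the parser above: it expects a section opened by a line starting with
# 'Latency', followed by a single-number line (taken as the max latency) and then a
# line whose first and third whitespace-separated fields are the min latency and the
# skew; the largest skew seen, together with its latencies, is what gets returned.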
#
# Extract clock info from sdc file
#
def read_sdc(file_name):
clkList = []
sdcFile = None
try:
sdcFile = open(file_name, 'r')
except IOError:
print('[WARN] Failed to open file:', file_name)
return clkList
lines = sdcFile.readlines()
sdcFile.close()
for line in lines:
if len(line.split()) < 2:
continue
if line.split()[0] == 'create_clock':
clk_idx = line.split().index('-name')
clkName = line.split()[clk_idx + 1]
period_idx = line.split().index('-period')
period = line.split()[period_idx + 1]
clk = '%s: %s' % (clkName, period)
clkList.append(clk)
clkList.sort()
return clkList
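# For reference, an SDC line such as
#   create_clock -name core_clock -period 5.0 [get_ports clk]
# is reported by read_sdc() as the entry 'core_clock: 5.0'.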
# Main
# =============================================================================
def is_git_repo(folder=None):
cmd = ['git', 'branch']
if folder is not None:
return call(cmd, stderr=STDOUT, stdout=open(os.devnull, 'w'),
cwd=folder) == 0
else:
return call(cmd, stderr=STDOUT, stdout=open(os.devnull, 'w')) == 0
def extract_metrics(cwd, platform, design, flow_variant, output, hier_json):
baseRegEx = '^{}\n^-*\n^{}'
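    # baseRegEx matches a titled report block: a header line, its dashed underline,
    # and then the line carrying the metric of interest.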
logPath = os.path.join(cwd, 'logs', platform, design, flow_variant)
rptPath = os.path.join(cwd, 'reports', platform, design, flow_variant)
resultPath = os.path.join(cwd, 'results', platform, design, flow_variant)
metrics_dict = defaultdict(dict)
metrics_dict['run__flow__generate_date'] = now.strftime('%Y-%m-%d %H:%M')
metrics_dict['run__flow__metrics_version'] = 'Metrics_2.1.2'
cmdOutput = check_output(['openroad', '-version'])
cmdFields = [x.decode('utf-8') for x in cmdOutput.split()]
metrics_dict['run__flow__openroad_version'] = str(cmdFields[0])
if len(cmdFields) > 1:
metrics_dict['run__flow__openroad_commit'] = str(cmdFields[1])
else:
metrics_dict['run__flow__openroad_commit'] = 'N/A'
if is_git_repo():
cmdOutput = check_output(['git', 'rev-parse', 'HEAD'])
cmdOutput = cmdOutput.decode('utf-8').strip()
else:
cmdOutput = 'not a git repo'
print('[WARN]', cmdOutput)
metrics_dict['run__flow__scripts_commit'] = cmdOutput
metrics_dict['run__flow__uuid'] = str(uuid())
metrics_dict['run__flow__design'] = design
metrics_dict['run__flow__platform'] = platform
platformDir = os.environ.get('PLATFORM_DIR')
if platformDir is None:
print('[INFO]', 'PLATFORM_DIR env variable not set')
cmdOutput = 'N/A'
elif is_git_repo(folder=platformDir):
cmdOutput = check_output(['git', 'rev-parse', 'HEAD'], cwd=platformDir)
cmdOutput = cmdOutput.decode('utf-8').strip()
else:
print('[WARN]', 'not a git repo')
cmdOutput = 'N/A'
metrics_dict['run__flow__platform_commit'] = cmdOutput
metrics_dict['run__flow__variant'] = flow_variant
extractTagFromFile('run__flow__platform__timing_units',
metrics_dict,
'^ time (\S+)',
logPath + '/2_1_floorplan.log')
extractTagFromFile('run__flow__platform__power_units',
metrics_dict,
'^ power (\S+)',
logPath + '/2_1_floorplan.log')
extractTagFromFile('run__flow__platform__distance_units',
metrics_dict,
'^ distance (\S+)',
logPath + '/2_1_floorplan.log')
# Synthesis
# =========================================================================
extractTagFromFile('synth__design__instance__count__stdcell',
metrics_dict,
'Number of cells: +(\S+)',
rptPath + '/synth_stat.txt')
extractTagFromFile('synth__design__instance__area__stdcell',
metrics_dict,
'Chip area for module.*: +(\S+)',
rptPath + '/synth_stat.txt')
extractGnuTime('synth', metrics_dict, logPath + '/1_1_yosys.log')
# Clocks
# =========================================================================
clk_list = read_sdc(resultPath + '/2_floorplan.sdc')
metrics_dict['constraints__clocks__count'] = len(clk_list)
metrics_dict['constraints__clocks__details'] = clk_list
# Floorplan
# =========================================================================
extractTagFromFile('floorplan__design__instance__count__stdcell__pre_restruct',
metrics_dict,
'number instances before restructure is (\d+)',
logPath + '/2_1_floorplan.log',
defaultNotFound=0,
required=False)
extractTagFromFile('floorplan__design__instance__count__stdcell__post_restruct',
metrics_dict,
'number instances after restructure is (\d+)',
logPath + '/2_1_floorplan.log',
defaultNotFound=0,
required=False)
extractTagFromFile('floorplan__design__instance__area__stdcell__pre_restruct',
metrics_dict,
'^Design area (\S+) u\^2',
logPath + '/2_1_floorplan.log',
occurrence=-2,
defaultNotFound=0,
required=False)
extractTagFromFile('floorplan__design__instance__area__stdcell__post_restruct',
metrics_dict,
'^Design area (\S+) u\^2',
logPath + '/2_1_floorplan.log',
defaultNotFound=0,
required=False)
extractTagFromFile('floorplan__timing__setup__tns',
metrics_dict,
baseRegEx.format('floorplan final report_tns',
'tns (\S+)'),
logPath + '/2_1_floorplan.log')
extractTagFromFile('floorplan__timing__setup__ws',
metrics_dict,
baseRegEx.format('floorplan final report_worst_slack',
'worst slack (\S+)'),
logPath + '/2_1_floorplan.log',
occurrence=0)
extractTagFromFile('floorplan__design__instance__area__stdcell',
metrics_dict,
baseRegEx.format('floorplan final report_design_area',
'^Design area (\S+) u\^2'),
logPath + '/2_1_floorplan.log')
extractTagFromFile('floorplan__design__instance__utilization',
metrics_dict,
baseRegEx.format('floorplan final report_design_area',
'^Design area .* (\S+)% utilization'),
logPath + '/2_1_floorplan.log')
extractTagFromFile('floorplan__design__io',
metrics_dict,
'Number of I/O +(\d+)',
logPath + '/3_2_place_iop.log')
extractTagFromFile('floorplan__design__instance__count__macros',
metrics_dict,
'Found (\S+) macros.',
logPath + '/2_4_mplace.log',
defaultNotFound=0)
extractGnuTime('floorplan', metrics_dict, logPath + '/2_4_mplace.log')
# Place
# =========================================================================
extractTagFromFile('globalplace__route__wirelength__estimated',
metrics_dict,
'Total wirelength: (\S+)',
logPath + '/3_1_place_gp.log')
extractTagFromFile('globalplace__timing__setup__tns',
metrics_dict,
baseRegEx.format('global place report_tns',
'tns (\S+)'),
logPath + '/3_1_place_gp.log')
extractTagFromFile('globalplace__timing__setup__ws',
metrics_dict,
baseRegEx.format('global place report_worst_slack',
'worst slack (\S+)'),
logPath + '/3_1_place_gp.log')
extractGnuTime('globalplace', metrics_dict, logPath + '/3_1_place_gp.log')
extractTagFromFile('placeopt__timing__setup__tns',
metrics_dict,
baseRegEx.format('resizer report_tns', 'tns (\S+)'),
logPath + '/3_3_resizer.log')
extractTagFromFile('placeopt__timing__setup__ws',
metrics_dict,
baseRegEx.format('resizer report_worst_slack',
'worst slack (\S+)'),
logPath + '/3_3_resizer.log')
extractTagFromFile('placeopt__design__instance__area',
metrics_dict,
baseRegEx.format('resizer report_design_area',
'^Design area (\S+) u\^2'),
logPath + '/3_3_resizer.log')
extractTagFromFile('placeopt__design__instance__utilization',
metrics_dict,
baseRegEx.format('resizer report_design_area',
'^Design area .* (\S+)% utilization'),
logPath + '/3_3_resizer.log')
extractTagFromFile('placeopt__timing__drv__max_slew',
metrics_dict,
baseRegEx.format('resizer max_slew_violation_count',
'max slew violation count (\S+)'),
logPath + '/3_3_resizer.log')
extractTagFromFile('placeopt__timing__drv__max_fanout',
metrics_dict,
baseRegEx.format('resizer max_fanout_violation_count',
'max fanout violation count (\S+)'),
logPath + '/3_3_resizer.log')
extractTagFromFile('placeopt__timing__drv__max_cap',
metrics_dict,
baseRegEx.format('resizer max_cap_violation_count',
'max cap violation count (\S+)'),
logPath + '/3_3_resizer.log')
extractTagFromFile('placeopt__design__instance__count__stdcell',
metrics_dict,
'^instance_count\n-*\n^(\S+)',
logPath + '/3_3_resizer.log')
extractGnuTime('placeopt', metrics_dict, logPath + '/3_3_resizer.log')
extractTagFromFile('detailedplace__design__violations',
metrics_dict,
'^\[INFO FLW-0012\] Placement violations (\S+).',
logPath + '/3_4_opendp.log', defaultNotFound=0)
extractTagFromFile('detailedplace__timing__setup__tns',
metrics_dict,
baseRegEx.format('detailed place report_tns',
'tns (\S+)'),
logPath + '/3_4_opendp.log')
extractTagFromFile('detailedplace__timing__setup__ws',
metrics_dict,
baseRegEx.format('detailed place report_worst_slack',
'worst slack (\S+)'),
logPath + '/3_4_opendp.log')
extractTagFromFile('detailedplace__design__instance__displacement__total',
metrics_dict,
'total displacement +(\d*\.?\d*)',
logPath + '/3_4_opendp.log')
extractTagFromFile('detailedplace__design__instance__displacement__mean',
metrics_dict,
'average displacement +(\d*\.?\d*)',
logPath + '/3_4_opendp.log')
    extractTagFromFile('detailedplace__design__instance__displacement__max',
metrics_dict,
'max displacement +(\d*\.?\d*)',
logPath + '/3_4_opendp.log')
extractTagFromFile('detailedplace__route__wirelength__estimated',
metrics_dict,
'legalized HPWL +(\d*\.?\d*)',
logPath + '/3_4_opendp.log')
extractGnuTime('detailedplace', metrics_dict, logPath + '/3_4_opendp.log')
# CTS
# =========================================================================
latency_max, latency_min, skew = get_skew_latency(logPath + '/4_1_cts.log')
metrics_dict['cts__clock__latency__min'] = latency_min
metrics_dict['cts__clock__latency__max'] = latency_max
metrics_dict['cts__clock__skew__worst'] = skew
extractTagFromFile('cts__timing__setup__tns__pre_repair',
metrics_dict,
baseRegEx.format('cts pre-repair report_tns',
'tns (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__drv__max_slew__pre_repair',
metrics_dict,
baseRegEx.format(
'cts pre-repair max_slew_violation_count',
'max slew violation count (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__drv__max_fanout__pre_repair',
metrics_dict,
baseRegEx.format(
'cts pre-repair max_fanout_violation_count',
'max fanout violation count (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__drv__max_cap__pre_repair',
metrics_dict,
baseRegEx.format(
'cts pre-repair max_cap_violation_count',
'max cap violation count (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__setup__ws__pre_repair',
metrics_dict,
baseRegEx.format('cts pre-repair report_worst_slack',
'worst slack (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__setup__tns__post_repair',
metrics_dict,
baseRegEx.format('cts post-repair report_tns',
'tns (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__setup__ws__post_repair',
metrics_dict,
baseRegEx.format('cts post-repair report_worst_slack',
'worst slack (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__drv__max_slew__post_repair',
metrics_dict,
baseRegEx.format(
'cts post-repair max_slew_violation_count',
'max slew violation count (\S+)'),
logPath + '/4_1_cts.log')
    extractTagFromFile('cts__timing__drv__max_fanout__post_repair',
metrics_dict,
baseRegEx.format(
'cts post-repair max_fanout_violation_count',
'max fanout violation count (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__drv__max_cap__post_repair',
metrics_dict,
baseRegEx.format(
'cts post-repair max_cap_violation_count',
'max cap violation count (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__setup__tns',
metrics_dict,
baseRegEx.format('cts final report_tns', 'tns (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__setup__ws',
metrics_dict,
baseRegEx.format('cts final report_worst_slack',
'worst slack (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__drv__max_slew',
metrics_dict,
baseRegEx.format('cts final max_slew_violation_count',
'max slew violation count (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__timing__drv__max_fanout',
metrics_dict,
baseRegEx.format('cts final max_fanout_violation_count',
'max fanout violation count (\S+)'),
logPath + '/4_1_cts.log')
extractTagFromFile('cts__design__instance__count__hold_buffer',
metrics_dict,
'Inserted (\d+) hold buffers',
logPath + '/4_1_cts.log',
defaultNotFound=0)
# Route
# =========================================================================
logFile = logPath + '/5_1_fastroute.log'
latency_max, latency_min, skew = get_skew_latency(logFile)
metrics_dict['globalroute__clock__latency__min'] = latency_min
metrics_dict['globalroute__clock__latency__max'] = latency_max
metrics_dict['globalroute__clock__skew__worst'] = skew
extractTagFromFile('globalroute__timing__setup__tns',
metrics_dict,
baseRegEx.format('global route report_tns',
'tns (\S+)'),
logPath + '/5_1_fastroute.log')
extractTagFromFile('globalroute__timing__setup__ws',
metrics_dict,
baseRegEx.format('global route report_worst_slack',
'worst slack (\S+)'),
logPath + '/5_1_fastroute.log')
extractTagFromFile('globalroute__timing__drv__max_slew',
metrics_dict,
baseRegEx.format(
'global route max_slew_violation_count',
'max slew violation count (\S+)'),
logPath + '/5_1_fastroute.log')
extractTagFromFile('globalroute__timing__drv__max_fanout',
metrics_dict,
baseRegEx.format(
'global route max_fanout_violation_count',
'max fanout violation count (\S+)'),
logPath + '/5_1_fastroute.log')
extractTagFromFile('globalroute__timing__drv__max_cap',
metrics_dict,
baseRegEx.format('global route max_cap_violation_count',
'max cap violation count (\S+)'),
logPath + '/5_1_fastroute.log')
extractTagFromFile('globalroute__timing__clock__slack',
metrics_dict,
'^\[INFO FLW-....\] Clock .* slack (\S+)',
logPath + '/5_1_fastroute.log')
extractTagFromFile('globalroute__timing__clock__period',
metrics_dict,
'^\[INFO FLW-....\] Clock .* period (\S+)',
logPath + '/5_1_fastroute.log')
extractGnuTime('globalroute', metrics_dict, logPath + '/5_1_fastroute.log')
extractTagFromFile('detailedroute__route__wirelength',
metrics_dict,
'Total wire length = +(\S+) um.',
logPath + '/5_2_TritonRoute.log')
extractTagFromFile('detailedroute__route__vias',
metrics_dict,
'Total number of vias = +(\S+).',
logPath + '/5_2_TritonRoute.log')
extractTagFromFile('detailedroute__route__drc_errors',
metrics_dict,
'(?i)violation',
rptPath + '/5_route_drc.rpt',
count=True, defaultNotFound=0)
extractGnuTime('detailedroute',
metrics_dict,
logPath + '/5_2_TritonRoute.log')
# Finish
# =========================================================================
logFile = logPath + '/6_report.log'
latency_max, latency_min, skew = get_skew_latency(logFile)
metrics_dict['finish__clock__latency__min'] = latency_min
metrics_dict['finish__clock__latency__max'] = latency_max
metrics_dict['finish__clock__skew__worst'] = skew
extractTagFromFile('finish__timing__setup__tns',
metrics_dict,
baseRegEx.format('finish report_tns', 'tns (\S+)'),
logPath + '/6_report.log')
extractTagFromFile('finish__timing__setup__ws',
metrics_dict,
baseRegEx.format('finish report_worst_slack',
'worst slack (\S+)'),
logPath + '/6_report.log')
extractTagFromFile('finish__timing__drv__max_slew',
metrics_dict,
baseRegEx.format('finish max_slew_violation_count',
'max slew violation count (\S+)'),
logPath + '/6_report.log')
extractTagFromFile('finish__timing__drv__max_fanout',
metrics_dict,
baseRegEx.format('finish max_fanout_violation_count',
'max fanout violation count (\S+)'),
logPath + '/6_report.log')
extractTagFromFile('finish__timing__drv__max_cap',
metrics_dict,
baseRegEx.format('finish max_cap_violation_count',
'max cap violation count (\S+)'),
logPath + '/6_report.log')
extractTagFromFile('finish__power__internal__total',
metrics_dict,
'Total +(\S+) +\S+ +\S+ +\S+ +\S+',
logPath + '/6_report.log')
    extractTagFromFile('finish__power__switching__total',
metrics_dict,
'Total +\S+ +(\S+) +\S+ +\S+ +\S+',
logPath + '/6_report.log')
extractTagFromFile('finish__power__leakage__total',
metrics_dict,
'Total +\S+ +\S+ +(\S+) +\S+ +\S+',
logPath + '/6_report.log')
extractTagFromFile('finish__power__total',
metrics_dict,
'Total +\S+ +\S+ +\S+ +(\S+) +\S+',
logPath + '/6_report.log')
extractTagFromFile('finish__design__instance__area',
metrics_dict,
baseRegEx.format('finish report_design_area',
'^Design area (\S+) u\^2'),
logPath + '/6_report.log')
extractTagFromFile('finish__design__instance__utilization',
metrics_dict,
baseRegEx.format('finish report_design_area',
'^Design area .* (\S+)% utilization'),
logPath + '/6_report.log')
extractGnuTime('finish', metrics_dict, logPath + '/6_report.log')
# Accumulate time
# =========================================================================
failed = False
total = timedelta()
for key in metrics_dict:
if key.endswith('__runtime__total'):
# Big try block because Hour and microsecond is optional
try:
t = datetime.strptime(metrics_dict[key], '%H:%M:%S.%f')
except ValueError:
try:
t = datetime.strptime(metrics_dict[key], '%M:%S.%f')
except ValueError:
try:
t = datetime.strptime(metrics_dict[key], '%H:%M:%S')
except ValueError:
try:
t = datetime.strptime(metrics_dict[key], '%M:%S')
except ValueError:
failed = True
break
delta = timedelta(hours=t.hour, minutes=t.minute,
seconds=t.second, microseconds=t.microsecond)
total += delta
if failed:
metrics_dict['total_time'] = 'ERR'
else:
metrics_dict['total_time'] = str(total)
metrics_df = pd.DataFrame(list(metrics_dict.items()))
col_index = metrics_df.iloc[0][1] + '__' + metrics_df.iloc[1][1]
metrics_df.columns = ['Metrics', col_index]
if hier_json:
# Convert the Metrics dictionary to hierarchical format by stripping
# the stage as a 'key'
hier_dict = defaultdict(dict)
for metric in metrics_dict:
key_list = metric.split('__', 1)
if len(key_list) == 2:
hier_dict[key_list[0]][key_list[1]] = metrics_dict[metric]
metrics_dict = hier_dict
with open(output, 'w') as resultSpecfile:
json.dump(metrics_dict, resultSpecfile, indent=2)
return metrics_dict, metrics_df
args = parse_args()
now = datetime.now()
if args.design == 'all_designs':
print('List of designs')
rootdir = './logs'
all_df = pd.DataFrame()
all_d = []
flow_variants = args.flowVariant.split()
cwd = os.getcwd()
for platform_it in os.scandir(rootdir):
if not platform_it.is_dir():
continue
plt = platform_it.name
for design_it in os.scandir(platform_it.path):
if not design_it.is_dir():
continue
for variant in flow_variants:
des = design_it.name
print(plt, des, variant)
file = '/'.join(['reports', plt, des, variant, 'metrics.json'])
metrics, df = extract_metrics(cwd, plt, des, variant,
file, args.hier)
all_d.append(metrics)
if all_df.shape[0] == 0:
all_df = df
else:
all_df = all_df.merge(df, on='Metrics', how='inner')
with open('metrics.json', 'w') as outFile:
json.dump(all_d, outFile, indent=2)
with open('metrics.html', 'w') as f:
f.write(all_df.to_html())
else:
metrics_dict, metrics_df = extract_metrics(args.flowPath, args.platform,
args.design, args.flowVariant,
args.output, args.hier)
``` |
{
"source": "jjchinga/HAI_ligthbot",
"score": 2
} |
#### File: jjchinga/HAI_ligthbot/Scene.py
```python
class Scene:
    def __init__(self, pid):
        self.pid = pid
        self.SceneHeight = 4
        self.SceneWidth = 4
``` |
{
"source": "jjchshayan/AparatView",
"score": 3
} |
#### File: jjchshayan/AparatView/app.py
```python
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
import pickle
from proxylist import ProxyList
pl = ProxyList()
pl.load_file('./web/proxy.txt')
proxy = pl.random().address()
print(proxy[1:-5])
from selenium.webdriver.chrome.options import Options
# from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.proxy import Proxy, ProxyType
prox = Proxy()
prox.proxy_type = ProxyType.MANUAL
prox.http_proxy = proxy
prox.socks_proxy = proxy
prox.ssl_proxy = proxy
capabilities = webdriver.DesiredCapabilities.CHROME
prox.add_to_capabilities(capabilities)
# firefox_capabilities = DesiredCapabilities.FIREFOX
# firefox_capabilities['marionette'] = True
# firefox_capabilities['binary'] = '/usr/bin/firefox'
# chrome_options = Options()
# chrome_options.add_argument("user-data-dir=selenium")
# driver = webdriver.Firefox(capabilities=firefox_capabilities)
# driver = webdriver.Firefox(capabilities=firefox_capabilities ,firefox_options=chrome_options)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=100,100")
# chrome_options.add_argument("user-data-dir=selenium")
def my_proxy(PROXY_HOST, PROXY_PORT):
fp = webdriver.FirefoxProfile()
# Direct = 0, Manual = 1, PAC = 2, AUTODETECT = 4, SYSTEM = 5
    print(PROXY_PORT)
    print(PROXY_HOST)
fp.set_preference("network.proxy.type", 1)
fp.set_preference("network.proxy.http", PROXY_HOST)
fp.set_preference("network.proxy.http_port", int(PROXY_PORT))
fp.set_preference("general.useragent.override", "whater_useragent")
fp.update_preferences()
return webdriver.Firefox(firefox_profile=fp)
webdriver.DesiredCapabilities.CHROME['proxy'] = {
"httpProxy": proxy,
"ftpProxy": proxy,
"sslProxy": proxy,
"noProxy": None,
"proxyType": "MANUAL",
"class": "org.openqa.selenium.Proxy",
"autodetect": False
}
# driver = webdriver.Chrome(chrome_options=chrome_options,desired_capabilities=capabilities)
# driver = my_proxy("172.16.17.32","80")
# you have to use remote, otherwise you'll have to code it yourself in python to
# driver = webdriver.Remote("https://www.aparat.com/v/aQfED", webdriver.DesiredCapabilities.CHROME)
# import browsercookie
#
# cookies1 = browsercookie.firefox()
# driver.add_cookie(cookies)
import time
import threading
exitFlag = 0
class myThread (threading.Thread):
def __init__(self, threadID, name, counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self):
print ("Starting " + self.name)
i = 0
driver = webdriver.Chrome(options=chrome_options)
# driver.set_page_load_timeout(10000000)
# driver.implicitly_wait(3000)
# driver.get("https://www.aparat.com/Gho3tShadow/live")
# time.sleep(100)
while 1:
driver = webdriver.Chrome(options=chrome_options)
# print("a")
# driver.get("https://www.khodrobank.com/TestDrive/12574/6-%D8%B3%DB%8C%D9%84%D9%86%D8%AF%D8%B1-%DA%98%D8%A7%D9%BE%D9%86%DB%8C-%D8%AF%D8%B1-%D8%A8%D8%B1%D8%A7%D8%A8%D8%B1-4-%D8%B3%DB%8C%D9%84%D9%86%D8%AF%D8%B1-%DA%A9%D8%B1%D9%87-%D8%A7%DB%8C%D8%9B-%D8%A8%D8%B1%D8%B1%D8%B3%DB%8C-%D8%AF%D9%88-%DA%A9%D8%B1%D8%A7%D8%B3-%D8%A7%D9%88%D9%88%D8%B1-%D8%AF%D8%B3%D8%AA-%D8%AF%D9%88%D9%85-%D8%A8%D8%A7%D8%B2%D8%A7%D8%B1")
driver.get("https://www.aparat.com/v/aQfED")
# driver.get("https://cafebazaar.ir/app/shayan.app.applock/")
time.sleep(5)
driver.close()
print(i)
i+=1
print ("Exiting " + self.name)
threads = []
for o in range(0, 10):
# for o in range(0, 2):
threads.append(myThread(1, "Thread-1", 1))
threads[o].start()
# for o in range(0, 10):
# # Create new threads
#
#
# thread2 = myThread(2, "Thread-2", 2,webdriver.Chrome())
#
# # Start new Threads
# thread1.start()
# thread2.start()
#
# # driver =
# driver.append(webdriver.Chrome())
# driver[o].get("https://www.aparat.com/v/aQfED")
# print("AAA1")
#
# print("AAA2")
# time.sleep(2)
# for ip in driver:
# ip.close()
# img = driver.find_element(By.ID,"cimg1").find_element(By.TAG_NAME,"img")
# src = img.get_attribute("src");
# number1 = src.split("/")[-1].split(".")[0]
#
# img = driver.find_element(By.ID,"cimg2").find_element(By.TAG_NAME,"img")
# src = img.get_attribute("src");
# number2 = src.split("/")[-1].split(".")[0]
#
# img = driver.find_element(By.ID,"cimg3").find_element(By.TAG_NAME,"img")
# src = img.get_attribute("src");
# number3 = src.split("/")[-1].split(".")[0]
#
# img = driver.find_element(By.ID,"cimg4").find_element(By.TAG_NAME,"img")
# src = img.get_attribute("src");
# number4 = src.split("/")[-1].split(".")[0]
# mynumber = int(number1+number2+number3+number4)
#
# # driver.execute_script("document.getElementsByTagName('input')[0].value='jjj'")
# driver.execute_script("document.getElementsByTagName('input')[0].value="+str(mynumber)+"")
# driver.execute_script("javascript:dosub()")
# time.sleep(5)
``` |
{
"source": "jjchshayan/heroku",
"score": 3
} |
#### File: jjchshayan/heroku/app.py
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from telegram.ext import Updater
from telegram import bot
updater = Updater(token='<KEY>')
dispatcher = updater.dispatcher
import logging
import requests
state = 1
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def start(bot, update):
    global state  # update the module-level flag rather than creating a local variable
    # Persian text: "Hello, welcome. Please add the photo you have taken."
    bot.send_message(chat_id=update.message.chat_id, text="سلام خوش آمدید لطفا عکس گرفته شده را اضافه نمایید")
    state = 2
from telegram.ext import CommandHandler
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
def echo(bot, update):
#my_id = 504335145
try:
# print(update)
user_id = update['message']['chat']['id']
user_name = update['message']['chat']['first_name']
file_id = bot.get_file(update['message']['photo'][2]['file_id'])
url =file_id["file_path"]
r = requests.post("http://shayan2020.ir/Api/Telegram/UploadData.php", data={'url': url,'filename':str(user_id)+'_'+str(user_name)})
if(r.text =="ok"):
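            # Persian text: "Thank you. To add another photo, type /start again."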
bot.send_message(chat_id=update.message.chat_id, text="با تشکر از شما برای اضافه کردن عکسی دیگر دگمه /start را مجددا تایپ نمایید")
else:
print(r.text)
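            # Persian text: "Error, please try again."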
bot.send_message(chat_id=update.message.chat_id, text="خطا لطفا مجددا تلاش نمایید")
except:
print(update)
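        # Persian text: "Please add photos only."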
bot.send_message(chat_id=update.message.chat_id, text="لطفا فقط عکس اضافه کنید")
from telegram.ext import MessageHandler, Filters
echo_handler = MessageHandler(Filters.all, echo)
dispatcher.add_handler(echo_handler)
# def caps(bot, update, args=''):
# text_caps = ' '.join(args).upper()
# bot.send_message(chat_id=update.message.chat_id, text=text_caps)
#
#
# caps_handler = CommandHandler('caps', caps, pass_args=True)
# dispatcher.add_handler(caps_handler)
# from telegram import InlineQueryResultArticle, InputTextMessageContent
#
#
# def inline_caps(bot, update):
# query = update.inline_query.query
# if not query:
# return
# results = list()
# results.append(
# InlineQueryResultArticle(
# id=query.upper(),
# title='Caps',
# input_message_content=InputTextMessageContent(query.upper())
# )
# )
# bot.answer_inline_query(update.inline_query.id, results)
# from telegram.ext import InlineQueryHandler
#
# inline_caps_handler = InlineQueryHandler(inline_caps)
# dispatcher.add_handler(inline_caps_handler)
def unknown(bot, update):
bot.send_message(chat_id=update.message.chat_id, text="Sorry, I didn't understand that command.")
unknown_handler = MessageHandler(Filters.command, unknown)
dispatcher.add_handler(unknown_handler)
#
# TOKEN = '<KEY>'
# HOST = 'shayantt.herokuapp.com' # Same FQDN used when generating SSL Cert
# PORT = 8443
# updater.start_webhook(listen="0.0.0.0",
# port=PORT,
# # url_path=TOKEN)
# updater.bot.set_webhook("https://shayantt.herokuapp.com/" + TOKEN)
# updater.idle()
updater.start_polling()
``` |
{
"source": "jjclavijo/nyvred",
"score": 3
} |
#### File: nivelacion/.ipynb_checkpoints/lecturas-checkpoint.py
```python
from math import sin,cos,tan
import pandas as pd
import numpy as np
class Lectura(object):
def __init__(self):
return None
@classmethod
def trigonometrica(cls,origen,destino,distancia,angulo,hi=0,hp=0):
self = cls()
self.origen = origen
self.destino = destino
self.w = distancia ** -2
self.coef = {}
self.coef[self.origen] = -1
self.coef[self.destino] = 1
self.angulo = angulo * np.pi / 180.
self.hi = hi
self.hp = hp
self.type = 'Trigonometrica'
self.distancia = distancia
return self
@classmethod
def GPS(cls,origen,destino,w,lectura,hi=0,hp=0):
self = cls()
self.origen = origen
self.destino = destino
self.w = w
self.coef = {}
self.coef[self.origen] = -1
self.coef[self.destino] = 1
self.hi = hi
self.hp = hp
self.type = 'GPS'
self.l = lectura
return self
@classmethod
def fijo(cls,punto,lectura,w=100):
self = cls()
self.origen = punto
self.destino = None
self.w = w
self.coef = {}
self.coef[self.origen] = 1
self.type = 'fijo'
self.l = lectura
return self
def lectura(self,**kwargs):
ix_err=kwargs.get('ix_err',0)
col_error=kwargs.get('col_error',0)
offset=kwargs.get('offset',0)
if self.type == 'Trigonometrica':
angulo = self.angulo+ix_err
if angulo > np.pi:
angulo = 2*np.pi - angulo
angulo = np.pi/2 - angulo
return tan(angulo)*self.distancia -\
((self.hp-self.hi))
if self.type == 'GPS':
return self.l
if self.type == 'fijo':
return self.l + offset
def coeficientes(self):
return self.coef
def peso(self):
return self.w
class Red(object):
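    """A levelling network: collects observations and solves the weighted
    least-squares adjustment for the point heights."""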
def __init__(self):
self.lecturas = []
def get_a(self):
series = [pd.Series(i.coeficientes()) for i in self.lecturas]
df = pd.DataFrame(series)
return np.nan_to_num(df.values), list(df.columns)
def get_p(self):
ws = [i.peso() for i in self.lecturas]
return np.diag(ws)
def get_l(self,**kwargs):
ls = [i.lectura(**kwargs) for i in self.lecturas]
return np.array(ls)
def calc_x(self,**kwargs):
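        # Weighted least-squares solution of the normal equations: x = (A^T P A)^-1 A^T P l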
a,names = self.get_a()
p = self.get_p()
l = self.get_l(**kwargs)
atp = np.dot(a.T,p)
atpa = np.dot(atp,a)
atpl = np.dot(atp,l)
atpa_ = np.linalg.inv(atpa)
x = np.dot(atpa_,atpl)
return x,names
def get_v(self,**kwargs):
x,n = self.calc_x(**kwargs)
a,n = self.get_a()
l = self.get_l(**kwargs)
v = np.dot(a,x) - l
return v
def get_sum_v(self,**kwargs):
return np.sum(self.get_v(**kwargs) ** 2)
``` |
{
"source": "jjcordano/xgb_portfolio",
"score": 2
} |
#### File: xgb_portfolio/builder/helpers.py
```python
import os
import numpy as np
import pandas as pd
import xgboost
from sklearn.metrics import accuracy_score
import streamlit as st
## Companies info
companies_dict = {"Phillips":'PHIA',
'Volkswagen':'VOW3',
'ING':'INGA',
'Credit Suisse':'CS',
'L\'Oreal':'OR',
'Deutsche Telekom':'DTE',
'BMW':'BMW',
'Kering':'KER',
'BASF':'BAS',
'Santander':'SAN',
'Air Liquide':'AI',
'Vivendi':'VIV',
'Enel':'ENEL',
'Kone':'KNEBV',
'Iberdrola':'IBE',
'Vonovia':'VNA',
'Eni':'ENI',
'Allianz':'ALV',
'<NAME>':'RI',
'Siemens':'SIE',
'Amadeus':'AMS',
'Banco Santander':'SAN_MC',
'Vinci':'DG',
'Anheuser-Busch InBev':'ABI',
'Schneider Electric':'SU',
'Linde':'LIN',
'EssilorLuxottica':'EL',
'Adyen':'ADYEN',
'TotalEnergies':'FP',
'Deutsche Post':'DPW',
'BNP Paribas':'BNP',
'Engie':'ENGI',
'Koninklijke A<NAME>':'AD',
'Muenchener RV':'MUV2',
'CRH':'CRH',
'Intesa Sanpaolo':'ISP',
'Adidas':'ADS',
'Danone':'BN',
'Daimler':'DAI',
'Bayer':'BAYN',
'Sanofi':'SAF',
'Deutsche Boerse':'DB1',
'SAP':'SAP',
'ASML':'ASML',
'Airbus':'AIR',
'LVMH':'MC'}
## Constants
DAYS_IN_YEAR = 252
DEFAULT_XGB_PARAMS = {'n_estimators': 200,
'learning_rate': 0.1,
'max_depth': 5,
'min_child_weight': 5,
'subsample': 0.8,
'colsample_bytree': 0.8
}
## Paths
parent_path = os.path.dirname(os.path.dirname(__file__))
DATAFOLDER = r'data/'
FILENAME_ESG_METRICS = r'data_fi_v2.csv'
FILENAME_PRICES = r'prices.csv'
FILENAME_EUROSTOXX600 = r'STOXX600.csv'
FILENAME_RUSSELL3000 = r'RUSSELL.csv'
FILENAME_RF_RATE = r'FR_TBill_10y.csv'
benchmark_path_dict = {
'eurostoxx_600' : FILENAME_EUROSTOXX600,
'russell_3000' : FILENAME_RUSSELL3000
}
## Lists and dictionaries
X_columns = ['NUMBER_EMPLOYEES_CSR','AUDIT_COMMITTEE_MEETINGS', 'SAY_PAY_SUPPORT_LEVEL',
'TOT_OTHER_COMP_AW_TO_CEO_&_EQUIV', 'TOTAL_EXEC_PAY_AS_PCT_OPEX',
'TOT_SALARIES_PAID_TO_CEO_&_EQUIV', 'TOT_SALARIES_&_BNS_PD_TO_EXECS',
'TOT_N_EQTY_INCENT_GIVEN_TO_EXECS', 'SAY_PAY_NUMBER_OF_VOTES_FOR',
'TOT_EXEC_PAY_AS_PCT_SG&A_NET_R&D', 'TOT_OPTION_AWARDS_GIVEN_TO_EXECS',
'TOT_EXEC_PAY_AS_PCT_TOT_PSNL_EXP', 'TOT_N_EQT_INCENT_GVN_TO_CEO_&_EQ',
'PCT_BOD_COMP_PD_IN_STK_AWD', 'NUM_EXECUTIVE_CHANGES',
'AVERAGE_BOD_TOTAL_COMPENSATION', 'ESG_DISCLOSURE_SCORE',
'CFO_TENURE_AS_OF_FY_END', 'CHG_OF_CTRL_BFIT_GOLD_CHUTE_AGR',
'CLAWBACK_PROVISION_FOR_EXEC_COMP', 'GENDER_PAY_GAP_BREAKOUT',
'BLANK_CHECK_PREFERRED_AUTHORIZED']
Y_label = 'Label'
benchmark_dict = {
'eurostoxx_600': 'Eurostoxx 600',
'russell_3000': 'Russell 3000'
}
## Helper functions
def get_annual_returns_col(data,
daily_prices):
prices = daily_prices
data['Price Y'] = np.nan
data['Price Y+1'] = np.nan
data['Return Y+1'] = np.nan
for row in data.index:
s = data.loc[row, 'Ticker']
y0 = data.loc[row, 'Year']
y1 = data.loc[row,'Year']+1
prices0_temp=prices[prices["Year"] == y0]
prices1_temp=prices[prices["Year"] == y1]
prices_y0_s=list(prices0_temp[s])
prices_y1_s=list(prices1_temp[s])
try:
p0 = 0
p1 = 0
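            # Walk backwards from the end of each year's price series until a
            # non-null value is found, i.e. take the last available price of the year.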
i = -1
while pd.isnull(prices_y0_s[i]):
i=i-1
p0=prices_y0_s[i]
i = -1
while pd.isnull(prices_y1_s[i]):
i=i-1
p1=prices_y1_s[i]
data.loc[row,'Price Y'] = p0
data.loc[row,'Price Y+1'] = p1
data.loc[row,'Return Y+1'] = (p1-p0)/p0
except IndexError:
pass
data = data[['Date','Entreprises'] + X_columns + ['Ticker','Year','Price Y','Price Y+1','Return Y+1']]
## Delete rows where Return Y+1 is null
return data[data['Return Y+1'].notna()]
def get_benchmark_returns(data,
daily_benchmark_levels_path):
## Import and merge chosen index year-end levels
index = pd.read_csv(daily_benchmark_levels_path)
index['Date'] = pd.to_datetime(index['Date'])
index['Year'] = pd.DatetimeIndex(index['Date']).year
index.set_index('Date', inplace=True)
index_y = pd.DataFrame(index = list(range(2000,2021)))
index_y['levelYearEnd'] = np.nan
for row in index_y.index:
y = row
index_temp=index[index["Year"] == y]
level_y_temp=list(index_temp['Adj Close'])
try:
l = 0
i = -1
while pd.isnull(level_y_temp[i]):
i = i-1
l = level_y_temp[i]
index_y.loc[row,'levelYearEnd'] = l
except IndexError:
pass
index_y['Benchmark Returns'] = index_y['levelYearEnd'].pct_change()
index_y.index = index_y.index - 1
index_y.rename({"Benchmark Returns" : "Benchmark Returns Y+1"}, inplace = True, axis = 1)
data = data.merge(index_y, left_on = 'Year', right_index = True)
### Dependant variable : RUSSELL Excess Returns Y+1
data['Excess Returns Y+1'] = data['Return Y+1'] - data['Benchmark Returns Y+1']
data = data[['Date','Entreprises'] + X_columns + ['Ticker','Year','Price Y','Price Y+1','Return Y+1','Excess Returns Y+1']]
return data, index
def get_russell3000_returns(data,
russell3000_path):
## Import and merge RUSSELL 3000 year-end levels
russell = pd.read_csv(russell3000_path)
russell['Date'] = pd.to_datetime(russell['Date'])
russell['Year'] = [d.year for d in russell['Date']]
russell_y = pd.DataFrame(index = list(range(2000,2021)))
russell_y['levelYearEnd'] = np.nan
for row in russell_y.index:
y = row
russell_temp=russell[russell["Year"] == y]
level_y_temp=list(russell_temp['Adj Close'])
try:
l = 0
i = -1
while pd.isnull(level_y_temp[i]):
i = i-1
l = level_y_temp[i]
russell_y.loc[row,'levelYearEnd'] = l
except IndexError:
pass
russell_y['RUSSELL 3000 Returns'] = russell_y['levelYearEnd'].pct_change()
russell_y.index = russell_y.index - 1
russell_y.rename({"RUSSELL 3000 Returns":"RUSSELL 3000 Returns Y+1"},inplace = True,axis = 1)
data = pd.merge(data,russell_y,left_on = 'Year',right_index=True)
### Dependant variable : RUSSELL Excess Returns Y+1
data['Excess Returns Y+1 (RUSSELL)'] = data['Return Y+1'] - data['RUSSELL 3000 Returns Y+1']
    data.drop(['levelYearEnd', 'Unnamed: 24', 'RUSSELL 3000 Returns Y+1'], axis=1, inplace=True)
    return data
def get_eurostoxx600_returns(data,
eurostoxx600_path):
## Import and merge EUROSTOXX 600 year-end levels
eurostoxx600 = pd.read_csv(eurostoxx600_path)
eurostoxx600['Date'] = pd.to_datetime(eurostoxx600['Date'])
eurostoxx600['Year'] = [d.year for d in eurostoxx600['Date']]
eurostoxx600_y = pd.DataFrame(index = list(range(2000,2021)))
eurostoxx600_y['levelYearEnd'] = np.nan
for row in eurostoxx600_y.index:
y = row
eurostoxx600_temp = eurostoxx600[eurostoxx600["Year"] == y]
level_y_temp = list(eurostoxx600_temp['Adj Close'])
try:
l = 0
i = -1
while pd.isnull(level_y_temp[i]):
i = i-1
l = level_y_temp[i]
eurostoxx600_y.loc[row,'levelYearEnd'] = l
except IndexError:
pass
eurostoxx600_y['EUROSTOXX 600 Returns'] = eurostoxx600_y['levelYearEnd'].pct_change()
eurostoxx600_y.index = eurostoxx600_y.index-1
eurostoxx600_y.rename({"EUROSTOXX 600 Returns":"EUROSTOXX 600 Returns Y+1"},inplace = True,axis = 1)
data = pd.merge(data,eurostoxx600_y,left_on = 'Year',right_index=True)
data.drop('levelYearEnd',axis = 1,inplace = True)
### Dependant variable 2 : EUROSTOXX Excess Returns Y+1
data['Excess Returns Y+1 (EUROSTOXX)'] = data['Return Y+1'] - data['EUROSTOXX 600 Returns Y+1']
    data.drop(['Unnamed: 24', 'EUROSTOXX 600 Returns Y+1'], axis=1, inplace=True)
    return data
def get_label(excess_returns,
short_lim,
long_lim):
label = 0
if excess_returns < short_lim:
label = 0
elif excess_returns > long_lim:
label = 2
else:
label = 1
return label
def get_train_test(data,
y,
X_columns,
Y_label):
years = list(range(y-7,y))
dataset = data[data['Year'].isin([y])]
X_train = data[data['Year'].isin(years)][X_columns]
X_test = data[data['Year'].isin([y])][X_columns]
Y_train = data[data['Year'].isin(years)][Y_label]
Y_test = data[data['Year'].isin([y])][Y_label]
return dataset, X_train, X_test, Y_train, Y_test
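# Note: training uses a rolling window of the seven years preceding y (y-7 .. y-1)
# and the model is evaluated on year y itself.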
def xgb_predict(xgb_params,
X_train,
X_test,
Y_train,
Y_test,
print_accuracy):
model = xgboost.XGBClassifier(**xgb_params, use_label_encoder = False, verbosity = 0)
model.fit(X_train,Y_train)
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
pred_proba_short_list = model.predict_proba(X_test)[:,0]
pred_proba_long_list = model.predict_proba(X_test)[:,2]
accuracy = accuracy_score(Y_test, predictions)
if print_accuracy:
print("Model Accuracy : %.2f%%" % (accuracy * 100.0))
print("------------------------------------")
return model, predictions, pred_proba_short_list, pred_proba_long_list, accuracy
def get_portfolio_weights(pred,
price0,
nb_short,
nb_long,
short_limit=10):
if pred==0:
if price0 < short_limit: # Stock not shorted if price at beginning of year below limit
return 0
else:
return (-1/nb_short)
elif pred==1:
return 0
else:
return (1/nb_long)
def get_portfolio_weights_wProba(pred,
price0,
sum_proba_short,
sum_proba_long,
val_proba_short,
val_proba_long,
short_limit=10):
if pred == 0:
if price0 < short_limit: # Stock not shorted if price at beginning of year below limit
return 0
else:
return (- val_proba_short / sum_proba_short)
elif pred == 1:
return 0
else:
return (val_proba_long / sum_proba_long)
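# Worked example with hypothetical numbers: two shorted names with class-0
# probabilities 0.6 and 0.4 (sum_proba_short = 1.0) get weights -0.6 and -0.4, while
# a single long with class-2 probability 0.8 (sum_proba_long = 0.8) gets +1.0, so the
# short and long legs each sum to one unit of exposure.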
def get_weight_col(year_dataset,
short_limit,
pred_proba_short_list,
pred_proba_long_list,
proba_weighted):
if proba_weighted:
year_dataset['Pred_proba_short'] = pred_proba_short_list
year_dataset['Pred_proba_long'] = pred_proba_long_list
df = year_dataset
df2 = df[df['Price Y'] > short_limit]
sum_proba_short = df2[df2['Predictions'] == 0]['Pred_proba_short'].sum()
sum_proba_long = df[df['Predictions'] == 2]['Pred_proba_long'].sum()
year_dataset['Weight'] = year_dataset.apply(lambda x: get_portfolio_weights_wProba(x['Predictions'],
x['Price Y'],
sum_proba_short,
sum_proba_long,
x['Pred_proba_short'],
x['Pred_proba_long'],
short_limit), axis=1)
else:
df = year_dataset
df2 = df[df['Price Y'] > short_limit]
nb_short = len(df2[df2['Predictions'] == 0])
nb_long = len(df[df['Predictions'] == 2])
year_dataset['Weight'] = year_dataset.apply(lambda x: get_portfolio_weights(x['Predictions'],
x['Price Y'],
nb_short,
nb_long,
short_limit), axis=1)
return year_dataset
def get_rf_rate(year,
parent_path = parent_path,
datafolder = DATAFOLDER,
filename_rf_rate = FILENAME_RF_RATE):
rf_rates = pd.read_csv(os.path.join(parent_path, datafolder, filename_rf_rate))
rf_rates['Date'] = pd.to_datetime(rf_rates['Date'])
rf_rates['Year'] = pd.DatetimeIndex(rf_rates['Date']).year
rf_year = rf_rates[rf_rates['Year'] == year]
return float(rf_year[rf_year['Date'] == rf_year['Date'].min()]['10y_FR_Treasury_Bond_Rate'])
def update_params(n_estimators=200, max_depth=5):
return {'n_estimators': n_estimators,
'learning_rate': 0.1,
'max_depth': max_depth,
'min_child_weight': 5,
'subsample': 0.8,
'colsample_bytree': 0.8
}
@st.cache(suppress_st_warning=True)
def get_year_end_prices(df_prices):
index = set([d.year for d in df_prices.index])
columns = list(companies_dict.values())
annual_prices = pd.DataFrame(columns=columns, index=index)
# Populate annual_prices dataframe
for year in annual_prices.index:
for stock in annual_prices.columns:
temp_df = df_prices[df_prices["Year"] == year]
temp_prices_s = list(temp_df[stock])
try:
stock_price_year_end = 0
i = -1
while pd.isnull(temp_prices_s[i]):
i=i-1
stock_price_year_end=temp_prices_s[i]
annual_prices.loc[year,stock] = stock_price_year_end
except IndexError:
pass
annual_prices.index = annual_prices.index.map(str)
return annual_prices
```
#### File: xgb_portfolio/code/helpers.py
```python
import numpy as np
import pandas as pd
import xgboost
from sklearn.metrics import accuracy_score
## Constants
DAYS_IN_YEAR = 252
DEFAULT_XGB_PARAMS = {'n_estimators': 200,
'learning_rate': 0.1,
'max_depth': 5,
'min_child_weight': 5,
'subsample': 0.8,
'colsample_bytree': 0.8
}
## Paths
PATH_DATAFOLDER = r'./data/'
PATH_ESG_METRICS = r'data_fi_v2.csv'
PATH_PRICES = r'prices.csv'
PATH_EUROSTOXX600 = r'STOXX600.csv'
PATH_RUSSELL3000 = r'RUSSELL.csv'
PATH_RF_RATE = r'FR_TBill_10y.csv'
benchmark_path_dict = {
'eurostoxx_600' : PATH_DATAFOLDER + PATH_EUROSTOXX600,
'russell_3000' : PATH_DATAFOLDER + PATH_RUSSELL3000
}
## Lists and dictionaries
X_columns = ['NUMBER_EMPLOYEES_CSR','AUDIT_COMMITTEE_MEETINGS', 'SAY_PAY_SUPPORT_LEVEL',
'TOT_OTHER_COMP_AW_TO_CEO_&_EQUIV', 'TOTAL_EXEC_PAY_AS_PCT_OPEX',
'TOT_SALARIES_PAID_TO_CEO_&_EQUIV', 'TOT_SALARIES_&_BNS_PD_TO_EXECS',
'TOT_N_EQTY_INCENT_GIVEN_TO_EXECS', 'SAY_PAY_NUMBER_OF_VOTES_FOR',
'TOT_EXEC_PAY_AS_PCT_SG&A_NET_R&D', 'TOT_OPTION_AWARDS_GIVEN_TO_EXECS',
'TOT_EXEC_PAY_AS_PCT_TOT_PSNL_EXP', 'TOT_N_EQT_INCENT_GVN_TO_CEO_&_EQ',
'PCT_BOD_COMP_PD_IN_STK_AWD', 'NUM_EXECUTIVE_CHANGES',
'AVERAGE_BOD_TOTAL_COMPENSATION', 'ESG_DISCLOSURE_SCORE',
'CFO_TENURE_AS_OF_FY_END', 'CHG_OF_CTRL_BFIT_GOLD_CHUTE_AGR',
'CLAWBACK_PROVISION_FOR_EXEC_COMP', 'GENDER_PAY_GAP_BREAKOUT',
'BLANK_CHECK_PREFERRED_AUTHORIZED']
Y_label = 'Label'
benchmark_dict = {
'eurostoxx_600': 'Eurostoxx 600',
'russell_3000': 'Russell 3000'
}
## Helper functions
def get_annual_returns_col(data,
daily_prices):
prices = daily_prices
data['Price Y'] = np.nan
data['Price Y+1'] = np.nan
data['Return Y+1'] = np.nan
for row in data.index:
s = data.loc[row, 'Ticker']
y0 = data.loc[row, 'Year']
y1 = data.loc[row,'Year']+1
prices0_temp=prices[prices["Year"] == y0]
prices1_temp=prices[prices["Year"] == y1]
prices_y0_s=list(prices0_temp[s])
prices_y1_s=list(prices1_temp[s])
try:
p0 = 0
p1 = 0
i = -1
while pd.isnull(prices_y0_s[i]):
i=i-1
p0=prices_y0_s[i]
i = -1
while pd.isnull(prices_y1_s[i]):
i=i-1
p1=prices_y1_s[i]
data.loc[row,'Price Y'] = p0
data.loc[row,'Price Y+1'] = p1
data.loc[row,'Return Y+1'] = (p1-p0)/p0
except IndexError:
pass
data = data[['Date','Entreprises'] + X_columns + ['Ticker','Year','Price Y','Price Y+1','Return Y+1']]
## Delete rows where Return Y+1 is null
return data[data['Return Y+1'].notna()]
def get_benchmark_returns(data,
daily_benchmark_levels_path):
## Import and merge chosen index year-end levels
index = pd.read_csv(daily_benchmark_levels_path)
index['Date'] = pd.to_datetime(index['Date'])
index['Year'] = pd.DatetimeIndex(index['Date']).year
index.set_index('Date', inplace=True)
index_y = pd.DataFrame(index = list(range(2000,2021)))
index_y['levelYearEnd'] = np.nan
for row in index_y.index:
y = row
index_temp=index[index["Year"] == y]
level_y_temp=list(index_temp['Adj Close'])
try:
l = 0
i = -1
while pd.isnull(level_y_temp[i]):
i = i-1
l = level_y_temp[i]
index_y.loc[row,'levelYearEnd'] = l
except IndexError:
pass
index_y['Benchmark Returns'] = index_y['levelYearEnd'].pct_change()
index_y.index = index_y.index - 1
index_y.rename({"Benchmark Returns" : "Benchmark Returns Y+1"}, inplace = True, axis = 1)
data = data.merge(index_y, left_on = 'Year', right_index = True)
### Dependent variable: Benchmark Excess Returns Y+1
data['Excess Returns Y+1'] = data['Return Y+1'] - data['Benchmark Returns Y+1']
data = data[['Date','Entreprises'] + X_columns + ['Ticker','Year','Price Y','Price Y+1','Return Y+1','Excess Returns Y+1']]
return data, index
def get_russell3000_returns(data,
russell3000_path):
## Import and merge RUSSELL 3000 year-end levels
russell = pd.read_csv(russell3000_path)
russell['Date'] = pd.to_datetime(russell['Date'])
russell['Year'] = [d.year for d in russell['Date']]
russell_y = pd.DataFrame(index = list(range(2000,2021)))
russell_y['levelYearEnd'] = np.nan
for row in russell_y.index:
y = row
russell_temp=russell[russell["Year"] == y]
level_y_temp=list(russell_temp['Adj Close'])
try:
l = 0
i = -1
while pd.isnull(level_y_temp[i]):
i = i-1
l = level_y_temp[i]
russell_y.loc[row,'levelYearEnd'] = l
except IndexError:
pass
russell_y['RUSSELL 3000 Returns'] = russell_y['levelYearEnd'].pct_change()
russell_y.index = russell_y.index - 1
russell_y.rename({"RUSSELL 3000 Returns":"RUSSELL 3000 Returns Y+1"},inplace = True,axis = 1)
data = pd.merge(data,russell_y,left_on = 'Year',right_index=True)
### Dependent variable: RUSSELL Excess Returns Y+1
data['Excess Returns Y+1 (RUSSELL)'] = data['Return Y+1'] - data['RUSSELL 3000 Returns Y+1']
data.drop(['levelYearEnd','Unnamed: 24','RUSSELL 3000 Returns Y+1'], axis=1, inplace=True)
return data
def get_eurostoxx600_returns(data,
eurostoxx600_path):
## Import and merge EUROSTOXX 600 year-end levels
eurostoxx600 = pd.read_csv(eurostoxx600_path)
eurostoxx600['Date'] = pd.to_datetime(eurostoxx600['Date'])
eurostoxx600['Year'] = [d.year for d in eurostoxx600['Date']]
eurostoxx600_y = pd.DataFrame(index = list(range(2000,2021)))
eurostoxx600_y['levelYearEnd'] = np.nan
for row in eurostoxx600_y.index:
y = row
eurostoxx600_temp = eurostoxx600[eurostoxx600["Year"] == y]
level_y_temp = list(eurostoxx600_temp['Adj Close'])
try:
l = 0
i = -1
while pd.isnull(level_y_temp[i]):
i = i-1
l = level_y_temp[i]
eurostoxx600_y.loc[row,'levelYearEnd'] = l
except IndexError:
pass
eurostoxx600_y['EUROSTOXX 600 Returns'] = eurostoxx600_y['levelYearEnd'].pct_change()
eurostoxx600_y.index = eurostoxx600_y.index-1
eurostoxx600_y.rename({"EUROSTOXX 600 Returns":"EUROSTOXX 600 Returns Y+1"},inplace = True,axis = 1)
data = pd.merge(data,eurostoxx600_y,left_on = 'Year',right_index=True)
data.drop('levelYearEnd',axis = 1,inplace = True)
### Dependent variable 2: EUROSTOXX Excess Returns Y+1
data['Excess Returns Y+1 (EUROSTOXX)'] = data['Return Y+1'] - data['EUROSTOXX 600 Returns Y+1']
data.drop(['Unnamed: 24','EUROSTOXX 600 Returns Y+1'], axis=1, inplace=True)
return data
def get_label(excess_returns,
short_lim,
long_lim):
label = 0
if excess_returns < short_lim:
label = 0
elif excess_returns > long_lim:
label = 2
else:
label = 1
return label
def get_train_test(data,
y,
X_columns,
Y_label):
years = list(range(y-7,y))
dataset = data[data['Year'].isin([y])]
X_train = data[data['Year'].isin(years)][X_columns]
X_test = data[data['Year'].isin([y])][X_columns]
Y_train = data[data['Year'].isin(years)][Y_label]
Y_test = data[data['Year'].isin([y])][Y_label]
return dataset, X_train, X_test, Y_train, Y_test
def xgb_predict(xgb_params,
X_train,
X_test,
Y_train,
Y_test,
print_accuracy):
model = xgboost.XGBClassifier(**xgb_params, use_label_encoder = False, verbosity = 0)
model.fit(X_train,Y_train)
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
pred_proba_short_list = model.predict_proba(X_test)[:,0]
pred_proba_long_list = model.predict_proba(X_test)[:,2]
accuracy = accuracy_score(Y_test, predictions)
if print_accuracy:
print("Model Accuracy : %.2f%%" % (accuracy * 100.0))
print("------------------------------------")
return predictions, pred_proba_short_list, pred_proba_long_list
def get_portfolio_weights(pred,
price0,
nb_short,
nb_long,
short_limit=10):
if pred==0:
if price0 < short_limit: # Stock not shorted if price at beginning of year below limit
return 0
else:
return (-1/nb_short)
elif pred==1:
return 0
else:
return (1/nb_long)
def get_portfolio_weights_wProba(pred,
price0,
sum_proba_short,
sum_proba_long,
val_proba_short,
val_proba_long,
short_limit=10):
if pred == 0:
if price0 < short_limit: # Stock not shorted if price at beginning of year below limit
return 0
else:
return (- val_proba_short / sum_proba_short)
elif pred == 1:
return 0
else:
return (val_proba_long / sum_proba_long)
def get_weight_col(year_dataset,
short_limit,
pred_proba_short_list,
pred_proba_long_list,
proba_weighted):
if proba_weighted:
year_dataset['Pred_proba_short'] = pred_proba_short_list
year_dataset['Pred_proba_long'] = pred_proba_long_list
df = year_dataset
df2 = df[df['Price Y'] > short_limit]
sum_proba_short = df2[df2['Predictions'] == 0]['Pred_proba_short'].sum()
sum_proba_long = df[df['Predictions'] == 2]['Pred_proba_long'].sum()
year_dataset['Weight'] = year_dataset.apply(lambda x: get_portfolio_weights_wProba(x['Predictions'],
x['Price Y'],
sum_proba_short,
sum_proba_long,
x['Pred_proba_short'],
x['Pred_proba_long'],
short_limit), axis=1)
else:
df = year_dataset
df2 = df[df['Price Y'] > short_limit]
nb_short = len(df2[df2['Predictions'] == 0])
nb_long = len(df[df['Predictions'] == 2])
year_dataset['Weight'] = year_dataset.apply(lambda x: get_portfolio_weights(x['Predictions'],
x['Price Y'],
nb_short,
nb_long,
short_limit), axis=1)
return year_dataset
def get_rf_rate(year,
path_datafolder = PATH_DATAFOLDER,
path_rf_rate = PATH_RF_RATE):
rf_rates = pd.read_csv(path_datafolder + path_rf_rate)
rf_rates['Date'] = pd.to_datetime(rf_rates['Date'])
rf_rates['Year'] = pd.DatetimeIndex(rf_rates['Date']).year
rf_year = rf_rates[rf_rates['Year'] == year]
return float(rf_year[rf_year['Date'] == rf_year['Date'].min()]['10y_FR_Treasury_Bond_Rate'])
``` |
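A minimal usage sketch for the helpers above, assuming a prepared `data` DataFrame (with the `X_columns` features, a `Label` column, `Year`, `Ticker`, and `Price Y`) already exists; the year and settings below are illustrative, not values from the original repository.
```python
# Hypothetical single-year back-test using the helpers defined above.
year = 2018
dataset, X_train, X_test, Y_train, Y_test = get_train_test(data, year, X_columns, Y_label)

# Fit the classifier and collect per-class probabilities for shorts/longs.
predictions, proba_short, proba_long = xgb_predict(
    DEFAULT_XGB_PARAMS, X_train, X_test, Y_train, Y_test, print_accuracy=True)

# Attach predictions and compute probability-weighted portfolio weights.
dataset = dataset.copy()
dataset['Predictions'] = predictions
portfolio = get_weight_col(dataset, short_limit=10,
                           pred_proba_short_list=proba_short,
                           pred_proba_long_list=proba_long,
                           proba_weighted=True)
print(portfolio[['Ticker', 'Predictions', 'Weight']].head())
```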
{
"source": "jjcorporation/muspinB-py",
"score": 3
} |
#### File: jjcorporation/muspinB-py/stats.py
```python
from scipy.stats import lognorm
import numpy as np
def all_true(logical_list):
try:
logical_list[2]
return logical_list[0] and all_true(logical_list[1:])
except IndexError:
return logical_list[0] and logical_list[1]
class MarkovRenewalProcess():
""" Markov renewal process
- the transition matrix has entries [j][i] which is the probability of next=j given current=i
- the residence times are log-normally distributed
"""
def __init__(self, states, tm=None, mu=None, sigma=None, m=None):
self.states = states
k = len(self.states)
# init the parameters for training
if tm == 'uniform' or tm is None:
tm = np.ones((k, k))
tm.flat[::k+1] = 0
self.__comx = tm
if sigma is None:
sigma = np.full((k, ), 10)
if mu is None:
mu = np.full((k, ), 1)
self.__mu = np.log(mu**2/np.sqrt(sigma**2 + mu**2))
self.__S2 = np.log(1 + sigma**2/mu**2) + self.__mu**2
@property
def states(self):
return self.__states
@property
def transition_matrix(self):
return self.__comx / self.__comx.sum(axis=0, keepdims=True)
@property
def residence_times(self):
# base the residence times upon the parameters mu and sigma
return {state: lognorm(s=self.s[self._ix[state]], loc=0, scale=self.scale[self._ix[state]]) for state in self.states}
@property
def sigma(self):
""" second moments for each of the states' residence times
based on self.n samples
"""
return [np.sqrt(rv.stats(moments='v')) for rv in self.residence_times.values()]
@property
def mu(self):
""" first moments for each of the states' residence times
"""
return [rv.stats(moments='m') for rv in self.residence_times.values()]
@property
def comx(self):
""" co-occurrence matrix
"""
return self.__comx
@states.setter
def states(self, states):
self.__states = list(states)
@property
def _ix(self):
return {s: ix for ix, s in enumerate(self.states)}
@property
def s(self):
return np.sqrt(self.__S2 - self.__mu ** 2)
@property
def scale(self):
return np.exp(self.__mu)
@property
def steady_state(self):
w, v = np.linalg.eig(self.transition_matrix)
ix = np.argmax(np.real(w))
steady_state = np.real(v[:, ix])
for ix, st in enumerate(self.residence_times.values()):
steady_state[ix] = steady_state[ix] * st.stats(moments='m') # steady_state[ix] = steady_state[ix] * self.mu[ix]
return steady_state / steady_state.sum()
def train(self, X):
""" X comes as a list of tuples (state, duration)
for a specific element that has no duration, it is not used to update the estimate
of the residence_times, but is used as an "end-state" (previous transition is taken
into account, not the next one)
"""
# residence times
surv_t = {
s: [] for s in self.states
}
# the number of observations we already had
m = self.__comx.sum(axis=0) # TODO: should this not be axis=0 ???
# get the current and next states together as x and y
for x, y in zip(X[:-1], X[1:]):
if x[1] is not None:
# only if the current state has a duration will we look at
# - current residence time
# - transition to next state
surv_t[x[0]].append(np.log(x[1]))
self.__comx[self._ix[y[0]], self._ix[x[0]]] += 1
# the number of observations after the update
n = self.__comx.sum(axis=0) # TODO: should this not be axis=0 ???
# update estimators of first two moments: mu and S2
for k, v in surv_t.items():
ix = self._ix[k]
# do not update if no samples are observed
# alternative, we may multiply
# the current estimator by alpha
# and the newly obtained estimator by (1 - alpha)
# this would allow to forget the past trials at a "constant" rate
# (not taking into account variance of the estimators)
if n[ix] - m[ix] > 0:
self.__mu[ix] = (self.__mu[ix] * m[ix] + np.array(v).sum()) / n[ix]
self.__S2[ix] = (self.__S2[ix] * m[ix] + (np.array(v) ** 2).sum()) / n[ix]
def log_likelihood(self, X, normalise=True):
""" compute log_likelihood for a list of (state, residence_time)
the last percept is handled specially:
* if the next state is None, then the time of the one-to-last percept
is considered "interrupted" (censored)
* if only the residence time of the last percept is None,
only the transition is taken into consideration
* if both state and residence time are given, it is considered a fully valid
percept with veritable residence time
"""
L = 0
for x, y in zip(X[:-1], X[1:]):
if y[0] is not None:
L += np.log(self.residence_times[x[0]].pdf(x[1]))
ix = self._ix[x[0]], self._ix[y[0]]
L += np.log(self.transition_matrix[ix[1], ix[0]])
else: # censored data
L += np.log(1 - self.residence_times[x[0]].cdf(x[1]))
if X[-1][1] is not None:
L += np.log(self.residence_times[X[-1][0]].pdf(X[-1][1]))
if normalise:
observation_time = sum([x[1] for x in X if not x[1] is None])
L /= observation_time
return L
def sample_time(self, state):
return self.residence_times[state].rvs()
def transition(self, state):
return np.random.choice(self.states, p=self.transition_matrix[:, self._ix[state]])
def sample(self, t, initial_state=None):
steady_state = self.steady_state
t_ = 0
if initial_state is not None:
if initial_state in self.states:
s = initial_state
else:
raise ValueError('No state "{}"'.format(initial_state))
else:
s = np.random.choice(self.states, p=steady_state)
samples = []
tm = self.transition_matrix
st = self.residence_times
while t_ < t:
tau = st[s].rvs()
samples.append((s, t_, tau))
t_ += tau
s = np.random.choice(self.states, p=tm[:, self._ix[s]])
samples.append((s, t_, None))
return samples
if __name__ == '__main__':
states = ['coherent', 'transparent_left', 'transparent_right']
mrp_model = MarkovRenewalProcess(states, tm='uniform', mu=np.full((len(states), ), 3.), sigma=np.full((len(states), ), 1.))
for k, v in mrp_model.residence_times.items():
print('{}: mean={}, std={}'.format(k, v.stats(moments='m'), np.sqrt(v.stats(moments='v'))))
print('transition matrix = \n{}\n'.format(mrp_model.transition_matrix))
print('steady state vector = {}\n\n'.format(mrp_model.steady_state))
samples = [mrp_model.sample(100) for _ in range(10)]
# print(samples)
mrp = MarkovRenewalProcess(states)
mrp_prior = MarkovRenewalProcess(states)
for ix, sample in enumerate(samples):
print('training phase {i} on {n} extra samples'.format(i=ix+1, n = len(sample)))
mrp.train([(s[0], s[2]) for s in sample])
print('log_likelihood(true model) = {}'.format(
mrp_model.log_likelihood([(s[0], s[2]) for s in sample])))
print('log_likelihood(trained model) = {}'.format(
mrp.log_likelihood([(s[0], s[2]) for s in sample])))
print('log_likelihood(prior) = {}'.format(
mrp_prior.log_likelihood([(s[0], s[2]) for s in sample])))
for k, v in mrp.residence_times.items():
print('{}: mean={}, std={}'.format(k, v.stats(moments='m'), np.sqrt(v.stats(moments='v'))))
print('transition matrix = \n{}\n'.format(mrp.transition_matrix))
print('steady state vector = {}\n\n'.format(mrp.steady_state))
```
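A short, hedged illustration of the percept format accepted by `train` and `log_likelihood`: a hand-written sequence of (state, residence_time) tuples that ends with an open percept of unknown duration, mirroring the `(state, t, None)` terminator produced by `sample()`. The numbers are made up for illustration.
```python
# Hypothetical scoring of a short percept sequence whose last percept is unfinished.
states = ['coherent', 'transparent_left', 'transparent_right']
mrp = MarkovRenewalProcess(states)

percepts = [('coherent', 2.5),
            ('transparent_left', 1.2),
            ('coherent', 3.0),
            ('transparent_right', None)]  # last percept: duration unobserved

mrp.train(percepts)                  # updates transition counts and residence-time moments
print(mrp.log_likelihood(percepts))  # per-second log-likelihood (normalise=True by default)
```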
#### File: jjcorporation/muspinB-py/triggers.py
```python
from psychopy import parallel, core
from psychopy import logging
# add a logging level in between the EXP and INFO level
logging.addLevel(21, 'TRIG')
class Trigger():
trigger = dict(
acquisition = dict(
start = 10,
end = 11,
interrupt = 12
),
trial = dict(
start = 20,
consigne = 21,
feedback = 22,
end = 29
),
general = dict(
init = 0,
fixationcross = 30,
consigne = 34
),
condition = dict(
nAmb_nKp = 40, # 40 + 0b00
nAmb_Kp = 41, # 40 + 0b01
Amb_nKp = 42, # 40 + 0b10
Amb_Kp = 43, # 40 + 0b11
end = 49
),
keypress = dict(
nopercept = 80, # 80 + 0b000
left = 84, # 80 + 0b100
right = 81, # 80 + 0b001
up = 82, # 80 + 0b010
left_up = 86, # 80 + 0b110
up_right = 83, # 80 + 0b011
left_right = 85, # 80 + 0b101
left_up_right = 87, # 80 + 0b111
esc = 89
),
stimulus = dict(
transpL = 94, # 90 + 0b100
transpR = 91, # 90 + 0b001
coh = 92, # 90 + 0b010
amb = 99
),
eyelink = dict(
drift = 120,
calibration = 121
)
)
def __init__(self, port=None):
self.port = port
@property
def port(self):
return self.__port
@port.setter
def port(self, p):
if p is not None:
try:
self.__port = parallel.ParallelPort(address=p)
except FileNotFoundError:
self.__port = None
else:
self.__port = None
def send(self, triggertype, triggername, io=None):
value = self.trigger[triggertype][triggername]
logging.log('{}\t{}\t{}'.format(value, triggertype, triggername), level=logging.getLevel('TRIG'))
if not io is None:
io.sendMessageEvent(format(value), category='TRIGGER')
if self.port is not None:
sendTime = core.getTime()
self.port.setData(value)
while core.getTime() < sendTime + .004:
self.port.setData(0)
if __name__ == '__main__':
logging.LogFile(f='triggers.tsv', level=logging.getLevel('TRIG'))
trigger = Trigger()
trigger.send('acquisition', 'start')
trigger.send('acquisition', 'end')
``` |
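A hedged usage sketch for `Trigger` with a hardware parallel port; the address below (0x0378, a common LPT1 address) and the trigger sequence are illustrative assumptions, not values from the original experiment script.
```python
# Hypothetical trigger sequence for one trial; if the port cannot be opened,
# Trigger falls back to logging-only behaviour (self.port stays None).
trigger = Trigger(port=0x0378)  # assumed LPT1 address

trigger.send('trial', 'start')
trigger.send('stimulus', 'amb')
trigger.send('keypress', 'left')
trigger.send('trial', 'end')
```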
{
"source": "jjcunningjam/socceraction",
"score": 3
} |
#### File: tests/datasets/download.py
```python
import os
import shutil
import ssl
import sys
from pathlib import Path
from urllib.parse import urlparse
from urllib.request import urlopen, urlretrieve
from zipfile import ZipFile, is_zipfile
import pandas as pd
try:
from tqdm import tqdm
except ImportError:
tqdm = None
import socceraction.atomic.spadl as atomicspadl
import socceraction.spadl as spadl
import socceraction.spadl.statsbomb as statsbomb
import socceraction.spadl.wyscout as wyscout
from socceraction.data.statsbomb import StatsBombLoader
from socceraction.data.wyscout import PublicWyscoutLoader
# optional: if you get a SSL CERTIFICATE_VERIFY_FAILED exception
ssl._create_default_https_context = ssl._create_unverified_context
_data_dir = os.path.dirname(__file__)
def download_statsbomb_data() -> None:
dataset_url = 'https://github.com/statsbomb/open-data/archive/master.zip'
tmp_datafolder = os.path.join(_data_dir, 'statsbomb', 'tmp')
raw_datafolder = os.path.join(_data_dir, 'statsbomb', 'raw')
for datafolder in [tmp_datafolder, raw_datafolder]:
if not os.path.exists(datafolder):
os.makedirs(datafolder, exist_ok=True)
statsbombzip = os.path.join(tmp_datafolder, 'statsbomb-open-data.zip')
with urlopen(dataset_url) as dl_file:
with open(statsbombzip, 'wb') as out_file:
out_file.write(dl_file.read())
with ZipFile(statsbombzip, 'r') as zipObj:
zipObj.extractall(tmp_datafolder)
shutil.rmtree(raw_datafolder)
Path(f'{tmp_datafolder}/open-data-master/data').rename(raw_datafolder)
shutil.rmtree(tmp_datafolder)
def convert_statsbomb_data() -> None:
seasons = {
3: '2018',
}
leagues = {
'FIFA World Cup': 'WorldCup',
}
spadl_datafolder = os.path.join(_data_dir, 'statsbomb')
free_open_data_remote = 'https://raw.githubusercontent.com/statsbomb/open-data/master/data/'
SBL = StatsBombLoader(root=free_open_data_remote, getter='remote')
# View all available competitions
df_competitions = SBL.competitions()
selected_competitions = df_competitions.competition_name.isin(leagues.keys())
df_selected_competitions = df_competitions.loc[selected_competitions]
for competition in df_selected_competitions.itertuples():
# Get games from all selected competition
games = SBL.games(competition.competition_id, competition.season_id)
if tqdm is not None:
games_verbose = tqdm(list(games.itertuples()), desc='Loading match data')
else:
games_verbose = games.itertuples()
teams, players = [], []
competition_id = leagues[competition.competition_name]
season_id = seasons[competition.season_id]
spadl_h5 = os.path.join(spadl_datafolder, f'spadl-{competition_id}-{season_id}.h5')
with pd.HDFStore(spadl_h5) as spadlstore:
spadlstore.put('actiontypes', spadl.actiontypes_df(), format='table')
spadlstore.put('results', spadl.results_df(), format='table')
spadlstore.put('bodyparts', spadl.bodyparts_df(), format='table')
for game in games_verbose:
# load data
teams.append(SBL.teams(game.game_id))
players.append(SBL.players(game.game_id))
events = SBL.events(game.game_id)
# convert data
spadlstore.put(
f'actions/game_{game.game_id}',
statsbomb.convert_to_actions(events, game.home_team_id),
format='table',
)
games.season_id = season_id
games.competition_id = competition_id
spadlstore.put('games', games)
spadlstore.put(
'teams',
pd.concat(teams).drop_duplicates('team_id').reset_index(drop=True),
)
spadlstore.put(
'players',
pd.concat(players).drop_duplicates('player_id').reset_index(drop=True),
)
def download_wyscout_data() -> None:
# https://figshare.com/collections/Soccer_match_event_dataset/4415000/5
dataset_urls = dict(
competitions='https://ndownloader.figshare.com/files/15073685',
teams='https://ndownloader.figshare.com/files/15073697',
players='https://ndownloader.figshare.com/files/15073721',
games='https://ndownloader.figshare.com/files/14464622',
events='https://ndownloader.figshare.com/files/14464685',
)
raw_datafolder = os.path.join(_data_dir, 'wyscout_public', 'raw')
if not os.path.exists(raw_datafolder):
os.makedirs(raw_datafolder, exist_ok=True)
# download and unzip Wyscout open data
for url in dataset_urls.values():
url_obj = urlopen(url).geturl()
path = Path(urlparse(url_obj).path)
file_name = os.path.join(raw_datafolder, path.name)
file_local, _ = urlretrieve(url_obj, file_name)
if is_zipfile(file_local):
with ZipFile(file_local) as zip_file:
zip_file.extractall(raw_datafolder)
def convert_wyscout_data() -> None:
seasons = {
10078: '2018',
}
leagues = {
28: 'WorldCup',
}
raw_datafolder = os.path.join(_data_dir, 'wyscout_public', 'raw')
spadl_datafolder = os.path.join(_data_dir, 'wyscout_public')
WYL = PublicWyscoutLoader(root=raw_datafolder)
# View all available competitions
df_competitions = WYL.competitions()
selected_competitions = df_competitions.competition_id.isin(leagues.keys())
df_selected_competitions = df_competitions.loc[selected_competitions]
for competition in df_selected_competitions.itertuples():
# Get games from all selected competition
games = WYL.games(competition.competition_id, competition.season_id)
if tqdm is not None:
games_verbose = tqdm(list(games.itertuples()), desc='Loading match data')
else:
games_verbose = games.itertuples()
teams, players = [], []
competition_id = leagues[competition.competition_id]
season_id = seasons[competition.season_id]
spadl_h5 = os.path.join(spadl_datafolder, f'spadl-{competition_id}-{season_id}.h5')
with pd.HDFStore(spadl_h5) as spadlstore:
spadlstore.put('actiontypes', spadl.actiontypes_df(), format='table')
spadlstore.put('results', spadl.results_df(), format='table')
spadlstore.put('bodyparts', spadl.bodyparts_df(), format='table')
for game in games_verbose:
# load data
teams.append(WYL.teams(game.game_id))
players.append(WYL.players(game.game_id))
events = WYL.events(game.game_id)
# convert data
spadlstore.put(
f'actions/game_{game.game_id}',
wyscout.convert_to_actions(events, game.home_team_id),
# format='table',
)
games.season_id = season_id
games.competition_id = competition_id
spadlstore.put('games', games)
spadlstore.put(
'teams',
pd.concat(teams).drop_duplicates('team_id').reset_index(drop=True),
)
spadlstore.put(
'players',
pd.concat(players).drop_duplicates('player_id').reset_index(drop=True),
)
def create_spadl(game_id: int, home_team_id: int) -> None:
spadl_datafolder = os.path.join(_data_dir, 'spadl')
if not os.path.exists(spadl_datafolder):
os.makedirs(spadl_datafolder, exist_ok=True)
# load events
free_open_data_remote = 'https://raw.githubusercontent.com/statsbomb/open-data/master/data/'
SBL = StatsBombLoader(root=free_open_data_remote, getter='remote')
events = SBL.events(game_id)
# convert to spadl
spadl_json = os.path.join(spadl_datafolder, 'spadl.json')
df_actions = statsbomb.convert_to_actions(events, home_team_id)
df_actions.head(n=200).to_json(spadl_json, orient='records')
# convert to atomic spadl
atomic_spadl_json = os.path.join(spadl_datafolder, 'atomic_spadl.json')
df_atomic_actions = atomicspadl.convert_to_atomic(df_actions)
df_atomic_actions.head(n=200).to_json(atomic_spadl_json, orient='records')
if __name__ == '__main__':
if len(sys.argv) == 1 or sys.argv[1] == 'statsbomb':
download_statsbomb_data()
if len(sys.argv) > 1 and sys.argv[1] == 'convert-statsbomb':
convert_statsbomb_data()
if len(sys.argv) == 1 or sys.argv[1] == 'wyscout':
download_wyscout_data()
if len(sys.argv) > 1 and sys.argv[1] == 'convert-wyscout':
convert_wyscout_data()
if len(sys.argv) == 1 or sys.argv[1] == 'spadl':
create_spadl(8657, 777)
``` |
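The `__main__` block above is driven by a single command-line argument; an equivalent programmatic sequence, useful from a notebook or another test helper, is sketched below (it assumes the functions are imported from this module and that the working-directory layout matches the test suite).
```python
# Hypothetical programmatic equivalent of `python download.py statsbomb`
# followed by `python download.py convert-statsbomb`.
download_statsbomb_data()
convert_statsbomb_data()

# Rebuild the small SPADL fixtures; the game_id and home_team_id mirror the
# defaults used in the __main__ block above.
create_spadl(8657, 777)
```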
{
"source": "jjd9/pydcol",
"score": 2
} |
#### File: pydcol/pydcol/Objective.py
```python
import numpy as np
from scipy.sparse import csr_matrix
from symengine import Lambdify
from sympy import Matrix, hessian
from typing import Union
# pydcol imports
from .SymUtils import fast_jac, fast_half_hess
class CustomObjective:
def __init__(self):
pass
def eval(self,arg):
pass
def jac(self,arg):
pass
def hess(self,arg):
pass
class Objective:
def __init__(self, parent, Obj):
self.N = parent.N
self.Ntilde = parent.Ntilde
self.h = parent.h
self._h = parent._h.copy()
self.colloc_method = parent.colloc_method
self.X_dim = parent.X_dim
self.U_dim = parent.U_dim
all_vars = parent.all_vars
mid_all_vars = parent.mid_all_vars
prev_all_vars = parent.prev_all_vars
if self.N != self.Ntilde:
self.obj_lambda = Lambdify(prev_all_vars+mid_all_vars+all_vars+[self.h], Obj, order='F')
# Gradient vector ("jac")
obj_jac = Matrix(fast_jac([Obj], prev_all_vars+all_vars + mid_all_vars)).T
self.obj_jac_lambda = Lambdify(prev_all_vars+mid_all_vars+all_vars+[self.h], obj_jac, order='F')
# hessian matrix ("hess")
obj_hess = Matrix(fast_half_hess(Obj, prev_all_vars+all_vars + mid_all_vars)).T
self.obj_hess_lambda = Lambdify(prev_all_vars+mid_all_vars+all_vars+[self.h], obj_hess, order='F')
else:
self._h = np.hstack((self._h[0],self._h))
self.obj_lambda = Lambdify(all_vars+[self.h], Obj, order='F')
# Gradient vector ("jac")
obj_jac = Matrix([Obj]).jacobian(all_vars)
self.obj_jac_lambda = Lambdify(all_vars+[self.h], obj_jac, order='F')
# hessian matrix ("hess")
obj_hess = hessian(Obj, all_vars)
self.obj_hess_lambda = Lambdify(all_vars+[self.h], obj_hess, order='F')
x0 = np.ones(self.Ntilde * (self.X_dim + self.U_dim))
self.hess_sparse_indices = self.hess(x0, return_sparse_indices=True)
self.hess_shape = (x0.size, x0.size)
self.hess_size = len(self.hess_sparse_indices[0])
self.hess_dict = dict()
for i in range(self.hess_size):
key = (self.hess_sparse_indices[0][i],self.hess_sparse_indices[1][i])
self.hess_dict[key] = i
# create callback for scipy
def eval(self, arg: np.array)->float:
"""
Evaluate objective function for given value of optimization variable.
Parameters
----------
arg -- optimization variables as 1-D numpy array.
Returns
-------
scalar objective value.
"""
if self.N != self.Ntilde:
V = arg[:self.N * (self.X_dim+self.U_dim)].reshape(self.N, self.X_dim+self.U_dim)
Vmid = arg[self.N * (self.X_dim+self.U_dim):].reshape(self.N - 1, self.X_dim+self.U_dim)
_in = np.hstack((V[:-1,:], Vmid, V[1:,:],self._h.reshape(-1,1)))
else:
_in = np.hstack((arg.reshape(self.Ntilde, self.X_dim+self.U_dim),self._h.reshape(-1,1)))
return self.obj_lambda(_in.T).sum()
def jac(self, arg: np.array)->np.array:
"""
Evaluate gradient vector of objective function for given value of optimization variable.
Parameters
----------
arg -- optimization variables as 1-D numpy array.
Returns
-------
gradient vector of the objective function as a 1-D numpy array.
"""
if self.N != self.Ntilde:
V = arg[:self.N * (self.X_dim+self.U_dim)].reshape(self.N, self.X_dim+self.U_dim)
Vmid = arg[self.N * (self.X_dim+self.U_dim):].reshape(self.N - 1, self.X_dim+self.U_dim)
_in = np.hstack((V[:-1,:], Vmid, V[1:,:],self._h.reshape(-1,1)))
J = self.obj_jac_lambda(_in.T).squeeze()
SysDim = self.X_dim + self.U_dim
jac = np.zeros(self.Ntilde * SysDim)
for i in range(self.N-1):
jac[i*SysDim:(i+1)*SysDim+SysDim] += J[:SysDim*2,i]
jac[(i+self.N)*SysDim:(i+self.N)*SysDim+SysDim] += J[SysDim*2:,i]
else:
_in = np.hstack((arg.reshape(self.Ntilde, self.X_dim+self.U_dim),self._h.reshape(-1,1)))
jac = self.obj_jac_lambda(_in.T).squeeze().T.ravel()
return jac
def hess(self, arg: np.array, return_sparse_indices: bool = False)->Union[tuple, csr_matrix]:
"""
Evaluate the Hessian matrix of the objective function for the given value of the optimization variable.
Parameters
----------
arg -- optimization variables as 1-D numpy array.
return_sparse_indices -- if True return a tuple of the row, column indices of the non-zero entries of the hessian matrix. if False, return the actual hessian.
Returns
-------
Hessian matrix of the objective function as a sparse matrix (csr_matrix).
OR
tuple of (row,col) indices of non-zero elements of hessian matrix
"""
Sys_dim = self.X_dim + self.U_dim
Opt_dim = Sys_dim * self.Ntilde
if self.N != self.Ntilde:
V = arg[:self.N * (self.X_dim+self.U_dim)].reshape(self.N, self.X_dim+self.U_dim)
Vmid = arg[self.N * (self.X_dim+self.U_dim):].reshape(self.N - 1, self.X_dim+self.U_dim)
_in = np.hstack((V[:-1,:], Vmid, V[1:,:],self._h.reshape(-1,1)))
hess_block = self.obj_hess_lambda(_in.T) + 1e-9
# used for determining nonzero elements of hessian
if return_sparse_indices:
idx = set()
for i in range(self.N-1):
for j in range(2*Sys_dim):
for k in range(2*Sys_dim):
idx.add((i*Sys_dim+j, i*Sys_dim+k))
for j in range(Sys_dim):
for k in range(Sys_dim):
idx.add(((i + self.N)*Sys_dim+j, (i + self.N)*Sys_dim+k))
idx = np.array(list(idx))
return idx[:,0], idx[:,1]
else:
hess = np.zeros(self.hess_size, dtype=float)
for i in range(self.N-1):
Htemp = hess_block[:,:,i] + hess_block[:,:,i].T
for j in range(2*Sys_dim):
for k in range(2*Sys_dim):
hess[self.hess_dict[(i*Sys_dim+j, i*Sys_dim+k)]]+=Htemp[j,k]
for j in range(Sys_dim):
for k in range(Sys_dim):
hess[self.hess_dict[((i + self.N)*Sys_dim+j, (i + self.N)*Sys_dim+k)]]+=Htemp[2*Sys_dim+j,2*Sys_dim+k]
return csr_matrix((hess, self.hess_sparse_indices), shape = self.hess_shape)
else:
_in = np.hstack((arg.reshape(self.Ntilde, self.X_dim+self.U_dim),self._h.reshape(-1,1)))
hess_block = self.obj_hess_lambda(_in.T) + 1e-9
# used for determining nonzero elements of hessian
if return_sparse_indices:
rows = []
cols = []
for i in range(self.N):
for j in range(i*Sys_dim, i*Sys_dim + Sys_dim):
for k in range(i*Sys_dim, i*Sys_dim + Sys_dim):
rows.append(j)
cols.append(k)
return rows, cols
else:
return csr_matrix((hess_block.ravel(), self.hess_sparse_indices), shape = (Opt_dim, Opt_dim))
```
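Because hand-derived Jacobians are easy to get subtly wrong, a finite-difference spot check is a cheap sanity test; the sketch below assumes `obj` is an existing `Objective` instance built by a parent collocation problem (it is not part of the original module).
```python
import numpy as np
from scipy.optimize import check_grad

# Hypothetical gradient check at a random point: compares Objective.jac against
# a numerical estimate of the gradient of Objective.eval.
x0 = np.random.rand(obj.Ntilde * (obj.X_dim + obj.U_dim))
err = check_grad(obj.eval, obj.jac, x0)
print("finite-difference gradient error:", err)
```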
#### File: pydcol/pydcol/ProblemDefinition.py
```python
try:
import ipyopt
_ipyopt_imported = True
except:
_ipyopt_imported = False
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize, NonlinearConstraint
from scipy.integrate import solve_ivp
from sympy import Matrix, Symbol, lambdify
from sympy.core.function import BadArgumentsError
# pydcol imports
from .Objective import Objective
from .EqualityConstraints import EqualityConstraints
from .CollocMethods import *
from .Solution import Solution
class CollocationProblem:
def __init__(self,
state_vars,
control_vars,
ode,
tspan,
X_start,
X_goal=None,
colloc_method=HERM,
custom_objective=None):
self.ode = ode
self.state_vars = state_vars
self.control_vars = control_vars
self.ode_fun = lambdify(self.state_vars+self.control_vars, Matrix(self.ode), 'numpy')
self.colloc_method = colloc_method
self.tspan = tspan
self.objective = custom_objective
self.X_start = X_start
self.X_goal = X_goal
# Get variable dimensions
self.N = self.tspan.size
self.Ntilde=self.tspan.size
self.X_dim = len(state_vars)
self.U_dim = len(control_vars)
self.all_vars = state_vars + control_vars
self.h = Symbol("h") # symbolic time step
self._h = self.tspan[1:] - self.tspan[:-1] # time steps
# Create a set of "prev" and "mid" variables for accessing values at previous time step
self.prev_all_vars = [Symbol(str(var)+"_prev") for var in self.all_vars]
self.prev_dict = {}
for i in range(len(self.all_vars)):
self.prev_dict[self.all_vars[i]] = self.prev_all_vars[i]
if self.colloc_method in MIDPOINT_METHODS:
self.mid_all_vars = [Symbol(str(var)+"_mid") for var in self.all_vars]
self.mid_dict = {}
for i in range(len(self.all_vars)):
self.mid_dict[self.all_vars[i]] = self.mid_all_vars[i]
else:
self.mid_all_vars = []
X = Matrix(state_vars)
U = Matrix(control_vars)
# Scalar Objective
if self.objective is None:
if self.colloc_method in [HERM]:
Obj = 0
for i in range(self.U_dim):
effort = self.control_vars[i]**2
Obj += (self.h/6.0) * (effort + 4.0 * effort.subs(self.mid_dict) + effort.subs(self.prev_dict))
elif self.colloc_method in [RADAU]:
Obj = 0
for i in range(self.U_dim):
effort = self.control_vars[i]**2
Obj += (self.h/4.0) * (3.0 * effort.subs(self.mid_dict) + effort)
else:
effort = self.h * U.multiply_elementwise(U)
Obj = np.sum(effort[:])
# Equality Constraints
C_eq = []
if colloc_method == TRAP:
# Trapezoid method
for i in range(self.X_dim):
C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - 0.5 * self.h * (ode[i] + ode[i].subs(self.prev_dict))]
elif colloc_method == EB:
# Euler Backward method
for i in range(self.X_dim):
C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - self.h * ode[i]]
elif colloc_method == EF:
# Euler Forward method
for i in range(self.X_dim):
C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - self.h * ode[i].subs(self.prev_dict)]
elif colloc_method == HERM:
# Hermite Simpson method
self.Ntilde=self.Ntilde*2-1 # actual number of node points due to addition of "mid" points
for i in range(self.X_dim):
C_eq+=[state_vars[i].subs(self.mid_dict) - 0.5 * (state_vars[i] + state_vars[i].subs(self.prev_dict)) - (self.h/8.0) * (ode[i].subs(self.prev_dict) - ode[i])]
for i in range(self.X_dim):
C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - (self.h/6.0) * (ode[i] + 4.0 * ode[i].subs(self.mid_dict) + ode[i].subs(self.prev_dict))]
elif colloc_method == RADAU:
# Radau 3rd order
self.Ntilde=self.Ntilde*2-1 # actual number of node points due to addition of "mid" points
for i in range(self.X_dim):
C_eq+=[state_vars[i].subs(self.mid_dict) - state_vars[i].subs(self.prev_dict)-5.0/12.0*self.h*ode[i].subs(self.mid_dict)+1.0/12.0*self.h*ode[i]] # intermediate point residue
for i in range(self.X_dim):
C_eq+=[state_vars[i] - state_vars[i].subs(self.prev_dict)-3.0/4.0*self.h*ode[i].subs(self.mid_dict)-1.0/4.0*self.h*ode[i]] # end point residue
# Compile objective and equality constraints
self.equality_constr = EqualityConstraints(self, Matrix(C_eq))
if self.objective is None:
self.objective = Objective(self, Obj)
def solve(self, x0: np.array = None, bounds: list = None, solver: str='scipy')->Solution:
"""
Solve the direct collocation problem as a nonlinear program.
Parameters
----------
x0 -- initial guess for the solution; if not provided, an educated guess is constructed from the initial/final state.
bounds -- list of [upper, lower] bound lists, one for each variable (order should match x0)
solver -- which optimizer to use (options: scipy, ipopt)
Returns
-------
pydcol.Solution containing solution and problem metadata
"""
self.is_solved = False
if x0 is None:
# Initialize optimization variables
if bounds is not None:
u_bounds = bounds[self.X_dim:]
u_mid = []
for ubnd in u_bounds:
if ubnd[0] is not None and ubnd[1] is not None:
u_mid += [(ubnd[0]+ubnd[1])/2.0]
elif ubnd[1] is not None:
u_mid += [ubnd[1]]
elif ubnd[0] is not None:
u_mid += [ubnd[0]]
else:
u_mid += [0.0]
else:
u_mid = [0.1] * self.U_dim
x0 = [self.X_start.tolist() + u_mid]
x0_mid = []
for i in range(self.N - 1):
if self.X_goal is not None:
xnew = self.X_start + (self.X_goal - self.X_start) * i / self.Ntilde
else:
xnew = self.X_start + i / self.Ntilde
x0.append(xnew.tolist() + u_mid)
if self.N != self.Ntilde:
x0_mid.append(0.5*(np.array(x0[-1]) + np.array(x0[-2])))
x0 = np.array(x0 + x0_mid).ravel()
if solver=='scipy':
_bounds = bounds * self.Ntilde
# Problem constraints
constr_eq = NonlinearConstraint(self.equality_constr.eval,
lb=0,
ub=0,
jac=self.equality_constr.jac,
hess=self.equality_constr.hess)
# Solve Problem
sol_opt = minimize(self.objective.eval,
x0,
method="trust-constr",
jac=self.objective.jac,
hess=self.objective.hess,
constraints=(constr_eq),
bounds=_bounds,
options={'sparse_jacobian': True})
# convert scipy solution to our format
self.sol_c = Solution(sol_opt, self.colloc_method, (self.N, self.Ntilde, self.X_dim, self.U_dim), self.tspan, solver)
self.is_solved = sol_opt.success
elif solver == "ipopt":
if not _ipyopt_imported:
raise(ImportError("Ipyopt could not be imported! Please use scipy solver."))
# setup variable bounds
nvar = self.Ntilde * len(bounds)
x_L = np.zeros(nvar)
x_U = np.zeros(nvar)
v_idx = 0
for i in range(self.Ntilde):
for b_pair in bounds:
if b_pair[0] is None:
x_L[v_idx] = -1e9
else:
x_L[v_idx] = b_pair[0]
if b_pair[1] is None:
x_U[v_idx] = 1e9
else:
x_U[v_idx] = b_pair[1]
v_idx += 1
# setup equality constraints
ncon = self.equality_constr.eval(x0).size
g_L = np.zeros((ncon,))
g_U = np.zeros((ncon,))
# find which entries of the constraint jacobian and problem hessian are always nonzero
jac_g_idx = self.equality_constr.jac(x0, return_sparse_indices=True)
lagrange = np.ones(ncon)
h_obj_idx = self.objective.hess(x0, return_sparse_indices=True)
h_con_idx = self.equality_constr.hess(x0, lagrange, return_sparse_indices=True)
# merge objective and constraint hessian indices
coords = set()
for i in range(len(h_obj_idx[0])):
coords.add((h_obj_idx[0][i], h_obj_idx[1][i]))
for i in range(len(h_con_idx[0])):
coords.add((h_con_idx[0][i], h_con_idx[1][i]))
coords = np.array(list(coords))
h_idx = (coords[:,0], coords[:,1])
def eval_grad_f(x, out):
out[()] = self.objective.jac(x).ravel()
return out
def eval_g(x, out):
out[()] = self.equality_constr.eval(x).ravel()
return out
def eval_jac_g(x, out):
out[()] = self.equality_constr.jac(x).data
return out
def eval_h(x, lagrange, obj_factor, out):
"""
Combined hessian for the problem.
"""
H = self.objective.hess(x) * (obj_factor) + self.equality_constr.hess(x, lagrange)
out[()] = H.data
return out
nlp = ipyopt.Problem(nvar, x_L, x_U,
ncon, g_L, g_U,
jac_g_idx, h_idx,
self.objective.eval, eval_grad_f,
eval_g, eval_jac_g, eval_h)
# nlp.set(print_level=0)
sol_x, obj, status = nlp.solve(x0)
# convert scipy solution to our format
self.sol_c = Solution(sol_x, self.colloc_method, (self.N, self.Ntilde, self.X_dim, self.U_dim), self.tspan, solver)
self.is_solved = (status == 0) or (status == 1) # solver either succeeded or converged to acceptable accuracy
else:
raise(BadArgumentsError("Error unsupported solver!"))
self.sol_c.obj = self.objective.eval(self.sol_c.opt_x)
print("Done")
if self.is_solved:
print("Success :-)")
else:
print("Failure :-(")
return self.sol_c
def evaluate(self, ivp_method: str='RK45'):
"""
Creates a plot comparing the direct collocation solution with an IVP solver solution
obtained by applying the solution's control trajectory U(t) from the initial condition over t0 to tf.
Parameters
----------
ivp_method -- string representing ivp solution method to use
Returns
-------
None
"""
tspan = self.sol_c.t
X = self.sol_c.x.copy()
U = self.sol_c.u
def system_eqs(t, x_t):
U_t = self.sol_c.u_t(t)
return self.ode_fun(*x_t, *U_t).ravel()
eval_tspan = np.linspace(tspan[0],tspan[-1],100)
sol_ivp = solve_ivp(system_eqs, [tspan[0],tspan[-1]], self.X_start, method=ivp_method, t_eval=eval_tspan)
colors = ['k', 'g', 'b', 'r', 'c', 'm', 'y']
_, axs = plt.subplots(2, 1)
axs[0].set_title("Collocation Points vs. Integration Results")
for i in range(self.X_dim):
axs[0].plot(tspan, X[:,i],'o',color=colors[i],markersize=3)
axs[0].plot(sol_ivp.t, sol_ivp.y[i,:],color=colors[i])
axs[0].set_ylabel("State Variables")
axs[0].plot([], [],'o',color='k',label='Colloc solution')
axs[0].plot([], [],color='k',label='IVP solution')
axs[0].legend()
U_t = np.array(self.sol_c.u_t(sol_ivp.t)).T.reshape(-1, self.U_dim)
for j in range(self.U_dim):
axs[1].plot(tspan, U[:,j],'o',color=colors[j],markersize=3)
axs[1].plot(sol_ivp.t, U_t[:,j],color=colors[j])
axs[1].set_ylabel("Control Variables")
axs[1].set_xlabel("Time [s]")
plt.show()
``` |
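A minimal, hedged usage sketch for `CollocationProblem`: a one-dimensional double integrator moved from rest at 0 to rest at 1 using the trapezoid rule. The import paths, bounds, and time grid are illustrative assumptions rather than code from the original examples.
```python
import numpy as np
from sympy import symbols

# Assumed import paths for the installed package.
from pydcol.ProblemDefinition import CollocationProblem
from pydcol.CollocMethods import TRAP

# Double-integrator dynamics: x' = v, v' = u.
x, v, u = symbols('x v u')
ode = [v, u]

tspan = np.linspace(0.0, 1.0, 21)
problem = CollocationProblem(state_vars=[x, v],
                             control_vars=[u],
                             ode=ode,
                             tspan=tspan,
                             X_start=np.array([0.0, 0.0]),
                             X_goal=np.array([1.0, 0.0]),
                             colloc_method=TRAP)

bounds = [[None, None], [None, None], [-10.0, 10.0]]  # x, v, u
sol = problem.solve(bounds=bounds, solver='scipy')
problem.evaluate()  # compare collocation points against an IVP integration
```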
{
"source": "jjd9/PythonPlaysSnake",
"score": 2
} |
#### File: PythonPlaysSnake/snakeNet_AC_CNN_v1/snakeTrain_CNN_AC.py
```python
import argparse
import numpy as np
from itertools import count
from collections import namedtuple
import cv2
import sys
import IPython
import matplotlib.pyplot as plt
import os
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from snakeEnv import GridWorld
from collections import deque
import random
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter('runs/snakeNet_CNN_AC')
class ReplayBuffer(object):
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
def push(self, state, action, reward, next_state, done):
state = np.expand_dims(state, 0)
next_state = np.expand_dims(next_state, 0)
self.buffer.append((state, action, reward,next_state, done))
def sample(self, batch_size):
state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch_size))
return np.concatenate(state), action, reward, np.concatenate(next_state), done
def __len__(self):
return len(self.buffer)
# Snake game
env = GridWorld(grid_state=True)
env.seed(42)
torch.manual_seed(42)
class Policy(nn.Module):
"""
implements both actor and critic in one model
"""
def __init__(self):
super(Policy, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=2, stride=1, padding=1)
self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(128, 64, kernel_size=4, stride=2)
self.fc1 = nn.Linear(1024, 512)
# Actor layer
self.action_head = nn.Linear(512, 4)
# Critic layer
self.critic_head = nn.Linear(512, 1)
def forward(self, x):
x = x/255.0
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.conv3(x)
x = F.relu(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = F.relu(x)
# actor: chooses action to take from state s_t
# by returning probability of each action
action_probs = F.softmax(self.action_head(x), dim=-1)
# critic, judges current state value
state_val = self.critic_head(x)
return action_probs, state_val
device = torch.device("cpu")  # alternatively: torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Training on: ", device)
save_net_path = "SnakeNet_CNN_AC"
# save_net_path = "drive/My Drive/SnakeNet_CNN_AC"
model = Policy()
if os.path.exists(save_net_path):
model.load_state_dict(torch.load(save_net_path))
model.eval()
model.to(device)
temp_optimizer = optim.Adam(model.parameters(), lr=7e-4)
eps = np.finfo(np.float32).eps.item()
def select_action(state):
with torch.no_grad():
state = torch.from_numpy(state.copy()).float().unsqueeze(0).to(device)
action_probs, _ = model(state)
m = Categorical(action_probs)
action = m.sample().item()
reverse_list = [1, 0, 3, 2]
step = env.snake.action_map[action]
if np.all(env.snake.body[1,:]==env.snake.body[0,:]+step):
action = reverse_list[action]
return action
def finish_episode(batch):
"""
Training code. Calculates actor and critic loss and performs backprop.
"""
gamma = 0.99
state, action, reward, next_state, done = batch
state = torch.FloatTensor(np.float32(state)).to(device)
next_state = torch.FloatTensor(np.float32(next_state)).to(device)
action = torch.LongTensor(action).reshape(-1,1).to(device)
reward = torch.FloatTensor(reward).reshape(-1,1).to(device)
done = torch.FloatTensor(done).reshape(-1,1).to(device)
probs, values = model(state)
_, next_values = model(next_state)
expected_values = reward + gamma * next_values * (1 - done)
# critic loss
value_loss = (expected_values - values).pow(2).mean()
# actor loss
advantage = torch.clamp(expected_values - values, 0, np.inf)
policy_loss = (-torch.log(probs.gather(1,action))*advantage.detach()).mean()
loss = 10.0*value_loss + policy_loss
# perform backprop
temp_optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.5)
temp_optimizer.step()
# return the scalar loss value for logging
loss_val = loss.item()
return loss_val
def main():
running_reward = 0
render = False
log_interval = 10
# setup replay buffer
batch_size = 32
max_buffer_size = 100000
initial_buffer_size = 10000
replay_buffer = ReplayBuffer(max_buffer_size)
# used for frame multiplication
# rot = 1
act_90 = [3, 2, 0, 1]
# rot = 2
act_180 = [1, 0, 3, 2]
# rot = 3
act_270 = [2, 3, 1, 0]
# flip
act_flip = [1, 0, 2, 3]
rotate_action_list = [act_90, act_180, act_270]
# max_radius = 5
# dilation = 1
num_frames = 0
# run infinitely many episodes
for i_episode in count(1):
# if i_episode % 2000 == 0:
# max_radius += dilation
# reset environment and episode reward
state = env.reset()#radius=np.random.randint(1,max_radius))
ep_reward = 0
# for each episode, only run 9999 steps so that we don't
# infinite loop while learning
for t in range(1, 10000):
if t % 500 == 0:
env.food = env.randomPosition()
env.update_state()
state = env.state
# select action from policy
action = select_action(state)
# take the action
next_state, reward, done = env.step(action)
reward = np.sign(reward)
if render:
env.render()
replay_buffer.push(state.copy(), int(action), float(reward), next_state.copy(), float(done))
ep_reward += reward
# perform training
if len(replay_buffer) >= initial_buffer_size:
loss_val = finish_episode(replay_buffer.sample(batch_size))
else:
loss_val = 0.0
num_frames+=1
if done:
break
# multiply state by 7 through flipping / rotating frame
for flip in [False,True]:
for rot in range(1,4):
if flip:
f_state = np.flip(state.reshape(20,20,3), 1).reshape(3,20,20)
f_next_state = np.flip(next_state.reshape(20,20,3),1).reshape(3,20,20)
else:
f_state = state.copy()
f_next_state = next_state.copy()
rotated_state = np.rot90(f_state.reshape(20,20,3), rot).reshape(3,20,20) # state gets rotated
rotated_next_state = np.rot90(f_next_state.reshape(20,20,3), rot).reshape(3,20,20) # state gets rotated
if flip:
f_action = act_flip[action]
else:
f_action = int(action)
rotated_action = rotate_action_list[rot-1][f_action] # action gets rotated
replay_buffer.push(rotated_state.copy(), int(rotated_action), float(reward), rotated_next_state.copy(), float(done))
num_frames+=1
# update state for next iteration
state = next_state.copy()
# update cumulative reward
running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward
# record average reward and loss
writer.add_scalar('Training loss', loss_val, i_episode)
writer.add_scalar('Average reward', running_reward, i_episode)
# log results
if i_episode % log_interval == 0:
print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}\tFrames: {}'.format(
i_episode, ep_reward, running_reward, num_frames))
# save model every so often
if i_episode % 100 == 0:
print("Saving model")
torch.save(model.state_dict(), save_net_path)
# check if we have "solved" the problem
if running_reward > 100:
print("Solved! Running reward is now {} and "
"the last episode runs to {} time steps!".format(running_reward, t))
break
if __name__ == '__main__':
try:
main()
finally:
# save final model
print("Saving model")
torch.save(model.state_dict(), save_net_path)
```
#### File: PythonPlaysSnake/snakeNet_AC_v2/snakeTest_AC.py
```python
import argparse
import numpy as np
from itertools import count
from collections import namedtuple
import cv2
import sys
import IPython
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from snakeEnv import GridWorld
# Snake game
env = GridWorld()
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class Policy(nn.Module):
"""
implements both actor and critic in one model
"""
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(22, 128)
# actor's layer
self.action_head = nn.Linear(128, 4)
# critic's layer
self.value_head = nn.Linear(128, 1)
# action & reward buffer
self.saved_actions = []
self.rewards = []
self.dones = []
def forward(self, x):
"""
forward of both actor and critic
"""
x = F.relu(self.affine1(x))
# actor: chooses action to take from state s_t
# by returning probability of each action
action_prob = F.softmax(self.action_head(x), dim=-1)
# critic: evaluates being in the state s_t
state_values = self.value_head(x)
# return values for both actor and critic as a tuple of 2 values:
# 1. a list with the probability of each action over the action space
# 2. the value from state s_t
return action_prob, state_values
model = Policy()
model.load_state_dict(torch.load("SnakeNet_AC"))
model.eval()
def select_action(state):
state = torch.from_numpy(state).float()
probs, state_value = model(state)
# take most probable action
action = torch.argmax(probs)
# the action to take
return action.item()
def main(render):
running_reward = 0
log_interval = 100
# run infinitely many episodes
for i_episode in range(10000):
# reset environment and episode reward
env.reset()
state = env.state
ep_reward = 0
board = env.render()
fps = 10
size = (board.shape[0],board.shape[1])
out = cv2.VideoWriter("actor_critic_snake_v1.avi",cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
# run the episode until it terminates
done = False
moves= 0
while not done:
moves+=1
# writing to a image array
out.write(board)
# select action from policy
action = select_action(state)
# take the action
state, reward, done = env.step(action)
if render:
board = env.render()
ep_reward += reward
if done:
break
cv2.destroyAllWindows()
out.release()
if ep_reward > 60:
break
# update cumulative reward
running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward
# log results
if i_episode % log_interval == 0:
print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}'.format(
i_episode, ep_reward, running_reward))
if __name__ == '__main__':
render = True
main(render)
cv2.destroyAllWindows()
```
#### File: PythonPlaysSnake/snakeNet_CNN_DQN/snakeTrain_CNN_DQN.py
```python
import argparse
import numpy as np
from itertools import count
from collections import namedtuple
import cv2
import sys
import IPython
import matplotlib.pyplot as plt
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from snakeEnv import GridWorld
from collections import deque
import random
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter('runs/snakeNet_CNN_DQN', flush_secs=60)
class ReplayBuffer(object):
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
def push(self, state, action, reward, next_state, done):
state = np.expand_dims(state, 0)
next_state = np.expand_dims(next_state, 0)
self.buffer.append((state, action, reward,next_state, done))
def sample(self, batch_size):
state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch_size))
return np.concatenate(state), action, reward, np.concatenate(next_state), done
def __len__(self):
return len(self.buffer)
# Snake game
env = GridWorld(grid_state=True)
env.seed(42)
torch.manual_seed(42)
class Policy(nn.Module):
"""
Deep Q-network: maps the grid state to one Q-value per action
"""
def __init__(self):
super(Policy, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=2)
self.fc1 = nn.Linear(576, 512)
# Q layer
self.action_head = nn.Linear(512, 4)
def forward(self, x):
x = x/255.0
x = self.conv1(x)
x = F.leaky_relu(x, 0.1)
x = self.conv2(x)
x = F.leaky_relu(x, 0.1)
x = self.conv3(x)
x = F.leaky_relu(x, 0.1)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = F.leaky_relu(x, 0.1)
# Q-head: one estimated action value per action for state s_t
action_vals = self.action_head(x)
return action_vals
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Training on: ", device)
temp_model = Policy()
temp_model.load_state_dict(torch.load("SnakeNet_CNN_DQN.pt"))
temp_model.eval()
temp_model.to(device)
target_model = Policy()
target_model.load_state_dict(torch.load("SnakeNet_CNN_DQN.pt"))
target_model.eval()
target_model.to(device)
temp_optimizer = optim.Adam(temp_model.parameters(), lr=7e-4)
eps = np.finfo(np.float32).eps.item()
def select_action(state, ucb):
with torch.no_grad():
state = torch.from_numpy(state.copy()).float().unsqueeze(0)
action_vals = temp_model(state.to(device)) + ucb.to(device)
action = torch.argmax(action_vals)
return action.item()
def finish_episode(batch):
"""
Training code. Calculates the temporal-difference (Q-learning) loss and performs backprop.
"""
gamma = 0.99
state, action, reward, next_state, done = batch
state = torch.FloatTensor(np.float32(state))
next_state = torch.FloatTensor(np.float32(next_state))
action = torch.LongTensor(action).reshape(-1,1)
reward = torch.FloatTensor(reward).reshape(-1,1)
done = torch.FloatTensor(done).reshape(-1,1)
values = temp_model(state.to(device)).gather(1,action.to(device))
next_values = target_model(next_state.to(device)).max(1).values.reshape(-1,1).detach()
expected_values = reward.to(device) + gamma * next_values * (1 - done.to(device))
loss = (values - expected_values).pow(2).mean()
# perform backprop
temp_optimizer.zero_grad()
loss.backward()
temp_optimizer.step()
# return the scalar loss value for logging
loss_val = loss.item()
return loss_val
def main():
running_reward = 0
render = False
log_interval = 10
plotting = False
# Exploration settings
N_visits = torch.ones((4,))
c = 10.0 # exploration coefficient
min_c = 1.0
c_decay = 0.999
number_of_frames = 1
# setup replay buffer
batch_size = 32
max_buffer_size = 100000
initial_buffer_size = 10000
replay_buffer = ReplayBuffer(max_buffer_size)
# used for frame multiplication
# rot = 1
act_90 = [3, 2, 0, 1]
# rot = 2
act_180 = [1, 0, 3, 2]
# rot = 3
act_270 = [2, 3, 1, 0]
# flip
act_flip = [1, 0, 2, 3]
rotate_action_list = [act_90, act_180, act_270]
# updating target model every so many episodes
update_target_freq = 5
losses=[]
avg_rewards=[]
if plotting:
plt.ion()
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
line0, = ax1.plot([],[])
line1, = ax2.plot([],[])
ax1.set_ylabel("Losses")
ax2.set_ylabel("Average Rewards")
ax1.grid()
ax2.grid()
# run infinitely many episodes
for i_episode in count(1):
# reset environment and episode reward
state = env.reset()
ep_reward = 0
c = max(min_c, c_decay*c)
# for each episode, only run 9999 steps so that we don't
# infinite loop while learning
for t in range(1, 10000):
if t % 200 == 0 and ep_reward == 0:
env.food = np.array(env.randomPosition())
env.update_state()
state = env.state
# select action from policy
c = 0.0
ucb = c*torch.sqrt(np.log(number_of_frames)/N_visits)
action = select_action(state, ucb)
# update ucb parameters
N_visits[action] += 1
number_of_frames += 1
# take the action
next_state, reward, done = env.step(action)
reward = np.sign(reward)
if render:
env.render()
replay_buffer.push(state.copy(), int(action), float(reward), next_state.copy(), float(done))
ep_reward += reward
# perform training
if len(replay_buffer) >= initial_buffer_size:
loss_val = finish_episode(replay_buffer.sample(batch_size))
else:
loss_val = 0.0
if done:
break
# multiply state by 7 through flipping / rotating frame
for flip in [False,True]:
for rot in range(1,4):
if flip:
f_state = np.flip(state.reshape(20,20,3), 1).reshape(3,20,20)
f_next_state = np.flip(next_state.reshape(20,20,3),1).reshape(3,20,20)
else:
f_state = state.copy()
f_next_state = next_state.copy()
rotated_state = np.rot90(f_state.reshape(20,20,3), rot).reshape(3,20,20) # state gets rotated
rotated_next_state = np.rot90(f_next_state.reshape(20,20,3), rot).reshape(3,20,20) # state gets rotated
if flip:
f_action = act_flip[action]
else:
f_action = int(action)
rotated_action = rotate_action_list[rot-1][f_action] # action gets rotated
replay_buffer.push(rotated_state.copy(), int(rotated_action), float(reward), rotated_next_state.copy(), float(done))
# update state for next iteration
state = next_state.copy()
# update target model
if len(replay_buffer) >= initial_buffer_size:
if i_episode % update_target_freq == 0:
# copy temp model parameters into target model
target_model.load_state_dict(temp_model.state_dict())
target_model.eval()
# update cumulative reward
running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward
# record average reward and loss
avg_rewards.append(running_reward)
if loss_val == 0 and len(losses)>0:
losses.append(losses[-1])
else:
losses.append(loss_val)
writer.add_scalar('Training loss', loss_val, i_episode)
writer.add_scalar('Average reward', running_reward, i_episode)
# log results
if i_episode % log_interval == 0:
print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}'.format(
i_episode, ep_reward, running_reward))
print(N_visits)
if plotting:
line0.set_ydata(losses)
line1.set_ydata(avg_rewards)
line0.set_xdata(np.arange(len(losses)))
line1.set_xdata(np.arange(len(avg_rewards)))
if len(losses) > 0:
ax1.set_xlim(0,len(losses))
ax2.set_xlim(0,len(losses))
ax1.set_ylim(np.min(losses), np.max(losses))
ax2.set_ylim(np.min(avg_rewards), np.max(avg_rewards))
fig.canvas.draw()
plt.pause(0.1)
# save model every so often
if i_episode % 1000 == 0:
print("Saving model")
torch.save(target_model.state_dict(), "SnakeNet_CNN_DQN.pt")
# check if we have "solved" the problem
if running_reward > 100:
print("Solved! Running reward is now {} and "
"the last episode runs to {} time steps!".format(running_reward, t))
break
# save final model
print("Saving model")
torch.save(target_model.state_dict(), "SnakeNet_CNN_DQN.pt")
if __name__ == '__main__':
try:
main()
finally:
# save final model
print("Saving model")
torch.save(target_model.state_dict(), "SnakeNet_CNN_DQN.pt")
``` |
{
"source": "jjdabr/forecastNet",
"score": 3
} |
#### File: forecastNet/Pytorch/evaluate.py
```python
import numpy as np
import torch
from dataHelpers import format_input
from calculateError import calculate_error
def evaluate(fcstnet, test_x, test_y, return_lists=False):
"""
Calculate various error metrics on a test dataset
:param fcstnet: A forecastNet object defined by the class in forecastNet.py
:param test_x: Input test data in the form [encoder_seq_length, n_batches, input_dim]
    :param test_y: target data in the form [encoder_seq_length, n_batches, input_dim]
    :param return_lists: if True, return the per-sample error lists instead of their means
:return: mase: Mean absolute scaled error
    :return: smape: Symmetric mean absolute percentage error
:return: nrmse: Normalised root mean squared error
"""
fcstnet.model.eval()
# Load model parameters
checkpoint = torch.load(fcstnet.save_file, map_location=fcstnet.device)
fcstnet.model.load_state_dict(checkpoint['model_state_dict'])
fcstnet.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
with torch.no_grad():
if type(test_x) is np.ndarray:
test_x = torch.from_numpy(test_x).type(torch.FloatTensor)
if type(test_y) is np.ndarray:
test_y = torch.from_numpy(test_y).type(torch.FloatTensor)
# Format the inputs
test_x = format_input(test_x)
# Send to CPU/GPU
test_x = test_x.to(fcstnet.device)
test_y = test_y.to(fcstnet.device)
# Number of batch samples
n_samples = test_x.shape[0]
# Inference
y_pred_list = []
# Compute outputs for a mixture density network output
if fcstnet.model_type == 'dense' or fcstnet.model_type == 'conv':
n_forecasts = 20
for i in range(n_forecasts):
y_pred, mu, sigma = fcstnet.model(test_x, test_y, is_training=False)
y_pred_list.append(y_pred)
y_pred = torch.mean(torch.stack(y_pred_list), dim=0)
# Compute outputs for a linear output
elif fcstnet.model_type == 'dense2' or fcstnet.model_type == 'conv2':
y_pred = fcstnet.model(test_x, test_y, is_training=False)
mase_list = []
smape_list = []
nrmse_list = []
for i in range(n_samples):
mase, se, smape, nrmse = calculate_error(y_pred[:, i, :].cpu().numpy(), test_y[:, i, :].cpu().numpy())
mase_list.append(mase)
smape_list.append(smape)
nrmse_list.append(nrmse)
# writer.close()
mase = np.mean(mase_list)
smape = np.mean(smape_list)
nrmse = np.mean(nrmse_list)
if return_lists:
return np.ndarray.flatten(np.array(mase_list)), np.ndarray.flatten(np.array(smape_list)), np.ndarray.flatten(
np.array(nrmse_list))
else:
return mase, smape, nrmse
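# Example usage (a minimal sketch; fcstnet, test_x and test_y must already be defined):
# mase, smape, nrmse = evaluate(fcstnet, test_x, test_y)
# mase_l, smape_l, nrmse_l = evaluate(fcstnet, test_x, test_y, return_lists=True)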
``` |
{
"source": "jjdcorke/MeshGraphNets",
"score": 3
} |
#### File: MeshGraphNets/DataManagement/clean.py
```python
import os
import json
import sys
import shutil
def perform_clean():
save_dir = os.path.join(os.path.dirname(__file__), 'test2')
items = os.listdir(save_dir)
for item in items:
if item.endswith('.json'):
os.remove(os.path.join(save_dir, item))
items = [item for item in items if not item.endswith('.json')]
successes = 0
failures = 0
for item in items:
trajectory_dir = os.path.join(save_dir, item)
trajectory_items = os.listdir(trajectory_dir)
with open(os.path.join(trajectory_dir, 'conf.json'), 'r') as f:
config = json.load(f)
required_num = config['end_frame'] + 3
if len(trajectory_items) != required_num:
print(item, 'failed')
failures += 1
# delete the failed directory
shutil.rmtree(trajectory_dir)
else:
successes += 1
print('Total successes:', successes)
print('Total failures:', failures)
def main():
perform_clean()
if __name__ == "__main__":
main()
```
#### File: MeshGraphNets/Models/train_cfd.py
```python
import os
import pickle
import argparse
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
import common
import core_model
import cfd_model
from dataset import load_dataset_train
import datetime
gpus = tf.config.experimental.list_physical_devices('GPU')
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
def frame_to_graph(frame):
"""Builds input graph."""
# construct graph nodes
node_type = tf.one_hot(frame['node_type'][:, 0], common.NodeType.SIZE)
node_features = tf.concat([frame['velocity'], node_type], axis=-1)
# construct graph edges
senders, receivers = common.triangles_to_edges(frame['cells'])
relative_mesh_pos = (tf.gather(frame['mesh_pos'], senders) -
tf.gather(frame['mesh_pos'], receivers))
edge_features = tf.concat([
relative_mesh_pos,
tf.norm(relative_mesh_pos, axis=-1, keepdims=True)], axis=-1)
del frame['cells']
return node_features, edge_features, senders, receivers, frame
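# Sketch of what frame_to_graph consumes (shapes here are illustrative assumptions,
# not taken from a dataset spec): per-node 'velocity' vectors are concatenated with a
# one-hot 'node_type' to form node features, while 'cells' (triangle indices) is
# converted into directed sender/receiver edge lists whose features are the relative
# mesh positions plus their norms.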
def build_model(model, optimizer, dataset, checkpoint=None):
"""Initialize the model"""
node_features, edge_features, senders, receivers, frame = next(iter(dataset))
graph = core_model.MultiGraph(node_features, edge_sets=[core_model.EdgeSet(edge_features, senders, receivers)])
# call the model once to process all input shapes
model.loss(graph, frame)
# get the number of trainable parameters
total = 0
for var in model.trainable_weights:
total += np.prod(var.shape)
print(f'Total trainable parameters: {total}')
if checkpoint:
opt_weights = np.load(f'{checkpoint}_optimizer.npy', allow_pickle=True)
dummy_grads = [tf.zeros_like(w) for w in model.trainable_weights]
optimizer.apply_gradients(zip(dummy_grads, model.trainable_weights))
# only now set the weights of the optimizer and model
optimizer.set_weights(opt_weights)
model.load_weights(checkpoint, by_name=True)
def train(data_path=os.path.join(os.path.dirname(__file__), 'data', 'cylinder_flow'),num_steps=10000000, checkpoint = None):
dataset = load_dataset_train(
path=data_path,
split='train',
fields=['velocity'],
add_history=False,
noise_scale=0.02,
noise_gamma=1.0
)
dataset = dataset.map(frame_to_graph, num_parallel_calls=8)
dataset = dataset.prefetch(16)
model = core_model.EncodeProcessDecode(
output_dims=2,
embed_dims=128,
num_layers=3,
num_iterations=15,
num_edge_types=1
)
model = cfd_model.CFDModel(model)
lr = tf.keras.optimizers.schedules.ExponentialDecay(1e-4, decay_steps=num_steps // 2, decay_rate=0.1)
optimizer = Adam(learning_rate=lr)
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'Visualization/logs/train/' + current_time
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
# build the model
build_model(model, optimizer, dataset, checkpoint = checkpoint)
    #@tf.function(jit_compile=True)
@tf.function(experimental_relax_shapes=True)
def warmup(graph, frame):
loss = model.loss(graph, frame)
return loss
#@tf.function(jit_compile=True)
@tf.function(experimental_relax_shapes=True)
def train_step(graph, frame):
with tf.GradientTape() as tape:
loss = model.loss(graph, frame)
grads = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
return loss
dataset_iter = iter(dataset)
train_loop = tqdm(range(num_steps))
moving_loss = 0
for s in train_loop:
node_features, edge_features, senders, receivers, frame = next(dataset_iter)
graph = core_model.MultiGraph(node_features, edge_sets=[core_model.EdgeSet(edge_features, senders, receivers)])
if s < 1000:
loss = warmup(graph, frame)
else:
loss = train_step(graph, frame)
moving_loss = 0.98 * moving_loss + 0.02 * loss
if s%500 == 0:
with train_summary_writer.as_default():
tf.summary.scalar('loss',loss,step = s) #s for training session
train_loop.set_description(f'Step {s}/{num_steps}, Loss {moving_loss:.5f}')
if s != 0 and s % 50000 == 0:
filename = f'weights-step{s:07d}-loss{moving_loss:.5f}.hdf5'
model.save_weights(os.path.join(os.path.dirname(__file__), 'checkpoints_cfd_long', filename))
np.save(os.path.join(os.path.dirname(__file__), 'checkpoints_cfd_long', f'{filename}_optimizer.npy'), optimizer.get_weights())
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint", "-c", help="Path to checkpoint file used to resume training")
parser.add_argument("data_path", help="Path to dataset")
parser.add_argument("num_steps", type=int, help="Number of itterations to train (default :1e6)")
args = parser.parse_args()
train(args.data_path, num_steps=args.num_steps, checkpoint=args.checkpoint)
if __name__ == '__main__':
main()
```
#### File: MeshGraphNets/Visualization/plot_cfd.py
```python
import os
import pickle
import argparse
from pathlib import Path
import numpy as np
from matplotlib import animation
import matplotlib.pyplot as plt
from matplotlib import tri as mtri
def plot_cfd_color(data, filename):
figure =plt.figure(figsize = (12,7))
axis = figure.add_subplot(211)
axis2 = figure.add_subplot(212)
skip = 1
num_steps = data[0]['true_velocity'].shape[0]
num_frames = len(data) * num_steps // skip
# compute bounds
bounds = []
for trajectory in data:
bb_min = trajectory['true_velocity'].min(axis=(0, 1))
bb_max = trajectory['true_velocity'].max(axis=(0, 1))
bounds.append((bb_min, bb_max))
def animate(frame):
step = (frame*skip) % num_steps
traj = (frame*skip) // num_steps
axis.cla()
axis.set_aspect('equal')
axis.set_axis_off()
vmin, vmax = bounds[traj]
pos = data[traj]['mesh_pos'][step]
faces = data[traj]['cells'][step]
velocity = data[traj]['pred_velocity'][step]
triang = mtri.Triangulation(pos[:, 0], pos[:, 1], faces)
axis.tripcolor(triang, velocity[:, 0], vmin=vmin[0], vmax=vmax[0])
axis.triplot(triang, 'ko-', ms=0.5, lw=0.3)
axis.set_title("Predicted")
step = (frame*skip) % num_steps
traj = (frame*skip) // num_steps
axis2.cla()
axis2.set_aspect('equal')
axis2.set_axis_off()
vmin, vmax = bounds[traj]
pos = data[traj]['mesh_pos'][step]
faces = data[traj]['cells'][step]
velocity = data[traj]['true_velocity'][step]
triang = mtri.Triangulation(pos[:, 0], pos[:, 1], faces)
axis2.tripcolor(triang, velocity[:, 0], vmin=vmin[0], vmax=vmax[0])
axis2.triplot(triang, 'ko-', ms=0.5, lw=0.3)
axis2.set_title("Ground Truth")
figure.suptitle(f"Time step: {frame}")
return figure,
ani = animation.FuncAnimation(figure, animate, frames=num_frames, interval=30)
ani.save(filename)
def plot_cfd_quiver(data, filename):
figure =plt.figure(figsize = (12,7))
axis = figure.add_subplot(211)
axis2 = figure.add_subplot(212)
skip = 1
num_steps = data[0]['true_velocity'].shape[0]
num_frames = len(data) * num_steps // skip
# compute bounds
bounds = []
for trajectory in data:
bb_min = trajectory['true_velocity'].min(axis=(0, 1))
bb_max = trajectory['true_velocity'].max(axis=(0, 1))
bounds.append((bb_min, bb_max))
def animate(frame):
step = (frame*skip) % num_steps
traj = (frame*skip) // num_steps
axis.cla()
axis.set_aspect('equal')
axis.set_axis_off()
vmin, vmax = bounds[traj]
pos = data[traj]['mesh_pos'][step]
faces = data[traj]['cells'][step]
velocity = data[traj]['pred_velocity'][step]
triang = mtri.Triangulation(pos[:, 0], pos[:, 1], faces)
axis.quiver(pos[:, 0], pos[:, 1], velocity[:, 0], velocity[:, 1])
axis.triplot(triang, 'ko-', ms=0.5, lw=0.3)
axis.set_title("Predicted")
step = (frame*skip) % num_steps
traj = (frame*skip) // num_steps
axis2.cla()
axis2.set_aspect('equal')
axis2.set_axis_off()
vmin, vmax = bounds[traj]
pos = data[traj]['mesh_pos'][step]
faces = data[traj]['cells'][step]
velocity = data[traj]['true_velocity'][step]
triang = mtri.Triangulation(pos[:, 0], pos[:, 1], faces)
axis2.quiver(pos[:, 0], pos[:, 1], velocity[:, 0], velocity[:, 1])
axis2.triplot(triang, 'ko-', ms=0.5, lw=0.3)
axis2.set_title("Ground Truth")
figure.suptitle(f"Time step: {frame}")
return figure,
ani = animation.FuncAnimation(figure, animate, frames=num_frames, interval=30)
# plt.show()
ani.save(filename)
def generate_all():
results_path = os.path.join(os.path.dirname(__file__), '..', 'models', 'results', 'cfd', 'og_long-step4200000-loss0.01691.hdf5')
output_path = os.path.join(os.path.dirname(__file__), 'animations', 'cfd', 'og_long-step4200000-loss0.01691.hdf5')
Path(output_path).mkdir(parents=True, exist_ok=True)
for i in range(10, 100):
with open(os.path.join(results_path, f'{i:03d}.eval'), 'rb') as f:
data = pickle.load(f)
plot_cfd_color([data], os.path.join(output_path, f'c{i:03d}.mp4'))
plot_cfd_quiver([data], os.path.join(output_path, f'q{i:03d}.mp4'))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("datafile")
parser.add_argument("-o", "--output", help="output file")
args = parser.parse_args()
    if args.output is None:
args.output = "out.mp4"
with open(args.datafile, "rb") as f:
data = pickle.load(f)
    # plot_cfd_color expects a list of trajectories (see generate_all above)
    plot_cfd_color([data], args.output)
if __name__ == "__main__":
generate_all()
# main()
``` |
{
"source": "jjdejong/zenbot",
"score": 2
} |
#### File: genetic_algo/tests/test_integration.py
```python
import random
from subprocess import CalledProcessError
from unittest.mock import patch
from main import main
def myoutput(cmdline):
print(cmdline)
if random.random()<0.9:
return str(random.random()),str(random.random())
else:
raise CalledProcessError('a','b')
def test_integration():
with patch('evaluation.runzen',myoutput) as cmd:
with patch('evaluation.subprocess.check_output',myoutput):
main('gdax.BTC-ETH',120)
``` |
{
"source": "jjdelc/django-external-links",
"score": 2
} |
#### File: django-external-links/external_links/tests.py
```python
from urllib import quote, urlencode
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.template import Template, Context
from external_links.models import LinkClick
from external_links.templatetags.external_link_tags import ExternalLink
DESTINATION = 'http://example.com/?param=val&param2=val2'
class ExternalLinkTest(TestCase):
"""
Test External link
"""
def test_view(self):
clicks_count = LinkClick.objects.filter(link=DESTINATION).count()
client = Client()
external_url = reverse('external_link')
client.get(external_url, {'link': DESTINATION})
clicks_new_count = LinkClick.objects.filter(link=DESTINATION).count()
self.assertEqual(clicks_new_count - clicks_count, 1)
def test_ttag(self):
ctx = Context()
template = Template('{%% load external_link_tags %%}'
'{%% external "%s" %%}' % DESTINATION)
external_url = reverse('external_link')
params = urlencode({'link': DESTINATION})
self.assertEqual(template.render(ctx), external_url + '?' + params)
def test_blocktag(self):
external_link = ExternalLink([])
base = 'link1: <a href="%(link1)s" title="">hey</a>, <a href="%(link2)s">hoho</a> wee'
original_text = base % {
'link1': DESTINATION,
'link2': DESTINATION
}
external_url = reverse('external_link')
params = urlencode({'link': DESTINATION})
final_dest = external_url + '?' + params
final_text = base % {
'link1': final_dest,
'link2': final_dest,
}
self.assertEqual(final_text,
external_link.replace_links(original_text))
``` |
{
"source": "jjdelvalle/Merit-Order-Effect",
"score": 2
} |
#### File: Merit-Order-Effect/moepy/retrieval.py
```python
__all__ = ['query_API', 'dict_col_2_cols', 'clean_nested_dict_cols', 'set_dt_idx', 'create_df_dt_rng', 'clean_df_dts',
'retrieve_stream_df', 'check_streams', 'retrieve_streams_df', 'parse_A44_response', 'retreive_DAM_prices',
'parse_A75_response', 'retrieve_production']
# Cell
import json
import numpy as np
import pandas as pd
import os
import requests
import xmltodict
from datetime import date
from warnings import warn
from itertools import product
from dotenv import load_dotenv
from entsoe import EntsoePandasClient, EntsoeRawClient
# Cell
def query_API(start_date:str, end_date:str, stream:str, time_group='30m'):
"""
'Query API' makes the call to Electric Insights and returns the JSON response
Parameters:
start_date: Start date for data given as a string in the form '%Y-%m-%d'
end_date: End date for data given as a string in the form '%Y-%m-%d'
        stream: One of 'prices_ahead', 'prices', 'temperatures', 'emissions' or 'generation-mix'
time_group: One of '30m', '1h', '1d' or '7d'. The default is '30m'
"""
# Checking stream is an EI endpoint
possible_streams = ['prices_ahead', 'prices', 'temperatures', 'emissions', 'generation-mix']
assert stream in possible_streams, f"Stream must be one of {''.join([stream+', ' for stream in possible_streams])[:-2]}"
# Checking time_group will be accepted by API
possible_time_groups = ['30m', '1h', '1d', '7d']
assert time_group in possible_time_groups, f"Time group must be one of {''.join([time_group+', ' for time_group in possible_time_groups])[:-2]}"
# Formatting dates
format_dt = lambda dt: date.strftime(dt, '%Y-%m-%d') if isinstance(dt, date) else dt
start_date = format_dt(start_date)
end_date = format_dt(end_date)
# Running query and parsing response
response = requests.get(f'http://drax-production.herokuapp.com/api/1/{stream}?date_from={start_date}&date_to={end_date}&group_by={time_group}')
r_json = response.json()
return r_json
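# Example usage (a minimal sketch; the dates below are arbitrary placeholders):
# r_json = query_API('2020-01-01', '2020-01-07', 'prices', time_group='30m')
# r_json is the parsed JSON returned by the Electric Insights endpoint.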
# Cell
def dict_col_2_cols(df:pd.DataFrame, value_col='value'):
"""Checks the `value_col`, if it contains dictionaries these are transformed into new columns which then replace it"""
## Checks the value col is found in the dataframe
if value_col not in df.columns:
return df
if isinstance(df.loc[0, value_col], dict):
df_values = pd.DataFrame(df[value_col].to_dict()).T
df[df_values.columns] = df_values
df = df.drop(columns=[value_col])
return df
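# Example (hypothetical data): a 'value' column holding dicts is expanded in place.
# df = pd.DataFrame({'start': ['2020-01-01'], 'value': [{'gas': 10.2, 'wind': 5.1}]})
# dict_col_2_cols(df)  # -> columns 'gas' and 'wind' replace 'value'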
# Cell
def clean_nested_dict_cols(df):
"""Unpacks columns contining nested dictionaries"""
# Calculating columns that are still dictionaries
s_types = df.iloc[0].apply(lambda val: type(val))
cols_with_dicts = s_types[s_types == dict].index
while len(cols_with_dicts) > 0:
for col_with_dicts in cols_with_dicts:
# Extracting dataframes from dictionary columns
df = dict_col_2_cols(df, col_with_dicts)
# Recalculating columns that are still dictionaries
s_types = df.iloc[0].apply(lambda val: type(val))
cols_with_dicts = s_types[s_types == dict].index
return df
# Cell
def set_dt_idx(df:pd.DataFrame, idx_name='local_datetime'):
"""
Converts the start datetime to UK local time, then sets it as the index and removes the original datetime columns
"""
idx_dt = pd.DatetimeIndex(pd.to_datetime(df['start'], utc=True)).tz_convert('Europe/London')
idx_dt.name = idx_name
df.index = idx_dt
df = df.drop(columns=['start', 'end'])
return df
def create_df_dt_rng(start_date, end_date, freq='30T', tz='Europe/London', dt_str_template='%Y-%m-%d'):
"""
Creates a dataframe mapping between local datetimes and electricity market dates/settlement periods
"""
# Creating localised datetime index
s_dt_rng = pd.date_range(start_date, end_date, freq=freq, tz=tz)
s_dt_SP_count = pd.Series(0, index=s_dt_rng).resample('D').count()
# Creating SP column
SPs = []
for num_SPs in list(s_dt_SP_count):
SPs += list(range(1, num_SPs+1))
# Creating datetime dataframe
df_dt_rng = pd.DataFrame(index=s_dt_rng)
df_dt_rng.index.name = 'local_datetime'
# Adding query call cols
df_dt_rng['SP'] = SPs
df_dt_rng['date'] = df_dt_rng.index.strftime(dt_str_template)
return df_dt_rng
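# Example usage (dates are placeholders): builds a half-hourly local-time index with
# settlement-period numbers, e.g. SP runs 1-48 on a normal (non-clock-change) day.
# df_dt_rng = create_df_dt_rng('2020-01-01', '2020-01-07')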
def clean_df_dts(df):
"""Cleans the datetime index of the passed DataFrame"""
df = set_dt_idx(df)
df = df[~df.index.duplicated()]
df_dt_rng = create_df_dt_rng(df.index.min(), df.index.max())
df = df.reindex(df_dt_rng.index)
df['SP'] = df_dt_rng['SP'] # Adding settlement period designation
return df
# Cell
def retrieve_stream_df(start_date:str, end_date:str, stream:str, time_group='30m', renaming_dict={}):
"""
Makes the call to Electric Insights and parses the response into a dataframe which is returned
Parameters:
start_date: Start date for data given as a string in the form '%Y-%m-%d'
end_date: End date for data given as a string in the form '%Y-%m-%d'
        stream: One of 'prices_ahead', 'prices', 'temperatures', 'emissions' or 'generation-mix'
time_group: One of '30m', '1h', '1d' or '7d'. The default is '30m'
renaming_dict: Mapping from old to new column names
"""
# Calling data and parsing into dataframe
r_json = query_API(start_date, end_date, stream, time_group)
df = pd.DataFrame.from_dict(r_json)
    # Handling entries which are dictionaries
df = clean_nested_dict_cols(df)
# Setting index as localised datetime, reindexing with all intervals and adding SP
df = clean_df_dts(df)
# Renaming value col
if 'value' in df.columns:
df = df.rename(columns={'value':stream})
if 'referenceOnly' in df.columns:
df = df.drop(columns=['referenceOnly'])
df = df.rename(columns=renaming_dict)
return df
# Cell
def check_streams(streams='*'):
"""
Checks that the streams given are a list containing only possible streams, or is all streams - '*'.
"""
possible_streams = ['prices_ahead', 'prices', 'temperatures', 'emissions', 'generation-mix']
if isinstance(streams, list):
unrecognised_streams = list(set(streams) - set(possible_streams))
if len(unrecognised_streams) == 0:
return streams
else:
unrecognised_streams_2_print = ''.join(["'"+stream+"', " for stream in unrecognised_streams])[:-2]
raise ValueError(f"Streams {unrecognised_streams_2_print} could not be recognised, must be one of: {', '.join(possible_streams)}")
elif streams=='*':
return possible_streams
else:
raise ValueError(f"Streams could not be recognised, must be one of: {', '.join(possible_streams)}")
# Cell
def retrieve_streams_df(start_date:str, end_date:str, streams='*', time_group='30m', renaming_dict={}):
"""
Makes the calls to Electric Insights for the given streams and parses the responses into a dataframe which is returned
Parameters:
start_date: Start date for data given as a string in the form '%Y-%m-%d'
end_date: End date for data given as a string in the form '%Y-%m-%d'
        streams: A list containing any of 'prices_ahead', 'prices', 'temperatures', 'emissions' or 'generation-mix', or '*' for all streams
time_group: One of '30m', '1h', '1d' or '7d'. The default is '30m'
"""
df = pd.DataFrame()
streams = check_streams(streams)
for stream in streams:
df_stream = retrieve_stream_df(start_date, end_date, stream, renaming_dict=renaming_dict)
df[df_stream.columns] = df_stream
return df
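# Example usage (a sketch; dates are placeholders):
# df = retrieve_streams_df('2020-01-01', '2020-01-07', streams=['prices', 'emissions'])
# df is indexed by local_datetime and holds one column per retrieved stream plus 'SP'.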
# Cell
def parse_A44_response(r, freq='H', tz='UTC'):
"""Extracts the price time-series"""
s_price = pd.Series(dtype=float)
parsed_r = xmltodict.parse(r.text)
for timeseries in parsed_r['Publication_MarketDocument']['TimeSeries']:
dt_rng = pd.date_range(timeseries['Period']['timeInterval']['start'], timeseries['Period']['timeInterval']['end'], freq=freq, tz=tz)[:-1]
s_dt_price = pd.DataFrame(timeseries['Period']['Point'])['price.amount'].astype(float)
s_dt_price.index = dt_rng
s_price = s_price.append(s_dt_price)
assert s_price.index.duplicated().sum() == 0, 'There are duplicate date indexes'
return s_price
# Cell
def retreive_DAM_prices(dt_pairs, domain='10Y1001A1001A63L'):
"""Retrieves and collates the day-ahead prices for the specified date ranges"""
params = {
'documentType': 'A44',
'in_Domain': domain,
'out_Domain': domain
}
s_price = pd.Series(dtype=float)
for dt_pair in track(dt_pairs):
start = pd.Timestamp(dt_pair[0], tz='UTC')
end = pd.Timestamp(dt_pair[1], tz='UTC')
try:
r = client._base_request(params=params, start=start, end=end)
s_price_dt_rng = parse_A44_response(r)
s_price = s_price.append(s_price_dt_rng)
except:
warn(f"{start.strftime('%Y-%m-%d')} - {end.strftime('%Y-%m-%d')} failed")
return s_price
# Cell
def parse_A75_response(r, freq='15T', tz='UTC', warn_on_failure=False):
"""Extracts the production data by fuel-type from the JSON response"""
psr_code_to_type = {
'A03': 'Mixed',
'A04': 'Generation',
'A05': 'Load',
'B01': 'Biomass',
'B02': 'Fossil Brown coal/Lignite',
'B03': 'Fossil Coal-derived gas',
'B04': 'Fossil Gas',
'B05': 'Fossil Hard coal',
'B06': 'Fossil Oil',
'B07': 'Fossil Oil shale',
'B08': 'Fossil Peat',
'B09': 'Geothermal',
'B10': 'Hydro Pumped Storage',
'B11': 'Hydro Run-of-river and poundage',
'B12': 'Hydro Water Reservoir',
'B13': 'Marine',
'B14': 'Nuclear',
'B15': 'Other renewable',
'B16': 'Solar',
'B17': 'Waste',
'B18': 'Wind Offshore',
'B19': 'Wind Onshore',
'B20': 'Other',
'B21': 'AC Link',
'B22': 'DC Link',
'B23': 'Substation',
'B24': 'Transformer'
}
parsed_r = xmltodict.parse(r.text)
columns = [f'B{str(fuel_idx).zfill(2)}' for fuel_idx in np.arange(1, 24)]
index = pd.date_range(
parsed_r['GL_MarketDocument']['time_Period.timeInterval']['start'],
parsed_r['GL_MarketDocument']['time_Period.timeInterval']['end'],
freq=freq, tz=tz)[:-1]
df_production = pd.DataFrame(dtype=float, columns=columns, index=index)
for timeseries in parsed_r['GL_MarketDocument']['TimeSeries']:
try:
psr_type = timeseries['MktPSRType']['psrType']
dt_rng = pd.date_range(timeseries['Period']['timeInterval']['start'], timeseries['Period']['timeInterval']['end'], freq=freq, tz=tz)[:-1]
s_psr_type = pd.DataFrame(timeseries['Period']['Point'])['quantity'].astype(float)
s_psr_type.index = dt_rng
df_production[psr_type] = s_psr_type
except:
if warn_on_failure == True:
warn(f"{timeseries['Period']['timeInterval']['start']}-{timeseries['Period']['timeInterval']['start']} failed for {psr_type}")
assert df_production.index.duplicated().sum() == 0, 'There are duplicate date indexes'
df_production = df_production.dropna(how='all').dropna(how='all', axis=1)
df_production = df_production.rename(columns=psr_code_to_type)
return df_production
def retrieve_production(dt_pairs, domain='10Y1001A1001A63L', warn_on_failure=False):
"""Retrieves and collates the production data for the specified date ranges"""
params = {
'documentType': 'A75',
'processType': 'A16',
'in_Domain': domain
}
df_production = pd.DataFrame(dtype=float)
for dt_pair in track(dt_pairs):
start = pd.Timestamp(dt_pair[0], tz='UTC')
end = pd.Timestamp(dt_pair[1], tz='UTC')
try:
r = client._base_request(params=params, start=start, end=end)
df_production_dt_rng = parse_A75_response(r, warn_on_failure=warn_on_failure)
df_production = df_production.append(df_production_dt_rng)
except:
if warn_on_failure == True:
warn(f"{start.strftime('%Y-%m-%d')} - {end.strftime('%Y-%m-%d')} failed")
return df_production
``` |
{
"source": "jjdelvalle/transchikel",
"score": 3
} |
#### File: transchikel/app/prep_dataset.py
```python
from os import listdir
from os.path import isfile, join
import re
from math import floor
from math import ceil
from pydub import AudioSegment
import pandas as pd
DATA_PATH='../data/'
def main():
grid_files = [f for f in listdir(DATA_PATH) if isfile(join(DATA_PATH, f)) and f.endswith('.TextGrid')]
if len(grid_files) == 0:
return 1
time_regex = re.compile(r"[0-9\.]+")
file_dict = []
for grid_file in grid_files:
with open(join(DATA_PATH, grid_file), 'r', encoding='cp1252') as f:
audio_fname = join(DATA_PATH, grid_file[:grid_file.index('.')] + '.wav')
audio_file = AudioSegment.from_wav(audio_fname)
audio_file = audio_file.set_frame_rate(16000).set_channels(1)
init_t = None
end_t = None
text = ""
for line in f:
if 'xmin' in line:
init_t = float(time_regex.search(line).group(0)) * 1000
elif 'xmax' in line:
end_t = float(time_regex.search(line).group(0)) * 1000
elif 'text = ' in line and init_t is not None and end_t is not None:
text = line[(line.index('=') + 3):-2]
if len(text) < 5:
continue
temp_audio = audio_file[floor(init_t):ceil(end_t)]
filename = f"{grid_file[:(grid_file.index('.'))]}_{floor(init_t // 1000)}.wav"
temp_audio.export("../data/processed/" + filename, format="wav")
file_dict.append({'file': "../data/processed/" + filename, 'text': text})
init_t = None
end_t = None
else:
init_t = None
end_t = None
text = None
df = pd.DataFrame(file_dict, columns=['file', 'text'])
df['text'] = df['text'].str.strip(', "')
print(df)
df.to_csv("../data/processed/kaqchikel_dataset.csv")
if __name__ == '__main__':
main()
``` |
{
"source": "JJdeRidder/deid",
"score": 2
} |
#### File: deid/data/__init__.py
```python
from deid.utils import get_installdir
from deid.logger import bot
import os
data_base = os.path.dirname(os.path.abspath(__file__))
def get_dataset(dataset=None):
"""get_dataset will return some data provided by the application,
based on a user-provided label. In the future, we can add https endpoints
to retrieve online datasets.
"""
data_base = get_installdir()
valid_datasets = {
"dicom-cookies": os.path.join(data_base, "data", "dicom-cookies"),
"animals": os.path.join(data_base, "data", "animals"),
"humans": os.path.join(data_base, "data", "humans"),
"ultrasounds": os.path.join(data_base, "data", "ultrasounds"),
}
if dataset is not None:
# In case the user gave an extension
dataset = os.path.splitext(dataset)[0].lower()
if dataset in valid_datasets:
return valid_datasets[dataset]
bot.info("Valid datasets include: %s" % (",".join(list(valid_datasets.keys()))))
```
#### File: deid/dicom/header.py
```python
from deid.logger import bot
from deid.utils import read_json
from deid.config import DeidRecipe
from pydicom import read_file
from deid.dicom.utils import save_dicom
from deid.dicom.tags import remove_sequences, get_private
from deid.dicom.groups import extract_values_list, extract_fields_list
from deid.dicom.fields import get_fields
from deid.dicom.parser import DicomParser
import os
here = os.path.dirname(os.path.abspath(__file__))
def get_identifiers(
dicom_files, force=True, config=None, strip_sequences=False, remove_private=False
):
"""extract all identifiers from a dicom image.
This function returns a lookup by file name, where each value indexed
includes a dictionary of nested fields (indexed by nested tag).
Parameters
==========
dicom_files: the dicom file(s) to extract from
force: force reading the file (default True)
config: if None, uses default in provided module folder
strip_sequences: if True, remove all sequences
remove_private: remove private tags
"""
if config is None:
config = "%s/config.json" % here
if not os.path.exists(config):
bot.error("Cannot find config %s, exiting" % (config))
config = read_json(config, ordered_dict=True)["get"]
if not isinstance(dicom_files, list):
dicom_files = [dicom_files]
bot.debug("Extracting identifiers for %s dicom" % len(dicom_files))
lookup = dict()
# Parse each dicom file
for dicom_file in dicom_files:
parser = DicomParser(dicom_file, force=force)
lookup[parser.dicom_file] = parser.get_fields()
return lookup
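# Example usage (a minimal sketch; the path below is hypothetical):
# lookup = get_identifiers("/tmp/scans/image1.dcm")
# lookup["/tmp/scans/image1.dcm"] is a dict of the extracted header fields.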
def remove_private_identifiers(
dicom_files, save=True, overwrite=False, output_folder=None, force=True
):
"""remove_private_identifiers is a wrapper for the
simple call to dicom.remove_private_tags, it simply
reads in the files for the user and saves accordingly
"""
updated_files = []
if not isinstance(dicom_files, list):
dicom_files = [dicom_files]
for dicom_file in dicom_files:
dicom = read_file(dicom_file, force=force)
dicom.remove_private_tags()
dicom_name = os.path.basename(dicom_file)
bot.debug("Removed private identifiers for %s" % dicom_name)
if save:
dicom = save_dicom(
dicom=dicom,
dicom_file=dicom_file,
output_folder=output_folder,
overwrite=overwrite,
)
updated_files.append(dicom)
return updated_files
def replace_identifiers(
dicom_files,
ids=None,
deid=None,
save=False,
overwrite=False,
output_folder=None,
force=True,
config=None,
strip_sequences=False,
remove_private=False,
):
"""replace identifiers using pydicom, can be slow when writing
and saving new files. If you want to replace sequences, they need
to be extracted with get_identifiers and expand_sequences to True.
"""
if not isinstance(dicom_files, list):
dicom_files = [dicom_files]
# Warn the user that we use the default deid recipe
if not deid:
bot.warning("No deid specification provided, will use defaults.")
# ids (a lookup) is not required
ids = ids or {}
# Parse through dicom files, update headers, and save
updated_files = []
for dicom_file in dicom_files:
parser = DicomParser(dicom_file, force=force, config=config, recipe=deid)
# If a custom lookup was provided, update the parser
if parser.dicom_file in ids:
parser.lookup.update(ids[parser.dicom_file])
parser.parse(strip_sequences=strip_sequences, remove_private=remove_private)
# Save to file, otherwise return updated objects
if save is True:
ds = save_dicom(
dicom=parser.dicom,
dicom_file=parser.dicom_file,
output_folder=output_folder,
overwrite=overwrite,
)
updated_files.append(ds)
else:
updated_files.append(parser.dicom)
return updated_files
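# Example usage (a sketch; the recipe path and output folder are hypothetical):
# cleaned = replace_identifiers(dicom_files, deid="deid.dicom",
#                               save=True, output_folder="/tmp/deid-out")
# With save=False (the default) the updated pydicom objects are returned instead.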
```
#### File: deid/tests/test_dicom_utils.py
```python
import unittest
import tempfile
import shutil
import json
import os
from deid.utils import get_installdir
from deid.data import get_dataset
global generate_uid
class TestDicomUtils(unittest.TestCase):
def setUp(self):
self.pwd = get_installdir()
self.deid = os.path.abspath("%s/../examples/deid/deid.dicom" % self.pwd)
self.dataset = get_dataset("dicom-cookies")
self.tmpdir = tempfile.mkdtemp()
print("\n######################START######################")
def tearDown(self):
shutil.rmtree(self.tmpdir)
print("\n######################END########################")
def test_get_files(self):
print("Test test_get_files")
print("Case 1: Test get files from dataset")
from deid.dicom import get_files
from deid.config import load_deid
found = 0
for dicom_file in get_files(self.dataset):
found += 1
expected = 7
self.assertEqual(found, expected)
print("Case 2: Ask for files from empty folder")
found = 0
for dicom_file in get_files(self.tmpdir):
found += 1
expected = 0
self.assertEqual(found, expected)
def test_get_files_as_list(self):
print("Test test_get_files_as_list")
print("Case 1: Test get files from dataset")
from deid.dicom import get_files
from deid.config import load_deid
dicom_files = list(get_files(self.dataset))
found = len(dicom_files)
expected = 7
self.assertEqual(found, expected)
print("Case 2: Ask for files from empty folder")
dicom_files = list(get_files(self.tmpdir))
found = len(dicom_files)
expected = 0
self.assertEqual(found, expected)
def test_jitter_timestamp(self):
from deid.dicom.actions import jitter_timestamp
dicom = get_dicom(self.dataset)
print("Testing test_jitter_timestamp")
print("Case 1: Testing jitter_timestamp with DICOM Date (DA)")
dicom.StudyDate = "20131210"
dicom.data_element("StudyDate").VR = "DA"
jitter_timestamp(dicom, "StudyDate", 10)
expected = "20131220"
self.assertEqual(dicom.StudyDate, expected)
print("Case 2: Testing with DICOM timestamp (DT)")
dicom.AcquisitionDateTime = "20131210081530"
dicom.data_element("AcquisitionDateTime").VR = "DT"
jitter_timestamp(dicom, "AcquisitionDateTime", 10)
expected = "20131220081530.000000"
self.assertEqual(dicom.AcquisitionDateTime, expected)
print("Case 3: Testing with non-standard DICOM date (DA)")
dicom.StudyDate = "2013/12/10"
dicom.data_element("StudyDate").VR = "DA"
jitter_timestamp(dicom, "StudyDate", 10)
expected = "20131220"
self.assertEqual(dicom.StudyDate, expected)
print("Case 4: Testing negative jitter value")
dicom.StudyDate = "20131210"
jitter_timestamp(dicom, "StudyDate", -5)
expected = "20131205"
self.assertEqual(dicom.StudyDate, expected)
print("Case 5: Testing with empty field")
dicom.StudyDate = expected = ""
jitter_timestamp(dicom, "StudyDate", 10)
self.assertEqual(dicom.StudyDate, expected)
print("Case 6: Testing with nonexistent field")
del dicom.StudyDate
jitter_timestamp(dicom, "StudyDate", 10)
self.assertTrue("StudyDate" not in dicom)
def get_dicom(dataset):
"""helper function to load a dicom"""
from deid.dicom import get_files
from pydicom import read_file
dicom_files = get_files(dataset)
return read_file(next(dicom_files))
if __name__ == "__main__":
unittest.main()
```
#### File: deid/tests/Xtest_dicom_header.py
```python
import unittest
import tempfile
import shutil
import json
import os
from deid.utils import get_installdir
from deid.data import get_dataset
class TestDicomHeader(unittest.TestCase):
def setUp(self):
self.pwd = get_installdir()
self.deid = os.path.abspath("%s/../examples/deid/deid.dicom" % self.pwd)
self.dataset = get_dataset("dicom-cookies")
self.tmpdir = tempfile.mkdtemp()
print("\n######################START######################")
def tearDown(self):
shutil.rmtree(self.tmpdir)
print("\n######################END########################")
def test_get_fields(self):
print("Case 1: Test get fields from dataset")
from deid.dicom.header import get_fields
dicom = get_dicom(self.dataset)
fields = get_fields(dicom)
self.assertEqual(len(fields), 28)
self.assertTrue("PatientID" in fields)
def test_get_identifiers(self):
print("Testing deid.dicom get_identifiers")
from deid.dicom import get_identifiers
dicom_files = get_dicom(self.dataset, return_dir=True)
ids = get_identifiers(dicom_files)
self.assertTrue(len(ids) == 1)
self.assertTrue(isinstance(ids, dict))
self.assertEqual(len(ids["cookie-47"]), 7)
def test_replace_identifiers(self):
print("Testing deid.dicom replace_identifiers")
from deid.dicom import replace_identifiers
from deid.dicom import get_identifiers
from pydicom import read_file
dicom_files = get_dicom(self.dataset, return_dir=True)
ids = get_identifiers(dicom_files)
# Before blanking, 28 fields don't have blanks
notblanked = read_file(dicom_files[0])
notblanked_fields = [
x for x in notblanked.dir() if notblanked.get(x) != ""
] # 28
self.assertTrue(len(notblanked_fields) == 28)
updated_files = replace_identifiers(dicom_files, ids, output_folder=self.tmpdir)
# After replacing only 9 don't have blanks
blanked = read_file(updated_files[0])
blanked_fields = [x for x in blanked.dir() if blanked.get(x) != ""]
self.assertTrue(len(blanked_fields) == 9)
def get_dicom(dataset, return_dir=False):
"""helper function to load a dicom"""
from deid.dicom import get_files
from pydicom import read_file
dicom_files = get_files(dataset)
if return_dir:
return list(dicom_files)
return read_file(next(dicom_files))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "JJdeVries/aioasuswrt",
"score": 2
} |
#### File: aioasuswrt/tests/test_connection.py
```python
import pytest
from aioasuswrt.connection import TelnetConnection
from aioasuswrt.mocks import telnet_mock
from unittest import TestCase, mock
# @mock.patch(
# 'homeassistant.components.device_tracker.asuswrt.AsusWrtDeviceScanner',
# return_value=mock.MagicMock())
# def test_get_scanner_with_pubkey_no_password(self, asuswrt_mock):
# """Test creating an AsusWRT scanner with a pubkey and no password."""
# conf_dict = {
# device_tracker.DOMAIN: {
# CONF_PLATFORM: 'asuswrt',
# CONF_HOST: 'fake_host',
# CONF_USERNAME: 'fake_user',
# CONF_PUB_KEY: FAKEFILE,
# CONF_TRACK_NEW: True,
# CONF_CONSIDER_HOME: timedelta(seconds=180),
# CONF_NEW_DEVICE_DEFAULTS: {
# CONF_TRACK_NEW: True,
# CONF_AWAY_HIDE: False
# }
# }
# }
#
# with assert_setup_component(1, DOMAIN):
# assert setup_component(self.hass, DOMAIN, conf_dict)
#
# conf_dict[DOMAIN][CONF_MODE] = 'router'
# conf_dict[DOMAIN][CONF_PROTOCOL] = 'ssh'
# conf_dict[DOMAIN][CONF_PORT] = 22
# self.assertEqual(asuswrt_mock.call_count, 1)
# self.assertEqual(asuswrt_mock.call_args, mock.call(conf_dict[DOMAIN]))
#
# def test_ssh_login_with_pub_key(self):
# """Test that login is done with pub_key when configured to."""
# ssh = mock.MagicMock()
# ssh_mock = mock.patch('pexpect.pxssh.pxssh', return_value=ssh)
# ssh_mock.start()
# self.addCleanup(ssh_mock.stop)
# conf_dict = PLATFORM_SCHEMA({
# CONF_PLATFORM: 'asuswrt',
# CONF_HOST: 'fake_host',
# CONF_USERNAME: 'fake_user',
# CONF_PUB_KEY: FAKEFILE
# })
# update_mock = mock.patch(
# 'homeassistant.components.device_tracker.asuswrt.'
# 'AsusWrtDeviceScanner.get_asuswrt_data')
# update_mock.start()
# self.addCleanup(update_mock.stop)
# asuswrt = device_tracker.asuswrt.AsusWrtDeviceScanner(conf_dict)
# asuswrt.connection.run_command('ls')
# self.assertEqual(ssh.login.call_count, 1)
# self.assertEqual(
# ssh.login.call_args,
# mock.call('fake_host', 'fake_user', quiet=False,
# ssh_key=FAKEFILE, port=22)
# )
#
# def test_ssh_login_with_password(self):
# """Test that login is done with password when configured to."""
# ssh = mock.MagicMock()
# ssh_mock = mock.patch('pexpect.pxssh.pxssh', return_value=ssh)
# ssh_mock.start()
# self.addCleanup(ssh_mock.stop)
# conf_dict = PLATFORM_SCHEMA({
# CONF_PLATFORM: 'asuswrt',
# CONF_HOST: 'fake_host',
# CONF_USERNAME: 'fake_user',
# CONF_PASSWORD: '<PASSWORD>'
# })
# update_mock = mock.patch(
# 'homeassistant.components.device_tracker.asuswrt.'
# 'AsusWrtDeviceScanner.get_asuswrt_data')
# update_mock.start()
# self.addCleanup(update_mock.stop)
# asuswrt = device_tracker.asuswrt.AsusWrtDeviceScanner(conf_dict)
# asuswrt.connection.run_command('ls')
# self.assertEqual(ssh.login.call_count, 1)
# self.assertEqual(
# ssh.login.call_args,
# mock.call('fake_host', 'fake_user', quiet=False,
# password='<PASSWORD>', port=22)
# )
#
# def test_ssh_login_without_password_or_pubkey(self):
# """Test that login is not called without password or pub_key."""
# ssh = mock.MagicMock()
# ssh_mock = mock.patch('pexpect.pxssh.pxssh', return_value=ssh)
# ssh_mock.start()
# self.addCleanup(ssh_mock.stop)
#
# conf_dict = {
# CONF_PLATFORM: 'asuswrt',
# CONF_HOST: 'fake_host',
# CONF_USERNAME: 'fake_user',
# }
#
# with self.assertRaises(vol.Invalid):
# conf_dict = PLATFORM_SCHEMA(conf_dict)
#
# update_mock = mock.patch(
# 'homeassistant.components.device_tracker.asuswrt.'
# 'AsusWrtDeviceScanner.get_asuswrt_data')
# update_mock.start()
# self.addCleanup(update_mock.stop)
#
# with assert_setup_component(0, DOMAIN):
# assert setup_component(self.hass, DOMAIN,
# {DOMAIN: conf_dict})
# ssh.login.assert_not_called()
# def test_telnet_login_with_password(self):
# """Test that login is done with password when configured to."""
# telnet = mock.MagicMock()
# telnet_mock = mock.patch('telnetlib.Telnet', return_value=telnet)
# telnet_mock.start()
# self.addCleanup(telnet_mock.stop)
# conf_dict = PLATFORM_SCHEMA({
# CONF_PLATFORM: 'asuswrt',
# CONF_PROTOCOL: 'telnet',
# CONF_HOST: 'fake_host',
# CONF_USERNAME: 'fake_user',
# CONF_PASSWORD: '<PASSWORD>'
# })
# update_mock = mock.patch(
# 'homeassistant.components.device_tracker.asuswrt.'
# 'AsusWrtDeviceScanner.get_asuswrt_data')
# update_mock.start()
# self.addCleanup(update_mock.stop)
# asuswrt = device_tracker.asuswrt.AsusWrtDeviceScanner(conf_dict)
# asuswrt.connection.run_command('ls')
# self.assertEqual(telnet.read_until.call_count, 4)
# self.assertEqual(telnet.write.call_count, 3)
# self.assertEqual(
# telnet.read_until.call_args_list[0],
# mock.call(b'login: ')
# )
# self.assertEqual(
# telnet.write.call_args_list[0],
# mock.call(b'fake_user\n')
# )
# self.assertEqual(
# telnet.read_until.call_args_list[1],
# mock.call(b'Password: ')
# )
# self.assertEqual(
# telnet.write.call_args_list[1],
# mock.call(b'fake_pass\n')
# )
# self.assertEqual(
# telnet.read_until.call_args_list[2],
# mock.call(b'#')
# )
#
# def test_telnet_login_without_password(self):
# """Test that login is not called without password or pub_key."""
# telnet = mock.MagicMock()
# telnet_mock = mock.patch('telnetlib.Telnet', return_value=telnet)
# telnet_mock.start()
# self.addCleanup(telnet_mock.stop)
#
# conf_dict = {
# CONF_PLATFORM: 'asuswrt',
# CONF_PROTOCOL: 'telnet',
# CONF_HOST: 'fake_host',
# CONF_USERNAME: 'fake_user',
# }
#
# with self.assertRaises(vol.Invalid):
# conf_dict = PLATFORM_SCHEMA(conf_dict)
#
# update_mock = mock.patch(
# 'homeassistant.components.device_tracker.asuswrt.'
# 'AsusWrtDeviceScanner.get_asuswrt_data')
# update_mock.start()
# self.addCleanup(update_mock.stop)
#
# with assert_setup_component(0, DOMAIN):
# assert setup_component(self.hass, DOMAIN,
# {DOMAIN: conf_dict})
# telnet.login.assert_not_called()
#
#
# @pytest.mark.skip(
# reason="These tests are performing actual failing network calls. They "
# "need to be cleaned up before they are re-enabled. They're frequently "
# "failing in Travis.")
# class TestSshConnection(TestCase):
# """Testing SshConnection."""
#
# def setUp(self):
# """Set up test env."""
# self.connection = SshConnection(
# 'fake', 'fake', 'fake', 'fake', 'fake')
# self.connection._connected = True
#
# def test_run_command_exception_eof(self):
# """Testing exception in run_command."""
# from pexpect import exceptions
# self.connection._ssh = mock.Mock()
# self.connection._ssh.sendline = mock.Mock()
# self.connection._ssh.sendline.side_effect = exceptions.EOF('except')
# self.connection.run_command('test')
# self.assertFalse(self.connection._connected)
# self.assertIsNone(self.connection._ssh)
#
# def test_run_command_exception_pxssh(self):
# """Testing exception in run_command."""
# from pexpect import pxssh
# self.connection._ssh = mock.Mock()
# self.connection._ssh.sendline = mock.Mock()
# self.connection._ssh.sendline.side_effect = pxssh.ExceptionPxssh(
# 'except')
# self.connection.run_command('test')
# self.assertFalse(self.connection._connected)
# self.assertIsNone(self.connection._ssh)
#
# def test_run_command_assertion_error(self):
# """Testing exception in run_command."""
# self.connection._ssh = mock.Mock()
# self.connection._ssh.sendline = mock.Mock()
# self.connection._ssh.sendline.side_effect = AssertionError('except')
# self.connection.run_command('test')
# self.assertFalse(self.connection._connected)
# self.assertIsNone(self.connection._ssh)
class TestTelnetConnection(TestCase):
"""Testing TelnetConnection."""
def setUp(self):
"""Set up test env."""
self.connection = TelnetConnection(
'fake', 'fake', 'fake', 'fake')
# self.connection._connected = True
self.connection._prompt_string = ""
def test_determine_linelength_inf(self):
""" Test input for infinite breakline length."""
# An input without newlines results in infinite linebreak
# The input string is shorter than the limit
for i in (15, 50):
input_bytes = (" " * i).encode('ascii')
self.connection._determine_linebreak(input_bytes)
self.assertEqual(self.connection._linebreak, float('inf'))
def test_determine_linelength(self):
for i in (15, 50):
input_bytes = (" " * i + "\n" + " " * 5).encode('ascii')
self.connection._determine_linebreak(input_bytes)
self.assertEqual(self.connection._linebreak, i)
# And now with some more lines
input_bytes = ((" " * i + "\n") * 3 + " " * 5).encode('ascii')
self.connection._determine_linebreak(input_bytes)
self.assertEqual(self.connection._linebreak, i)
# And with a prompt string
prompt = "test_string"
input_bytes = "a" * (i - len(prompt)) + "\n" + "a" * 5
self.connection._prompt_string = prompt
self.connection._determine_linebreak(input_bytes.encode('ascii'))
self.assertEqual(self.connection._linebreak, i)
self.connection._prompt_string = ""
@pytest.mark.asyncio
async def test_sending_cmds():
with mock.patch('asyncio.open_connection',
new=telnet_mock.open_connection):
# Let's set a short linebreak of 10
telnet_mock.set_linebreak(22)
connection = TelnetConnection('fake', 'fake', 'fake', 'fake')
await connection.async_connect()
# Now let's send some arbitrary short command
exp_ret_val = "Some arbitrary long return string." + "." * 100
telnet_mock.set_return(exp_ret_val)
new_return = await connection.async_run_command("run command\n")
assert new_return[0] == exp_ret_val
# @pytest.mark.skip(
# reason="These tests are performing actual failing network calls. They "
# "need to be cleaned up before they are re-enabled. They're frequently "
# "failing in Travis.")
# def test_run_command_exception_eof(self):
# """Testing EOFException in run_command."""
# self.connection._telnet = mock.Mock()
# self.connection._telnet.write = mock.Mock()
# self.connection._telnet.write.side_effect = EOFError('except')
# self.connection.run_command('test')
# self.assertFalse(self.connection._connected)
#
# @pytest.mark.skip(
# reason="These tests are performing actual failing network calls. They "
# "need to be cleaned up before they are re-enabled. They're frequently "
# "failing in Travis.")
# def test_run_command_exception_connection_refused(self):
# """Testing ConnectionRefusedError in run_command."""
# self.connection._telnet = mock.Mock()
# self.connection._telnet.write = mock.Mock()
# self.connection._telnet.write.side_effect = ConnectionRefusedError(
# 'except')
# self.connection.run_command('test')
# self.assertFalse(self.connection._connected)
#
# @pytest.mark.skip(
# reason="These tests are performing actual failing network calls. They "
# "need to be cleaned up before they are re-enabled. They're frequently "
# "failing in Travis.")
# def test_run_command_exception_gaierror(self):
# """Testing socket.gaierror in run_command."""
# self.connection._telnet = mock.Mock()
# self.connection._telnet.write = mock.Mock()
# self.connection._telnet.write.side_effect = socket.gaierror('except')
# self.connection.run_command('test')
# self.assertFalse(self.connection._connected)
#
# @pytest.mark.skip(
# reason="These tests are performing actual failing network calls. They "
# "need to be cleaned up before they are re-enabled. They're frequently "
# "failing in Travis.")
# def test_run_command_exception_oserror(self):
# """Testing OSError in run_command."""
# self.connection._telnet = mock.Mock()
# self.connection._telnet.write = mock.Mock()
# self.connection._telnet.write.side_effect = OSError('except')
# self.connection.run_command('test')
# self.assertFalse(self.connection._connected)
```
#### File: aioasuswrt/tests/test_regex.py
```python
import asyncio
import pytest
from aioasuswrt.asuswrt import (AsusWrt, _LEASES_CMD, _WL_CMD, _IP_NEIGH_CMD,
_ARP_CMD, Device, _RX_COMMAND, _TX_COMMAND,
_TEMP_CMD, _LOADAVG_CMD, _MEMINFO_CMD,
_NETDEV_CMD)
RX_DATA = ["2703926881", ""]
TX_DATA = ["648110137", ""]
RX = 2703926881
TX = 648110137
TEMP_DATA = [
'59 (0x3b)\r',
'69 (0x45)\r',
'CPU temperature : 77'
]
NETDEV_DATA = [
'nter-| Receive | Transmit',
' face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed',
' lo: 129406077 639166 0 0 0 0 0 0 129406077 639166 0 0 0 0 0 0',
' ifb0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
' ifb1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
' fwd0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
' fwd1: 0 32991574 0 0 0 0 0 0 2758131447 21323444 0 0 0 0 0 0',
' agg: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
' eth0: 1376394855 180111514 0 0 0 0 0 0 896208608 161258260 0 0 0 0 0 0',
' dpsta: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
' eth1: 240050447 1451957 0 0 0 0 0 47377 2112087504 43036729 0 26277918 0 0 0 0',
' eth2: 0 0 0 0 0 0 0 0 3283428721 33007901 0 2 0 0 0 0',
' vlan1: 35966691832 80394316 0 0 0 0 0 91875 29563557562 53006688 0 0 0 0 0 0',
' vlan2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
' br0: 4643330713 15198823 0 0 0 0 0 0 5699827990 13109400 0 0 0 0 0 0',
' wl0.1: 72308780 385338 0 0 0 0 0 7706 311596615 4150488 0 199907 0 0 0 0',
'ds0.1: 0 0 0 0 0 0 0 0 102404809 805208 0 0 0 0 0 0',
' tun21: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
]
INTERFACES_COUNT = {
'lo': {'tx_bytes': 129406077, 'tx_packets': 639166, 'tx_errs': 0,
'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0, 'rx_bytes': 129406077, 'rx_packets': 639166,
'rx_errs': 0, 'rx_drop': 0, 'rx_fifo': 0, 'rx_colls': 0,
'rx_carrier': 0, 'rx_compressed': 0},
'ifb0': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0,
'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
'rx_compressed': 0},
'ifb1': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0,
'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
'rx_compressed': 0},
'fwd0': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0,
'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
'rx_compressed': 0},
'fwd1': {'tx_bytes': 0, 'tx_packets': 32991574, 'tx_errs': 0,
'tx_drop': 0,
'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0,
'rx_bytes': 2758131447, 'rx_packets': 21323444, 'rx_errs': 0,
'rx_drop': 0, 'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
'rx_compressed': 0},
'agg': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0,
'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
'rx_compressed': 0},
'eth0': {'tx_bytes': 1376394855, 'tx_packets': 180111514, 'tx_errs': 0,
'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0, 'rx_bytes': 896208608,
'rx_packets': 161258260,
'rx_errs': 0, 'rx_drop': 0, 'rx_fifo': 0, 'rx_colls': 0,
'rx_carrier': 0, 'rx_compressed': 0},
'dpsta': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0,
'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
'rx_compressed': 0},
'eth1': {'tx_bytes': 240050447, 'tx_packets': 1451957, 'tx_errs': 0,
'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 47377, 'rx_bytes': 2112087504,
'rx_packets': 43036729, 'rx_errs': 0, 'rx_drop': 26277918,
'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
'rx_compressed': 0},
'eth2': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0,
'rx_bytes': 3283428721, 'rx_packets': 33007901, 'rx_errs': 0,
'rx_drop': 2, 'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
'rx_compressed': 0},
'vlan1': {'tx_bytes': 35966691832, 'tx_packets': 80394316,
'tx_errs': 0,
'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0,
'tx_compressed': 0,
'tx_multicast': 91875, 'rx_bytes': 29563557562,
'rx_packets': 53006688, 'rx_errs': 0, 'rx_drop': 0,
'rx_fifo': 0,
'rx_colls': 0, 'rx_carrier': 0, 'rx_compressed': 0},
'vlan2': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0,
'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
'rx_compressed': 0},
'br0': {'tx_bytes': 4643330713, 'tx_packets': 15198823, 'tx_errs': 0,
'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0, 'rx_bytes': 5699827990,
'rx_packets': 13109400,
'rx_errs': 0, 'rx_drop': 0, 'rx_fifo': 0, 'rx_colls': 0,
'rx_carrier': 0, 'rx_compressed': 0},
'wl0.1': {'tx_bytes': 72308780, 'tx_packets': 385338, 'tx_errs': 0,
'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0,
'tx_compressed': 0,
'tx_multicast': 7706, 'rx_bytes': 311596615,
'rx_packets': 4150488,
'rx_errs': 0, 'rx_drop': 199907, 'rx_fifo': 0, 'rx_colls': 0,
'rx_carrier': 0, 'rx_compressed': 0},
'ds0.1': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
'tx_multicast': 0,
'rx_bytes': 102404809, 'rx_packets': 805208, 'rx_errs': 0,
'rx_drop': 0, 'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
'rx_compressed': 0}
}
LOADAVG_DATA = [
'0.23 0.50 0.68 2/167 13095'
]
MEMINFO_DATA = [
'0.46 0.75 0.77 1/165 2609'
]
WL_DATA = [
'assoclist 01:02:03:04:06:08\r',
'assoclist 08:09:10:11:12:14\r',
'assoclist 08:09:10:11:12:15\r',
'assoclist AB:CD:DE:AB:CD:EF\r'
]
WL_DEVICES = {
'01:02:03:04:06:08': Device(
mac='01:02:03:04:06:08', ip=None, name=None),
'08:09:10:11:12:14': Device(
mac='08:09:10:11:12:14', ip=None, name=None),
'08:09:10:11:12:15': Device(
mac='08:09:10:11:12:15', ip=None, name=None),
'AB:CD:DE:AB:CD:EF': Device(
mac='AB:CD:DE:AB:CD:EF', ip=None, name=None)
}
ARP_DATA = [
'? (172.16.17.32) at 01:02:03:04:06:08 [ether] on eth0\r',
'? (172.16.58.3) at 08:09:10:11:12:14 [ether] on br0\r',
'? (192.168.127.12) at AB:CD:DE:AB:CD:EF [ether] on br0\r',
'? (172.16.31.10) at <incomplete> on br0\r',
'? (172.16.10.2) at 00:25:90:12:2D:90 [ether] on br0\r',
]
ARP_DEVICES = {
'01:02:03:04:06:08': Device(
mac='01:02:03:04:06:08', ip='172.16.17.32', name=None),
'08:09:10:11:12:14': Device(
mac='08:09:10:11:12:14', ip='172.16.58.3', name=None),
'AB:CD:DE:AB:CD:EF': Device(
mac='AB:CD:DE:AB:CD:EF', ip='192.168.127.12', name=None),
'00:25:90:12:2D:90': Device(
mac='00:25:90:12:2D:90', ip='172.16.10.2', name=None)
}
NEIGH_DATA = [
'172.16.17.32 dev eth0 lladdr 01:02:03:04:06:08 REACHABLE\r',
'172.16.58.3 dev br0 lladdr 08:09:10:11:12:14 REACHABLE\r',
'192.168.127.12 dev br0 lladdr ab:cd:de:ab:cd:ef REACHABLE\r',
'172.16.31.10 dev br0 FAILED\r',
'172.16.17.32 dev br0 lladdr 08:09:15:15:15:15 DELAY\r',
'fe80::feff:a6ff:feff:12ff dev br0 lladdr fc:ff:a6:ff:12:ff STALE\r',
]
NEIGH_DEVICES = {
'01:02:03:04:06:08': Device(
mac='01:02:03:04:06:08', ip='172.16.17.32', name=None),
'08:09:10:11:12:14': Device(
mac='08:09:10:11:12:14', ip='172.16.58.3', name=None),
'AB:CD:DE:AB:CD:EF': Device(
mac='AB:CD:DE:AB:CD:EF', ip='192.168.127.12', name=None)
}
LEASES_DATA = [
'51910 01:02:03:04:06:08 172.16.17.32 TV 01:02:03:04:06:08\r',
'79986 01:02:03:04:06:10 172.16.31.10 android 01:02:03:04:06:15\r',
'23523 08:09:10:11:12:14 172.16.58.3 * 08:09:10:11:12:14\r',
]
LEASES_DEVICES = {
'01:02:03:04:06:08': Device(
mac='01:02:03:04:06:08', ip='172.16.17.32', name='TV'),
'08:09:10:11:12:14': Device(
mac='08:09:10:11:12:14', ip='172.16.58.3', name='')
}
WAKE_DEVICES = {
'01:02:03:04:06:08': Device(
mac='01:02:03:04:06:08', ip='172.16.17.32', name='TV'),
'08:09:10:11:12:14': Device(
mac='08:09:10:11:12:14', ip='172.16.58.3', name=''),
'00:25:90:12:2D:90': Device(
mac='00:25:90:12:2D:90', ip='172.16.10.2', name=None)
}
WAKE_DEVICES_AP = {
'01:02:03:04:06:08': Device(
mac='01:02:03:04:06:08', ip='172.16.17.32', name=None),
'08:09:10:11:12:14': Device(
mac='08:09:10:11:12:14', ip='172.16.58.3', name=None),
'AB:CD:DE:AB:CD:EF': Device(
mac='AB:CD:DE:AB:CD:EF', ip='192.168.127.12', name=None),
'00:25:90:12:2D:90': Device(
mac='00:25:90:12:2D:90', ip='172.16.10.2', name=None)
}
WAKE_DEVICES_NO_IP = {
'01:02:03:04:06:08': Device(
mac='01:02:03:04:06:08', ip='172.16.17.32', name=None),
'08:09:10:11:12:14': Device(
mac='08:09:10:11:12:14', ip='172.16.58.3', name=None),
'08:09:10:11:12:15': Device(
mac='08:09:10:11:12:15', ip=None, name=None),
'AB:CD:DE:AB:CD:EF': Device(
mac='AB:CD:DE:AB:CD:EF', ip='192.168.127.12', name=None),
'00:25:90:12:2D:90': Device(
mac='00:25:90:12:2D:90', ip='172.16.10.2', name=None)
}
def RunCommandMock(command, *args, **kwargs):
print(command, *args, **kwargs)
f = asyncio.Future()
if command == _WL_CMD:
f.set_result(WL_DATA)
return f
if command == _LEASES_CMD.format('/var/lib/misc'):
f.set_result(LEASES_DATA)
return f
if command == _IP_NEIGH_CMD:
f.set_result(NEIGH_DATA)
return f
if command == _ARP_CMD:
f.set_result(ARP_DATA)
return f
if command == _RX_COMMAND.format('eth0'):
f.set_result(RX_DATA)
return f
if command == _TX_COMMAND.format('eth0'):
f.set_result(TX_DATA)
return f
if command == _TEMP_CMD:
f.set_result(TEMP_DATA)
return f
if command == _LOADAVG_CMD:
f.set_result(LOADAVG_DATA)
return f
if command == _MEMINFO_CMD:
f.set_result(MEMINFO_DATA)
return f
if command == _NETDEV_CMD:
f.set_result(NETDEV_DATA)
return f
raise Exception("Unhandled command: %s" % command)
def RunCommandEmptyMock(command, *args, **kwargs):
f = asyncio.Future()
f.set_result("")
return f
@pytest.mark.asyncio
async def test_get_wl(event_loop, mocker):
"""Testing wl."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandMock)
scanner = AsusWrt(host="localhost", port=22)
devices = await scanner.async_get_wl()
assert WL_DEVICES == devices
@pytest.mark.asyncio
async def test_get_wl_empty(event_loop, mocker):
"""Testing wl."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandEmptyMock)
scanner = AsusWrt(host="localhost", port=22)
devices = await scanner.async_get_wl()
assert {} == devices
@pytest.mark.asyncio
async def test_async_get_leases(event_loop, mocker):
"""Testing leases."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandMock)
scanner = AsusWrt(host="localhost", port=22)
data = await scanner.async_get_leases(NEIGH_DEVICES.copy())
assert LEASES_DEVICES == data
@pytest.mark.asyncio
async def test_get_arp(event_loop, mocker):
"""Testing arp."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandMock)
scanner = AsusWrt(host="localhost", port=22)
data = await scanner.async_get_arp()
assert ARP_DEVICES == data
@pytest.mark.asyncio
async def test_get_neigh(event_loop, mocker):
"""Testing neigh."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandMock)
scanner = AsusWrt(host="localhost", port=22)
data = await scanner.async_get_neigh(NEIGH_DEVICES.copy())
assert NEIGH_DEVICES == data
@pytest.mark.asyncio
async def test_get_connected_devices_ap(event_loop, mocker):
"""Test for get asuswrt_data in ap mode."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandMock)
scanner = AsusWrt(host="localhost", port=22, mode='ap', require_ip=True)
data = await scanner.async_get_connected_devices()
assert WAKE_DEVICES_AP == data
@pytest.mark.asyncio
async def test_get_connected_devices_no_ip(event_loop, mocker):
"""Test for get asuswrt_data and not requiring ip."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandMock)
scanner = AsusWrt(host="localhost", port=22, mode='ap', require_ip=False)
data = await scanner.async_get_connected_devices()
assert WAKE_DEVICES_NO_IP == data
@pytest.mark.asyncio
async def test_get_packets_total(event_loop, mocker):
"""Test getting packet totals."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandMock)
scanner = AsusWrt(host="localhost", port=22, mode='ap', require_ip=False)
data = await scanner.async_get_tx()
assert TX == data
data = await scanner.async_get_rx()
assert RX == data
@pytest.mark.asyncio
async def test_async_get_temperature(event_loop, mocker):
"""Test getting temperature."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandMock)
scanner = AsusWrt(host="localhost", port=22, mode='ap', require_ip=False)
data = await scanner.async_get_temperature()
assert data == {'2.4GHz': 49.5, '5.0GHz': 54.5, 'CPU': 77.0}
@pytest.mark.asyncio
async def test_async_get_loadavg(event_loop, mocker):
"""Test getting loadavg."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandMock)
scanner = AsusWrt(host="localhost", port=22, mode='ap', require_ip=False)
data = await scanner.async_get_loadavg()
assert data == [0.23, 0.5, 0.68]
@pytest.mark.asyncio
async def test_async_get_interfaces_counts(event_loop, mocker):
"""Test getting loadavg."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandMock)
scanner = AsusWrt(host="localhost", port=22, mode='ap', require_ip=False)
data = await scanner.async_get_interfaces_counts()
assert data == INTERFACES_COUNT
# @pytest.mark.asyncio
# async def test_async_get_meminfo(event_loop, mocker):
# """Test getting meminfo."""
# mocker.patch(
# 'aioasuswrt.connection.SshConnection.async_run_command',
# side_effect=RunCommandMock)
# scanner = AsusWrt(host="localhost", port=22, mode='ap', require_ip=False)
# data = await scanner.async_get_meminfo()
# assert data == []
``` |
{
"source": "jjdmeier/python_utils",
"score": 3
} |
#### File: python_utils/google_utils/Gmail.py
```python
import os
import re
import sys
import time
import uuid
import base64
import httplib2
from email import message
from mimetypes import MimeTypes
from posixpath import expanduser
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
"""
Gmail: Class for interacting with a gmail account programmatically
- TODO: constructor - Do not auth in the constructor. Not generic enough.
Make the program that is using it set the application name and secrets
file path and call auth explicitly.
"""
class Gmail:
"""
Gmail(): constructor
returns:
Gmail class object
params:
scopes: String - google developer scope. Example: 'https://mail.google.com/'
client_secret_file_path: String - path to google creds json from google developer account
application_name: String - google developer application name
"""
def __init__(
self,
scopes = 'https://mail.google.com/',
client_secret_file_path = './client_secrets.json',
application_name = '',
):
self.scopes = scopes
self.client_secret_file_path = client_secret_file_path
self.application_name = application_name
self.message_ids = []
self.message_contents = []
credentials = self.get_credentials()
http = credentials.authorize(httplib2.Http())
self.service = discovery.build('gmail', 'v1', http=http)
"""
Gmail(): get_credentials - checks if credentials already exist, if not save them to credential
params:
returns:
credentials for oauth2client
"""
def get_credentials(self):
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail-python-quickstart.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(self.client_secret_file_path, self.scopes)
flow.user_agent = self.application_name
credentials = tools.run_flow(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
"""
Gmail(): create_message - Create a message for an email.
params:
to: String - Email address of the receiver.
subject: String - The subject of the email message.
message_text: String - The text of the email message.
returns:
Dictionary (object): - email safe message string stored in item "raw"
"""
def create_message(self, to, subject, message_text):
message = MIMEText(message_text)
message['To'] = to
message['Subject'] = subject
raw_message = base64.urlsafe_b64encode(message.as_string().encode("utf-8"))
return {
'raw': raw_message.decode("utf-8")
}
"""
Gmail(): create_message_with_attachment - Create a message for an email.
params:
to: String - Email address of the receiver.
subject: String - The subject of the email message.
message_text: String - The text of the email message.
file: String - The path to the file to be attached.
returns:
An object containing a base64url encoded email object.
"""
    def create_message_with_attachment(self, to, subject, message_text, file):
message = MIMEMultipart()
message['To'] = to
message['Subject'] = subject
msg = MIMEText(message_text)
message.attach(msg)
mime_types = MimeTypes()
content_type, encoding = mime_types.guess_type(file)
if content_type is None or encoding is not None:
content_type = 'application/octet-stream'
main_type, sub_type = content_type.split('/', 1)
if main_type == 'text':
fp = open(file, 'rb')
msg = MIMEText(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'image':
fp = open(file, 'rb')
msg = MIMEImage(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'audio':
fp = open(file, 'rb')
msg = MIMEAudio(fp.read(), _subtype=sub_type)
fp.close()
else:
fp = open(file, 'rb')
msg = MIMEBase(main_type, sub_type)
msg.set_payload(fp.read())
fp.close()
filename = os.path.basename(file)
msg.add_header('Content-Disposition', 'attachment', filename=filename)
message.attach(msg)
        return {'raw': base64.urlsafe_b64encode(message.as_string().encode("utf-8")).decode("utf-8")}
"""
Gmail(): send_message - Send an email message.
params:
message: Message object - Message to be sent.
returns:
Dictionary (object): Sent message object
"""
def send_message(self, message):
try:
message = (self.service.users().messages().send(userId='me', body=message)
.execute())
print("Sent message id: {}".format(message.get('id')))
return message
except Exception as e:
raise Exception("Error: issue occured while sending message: {}".format(e))
"""
    Gmail(): pull_and_set_message_ids - loop through max_results number of message ids and set class variable message_ids equal to the ids
params:
max_results: Integer - number of (most recent) emails to pull and set ids for
returns:
"""
def pull_and_set_message_ids(self, max_results=5):
self.message_ids = []
result = self.service.users().messages().list(userId='me', maxResults=max_results).execute()
messages = result.get('messages')
for message in messages:
if message.get("id"):
self.message_ids.append(message.get("id"))
"""
Gmail(): pull_and_set_message_contents_from_message_ids - loop through class variable message_ids and set class variable relevant message_contents
params:
        inbox: String - Ensure message came from a specific inbox
users: List - Ensure message came from a specific email address
returns:
"""
def pull_and_set_message_contents_from_message_ids(self, inbox="INBOX", users=[]):
self.message_contents = []
for message_id in self.message_ids:
self.message_contents.append(self.get_message_content(message_id=message_id, inbox=inbox, users=users))
"""
Gmail(): save_attachment_from_message_id - pull relevant message using its id and download attachment to specified path
params:
message_id: String - message id provided by Google API
path_for_attachment: String - path to where the file will be saved. Defaults to current directory.
returns:
"""
def save_attachment_from_message_id(self, message_id, path_for_attachment=".", avoid_overwrite=True):
message = {}
try:
message = self.service.users().messages().get(userId='me', id=message_id).execute()
except Exception as e:
raise Exception("Error: unable to get messageId through google API call: {}".format(e))
message_parts = message.get("payload").get("parts")
if message_parts:
for part in message_parts:
if part.get('filename'):
if 'data' in part.get('body'):
data = part.get('body').get('data')
else:
att_id = part.get('body').get('attachmentId')
try:
att = self.service.users().messages().attachments().get(userId='me', messageId=message_id, id=att_id).execute()
except Exception as e:
raise Exception("Error: unable to get attachmentId from messageId through google API call: {}".format(e))
data = att.get('data')
if data:
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
if avoid_overwrite:
path = path_for_attachment+"/"+str(uuid.uuid4())+"-"+part['filename']
else:
path = path_for_attachment+"/"+part['filename']
with open(path, 'wb') as f:
f.write(file_data)
else:
raise Exception("Error: data not found for attachment in message that contains filename")
"""
Gmail(): decode_base64 - Decode base64, padding being optional.
params:
data: String - Base64 data as an ASCII byte string
returns:
String: The decoded byte string.
credits:
https://stackoverflow.com/questions/2941995/python-ignore-incorrect-padding-error-when-base64-decoding
"""
def decode_base64(self, data, altchars=b'+/'):
data = re.sub(rb'[^a-zA-Z0-9%s]+' % altchars, b'', data) # normalize
missing_padding = len(data) % 4
if missing_padding:
data += b'='* (4 - missing_padding)
return base64.b64decode(data, altchars)
"""
Gmail(): get_message_content - returns custom object with pertinent content from a google response to a get
params:
message_id: String - message id provided by Google API
        inbox: String - Ensure message came from a specific inbox
users: List - Ensure message came from a specific email address
returns:
Dictionary (object): Custom object with pertinent content from a google response.
"""
def get_message_content(self, message_id, inbox="INBOX", users=[]):
response = {}
try:
response = self.service.users().messages().get(userId='me', id=message_id).execute()
except Exception as e:
raise Exception("Error: unable to get messageId through google API call: {}".format(e))
msg = dict()
if response and response.get("labelIds") and inbox in response.get("labelIds"):
payload = response.get("payload")
headers = payload.get("headers")
msg["Message-ID"] = message_id
for header in headers:
if header.get("name") == "Subject":
msg["Subject"] = header.get("value", "Subject has no value").replace('“','"').replace('”','"').replace("\r\n"," ")
if header.get("name") == "From":
if len(users) > 0 and not any(user in header.get("value") for user in users):
return dict()
msg["From"] = header.get("value", "From has no value")
if header.get("name") == "To":
msg["To"] = header.get("value", "To has no value")
if payload.get("body").get("data"):
base64_encoded_data = payload.get("body").get("data")
msg["Body"] = base64.urlsafe_b64decode(base64_encoded_data.encode("ASCII")).decode("utf-8")
elif payload.get("parts"):
for part in payload.get("parts"):
if part.get("mimeType") == "multipart/alternative":
if part.get("parts"):
for inner_part in part.get("parts"):
if inner_part.get("mimeType") == "text/plain":
base64_encoded_data = inner_part.get("body").get("data")
if base64_encoded_data:
msg["Body"] = base64.urlsafe_b64decode(base64_encoded_data.encode("ASCII")).decode("utf-8")
elif part.get("mimeType") == "text/plain":
base64_encoded_data = part.get("body").get("data")
if base64_encoded_data:
msg["Body"] = base64.urlsafe_b64decode(base64_encoded_data.encode("ASCII")).decode("utf-8")
else:
raise Exception("Error: Not able to parse email: {}".format(response))
return msg
"""
Gmail(): is_correct_email - checks to see if email header/body contains all non-optional phrases
params:
message_text: String - email header and body text
items_to_match: List - list of item keywords to search for in an email
returns:
Dictionary (object): Custom object with pertinent content from a google response.
"""
def is_correct_email(self, message_text, items_to_match):
for item in items_to_match:
if not item.optional and item.phrase not in message_text:
return False
return True
"""
Gmail(): get_response_string - grabs string from email text based on keyword phrase
params:
message_text: String - email header and body text
item: Dictionary - contains keyword to match with user email
returns:
String: string that is related to the keyword phrase or default if string not found in mail
"""
def get_response_string(self, message_text, item):
response_string = ""
string_start = message_text.find(item.phrase)
while string_start < len(message_text):
if message_text[string_start] == '"':
string_start += 1
while string_start < len(message_text) and message_text[string_start] != '"':
response_string += message_text[string_start]
string_start += 1
break
string_start += 1
return response_string if response_string != "" else item.default
"""
Gmail(): get_response_for_item_from_message - builds and returns response for a given message
params:
message_text: String - email header and body text
item: Dictionary - contains keywords to match with user email
returns:
response is varied based on item type. Could return string or bool. Defaults if value cannot be pulled from message
"""
def get_response_for_item_from_message(self, message_text, item):
response = item.default
item_type = item.type
if item_type == "string":
if item.phrase in message_text:
response = self.get_response_string(message_text=message_text, item=item)
elif item_type == "bool":
response = True if item.phrase in message_text else False
elif item_type == "uuid":
response = item.phrase
elif not item_type:
raise Exception("Error: no type provided for item to find in email. Item is: {}".format(item))
else:
raise Exception("Error: Unknown type provided for item to find in email. Item is: {}".format(item))
return response
"""
Gmail(): get_response_from_user_email - builds and returns object for a given email
params:
items_to_match: List - list of item keywords to search for in an email
returns:
List: list of objects containing pertinent response data for items passed in
"""
def get_response_from_user_email(self, items_to_match=[]):
user_response = []
for message_content in self.message_contents:
combined_message_text = "{message_subject} {message_body}".format(message_subject=message_content.get("Subject"), message_body=message_content.get("Body"))
if self.is_correct_email(message_text=combined_message_text, items_to_match=items_to_match):
for item in items_to_match:
user_response.append({
"name": item.name,
"type": item.type,
"from": message_content.get("From"),
"message_id": message_content.get("Message-ID"),
"response": self.get_response_for_item_from_message(message_text=combined_message_text, item=item),
})
return user_response
"""
Gmail(): trash_message - removes an email from inbox and places it in the trash folder
params:
message_id: String - message id provided by Google API
returns:
"""
def trash_message(self, message_id):
try:
message = (self.service.users().messages().trash(userId='me', id=message_id).execute())
print('Message Id: %s sent to Trash.' % message['id'])
except Exception as error:
print('An error occurred while trashing email: %s' % error)
"""
Gmail(): poll_email_and_get_response_from_user - polls email inbox and returns object for a given email
params:
items_to_match: List - list of item keywords to search for in an email
        inbox: String - Ensure message came from a specific inbox
users: List - Ensure message came from a specific email address
retry_count: Integer - number of times to retry search for email
seconds_between_retries: Integer - number of seconds to wait before retry
returns:
List: list of objects containing pertinent response data for items passed in
"""
def poll_email_and_get_response_from_user(self, items_to_match, inbox="INBOX", users=[], retry_count=20, seconds_between_retries=10, max_results=1):
tries = 0
user_response = None
while not user_response and tries < retry_count:
print("Polling email. Try #:{}".format(str(tries+1)))
self.pull_and_set_message_ids(max_results=max_results)
self.pull_and_set_message_contents_from_message_ids(inbox=inbox, users=users)
user_response = self.get_response_from_user_email(items_to_match=items_to_match)
if user_response:
break
time.sleep(seconds_between_retries)
tries += 1
return user_response
# Example usage and testing area
def main():
gmail = Gmail()
message = gmail.create_message(to="<EMAIL>", subject="Automated subject", message_text="Automated message body")
gmail.send_message(message=message)
gmail.pull_and_set_message_ids(max_results=1)
print(gmail.message_ids)
gmail.pull_and_set_message_contents_from_message_ids()
print(gmail.message_contents)
gmail.trash_message(message_id="ID_OF_A_MESSAGE")
from GmailSearchItem import GmailSearchItem # import GmailSearchItem class from local directory
items = [
GmailSearchItem(name="Test", type=1, phrase="test=", default="default value", optional=False),
GmailSearchItem(name="Test2", type=1, phrase="test2=", default="default value 2", optional=True),
]
user_response = gmail.poll_email_and_get_response_from_user(items_to_match=items, retry_count=5, seconds_between_retries=10)
print(user_response)
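    # Attachment example (illustrative sketch; the address and file path are placeholders):
    # attach_message = gmail.create_message_with_attachment(
    #     to="<EMAIL>", subject="Automated subject", message_text="See the attached file", file="./example.txt")
    # gmail.send_message(message=attach_message)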
if __name__ == "__main__":
main()
``` |
{
"source": "jjdmol/lofar-antenna-positions",
"score": 3
} |
#### File: lofar-antenna-positions/lofarantpos/db.py
```python
import csv
import os
import pathlib
import numpy
from lofarantpos import geo
def install_prefix():
path_elements = pathlib.PurePath(__file__).parts
path_to_module = path_elements[:-2]
if path_to_module[-1] == 'site-packages':
if 'python' in path_to_module[-2] and 'lib' in path_to_module[-3]:
return os.path.join(*(path_to_module[:-3]))
return os.path.join(*path_to_module)
def parse_csv(file_name, data_type):
"""Read a CSV file and convert the elements to a given data type
Args:
file_name (str): name of file to be read
data_type (type): type to convert each line of the CSV to (e.g. `PhaseCentre`)
Returns:
list: List of objects of the given type
"""
return [data_type(row)
for row_id, row in enumerate(csv.reader(open(file_name)))
if row_id > 0]
def getcol(rows, col_name):
return [row.__dict__[col_name]
for row in rows]
def parse_hba_rotations(file_name):
hba_rotations = {}
for row in parse_csv(file_name, list):
if row[2].strip() == '':
hba_rotations[row[0] + 'HBA'] = float(row[1]) * numpy.pi / 180.0
else:
hba_rotations[row[0] + 'HBA0'] = float(row[1]) * numpy.pi / 180.0
hba_rotations[row[0] + 'HBA1'] = float(row[2]) * numpy.pi / 180.0
return hba_rotations
class Antenna(object):
def __init__(self, csv_row):
self.station = csv_row[0]
self.antenna_type = csv_row[1]
self.antenna_id = int(csv_row[2])
self.etrs = numpy.array([float(csv_row[3]),
float(csv_row[4]),
float(csv_row[5])])
self.rcu_x = int(csv_row[6])
self.rcu_y = int(csv_row[7])
def __repr__(self):
return repr(self.__dict__)
class PhaseCentre(object):
def __init__(self, csv_row):
self.station = csv_row[0]
self.field = csv_row[1]
self.etrs = numpy.array([float(csv_row[2]),
float(csv_row[3]),
float(csv_row[4])])
def __repr__(self):
return repr(self.__dict__)
class ContainerLocation(object):
def __init__(self, csv_row):
self.station = csv_row[0]
self.etrs = numpy.array([float(csv_row[14]),
float(csv_row[15]),
float(csv_row[16])])
def __repr__(self):
return repr(self.__dict__)
class RotationMatrix(object):
def __init__(self, csv_row):
self.station = csv_row[0]
self.field = csv_row[1]
self.matrix = numpy.array([float(x) for x in csv_row[2:]]).reshape((3, 3))
def __repr__(self):
return repr(self.__dict__)
class LofarAntennaDatabase(object):
"""Database with LOFAR antenna positions
This database contains the LOFAR antenna positions in both ETRS and the
station/field specific pqr coordinate system. The upstream source is
the LOFAR svn repository at https://svn.astron.nl/LOFAR.
Attributes:
antennas (list): all antenna information
phase_centres (dict): ETRS phase centres for each antenna field
hba_rotations (dict): HBA rotations (in radians) for each antenna field
pqr_to_etrs (dict): Rotation matrix from PQR to ETRS for each antenna field
"""
def __init__(self, path_to_files=None):
if path_to_files is None:
            # install_prefix can end up being some_path/lib/site-packages; in that case also search
            # the install prefix with the last two directories stripped off.
search_path = [install_prefix(),
os.sep.join(install_prefix().split(os.sep)[:-2]),
'/usr/local', '/usr']
for attempt in search_path:
share = os.path.join(attempt, os.path.join('share', 'lofarantpos'))
if os.path.exists(os.path.join(share, 'etrs-phase-centres.csv')):
break
else:
share = path_to_files
self.phase_centres = {
c.station + c.field: c.etrs
for c in parse_csv(os.path.join(share, 'etrs-phase-centres.csv'),
PhaseCentre)}
self.cabinet_etrs = {
c.station: c.etrs
for c in parse_csv(os.path.join(share, 'stationinfo.csv'), ContainerLocation)
}
self.antennas = parse_csv(os.path.join(share, 'etrs-antenna-positions.csv'),
Antenna)
pqr_to_etrs_rows = parse_csv(os.path.join(share, 'rotation_matrices.dat'),
RotationMatrix)
self.pqr_to_etrs = {m.station + m.field: m.matrix for m in pqr_to_etrs_rows}
self.hba_rotations = parse_hba_rotations(os.path.join(share, 'hba-rotations.csv'))
core_stations = numpy.unique([name[0:5] for name in self.phase_centres.keys()
if 'CS' in name])
for core_station in core_stations:
self.pqr_to_etrs[core_station + 'HBA'] = self.pqr_to_etrs[core_station + 'HBA0']
# self.antennas[core_station+'HBA'] = numpy.concatenate([self.antennas[core_station+'HBA0'],
# self.antennas[core_station+'HBA0']],
# axis=0)
def __repr__(self):
return repr(self.__dict__)
def antenna_etrs(self, field_name):
"""Return a list of all ETRS antenna coordinates for a given antenna field
Args:
field_name (str): Field name (e.g. 'CS001HBA0')
Returns:
array: array of ETRS coordinates
"""
station = field_name[0:5].upper()
subfield = field_name[5:].upper()
antenna_ids = {'LBA': numpy.arange(2048),
'HBA': numpy.arange(2048),
'HBA0': numpy.arange(0, 24),
'HBA1': numpy.arange(24, 48)}
return numpy.array(
getcol(
sorted([ant for ant in self.antennas
if ant.station == station
and ant.antenna_type == subfield[0:3]
and ant.antenna_id in antenna_ids[subfield]],
key=lambda x: x.antenna_id),
'etrs'))
def antenna_pqr(self, field_name):
"""Return a list of all PQR antenna coordinates for a given antenna field
Args:
field_name (str): Field name (e.g. 'CS001HBA0')
Returns:
array: array of PQR coordinates
"""
return geo.transform(
self.antenna_etrs(field_name),
self.phase_centres[field_name],
self.pqr_to_etrs[field_name].T)
def hba_dipole_pqr(self, field_name):
"""Return a list of all PQR dipole coordinates for a given HBA antenna field
Args:
field_name (str): Field name (e.g. "CS001HBA0")
Returns:
array: array of PQR coordinates
Example:
>>> import lofarantpos.db
>>> import numpy
>>> db = lofarantpos.db.LofarAntennaDatabase()
>>> db.hba_dipole_pqr("CS001HBA0")[:5]
array([[ 1.9336444 , 15.284... , 0.00008769],
[ 3.075576 , 14.776116 , 0.00008769],
[ 4.217508 , 14.267695 , 0.00008769],
[ 5.3594... , 13.7592745 , 0.00008769],
[ 1.4252236 , 14.142605 , 0.00008769]], dtype=float32)
"""
base_tile = numpy.array([[[-1.5, 1.5], [-0.5, 1.5], [+0.5, 1.5], [+1.5, +1.5]],
[[-1.5, 0.5], [-0.5, 0.5], [+0.5, 0.5], [+1.5, +0.5]],
[[-1.5, -0.5], [-0.5, -0.5], [+0.5, -0.5], [+1.5, -0.5]],
[[-1.5, -1.5], [-0.5, -1.5], [+0.5, -1.5], [+1.5, -1.5]]],
dtype=numpy.float32)
base_tile *= 1.25
base_tile_delta_pqr = base_tile.reshape((-1, 2))
tile_pqr = self.antenna_pqr(field_name)
if field_name[:2] == "CS" and field_name[-3:] == "HBA":
# Merge positions for HBA0 and HBA1
subfields = [field_name + "0", field_name + "1"]
else:
subfields = [field_name]
dipole_pqr = numpy.zeros((0, 3), dtype=numpy.float32)
for subfield, subfield_tile_pqr in zip(subfields, numpy.split(tile_pqr, len(subfields))):
rotation = self.hba_rotations[subfield]
matrix = numpy.array([[numpy.cos(rotation), numpy.sin(rotation)],
[-numpy.sin(rotation), numpy.cos(rotation)]],
dtype=numpy.float32)
rotated_tile_pqr = numpy.dot(matrix, base_tile_delta_pqr.T).T
subfield_dipole_pqr = numpy.array([[element[0] + tile[0], element[1] + tile[1], tile[2]]
for tile in subfield_tile_pqr
for element in rotated_tile_pqr],
dtype=numpy.float32).reshape((-1, 3))
dipole_pqr = numpy.concatenate((dipole_pqr, subfield_dipole_pqr))
return dipole_pqr
def hba_dipole_etrs(self, field_name):
"""Return a list of all ETRS dipole coordinates for a given HBA antenna field
Args:
field_name (str): Field name (e.g. 'CS001HBA0')
Returns:
array: array of ETRS coordinates
Example:
>>> import lofarantpos.db
>>> import numpy
>>> db = lofarantpos.db.LofarAntennaDatabase()
>>> db.hba_dipole_etrs("IE613HBA")[:5]
array([[3801679.57332033, -528959.80788382, 5076969.80405122],
[3801680.56726901, -528959.55814198, 5076969.08837304],
[3801681.56121763, -528959.30839824, 5076968.37269509],
[3801682.55516625, -528959.0586545 , 5076967.65701715],
[3801679.7113895 , -528961.02799576, 5076969.57003303]])
"""
return geo.transform(
self.hba_dipole_pqr(field_name),
numpy.zeros(3),
self.pqr_to_etrs[field_name]) + \
self.phase_centres[field_name][numpy.newaxis, :]
def pqr_to_localnorth(self, field_name):
"""
Compute a rotation matrix from local coordinates (pointing North) to PQR
Args:
field_name (str): Field name (e.g. 'IE613LBA')
Example:
>>> import lofarantpos.db
>>> db = lofarantpos.db.LofarAntennaDatabase()
>>> db.pqr_to_localnorth("IE613LBA")
array([[ 0.97847792, -0.20633485, -0.00262657],
[ 0.20632893, 0.97847984, -0.00235784],
[ 0.00305655, 0.00176516, 0.99999377]])
"""
localnorth_to_etrs = geo.localnorth_to_etrs(self.phase_centres[field_name])
return localnorth_to_etrs.T.dot(self.pqr_to_etrs[field_name])
def rotation_from_north(self, field_name):
"""
Compute the angle in radians between the positive Q-axis (projected onto
a local tangent to the WGS84 ellipsoid) and the local north.
Positive means Q is East of North.
Args:
field_name (str): Field name (e.g. 'IE613LBA')
Returns:
float: angle (in radians)
Example:
>>> import lofarantpos.db
>>> import numpy
>>> db = lofarantpos.db.LofarAntennaDatabase()
>>> numpy.rad2deg(db.rotation_from_north("IE613LBA"))
-11.907669843448476
"""
localnorth_to_pqr = self.pqr_to_localnorth(field_name).T
# Coordinates of the Q axis in localnorth coordinates
pqr_localnorth = localnorth_to_pqr.T.dot(numpy.array([0,1,0]))
return numpy.arctan2(pqr_localnorth[0], pqr_localnorth[1])
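# Illustrative usage sketch (field names are examples; assumes the bundled CSV data is installed):
# db = LofarAntennaDatabase()
# etrs = db.antenna_etrs("CS001LBA")          # (n_antennas, 3) ETRS coordinates
# pqr = db.antenna_pqr("CS001LBA")            # the same antennas in the station PQR frame
# angle = db.rotation_from_north("CS001LBA")  # rotation of the Q axis from local north, in radians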
``` |
{
"source": "JJDSNT/testandobackend",
"score": 4
} |
#### File: testandobackend/app/contas.py
```python
class Conta:
def __init__(self, numero, titular, saldo=0.0, limite=1000.0):
print(f"Construindo objeto...{self}")
self.__numero = numero
self.__titular = titular
self.__saldo = saldo
self.__limite = limite
def criar_conta(numero, saldo, limite):
conta = {"numero": numero, "saldo": saldo, "limite": limite}
return conta
def depositar(conta, valor):
conta["saldo"] += valor
def sacar(conta, valor):
conta["saldo"] -= valor
def saldo(conta):
print("O saldo em conta é {}".format(conta["saldo"]))
``` |
{
"source": "jjeamin/obJDetection",
"score": 3
} |
#### File: example/cam/main.py
```python
import cv2
import sys
import torch
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from torchvision.transforms import transforms
from src.models.feature.ResNet import ResNet18
from example.cam.visualization import CAM
IMG_PATH = './test_img/stl10/test2.png'
MODEL_PATH = './pretrain/resnet18_stl10_gap_model.pth'
if torch.cuda.is_available():
device = 'cuda'
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
device = 'cpu'
torch.set_default_tensor_type('torch.FloatTensor')
class_name = ['airplane', 'bird', 'car', 'cat', 'deer', 'dog', 'horse', 'monkey', 'ship', 'truck']
def drawing(cam, img_path, shape=(128, 128)):
img = cv2.imread(img_path)
img = cv2.resize(img, shape)
fig, axs = plt.subplots(1, 3, figsize=(10, 10))
axs[0].imshow(cam)
resized_cam = cv2.resize(cam, shape)
axs[1].imshow(resized_cam)
heatmap = cv2.applyColorMap(np.uint8(255 * resized_cam), cv2.COLORMAP_JET)
heatimg = heatmap * 0.3 + img * 0.5
print(heatimg.shape)
cv2.imwrite('./cam.jpg', heatimg)
cam_img = cv2.imread('./cam.jpg')
    cam_img = cv2.cvtColor(cam_img, cv2.COLOR_BGR2RGB)
axs[2].imshow(cam_img)
model = ResNet18(classes=len(class_name),alpha=0).to(device)
model.load_state_dict(torch.load(MODEL_PATH))
model = model.eval()
img = Image.open(IMG_PATH)
transformer = transforms.Compose([transforms.Resize((128, 128)),
transforms.ToTensor()])
tensor_img = transformer(img).to(device)
tensor_img = tensor_img.view(1, 3, 128, 128)
# print(model._modules)
final_conv_name = 'Block4'
# hook the feature extractor
features_blobs = []
def hook_feature(module, input, output):
features_blobs.append(output.data.cpu().numpy())
model._modules.get(final_conv_name).register_forward_hook(hook_feature)
# CAM
cam = CAM()
cam_img = cam.get_cam(model, features_blobs, tensor_img)
drawing(cam_img, IMG_PATH)
```
#### File: models/detection/CornerNet.py
```python
import torch.nn as nn
from src.models.feature.Hourglass import hourglassNet
from src.models.module.corner_pooling import *
from src.models.module.layers import Conv_bn, Conv_bn_relu
class CornerNet(nn.Module):
def __init__(self, classes=20):
super(CornerNet, self).__init__()
self.backbone = hourglassNet()
# top
self.t_conv = Conv_bn_relu(256, 256, kernel_size=3, padding=1)
self.l_conv = Conv_bn_relu(256, 256, kernel_size=3, padding=1)
self.tl_conv = Conv_bn(256, 256, kernel_size=3, padding=1)
self.conv_bn_1x1_tl = Conv_bn(256, 256, kernel_size=1)
self.out_tl = nn.Sequential(
nn.ReLU(),
Conv_bn_relu(256, 256, kernel_size=3, padding=1)
)
self.h_tl = Conv_bn(256, 256, kernel_size=3, padding=1)
self.e_tl = Conv_bn(256, 256, kernel_size=3, padding=1)
self.o_tl = Conv_bn(256, 256, kernel_size=3, padding=1)
self.out_h_tl = nn.Conv2d(256, classes, kernel_size=1)
self.out_e_tl = nn.Conv2d(256, 1, kernel_size=1)
self.out_o_tl = nn.Conv2d(256, 2, kernel_size=1)
# bottom
self.b_conv = Conv_bn_relu(256, 256, kernel_size=3, padding=1)
self.r_conv = Conv_bn_relu(256, 256, kernel_size=3, padding=1)
self.br_conv = Conv_bn(256, 256, kernel_size=3, padding=1)
self.conv_bn_1x1_br = Conv_bn(256, 256, kernel_size=1)
self.out_br = nn.Sequential(
nn.ReLU(),
Conv_bn_relu(256, 256, kernel_size=3, padding=1)
)
self.h_br = Conv_bn(256, 256, kernel_size=3, padding=1)
self.e_br = Conv_bn(256, 256, kernel_size=3, padding=1)
self.o_br = Conv_bn(256, 256, kernel_size=3, padding=1)
self.out_h_br = nn.Conv2d(256, classes, kernel_size=1)
self.out_e_br = nn.Conv2d(256, 1, kernel_size=1)
self.out_o_br = nn.Conv2d(256, 2, kernel_size=1)
def forward(self, x):
x = self.backbone(x)
t = self.t_conv(x)
l = self.l_conv(x)
top = top_pool()(t)
left = left_pool()(l)
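        # Corner pooling (CornerNet): the directional max-pools are summed so that every location
        # aggregates evidence along the row and column meeting at a candidate top-left corner.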
top_left = self.tl_conv(top + left)
conv_bn_tl = self.conv_bn_1x1_tl(x)
out_tl = self.out_tl(top_left + conv_bn_tl)
heat_tl = self.out_h_tl(self.h_tl(out_tl))
embed_tl = self.out_e_tl(self.e_tl(out_tl))
off_tl = self.out_o_tl(self.o_tl(out_tl))
b = self.b_conv(x)
r = self.r_conv(x)
bottom = bottom_pool()(b)
right = right_pool()(r)
bottom_right = self.br_conv(bottom + right)
conv_bn_br = self.conv_bn_1x1_br(x)
out_br = self.out_br(bottom_right + conv_bn_br)
heat_br = self.out_h_br(self.h_br(out_br))
embed_br = self.out_e_br(self.e_br(out_br))
off_br = self.out_o_br(self.o_br(out_br))
return [heat_tl, heat_br, off_tl, off_br, embed_tl, embed_br]
'''
from utils.tester import model_summary
# test
tester = model_summary(CornerNet())
tester.summary((3, 511, 511))
'''
```
#### File: models/feature/LeNet5.py
```python
import torch.nn as nn
# Inherits from nn.Module.
class LeNet5(nn.Module):
def __init__(self, classes=10):
        # super() call: resolves duplicate initialization issues under multiple inheritance
super(LeNet5, self).__init__()
# 1x32x32 -> 6x28x28
self.conv1 = nn.Conv2d(in_channels=1,
out_channels=6,
kernel_size=5,
stride=1, # default
padding=0, # default
bias=True)
self.sigmoid1 = nn.Sigmoid()
# 28x28x6 -> 14x14x6
self.avg_pool1 = nn.AvgPool2d(kernel_size=2)
# 14x14x6 -> 10x10x16
self.conv2 = nn.Conv2d(in_channels=6,
out_channels=16,
kernel_size=5,
bias=True)
self.sigmoid2 = nn.Sigmoid()
# 10x10x16 -> 5x5x16
self.avg_pool2 = nn.AvgPool2d(kernel_size=2)
# 5x5x16 -> 1x1x120
self.conv3 = nn.Conv2d(in_channels=16,
out_channels=120,
kernel_size=5,
bias=True)
self.sigmoid3 = nn.Sigmoid()
# 120 -> 84
self.dense1 = nn.Linear(120, 84)
self.sigmoid4 = nn.Sigmoid()
# 84 -> 10
self.output = nn.Linear(84, classes)
def forward(self, x):
x = self.conv1(x)
x = self.sigmoid1(x)
x = self.avg_pool1(x)
x = self.conv2(x)
x = self.sigmoid2(x)
x = self.avg_pool2(x)
x = self.conv3(x)
x = self.sigmoid3(x)
x = x.view(x.size(0), -1)
x = self.dense1(x)
x = self.sigmoid4(x)
x = self.output(x)
return x
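# Minimal shape check (illustrative sketch; the classic LeNet-5 input is a single-channel 32x32 image):
if __name__ == "__main__":
    import torch
    model = LeNet5(classes=10)
    out = model(torch.randn(1, 1, 32, 32))
    print(out.shape)  # expected: torch.Size([1, 10])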
```
#### File: models/feature/ZFNet.py
```python
import torch.nn as nn
class ZFNet(nn.Module):
'''
visualization Network
'''
def __init__(self, classes=1000):
super(ZFNet, self).__init__()
self.conv1 = nn.Conv2d(3, 96,
kernel_size=7,
stride=2,
padding=1)
self.relu1 = nn.ReLU()
self.max_pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(96, 256,
kernel_size=5,
stride=2,
padding=0)
self.relu2 = nn.ReLU()
self.max_pool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(256, 384,
kernel_size=3,
stride=1,
padding=1)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(384, 384,
kernel_size=3,
stride=1,
padding=1)
self.relu4 = nn.ReLU()
self.conv5 = nn.Conv2d(384, 256,
kernel_size=3,
stride=1,
padding=1)
self.relu5 = nn.ReLU()
self.max_pool3 = nn.MaxPool2d(kernel_size=3, stride=2)
self.dense1 = nn.Linear(6 * 6 * 256, 4096)
self.drop1 = nn.Dropout2d()
self.dense2 = nn.Linear(4096, 4096)
self.drop2 = nn.Dropout2d()
self.dense3 = nn.Linear(4096, classes)
def forward(self, x):
x = self.conv1(x)
x = self.relu1(x)
x = self.max_pool1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.max_pool2(x)
x = self.conv3(x)
x = self.relu3(x)
x = self.conv4(x)
x = self.relu4(x)
x = self.conv5(x)
x = self.relu5(x)
x = self.max_pool3(x)
x = x.view(x.size(0), -1)
x = self.dense1(x)
x = self.drop1(x)
x = self.dense2(x)
x = self.drop2(x)
x = self.dense3(x)
return x
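# Minimal shape check (illustrative sketch; assumes the usual 3x224x224 input, which yields the
# 6x6x256 feature map expected by dense1):
if __name__ == "__main__":
    import torch
    model = ZFNet(classes=1000)
    out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 1000])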
```
#### File: src/tools/tester.py
```python
import numpy as np
import cv2
def save_tensor_image(image, boxes=None, saved_path='test.png'):
'''
:param image: (tensor) cpu image
:return: (file) save image
'''
image = image.permute(1, 2, 0).numpy() * 255.0
image = image.astype('uint8')
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if boxes is not None:
for box in boxes:
loc = np.floor(box)
image = cv2.rectangle(image, (loc[0], loc[1]), (loc[2], loc[3]), (255, 0, 0), 3)
cv2.imwrite(saved_path, image)
print('Finish image save testing')
``` |
{
"source": "jjeamin/sleep",
"score": 2
} |
#### File: jjeamin/sleep/encoder.py
```python
import argparse
import torch
import os
import torchvision.transforms as transforms
from copy import deepcopy
from med import SegNet, SegNetv2
from med.utils import testing
from pathlib import Path
from tqdm import tqdm
from torchvision.datasets import ImageFolder
def train(model, train_loader, optimizer, device="cuda"):
model.train()
total = len(train_loader)
train_loss = 0
for images, _ in tqdm(train_loader, total=total):
optimizer.zero_grad()
images = images.to(device)
true_images = deepcopy(images)
pred_images = model(images)
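        # Reconstruction loss: root-mean-square error between the autoencoder output and its input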
loss = torch.sqrt((pred_images - true_images).pow(2).mean())
loss.backward()
optimizer.step()
train_loss += loss.item()
return train_loss
def valid(model, valid_loader, device="cuda"):
model.eval()
total = len(valid_loader)
valid_loss = 0
for images, _ in tqdm(valid_loader, total=total):
images = images.to(device)
true_images = deepcopy(images)
pred_images = model(images)
loss = torch.sqrt((pred_images - true_images).pow(2).mean())
valid_loss += loss.item()
return valid_loss
def main(args):
DATA_PATH = Path("store/public_dataset/Fpz-Cz")
image_size = (128, 1024)
total_transforms = transforms.Compose([
transforms.Grayscale(),
transforms.Resize(image_size),
transforms.ToTensor(),
])
total_dataset = ImageFolder(root=DATA_PATH, transform=total_transforms)
total_length = len(total_dataset)
split = [round(total_length * 0.7), round(total_length *
0.15), round(total_length * 0.15) - 1]
train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(
total_dataset, split)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=args.batch_size, shuffle=False)
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=False)
train_total = len(train_loader)
valid_total = len(valid_loader)
model = SegNet().to("cuda")
model.load_state_dict(torch.load('./checkpoint/autoencoder_Fpz_Cz.pth'))
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, [30, 80], gamma=0.1)
minimum_loss = 1000
for e in range(0, args.epochs):
testing(model, test_loader, e)
train_loss = train(model, train_loader, optimizer, device=args.device)
train_loss = train_loss / train_total * args.batch_size
valid_loss = valid(model, valid_loader, device=args.device)
valid_loss = valid_loss / valid_total * args.batch_size
scheduler.step()
print(
f"[EPOCH : {args.epochs} / {e}] || [TRAIN LOSS : {train_loss}] || [VALID LOSS : {valid_loss}]")
if minimum_loss > valid_loss:
torch.save(model.state_dict(), args.save_path)
minimum_loss = valid_loss
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=8)
parser.add_argument("--epochs", default=100)
parser.add_argument("--lr", default=0.01)
parser.add_argument("--device", default="cuda")
parser.add_argument("--save_path", default="./checkpoint/autoencoder_fpz_cz.pth")
args = parser.parse_args()
main(args)
```
#### File: jjeamin/sleep/encoding_test.py
```python
import numpy as np
import matplotlib.pyplot as plt
import itertools
from tqdm import tqdm
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import argparse
from med import resnet18
from med.utils import make_weights_for_balanced_classes, Encoding_Dataset
from torch.utils.data import Dataset
from pathlib import Path
from sklearn.metrics import confusion_matrix
LABELS = ["N1", "N2", "N3", "REM", "Wake"]
def test(model, test_loader, criterion, device="cuda"):
model.eval()
total = len(test_loader)
test_correct = 0
test_loss = 0
total_labels = []
total_predicted = []
for i, (data, labels) in enumerate(tqdm(test_loader, total=total)):
data = data.float().to(device)
labels = labels.to(device)
pred = model(data)
_, predicted = torch.max(pred, 1)
test_correct += (predicted == labels).sum().item()
total_labels.append(labels.detach().cpu().numpy()[0])
total_predicted.append(predicted.detach().cpu().numpy()[0])
loss = criterion(pred, labels)
test_loss += loss.item()
metrics = confusion_matrix(total_labels, total_predicted, labels=[0, 1, 2, 3, 4])
plot_confusion_matrix(metrics, classes=LABELS, normalize=False, title='Confusion matrix')
plot_confusion_matrix(metrics, classes=LABELS, normalize=True, title='Confusion matrix')
return test_correct, test_loss
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
def main(args):
DATA_PATH = Path(args.data_path)
test_data_path = DATA_PATH / "Fpz-Cz_test_encoding"
test_transforms = transforms.Compose([
transforms.ToTensor(),
])
test_dataset = Encoding_Dataset(root_path=test_data_path,
transform=test_transforms)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True)
test_total = len(test_loader)
model = resnet18(num_classes=5).to(args.device)
model.load_state_dict(torch.load('./checkpoint/resnet18_encoding.pth'))
criterion = nn.CrossEntropyLoss().to(args.device)
test_correct, test_loss = test(model, test_loader, criterion, device=args.device)
test_acc = test_correct / (test_total * args.batch_size)
test_loss = test_loss / (test_total * args.batch_size)
print(f"[TEST ACC : {test_acc}] | [TEST LOSS : {test_loss}]")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", default="store/public_dataset")
parser.add_argument("--device", default="cuda")
parser.add_argument("--batch_size", default=1)
args = parser.parse_args()
main(args)
``` |
{
"source": "jjeamin/stylegan-pytorch-collections",
"score": 2
} |
#### File: jjeamin/stylegan-pytorch-collections/3_stylespace.py
```python
import argparse
import math
import os
import torch
import torch.nn.functional as F
import torchvision
from torch import optim
from tqdm import tqdm
from src.models.stylegan import Generator
def conv_warper(layer, input, style, noise):
"""[summary]
Args:
layer (nn.Module): StyleConv
input ([type]): [description]
style ([type]): [description]
noise ([type]): [description]
"""
conv = layer.conv
batch, in_channel, height, width = input.shape
style = style.view(batch, 1, in_channel, 1, 1)
weight = conv.scale * conv.weight * style
if conv.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1)
weight = weight.view(
batch * conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
)
if conv.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(
batch, conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
)
weight = weight.transpose(1, 2).reshape(
batch * in_channel, conv.out_channel, conv.kernel_size, conv.kernel_size
)
out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, conv.out_channel, height, width)
out = conv.blur(out)
elif conv.downsample:
input = conv.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, conv.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=conv.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, conv.out_channel, height, width)
out = layer.noise(out, noise=noise)
out = layer.activate(out)
return out
def encoder(G, noise):
styles = [noise]
style_space = []
styles = [G.style(s) for s in styles]
noise = [getattr(G.noises, f'noise_{i}') for i in range(G.num_layers)]
inject_index = G.n_latent
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
style_space.append(G.conv1.conv.modulation(latent[:, 0]))
i = 1
for conv1, conv2 in zip(G.convs[::2], G.convs[1::2]):
style_space.append(conv1.conv.modulation(latent[:, i]))
style_space.append(conv2.conv.modulation(latent[:, i + 1]))
i += 2
return style_space, latent, noise
def decoder(G, style_space, latent, noise):
out = G.input(latent)
out = conv_warper(G.conv1, out, style_space[0], noise[0])
skip, _ = G.to_rgb1(out, latent[:, 1])
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(
G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
):
out = conv_warper(conv1, out, style_space[i], noise=noise1)
out = conv_warper(conv2, out, style_space[i+1], noise=noise2)
skip, _ = to_rgb(out, latent[:, i + 2], skip)
i += 2
image = skip
return image
def main():
index = [0,1,1,2,2,3,4,4,5,6,6,7,8,8,9,10,10,11,12,12,13,14,14,15,16,16]
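    # index (assumed mapping): translates the layer numbering used for the channel edits below, which
    # also counts the to_rgb layers as in the StyleSpace paper, into positions in the style_space list
    # returned by encoder() (conv layers only).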
LOAD_PATH = '../checkpoint/stylegan2-ffhq-config-f.pt'
model = Generator(
size=1024,
style_dim=512,
n_mlp=8,
channel_multiplier=2
)
model.load_state_dict(torch.load(LOAD_PATH)['g_ema'])
model.eval()
model.cuda()
test_input = torch.randn(1,512).cuda()
output, _ = model([test_input], return_latents=False)
torchvision.utils.save_image(output.detach().cpu(),
os.path.join("./results/stylespace_origin.jpg"),
normalize=True,
scale_each=True,
range=(-1, 1))
style_space, latent, noise = encoder(model, test_input)
style_space[index[9]][:, 409] += 10
image = decoder(model, style_space, latent, noise)
torchvision.utils.save_image(image.detach().cpu(),
os.path.join("./results/stylespace_eye.jpg"),
normalize=True,
scale_each=True,
range=(-1, 1))
style_space, latent, noise = encoder(model, test_input)
style_space[index[12]][:, 330] -= 50
image = decoder(model, style_space, latent, noise)
torchvision.utils.save_image(image.detach().cpu(),
os.path.join("./results/stylespace_hair.jpg"),
normalize=True,
scale_each=True,
range=(-1, 1))
style_space, latent, noise = encoder(model, test_input)
style_space[index[6]][:, 259] -= 20
image = decoder(model, style_space, latent, noise)
torchvision.utils.save_image(image.detach().cpu(),
os.path.join("./results/stylespace_mouth.jpg"),
normalize=True,
scale_each=True,
range=(-1, 1))
style_space, latent, noise = encoder(model, test_input)
style_space[index[15]][:, 45] -= 3
image = decoder(model, style_space, latent, noise)
torchvision.utils.save_image(image.detach().cpu(),
os.path.join("./results/stylespace_lip.jpg"),
normalize=True,
scale_each=True,
range=(-1, 1))
if __name__ == "__main__":
main()
``` |
{
"source": "jjeamin/Visual_PyQTorch",
"score": 2
} |
#### File: jjeamin/Visual_PyQTorch/cls.py
```python
import sys
import logging
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5 import uic, QtCore
from vis.cam import CAM, GradCAM
from vis.grad import Vanilla, Smooth, Guided_Backprop
from util import use_theme, get_label
class Class_Form(QDialog, QPlainTextEdit):
def __init__(self, parent=None):
super().__init__(parent)
label_path = "./labels/imagenet_labels.pkl"
self.labels = get_label(label_path)
self.ui = uic.loadUi("./ui/Class_View.ui", self)
self.cls = -1
self.initUI()
def initUI(self):
        # Wire up the search button event handling
self.ui.setWindowTitle('Class')
self.ui.show()
self.search_line.setPlaceholderText("Input Class Name or Number")
self.search_btn.clicked.connect(self.search)
self.content_table.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.content_table.cellDoubleClicked.connect(self.click_table)
self.set_table()
def search(self):
text = self.search_line.text()
find_items = self.content_table.findItems(text, QtCore.Qt.MatchContains)
items = [item.row() for item in find_items]
for idx in range(0, len(self.labels)):
if idx in items:
self.content_table.setRowHidden(idx, False)
else:
self.content_table.setRowHidden(idx, True)
self.search_line.setText("")
def click_table(self):
cur_item = self.content_table.currentItem()
self.cls = cur_item.row() - 1
self.close()
def set_table(self):
self.content_table.setRowCount(len(self.labels) + 1)
self.content_table.setColumnCount(1)
self.content_table.setItem(0, 0, QTableWidgetItem("Prediction"))
for idx, name in self.labels.items():
self.content_table.setItem(idx+1, 0, QTableWidgetItem(name))
if __name__ == '__main__':
app = QApplication(sys.argv)
use_theme(app, "theme/darkgray.qss")
w = Class_Form()
sys.exit(app.exec())
```
#### File: jjeamin/Visual_PyQTorch/view.py
```python
import sys
import logging
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5 import uic
from vis.cam import CAM, GradCAM
from vis.grad import Vanilla, Smooth, Guided_Backprop
from util import use_theme, make_dir
from cls import Class_Form
class QTextEditLogger(logging.Handler):
def __init__(self, parent):
super().__init__()
self.widget = parent
self.widget.setReadOnly(True)
def emit(self, record):
msg = self.format(record)
self.widget.appendPlainText(msg)
class Visual_Form(QDialog, QPlainTextEdit):
def __init__(self, parent=None):
super().__init__(parent)
self.label_path = "./labels/imagenet_labels.pkl"
self.ui = uic.loadUi("./ui/View.ui", self)
self.initUI()
self.isInput = False
self.vis = None
self.islayer = False
def initUI(self):
self.ui.setWindowTitle('Visualization')
self.ui.show()
make_dir("./results")
logTextBox = QTextEditLogger(self.plainTextEdit)
# You can format what is printed to text box
logTextBox.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logging.getLogger().addHandler(logTextBox)
# You can control the logging level
logging.getLogger().setLevel(logging.DEBUG)
self.input_btn.clicked.connect(self.get_input_image)
self.cls_btn.clicked.connect(self.selc_cls)
self.start_btn.clicked.connect(self.start)
def get_input_image(self):
self.img_path, i = QFileDialog.getOpenFileName(self, 'Open file', '.', "Image files (*.jpg *.gif *.png)")
if not self.img_path:
logging.info("\nNot Selected Image")
self.isInput = False
self.input_img.setPixmap(QPixmap(None))
else:
logging.info("\nInput Image")
self.main_label.setText("Press the start button")
self.isInput = True
self.input_img.setPixmap(QPixmap(self.img_path).scaledToWidth(self.input.width()))
def start(self):
if self.isInput is False:
QMessageBox.information(self, 'Message', "Upload the Image", QMessageBox.Yes)
return
if self.islayer is True:
self.layers_widget.itemSelectionChanged.disconnect()
self.islayer = False
self.layers_widget.clear()
mode = self.mode_box.currentText()
model_name = self.model_box.currentText()
cls = self.cls_box.value()
logging.info("\nMode : %s, Model : %s" % (mode, model_name))
if mode == 'cam':
cam = CAM(self.img_path,
self.label_path,
model_name=model_name)
info = cam.save_img(cls)
img = cam.load_img()
self.drawing(img)
elif mode == 'grad cam':
self.groupBox.setTitle("Layers")
self.main_label.setText("Select layer")
grad_cam = GradCAM(self.img_path,
self.label_path,
model_name=model_name)
info = grad_cam.save_img(cls)
# set list view
self.set_layers(grad_cam)
elif mode == 'guided backprop':
self.groupBox.setTitle("Modes")
self.main_label.setText("Select mode")
guided = Guided_Backprop(self.img_path,
self.label_path,
model_name=model_name)
info = guided.save_img(cls)
# set list view
self.set_layers(guided)
elif mode == 'vanilla grad':
vanilla = Vanilla(self.img_path,
self.label_path,
model_name=model_name)
info = vanilla.save_img(cls)
img = vanilla.load_img()
self.drawing(img)
elif mode == 'smooth grad':
smooth = Smooth(self.img_path,
self.label_path,
model_name=model_name)
info = smooth.save_img(prog=self.progressBar,
cls=cls)
img = smooth.load_img()
self.drawing(img)
else:
info = None
logging.info(info)
        if self.progressBar.value() == self.progressBar.maximum():
self.progressBar.setValue(0)
def set_layers(self, vis):
for name in vis.items:
item = QListWidgetItem(name, self.layers_widget)
custom_widget = QLabel(name)
item.setSizeHint(custom_widget.sizeHint())
self.layers_widget.setItemWidget(item, custom_widget)
self.layers_widget.addItem(item)
self.layers_widget.itemSelectionChanged.connect(self.item_clicked(vis))
self.islayer = True
def item_clicked(self, vis):
def clicked_drawing():
item_name = self.layers_widget.currentItem().text()
# set image
img = vis.load_img(item_name)
self.drawing(img)
return clicked_drawing
def selc_cls(self):
cls_form = Class_Form()
cls_form.exec_()
self.cls_box.setValue(cls_form.cls)
def drawing(self, img):
h, w, c = img.shape
qImg = QImage(img.data, w, h, w * c, QImage.Format_RGB888)
pixmap = QPixmap.fromImage(qImg)
self.main_label.setPixmap(pixmap.scaledToWidth(self.main_label.width()))
if __name__ == '__main__':
app = QApplication(sys.argv)
use_theme(app, "theme/darkgray.qss")
w = Visual_Form()
sys.exit(app.exec())
```
#### File: Visual_PyQTorch/vis/cam.py
```python
import cv2
import torch
import torch.nn as nn
from torch.nn import functional as F
from util import *


class CAM(object):
    def __init__(self,
                 img_path,
                 label_path,
                 model_name,):
        self.img_path = img_path
        self.save_dir = "./results/cam"
        self.label = get_label(label_path)
        self.model = get_model(model_name)
        self.model.eval()

        self.feature = []
        self.layer_names = []

    def get_feature_hook(self, module, input, output):
        self.feature.append(output.cpu().data.numpy())

    def register(self, layer_name='layer4'):
        self.layer_names.append(layer_name)
        self.model._modules.get(layer_name).register_forward_hook(self.get_feature_hook)

    def save_img(self, cls=-1):
        make_dir(self.save_dir)

        # register hook on the last conv block so its activations are captured
        self.register(layer_name='layer4')

        # get softmax input weight (the fc-layer weight matrix)
        params = list(self.model.parameters())
        class_weights = np.squeeze(params[-2].cpu().data.numpy())

        # get tensor image
        tensor_img = get_tensor_img(self.img_path)

        # predict
        output = self.model(tensor_img)
        h_x = F.softmax(output, dim=1).data.squeeze()
        pred = h_x.argmax(0).item()

        # get cam: weighted sum of the final conv feature maps, using the
        # fc-layer weights of the selected class as the channel weights
        sel = cls if cls != -1 else pred
        final_conv = self.feature[0][0]
        total_cam = np.zeros(dtype=np.float32, shape=final_conv.shape[1:3])
        for i, w in enumerate(class_weights[sel]):
            total_cam += w * final_conv[i, :, :]

        scaled_cam = scaling(total_cam)
        resized_cam = cv2.resize(scaled_cam, (448, 448))
        heatmap = cv2.applyColorMap(np.uint8(255 * resized_cam), cv2.COLORMAP_JET)

        img = cv2.imread(self.img_path)
        img = cv2.resize(img, (448, 448))
        heatimg = heatmap * 0.4 + img * 0.5
        cv2.imwrite(os.path.join(self.save_dir, 'cam.jpg'), heatimg)

        label_info = "\nSELECT CLASS NUMBER : %d " \
                     "\nSELECT CLASS NAME : %s " \
                     "\nPREDICT CLASS NUMBER : %d " \
                     "\nPREDICT CLASS NAME : %s" \
                     % (sel, self.label[sel], pred, self.label[pred])

        return label_info

    def load_img(self):
        cam_img = cv2.imread(os.path.join(self.save_dir, 'cam.jpg'))
        cam_img = cv2.cvtColor(cam_img, cv2.COLOR_BGR2RGB)
        return cam_img


class GradCAM(object):
    def __init__(self,
                 img_path,
                 label_path,
                 model_name,):
        self.img_path = img_path
        self.save_dir = "./results/grad_cam"
        self.label = get_label(label_path)
        self.model = get_model(model_name)
        self.model.eval()

        self.grads = []
        self.features = []
        self.items = []
        self.item_id = 0

    def get_feature_hook(self, name):
        def hook(module, input, output):
            self.items.append('%d_%s' % (self.item_id, name))
            self.features.append(output)
            self.item_id += 1
        return hook

    def get_gradient_hook(self, module, grad_in, grad_out):
        self.grads.append(grad_out[0])

    def register(self):
        # hook every conv / batch-norm / ReLU module; note that
        # register_backward_hook is deprecated in recent PyTorch releases
        # in favour of register_full_backward_hook
        for module, (name, _) in zip(self.model.modules(), self.model.named_modules()):
            if type(module) == nn.Conv2d or type(module) == nn.BatchNorm2d or type(module) == nn.ReLU:
                module.register_forward_hook(self.get_feature_hook(name))
                module.register_backward_hook(self.get_gradient_hook)

    def save_img(self, cls=-1):
        make_dir(self.save_dir)

        # get tensor image
        tensor_img = get_tensor_img(self.img_path)

        # register hook
        self.register()

        # predict
        output = self.model(tensor_img)
        h_x = F.softmax(output, dim=1).data.squeeze()
        pred = h_x.argmax(0).item()
        sel = cls if cls != -1 else pred

        one_hot_output = torch.zeros(1, h_x.size()[0])
        one_hot_output[0][sel] = 1

        # backprop only the selected class score
        output.backward(gradient=one_hot_output)

        # backward hooks fire in output-to-input order, so reverse the gradient
        # list to line it up with the forward-ordered items/features lists
        self.grads = self.grads[::-1]
        for idx, name in enumerate(self.items):
            # channel weights: global-average-pool the gradients of this layer
            grad = self.grads[idx][0].mean(dim=-1, keepdim=True).mean(dim=-2, keepdim=True)
            feature = self.features[idx][0]

            grad_cam = F.relu((grad * feature).sum(dim=0)).squeeze(0)
            scaled_grad_cam = scaling(grad_cam.detach().cpu().numpy())
            resized_grad_cam = cv2.resize(scaled_grad_cam, (448, 448))
            heatmap = cv2.applyColorMap(np.uint8(255 * resized_grad_cam), cv2.COLORMAP_JET)

            img = cv2.imread(self.img_path)
            img = cv2.resize(img, (448, 448))
            heatimg = heatmap * 0.4 + img * 0.5
            cv2.imwrite(os.path.join(self.save_dir, '%s.jpg' % (name)), heatimg)

        label_info = "\nSELECT CLASS NUMBER : %d " \
                     "\nSELECT CLASS NAME : %s " \
                     "\nPREDICT CLASS NUMBER : %d " \
                     "\nPREDICT CLASS NAME : %s" \
                     % (sel, self.label[sel], pred, self.label[pred])

        return label_info

    def load_img(self, item):
        grad_cam_img = cv2.imread(os.path.join(self.save_dir, '%s.jpg' % item))
        grad_cam_img = cv2.cvtColor(grad_cam_img, cv2.COLOR_BGR2RGB)
        return grad_cam_img
if __name__ == "__main__":
grad_cam = GradCAM(img_path="../test.png",
label_path="../labels/imagenet_labels.pkl",
model_name="resnet18")
info = grad_cam.save_img()
print(info)
``` |
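For reference, the visualizers can also be driven without the Qt front end. A minimal usage sketch, assuming the paths used by the `__main__` block above (a `test.png` test image and the ImageNet label pickle under `labels/`, adjusted to your checkout):

```python
# Minimal command-line usage sketch (paths are assumptions; adjust as needed).
from vis.cam import CAM, GradCAM

cam = CAM(img_path="test.png",
          label_path="labels/imagenet_labels.pkl",
          model_name="resnet18")
print(cam.save_img())          # writes results/cam/cam.jpg
overlay = cam.load_img()       # RGB numpy array of the saved heatmap

grad_cam = GradCAM(img_path="test.png",
                   label_path="labels/imagenet_labels.pkl",
                   model_name="resnet18")
print(grad_cam.save_img())     # writes one heatmap per hooked layer
first = grad_cam.items[0]      # e.g. '0_conv1'
layer_overlay = grad_cam.load_img(first)
```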