metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "jiweiqi/CRNN-biomass-python",
"score": 3
} |
#### File: Figures/Rate Comparison/rate_constant_graph.py
```python
import numpy as np
import math
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
class CRNN:
def __init__(self):
self.Arrhenius = np.array([[222.4, 0, 17.3, 0],
[117.2, 0.15, 14.92, 0],
[218.0, 0.34, 36.75, 0],
[88.7, 0.05, 22.69, 0.45],
[110.5, 0.03, 14.08, 0.33],
[187.4, 0.04, 33.64, 0.19]])
self.lb = 1e-9
self.lnO2 = np.log(self.lb)
self.R = 8.314E-3
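        # The columns of self.Arrhenius appear to be [Ea (kJ/mol), b, lnA, nu]; with R in
        # kJ/(mol*K), cal_k evaluates k(T) = exp(nu*ln[O2] + lnA + b*ln(T) - Ea/(R*T)).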
    def cal_k(self, T):
lnA = self.Arrhenius[:, 2]
b = self.Arrhenius[:, 1]
Ea = self.Arrhenius[:, 0]
nu = self.Arrhenius[:, 3]
rates = []
for i in range(6):
rates.append(self.get_rates(Ea[i], lnA[i], b[i], nu[i], T))
return rates
    def get_rates(self, E, lnA, b, nu, T):
        return [np.exp(nu * self.lnO2 + lnA + b * np.log(x) - E / (self.R * x)) for x in T]
class LITR:
def __init__(self):
print("Class started")
def cal_burnham_rates(self, T):
As = [13, 15]
Es = [197, 217]
rates =[]
for E in Es:
for A in As:
rates.append(self.cal_rates(E, A, T))
return self.get_min_max_rate(rates, T)
    def cal_rates(self, E, A, T):
        return [(10 ** A) * math.exp(-E * 1000 / (8.314 * x)) for x in T]
def get_min_max_rate(self, rates, T):
min_rate = []
max_rate = []
for pos in range(len(T)):
            rates_at_pos = [x[pos] for x in rates]
min_rate.append(min(rates_at_pos))
max_rate.append(max(rates_at_pos))
return [min_rate, max_rate]
def get_dauenhauer_rates(self):
return pd.read_csv("Dauenhauerdata.csv", skiprows = 1)
def cal_antal_rates(self, T):
data = pd.read_csv("Antal.csv")
data["rate"] = data.apply(lambda row: self.cal_rates(row["E"],
row["log A"],
T),
axis = 1)
return self.get_min_max_rate(data["rate"], T)
def getlinestyles():
linestyles = OrderedDict(
[('solid', (0, ())),
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 5))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])
names = [
"solid",
"dotted",
"dashed",
"dashdotted",
"densely dashdotdotted",
"densely dotted"
]
return [names, linestyles]
if __name__ == '__main__':
# Get Temperatures
T = [x + 273 for x in list(range(300, 600))]
iT = [1000/x for x in T]
# Open classes
crnn = CRNN()
literature = LITR()
#CRNN Rates
crnn_labels = [f"R{x}" for x in range(1, 7)]
names, linestyles = getlinestyles()
for i, rate in enumerate(crnn.cal_k(T)):
plt.plot(iT, rate,"k", linestyle = linestyles[names[i]],
label = crnn_labels[i])
    # Burnham rates
min_rate, max_rate = literature.cal_burnham_rates(T)
plt.fill_between(iT,min_rate, max_rate,
alpha = 0.1, label = "Burnham et al 2015")
#Dauenhauer rates
dauenhauer_rates = literature.get_dauenhauer_rates()
plt.plot(dauenhauer_rates["X"], dauenhauer_rates["Y"], "-o",
label = "Cell. Consumption [Krumm et al 2016]")
plt.plot(dauenhauer_rates["X.1"], dauenhauer_rates["Y.1"], "-o",
label = "Furans Production [Krumm et al 2016]")
#Antal rates
min_rate, max_rate = literature.cal_antal_rates(T)
plt.fill_between(iT,min_rate, max_rate,
alpha = 0.1, label = "Antal et al 1980 - 2002")
# Additional information
plt.yscale("log")
plt.legend(loc = "lower left")
plt.xlabel("$10^3$/T (K$^{-1}$)")
plt.ylabel("Rate Constant AT$^b$exp(-E$_a$/RT)")
plt.savefig("figure.png")
``` |
{
"source": "jiweiqi/nnsubspace",
"score": 3
} |
#### File: nnsubspace/visual/subspaceplot.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
def imshow(x, figsize=None):
if figsize is None:
figsize = (5, 5)
plt.figure(figsize=figsize)
ax = plt.axes([0, 0, 1, 1], frameon=False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.imshow(x)
plt.autoscale(tight=True)
plt.show()
def eigenplot(w, num_eigenvalue, figsize=None):
if figsize is None:
figsize = (5, 5)
plt.figure(figsize=figsize)
plt.semilogy(range(num_eigenvalue), w[0:num_eigenvalue]**2, '-o')
plt.xlabel('index')
plt.ylabel('eigenvalue')
plt.show()
def eigenvectorplot(eigenvector, figsize=None):
if figsize is None:
figsize = (5, 5)
plt.figure(figsize=figsize)
ax = plt.axes([0, 0, 1, 1], frameon=False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
eigenvector = np.abs(eigenvector) / np.max(np.abs(eigenvector))
ax.imshow(eigenvector)
plt.autoscale(tight=True)
plt.show()
def summaryplot(xv, y, poly1d, figsize=None):
if figsize is None:
figsize = (5, 5)
plt.figure(figsize=figsize)
plt.plot(xv, y, 'o')
plt.plot(np.sort(xv), poly1d(np.sort(xv)), '-')
plt.xlabel(r'$w_1^T\xi$')
plt.ylabel('output')
plt.show()
``` |
{
"source": "jiwen624/datapipe",
"score": 3
} |
#### File: jiwen624/datapipe/db_load.py
```python
import psycopg2
import time
import logging
import sys
from settings import settings
log = logging.getLogger(__file__)
class IronMQFile:
"""A file-like wrapper for IronMQ, which will act as a input to the COPY FROM
command."""
def __init__(self, queue):
self.queue = queue
self.event_cache = list()
self.leftover = None
def _populate_cache(self):
"""Try to fetch as many of rows as possible, therefore we need a cache."""
msgs_json = self.queue.reserve(max=settings.MQ_FETCH_MSG_NUM, delete=True)
if not (msgs_json and isinstance(msgs_json, dict)):
return
for msg in msgs_json.get('messages', []):
event = msg.get('body', None)
if event:
self.event_cache.append(event)
def read(self, size=-1):
"""Read data with a chunk size"""
ret = []
size_left = size if size >= 0 else sys.maxsize
if self.leftover:
if len(self.leftover) <= size_left:
ret.append(self.leftover)
size_left -= len(self.leftover)
self.leftover = None
else:
ret.append(self.leftover[:size_left])
self.leftover = self.leftover[size_left:]
size_left = 0
while size_left > 0:
line = self.readline()
if not line:
break
len_line = len(line)
if size_left >= len_line:
ret.append(line)
else:
ret.append(line[:size_left])
self.leftover = line[size_left:]
if size == -1:
size_left = sys.maxsize
else:
size_left -= len_line
return ''.join(ret) if ret else ''
def readline(self, size=-1):
"""TODO: size parameter is not supported right now as
copy_from does not use it"""
ret = []
if size == -1:
if len(self.event_cache) == 0:
self._populate_cache()
if self.event_cache:
next_event = self.event_cache.pop(0)
if next_event:
ret.append(next_event)
if ret:
ret.append('')
ret_str = '\n'.join(ret)
else:
ret_str = ''
return ret_str
else:
            raise NotImplementedError("readline() with a size argument is not supported")
def load_data_to_postgre(queue_file, db_parms, tbl_name):
"""This function extract from the data source and load the data
to the backend database in a pre-configured interval."""
if not (queue_file and db_parms and tbl_name):
return
conn = None
while True:
curr_time = time.time()
try:
conn = psycopg2.connect(**db_parms)
curs = conn.cursor()
# The psycopg2 interface for \copy command
curs.copy_from(queue_file, tbl_name, sep=',', size=settings.DB_LOAD_CHUNK_SIZE)
curs.close()
conn.commit()
except psycopg2.DataError:
conn.rollback()
except (psycopg2.InternalError,
psycopg2.DatabaseError,
psycopg2.OperationalError,
psycopg2.InterfaceError) as e:
log.warning(e)
try:
if conn:
conn.close()
except psycopg2.InterfaceError:
pass
next_wakeup_time = settings.DB_LOAD_INTERVAL - time.time() + curr_time
sleep_time = max(next_wakeup_time, settings.DB_LOAD_MIN_INTERVAL)
time.sleep(sleep_time)
```
#### File: jiwen624/datapipe/iron_mq.py
```python
import iron_core
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
import json
except ImportError:
    import simplejson as json
try:
basestring
except NameError:
basestring = str
class Queue(object):
client = None
name = None
def __init__(self, mq, name):
"""Creates object for manipulating a queue.
Arguments:
mq -- An instance of IronMQ.
name -- The name of the queue.
"""
self.client = mq.client
self.name = name
def info(self):
"""Execute an HTTP request to get details on a queue, and
return it.
"""
url = "queues/%s" % (self.name,)
result = self.client.get(url)
return result['body']['queue']
def size(self):
"""Queue size"""
return self.info()['size']
def id(self):
"""Queue ID"""
return self.info()['id']
def total_messages(self):
"""Queue total messages count"""
return self.info()['total_messages']
def clear(self):
"""Executes an HTTP request to clear all contents of a queue."""
url = "queues/%s/messages" % self.name
result = self.client.delete(url = url,
body = json.dumps({}),
headers={'Content-Type': 'application/json'})
return result['body']
def delete(self, message_id, reservation_id=None, subscriber_name=None):
"""Execute an HTTP request to delete a message from queue.
Arguments:
message_id -- The ID of the message to be deleted.
reservation_id -- Reservation Id of the message. Reserved message could not be deleted without reservation Id.
subscriber_name -- This is required to acknowledge push after long-processing of message is finished.
"""
url = "queues/%s/messages/%s" % (self.name, message_id)
qitems = {}
if reservation_id is not None:
qitems['reservation_id'] = reservation_id
if subscriber_name is not None:
qitems['subscriber_name'] = subscriber_name
body = json.dumps(qitems)
result = self.client.delete(url=url, body=body,
headers={'Content-Type': 'application/json'})
return result['body']
def delete_multiple(self, ids=None, messages=None):
"""Execute an HTTP request to delete messages from queue.
Arguments:
ids -- A list of messages id to be deleted from the queue.
messages -- Response to message reserving.
"""
url = "queues/%s/messages" % self.name
items = None
if ids is None and messages is None:
raise Exception('Please, specify at least one parameter.')
if ids is not None:
items = [{'id': item} for item in ids]
if messages is not None:
items = [{'id': item['id'], 'reservation_id': item['reservation_id']} for item in
messages['messages']]
data = json.dumps({'ids': items})
result = self.client.delete(url=url, body=data,
headers={'Content-Type': 'application/json'})
return result['body']
def post(self, *messages):
"""Executes an HTTP request to create message on the queue.
Creates queue if not existed.
Arguments:
messages -- An array of messages to be added to the queue.
"""
url = "queues/%s/messages" % self.name
msgs = [{'body': msg} if isinstance(msg, basestring) else msg
for msg in messages]
data = json.dumps({'messages': msgs})
result = self.client.post(url=url, body=data,
headers={'Content-Type': 'application/json'})
return result['body']
def get(self, max=None, timeout=None, wait=None):
"""Deprecated. Use Queue.reserve() instead. Executes an HTTP request to get a message off of a queue.
Keyword arguments:
max -- The maximum number of messages to pull. Defaults to 1.
"""
response = self.reserve(max, timeout, wait)
return response
def reserve(self, max=None, timeout=None, wait=None, delete=None):
"""Retrieves Messages from the queue and reserves it.
Arguments:
max -- The maximum number of messages to reserve. Defaults to 1.
timeout -- Timeout in seconds.
wait -- Time to long poll for messages, in seconds. Max is 30 seconds. Default 0.
delete -- If true, do not put each message back on to the queue after reserving. Default false.
"""
url = "queues/%s/reservations" % self.name
qitems = {}
if max is not None:
qitems['n'] = max
if timeout is not None:
qitems['timeout'] = timeout
if wait is not None:
qitems['wait'] = wait
if delete is not None:
qitems['delete'] = delete
body = json.dumps(qitems)
response = self.client.post(url, body=body,
headers={'Content-Type': 'application/json'})
return response['body']
def get_message_by_id(self, message_id):
url = "queues/%s/messages/%s" % (self.name, message_id)
response = self.client.get(url)
return response['body']['message']
def peek(self, max=None):
url = "queues/%s/messages" % self.name
if max is not None:
url = "%s?n=%s" % (url, max)
response = self.client.get(url)
return response['body']
def touch(self, message_id, reservation_id, timeout=None):
"""Touching a reserved message extends its timeout to the duration specified when the message was created.
Arguments:
message_id -- The ID of the message.
reservation_id -- Reservation Id of the message.
timeout -- Optional. The timeout in seconds after which new reservation will expire.
"""
url = "queues/%s/messages/%s/touch" % (self.name, message_id)
qitems = {'reservation_id': reservation_id}
if timeout is not None:
qitems['timeout'] = timeout
body = json.dumps(qitems)
response = self.client.post(url, body=body,
headers={'Content-Type': 'application/json'})
return response['body']
def release(self, message_id, reservation_id, delay=0):
"""Release locked message after specified time. If there is no message with such id on the queue.
Arguments:
message_id -- The ID of the message.
reservation_id -- Reservation Id of the message.
delay -- The time after which the message will be released.
"""
url = "queues/%s/messages/%s/release" % (self.name, message_id)
body = {'reservation_id': reservation_id}
if delay > 0:
body['delay'] = delay
body = json.dumps(body)
response = self.client.post(url, body=body,
headers={'Content-Type': 'application/json'})
return response['body']
def update(self, options=None):
url = "queues/%s" % self.name
body = json.dumps({})
if options is not None:
body = json.dumps({'queue': options})
response = self.client.patch(url, body=body,
headers={'Content-Type': 'application/json'})
return response['body']['queue']
def delete_queue(self):
url = "queues/%s" % self.name
response = self.client.delete(url)
return response['body']
def add_alerts(self, *alerts):
body = json.dumps({'queue': {'alerts': alerts}})
url = "queues/%s" % self.name
response = self.client.patch(url=url, body=body,
headers={'Content-Type': 'application/json'})
return response['body']['queue']
def update_alerts(self, *alerts):
body = json.dumps({'queue': {'alerts': alerts}})
url = "queues/%s" % self.name
response = self.client.put(url=url, body=body,
headers={'Content-Type': 'application/json'})
return response['body']['queue']
def remove_alerts(self, *alerts):
url = "queues/%s/alerts" % self.name
body = json.dumps({'queue':{'alerts': alerts}})
response = self.client.delete(url, body=body, headers={"Content-Type":"application/json"})
return response['body']
def add_subscribers(self, *subscribers):
url = "queues/%s/subscribers" % self.name
body = json.dumps({'subscribers': subscribers})
response = self.client.post(url, body=body,
headers={'Content-Type': 'application/json'})
return response['body']
def remove_subscribers(self, *subscribers):
url = "queues/%s/subscribers" % self.name
body = json.dumps(self._prepare_subscribers(*subscribers))
response = self.client.delete(url, body=body,
headers={'Content-Type': 'application/json'})
return response['body']
def replace_subscribers(self, *subscribers):
url = "queues/%s/subscribers" % self.name
body = json.dumps({'subscribers': subscribers})
response = self.client.put(url, body=body,
headers={'Content-Type': 'application/json'})
return response['body']
def get_message_push_statuses(self, message_id):
url = "queues/%s/messages/%s/subscribers" % (self.name, message_id)
response = self.client.get(url)
return response['body']
def _prepare_alert_ids(self, *alert_ids):
alerts = [{'id': id} for id in alert_ids]
return {'alerts': alerts}
def _prepare_subscribers(self, *subscribers):
subscrs = [{'name': ss} for ss in subscribers]
return {'subscribers': subscrs}
class IronMQ(object):
NAME = 'iron_mq_python'
VERSION = '0.9'
API_VERSION = 3
client = None
name = None
def __init__(self, name=None, **kwargs):
"""Prepare a configured instance of the API wrapper and return it.
Keyword arguments are passed directly to iron_core_python; consult its
documentation for a full list and possible values."""
if name is not None:
self.name = name
kwargs['api_version'] = kwargs.get('api_version') or IronMQ.API_VERSION
self.client = iron_core.IronClient(name=IronMQ.NAME,
version=IronMQ.VERSION, product='iron_mq', **kwargs)
def queues(self, page=None, per_page=None, previous=None, prefix=None):
"""Execute an HTTP request to get a list of queues and return it.
Keyword arguments:
page -- The 0-based page to get queues from. Defaults to None, which
omits the parameter.
"""
options = {}
if page is not None:
raise Exception('page param is deprecated!')
if per_page is not None:
options['per_page'] = per_page
if previous is not None:
options['previous'] = previous
if prefix is not None:
options['prefix'] = prefix
query = urlencode(options)
url = 'queues'
if query != '':
url = "%s?%s" % (url, query)
result = self.client.get(url)
return [queue['name'] for queue in result['body']['queues']]
def queue(self, queue_name):
"""Returns Queue object.
Arguments:
queue_name -- The name of the queue.
"""
return Queue(self, queue_name)
def create_queue(self, queue_name, options=None):
body = json.dumps({})
if options is not None:
body = json.dumps({'queue': options})
url = "queues/%s" % queue_name
response = self.client.put(url, body=body, headers={'Content-Type': 'application/json'})
return response['body']['queue']
def update_queue(self, queue_name, options=None):
body = json.dumps({})
if options is not None:
body = json.dumps({'queue': options})
url = "queues/%s" % queue_name
response = self.client.patch(url, body=body,
headers={'Content-Type': 'application/json'})
return response['body']['queue']
def _prepare_subscribers(self, *subscribers):
subscrs = [{'url': ss} for ss in subscribers]
return {'subscribers': subscrs}
# DEPRECATED
def getQueues(self, page=None, project_id=None):
return self.queues(page=page)
def getQueueDetails(self, queue_name, project_id=None):
return self.queue(queue_name).info()
def deleteMessage(self, queue_name, message_id, project_id=None):
return self.queue(queue_name).delete(message_id)
def postMessage(self, queue_name, messages=[], project_id=None):
return self.queue(queue_name).post(*messages)
def getMessage(self, queue_name, max=None, project_id=None):
return self.queue(queue_name).get(max=max)
def clearQueue(self, queue_name, project_id=None):
return self.queue(queue_name).clear()
```
#### File: datapipe/test/test_main.py
```python
import unittest
from main import (EventHandlers,
CrashReportEvent,
PurchaseEvent,
InstallEvent,
pre_processing,
input_processing,
get_err_ret,
get_ok_ret)
import exceptions
class TestEventHandlers(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_register(self):
handler = EventHandlers()
self.assertEqual(handler.register('crash_report', CrashReportEvent),
CrashReportEvent)
self.assertEqual(handler.register('purchase', PurchaseEvent),
PurchaseEvent)
self.assertEqual(handler.register('install', InstallEvent),
InstallEvent)
self.assertEqual(handler.register('', InstallEvent), None)
self.assertEqual(handler.register('install', None), None)
self.assertEqual(len(handler.handlers), 3)
def test_get_handler(self):
handler = EventHandlers()
handler.register('crash_report', CrashReportEvent)
handler.register('purchase', PurchaseEvent)
handler.register('install', InstallEvent)
self.assertEqual(handler.get_handler('crash_report'), CrashReportEvent)
self.assertEqual(handler.get_handler('purchase'), PurchaseEvent)
self.assertEqual(handler.get_handler('install'), InstallEvent)
self.assertEqual(handler.get_handler('not_supported'), None)
self.assertEqual(handler.get_handler(None), None)
def test_crash_report_event(self):
crash_report = {"event_name": "crash_report",
"user_id": 666,
"timestamp": 1000,
"message": "TheFirstHeroku"}
evt = CrashReportEvent(crash_report)
self.assertEqual(str(evt), '666,1000,TheFirstHeroku')
def test_crash_report_event_bad_type(self):
crash_report = {"event_name": "crash_report",
"user_id": "666",
"timestamp": "1000",
"message": "TheFirstHeroku"}
with self.assertRaises(exceptions.ValueTypeErrException):
CrashReportEvent(crash_report)
def test_purchase_event(self):
purchase = {"event_name": "purchase",
"user_id": 666,
"timestamp": 1000,
"sku": "TheFirstHeroku"}
evt = PurchaseEvent(purchase)
self.assertEqual(str(evt), '666,1000,TheFirstHeroku')
def test_purchase_event_bad_type(self):
purchase = {"event_name": "purchase",
"user_id": "666",
"timestamp": "1000",
"sku": "TheFirstHeroku"}
with self.assertRaises(exceptions.ValueTypeErrException):
PurchaseEvent(purchase)
def test_install_event(self):
install = {"event_name": "install",
"user_id": 666,
"timestamp": 1000}
evt = InstallEvent(install)
self.assertEqual(str(evt), '666,1000')
def test_install_event_bad_type(self):
install = {"event_name": "install",
"timestamp": "1000"}
with self.assertRaises(exceptions.KeyMissingException):
InstallEvent(install)
def test_pre_processing(self):
with self.assertRaises(exceptions.InputExceedLimitException):
pre_processing(None)
with self.assertRaises(exceptions.InputExceedLimitException):
pre_processing('')
self.assertEqual(pre_processing(b'12'), 12)
self.assertEqual(pre_processing(12), 12)
self.assertEqual('{"event_name": "crash_report"}',
'{"event_name": "crash_report"}')
with self.assertRaises(exceptions.BadJsonStructureException):
pre_processing('{"event_name')
def test_input_processing(self):
with self.assertRaises(exceptions.NoContentException):
input_processing(None)
self.assertEqual(input_processing(12),
get_err_ret(exceptions.BadJsonStructureException(12)))
self.assertEqual(input_processing('{"a": "b"}'),
get_err_ret(exceptions.KeyMissingException('event_name')))
self.assertEqual(input_processing('{"event_name": "crash_report__"}'),
get_err_ret(exceptions.UnsupportedEventException('crash_report__')))
ok_msg = '{"event_name": "crash_report","user_id": 666,' \
'"timestamp": 1000,"message": "TheFirstHeroku"}'
self.assertEqual(input_processing(ok_msg), get_ok_ret())
if __name__ == '__main__':
unittest.main()
```
#### File: datapipe/test/test_settings.py
```python
import unittest
from unittest import mock
from unittest.mock import mock_open
import settings
class TestEncrypto(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_merged_settings(self):
with mock.patch('os.environ.get', return_value='/etc/datapipe.json') as oeg:
with mock.patch('os.path.isfile', return_value=True) as opi:
with mock.patch('builtins.open', new_callable=mock_open()) as opn:
with mock.patch('json.load'):
with mock.patch('settings._merge_settings') as ms:
settings.get_merged_settings()
oeg.assert_called_with('DATAPIPE_CNF', '/etc/datapipe.json')
opi.assert_called_with('/etc/datapipe.json')
opn.assert_called_with('/etc/datapipe.json')
def test_merge_settings_INPUT_MAX_LEN(self):
new_cnf = settings._merge_settings(settings._Settings(),
{"INPUT_MAX_LEN": 100})
self.assertEqual(new_cnf.INPUT_MAX_LEN, 100)
def test_merge_settings_ENCODING_SCHEME(self):
new_cnf = settings._merge_settings(settings._Settings(),
{"ENCODING_SCHEME": 'utf-8'})
self.assertEqual(new_cnf.ENCODING_SCHEME, 'utf-8')
def test_merge_settings_DB_LOAD_INTERVAL(self):
new_cnf = settings._merge_settings(settings._Settings(),
{"DB_LOAD_INTERVAL": 500})
self.assertEqual(new_cnf.DB_LOAD_INTERVAL, 500)
def test_merge_settings_DEFAULT_APP_PORT(self):
new_cnf = settings._merge_settings(settings._Settings(),
{"DEFAULT_APP_PORT": 9999})
self.assertEqual(new_cnf.DEFAULT_APP_PORT, 9999)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jiwenyu0531/osroom",
"score": 2
} |
#### File: utils/send_msg/send_email.py
```python
import json
import time
import os
from flask_mail import Message
from apps.core.plug_in.manager import plugin_manager
from apps.core.utils.get_config import get_config
from apps.utils.osr_async.osr_async import async_process
from apps.app import mail, app, mdbs
__author__ = 'woo'
def send_email(subject, recipients, text_msg=None, html_msg=None, attach=None,
send_independently=True):
"""
    Send an email.
    :param subject:
    :param recipients: list of recipient addresses
    :param text_msg:
    :param html_msg:
    :param attach: tuple of (file path, content type)
    :param send_independently: if True, send a separate message to each address in
        `recipients`; otherwise send once, so every recipient can see the others' addresses
:return:
"""
    # Check whether a plugin hook handles sending the email
data = plugin_manager.call_plug(hook_name="send_email",
recipients=recipients,
send_independently=send_independently,
subject=subject,
html=html_msg,
text=text_msg,
attach=attach)
if data == "__no_plugin__":
msg = Message(subject=subject,
sender=get_config("email", "MAIL_DEFAULT_SENDER"))
if html_msg:
msg.html = html_msg
elif text_msg:
msg.body = text_msg
if attach and len(attach) == 2:
with app.open_resource(attach[0]) as fp:
msg.attach(os.path.split(attach[0])[-1], attach[1], fp.read())
# send email
send_async_email(app, msg,
recipients=recipients,
send_independently=send_independently)
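# Example usage (hypothetical addresses):
#   send_email(subject="Welcome", recipients=["a@example.com", "b@example.com"],
#              html_msg="<p>Hello</p>", send_independently=True)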
# TODO: switch to Celery-based asynchronous sending later
@async_process()
def send_async_email(app, msg, recipients, send_independently=True):
"""
    Send an email asynchronously.
    :param app:
    :param msg:
    :param send_independently: send a separate message to each recipient
:return:
"""
mdbs["sys"].init_app(reinit=True)
with app.app_context():
if send_independently:
            # Send independently: connect to the mail server once, then send one message per recipient
with mail.connect() as conn:
for recipient in recipients:
msg.recipients = [recipient]
send_email_process(msg, conn)
else:
msg.recipients = recipients
return send_email_process(msg)
def send_email_process(msg, connected_instance=None):
"""
    Perform the actual send.
    :param msg:
    :param connected_instance: an already-connected mail connection instance
:return:
"""
error_info = None
try:
if connected_instance:
r = connected_instance.send(msg)
else:
r = mail.send(msg)
if not r:
status = "normal"
else:
status = "abnormal"
except Exception as e:
error_info = json.dumps(str(e))
status = "error"
log = {
"type": "email",
"error_info": error_info,
'status': status,
'subject': msg.subject,
'from': msg.sender,
'to': list(msg.send_to),
'date': msg.date,
'body': msg.body,
'html': msg.html,
'msgid': msg.msgId,
'time': time.time()
}
mdbs["sys"].db.sys_message.insert_one(log)
    if status == "normal":
        return 0
    else:
        return -1
``` |
{
"source": "jiwidi/adventofcode2019",
"score": 4
} |
#### File: adventofcode2019/day04/main.py
```python
def next_number_p1(number):
number+=1
    #Check if it is ascending, otherwise return the next ascending number
if(isAsc(number)==0):
number = nextAsc(number)
#Check if it contains double digits, otherwise return next number containing double digits
if(hasDD(number)==0):
number = nextDD(number)
return number
def next_number_p2(number):
number+=1
if number==177889:
print("yo")
    #Check if it is ascending, otherwise return the next ascending number
if(isAsc(number)==0):
number = nextAsc(number)
#Check if it contains double digits, otherwise return next number containing double digits
if(not hasDDstrict(number)):
number = nextDD(number)
if(not hasDDstrict(number)):
number =next_number_p2(number)
return number
def isAsc(i):
listIn = [int(n) for n in str(i)]
r=0
previous=listIn[0]
for n in listIn[1:]:
if(n<previous):
return False
previous=n
return True
def nextAsc(i):
listIn = [int(n) for n in str(i)]
previous = 0
c=0
for n in listIn:
if(n<previous):
listIn[c]=previous
else:
previous=n
c+=1
return int(''.join(map(str, listIn)))
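# NOTE: the nextAsc definition below redefines (and therefore replaces) the one above;
# Python keeps only the last definition of a name.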
def nextAsc(i):
listIn = [int(n) for n in str(i)]
previous = listIn[0]
c=1
for n in listIn[1:]:
if(n<previous):
listIn = listIn[:c] + [previous for u in range(len(listIn)-c)]
break
else:
previous=n
c+=1
return int(''.join(map(str, listIn)))
def hasDD(i):
listIn = [int(n) for n in str(i)]
r=0
previous=listIn[0]
for n in listIn[1:]:
if(n==previous):
r+=1
else:
previous=n
return r
def hasDDstrict(i):
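    # Part-two rule: returns True if some digit repeats exactly once in a row,
    # i.e. the number contains a double that is not part of a larger group.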
listIn = [int(n) for n in str(i)]
r={}
for u in range(10):
r[u]=0
previous=listIn[0]
for n in listIn[1:]:
if(n==previous):
r[n]+=1
else:
previous=n
return (1 in r.values())
def nextDD(i):
listIn = [int(n) for n in str(i)]
listIn[-2] = listIn[-1]
return int(''.join(map(str, listIn)))
def read_input(path):
return [int(x) for x in open(path).read().split("-")]
if __name__ == "__main__":
#Read the input
small,big = read_input("input.txt")
# #Solve p1
c=0
words1 = []
minN,maxN = small,big
while(minN<maxN):
minN = next_number_p1(minN)
if(minN<maxN):
words1.append(minN)
c+=1
print(f"--- Part One --- \n{c}")
# #Solve p2
c=0
words2 = []
minN,maxN = small,big
while(minN<maxN):
minN = next_number_p2(minN)
if(minN<maxN):
words2.append(minN)
c+=1
print(f"--- Part Two --- \n{c}")
``` |
{
"source": "jiwidi/asr-transformer",
"score": 3
} |
#### File: project/model/transformer.py
```python
from argparse import ArgumentParser
import os
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import torchaudio
import torchvision.transforms as transforms
from pytorch_lightning.core import LightningModule
from torch.utils.data import DataLoader
from project.utils.functions import data_processing, GreedyDecoder, cer, wer
from project.utils.cosine_annearing_with_warmup import CosineAnnealingWarmUpRestarts
from .decoder import Decoder
from .encoder import Encoder
IGNORE_ID = -1
def cal_performance(pred, gold, smoothing=0.0):
"""Calculate cross entropy loss, apply label smoothing if needed.
Args:
pred: N x T x C, score before softmax
gold: N x T
"""
pred = pred.view(-1, pred.size(2))
gold = gold.contiguous().view(-1)
loss = cal_loss(pred, gold, smoothing)
pred = pred.max(1)[1]
non_pad_mask = gold.ne(IGNORE_ID)
n_correct = pred.eq(gold)
n_correct = n_correct.masked_select(non_pad_mask).sum().item()
return loss, n_correct
def cal_loss(pred, gold, smoothing=0.0):
"""Calculate cross entropy loss, apply label smoothing if needed.
"""
    if smoothing > 0.0:
eps = smoothing
n_class = pred.size(1)
# Generate one-hot matrix: N x C.
# Only label position is 1 and all other positions are 0
# gold include -1 value (IGNORE_ID) and this will lead to assert error
gold_for_scatter = gold.ne(IGNORE_ID).long() * gold
one_hot = torch.zeros_like(pred).scatter(1, gold_for_scatter.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / n_class
log_prb = F.log_softmax(pred, dim=1)
non_pad_mask = gold.ne(IGNORE_ID)
n_word = non_pad_mask.sum().item()
loss = -(one_hot * log_prb).sum(dim=1)
loss = loss.masked_select(non_pad_mask).sum() / n_word
else:
        loss = F.cross_entropy(
            pred, gold, ignore_index=IGNORE_ID, reduction="mean"
        )
return loss
class Transformer(LightningModule):
"""An encoder-decoder framework only includes attention.
"""
def __init__(self, encoder=None, decoder=None, **kwargs):
super(Transformer, self).__init__()
self.save_hyperparameters()
if encoder is not None and decoder is not None:
self.encoder = encoder
self.decoder = decoder
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
else:
self.encoder = Encoder()
self.decoder = Decoder()
self.criterion = nn.CTCLoss()
def forward(self, padded_input, input_lengths, padded_target):
"""
Args:
padded_input: N x Ti x D
input_lengths: N
padded_targets: N x To
"""
encoder_padded_outputs, *_ = self.encoder(padded_input, input_lengths)
# pred is score before softmax
pred, gold, *_ = self.decoder(
padded_target, encoder_padded_outputs, input_lengths
)
return pred, gold
def recognize(self, input, input_length, char_list, args):
"""Sequence-to-Sequence beam search, decode one utterence now.
Args:
input: T x D
char_list: list of characters
args: args.beam
Returns:
nbest_hyps:
"""
encoder_outputs, *_ = self.encoder(input.unsqueeze(0), input_length)
nbest_hyps = self.decoder.recognize_beam(encoder_outputs[0], char_list, args)
return nbest_hyps
def serialize(self, optimizer, epoch, tr_loss, val_loss):
package = {
"state_dict": self.state_dict(),
"optim_dict": optimizer.state_dict(),
"epoch": epoch,
}
if tr_loss is not None:
package["tr_loss"] = tr_loss
package["val_loss"] = val_loss
return package
# ---------------------
# Pytorch lightning overrides
# ---------------------
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# forward pass
# spectrograms, labels, input_lengths, label_lengths = batch
# y_hat = self(spectrograms)
# output = F.log_softmax(y_hat, dim=2)
# output = output.transpose(0, 1) # (time, batch, n_class)
# loss = self.criterion(output, labels, input_lengths, label_lengths)
# tensorboard_logs = {"Loss/train": loss}
spectrograms, labels, input_lengths, label_lengths = batch
spectrograms = spectrograms.squeeze().permute(0, 2, 1)
input_lengths = torch.tensor(input_lengths)
# Forward prop.
pred, gold = self(spectrograms, input_lengths, labels)
# print(pred.dtype, gold.dtype)
loss, n_correct = cal_performance(pred, gold.long(), smoothing=0)
tensorboard_logs = {"Loss/train": loss}
return {"loss": loss, "log": tensorboard_logs}
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
spectrograms, labels, input_lengths, label_lengths = batch
spectrograms = spectrograms.squeeze().permute(0, 2, 1)
input_lengths_t = torch.tensor(input_lengths)
# Forward prop.
pred, gold = self(spectrograms, input_lengths_t, labels)
# self.criterion(output, labels, input_lengths, label_lengths)
# print(pred.shape)
# print( torch.argmax(pred, dim=2).shape)
# print(gold.shape)
# print(pred)
# print( torch.argmax(pred, dim=2))
# print(gold)
# sys.exit(0)
loss, n_correct = cal_performance(pred, gold.long(), smoothing=0)
decoded_preds, decoded_targets = GreedyDecoder(torch.argmax(pred, dim=2), labels, label_lengths)
tensorboard_logs = {"Loss/train": loss}
test_cer, test_wer = [], []
for j in range(len(decoded_preds)):
test_cer.append(cer(decoded_targets[j], decoded_preds[j]))
test_wer.append(wer(decoded_targets[j], decoded_preds[j]))
avg_cer = torch.FloatTensor([sum(test_cer) / len(test_cer)])
avg_wer = torch.FloatTensor(
[sum(test_wer) / len(test_wer)]
        )  # Need work to make all operations in torch
logs = {
"cer": avg_cer,
"wer": avg_wer,
}
return {
"val_loss": loss,
"n_correct_pred": n_correct,
"n_pred": len(spectrograms),
"log": logs,
"wer": avg_wer,
"cer": avg_cer,
}
def test_step(self, batch, batch_idx):
spectrograms, labels, input_lengths, label_lengths = batch
spectrograms = spectrograms.squeeze().permute(0, 2, 1)
input_lengths_t = torch.tensor(input_lengths)
# Forward prop.
pred, gold = self(spectrograms, input_lengths_t, labels)
loss, n_correct = cal_performance(pred, gold.long(), smoothing=0)
decoded_preds, decoded_targets = GreedyDecoder(torch.argmax(pred, dim=2), labels, label_lengths)
tensorboard_logs = {"Loss/train": loss}
test_cer, test_wer = [], []
for j in range(len(decoded_preds)):
test_cer.append(cer(decoded_targets[j], decoded_preds[j]))
test_wer.append(wer(decoded_targets[j], decoded_preds[j]))
avg_cer = torch.FloatTensor([sum(test_cer) / len(test_cer)])
avg_wer = torch.FloatTensor(
[sum(test_wer) / len(test_wer)]
        )  # Need work to make all operations in torch
logs = {
"cer": avg_cer,
"wer": avg_wer,
}
        return {
            "test_loss": loss,
            "n_correct_pred": n_correct,
            "n_pred": len(spectrograms),
            "log": logs,
            "wer": avg_wer,
            "cer": avg_cer,
        }
def validation_epoch_end(self, outputs):
"""
Called at the end of validation to aggregate outputs.
:param outputs: list of individual outputs of each validation step.
"""
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
val_acc = sum([x["n_correct_pred"] for x in outputs]) / sum(
x["n_pred"] for x in outputs
)
avg_wer = torch.stack([x["wer"] for x in outputs]).mean()
avg_cer = torch.stack([x["cer"] for x in outputs]).mean()
tensorboard_logs = {
"Loss/val": avg_loss,
"val_acc": val_acc,
"Metrics/wer": avg_wer,
"Metrics/cer": avg_cer,
}
return {
"val_loss": avg_loss,
"log": tensorboard_logs,
"wer": avg_wer,
"cer": avg_cer,
}
def test_epoch_end(self, outputs):
avg_loss = torch.stack([x["test_loss"] for x in outputs]).mean()
test_acc = sum([x["n_correct_pred"] for x in outputs]) / sum(
x["n_pred"] for x in outputs
)
avg_wer = torch.stack([x["wer"] for x in outputs]).mean()
avg_cer = torch.stack([x["cer"] for x in outputs]).mean()
tensorboard_logs = {
"Loss/test": avg_loss,
"test_acc": test_acc,
"Metrics/wer": avg_wer,
"Metrics/cer": avg_cer,
}
return {
"test_loss": avg_loss,
"log": tensorboard_logs,
"wer": avg_wer,
"cer": avg_cer,
}
# ---------------------
# TRAINING SETUP
# ---------------------
def configure_optimizers(self):
"""
Return whatever optimizers and learning rate schedulers you want here.
At least one optimizer is required.
"""
optimizer = torch.optim.Adam(
self.parameters(), lr=0.1, betas=(0.9, 0.98), eps=1e-09
)
# lr_scheduler = {'scheduler':optim.lr_scheduler.CyclicLR(optimizer,base_lr=self.hparams.learning_rate/5,max_lr=self.hparams.learning_rate,step_size_up=2000,cycle_momentum=False),
lr_scheduler = {
"scheduler": optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=self.hparams.learning_rate,
steps_per_epoch=int(len(self.train_dataloader())),
epochs=self.hparams.epochs,
anneal_strategy="linear",
final_div_factor=0.06,
pct_start=0.008,
),
# 'scheduler': CosineAnnealingWarmUpRestarts(optimizer, T_0=int(len(self.train_dataloader())*math.pi), T_mult=2, eta_max=self.learning_rate, T_up=int(len(self.train_dataloader()))*2, gamma=0.8),
"name": "learning_rate", # Name for tensorboard logs
"interval": "step",
"frequency": 1,
}
return [optimizer], [lr_scheduler]
def prepare_data(self):
if not os.path.exists(self.hparams.data_root):
os.makedirs(self.hparams.data_root)
a = [
torchaudio.datasets.LIBRISPEECH(
self.hparams.data_root, url=path, download=True
)
for path in self.hparams.data_train
]
b = [
torchaudio.datasets.LIBRISPEECH(
self.hparams.data_root, url=path, download=True
)
for path in self.hparams.data_test
]
return a, b
def setup(self, stage):
self.train_data = data.ConcatDataset(
[
torchaudio.datasets.LIBRISPEECH(
self.hparams.data_root, url=path, download=True
)
for path in self.hparams.data_train
]
)
self.test_data = data.ConcatDataset(
[
torchaudio.datasets.LIBRISPEECH(
self.hparams.data_root, url=path, download=True
)
for path in self.hparams.data_test
]
)
def train_dataloader(self):
return DataLoader(
dataset=self.train_data,
batch_size=self.hparams.batch_size,
shuffle=True,
collate_fn=lambda x: data_processing(x, "train"),
num_workers=self.hparams.num_workers,
)
def val_dataloader(self):
return DataLoader(
dataset=self.test_data,
batch_size=self.hparams.batch_size,
shuffle=False,
collate_fn=lambda x: data_processing(x, "valid"),
num_workers=self.hparams.num_workers,
)
def test_dataloader(self):
return DataLoader(
dataset=self.test_data,
batch_size=self.hparams.batch_size,
shuffle=False,
collate_fn=lambda x: data_processing(x, "valid"),
num_workers=self.hparams.num_workers,
)
@staticmethod
def add_model_specific_args(parent_parser): # pragma: no-cover
"""
Define parameters that only apply to this model
"""
parser = ArgumentParser(parents=[parent_parser])
# parser.add_argument("--n_cnn_layers", default=3, type=int)
# parser.add_argument("--n_rnn_layers", default=5, type=int)
# parser.add_argument("--rnn_dim", default=512, type=int)
# parser.add_argument("--n_class", default=29, type=int)
# parser.add_argument("--n_feats", default=128, type=str)
# parser.add_argument("--stride", default=2, type=int)
# parser.add_argument("--dropout", default=0.1, type=float)
return parser
``` |
{
"source": "jiwidi/dailypepebot-cloud",
"score": 3
} |
#### File: dailypepebot-cloud/app/main.py
```python
import os
import telegram
from google.cloud import datastore
from google.cloud import storage
import random
bot = telegram.Bot(token=os.environ["TELEGRAM_TOKEN"])
def getpepe():
datastore_client = datastore.Client()
query = datastore_client.query(kind='Pepe')
image_entities = list(query.fetch())
#Only 1 img, testing
idx = (int) (random.randrange(0, len(image_entities)))
image_entities = [image_entities[idx]]
return image_entities[0]['image_public_url']
def webhook(request):
if request.method == "POST":
update = telegram.Update.de_json(request.get_json(force=True), bot)
luckynumber = (int) (random.randrange(0,100))
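        # luckynumber == 69 occurs with roughly 1% probability, so occasionally a
        # "Lucky pepe" is sent even when /randompepe was not requested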
if(update.message.text=='/randompepe' or luckynumber==69):
chat_id = update.message.chat.id
pepeurl=getpepe()
            # Reply with a random pepe photo
if(luckynumber==69):
bot.send_photo(chat_id=chat_id,photo=pepeurl,caption="Lucky pepe")
else:
bot.send_photo(chat_id=chat_id, photo=pepeurl)
return "ok"
``` |
{
"source": "jiwidi/DeepICF_Tensorflow2.0",
"score": 3
} |
#### File: jiwidi/DeepICF_Tensorflow2.0/create_dataset.py
```python
import pandas as pd
import numpy as np
import os
import requests, zipfile, io
from random import sample
import logging
from tqdm import tqdm
import argparse
INPUT_PATH = 'data/ml-1m/'
INPUT_FILE = 'ratings.dat'
OUTPUT_PATH_TRAIN = 'movielens.train.rating'
OUTPUT_PATH_TEST = 'movielens.test.rating'
OUTPUT_PATH_TEST_NEGATIVES = 'movielens.test.negative'
OUTPUT_PATH_TRAIN_NEGATIVES = 'movielens.train.negative'
USER_FIELD = 'userID'
dataset_url = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'
logger = logging.getLogger()
#################### Arguments ####################
def parse_args():
parser = argparse.ArgumentParser(description="Create dataset")
parser.add_argument('--input_path', nargs='?', default=INPUT_PATH,
help='Input data path.')
parser.add_argument('--num_neg_test', type=int, default=-1,
help='Number of negative instances to pair with a positive instance for the test set. If -1 no negatives will be created')
parser.add_argument('--num_neg_train', type=int, default=-1,
help='Number of negative instances to pair with a positive instance for the train set. If -1 no negatives will be created')
parser.add_argument('--verbose', type=int, default=1,
help='Show info while running')
parser.add_argument('--force_download', type=bool, default=False,
help='Forces the script to redownload the data')
return parser.parse_args()
class TqdmLoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
msg = self.format(record)
tqdm.write(msg)
self.flush()
def check_data(path,force):
'''
Checks if the needed data files to create the dataset are inside the provided path
'''
    if not force and os.path.isfile(INPUT_PATH + INPUT_FILE):
        return 0
logging.info('Downloading file')
oldhandlers = list(logger.handlers)
response = requests.get(dataset_url,stream=True) # Need to add progress bar
    total_length = response.headers.get('content-length')
    if total_length is None:  # no content-length header
        content = response.content
    else:
        total_length = int(total_length)
        content = bytearray(b'')
# Setup progress bar
logger.addHandler (TqdmLoggingHandler ())
try:
for x in tqdm(response.iter_content(chunk_size=4096),total = int((total_length/4096))):
content+=x
except:
pass
logger.handlers = oldhandlers
z = zipfile.ZipFile(io.BytesIO(content))
target = zipfile.ZipFile(io.BytesIO(content),mode= 'w')
file = z.filelist[2]
target.writestr('ratings.dat', z.read(file.filename))
target.extract('ratings.dat',path)
logging.info('Download finished')
def get_train_test_df(transactions):
'''
return train and test dataframe, with leave the latest one out strategy
Args:
transactions: the entire df of user/item transactions
'''
logging.info("Size of the entire dataset:{}".format(transactions.shape))
transactions = transactions.sort_values(by = ['timestamp'])
last_transaction_mask = transactions.duplicated(subset = {USER_FIELD}, keep = "last")
# The last transaction mask has all the latest items of people
# We want for the test dataset, items marked with a False
train_df = transactions[last_transaction_mask]
test_df = transactions[~last_transaction_mask]
train_df = train_df.sort_values(by=["userID", 'timestamp'])
test_df = test_df.sort_values(by=["userID", 'timestamp'])
return train_df, test_df
def get_test_negatives(transactions,negatives):
'''
    return a negative-sample dataframe, creating `negatives` negative samples for every user.
Args:
transactions: the entire df of user/item transactions
'''
#Really slow, need to improve
users=[]
movies=[]
ratings=[]
auxuserIDs = []
list_movies = transactions.movieID.unique()
oldhandlers = list(logger.handlers)
logger.addHandler (TqdmLoggingHandler ())
logging.info('Creating negatives, this will take a while')
usermoviedict = {}
for user in transactions.userID.unique():
usermoviedict[user] = transactions[transactions.userID == user].movieID.unique().tolist()
auxusermoviedict = {}
for user in transactions.userID.unique():
uniques = transactions[(transactions['userID']==user) & (transactions['rating']==1)].sort_values(by=['timestamp'])['movieID'].unique().tolist()[:-1] #remove the last saw, NICKLAS >.<
zeros = ['0' for u in range(len(transactions.movieID.unique())-len(uniques))]
auxusermoviedict[user] = uniques+zeros
for user in tqdm(transactions.userID.unique()):
useraux = auxusermoviedict[user]
user_movies = usermoviedict[user]
unseen_movies = [item for item in list_movies if item not in user_movies]
negative_movies = sample(unseen_movies,negatives)
usermoviedict[user] = usermoviedict[user] + negative_movies
for movie in negative_movies:
users.append(user)
movies.append(movie)
ratings.append(0)
auxuserIDs.append(useraux)
negatives = pd.DataFrame({
"userID" : users,
"movieID" : movies,
"rating" : ratings,
"auxuserID" : auxuserIDs}
)
logger.handlers = oldhandlers
return negatives
def get_train_negatives(transactions,negatives):
'''
    return a negative-sample dataframe, creating `negatives` negative samples for every positive rating.
Args:
transactions: the entire df of user/item transactions
'''
#Really slow, need to improve
users=[]
movies=[]
ratings=[]
auxuserIDs = []
list_movies = transactions.movieID.unique()
oldhandlers = list(logger.handlers)
logger.addHandler (TqdmLoggingHandler ())
logging.info('Creating negatives, this will take a while')
usermoviedict = {}
for user in transactions.userID.unique():
usermoviedict[user] = transactions[transactions.userID == user].movieID.unique().tolist()
auxusermoviedict = {}
for user in transactions.userID.unique():
uniques = transactions[(transactions['userID']==user) & (transactions['rating']==1)].sort_values(by=['timestamp'])['movieID'].unique().tolist()[:-1] #remove the last saw, NICKLAS >.<
zeros = ['0' for u in range(len(transactions.movieID.unique())-len(uniques))]
auxusermoviedict[user] = uniques+zeros
for user in tqdm(transactions.userID):
useraux = auxusermoviedict[user]
user_movies = usermoviedict[user]
unseen_movies = [item for item in list_movies if item not in user_movies]
negative_movies = sample(unseen_movies,negatives)
usermoviedict[user] = usermoviedict[user] + negative_movies
for movie in negative_movies:
users.append(user)
movies.append(movie)
ratings.append(0)
auxuserIDs.append(useraux)
negatives = pd.DataFrame({
"userID" : users,
"movieID" : movies,
"rating" : ratings,
"auxuserID" : auxuserIDs}
)
logger.handlers = oldhandlers
return negatives
def report_stats(transactions, train, test, negative_test):
'''
return stats for a series of dataframes
Args:
transactions: the entire df of user/item transactions
train: Train dataframe
test: test dataframe
negative_test: negative_test dataframe
'''
whole_size = transactions.shape[0]*1.0
train_size = train.shape[0]
test_size = test.shape[0]
negative_size = negative_test.shape[0]
print("Total No. of Records = {}".format(whole_size))
print("Train size = {}, Test size = {} Negative Test Size = {}".format(train_size, test_size, negative_size))
print("Train % = {}, Test % ={}".format(train_size/whole_size, test_size/whole_size))
def create_mapping(values):
value_to_id = {value:idx for idx, value in enumerate(values.unique())}
return value_to_id
def parse_user(df,row):
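    # Build the user's list of positively rated movies and zero-pad it to the total
    # number of movies; clean_df stores this as the 'auxuserID' history column.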
uniques = df[(df['userID']==row['userID']) & (df['rating']==1)]['movieID'].unique().tolist()
zeros = ['0' for u in range(len(df.movieID.unique())-len(uniques))]
return uniques+zeros
def clean_df(transactions):
user_mapping = create_mapping(transactions["userID"])
item_mapping = create_mapping(transactions["movieID"])
transactions = transactions.sample(10000,random_state=17)
transactions['movieID'] += 1
transactions['movieID'] = transactions['movieID'].apply(str)
transactions['userID'] = transactions['userID'].apply(str)
transactions['rating']=1
transactions['auxuserID'] = transactions.apply(lambda row: parse_user(transactions,row), axis=1)
return transactions
def main():
args = parse_args()
if(args.verbose == 1):
logger.setLevel (logging.INFO)
else:
logger.setLevel (logging.WARNING)
check_data(INPUT_PATH,args.force_download)
transactions = pd.read_csv(INPUT_PATH+INPUT_FILE, sep="::", names = ['userID', 'movieID', 'rating', 'timestamp'],engine='python')
logging.info('Cleaning dataset')
transactions = transactions[:10000]
transactions = clean_df(transactions)
print(transactions.head())
# convert to implicit scenario
transactions['rating'] = 1
# make the dataset
train_df, test_df = get_train_test_df(transactions)
train_df.reset_index(inplace=True, drop=True)
test_df.reset_index(inplace=True, drop=True)
if(args.num_neg_test>0):
negative_test = get_test_negatives(transactions,args.num_neg_test)
negative_test.reset_index(inplace=True, drop=True)
test_df = pd.concat([test_df,negative_test],sort=True).sample(frac=1).reset_index(drop=True)
test_df.to_csv(INPUT_PATH+OUTPUT_PATH_TEST_NEGATIVES,index = False)
report_stats(transactions, train_df, test_df,negative_test)
else:
test_df.to_csv(INPUT_PATH+OUTPUT_PATH_TEST,index = False)
if(args.num_neg_train>0):
negative_train = get_train_negatives(transactions,args.num_neg_train)
negative_train.columns = [len(transactions.userID.unique()),len(transactions.movieID.unique()),0]
train_df = pd.concat([test_df,negative_train],sort=True).sample(frac=1).reset_index(drop=True)
train_df.to_csv(INPUT_PATH+OUTPUT_PATH_TRAIN_NEGATIVES,index = False)
report_stats(transactions, train_df, test_df,negative_test)
else:
train_df.to_csv(INPUT_PATH+OUTPUT_PATH_TRAIN,index = False)
return 0
if __name__ == "__main__":
main()
``` |
{
"source": "jiwidi/KTH-Erasmus",
"score": 2
} |
#### File: dectrees/python/q6.py
```python
import random
import sys
import dtree as d
import monkdata as m
import numpy as np
import plotly
from statistics import pvariance
import plotly.graph_objs as go
plotly.tools.set_credentials_file(username='jiwidi', api_key='qMue368p0yeZMLP7rxmU')
reps=int(sys.argv[1])
def partition(data, fraction):
ldata= list(data)
random.shuffle(ldata)
breakPoint = int(len(ldata)*fraction)
return ldata[:breakPoint], ldata[breakPoint:]
fraction=[0.3,0.4,0.5,0.6,0.7,0.8]
valuesmonk1=[]
valuesmonk3=[]
variancemonk1=[]
variancemonk3=[]
monk1data=[]
monk3data=[]
monk1simple=[]
monk3simple=[]
for v in fraction:
monk1train,monk1validation=partition(m.monk1,v)
monk3train,monk3validation=partition(m.monk3,v)
monk1tree=d.buildTree(monk1train,m.attributes)
monk3tree=d.buildTree(monk3train,m.attributes)
monk1simple.append(d.check(monk1tree,monk1validation))
monk3simple.append(d.check(monk3tree,monk3validation))
#Monk1 evaluation
for v in fraction:
value=0
for _ in range(reps):
monk1train,monk1validation=partition(m.monk1,v)
monk1tree=d.buildTree(monk1train,m.attributes)
topPerformance=0
performance=0
keepPruning=True
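        # Greedy reduced-error pruning: keep replacing the tree with the best pruned
        # candidate as long as validation accuracy improves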
while(keepPruning):
keepPruning=False
pruned=d.allPruned(monk1tree)
for tree in pruned:
performance=d.check(tree,monk1validation)
if(performance>topPerformance):
keepPruning=True
topPerformance=performance
monk1tree=tree
valuesmonk1.append(d.check(monk1tree,m.monk1test))
print("Monk1 fraction "+ str(v))
mean=np.mean(valuesmonk1)
print(" Error: "+str(1-mean))
monk1data.append(1-mean)
variance=pvariance(valuesmonk1,mean)
print(" Variance: "+str(variance))
variancemonk1.append(variance)
print()
print()
print()
#Monk3 evaluation
for v in fraction:
value=0
for _ in range(reps):
monk3train,monk3validation=partition(m.monk3,v)
monk3tree=d.buildTree(monk3train,m.attributes)
topPerformance=0
performance=0
keepPruning=True
while(keepPruning):
keepPruning=False
pruned=d.allPruned(monk3tree)
for tree in pruned:
performance=d.check(tree,monk3validation)
if(performance>topPerformance):
keepPruning=True
topPerformance=performance
                    monk3tree=tree
valuesmonk3.append(d.check(monk3tree,m.monk3test))
print("Monk3 fraction "+ str(v))
mean=np.mean(valuesmonk3)
print(" Error: "+str(1-mean))
monk3data.append(1-mean)
variance=pvariance(valuesmonk3,mean)
print(" Variance: "+str(variance))
variancemonk3.append(variance)
trace1 = go.Scatter(
x=fraction,
y=monk1data,
name = '<b>Monk1 </b>', # Style name/legend entry with html tags
connectgaps=False
)
trace2 = go.Scatter(
x=fraction,
y=monk3data,
name = '<b>Monk3 </b>', # Style name/legend entry with html tags
connectgaps=False
)
trace1s = go.Scatter(
x=fraction,
y=monk1simple,
name='<b>Monk1 error </b>'
)
trace3s = go.Scatter(
x=fraction,
y=monk3simple,
name='<b>Monk3 error </b>'
)
trace4 = go.Scatter(
x=fraction,
y=variancemonk1,
name = '<b>Monk1 variance</b>', # Style name/legend entry with html tags
connectgaps=True
)
trace3 = go.Scatter(
x=fraction,
y=variancemonk3,
name = '<b>Monk3 variance</b>', # Style name/legend entry with html tags
fillcolor='rgba(0,100,80,0.2)',
connectgaps=True
)
# data =[trace4,trace3] #Variance
data =[trace1,trace2] #Error
# Edit the layout
layout = dict(title = 'Effect of different pruning fractions in monk1 and monk3',
              xaxis = dict(title = 'Fraction between train and validation datasets'),
              yaxis = dict(title = 'Error on test dataset'),
)
fig = dict(data=data, layout=layout)
plotly.plotly.iplot(fig, filename='Q7 ML lab1 Error test v3 ')
data =[trace4,trace3] #Variance
layout = dict(title = 'Effect of different pruning fractions in monk1 and monk3',
              xaxis = dict(title = 'Fraction between train and validation datasets'),
              yaxis = dict(title = 'Variance'),
)
fig = dict(data=data, layout=layout)
plotly.plotly.iplot(fig, filename='Q7 ML lab1 variance v3 ')
``` |
{
"source": "jiwidi/lightning-tutorials",
"score": 3
} |
#### File: lightning_examples/barlow-twins/barlow_twins.py
```python
from functools import partial
from typing import Sequence, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.transforms.functional as VisionF
from pytorch_lightning import Callback, LightningModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.metrics.functional import accuracy
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.models.resnet import resnet18
from torchvision.utils import make_grid
batch_size = 32
num_workers = 0 # to run notebook on CPU
max_epochs = 200
z_dim = 128
# %% [markdown]
# ### Transforms
#
# We first define the data augmentation pipeline used in Barlow Twins. Here, we use the pipeline proposed in SimCLR, which generates two copies/views of an input image by applying the following transformations in a sequence.
#
# First it takes a random crop of the image and resizes it to a fixed pre-specified size. Then, it applies a left-to-right random flip with a probability of 0.5. This step is followed by a composition of color jitter, conversion to grayscale with a probability of 0.2 and the application of a Gaussian blur filter. Finally, we normalize the image and convert it to a tensor.
#
# Within this transform, we add a third view for our online finetuner, which we explain later on. In short, we add another transform so that we can test our encoder on a downstream classification task.
# %%
class BarlowTwinsTransform:
def __init__(self, train=True, input_height=224, gaussian_blur=True, jitter_strength=1.0, normalize=None):
self.input_height = input_height
self.gaussian_blur = gaussian_blur
self.jitter_strength = jitter_strength
self.normalize = normalize
self.train = train
color_jitter = transforms.ColorJitter(
0.8 * self.jitter_strength,
0.8 * self.jitter_strength,
0.8 * self.jitter_strength,
0.2 * self.jitter_strength,
)
color_transform = [transforms.RandomApply([color_jitter], p=0.8), transforms.RandomGrayscale(p=0.2)]
if self.gaussian_blur:
kernel_size = int(0.1 * self.input_height)
if kernel_size % 2 == 0:
kernel_size += 1
color_transform.append(transforms.RandomApply([transforms.GaussianBlur(kernel_size=kernel_size)], p=0.5))
self.color_transform = transforms.Compose(color_transform)
if normalize is None:
self.final_transform = transforms.ToTensor()
else:
self.final_transform = transforms.Compose([transforms.ToTensor(), normalize])
self.transform = transforms.Compose(
[
transforms.RandomResizedCrop(self.input_height),
transforms.RandomHorizontalFlip(p=0.5),
self.color_transform,
self.final_transform,
]
)
self.finetune_transform = None
if self.train:
self.finetune_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4, padding_mode="reflect"),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
else:
self.finetune_transform = transforms.ToTensor()
def __call__(self, sample):
return self.transform(sample), self.transform(sample), self.finetune_transform(sample)
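# A minimal usage sketch (assuming `img` is a PIL image from CIFAR10):
#     transform = BarlowTwinsTransform(train=True, input_height=32, gaussian_blur=False)
#     view1, view2, finetune_view = transform(img)  # two augmented views + one finetune view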
# %% [markdown]
# ### Dataset
#
# We select CIFAR10 as the dataset to demonstrate the pre-training process for Barlow Twins. CIFAR10 images are 32x32 in size and we do not apply a Gaussian blur transformation on them. In this step, we create the training and validation dataloaders for CIFAR10.
# %%
def cifar10_normalization():
normalize = transforms.Normalize(
mean=[x / 255.0 for x in [125.3, 123.0, 113.9]], std=[x / 255.0 for x in [63.0, 62.1, 66.7]]
)
return normalize
train_transform = BarlowTwinsTransform(
train=True, input_height=32, gaussian_blur=False, jitter_strength=0.5, normalize=cifar10_normalization()
)
train_dataset = CIFAR10(root=".", train=True, download=True, transform=train_transform)
val_transform = BarlowTwinsTransform(
train=False, input_height=32, gaussian_blur=False, jitter_strength=0.5, normalize=cifar10_normalization()
)
val_dataset = CIFAR10(root=".", train=False, download=True, transform=val_transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, drop_last=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, drop_last=True)
# %% [markdown]
# ### Plot images
#
# To see how the CIFAR10 images look after the data augmentation pipeline, we load a few images from the dataloader and plot them here.
# %%
for batch in val_loader:
(img1, img2, _), label = batch
break
img_grid = make_grid(img1, normalize=True)
def show(imgs):
if not isinstance(imgs, list):
imgs = [imgs]
fix, axs = plt.subplots(ncols=len(imgs), squeeze=False)
for i, img in enumerate(imgs):
img = img.detach()
img = VisionF.to_pil_image(img)
axs[0, i].imshow(np.asarray(img))
axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
show(img_grid)
# %% [markdown]
# ### Barlow Twins Loss
#
# Here we define the loss function for Barlow Twins. It first normalizes the D-dimensional vectors from the projection head and then computes the DxD cross-correlation matrix between the normalized vectors of the 2 views of each image.
#
# Then it splits this cross-correlation matrix into two parts. The first part, the diagonal of this matrix, is brought closer to 1, which pushes up the cosine similarity between the latent vectors of two views of each image, thus making the backbone invariant to the transformations applied to the views. The second part of the loss pushes the non-diagonal elements of the cross-correlation matrix closer to 0. This reduces the redundancy between the different dimensions of the latent vector.
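#
# For reference, the loss implemented in the next cell can be written as
# $\mathcal{L}_{BT} = \sum_i (1 - C_{ii})^2 + \lambda \sum_i \sum_{j \neq i} C_{ij}^2$,
# where $C$ is the cross-correlation matrix and $\lambda$ (``lambda_coeff`` in the code) balances the
# invariance term against the redundancy-reduction term.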
# %%
class BarlowTwinsLoss(nn.Module):
def __init__(self, batch_size, lambda_coeff=5e-3, z_dim=128):
super().__init__()
self.z_dim = z_dim
self.batch_size = batch_size
self.lambda_coeff = lambda_coeff
def off_diagonal_ele(self, x):
# taken from: https://github.com/facebookresearch/barlowtwins/blob/main/main.py
# return a flattened view of the off-diagonal elements of a square matrix
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
def forward(self, z1, z2):
# N x D, where N is the batch size and D is output dim of projection head
z1_norm = (z1 - torch.mean(z1, dim=0)) / torch.std(z1, dim=0)
z2_norm = (z2 - torch.mean(z2, dim=0)) / torch.std(z2, dim=0)
cross_corr = torch.matmul(z1_norm.T, z2_norm) / self.batch_size
on_diag = torch.diagonal(cross_corr).add_(-1).pow_(2).sum()
off_diag = self.off_diagonal_ele(cross_corr).pow_(2).sum()
return on_diag + self.lambda_coeff * off_diag
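# %% [markdown]
# A small illustrative check (added for clarity, not part of the original tutorial): feeding the
# loss two identical batches yields a much smaller value than two independent random batches,
# since identical views produce a cross-correlation matrix close to the identity.
# %%
_loss_check = BarlowTwinsLoss(batch_size=128, z_dim=32)
_z1, _z2 = torch.randn(128, 32), torch.randn(128, 32)
print(_loss_check(_z1, _z1).item(), _loss_check(_z1, _z2).item())  # small vs. large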
# %% [markdown]
# ### Backbone
#
# This is a standard Resnet backbone that we pre-train using the Barlow Twins method. To accommodate the 32x32 CIFAR10 images, we replace the first 7x7 convolution of the Resnet backbone by a 3x3 filter. We also remove the first Maxpool layer from the network for CIFAR10 images.
# %%
encoder = resnet18()
# for CIFAR10, replace the first 7x7 conv with smaller 3x3 conv and remove the first maxpool
encoder.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
encoder.maxpool = nn.MaxPool2d(kernel_size=1, stride=1)
# replace classification fc layer of Resnet to obtain representations from the backbone
encoder.fc = nn.Identity()
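# %% [markdown]
# A quick illustrative sanity check (not part of the original tutorial): a CIFAR10-sized batch
# should come out of the modified backbone as 512-dimensional feature vectors.
# %%
print(encoder(torch.randn(2, 3, 32, 32)).shape)  # expected: torch.Size([2, 512])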
# %% [markdown]
# ### Projection head
#
# Unlike SimCLR and BYOL, the downstream performance of Barlow Twins greatly benefits from having a larger projection head after the backbone network. The paper utilizes a 3-layer MLP with 8192 hidden dimensions and 8192 as the output dimension of the projection head. For the purposes of the tutorial, we use a smaller projection head. Note, however, that in practice Barlow Twins needs to be trained with a bigger projection head, as it is highly sensitive to the head's architecture and output dimensionality.
# %%
class ProjectionHead(nn.Module):
def __init__(self, input_dim=2048, hidden_dim=2048, output_dim=128):
super().__init__()
self.projection_head = nn.Sequential(
nn.Linear(input_dim, hidden_dim, bias=True),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, output_dim, bias=False),
)
def forward(self, x):
return self.projection_head(x)
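# %% [markdown]
# A quick illustrative check (not part of the original tutorial): with the dimensions used later in
# this notebook, the head maps 512-dimensional backbone features to 128-dimensional projections.
# %%
print(ProjectionHead(input_dim=512, hidden_dim=512, output_dim=128)(torch.randn(4, 512)).shape)  # torch.Size([4, 128])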
# %% [markdown]
# ### Learning rate warmup
#
# For the purposes of this tutorial, we keep things simple and use a linear warmup schedule with the Adam optimizer. In our previous experiments we have found that the linear warmup part is much more important for the final performance of a model than the cosine decay component of the schedule.
# %%
def fn(warmup_steps, step):
if step < warmup_steps:
return float(step) / float(max(1, warmup_steps))
else:
return 1.0
def linear_warmup_decay(warmup_steps):
return partial(fn, warmup_steps)
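# %% [markdown]
# A tiny illustrative check (not part of the original tutorial): with 1000 warmup steps, the
# multiplier returned by ``linear_warmup_decay`` grows linearly from 0 to 1 and then stays at 1.
# %%
_warmup = linear_warmup_decay(1000)
print([_warmup(s) for s in (0, 250, 500, 1000, 2000)])  # [0.0, 0.25, 0.5, 1.0, 1.0]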
# %% [markdown]
# ### Barlow Twins Lightning Module
#
# We keep the LightningModule for Barlow Twins neat and simple. It takes in a backbone encoder and initializes the projection head and the loss function. We configure the optimizer and the learning rate scheduler in the ``configure_optimizers`` method.
# %%
class BarlowTwins(LightningModule):
def __init__(
self,
encoder,
encoder_out_dim,
num_training_samples,
batch_size,
lambda_coeff=5e-3,
z_dim=128,
learning_rate=1e-4,
warmup_epochs=10,
max_epochs=200,
):
super().__init__()
self.encoder = encoder
self.projection_head = ProjectionHead(input_dim=encoder_out_dim, hidden_dim=encoder_out_dim, output_dim=z_dim)
self.loss_fn = BarlowTwinsLoss(batch_size=batch_size, lambda_coeff=lambda_coeff, z_dim=z_dim)
self.learning_rate = learning_rate
self.warmup_epochs = warmup_epochs
self.max_epochs = max_epochs
self.train_iters_per_epoch = num_training_samples // batch_size
def forward(self, x):
return self.encoder(x)
def shared_step(self, batch):
(x1, x2, _), _ = batch
z1 = self.projection_head(self.encoder(x1))
z2 = self.projection_head(self.encoder(x2))
return self.loss_fn(z1, z2)
def training_step(self, batch, batch_idx):
loss = self.shared_step(batch)
self.log("train_loss", loss.item(), on_step=True, on_epoch=False)
return loss
def validation_step(self, batch, batch_idx):
loss = self.shared_step(batch)
self.log("val_loss", loss, on_step=False, on_epoch=True)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
warmup_steps = self.train_iters_per_epoch * self.warmup_epochs
scheduler = {
"scheduler": torch.optim.lr_scheduler.LambdaLR(
optimizer,
linear_warmup_decay(warmup_steps),
),
"interval": "step",
"frequency": 1,
}
return [optimizer], [scheduler]
# %% [markdown]
# ### Evaluation
#
# We define a callback which appends a linear layer on top of the encoder and trains this classification evaluation head in an online manner. We make sure not to backpropagate the gradients back to the encoder while tuning the linear layer. This technique was used in SimCLR as well, and it showed that the final downstream classification performance is very similar to the results of online finetuning as the training progresses.
# %%
class OnlineFineTuner(Callback):
def __init__(
self,
encoder_output_dim: int,
num_classes: int,
) -> None:
super().__init__()
self.optimizer: torch.optim.Optimizer
self.encoder_output_dim = encoder_output_dim
self.num_classes = num_classes
def on_pretrain_routine_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
# add linear_eval layer and optimizer
pl_module.online_finetuner = nn.Linear(self.encoder_output_dim, self.num_classes).to(pl_module.device)
self.optimizer = torch.optim.Adam(pl_module.online_finetuner.parameters(), lr=1e-4)
def extract_online_finetuning_view(
self, batch: Sequence, device: Union[str, torch.device]
) -> Tuple[torch.Tensor, torch.Tensor]:
(_, _, finetune_view), y = batch
finetune_view = finetune_view.to(device)
y = y.to(device)
return finetune_view, y
def on_train_batch_end(
self,
trainer: pl.Trainer,
pl_module: pl.LightningModule,
outputs: Sequence,
batch: Sequence,
batch_idx: int,
dataloader_idx: int,
) -> None:
x, y = self.extract_online_finetuning_view(batch, pl_module.device)
with torch.no_grad():
feats = pl_module(x)
feats = feats.detach()
preds = pl_module.online_finetuner(feats)
loss = F.cross_entropy(preds, y)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
acc = accuracy(F.softmax(preds, dim=1), y)
pl_module.log("online_train_acc", acc, on_step=True, on_epoch=False)
pl_module.log("online_train_loss", loss, on_step=True, on_epoch=False)
def on_validation_batch_end(
self,
trainer: pl.Trainer,
pl_module: pl.LightningModule,
outputs: Sequence,
batch: Sequence,
batch_idx: int,
dataloader_idx: int,
) -> None:
x, y = self.extract_online_finetuning_view(batch, pl_module.device)
with torch.no_grad():
feats = pl_module(x)
feats = feats.detach()
preds = pl_module.online_finetuner(feats)
loss = F.cross_entropy(preds, y)
acc = accuracy(F.softmax(preds, dim=1), y)
pl_module.log("online_val_acc", acc, on_step=False, on_epoch=True, sync_dist=True)
pl_module.log("online_val_loss", loss, on_step=False, on_epoch=True, sync_dist=True)
# %% [markdown]
# Finally, we define the trainer for training the model. We pass in the ``train_loader`` and ``val_loader`` we had initialized earlier to the ``fit`` function.
# %%
encoder_out_dim = 512
model = BarlowTwins(
encoder=encoder,
encoder_out_dim=encoder_out_dim,
num_training_samples=len(train_dataset),
batch_size=batch_size,
z_dim=z_dim,
)
online_finetuner = OnlineFineTuner(encoder_output_dim=encoder_out_dim, num_classes=10)
checkpoint_callback = ModelCheckpoint(every_n_val_epochs=100, save_top_k=-1, save_last=True)
trainer = Trainer(
max_epochs=max_epochs,
gpus=torch.cuda.device_count(),
precision=16 if torch.cuda.device_count() > 0 else 32,
callbacks=[online_finetuner, checkpoint_callback],
)
# uncomment this to train the model
# this is done for the tutorial so that the notebook compiles
# trainer.fit(model, train_loader, val_loader)
# %% [markdown]
# ### Using the trained encoder for downstream tasks
#
# Once the encoder is pretrained on CIFAR10, we can use it to get image embeddings and use them further downstream on tasks like classification, detection, segmentation etc.
#
# In this tutorial, we did not completely train our encoder for 100s of epochs using the Barlow Twins pretraining method. So, we will load the pretrained encoder weights from a checkpoint and show the image embeddings obtained from that.
#
# To create this checkpoint, the encoder was pretrained for 200 epochs, and obtained an online finetune accuracy of x% on CIFAR-10.
# %%
# ckpt_model = torch.load('') # upload checkpoint to aws
# encoder = ckpt_model.encoder
encoder = model.encoder
downstream_dataset = CIFAR10(root=".", train=False, transform=transforms.ToTensor())
dataloader = DataLoader(downstream_dataset, batch_size=4, shuffle=False)
for batch in dataloader:
img, label = batch
print(encoder(img).shape)
break
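# %% [markdown]
# As a further (hypothetical) downstream example, one could fit a simple linear probe on the
# frozen embeddings. The sketch below is commented out, like the training call above; it assumes
# scikit-learn is available in the environment, and the names used here are illustrative only.
# %%
# from sklearn.linear_model import LogisticRegression
#
# feats, labels = [], []
# with torch.no_grad():
#     for i, (img, label) in enumerate(dataloader):
#         feats.append(encoder(img))
#         labels.append(label)
#         if i == 9:  # a few batches are enough for a sketch
#             break
# feats = torch.cat(feats).numpy()
# labels = torch.cat(labels).numpy()
# probe = LogisticRegression(max_iter=1000).fit(feats, labels)
# print("probe accuracy on these batches:", probe.score(feats, labels))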
```
#### File: lightning_examples/basic-gan/gan.py
```python
import os
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from pytorch_lightning import LightningDataModule, LightningModule, Trainer
from torch.utils.data import DataLoader, random_split
from torchvision.datasets import MNIST
PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
AVAIL_GPUS = min(1, torch.cuda.device_count())
BATCH_SIZE = 256 if AVAIL_GPUS else 64
NUM_WORKERS = int(os.cpu_count() / 2)
# %% [markdown]
# ### MNIST DataModule
#
# Below, we define a DataModule for the MNIST Dataset. To learn more about DataModules, check out our tutorial
# on them or see the [latest docs](https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html).
# %%
class MNISTDataModule(LightningDataModule):
def __init__(
self,
data_dir: str = PATH_DATASETS,
batch_size: int = BATCH_SIZE,
num_workers: int = NUM_WORKERS,
):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.num_workers = num_workers
self.transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
]
)
# self.dims is returned when you call dm.size()
# Setting default dims here because we know them.
# Could optionally be assigned dynamically in dm.setup()
self.dims = (1, 28, 28)
self.num_classes = 10
def prepare_data(self):
# download
MNIST(self.data_dir, train=True, download=True)
MNIST(self.data_dir, train=False, download=True)
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage is None:
mnist_full = MNIST(self.data_dir, train=True, transform=self.transform)
self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000])
# Assign test dataset for use in dataloader(s)
if stage == "test" or stage is None:
self.mnist_test = MNIST(self.data_dir, train=False, transform=self.transform)
def train_dataloader(self):
return DataLoader(
self.mnist_train,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
def val_dataloader(self):
return DataLoader(self.mnist_val, batch_size=self.batch_size, num_workers=self.num_workers)
def test_dataloader(self):
return DataLoader(self.mnist_test, batch_size=self.batch_size, num_workers=self.num_workers)
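# %% [markdown]
# A brief illustrative usage of the DataModule on its own (not part of the original notebook). It is
# commented out here to keep the notebook light; the same DataModule is used for training further below.
# %%
# dm = MNISTDataModule()
# dm.prepare_data()
# dm.setup("fit")
# print(len(dm.mnist_train), len(dm.mnist_val))  # 55000 5000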
# %% [markdown]
# ### A. Generator
# %%
class Generator(nn.Module):
def __init__(self, latent_dim, img_shape):
super().__init__()
self.img_shape = img_shape
def block(in_feat, out_feat, normalize=True):
layers = [nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(nn.BatchNorm1d(out_feat, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
*block(latent_dim, 128, normalize=False),
*block(128, 256),
*block(256, 512),
*block(512, 1024),
nn.Linear(1024, int(np.prod(img_shape))),
nn.Tanh(),
)
def forward(self, z):
img = self.model(z)
img = img.view(img.size(0), *self.img_shape)
return img
# %% [markdown]
# ### B. Discriminator
# %%
class Discriminator(nn.Module):
def __init__(self, img_shape):
super().__init__()
self.model = nn.Sequential(
nn.Linear(int(np.prod(img_shape)), 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1),
nn.Sigmoid(),
)
def forward(self, img):
img_flat = img.view(img.size(0), -1)
validity = self.model(img_flat)
return validity
# %% [markdown]
# ### <NAME>
#
# #### A couple of cool features to check out in this example...
#
# - We use `some_tensor.type_as(another_tensor)` to make sure we initialize new tensors on the right device (i.e. GPU, CPU).
# - Lightning will put your dataloader data on the right device automatically
# - In this example, we pull from latent dim on the fly, so we need to dynamically add tensors to the right device.
# - `type_as` is the way we recommend to do this.
# - This example shows how to use multiple dataloaders in your `LightningModule`.
# %%
class GAN(LightningModule):
def __init__(
self,
channels,
width,
height,
latent_dim: int = 100,
lr: float = 0.0002,
b1: float = 0.5,
b2: float = 0.999,
batch_size: int = BATCH_SIZE,
**kwargs
):
super().__init__()
self.save_hyperparameters()
# networks
data_shape = (channels, width, height)
self.generator = Generator(latent_dim=self.hparams.latent_dim, img_shape=data_shape)
self.discriminator = Discriminator(img_shape=data_shape)
self.validation_z = torch.randn(8, self.hparams.latent_dim)
self.example_input_array = torch.zeros(2, self.hparams.latent_dim)
def forward(self, z):
return self.generator(z)
def adversarial_loss(self, y_hat, y):
return F.binary_cross_entropy(y_hat, y)
def training_step(self, batch, batch_idx, optimizer_idx):
imgs, _ = batch
# sample noise
z = torch.randn(imgs.shape[0], self.hparams.latent_dim)
z = z.type_as(imgs)
# train generator
if optimizer_idx == 0:
# generate images
self.generated_imgs = self(z)
# log sampled images
sample_imgs = self.generated_imgs[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image("generated_images", grid, 0)
# ground truth result (ie: all fake)
# put on GPU because we created this tensor inside training_loop
valid = torch.ones(imgs.size(0), 1)
valid = valid.type_as(imgs)
# adversarial loss is binary cross-entropy
g_loss = self.adversarial_loss(self.discriminator(self(z)), valid)
tqdm_dict = {"g_loss": g_loss}
output = OrderedDict({"loss": g_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
return output
# train discriminator
if optimizer_idx == 1:
# Measure discriminator's ability to classify real from generated samples
# how well can it label as real?
valid = torch.ones(imgs.size(0), 1)
valid = valid.type_as(imgs)
real_loss = self.adversarial_loss(self.discriminator(imgs), valid)
# how well can it label as fake?
fake = torch.zeros(imgs.size(0), 1)
fake = fake.type_as(imgs)
fake_loss = self.adversarial_loss(self.discriminator(self(z).detach()), fake)
# discriminator loss is the average of these
d_loss = (real_loss + fake_loss) / 2
tqdm_dict = {"d_loss": d_loss}
output = OrderedDict({"loss": d_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
return output
def configure_optimizers(self):
lr = self.hparams.lr
b1 = self.hparams.b1
b2 = self.hparams.b2
opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2))
opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))
return [opt_g, opt_d], []
def on_epoch_end(self):
z = self.validation_z.type_as(self.generator.model[0].weight)
# log sampled images
sample_imgs = self(z)
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image("generated_images", grid, self.current_epoch)
# %%
dm = MNISTDataModule()
model = GAN(*dm.size())
trainer = Trainer(gpus=AVAIL_GPUS, max_epochs=5, progress_bar_refresh_rate=20)
trainer.fit(model, dm)
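# %% [markdown]
# As an illustrative follow-up (not part of the original notebook), we can draw samples from the
# trained generator and arrange them on a grid; plotting assumes matplotlib is available in the environment.
# %%
import matplotlib.pyplot as plt
z = model.validation_z.type_as(model.generator.model[0].weight)
sample_imgs = model(z).cpu().detach()
grid = torchvision.utils.make_grid(sample_imgs, normalize=True)
plt.imshow(grid.permute(1, 2, 0))
plt.axis("off")
plt.show()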
# %%
# Start tensorboard.
# %load_ext tensorboard
# %tensorboard --logdir lightning_logs/
``` |
{
"source": "jiwidi/MIARFID",
"score": 3
} |
#### File: ALC/paolo/grid_search.py
```python
import os
import sys
import argparse
import pickle
from xml.dom import minidom
import numpy
from nltk.tokenize.casual import casual_tokenize
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn import svm
from sklearn.linear_model import SGDClassifier
from sklearn.neural_network import MLPClassifier
from sklearn import neighbors
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score, GridSearchCV
# from zeugma.embeddings import EmbeddingTransformer
from processing import process_csv
def load_data(dataset_path):
"""
Loads the data from dataset_path.
:param string dataset_path
Path of the data
:return X, Y
Two lists with the data in X and the labels in Y
"""
X = []
Y = []
with open(dataset_path + "/truth.txt") as truth_file:
for line in truth_file:
usr_id, truth = line.split(":::")
Y.append(truth)
xml_file = minidom.parse(dataset_path + "/" + usr_id + ".xml")
tweets = xml_file.getElementsByTagName("document")
samples = []
for tweet in tweets:
samples.append(tweet.firstChild.data)
samples = " ".join(samples)
samples = samples.replace("\n", "")
X.append(samples)
return X, Y
if __name__ == "__main__":
# X_train_en, Y_train_en = load_data('dataset/pan21-author-profiling-training-2021-03-14/en')
# X_train_es, Y_train_es = load_data('dataset/pan21-author-profiling-training-2021-03-14/es')
parser = argparse.ArgumentParser(
usage="python grid_search.py --lan ['es','en'] --vec [0-1] --model [0-8]"
)
parser.add_argument(
"--lan", type=str, metavar="string -> language, default: en", default="en"
)
parser.add_argument(
"--vec", type=int, metavar="int -> type of vectorizer, default: 0", default=0
)
parser.add_argument(
"--model", type=int, metavar="int -> type of model, default: 0", default=0
)
args = parser.parse_args()
if args.lan == "en":
with open("dataset/processed_text_en.pkl", "rb") as f:
X_train, Y_train = pickle.load(f)
elif args.lan == "es":
with open("dataset/processed_text_es.pkl", "rb") as f:
X_train, Y_train = pickle.load(f)
if args.vec == 0:
vectorizador = CountVectorizer(tokenizer=casual_tokenize, max_df=0.8)
elif args.vec == 1:
vectorizador = TfidfVectorizer(
tokenizer=casual_tokenize, max_features=5000, max_df=0.8, ngram_range=(2, 3)
)
vectorizador.fit(X_train)
matriz_train = vectorizador.transform(X_train)
if args.model == 0:
modelo = GradientBoostingClassifier()
params = {
"loss": ["deviance", "exponential"],
"learning_rate": [0.1, 0.01, 0.001, 0.0001],
"n_estimators": [150, 250, 350, 450, 550],
}
elif args.model == 1:
modelo = svm.LinearSVC(C=100, tol=0.01, loss="hinge", max_iter=500)
params = {
"loss": ["hinge", "squared_hinge"],
"C": [1, 10, 30, 50, 70, 90, 100],
"tol": [0.01, 0.05, 0.1, 0.2, 0.5, 2, 10, 20],
"max_iter": [1000],
}
elif args.model == 2:
modelo = svm.SVC(C=1)
params = {
"C": [1, 10, 30, 50, 70, 90, 100],
"kernel": ["linear", "poly", "rbf", "sigmoid"],
"tol": [0.01, 0.05, 0.1, 0.2, 0.5, 2, 10, 20],
"max_iter": [1000],
}
elif args.model == 3:
modelo = SGDClassifier()
params = {
"loss": ["hinge", "squared_hinge", "perceptron"],
"alpha": [0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001],
"tol": [0.01, 0.05, 0.1, 0.2, 0.5, 2, 10, 20],
"n_jobs": [-1],
}
elif args.model == 4:
modelo = MLPClassifier(
hidden_layer_sizes=(16, 32, 16),
solver="adam",
alpha=0.0001,
batch_size=200,
learning_rate="constant",
learning_rate_init=0.001,
random_state=1,
max_iter=500,
verbose=False,
warm_start=True,
)
params = {
"hidden_layer_sizes": [
(16, 32, 16),
(16),
(16, 16),
(32),
(1024, 64, 16),
(1024),
(64, 16),
],
"solver": ["adam", "sgd"],
"activation": ["logistic", "relu"],
"learning_rate": ["constant", "adaptive"],
"learning_rate_init": [0.1, 0.01, 0.001, 0.0001, 0.00001],
"max_iter": [500],
"warm_start": [True, False],
}
elif args.model == 5:
modelo = neighbors.KNeighborsClassifier()
params = {"n_neighbors": [5, 10, 20], "n_jobs": [-1]}
elif args.model == 6:
modelo = RandomForestClassifier(n_estimators=100)
params = {"n_estimators": [100, 150, 300], "n_jobs": [-1]}
elif args.model == 7:
modelo = GaussianNB()
params = {"var_smoothing": [1e-9]}
elif args.model == 8:
modelo = DecisionTreeClassifier()
params = {"splitter": ["best", "random"]}
grid_search = GridSearchCV(modelo, params, scoring="accuracy", cv=10, n_jobs=-1)
grid_search.fit(matriz_train.toarray(), Y_train)
print(
"Model %s: Accuracy (%s): %0.2f \tBest params: %s"
% (args.model, args.lan, grid_search.best_score_, grid_search.best_params_)
)
quit()
```
#### File: ARF/project/dataset.py
```python
import numpy
import pandas as pd
import PIL.Image as Image
import torch
import torch.utils.data as pytorch_data
from torchvision import transforms
import time
import albumentations as A
class SIIMDataset(pytorch_data.Dataset):
def __init__(self, df, transform, image_dir, test=False, use_metadata=False, include_2019 = False, use_9_classes=False):
self.df = df
self.transform = transform
self.test = test
self.image_dir = image_dir
self.use_metadata = use_metadata
self.include_2019 = include_2019
self.use_9_classes = use_9_classes
if self.use_metadata:
# Transform dataframe
dummies = pd.get_dummies(
self.df["anatom_site_general_challenge"],
dummy_na=True,
dtype=numpy.uint8,
prefix="site",
)
self.df = pd.concat([self.df, dummies.iloc[: self.df.shape[0]]], axis=1)
self.df["sex"] = self.df["sex"].map({"male": 1, "female": 0})
self.df["age_approx"] /= self.df["age_approx"].max()
if self.test:
self.df['site_anterior torso'] = [0 for i in range(len(self.df))]
self.df['site_lateral torso'] = [0 for i in range(len(self.df))]
self.df['site_posterior torso'] = [0 for i in range(len(self.df))]
self.df = self.df[['image_name', 'patient_id', 'sex', 'age_approx',
'anatom_site_general_challenge', 'site_anterior torso', 'site_head/neck',
'site_lateral torso', 'site_lower extremity', 'site_oral/genital',
'site_palms/soles', 'site_posterior torso', 'site_torso',
'site_upper extremity', 'site_nan'
]]
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
start = time.time()
meta = self.df.iloc[idx]
image_fn = (
meta["image_name"] + ".jpg"
) # Use this when training with original images
# image_fn = meta['image_name'] + '.png'
if self.test:
img = numpy.asarray(Image.open(str(self.image_dir / "test") + "/" + image_fn).convert("RGB"))
#img = numpy.array(Image.open(self.image_dir + "/test" + "/" + image_fn).convert("RGB"))
else:
if self.include_2019 and meta['patient_id'].startswith('IP_2019_'):
img = numpy.asarray(Image.open(str(self.image_dir / "2019") + "/" + image_fn).convert("RGB"))
#img = numpy.array(Image.open(self.image_dir + "/2019" + "/" + image_fn).convert("RGB"))
else:
img = numpy.asarray(Image.open(str(self.image_dir / "train") + "/" + image_fn).convert("RGB"))
#img = numpy.array(Image.open(self.image_dir + "/train" + "/" + image_fn).convert("RGB"))
if self.transform is not None:
img = self.transform(image=img)
img = img['image'].astype(numpy.float32)
img = numpy.moveaxis(img, -1, 0) # Convert to channels first
if not self.test:
if self.include_2019:
if self.use_metadata or self.use_9_classes:
# Now target will be a vector of size 9
target = meta[['MEL', 'NV', 'BCC', 'AK', 'BKL', 'DF', 'VASC', 'SCC', 'UNK']].tolist()
else:
target = meta['target']
else:
target = meta['target']
if self.use_metadata:
metadata = ["sex", "age_approx"] + [
col for col in meta.index if "site_" in col
]
metadata.remove("anatom_site_general_challenge")
metadata = numpy.array(meta[metadata], dtype=numpy.float64)
# print(type(img) ,type(metadata.values), type(meta["target"])
if self.test:
return img, torch.from_numpy(metadata)
else:
return img, torch.from_numpy(metadata), torch.Tensor(target).long()
if self.test:
return img
if self.use_9_classes:
return img, torch.Tensor(target).long()
#
# print(time.time() - start)
return img, target
if __name__ == "__main__":
transform_test = A.Compose(
[
A.Resize(
224,
224
), # Use this when training with original images
A.Normalize()
]
)
train_df = pd.read_csv("data/train_full.csv")
train_dataset = SIIMDataset(train_df, transform=transform_test, image_dir='data', use_metadata=True, include_2019=True)
print(train_dataset[0][0])
test_df = pd.read_csv("data/test_full.csv")
test_dataset = SIIMDataset(test_df, transform=transform_test, image_dir='data', use_metadata=True, test=True, include_2019=True)
print(test_dataset[0][0])
```
#### File: CV/gender/train.py
```python
from __future__ import print_function
import argparse
import shutil
import torch
import torchvision
import random
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import matplotlib.pyplot as plt
writer = SummaryWriter()
from resnet import ResNet_small
from torchsummary import summary
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
def train(model, dataloader, optimizer, scheduler, loss_fn, epoch):
# Set the model into train mode
model.train()
train_loss = 0
correct = 0
total = 0
datacount = len(dataloader)
for batch_idx, (train_batch, labels_batch) in enumerate(dataloader):
# move the data onto the device
train_batch, labels_batch = train_batch.to(device), labels_batch.to(device)
optimizer.zero_grad()
# compute model outputs and loss
outputs = model(train_batch)
loss = loss_fn(outputs, labels_batch.squeeze())
loss.backward()
# after computing gradients based on current batch loss,
# apply them to parameters
optimizer.step()
scheduler.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += labels_batch.size(0)
correct += predicted.eq(labels_batch.squeeze()).sum().item()
# write to tensorboard
writer.add_scalar(
"train/loss",
train_loss / (batch_idx + 1),
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"train/accuracy",
100.0 * correct / total,
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"train/lr",
scheduler._last_lr[0],
(datacount * (epoch + 1)) + (batch_idx + 1),
)
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(train_batch),
len(dataloader.dataset),
100.0 * batch_idx / len(dataloader),
(train_loss / (batch_idx + 1)),
# loss,
),
end="\r",
flush=True,
)
print()
return train_loss / datacount, 100.0 * correct / total
def test(model, dataloader, loss_fn, epoch):
model.eval()
test_loss = 0
correct = 0
total = 0
datacount = len(dataloader)
with torch.no_grad():
for batch_idx, (test_batch, labels_batch) in enumerate(dataloader):
# move the data onto device
test_batch, labels_batch = test_batch.to(device), labels_batch.to(device)
# compute the model output
outputs = model(test_batch)
loss = loss_fn(outputs, labels_batch.squeeze())
test_loss += loss.item()
_, predicted = outputs.max(1)
total += labels_batch.size(0)
correct += predicted.eq(labels_batch.squeeze()).sum().item()
# log the test_loss
writer.add_scalar(
"test/loss",
test_loss / (batch_idx + 1),
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"test/accuracy",
100.0 * correct / total,
(datacount * (epoch + 1)) + (batch_idx + 1),
)
test_loss = test_loss / datacount
acc = 100 * correct / total
print("Test accuracy:", acc)
return test_loss, acc
def save_ckp(state, checkpoint_dir):
f_path = "gender-best-checkpoint.pt"
torch.save(state, f_path)
def main():
# Training settings
parser = argparse.ArgumentParser(description="PyTorch GENDER CV LAB")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 128)",
)
parser.add_argument(
"--epochs",
type=int,
default=200,
metavar="N",
help="number of epochs to train (default: 200)",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--save_model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--load_checkpoint",
type=str,
default=False,
help="Path of checkpoint to restore, if none will start training from 0",
)
args = parser.parse_args()
random.seed(args.seed)
os.environ["PYTHONHASHSEED"] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 8, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
# Load
x_train = np.load("data/x_train.npy")
x_test = np.load("data/x_test.npy")
x_train = x_train / 255
x_test = x_test / 255
x_train = torch.from_numpy(x_train).squeeze().permute(0, 3, 1, 2).float()
x_test = torch.from_numpy(x_test).squeeze().permute(0, 3, 1, 2).float()
y_train = np.load("data/y_train.npy")
y_test = np.load("data/y_test.npy")
y_train = torch.from_numpy(y_train).squeeze().long()
y_test = torch.from_numpy(y_test).squeeze().long()
dataset1 = torch.utils.data.TensorDataset(x_train, y_train.unsqueeze(1))
dataset2 = torch.utils.data.TensorDataset(x_test, y_test.unsqueeze(1))
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = ResNet_small().to(device)
print(summary(model, (3, 100, 100)))
print(
"Trainable parameters",
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=0.1, steps_per_epoch=len(train_loader), epochs=200
) # epoch 187
epoch = 1
loss = nn.CrossEntropyLoss()
if args.load_checkpoint:
print("Loading checkpoint args.load_checkpoint")
checkpoint = torch.load(args.load_checkpoint)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
epoch = checkpoint["epoch"]
best_acc = 0
l_train_loss = []
l_test_loss = []
l_train_acc = []
l_test_acc = []
l_lr = []
for epoch in range(epoch, args.epochs + 1):
train_loss, train_acc = train(
model, train_loader, optimizer, scheduler, loss, epoch
)
test_loss, test_acc = test(model, test_loader, loss, epoch)
if test_acc > best_acc:
best_acc = test_acc
if test_acc > 97.0:
print("Error < 3.0 achieved, stopped training")
break
if args.save_model and test_acc >= best_acc:
checkpoint = {
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
}
print("Saving checkpoint as best model to gender-best-checkpoint.pt")
save_ckp(checkpoint, "")
l_train_loss.append(train_loss)
l_test_loss.append(test_loss)
l_train_acc.append(train_acc)
l_test_acc.append(test_acc)
l_lr.append(scheduler._last_lr[0])
# PLOTS
fig = plt.figure()
plt.plot(l_train_loss, color="red", label="Train")
plt.plot(l_test_loss, color="blue", label="Test")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Loss", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_loss.png")
plt.close()
fig = plt.figure()
plt.plot(l_train_acc, color="red", label="Train")
plt.plot(l_test_acc, color="blue", label="Test")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Accuracy", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_acc.png")
plt.close()
fig = plt.figure()
plt.plot(l_lr, color="orange", label="Learning rate")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Learning rate", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_lr.png")
plt.close()
if __name__ == "__main__":
main()
```
#### File: CV/style/transfer.py
```python
from __future__ import print_function
import argparse
import copy
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image
from utils import image_loader, run_style_transfer
def main(args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
style_img = image_loader(args.style_img)
content_img = image_loader(args.content_img)
input_img = content_img.clone()
unloader = transforms.ToPILImage() # reconvert into PIL image
cnn = models.vgg19(pretrained=True).features.to(device).eval()
# Additionally, VGG networks are trained on images with each channel
# normalized by mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225].
# We will use them to normalize the image before sending it into the network.
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
for i in [1, 1000, 1000000]:
output = run_style_transfer(
cnn,
cnn_normalization_mean,
cnn_normalization_std,
content_img,
style_img,
input_img,
num_steps=args.steps,
content_weight=args.content_weight,
style_weight=i,
)
plt.figure()
image = output.cpu().clone() # we clone the tensor to not do changes on it
image = image.squeeze(0) # remove the fake batch dimension
image = unloader(image)
aux = args.content_img.split("/")[1].split(".")[0]
image.save(f"images/resultstyle-{aux}-{args.content_weight}-{i}.png")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument(
"--style_img",
default="images/night.jpeg",
type=str,
help="Style image to use in transfer style",
)
parser.add_argument(
"--content_img",
default="images/mimi.jpg",
type=str,
help="Content image to use in transfer style",
)
parser.add_argument(
"--steps", type=int, default=200, help="Steps running the style transfer",
)
parser.add_argument(
"--content_weight",
type=int,
default=1,
help="Content weight in transfer style",
)
parser.add_argument(
"--style_weight", type=int, default=1, help="Style weight in transfer style",
)
args = parser.parse_args()
main(args)
```
#### File: CV/wideresnet/resnet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResBlock(nn.Module):
def __init__(self, in_size, out_size, stride=1, shorcut=False, dropout=0.0):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_size, out_size, kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(out_size)
self.drop1 = nn.Dropout(dropout)
self.conv2 = nn.Conv2d(
out_size, out_size, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(out_size)
if shorcut:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_size,
out_size,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(out_size),
)
else:
self.shortcut = nn.Sequential() # Empty sequential equals to empty layer.
def forward(self, x):
out = self.bn1(self.conv1(x))
out = F.relu(out)
out = self.drop1(out)
out = self.bn2(self.conv2(out))
out = F.relu(out + self.shortcut(x)) # SHORCUT
return out
class WideResNet(nn.Module):
def __init__(self, i_channels=3, o_channels=64, scale_factor=1, n_classes=10):
super(WideResNet, self).__init__()
self.conv1 = nn.Conv2d(
i_channels,
o_channels * scale_factor,
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn1 = nn.BatchNorm2d(o_channels * scale_factor)
self.blocks = nn.Sequential(
ResBlock(
o_channels * scale_factor,
o_channels * scale_factor,
1,
),
ResBlock(
o_channels * scale_factor,
o_channels * scale_factor,
1,
),
ResBlock(
o_channels * scale_factor,
o_channels * 2 * scale_factor,
2,
shorcut=True,
),
ResBlock(
o_channels * 2 * scale_factor,
o_channels * 2 * scale_factor,
1,
),
ResBlock(
o_channels * 2 * scale_factor,
o_channels * 4 * scale_factor,
2,
shorcut=True,
),
ResBlock(
o_channels * 4 * scale_factor,
o_channels * 4 * scale_factor,
1,
),
ResBlock(
o_channels * 4 * scale_factor,
o_channels * 8 * scale_factor,
2,
shorcut=True,
),
ResBlock(
o_channels * 8 * scale_factor,
o_channels * 8 * scale_factor,
1,
),
)
self.fw = nn.Linear(o_channels * 8 * scale_factor, n_classes) # 10 Classes
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.blocks(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.fw(out)
return out
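# Minimal shape sanity check (illustrative addition, not part of the original file): a random
# CIFAR10-sized batch should be mapped to 10 class logits.
if __name__ == "__main__":
    model = WideResNet(i_channels=3, o_channels=64, scale_factor=1)
    print(model(torch.randn(2, 3, 32, 32)).shape)  # expected: torch.Size([2, 10])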
```
#### File: CV/wideresnet/train.py
```python
from __future__ import print_function
import argparse
import shutil
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
writer = SummaryWriter()
from resnet import WideResNet
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
def train(model, dataloader, optimizer, scheduler, loss_fn, epoch):
# Set the model into train mode
model.train()
train_loss = 0
correct = 0
total = 0
datacount = len(dataloader)
for batch_idx, (train_batch, labels_batch) in enumerate(dataloader):
# move the data onto the device
train_batch, labels_batch = train_batch.to(device), labels_batch.to(device)
optimizer.zero_grad()
# compute model outputs and loss
outputs = model(train_batch)
loss = loss_fn(outputs, labels_batch)
loss.backward()
# after computing gradients based on current batch loss,
# apply them to parameters
optimizer.step()
scheduler.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += labels_batch.size(0)
correct += predicted.eq(labels_batch).sum().item()
# write to tensorboard
writer.add_scalar(
"train/loss",
train_loss / (batch_idx + 1),
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"train/accuracy",
100.0 * correct / total,
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"train/lr",
scheduler._last_lr[0],
(datacount * (epoch + 1)) + (batch_idx + 1),
)
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(train_batch),
len(dataloader.dataset),
100.0 * batch_idx / len(dataloader),
(train_loss / (batch_idx + 1)),
# loss,
),
end="\r",
flush=True,
)
print()
return train_loss / datacount, 100.0 * correct / total
def test(model, dataloader, loss_fn, epoch):
model.eval()
test_loss = 0
correct = 0
total = 0
datacount = len(dataloader)
with torch.no_grad():
for batch_idx, (test_batch, labels_batch) in enumerate(dataloader):
# move the data onto device
test_batch, labels_batch = test_batch.to(device), labels_batch.to(device)
# compute the model output
outputs = model(test_batch)
loss = loss_fn(outputs, labels_batch)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += labels_batch.size(0)
correct += predicted.eq(labels_batch).sum().item()
# log the test_loss
writer.add_scalar(
"test/loss",
test_loss / (batch_idx + 1),
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"test/accuracy",
100.0 * correct / total,
(datacount * (epoch + 1)) + (batch_idx + 1),
)
test_loss = test_loss / datacount
acc = 100.0 * correct / total
print("Test accuracy:", acc)
return test_loss, acc
def save_ckp(state, checkpoint_dir):
f_path = "cifar-best-checkpoint.pt"
torch.save(state, f_path)
def main():
# Training settings
parser = argparse.ArgumentParser(description="PyTorch MNIST RNA LAB")
parser.add_argument(
"--batch-size",
type=int,
default=128,
metavar="N",
help="input batch size for training (default: 128)",
)
parser.add_argument(
"--epochs",
type=int,
default=200,
metavar="N",
help="number of epochs to train (default: 200)",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--save_model",
action="store_true",
default=True,
help="For Saving the current Model",
)
parser.add_argument(
"--load_checkpoint",
type=str,
default=False,
help="Path of checkpoint to restore, if none will start training from 0",
)
args = parser.parse_args()
torch.manual_seed(args.seed)
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 8, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
train_transforms = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
test_transforms = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
dataset1 = torchvision.datasets.CIFAR10(
".data", train=True, download=True, transform=train_transforms
)
dataset2 = torchvision.datasets.CIFAR10(
".data", train=False, download=True, transform=test_transforms
)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = WideResNet(i_channels=3, o_channels=64, scale_factor=4).to(device)
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=0.1, steps_per_epoch=len(train_loader), epochs=200
) # epoch 187
epoch = 1
loss = nn.CrossEntropyLoss()
if args.load_checkpoint:
print("Loading checkpoint args.load_checkpoint")
checkpoint = torch.load(args.load_checkpoint)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
epoch = checkpoint["epoch"]
best_acc = 0
l_train_loss = []
l_test_loss = []
l_train_acc = []
l_test_acc = []
l_lr = []
print("Beginning training")
for epoch in range(epoch, args.epochs + 1):
train_loss, train_acc = train(
model, train_loader, optimizer, scheduler, loss, epoch
)
test_loss, test_acc = test(model, test_loader, loss, epoch)
if test_acc > best_acc:
best_acc = test_acc
if test_acc > 95.0:
print("Error < 5.0 achieved, stopped training")
break
if args.save_model and test_acc >= best_acc:
checkpoint = {
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
}
print("Saving checkpoint as best model to cifar-best-checkpoint.pt")
save_ckp(checkpoint, "")
l_train_loss.append(train_loss)
l_test_loss.append(test_loss)
l_train_acc.append(train_acc)
l_test_acc.append(test_acc)
l_lr.append(scheduler._last_lr[0])
# PLOTS
fig = plt.figure()
plt.plot(l_train_loss, color="red", label="Train")
plt.plot(l_test_loss, color="blue", label="Test")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Loss", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/cifar_loss.png")
plt.close()
fig = plt.figure()
plt.plot(l_train_acc, color="red", label="Train")
plt.plot(l_test_acc, color="blue", label="Test")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Accuracy", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/cifar_acc.png")
plt.close()
fig = plt.figure()
plt.plot(l_lr, color="orange", label="Learning rate")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Learning rate", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/cifar_lr.png")
plt.close()
if __name__ == "__main__":
main()
```
#### File: dqn/breakout/model_breakout.py
```python
import random
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
class DQN(nn.Module):
def __init__(self, h, w, outputs, device):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(4, 32, kernel_size=8, stride=4, bias=False)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2, bias=False)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1, bias=False)
self.fc1 = nn.Linear(64 * 7 * 7, 512)
self.fc2 = nn.Linear(512, outputs)
self.device = device
def init_weights(self, m):
if type(m) == nn.Linear:
torch.nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
m.bias.data.fill_(0.0)
if type(m) == nn.Conv2d:
torch.nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
# m.bias.data.fill_(0.1)
def forward(self, x):
x = x.to(self.device).float() / 255.0
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.fc1(x.view(x.size(0), -1)))
return self.fc2(x)
class ActionSelector(object):
def __init__(
self, INITIAL_EPSILON, FINAL_EPSILON, policy_net, EPS_DECAY, n_actions, device
):
self._eps = INITIAL_EPSILON
self._FINAL_EPSILON = FINAL_EPSILON
self._INITIAL_EPSILON = INITIAL_EPSILON
self._policy_net = policy_net
self._EPS_DECAY = EPS_DECAY
self._n_actions = n_actions
self._device = device
def select_action(self, state, training=False):
sample = random.random()
if training:
self._eps -= (self._INITIAL_EPSILON - self._FINAL_EPSILON) / self._EPS_DECAY
self._eps = max(self._eps, self._FINAL_EPSILON)
if sample > self._eps:
with torch.no_grad():
a = self._policy_net(state).max(1)[1].cpu().view(1, 1)
else:
a = torch.tensor(
[[random.randrange(self._n_actions)]], device="cpu", dtype=torch.long
)
return a.numpy()[0, 0].item(), self._eps
class ReplayMemory(object):
def __init__(self, capacity, state_shape, n_actions, device):
c, h, w = state_shape
self.capacity = capacity
self.device = device
self.m_states = torch.zeros((capacity, c, h, w), dtype=torch.uint8)
self.m_actions = torch.zeros((capacity, 1), dtype=torch.long)
self.m_rewards = torch.zeros((capacity, 1), dtype=torch.int8)
self.m_dones = torch.zeros((capacity, 1), dtype=torch.bool)
self.position = 0
self.size = 0
def push(self, state, action, reward, done):
"""Saves a transition."""
self.m_states[self.position] = state # 5,84,84
self.m_actions[self.position, 0] = action
self.m_rewards[self.position, 0] = reward
self.m_dones[self.position, 0] = done
self.position = (self.position + 1) % self.capacity
self.size = max(self.size, self.position)
def sample(self, bs):
i = torch.randint(0, high=self.size, size=(bs,))
bs = self.m_states[i, :4]
bns = self.m_states[i, 1:]
ba = self.m_actions[i].to(self.device)
br = self.m_rewards[i].to(self.device).float()
bd = self.m_dones[i].to(self.device).float()
return bs, ba, br, bns, bd
def __len__(self):
return self.size
def fp(n_frame):
n_frame = torch.from_numpy(n_frame)
h = n_frame.shape[-2]
return n_frame.view(1, h, h)
class FrameProcessor:
def __init__(self, im_size=84):
self.im_size = im_size
def process(self, frame):
im_size = self.im_size
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = frame[46 : 160 + 46, :]
frame = cv2.resize(frame, (im_size, im_size), interpolation=cv2.INTER_LINEAR)
frame = frame.reshape((1, im_size, im_size))
x = torch.from_numpy(frame)
return x
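if __name__ == "__main__":
    # Minimal shape sanity check (illustrative addition, not part of the original file):
    # a batch of two stacked 4x84x84 uint8 frames should map to one Q-value per action.
    device = torch.device("cpu")
    net = DQN(84, 84, outputs=4, device=device)
    frames = torch.randint(0, 256, (2, 4, 84, 84), dtype=torch.uint8)
    print(net(frames).shape)  # expected: torch.Size([2, 4])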
```
#### File: MIARFID/RNA/resnet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResBlock(nn.Module):
def __init__(self, in_size, out_size, stride=1, shorcut=False):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_size, out_size, kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(out_size)
self.conv2 = nn.Conv2d(
out_size, out_size, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(out_size)
if shorcut:
self.shortcut = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=1, stride=stride, bias=False,),
nn.BatchNorm2d(out_size),
)
else:
self.shortcut = nn.Sequential() # Empty sequential equals to empty layer.
def forward(self, x):
out = self.bn1(self.conv1(x))
out = F.relu(out)
out = self.bn2(self.conv2(out))
out = F.relu(out + self.shortcut(x)) # SHORCUT
return out
class ResNet(nn.Module):
def __init__(self):
super(ResNet, self).__init__()
self.in_size = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.blocks = nn.Sequential(
ResBlock(64, 64, 1,),
ResBlock(64, 64, 1,),
ResBlock(64, 128, 2, shorcut=True),
ResBlock(128, 128, 1,),
ResBlock(128, 256, 2, shorcut=True),
ResBlock(256, 256, 1,),
ResBlock(256, 512, 2, shorcut=True),
ResBlock(512, 512, 1,),
)
self.fw = nn.Linear(512, 10) # 10 Classes
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.blocks(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.fw(out)
return out
```
#### File: lab_simfleet/simfleet/mystrategy.py
```python
import json
import random
from loguru import logger
from simfleet.customer import CustomerStrategyBehaviour
from simfleet.fleetmanager import FleetManagerStrategyBehaviour
from simfleet.helpers import PathRequestException, distance_in_meters
from simfleet.protocol import (
REQUEST_PERFORMATIVE,
ACCEPT_PERFORMATIVE,
REFUSE_PERFORMATIVE,
PROPOSE_PERFORMATIVE,
CANCEL_PERFORMATIVE,
INFORM_PERFORMATIVE,
QUERY_PROTOCOL,
REQUEST_PROTOCOL,
)
from simfleet.transport import TransportStrategyBehaviour
from simfleet.utils import (
TRANSPORT_WAITING,
TRANSPORT_WAITING_FOR_APPROVAL,
CUSTOMER_WAITING,
TRANSPORT_MOVING_TO_CUSTOMER,
CUSTOMER_ASSIGNED,
TRANSPORT_WAITING_FOR_STATION_APPROVAL,
TRANSPORT_MOVING_TO_STATION,
TRANSPORT_CHARGING,
TRANSPORT_CHARGED,
TRANSPORT_NEEDS_CHARGING,
)
################################################################
# #
# FleetManager Strategy #
# #
################################################################
class MyFleetManagerStrategy(FleetManagerStrategyBehaviour):
"""
The default strategy for the FleetManager agent. By default it delegates all requests to all transports.
# Modified to send the request only to the taxi closest to the customer
"""
async def run(self):
if not self.agent.registration:
await self.send_registration()
msg = await self.receive(timeout=5)
logger.debug("Manager received message: {}".format(msg))
if msg:
content = json.loads(msg.body)
customer = content["customer_id"]
position = content["origin"]
destination = content["dest"]
best_transport = None
min_distance = 10e99
for transport in self.get_transport_agents().values():
logger.warning("EEeeeee")
logger.warning(type(transport))
logger.warning((transport))
dst = distance_in_meters(transport.get_position(), position)
if dst < min_distance:
min_distance = dst
best_transport = transport
msg.to = str(best_transport["jid"])
logger.debug(
"Manager sent request to transport {}".format(best_transport["name"])
)
await self.send(msg)
################################################################
# #
# Transport Strategy #
# #
################################################################
class MyTransportStrategy(TransportStrategyBehaviour):
"""
The default strategy for the Transport agent. By default it accepts every request it receives if available.
"""
async def run(self):
if self.agent.needs_charging():
if self.agent.stations is None or len(self.agent.stations) < 1:
logger.warning(
"Transport {} looking for a station.".format(self.agent.name)
)
await self.send_get_stations()
else:
station = random.choice(list(self.agent.stations.keys()))
logger.info(
"Transport {} reserving station {}.".format(
self.agent.name, station
)
)
await self.send_proposal(station)
self.agent.status = TRANSPORT_WAITING_FOR_STATION_APPROVAL
msg = await self.receive(timeout=5)
if not msg:
return
logger.debug("Transport received message: {}".format(msg))
try:
content = json.loads(msg.body)
except TypeError:
content = {}
performative = msg.get_metadata("performative")
protocol = msg.get_metadata("protocol")
if protocol == QUERY_PROTOCOL:
if performative == INFORM_PERFORMATIVE:
self.agent.stations = content
logger.info(
"Got list of current stations: {}".format(
list(self.agent.stations.keys())
)
)
elif performative == CANCEL_PERFORMATIVE:
logger.info("Cancellation of request for stations information.")
elif protocol == REQUEST_PROTOCOL:
logger.debug(
"Transport {} received request protocol from customer/station.".format(
self.agent.name
)
)
if performative == REQUEST_PERFORMATIVE:
if self.agent.status == TRANSPORT_WAITING:
if not self.has_enough_autonomy(content["origin"], content["dest"]):
await self.cancel_proposal(content["customer_id"])
self.agent.status = TRANSPORT_NEEDS_CHARGING
else:
await self.send_proposal(content["customer_id"], {})
self.agent.status = TRANSPORT_WAITING_FOR_APPROVAL
elif performative == ACCEPT_PERFORMATIVE:
if self.agent.status == TRANSPORT_WAITING_FOR_APPROVAL:
logger.debug(
"Transport {} got accept from {}".format(
self.agent.name, content["customer_id"]
)
)
try:
self.agent.status = TRANSPORT_MOVING_TO_CUSTOMER
await self.pick_up_customer(
content["customer_id"], content["origin"], content["dest"]
)
except PathRequestException:
logger.error(
"Transport {} could not get a path to customer {}. Cancelling...".format(
self.agent.name, content["customer_id"]
)
)
self.agent.status = TRANSPORT_WAITING
await self.cancel_proposal(content["customer_id"])
except Exception as e:
logger.error(
"Unexpected error in transport {}: {}".format(
self.agent.name, e
)
)
await self.cancel_proposal(content["customer_id"])
self.agent.status = TRANSPORT_WAITING
else:
await self.cancel_proposal(content["customer_id"])
elif performative == REFUSE_PERFORMATIVE:
logger.debug(
"Transport {} got refusal from customer/station".format(
self.agent.name
)
)
self.agent.status = TRANSPORT_WAITING
elif performative == INFORM_PERFORMATIVE:
if self.agent.status == TRANSPORT_WAITING_FOR_STATION_APPROVAL:
logger.info(
"Transport {} got accept from station {}".format(
self.agent.name, content["station_id"]
)
)
try:
self.agent.status = TRANSPORT_MOVING_TO_STATION
await self.send_confirmation_travel(content["station_id"])
await self.go_to_the_station(
content["station_id"], content["dest"]
)
except PathRequestException:
logger.error(
"Transport {} could not get a path to station {}. Cancelling...".format(
self.agent.name, content["station_id"]
)
)
self.agent.status = TRANSPORT_WAITING
await self.cancel_proposal(content["station_id"])
except Exception as e:
logger.error(
"Unexpected error in transport {}: {}".format(
self.agent.name, e
)
)
await self.cancel_proposal(content["station_id"])
self.agent.status = TRANSPORT_WAITING
elif self.agent.status == TRANSPORT_CHARGING:
if content["status"] == TRANSPORT_CHARGED:
self.agent.transport_charged()
await self.agent.drop_station()
elif performative == CANCEL_PERFORMATIVE:
logger.info(
"Cancellation of request for {} information".format(
self.agent.fleet_type
)
)
################################################################
# #
# Customer Strategy #
# #
################################################################
class MyCustomerStrategy(CustomerStrategyBehaviour):
"""
The default strategy for the Customer agent. By default it accepts the first proposal it receives.
"""
async def run(self):
if self.agent.fleetmanagers is None:
await self.send_get_managers(self.agent.fleet_type)
msg = await self.receive(timeout=5)
if msg:
performative = msg.get_metadata("performative")
if performative == INFORM_PERFORMATIVE:
self.agent.fleetmanagers = json.loads(msg.body)
return
elif performative == CANCEL_PERFORMATIVE:
logger.info(
"Cancellation of request for {} information".format(
self.agent.type_service
)
)
return
if self.agent.status == CUSTOMER_WAITING:
await self.send_request(content={})
msg = await self.receive(timeout=5)
if msg:
performative = msg.get_metadata("performative")
transport_id = msg.sender
if performative == PROPOSE_PERFORMATIVE:
if self.agent.status == CUSTOMER_WAITING:
logger.debug(
"Customer {} received proposal from transport {}".format(
self.agent.name, transport_id
)
)
await self.accept_transport(transport_id)
self.agent.status = CUSTOMER_ASSIGNED
else:
await self.refuse_transport(transport_id)
elif performative == CANCEL_PERFORMATIVE:
if self.agent.transport_assigned == str(transport_id):
logger.warning(
"Customer {} received a CANCEL from Transport {}.".format(
self.agent.name, transport_id
)
)
self.agent.status = CUSTOMER_WAITING
```
#### File: SMA/lab_simfleet/strategies.py
```python
import json
import random
from loguru import logger
from .customer import CustomerStrategyBehaviour
from .fleetmanager import FleetManagerStrategyBehaviour
from .helpers import PathRequestException
from .protocol import REQUEST_PERFORMATIVE, ACCEPT_PERFORMATIVE, REFUSE_PERFORMATIVE, PROPOSE_PERFORMATIVE, \
CANCEL_PERFORMATIVE, INFORM_PERFORMATIVE, QUERY_PROTOCOL, REQUEST_PROTOCOL
from .transport import TransportStrategyBehaviour
from .utils import TRANSPORT_WAITING, TRANSPORT_WAITING_FOR_APPROVAL, CUSTOMER_WAITING, TRANSPORT_MOVING_TO_CUSTOMER, \
CUSTOMER_ASSIGNED, TRANSPORT_WAITING_FOR_STATION_APPROVAL, TRANSPORT_MOVING_TO_STATION, \
TRANSPORT_CHARGING, TRANSPORT_CHARGED, TRANSPORT_NEEDS_CHARGING
################################################################
# #
# FleetManager Strategy #
# #
################################################################
class DelegateRequestBehaviour(FleetManagerStrategyBehaviour):
"""
The default strategy for the FleetManager agent. By default it delegates all requests to all transports.
"""
async def run(self):
if not self.agent.registration:
await self.send_registration()
msg = await self.receive(timeout=5)
logger.debug("Manager received message: {}".format(msg))
if msg:
for transport in self.get_transport_agents().values():
msg.to = str(transport["jid"])
logger.debug("Manager sent request to transport {}".format(transport["name"]))
await self.send(msg)
################################################################
# #
# Transport Strategy #
# #
################################################################
class AcceptAlwaysStrategyBehaviour(TransportStrategyBehaviour):
"""
The default strategy for the Transport agent. By default it accepts every request it receives if available.
"""
async def run(self):
if self.agent.needs_charging():
if self.agent.stations is None or len(self.agent.stations) < 1:
logger.warning("Transport {} looking for a station.".format(self.agent.name))
await self.send_get_stations()
else:
station = random.choice(list(self.agent.stations.keys()))
logger.info("Transport {} reserving station {}.".format(self.agent.name, station))
await self.send_proposal(station)
self.agent.status = TRANSPORT_WAITING_FOR_STATION_APPROVAL
msg = await self.receive(timeout=5)
if not msg:
return
logger.debug("Transport received message: {}".format(msg))
try:
content = json.loads(msg.body)
except TypeError:
content = {}
performative = msg.get_metadata("performative")
protocol = msg.get_metadata("protocol")
if protocol == QUERY_PROTOCOL:
if performative == INFORM_PERFORMATIVE:
self.agent.stations = content
logger.info("Got list of current stations: {}".format(list(self.agent.stations.keys())))
elif performative == CANCEL_PERFORMATIVE:
logger.info("Cancellation of request for stations information.")
elif protocol == REQUEST_PROTOCOL:
logger.debug("Transport {} received request protocol from customer/station.".format(self.agent.name))
if performative == REQUEST_PERFORMATIVE:
if self.agent.status == TRANSPORT_WAITING:
if not self.has_enough_autonomy(content["origin"], content["dest"]):
await self.cancel_proposal(content["customer_id"])
self.agent.status = TRANSPORT_NEEDS_CHARGING
else:
await self.send_proposal(content["customer_id"], {})
self.agent.status = TRANSPORT_WAITING_FOR_APPROVAL
elif performative == ACCEPT_PERFORMATIVE:
if self.agent.status == TRANSPORT_WAITING_FOR_APPROVAL:
logger.debug("Transport {} got accept from {}".format(self.agent.name,
content["customer_id"]))
try:
self.agent.status = TRANSPORT_MOVING_TO_CUSTOMER
await self.pick_up_customer(content["customer_id"], content["origin"], content["dest"])
except PathRequestException:
logger.error("Transport {} could not get a path to customer {}. Cancelling..."
.format(self.agent.name, content["customer_id"]))
self.agent.status = TRANSPORT_WAITING
await self.cancel_proposal(content["customer_id"])
except Exception as e:
logger.error("Unexpected error in transport {}: {}".format(self.agent.name, e))
await self.cancel_proposal(content["customer_id"])
self.agent.status = TRANSPORT_WAITING
else:
await self.cancel_proposal(content["customer_id"])
elif performative == REFUSE_PERFORMATIVE:
logger.debug("Transport {} got refusal from customer/station".format(self.agent.name))
self.agent.status = TRANSPORT_WAITING
elif performative == INFORM_PERFORMATIVE:
if self.agent.status == TRANSPORT_WAITING_FOR_STATION_APPROVAL:
logger.info("Transport {} got accept from station {}".format(self.agent.name,
content["station_id"]))
try:
self.agent.status = TRANSPORT_MOVING_TO_STATION
await self.send_confirmation_travel(content["station_id"])
await self.go_to_the_station(content["station_id"], content["dest"])
except PathRequestException:
logger.error("Transport {} could not get a path to station {}. Cancelling..."
.format(self.agent.name, content["station_id"]))
self.agent.status = TRANSPORT_WAITING
await self.cancel_proposal(content["station_id"])
except Exception as e:
logger.error("Unexpected error in transport {}: {}".format(self.agent.name, e))
await self.cancel_proposal(content["station_id"])
self.agent.status = TRANSPORT_WAITING
elif self.agent.status == TRANSPORT_CHARGING:
if content["status"] == TRANSPORT_CHARGED:
self.agent.transport_charged()
await self.agent.drop_station()
elif performative == CANCEL_PERFORMATIVE:
logger.info("Cancellation of request for {} information".format(self.agent.fleet_type))
################################################################
# #
# Customer Strategy #
# #
################################################################
class AcceptFirstRequestBehaviour(CustomerStrategyBehaviour):
"""
The default strategy for the Customer agent. By default it accepts the first proposal it receives.
"""
async def run(self):
if self.agent.fleetmanagers is None:
await self.send_get_managers(self.agent.fleet_type)
msg = await self.receive(timeout=5)
if msg:
performative = msg.get_metadata("performative")
if performative == INFORM_PERFORMATIVE:
self.agent.fleetmanagers = json.loads(msg.body)
return
elif performative == CANCEL_PERFORMATIVE:
logger.info("Cancellation of request for {} information".format(self.agent.type_service))
return
if self.agent.status == CUSTOMER_WAITING:
await self.send_request(content={})
msg = await self.receive(timeout=5)
if msg:
performative = msg.get_metadata("performative")
transport_id = msg.sender
if performative == PROPOSE_PERFORMATIVE:
if self.agent.status == CUSTOMER_WAITING:
logger.debug(
"Customer {} received proposal from transport {}".format(self.agent.name, transport_id))
await self.accept_transport(transport_id)
self.agent.status = CUSTOMER_ASSIGNED
else:
await self.refuse_transport(transport_id)
elif performative == CANCEL_PERFORMATIVE:
if self.agent.transport_assigned == str(transport_id):
logger.warning(
"Customer {} received a CANCEL from Transport {}.".format(self.agent.name, transport_id))
self.agent.status = CUSTOMER_WAITING
```
#### File: TIA/lab_ga/ga_openai.py
```python
import pickle
from joblib import Parallel, delayed
import multiprocessing
import gym
import numpy as np
import torch
import matplotlib.pyplot as plt
import time
from gym.wrappers import Monitor
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
import copy
import random
from tqdm import tqdm
import pdb
num_cores = multiprocessing.cpu_count()
enviorment = "CartPole-v1"
game_actions = 2 # 2 actions possible: left or right
torch.set_grad_enabled(False) # disable gradients as we will not use them
num_agents = 200
top_limit = 20
generations = 1000
class CartPoleAI(nn.Module):
def __init__(self, num_inputs, num_actions):
super().__init__()
# self.fc = nn.Sequential(
# nn.Linear(4, 128, bias=True), nn.ReLU(), nn.Linear(128, 2, bias=True), nn.Softmax(dim=1)
# )
self.fc = nn.Sequential(
nn.Linear(num_inputs, 128, bias=True),
nn.ReLU(),
nn.Linear(128, num_actions, bias=True),
nn.Softmax(dim=1)
)
def forward(self, inputs):
x = self.fc(inputs)
return x
def init_weights(m):
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.00)
def return_random_agents(num_agents):
agents = []
for _ in range(num_agents):
agent = CartPoleAI(4, 2)
for param in agent.parameters():
param.requires_grad = False
init_weights(agent)
agents.append(agent)
return agents
def run_agents(agents):
reward_agents = []
env = gym.make(enviorment)
for agent in agents:
agent.eval()
observation = env.reset()
r = 0
s = 0
for _ in range(250):
inp = torch.tensor(observation).type("torch.FloatTensor").view(1, -1)
output_probabilities = agent(inp).detach().numpy()[0]
action = np.random.choice(range(game_actions), 1, p=output_probabilities).item()
new_observation, reward, done, info = env.step(action)
r = r + reward
s = s + 1
observation = new_observation
if done:
break
reward_agents.append(r)
return reward_agents
def return_average_score(agent, runs):
score = 0.0
for i in range(runs):
score += run_agents([agent])[0]
return score / runs
def run_agents_n_times(agents, runs):
agents_avg_scores = Parallel(n_jobs=num_cores)(
delayed(return_average_score)(i, runs) for i in agents # tqdm(agents, leave=False)
)
return agents_avg_scores
def mutate(agent):
child_agent = copy.deepcopy(agent)
mutation_power = 0.02 # hyper-parameter, set from https://arxiv.org/pdf/1712.06567.pdf
for param in child_agent.parameters():
param.data += mutation_power * torch.randn_like(param)
return child_agent
def selection_ruleta(agents, fitness_list, k=-1):
if k < 0:
k = int(0.8 * len(agents))
normalized_fitness = [float(i) / sum(fitness_list) for i in fitness_list]
selection = random.choices(population=agents, weights=normalized_fitness, k=k)
return selection
def selection_top(agents, fitness_list, k=-1):
if k < 0:
k = int(0.6 * len(agents)) # default
# print("K IS", k)
sorted_parent_indexes = np.argsort(fitness_list)[::-1][:k]
top_agents = [agents[best_parent] for best_parent in sorted_parent_indexes]
selection = top_agents
return selection
def join_cross_new(parents, k=-1):
    children = []
    for parent1, parent2 in zip(parents[0::2], parents[1::2]):
        copy_parent1 = copy.deepcopy(parent1)
        copy_parent2 = copy.deepcopy(parent2)
        total = len(list(copy_parent1.parameters()))
        i = 0
        for param1, param2 in zip(copy_parent1.parameters(), copy_parent2.parameters()):
            if i >= total / 2:
                # Swap the second half of the parameter tensors between the two children.
                # A temporary copy is needed so the second assignment does not read the
                # already-overwritten data.
                tmp = param1.data.clone()
                param1.data = param2.data.clone()
                param2.data = tmp
            i += 1
        children.append(copy_parent1)
        children.append(copy_parent2)
    return children
def join_cross_old(parents):
children = []
for parent1, parent2 in zip(parents[0::2], parents[1::2]):
copy_parent1 = copy.deepcopy(parent1)
copy_parent2 = copy.deepcopy(parent2)
total = len(list(copy_parent1.parameters()))
i = 0
for param1, param2 in zip(copy_parent1.parameters(), copy_parent2.parameters()):
if i < total / 2:
param1.data = param1.data * 1
else:
param1.data = param2.data * 1
i += 1
children.append(copy_parent1)
children.append(copy_parent2)
return children
def not_join(parents):
children = parents
return children
def replace_generational(agents, fitness_list, children_agents):
# New generation is made of the new children solutions, we duplicate them until we have same number as previous generation.
return random.choices(children_agents, k=len(agents))
def replace_state_stationary(agents, fitness_list, children_agents):
# We pick the best solutions of previous generations to add to our children solutions.
sorted_parents_indexes = np.argsort(fitness_list)[::-1]
sorted_parents = [agents[best_parent] for best_parent in sorted_parents_indexes]
selection = sorted_parents[: len(agents) - len(children_agents)] + children_agents
return selection
def doom_day(agents, fitness_list, children_agents):
# Only the best solution of the children solutions stays and we add new fresh random solutions
fitness = run_agents_n_times(children_agents, 3)
sorted_agents_indexes = np.argsort(fitness)[::-1]
selection = [children_agents[sorted_agents_indexes[0]]]
selection = selection + return_random_agents(len(agents) - 1)
return selection
def select_agents(agents, fitness_list, mode="top", k=-1):
if mode == "top":
return selection_top(agents, fitness_list, k)
elif mode == "ruleta":
return selection_ruleta(agents, fitness_list, k)
else:
assert 1 == 0, "Mode not supported"
def join(parents, mode="cross"):
if mode == "cross":
return join_cross_new(parents)
elif mode == "none":
return not_join(parents)
elif mode == "cross-old":
return join_cross_old(parents)
else:
assert 1 == 0, "Mode not supported"
def replace(agents, fitness_list, children_agents, replace_mode):
if replace_mode == "generational":
return replace_generational(agents, fitness_list, children_agents)
elif replace_mode == "state":
return replace_state_stationary(agents, fitness_list, children_agents)
elif replace_mode == "doom-day":
return doom_day(agents, fitness_list, children_agents)
return children_agents
def return_children(
agents,
fitness_list,
selection_mode,
join_mode,
replace_mode,
k=-1,
):
children_agents = []
    # Select the parent solutions
    selected_parents = select_agents(agents, fitness_list, selection_mode, k)
    # Cross the selected parents using the chosen method
    children_agents = join(selected_parents, join_mode)
    # Mutate the children
    children_agents = Parallel(n_jobs=num_cores)(delayed(mutate)(i) for i in children_agents)
    # Replacement
children_agents = replace(agents, fitness_list, children_agents, replace_mode)
# Sanity check
    assert len(agents) == len(children_agents), "Error in genetic loop, mismatch in population lengths"
return children_agents
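# Illustrative sketch (editor addition, not part of the original script): one generation of the
# genetic loop simply chains fitness evaluation and return_children. The population size,
# number of evaluation runs and the mode strings below are arbitrary choices for the example.
def _demo_one_generation(population_size=10, runs=1):
    population = return_random_agents(population_size)
    fitness = run_agents_n_times(population, runs)
    next_population = return_children(
        agents=population,
        fitness_list=fitness,
        selection_mode="top",
        join_mode="cross",
        replace_mode="state",
    )
    assert len(next_population) == len(population)
    return next_population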
def play_agent(agent):
    try:  # try/except block because render hangs if an error occurs; we must call env.close() to keep working
env = gym.make(enviorment)
env_record = Monitor(env, "./video", force=True)
observation = env_record.reset()
last_observation = observation
r = 0
for _ in range(250):
env_record.render()
inp = torch.tensor(observation).type("torch.FloatTensor").view(1, -1)
output_probabilities = agent(inp).detach().numpy()[0]
action = np.random.choice(range(game_actions), 1, p=output_probabilities).item()
new_observation, reward, done, info = env_record.step(action)
r = r + reward
observation = new_observation
if done:
break
env_record.close()
print("Rewards: ", r)
except Exception as e:
env_record.close()
def main():
selection_modes = ["ruleta", "top"]
join_modes = ["cross", "none", "cross-old"]
replace_modes = ["generational", "state", "doom-day"]
num_total_agents = [
5000,
20,
50,
100,
200,
500,
1000,
]
# reversed(num_total_agents)
for num_total_agent in num_total_agents:
results = {}
num_agents = num_total_agent
agents = return_random_agents(num_agents)
for replace_mode in replace_modes:
for selection_mode in selection_modes:
for join_mode in join_modes:
agents = return_random_agents(num_agents)
mean_fitness_history = []
# pbar = tqdm(range(generations), leave=False)
pbar = tqdm()
for generation in range(generations):
if replace_mode == "doom-day":
break
fitness = run_agents_n_times(agents, 3)
sorted_parent_indexes = np.argsort(fitness)[::-1][:top_limit]
top_fitness = [fitness[best_parent] for best_parent in sorted_parent_indexes]
mean_fitness_history.append(np.mean(top_fitness[:5]))
if (np.mean(top_fitness[:5])) > 249:
pbar.set_description(
f"Selec-{selection_mode} join {join_mode} replace-{replace_mode} total_agents {num_total_agent} | Gen {generation} - Top5fitness:{np.mean(top_fitness[:5]):.2f} - Mean fitness:{np.mean(fitness):.2f} SOLVED"
)
pbar.close()
break
elif generation == 150 and np.mean(fitness) < 25:
print("Not converging, early abort")
pbar.close()
break
pbar.set_description(
f"Selec-{selection_mode} join {join_mode} replace-{replace_mode} total_agents {num_total_agent} | Gen {generation} - Top5fitness:{np.mean(top_fitness[:5]):.2f} - Mean fitness:{np.mean(fitness):.2f}"
)
# setup an empty list for containing children agents
children_agents = return_children(
agents=agents,
fitness_list=fitness,
selection_mode=selection_mode,
join_mode=join_mode,
replace_mode=replace_mode,
)
# kill all agents, and replace them with their children
agents = children_agents
results[(selection_mode, join_mode, replace_mode)] = mean_fitness_history
with open(f"results/results_ga_agentsperpop{num_agents}.pickle", "wb") as handle:
pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
``` |
{
"source": "jiwidi/Salesforce-Predictive-Modelling",
"score": 3
} |
#### File: Salesforce-Predictive-Modelling/src/Deep neural network.py
```python
import pandas as pd
import numpy as np
import tensorflow as tf
import csv
LEARNING_RATE = 0.001
DROPOUT = 0.1
EPOCH = 10000
TRAIN = pd.read_csv('../data/train.csv')
DEV = pd.read_csv('../data/dev.csv')
TARGET = []
TARGET_TRAIN = TRAIN["Poder_Adquisitivo"].as_matrix()
TARGET_DEV = DEV["Poder_Adquisitivo"].as_matrix()
TRAIN = TRAIN.drop("ID_Customer", axis=1).drop("Poder_Adquisitivo", axis=1)
DEV = DEV.drop("ID_Customer", axis=1).drop("Poder_Adquisitivo", axis=1)
categorical = ["Socio_Demo_01", "Socio_Demo_02", "Socio_Demo_03", "Socio_Demo_04", "Socio_Demo_05"]
for l in categorical:
TRAIN = TRAIN.drop(l, axis=1)
DEV = DEV.drop(l, axis=1)
def main():
    # Deep neural network regressor trained on the training set produced by the train/test split
prepro()
regressor = model(EPOCH,DROPOUT,LEARNING_RATE)
regressor.fit(input_fn=lambda: input_fn_train(), steps=EPOCH)
# Evaluation on the test set created by train_test_split
ev = regressor.evaluate(input_fn=lambda: input_fn_eval(), steps=1)
loss_score1 = ev["loss"]
print('E'+str(EPOCH)+'-D'+str(DROPOUT)+'-L'+str(LEARNING_RATE)+" Final Loss on the testing set: {0:f}".format(loss_score1))
def model(EPOCH,DROPOUT,LEARNING_RATE):
feature_cols = [tf.contrib.layers.real_valued_column("", dimension=2)]
# Model
tf.logging.set_verbosity(tf.logging.ERROR)
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
model_dir='model\model-E'+str(EPOCH)+'-D'+str(DROPOUT)+'-L'+str(LEARNING_RATE),
activation_fn=tf.nn.relu,
hidden_units=[200, 100, 50, 25, 12],
dropout=DROPOUT,
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=LEARNING_RATE,
l1_regularization_strength=0.001
)
)
return regressor
def prepro():
global TRAIN
global TEST
# Input builders
def input_fn_train(): # returns x, y
return tf.convert_to_tensor(np.array(TRAIN)),tf.convert_to_tensor(np.array(TARGET_TRAIN))
def input_fn_eval(): # returns x, y
return tf.convert_to_tensor(np.array(DEV)), tf.convert_to_tensor(np.array(TARGET_DEV))
#metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
def input_fn_predict(): # returns x, None
return tf.convert_to_tensor(np.array(TEST))
main()
``` |
{
"source": "jiwon1219/couchers",
"score": 2
} |
#### File: couchers/servicers/bugs.py
```python
import grpc
import requests
from couchers.config import config
from couchers.db import session_scope
from couchers.models import User
from pb import bugs_pb2, bugs_pb2_grpc
class Bugs(bugs_pb2_grpc.BugsServicer):
def _version(self):
return config["VERSION"]
def Version(self, request, context):
return bugs_pb2.VersionInfo(version=self._version())
def ReportBug(self, request, context):
if not config["BUG_TOOL_ENABLED"]:
context.abort(grpc.StatusCode.UNAVAILABLE, "Bug tool disabled")
repo = config["BUG_TOOL_GITHUB_REPO"]
auth = (config["BUG_TOOL_GITHUB_USERNAME"], config["BUG_TOOL_GITHUB_TOKEN"])
with session_scope() as session:
username = session.query(User.username).filter(User.id == request.user_id).scalar() or "<unknown>"
issue_title = request.subject
issue_body = (
f"Subject: {request.subject}\n"
f"Description:\n"
f"{request.description}\n"
f"\n"
f"Steps:\n"
f"{request.steps}\n"
f"\n"
f"Results:\n"
f"{request.results}\n"
f"\n"
f"Backend version: {self._version()}\n"
f"Frontend version: {request.frontend_version}\n"
f"User Agent: {request.user_agent}\n"
f"Page: {request.page}\n"
f"User (spoofable): {username} ({request.user_id})"
)
issue_labels = ["bug tool"]
json_body = {"title": issue_title, "body": issue_body, "labels": issue_labels}
r = requests.post(f"https://api.github.com/repos/{repo}/issues", auth=auth, json=json_body)
if not r.status_code == 201:
context.abort(grpc.StatusCode.INTERNAL, "Request failed")
report_identifier = f'#{r.json()["number"]}'
return bugs_pb2.ReportBugRes(report_identifier=report_identifier)
``` |
{
"source": "JiwonCocoder/fine_tuning_resnet50",
"score": 3
} |
#### File: fine_tuning_resnet50/datasets/dataset.py
```python
import pdb
from torchvision import datasets, transforms
from torch.utils.data import Dataset
from .data_utils import get_onehot
from .augmentation.randaugment import RandAugment
from PIL import Image
import numpy as np
import copy
class BasicDataset(Dataset):
"""
BasicDataset returns a pair of image and labels (targets).
If targets are not given, BasicDataset returns None as the label.
This class supports strong augmentation for Fixmatch,
and return both weakly and strongly augmented images.
"""
def __init__(self,
data,
targets=None,
num_classes=None,
transform=None,
use_strong_transform=False,
strong_transform=None,
onehot=False,
*args, **kwargs):
"""
Args
data: x_data
targets: y_data (if not exist, None)
num_classes: number of label classes
transform: basic transformation of data
use_strong_transform: If True, this dataset returns both weakly and strongly augmented images.
strong_transform: list of transformation functions for strong augmentation
onehot: If True, label is converted into onehot vector.
"""
super(BasicDataset, self).__init__()
self.data = data
self.targets = targets
self.num_classes = num_classes
self.use_strong_transform = use_strong_transform
self.onehot = onehot
self.transform = transform
if use_strong_transform:
if strong_transform is None:
self.strong_transform = copy.deepcopy(transform)
color_jitter = transforms.ColorJitter(contrast=(0.3, 0.7), hue=0.3, saturation=(0.5, 1.5))
self.strong_transform.transforms.insert(0, transforms.RandomApply([color_jitter], p=0.5))
self.strong_transform.transforms.insert(0, transforms.RandomApply([transforms.Grayscale(num_output_channels=3)], p=0.2))
# self.strong_transform.transforms.insert(0, transforms.RandomApply([transforms.RandomResizedCrop(size=(128, 200),ratio=(1,1.1))], p=0.5)),
self.strong_transform.transforms.insert(0, transforms.RandomApply([transforms.RandomRotation(2)], p=0.5))
else:
self.strong_transform = strong_transform
def __getitem__(self, idx):
"""
If strong augmentation is not used,
return weak_augment_image, target
else:
return weak_augment_image, strong_augment_image, target
"""
#set idx-th target
if self.targets is None:
target = None
else:
target_ = self.targets[idx]
target = target_ if not self.onehot else get_onehot(self.num_classes, target_)
#set augmented images
img = self.data[idx]
if self.transform is None:
return transforms.ToTensor()(img), target
else:
if isinstance(img, np.ndarray):
img = Image.fromarray(img)
img_w = self.transform(img)
if not self.use_strong_transform:
return img_w, target
else:
return img_w, self.strong_transform(img), target
def __len__(self):
return len(self.data)
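# Illustrative sketch (editor addition, not part of the original repository): building a
# BasicDataset from random uint8 images. The array shapes, number of classes and the weak
# transform are placeholders chosen only for this example.
def _demo_basic_dataset():
    dummy_images = np.random.randint(0, 255, size=(16, 32, 32, 3), dtype=np.uint8)
    dummy_targets = np.random.randint(0, 10, size=(16,))
    weak_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    dataset = BasicDataset(dummy_images, dummy_targets, num_classes=10,
                           transform=weak_transform, use_strong_transform=True)
    # with use_strong_transform=True, indexing yields (weak view, strong view, label)
    img_w, img_s, target = dataset[0]
    return img_w.shape, img_s.shape, target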
``` |
{
"source": "JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching",
"score": 2
} |
#### File: -Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching/lib/matching_model.py
```python
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, batch_norm=False):
if batch_norm:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.1, inplace=True))
else:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.LeakyReLU(0.1))
def predict_flow(in_planes):
return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True)
def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
return nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride, padding, bias=True)
def NormMap2D_to_unNormMap2D(NormMap2D):
B, C, H, W = NormMap2D.size()
mapping = torch.zeros_like(NormMap2D)
# mesh grid
mapping[:,0,:,:] = (NormMap2D[:, 0, :, :].float().clone() + 1) * (W - 1) / 2.0 # unormalise
mapping[:,1,:,:] = (NormMap2D[:, 1, :, :].float().clone() + 1) * (H - 1) / 2.0 # unormalise
idx = mapping[:, 0, :, :] + mapping[:,1,:,:] * W
idx = idx.type(torch.cuda.LongTensor)
return idx
#from normalized mapping to unnormalised flow
def unnormalise_and_convert_mapping_to_flow(map):
# here map is normalised to -1;1
# we put it back to 0,W-1, then convert it to flow
B, C, H, W = map.size()
mapping = torch.zeros_like(map)
# mesh grid
mapping[:,0,:,:] = (map[:, 0, :, :].float().clone() + 1) * (W - 1) / 2.0 # unormalise
mapping[:,1,:,:] = (map[:, 1, :, :].float().clone() + 1) * (H - 1) / 2.0 # unormalise
# print("map(normalized)")
# print(map[:, 0, 3, 5])
# print("mapping(unnormalized)")
# print(mapping[:, 0, 3, 5])
xx = torch.arange(0, W).view(1,-1).repeat(H,1)
yy = torch.arange(0, H).view(-1,1).repeat(1,W)
xx = xx.view(1,1,H,W).repeat(B,1,1,1)
yy = yy.view(1,1,H,W).repeat(B,1,1,1)
grid = torch.cat((xx,yy),1).float()
if mapping.is_cuda:
grid = grid.cuda()
flow = mapping - grid
return flow
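# Illustrative check (editor addition, not part of the original repository): an identity mapping
# expressed in normalised [-1, 1] coordinates should convert to an all-zero flow field. The grid
# size below is arbitrary.
def _demo_identity_mapping_gives_zero_flow(H=4, W=4):
    xs = torch.linspace(-1, 1, W).view(1, 1, 1, W).repeat(1, 1, H, 1)
    ys = torch.linspace(-1, 1, H).view(1, 1, H, 1).repeat(1, 1, 1, W)
    identity_map = torch.cat((xs, ys), dim=1)  # (1, 2, H, W); channel 0 = x, channel 1 = y
    flow = unnormalise_and_convert_mapping_to_flow(identity_map)
    assert torch.allclose(flow, torch.zeros_like(flow), atol=1e-4)
    return flow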
def unnormalise_and_convert_mapping_to_flow_and_grid(map):
# here map is normalised to -1;1
# we put it back to 0,W-1, then convert it to flow
B, C, H, W = map.size()
mapping = torch.zeros_like(map)
# mesh grid
mapping[:,0,:,:] = (map[:, 0, :, :].float().clone() + 1) * (W - 1) / 2.0 # unormalise
mapping[:,1,:,:] = (map[:, 1, :, :].float().clone() + 1) * (H - 1) / 2.0 # unormalise
# print("map(normalized)")
# print(map[:, 0, 3, 5])
# print("mapping(unnormalized)")
# print(mapping[:, 0, 3, 5])
xx = torch.arange(0, W).view(1,-1).repeat(H,1)
yy = torch.arange(0, H).view(-1,1).repeat(1,W)
xx = xx.view(1,1,H,W).repeat(B,1,1,1)
yy = yy.view(1,1,H,W).repeat(B,1,1,1)
grid = torch.cat((xx,yy),1).float()
if mapping.is_cuda:
grid = grid.cuda()
flow = mapping - grid
return flow, grid
class CorrelationVolume(nn.Module):
"""
Implementation by <NAME>
paper: https://arxiv.org/abs/1703.05593
project: https://github.com/ignacio-rocco/cnngeometric_pytorch
"""
def __init__(self):
super(CorrelationVolume, self).__init__()
def forward(self, feature_A, feature_B):
b, c, h, w = feature_A.size()
# reshape features for matrix multiplication
feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w) # shape (b,c,h*w)
# feature_A = feature_A.view(b, c, h*w).transpose(1,2)
feature_B = feature_B.view(b, c, h * w).transpose(1, 2) # shape (b,h*w,c)
feature_mul = torch.bmm(feature_B, feature_A) # shape (b,h*w,h*w)
correlation_tensor = feature_mul.view(b, h, w, h * w).transpose(2, 3).transpose(1, 2)
# correlation_numpy = correlation_tensor.detach().cpu().numpy()
return correlation_tensor # shape (b,h*w,h,w)
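# Illustrative check (editor addition, not part of the original repository): the correlation
# volume folds feature_A's spatial grid into the channel dimension, so two (b, c, h, w) feature
# maps produce a (b, h*w, h, w) tensor. The sizes below are arbitrary.
def _demo_correlation_volume_shape():
    corr = CorrelationVolume()
    feat_a = torch.randn(2, 64, 16, 16)
    feat_b = torch.randn(2, 64, 16, 16)
    volume = corr(feat_a, feat_b)
    assert volume.shape == (2, 16 * 16, 16, 16)
    return volume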
class FeatureL2Norm(nn.Module):
"""
Implementation by <NAME>
paper: https://arxiv.org/abs/1703.05593
project: https://github.com/ignacio-rocco/cnngeometric_pytorch
"""
def __init__(self):
super(FeatureL2Norm, self).__init__()
def forward(self, feature):
epsilon = 1e-6
norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5).unsqueeze(1).expand_as(feature)
return torch.div(feature, norm)
class OpticalFlowEstimator(nn.Module):
def __init__(self, in_channels, batch_norm):
super(OpticalFlowEstimator, self).__init__()
dd = np.cumsum([128,128,96,64,32])
self.conv_0 = conv(in_channels, 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_1 = conv(in_channels + dd[0], 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_2 = conv(in_channels + dd[1], 96, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_3 = conv(in_channels + dd[2], 64, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_4 = conv(in_channels + dd[3], 32, kernel_size=3, stride=1, batch_norm=batch_norm)
self.predict_flow = predict_flow(in_channels + dd[4])
def forward(self, x):
# dense net connection
x = torch.cat((self.conv_0(x), x),1)
x = torch.cat((self.conv_1(x), x),1)
x = torch.cat((self.conv_2(x), x),1)
x = torch.cat((self.conv_3(x), x),1)
x = torch.cat((self.conv_4(x), x),1)
flow = self.predict_flow(x)
return x, flow
class OpticalFlowEstimatorNoDenseConnection(nn.Module):
def __init__(self, in_channels, batch_norm):
super(OpticalFlowEstimatorNoDenseConnection, self).__init__()
self.conv_0 = conv(in_channels, 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_1 = conv(128, 128, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_2 = conv(128, 96, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_3 = conv(96, 64, kernel_size=3, stride=1, batch_norm=batch_norm)
self.conv_4 = conv(64, 32, kernel_size=3, stride=1, batch_norm=batch_norm)
self.predict_flow = predict_flow(32)
def forward(self, x):
x = self.conv_4(self.conv_3(self.conv_2(self.conv_1(self.conv_0(x)))))
flow = self.predict_flow(x)
return x, flow
# extracted from DGCNet
def conv_blck(in_channels, out_channels, kernel_size=3,
stride=1, padding=1, dilation=1, bn=False):
if bn:
return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True))
else:
return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation),
nn.ReLU(inplace=True))
def conv_head(in_channels):
return nn.Conv2d(in_channels, 2, kernel_size=3, padding=1)
class CorrespondenceMapBase(nn.Module):
def __init__(self, in_channels, bn=False):
super().__init__()
def forward(self, x1, x2=None, x3=None):
x = x1
# concatenating dimensions
if (x2 is not None) and (x3 is None):
x = torch.cat((x1, x2), 1)
elif (x2 is None) and (x3 is not None):
x = torch.cat((x1, x3), 1)
elif (x2 is not None) and (x3 is not None):
x = torch.cat((x1, x2, x3), 1)
return x
class CMDTop(CorrespondenceMapBase):
def __init__(self, in_channels, bn=False, use_cuda=False):
super().__init__(in_channels, bn)
chan = [128, 128, 96, 64, 32]
self.conv0 = conv_blck(in_channels, chan[0], bn=bn)
self.conv1 = conv_blck(chan[0], chan[1], bn=bn)
self.conv2 = conv_blck(chan[1], chan[2], bn=bn)
self.conv3 = conv_blck(chan[2], chan[3], bn=bn)
self.conv4 = conv_blck(chan[3], chan[4], bn=bn)
self.final = conv_head(chan[-1])
if use_cuda:
self.conv0.cuda()
self.conv1.cuda()
self.conv2.cuda()
self.conv3.cuda()
self.conv4.cuda()
self.final.cuda()
def forward(self, x1, x2=None, x3=None):
x = super().forward(x1, x2, x3)
x = self.conv4(self.conv3(self.conv2(self.conv1(self.conv0(x)))))
return self.final(x)
def warp(x, flo):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
"""
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
grid = torch.cat((xx, yy), 1).float()
if x.is_cuda:
grid = grid.cuda()
vgrid = Variable(grid) + flo
# makes a mapping out of the flow
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
output = nn.functional.grid_sample(x, vgrid)
mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
mask = nn.functional.grid_sample(mask, vgrid)
mask[mask < 0.9999] = 0
mask[mask > 0] = 1
return output * mask
# return output
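# Illustrative sketch (editor addition, not part of the original repository): warp() takes an
# image batch and a per-pixel flow in pixel units and returns a tensor of the same shape. Note
# that the validity mask inside warp() is allocated with .cuda(), so this sketch assumes a GPU.
def _demo_warp_shapes():
    x = torch.rand(2, 3, 16, 16).cuda()
    zero_flow = torch.zeros(2, 2, 16, 16).cuda()  # zero flow keeps every pixel in place
    warped = warp(x, zero_flow)
    assert warped.shape == x.shape
    return warped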
def unNormMap1D_to_NormMap2D(idx_B_Avec, delta4d=None, k_size=1, do_softmax=False, scale='centered', return_indices=False,
invert_matching_direction=False):
to_cuda = lambda x: x.cuda() if idx_B_Avec.is_cuda else x
batch_size, sz = idx_B_Avec.shape
    w = sz // 25  # NOTE: assumes the 1D indices come from a 25x25 correlation grid (sz == 625)
    h = w
# fs2: width, fs1: height
if scale == 'centered':
XA, YA = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
# XB, YB = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
elif scale == 'positive':
XA, YA = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
# XB, YB = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
JA, IA = np.meshgrid(range(w), range(h))
# JB, IB = np.meshgrid(range(w), range(h))
XA, YA = Variable(to_cuda(torch.FloatTensor(XA))), Variable(to_cuda(torch.FloatTensor(YA)))
# XB, YB = Variable(to_cuda(torch.FloatTensor(XB))), Variable(to_cuda(torch.FloatTensor(YB)))
JA, IA = Variable(to_cuda(torch.LongTensor(JA).contiguous().view(1, -1))), Variable(to_cuda(torch.LongTensor(IA).contiguous().view(1, -1)))
# JB, IB = Variable(to_cuda(torch.LongTensor(JB).view(1, -1))), Variable(to_cuda(torch.LongTensor(IB).view(1, -1)))
iA = IA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
jA = JA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
# iB = IB.expand_as(iA)
# jB = JB.expand_as(jA)
xA=XA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
yA=YA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
# xB=XB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
# yB=YB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
xA_WTA = xA.contiguous().view(batch_size, 1, h, w)
yA_WTA = yA.contiguous().view(batch_size, 1, h, w)
Map2D_WTA = torch.cat((xA_WTA, yA_WTA), 1).float()
return Map2D_WTA
def unNormMap1D_to_NormMap2D_inLoc(idx_B_Avec,h,w, delta4d=None, k_size=1, do_softmax=False, scale='centered', return_indices=False,
invert_matching_direction=False):
to_cuda = lambda x: x.cuda() if idx_B_Avec.is_cuda else x
batch_size, sz = idx_B_Avec.shape
# fs2: width, fs1: height
if scale == 'centered':
XA, YA = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
# XB, YB = np.meshgrid(np.linspace(-1, 1, w), np.linspace(-1, 1, h))
elif scale == 'positive':
XA, YA = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
# XB, YB = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h))
JA, IA = np.meshgrid(range(w), range(h))
# JB, IB = np.meshgrid(range(w), range(h))
XA, YA = Variable(to_cuda(torch.FloatTensor(XA))), Variable(to_cuda(torch.FloatTensor(YA)))
# XB, YB = Variable(to_cuda(torch.FloatTensor(XB))), Variable(to_cuda(torch.FloatTensor(YB)))
JA, IA = Variable(to_cuda(torch.LongTensor(JA).contiguous().view(1, -1))), Variable(to_cuda(torch.LongTensor(IA).contiguous().view(1, -1)))
# JB, IB = Variable(to_cuda(torch.LongTensor(JB).view(1, -1))), Variable(to_cuda(torch.LongTensor(IB).view(1, -1)))
iA = IA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
jA = JA.contiguous().view(-1)[idx_B_Avec.contiguous().view(-1)].contiguous().view(batch_size, -1)
# iB = IB.expand_as(iA)
# jB = JB.expand_as(jA)
xA=XA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
yA=YA[iA.contiguous().view(-1),jA.contiguous().view(-1)].contiguous().view(batch_size,-1)
# xB=XB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
# yB=YB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
xA_WTA = xA.contiguous().view(batch_size, 1, h, w)
yA_WTA = yA.contiguous().view(batch_size, 1, h, w)
Map2D_WTA = torch.cat((xA_WTA, yA_WTA), 1).float()
return Map2D_WTA
def warp_from_NormMap2D(x, NormMap2D):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
"""
B, C, H, W = x.size()
# mesh grid
vgrid = NormMap2D.permute(0, 2, 3, 1).contiguous()
output = nn.functional.grid_sample(x, vgrid, align_corners=True) #N,C,H,W
mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
mask = nn.functional.grid_sample(mask, vgrid)
#
mask[mask < 0.9999] = 0
mask[mask > 0] = 1
return output*mask
# return output
def L1_loss(input_flow, target_flow):
L1 = torch.abs(input_flow-target_flow)
L1 = torch.sum(L1, 1)
return L1
def L1_charbonnier_loss(input_flow, target_flow, sparse=False, mean=True, sum=False):
batch_size = input_flow.size(0)
epsilon = 0.01
alpha = 0.4
L1 = L1_loss(input_flow, target_flow)
norm = torch.pow(L1 + epsilon, alpha)
if sparse:
# invalid flow is defined with both flow coordinates to be exactly 0
mask = (target_flow[:,0] == 0) & (target_flow[:,1] == 0)
norm = norm[~mask]
if mean:
return norm.mean()
elif sum:
return norm.sum()
else:
return norm.sum()/batch_size
def EPE(input_flow, target_flow, sparse=False, mean=True, sum=False):
EPE_map = torch.norm(target_flow - input_flow, 2, 1)
# input_flow_np = input_flow.detach().cpu().numpy()
batch_size = EPE_map.size(0)
if sparse:
# invalid flow is defined with both flow coordinates to be exactly 0
mask = (target_flow[:,0] == 0) & (target_flow[:,1] == 0)
EPE_map = EPE_map[~mask]
if mean:
return EPE_map.mean()
elif sum:
return EPE_map.sum()
else:
return EPE_map.sum()/batch_size
def EPE_mask(input_flow, target_flow, mask_num, sparse=False, mean=False, sum=False):
EPE_map = torch.norm(target_flow - input_flow, 2, 1)
# input_flow_np = input_flow.detach().cpu().numpy()
batch_size = EPE_map.size(0)
if sparse:
# invalid flow is defined with both flow coordinates to be exactly 0
mask = (target_flow[:, 0] == 0) & (target_flow[:, 1] == 0)
EPE_map = EPE_map[~mask]
if mean:
return EPE_map.mean()
elif sum:
return EPE_map.sum()
else:
return (EPE_map/ mask_num).sum() /batch_size
def multiscaleEPE(Map2D_WTA, Map2D_NET, mask, sparse=False, robust_L1_loss=False, mean=True, sum=False):
# b, _, h, w = output.size()
# if sparse:
# target_scaled = sparse_max_pool(target, (h, w))
#
# if mask is not None:
# mask = sparse_max_pool(mask.float().unsqueeze(1), (h, w))
# else:
# target_scaled = F.interpolate(target, (h, w), mode='bilinear')
if mask is not None:
mask = mask.cuda().detach().byte()
    if robust_L1_loss:
        if mask is not None:
            return L1_charbonnier_loss(Map2D_WTA * mask.float(), Map2D_NET * mask.float(), sparse, mean=mean, sum=False)
        else:
            return L1_charbonnier_loss(Map2D_WTA, Map2D_NET, sparse, mean=mean, sum=False)
else:
if mask is not None:
eps = 1
src_num_fgnd = mask.sum(dim=3, keepdim=True).sum(dim=2, keepdim=True) + eps
return EPE_mask(Map2D_WTA * mask.float(), Map2D_NET * mask.float(), src_num_fgnd, sparse, mean=mean, sum=sum)
else:
return EPE(Map2D_WTA, Map2D_NET, sparse, mean=mean, sum=False)
def generate_NormMap2D_corr4d_WTA(corr4d):
batch_size, ch, fs1, fs2, fs3, fs4 = corr4d.size()
nc_B_Avec = corr4d.view(batch_size, fs1 * fs2, fs3, fs4) # [batch_idx,k_A,i_B,j_B]
nc_B_Avec = torch.nn.functional.softmax(nc_B_Avec, 1)
scores_B, index_B = torch.max(nc_B_Avec, dim=1)
index1D_B = index_B.view(batch_size, -1)
Map2D = unNormMap1D_to_NormMap2D(index1D_B) # (B,2,S,S)
return Map2D
def generate_mask(flow, flow_bw, occ_thresh):
output_sum = flow + flow_bw
output_sum = torch.sum(torch.pow(output_sum.permute(0, 2, 3, 1), 2), 3)
occ_bw = (output_sum > occ_thresh).float()
mask_bw = 1. - occ_bw
return mask_bw
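# Illustrative check (editor addition, not part of the original repository): generate_mask is a
# forward-backward consistency test; pixels whose forward and backward flows cancel out are kept.
# With exactly opposite flows the whole mask is 1.
def _demo_generate_mask():
    flow_fw = torch.randn(1, 2, 8, 8)
    flow_bw = -flow_fw  # perfectly consistent backward flow
    mask = generate_mask(flow_fw, flow_bw, occ_thresh=0.1)
    assert torch.all(mask == 1.0)
    return mask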
def warp_with_mask(x, flo, masked_flow):
"""
warp an image/tensor (im2) back to im1, according to the optical flow
x: [B, C, H, W] (im2)
flo: [B, 2, H, W] flow
mask: [B, C, H, W]
"""
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
grid = torch.cat((xx, yy), 1).float()
if x.is_cuda:
grid = grid.cuda()
vgrid = Variable(grid) + flo
# makes a mapping out of the flow
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
output = nn.functional.grid_sample(x, vgrid)
mask = torch.autograd.Variable(torch.ones(x.size())).cuda()
mask = nn.functional.grid_sample(mask, vgrid)
mask[mask < 0.9999] = 0
mask[mask > 0] = 1
# output_img = output * mask
output_masked = output * masked_flow
return output_masked
```
#### File: -Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching/lib/point_tnf.py
```python
import torch
import torch.nn
from torch.autograd import Variable
import numpy as np
import pdb
def normalize_axis(x,L):
return (x-1-(L-1)/2)*2/(L-1)
def unnormalize_axis(x,L):
return x*(L-1)/2+1+(L-1)/2
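# Worked example (editor addition): with 1-based pixel coordinates on an axis of length L,
# normalize_axis maps pixel 1 to -1 and pixel L to +1, e.g. for L = 240:
#   normalize_axis(1, 240)   == (1   - 1 - 119.5) * 2 / 239 == -1.0
#   normalize_axis(240, 240) == (240 - 1 - 119.5) * 2 / 239 == +1.0
# and unnormalize_axis is its exact inverse, so unnormalize_axis(normalize_axis(x, L), L) == x.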
def corr_to_matches(corr4d, delta4d=None, k_size=1, do_softmax=False, scale='centered', return_indices=False, invert_matching_direction=False):
to_cuda = lambda x: x.cuda() if corr4d.is_cuda else x
batch_size,ch,fs1,fs2,fs3,fs4 = corr4d.size()
if scale=='centered':
XA,YA=np.meshgrid(np.linspace(-1,1,fs2*k_size),np.linspace(-1,1,fs1*k_size))
XB,YB=np.meshgrid(np.linspace(-1,1,fs4*k_size),np.linspace(-1,1,fs3*k_size))
elif scale=='positive':
XA,YA=np.meshgrid(np.linspace(0,1,fs2*k_size),np.linspace(0,1,fs1*k_size))
XB,YB=np.meshgrid(np.linspace(0,1,fs4*k_size),np.linspace(0,1,fs3*k_size))
JA,IA=np.meshgrid(range(fs2),range(fs1))
JB,IB=np.meshgrid(range(fs4),range(fs3))
XA,YA=Variable(to_cuda(torch.FloatTensor(XA))),Variable(to_cuda(torch.FloatTensor(YA)))
XB,YB=Variable(to_cuda(torch.FloatTensor(XB))),Variable(to_cuda(torch.FloatTensor(YB)))
JA,IA=Variable(to_cuda(torch.LongTensor(JA).view(1,-1))),Variable(to_cuda(torch.LongTensor(IA).view(1,-1)))
JB,IB=Variable(to_cuda(torch.LongTensor(JB).view(1,-1))),Variable(to_cuda(torch.LongTensor(IB).view(1,-1)))
if invert_matching_direction:
nc_A_Bvec=corr4d.view(batch_size,fs1,fs2,fs3*fs4)
if do_softmax:
nc_A_Bvec=torch.nn.functional.softmax(nc_A_Bvec,dim=3)
match_A_vals,idx_A_Bvec=torch.max(nc_A_Bvec,dim=3)
score=match_A_vals.view(batch_size,-1)
iB=IB.view(-1)[idx_A_Bvec.view(-1)].view(batch_size,-1)
jB=JB.view(-1)[idx_A_Bvec.view(-1)].view(batch_size,-1)
iA=IA.expand_as(iB)
jA=JA.expand_as(jB)
else:
nc_B_Avec=corr4d.view(batch_size,fs1*fs2,fs3,fs4) # [batch_idx,k_A,i_B,j_B]
if do_softmax:
nc_B_Avec=torch.nn.functional.softmax(nc_B_Avec,dim=1)
match_B_vals,idx_B_Avec=torch.max(nc_B_Avec,dim=1)
score=match_B_vals.view(batch_size,-1)
# IA_flatten = IA.view(-1)
# idx_B_Avec_flatten = idx_B_Avec.view(-1)
# iA_flatten = IA_flatten[idx_B_Avec_flatten]
# iA_flatten_test_index = idx_B_Avec_flatten[5:7]
# iA_faltten_test = IA_flatten[iA_flatten_test_index]
# iA_flatten_numpy = iA_flatten.cpu().numpy()
# iA_mine = iA_flatten.view(batch_size, -1)
iA=IA.view(-1)[idx_B_Avec.view(-1)].view(batch_size,-1)
jA=JA.view(-1)[idx_B_Avec.view(-1)].view(batch_size,-1)
iB=IB.expand_as(iA)
jB=JB.expand_as(jA)
if delta4d is not None: # relocalization
delta_iA,delta_jA,delta_iB,delta_jB = delta4d
diA=delta_iA.squeeze(0).squeeze(0)[iA.view(-1),jA.view(-1),iB.view(-1),jB.view(-1)]
djA=delta_jA.squeeze(0).squeeze(0)[iA.view(-1),jA.view(-1),iB.view(-1),jB.view(-1)]
diB=delta_iB.squeeze(0).squeeze(0)[iA.view(-1),jA.view(-1),iB.view(-1),jB.view(-1)]
djB=delta_jB.squeeze(0).squeeze(0)[iA.view(-1),jA.view(-1),iB.view(-1),jB.view(-1)]
iA=iA*k_size+diA.expand_as(iA)
jA=jA*k_size+djA.expand_as(jA)
iB=iB*k_size+diB.expand_as(iB)
jB=jB*k_size+djB.expand_as(jB)
# pdb.set_trace()
# iA = torch.LongTensor(iA).cuda()
# jA = torch.LongTensor(jA).cuda()
# iB = torch.LongTensor(iB).cuda()
# jB = torch.LongTensor(jB).cuda()
iA = iA.type(torch.long)
jA = jA.type(torch.long)
iB = iB.type(torch.long)
jB = jB.type(torch.long)
xA=XA[iA.view(-1),jA.view(-1)].view(batch_size,-1)
yA=YA[iA.view(-1),jA.view(-1)].view(batch_size,-1)
xB=XB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
yB=YB[iB.view(-1),jB.view(-1)].view(batch_size,-1)
if return_indices:
return (xA,yA,xB,yB,score,iA,jA,iB,jB)
else:
return (xA,yA,xB,yB,score)
def nearestNeighPointTnf(matches,target_points_norm):
xA,yA,xB,yB=matches
# match target points to grid
deltaX=target_points_norm[:,0,:].unsqueeze(1)-xB.unsqueeze(2)
deltaY=target_points_norm[:,1,:].unsqueeze(1)-yB.unsqueeze(2)
distB=torch.sqrt(torch.pow(deltaX,2)+torch.pow(deltaY,2))
vals,idx=torch.min(distB,dim=1)
warped_points_x = xA.view(-1)[idx.view(-1)].view(1,1,-1)
warped_points_y = yA.view(-1)[idx.view(-1)].view(1,1,-1)
warped_points_norm = torch.cat((warped_points_x,warped_points_y),dim=1)
return warped_points_norm
def bilinearInterpPointTnf(matches,target_points_norm):
xA,yA,xB,yB=matches
feature_size=int(np.sqrt(xB.shape[-1]))
b,_,N=target_points_norm.size()
X_=xB.view(-1)
Y_=yB.view(-1)
grid = torch.FloatTensor(np.linspace(-1,1,feature_size)).unsqueeze(0).unsqueeze(2)
if xB.is_cuda:
grid=grid.cuda()
if isinstance(xB,Variable):
grid=Variable(grid)
x_minus = torch.sum(((target_points_norm[:,0,:]-grid)>0).long(),dim=1,keepdim=True)-1
x_minus[x_minus<0]=0 # fix edge case
x_plus = x_minus+1
y_minus = torch.sum(((target_points_norm[:,1,:]-grid)>0).long(),dim=1,keepdim=True)-1
y_minus[y_minus<0]=0 # fix edge case
y_plus = y_minus+1
toidx = lambda x,y,L: y*L+x
m_m_idx = toidx(x_minus,y_minus,feature_size)
p_p_idx = toidx(x_plus,y_plus,feature_size)
p_m_idx = toidx(x_plus,y_minus,feature_size)
m_p_idx = toidx(x_minus,y_plus,feature_size)
topoint = lambda idx, X, Y: torch.cat((X[idx.view(-1)].view(b,1,N).contiguous(),
Y[idx.view(-1)].view(b,1,N).contiguous()),dim=1)
P_m_m = topoint(m_m_idx,X_,Y_)
P_p_p = topoint(p_p_idx,X_,Y_)
P_p_m = topoint(p_m_idx,X_,Y_)
P_m_p = topoint(m_p_idx,X_,Y_)
multrows = lambda x: x[:,0,:]*x[:,1,:]
f_p_p=multrows(torch.abs(target_points_norm-P_m_m))
f_m_m=multrows(torch.abs(target_points_norm-P_p_p))
f_m_p=multrows(torch.abs(target_points_norm-P_p_m))
f_p_m=multrows(torch.abs(target_points_norm-P_m_p))
Q_m_m = topoint(m_m_idx,xA.view(-1),yA.view(-1))
Q_p_p = topoint(p_p_idx,xA.view(-1),yA.view(-1))
Q_p_m = topoint(p_m_idx,xA.view(-1),yA.view(-1))
Q_m_p = topoint(m_p_idx,xA.view(-1),yA.view(-1))
warped_points_norm = (Q_m_m*f_m_m+Q_p_p*f_p_p+Q_m_p*f_m_p+Q_p_m*f_p_m)/(f_p_p+f_m_m+f_m_p+f_p_m)
return warped_points_norm
def PointsToUnitCoords(P,im_size):
h,w = im_size[:,0],im_size[:,1]
P_norm = P.clone()
# normalize Y
P_norm[:,0,:] = normalize_axis(P[:,0,:],w.unsqueeze(1).expand_as(P[:,0,:]))
# normalize X
P_norm[:,1,:] = normalize_axis(P[:,1,:],h.unsqueeze(1).expand_as(P[:,1,:]))
return P_norm
def PointsToPixelCoords(P,im_size):
h,w = im_size[:,0],im_size[:,1]
P_norm = P.clone()
# normalize Y
P_norm[:,0,:] = unnormalize_axis(P[:,0,:],w.unsqueeze(1).expand_as(P[:,0,:]))
# normalize X
P_norm[:,1,:] = unnormalize_axis(P[:,1,:],h.unsqueeze(1).expand_as(P[:,1,:]))
return P_norm
``` |
{
"source": "JiwonCocoder/label_transformer",
"score": 2
} |
#### File: label_transformer/dataloader/domainnet.py
```python
from pathlib import Path
import wget
import zipfile
import argparse
from termcolor import cprint
import shutil
import numpy as np
from tqdm import tqdm
from PIL import Image
from torchvision import transforms as T
import sys
sys.path.append('.')
from util import data
from dataloader import SSLDataset, SupDataset
def download(root_dir):
root_dir = Path(root_dir)
domains = {
'clipart': ['http://csr.bu.edu/ftp/visda/2019/multi-source/groundtruth/clipart.zip',
'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/clipart_train.txt',
'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/clipart_test.txt'],
# 'infograph': ['http://csr.bu.edu/ftp/visda/2019/multi-source/infograph.zip',
# 'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/infograph_train.txt',
# 'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/infograph_test.txt'],
'painting': ['http://csr.bu.edu/ftp/visda/2019/multi-source/groundtruth/painting.zip',
'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/painting_train.txt',
'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/painting_test.txt'],
'quickdraw': ['http://csr.bu.edu/ftp/visda/2019/multi-source/quickdraw.zip',
'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/quickdraw_train.txt',
'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/quickdraw_test.txt'],
'real': ['http://csr.bu.edu/ftp/visda/2019/multi-source/real.zip',
'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/real_train.txt',
'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/real_test.txt'],
'sketch': ['http://csr.bu.edu/ftp/visda/2019/multi-source/sketch.zip',
'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/sketch_train.txt',
'http://csr.bu.edu/ftp/visda/2019/multi-source/domainnet/txt/sketch_test.txt']
}
for domain, urls in domains.items():
dst_dir = root_dir/domain
if dst_dir.exists(): shutil.rmtree(dst_dir)
dst_dir.mkdir(parents=True, exist_ok=True)
for url in urls:
cprint(f'Start downloading [{Path(url).name}]', color='blue', attrs=['bold'])
wget.download(url, str(dst_dir))
print()
if Path(url).suffix == '.zip':
file = dst_dir / Path(url).name
with zipfile.ZipFile(file, 'r') as f:
f.extractall(root_dir)
file.unlink()
def dataset_statistics(root_dir, domain, shape):
    # shape: target image size, forwarded to read_img so its resize/crop cache is used consistently
root_dir = Path(root_dir)
xtrain, _ = read(root_dir/domain/f'{domain}_train.txt')
xtest, _ = read(root_dir/domain/f'{domain}_test.txt')
x = np.concatenate([xtrain, xtest])
count = 0
x_sum = np.zeros(3)
x_sqsum = np.zeros(3)
cprint('Start loading images...', color='blue', attrs=['bold'])
for i, xi in enumerate(tqdm(x)):
        xi = np.asarray(read_img(root_dir/xi, shape))
xi = np.transpose(xi, (2, 0, 1)) / 255.
xi = xi.reshape((3, -1))
count += xi.shape[1]
x_sum += np.sum(xi, axis=1)
x_sqsum += np.sum(xi**2, axis=1)
mean = x_sum/count
mean_str = np.array2string(mean, separator=', ', formatter={'float_kind': lambda x: f'{x:.8f}'})
print(f'mean = {mean_str}')
std = np.sqrt((x_sqsum - count*mean**2)/(count-1))
std_str = np.array2string(std, separator=', ', formatter={'float_kind': lambda x: f'{x:.8f}'})
print(f'std = {std_str}')
return mean, std
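# Worked note (editor addition): the statistics above are the usual one-pass estimates,
#   mean = sum(x) / N
#   var  = (sum(x^2) - N * mean^2) / (N - 1)
# computed per colour channel over all pixels of all images, so only a running sum and a
# running sum of squares need to be kept in memory.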
def read(file):
x, y = [], []
with open(file, 'r') as f:
for _, line in enumerate(f):
xi, yi = line.strip().split()
x.append(xi)
y.append(int(yi))
return np.array(x), np.array(y)
def read_img(file, shape):
file_ = list(file.resolve().parts)
file_[-4] = file_[-4]+f'-{shape}'
file_ = Path(*file_)
if file_.exists():
tmp = Image.open(file_)
x = tmp.copy()
tmp.close()
else:
file_.parent.mkdir(parents=True, exist_ok=True)
x = Image.open(file).convert('RGB')
resize = T.Compose([T.Resize(shape, Image.LANCZOS), T.CenterCrop(shape)])
x = resize(x)
x.save(file_)
return x
def split_data(root_dir, tgt_domains, src_domains, r_val, r_lab, r_unlab, w_unlab, rand_seed, r_data):
root_dir = Path(root_dir)
w_unlab = np.array(w_unlab) / np.sum(w_unlab)
if len(set(tgt_domains) & set(src_domains)) != 0:
print('tgt_domains should not overlap with src_domains')
raise AttributeError
# target test
xt, yt = [], []
for i, domain in enumerate(tgt_domains):
xd, yd = read(root_dir / domain / f'{domain}_test.txt')
xt.extend(xd.tolist())
yt.extend(yd.tolist())
for i, xi in enumerate(xt):
xt[i] = root_dir / xi
xt, yt = np.array(xt), np.array(yt)
# target val, target lab, target unlab
xv, yv, xl, yl, xu, yu, Nu = [], [], [], [], [], [], 0
for i, domain in enumerate(tgt_domains):
xd, yd = read(root_dir / domain / f'{domain}_train.txt')
# target val
if r_val is not None:
(xvd, yvd), (xd, yd) = data.split_data(xd.copy(), yd.copy(), rand_seed, r_val)
xv.extend(xvd.tolist())
yv.extend(yvd.tolist())
# target lab
(xld, yld), (xud, yud) = data.split_data(xd.copy(), yd.copy(), rand_seed, r_lab)
xl.extend(xld.tolist())
yl.extend(yld.tolist())
# target unlab
(xdu, ydu), (xres, _) = data.split_data(xud.copy(), yud.copy(), rand_seed, 1.-r_unlab)
xu.extend(xdu.tolist())
yu.extend(ydu.tolist())
Nu += len(xres)
if r_val is not None:
for i, xi in enumerate(xv):
xv[i] = root_dir / xi
xv, yv = np.array(xv), np.array(yv)
else:
xv, yv = xt, yt
for i, xi in enumerate(xl):
xl[i] = root_dir / xi
xl, yl = np.array(xl), np.array(yl)
# source unlab
for i, domain in enumerate(src_domains):
xd, yd = read(root_dir / domain / f'{domain}_train.txt')
Ndu = int(round(Nu * w_unlab[i]))
xd, yd = data.split_data(xd.copy(), yd.copy(), rand_seed, Ndu)[0]
xu.extend(xd.tolist())
yu.extend(yd.tolist())
for i, xi in enumerate(xu):
xu[i] = root_dir / xi
xu, yu = np.array(xu), np.array(yu)
# reduce data
if r_data is not None:
xl, yl = data.split_data(xl.copy(), yl.copy(), rand_seed, r_data)[0]
xu, yu = data.split_data(xu.copy(), yu.copy(), rand_seed, r_data)[0]
return xl, yl, xu, xv, yv, xt, yt
class DomainNetSSL(SSLDataset):
def read_x(self, idx):
return read_img(self.x[idx], self.shape)
@staticmethod
def split_data(root_dir, tgt_domains, src_domains, r_val, r_lab, r_unlab, w_unlab, rand_seed, r_data=None):
return split_data(root_dir, sorted(tgt_domains), sorted(src_domains), r_val, r_lab, r_unlab, w_unlab, rand_seed, r_data)
class DomainNetSup(SupDataset):
def read_x(self, idx):
return read_img(self.x[idx], self.shape)
@staticmethod
def split_data(root_dir, domain, r_val, r_data, rand_seed):
root_dir = Path(root_dir)
# test
xt, yt = read(root_dir / domain / f'{domain}_test.txt')
xt = xt.tolist()
for i, xi in enumerate(xt):
xt[i] = root_dir / xi
xt = np.array(xt)
xd, yd = read(root_dir / domain / f'{domain}_train.txt')
# val
if r_val is not None:
(xv, yv), (xd, yd) = data.split_data(xd.copy(), yd.copy(), rand_seed, r_val)
xv = xv.tolist()
for i, xi in enumerate(xv):
xv[i] = root_dir / xi
xv = np.array(xv)
else:
xv, yv = xt, yt
# train
x, y = data.split_data(xd.copy(), yd.copy(), rand_seed, r_data)[0]
x = x.tolist()
for i, xi in enumerate(x):
x[i] = root_dir / xi
x = np.array(x)
return x, y, xv, yv, xt, yt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download and extract DomainNet')
parser.add_argument('--root_dir', '-r', help='root dir where the DomainNet should be downloaded to')
args = parser.parse_args()
download(args.root_dir)
```
#### File: label_transformer/dataloader/mlcc.py
```python
import pickle
import numpy as np
from PIL import Image
from pathlib import Path
import pdb
import sys
sys.path.append('.')
from util import data
from dataloader import SSLDataset
from torchvision.datasets import ImageFolder
import os
from collections import Counter
class MLCCSSL(SSLDataset):
def read_x(self, idx):
return Image.fromarray(self.x[idx].copy())
@staticmethod
def split_data(root_dir, tgt_domains, src_domains, r_val, r_lab, r_unlab, w_unlab, rand_seed, r_data=None):
mlcc_root_dir = '/data/samsung/'
mlcc_test_dir = os.path.join(mlcc_root_dir, 'labeled/Test')
mlcc_train_dir = os.path.join(mlcc_root_dir, 'labeled/Train')
mlcc_unlabled_dir = os.path.join(mlcc_root_dir, 'unlabeled')
root_dir = Path(root_dir)
print("Debug : 000")
# test
# file = root_dir/'test_batch'
# batch = pickle.load(open(file, 'rb'), encoding='latin1')
# pdb.set_trace()
xt = np.array([np.array(tmp_x[0]) for tmp_x in
ImageFolder(root=mlcc_test_dir)])
yt = np.array([tmp[1] for tmp in ImageFolder(root=mlcc_test_dir)],
dtype=np.int)
from sklearn.utils import shuffle
xt, yt = shuffle(xt, yt, random_state=0)
print("Debug : 111")
# pdb.set_trace()
# xt = np.transpose(batch['data'].reshape((-1, 3, 32, 32)), (0, 2, 3, 1))
# yt = np.array(batch['labels'], dtype=np.int)
# val, lab, unlab
# files = [root_dir/f'data_batch_{i}' for i in range(1, 6)]
# batches = [pickle.load(open(file, 'rb'), encoding='latin1') for file in files]
# x = [batch['data'].reshape((-1, 3, 32, 32)) for batch in batches]
# x = np.concatenate([np.transpose(xi, (0, 2, 3, 1)) for xi in x])
# y = np.concatenate([np.array(batch['labels'], dtype=np.int) for batch in batches])
# pdb.set_trace()
x = np.array([np.array(tmp_x[0]) for tmp_x in
ImageFolder(root=mlcc_train_dir)])
y = np.array([tmp[1] for tmp in ImageFolder(root=mlcc_train_dir)],
dtype=np.int)
print("Debug : aaa_")
xu = np.array([np.array(tmp_x[0]) for tmp_x in
ImageFolder(root=mlcc_unlabled_dir)])
if r_val is not None:
            # JY: validation set not excluded (see the commented-out alternative below)
(xv, yv), (x, y) = data.split_data(x.copy(), y.copy(), rand_seed, r_val)
# (xv, yv) = data.split_data(x.copy(), y.copy(), rand_seed, r_val)[0]
else:
xv, yv = xt, yt
# (xl, yl), (xu, yu) = data.split_data(x.copy(), y.copy(), rand_seed, r_la, b)
xl, yl = x, y
# reduce data
if r_data is not None:
xu, yu = data.split_data(xu.copy(), np.zeros(len(xu)), rand_seed, r_data)[0]
test_class_count = Counter(yv)   # class histogram of the held-out (val/test) labels
train_class_count = Counter(yl)  # class histogram of the labeled training set
return xl, yl, xu, xv, yv, xt, yt
```
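The MLCC loader above instantiates `ImageFolder` twice per split (once to collect images, once to collect labels), so every directory is walked and decoded two times. Below is a minimal single-pass sketch of the same idea; the helper name is illustrative and the example path is the one hard-coded in the loader.

```python
import numpy as np
from torchvision.datasets import ImageFolder

def load_imagefolder_arrays(root):
    """Collect an ImageFolder split into (images, labels) arrays in one pass.

    `root` must follow the usual root/<class_name>/<image> layout.
    """
    images, labels = [], []
    for img, label in ImageFolder(root=root):  # yields (PIL.Image, int) pairs
        images.append(np.array(img))
        labels.append(label)
    return np.array(images), np.array(labels, dtype=int)

# Example (path taken from the loader above; adjust to your machine):
# x_train, y_train = load_imagefolder_arrays('/data/samsung/labeled/Train')
```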
#### File: label_transformer/dataloader/svhn.py
```python
import scipy.io as sio
import numpy as np
from PIL import Image
from pathlib import Path
import sys
sys.path.append('.')
from util import data
from dataloader import SSLDataset
class SVHNSSL(SSLDataset):
def read_x(self, idx):
return Image.fromarray(self.x[idx].copy())
@staticmethod
def split_data(root_dir, tgt_domains, src_domains, r_val, r_lab, r_unlab, w_unlab, rand_seed, r_data=None):
root_dir = Path(root_dir)
# test
d = sio.loadmat(root_dir / 'test_32x32.mat')
xt = np.transpose(d['X'], (3, 0, 1, 2))
yt = d['y'].reshape(-1).astype(int) - 1 # SVHN labels are 1-10
# val, lab, unlab
d = sio.loadmat(root_dir / 'train_32x32.mat')
x = np.transpose(d['X'], (3, 0, 1, 2))
y = d['y'].reshape(-1).astype(int) - 1 # SVHN labels are 1-10
if r_val is not None:
(xv, yv), (x, y) = data.split_data(x.copy(), y.copy(), rand_seed, r_val)
else:
xv, yv = xt, yt
(xl, yl), (xu, yu) = data.split_data(x.copy(), y.copy(), rand_seed, r_lab)
# reduce data
if r_data is not None:
xu, yu = data.split_data(xu.copy(), yu.copy(), rand_seed, r_data)[0]
return xl, yl, xu, xv, yv, xt, yt
```
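All of the SSL loaders above delegate to `util.data.split_data(x, y, rand_seed, ratio)`, which, judging by the call sites, returns `((x_a, y_a), (x_b, y_b))` with the first chunk holding roughly `ratio` of the shuffled samples. The NumPy sketch below illustrates that assumed behaviour for checking the labeled/unlabeled/validation bookkeeping; it is not the project's actual implementation (which may, for instance, stratify by class).

```python
import numpy as np

def split_data_sketch(x, y, rand_seed, ratio):
    """Shuffle (x, y) and split off the first `ratio` fraction."""
    rng = np.random.RandomState(rand_seed)
    idx = rng.permutation(len(x))
    n_a = int(round(ratio * len(x)))
    a, b = idx[:n_a], idx[n_a:]
    return (x[a], y[a]), (x[b], y[b])

# 10% validation first, then a small labeled subset of what remains,
# mirroring the order of operations in SVHNSSL.split_data above.
x = np.arange(1000).reshape(-1, 1)
y = np.arange(1000) % 10
(xv, yv), (x, y) = split_data_sketch(x, y, rand_seed=0, ratio=0.10)
(xl, yl), (xu, yu) = split_data_sketch(x, y, rand_seed=0, ratio=0.05)
print(len(xv), len(xl), len(xu))  # 100 45 855
```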
#### File: label_transformer/train/featmatch.py
```python
import numpy as np
import os
from termcolor import cprint
import math
from sklearn.cluster import KMeans
import torch
import torch.nn as nn
import matplotlib
import pdb
from torch.cuda import device
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append('.')
from train import ssltrainer
from model import FeatMatch
from loss import common
from util import misc, metric
from util.command_interface import command_interface
from util.reporter import Reporter
from pathlib import Path
from torch.nn import functional as F
def ce_loss_check(logits_p, prob_p, target_index, target_with_class, mask=None):
if mask is None:
mask = torch.ones(len(prob_p), device=prob_p.device).unsqueeze(1)
print(target_index.shape, target_with_class.shape)
print(mask.shape)
print(target_index[0:9], target_with_class[0:9])
print(mask[0:9])
temp = F.cross_entropy(logits_p, target_index, reduction='none') * mask.detach()
temp2 = (target_with_class * -torch.log(F.softmax(logits_p, dim=1))).sum(dim=1) * mask.detach()
temp_mean = torch.mean(temp)
temp2_mean = torch.mean(temp2)
print(temp_mean, temp2_mean)
return temp_mean
class Get_Scalar:
def __init__(self, value, device):
self.value = torch.tensor(value, dtype=torch.float32, device=device, requires_grad=True)
def get_value(self, iter):
return self.value
def __call__(self, iter):
return torch.clamp(self.value, 1e-9, 1.0)
class FeatMatchTrainer(ssltrainer.SSLTrainer):
def __init__(self, args, config):
super().__init__(args, config)
self.fu, self.pu = [], []
self.fp, self.yp, self.lp = None, None, None
self.T = args.temperature #(default: 0.5)
self.p_cutoff= args.p_cutoff
if self.config['loss']['hard_labels'] == "yes":
self.hard_labels = True
elif self.config['loss']['hard_labels'] == "no":
self.hard_labels = False
self.criterion = getattr(common, self.config['loss']['criterion'])
self.hard_ce = getattr(common, 'hard_ce')
self.attr_objs.extend(['fu', 'pu', 'fp', 'yp', 'lp'])
self.load(args.mode)
self.mode = args.mode
self.end_iter = (self.config['train']['pretrain_iters'] + \
2 * self.config['train']['cycle_iters'] + \
self.config['train']['end_iters']) - self.config['train']['end_iters']
print("-------------")
print(self.end_iter)
print("-------------")
def init_model(self):
model = FeatMatch(backbone=self.config['model']['backbone'],
num_classes=self.config['model']['classes'],
devices=self.args.devices,
num_heads=self.config['model']['num_heads'],
amp=self.args.amp,
attention = self.config['model']['attention'], #config added
d_model = self.config['model']['d_model'],
label_prop = self.config['model']['label_prop'],
detach = self.config['model']['detach'],
scaled = self.config['model']['scaled'],
mode = self.args.mode,
finetune_mode = self.config['model']['finetune_mode'],
residual = self.config['model']['residual'],
)
print(f'Use [{self.config["model"]["backbone"]}] model with [{misc.count_n_parameters(model):,}] parameters')
return model
def data_mixup(self, xl, prob_xl, xu, prob_xu, alpha=0.75):
Nl = len(xl)
x = torch.cat([xl, xu], dim=0)
prob = torch.cat([prob_xl, prob_xu], dim=0).detach() #(1728, 10)
idx = torch.randperm(x.shape[0])
x_, prob_ = x[idx], prob[idx] #(1728, 10), (1728, 10)
l = np.random.beta(alpha, alpha)
l = max(l, 1 - l)
x = l * x + (1 - l) * x_
prob = l * prob + (1 - l) * prob_
prob = prob / prob.sum(dim=1, keepdim=True)
xl, xu = x[:Nl], x[Nl:]
probl, probu = prob[:Nl], prob[Nl:]
return xl, probl, xu, probu
def train1(self, xl, yl, xu):
# Forward pass on original data
bsl, bsu, k, c = len(xl), len(xu), xl.size(1), self.config['model']['classes']
x = torch.cat([xl, xu], dim=0).reshape(-1, *xl.shape[2:])
# ((bsl + bsu) x (k+1) , 3, 32, 32) ex. (1728, 3 , 32, 32)
# print("pretraining_stage_input_shape:", x.shape)
logits_x = self.model(x)
# ((bsl + bsu) x (k+1) , 10) ex.(1728, 10)
# print("pretraining_stage_output_shape:", logits_x.shape)
logits_x = logits_x.reshape(bsl + bsu, k, c)
# ((bsl + bsu) x (k+1) , 10) ex.(192, 9, 10)
# print("pretraining_stage_output_shape:", logits_x.shape)
logits_xl, logits_xu = logits_x[:bsl], logits_x[bsl:]
# ex. (64, 9, 10) , (128, 9, 10)
# Compute pseudo label from the weak view of the unlabeled batch, ex. (128, 10)
prob_xu_fake = torch.softmax(logits_xu[:, 0].detach(), dim=1)
# Sharpen with temperature T: a smaller T makes the softmax more peaked, a larger T flatter
prob_xu_fake = prob_xu_fake ** (1. / self.config['transform']['data_augment']['T'])
prob_xu_fake = prob_xu_fake / prob_xu_fake.sum(dim=1, keepdim=True) #(128,10)/(128,1)
#(128,10)
prob_xu_fake = prob_xu_fake.unsqueeze(1).repeat(1, k, 1)
#(128, 9, 10)
# Mixup perturbation
xu = xu.reshape(-1, *xu.shape[2:]) #(576, 3, 32, 32)
xl = xl.reshape(-1, *xl.shape[2:]) #(1152, 3, 32, 32)
prob_xl_gt = torch.zeros(len(xl), c, device=xl.device) #(576, 10)
prob_xl_gt.scatter_(dim=1, index=yl.unsqueeze(1).repeat(1, k).reshape(-1, 1), value=1.)
# index (64, 1) -> (64, 9) -> (576, 1). every dim=0,
xl_mix, probl_mix, xu_mix, probu_mix = self.data_mixup(xl, prob_xl_gt, xu, prob_xu_fake.reshape(-1, c))
#(in) (576, 3, 32, 32), (576,1), (1152, 3, 32, 32), (1152, 1)
# Forward pass on mixed data
Nl = len(xl_mix)
x_mix = torch.cat([xl_mix, xu_mix], dim=0)
logits_x_mix = self.model(x_mix)
logits_xl_mix, logits_xu_mix = logits_x_mix[:Nl], logits_x_mix[Nl:]
# CLF loss
loss_pred = self.criterion(None, probl_mix, logits_xl_mix, None)
# Mixup loss
loss_con = self.criterion(None, probu_mix, logits_xu_mix, None)
# Graph loss
loss_graph = torch.tensor(0.0, device=self.default_device)
# Total loss
coeff = self.get_consistency_coeff()
loss = loss_pred + coeff*self.config['loss']['mix']*loss_con
# Prediction
pred_x = torch.softmax(logits_xl[:, 0].detach(), dim=1)
return pred_x, loss, loss_pred, loss_con, loss_graph
def train1_wo_mixup(self, xl, yl, xu):
# Forward pass on original data
bsl, bsu, k, c = len(xl), len(xu), xl.size(1), self.config['model']['classes']
x = torch.cat([xl, xu], dim=0).reshape(-1, *xl.shape[2:])
# ((bsl + bsu) x (k+1) , 3, 32, 32) ex. (1728, 3 , 32, 32)
logits_x = self.model(x)
# ((bsl + bsu) x (k+1) , 10) ex.(1728, 10)
logits_x = logits_x.reshape(bsl + bsu, k, c)
# ((bsl + bsu) x (k+1) , 10) ex.(192, 9, 10)
logits_xl, logits_xu = logits_x[:bsl], logits_x[bsl:]
# ex. (64, 9, 10) , (128, 9, 10)
#Labeled#
if self.hard_labels:
target_xl_1D = yl.unsqueeze(1).repeat(1,k).reshape(-1)
loss_sup = self.hard_ce(logits_xl.reshape(-1,c), target_xl_1D)
else:
target_xl_2D = torch.zeros(bsl*k, c, device=xl.device)
target_xl_2D.scatter_(dim=1, index=yl.unsqueeze(1).repeat(1,k).reshape(-1,1), value=1.)
loss_sup = self.criterion(None, target_xl_2D, logits_xl.reshape(-1,c), None)
#Unlabeled#
# Compute pseudo label
prob_xu_weak = torch.softmax(logits_xu[:, 0].detach(), dim=1)
# Generate mask
max_xu_probs, max_xu_idx = torch.max(prob_xu_weak, dim=1)
mask_xu = max_xu_probs.ge(self.p_cutoff).float()  # keep only confident pseudo labels
mask_xu = mask_xu.unsqueeze(1).repeat(1, k).reshape(-1)
# Prediction-logit
logits_xu = logits_xu.reshape(-1, c)
if self.hard_labels:
target_xu_1D = max_xu_idx.unsqueeze(1).repeat(1, k).reshape(-1)
loss_con_f = self.hard_ce(logits_xu, target_xu_1D, mask_xu)
else:
prob_xu_with_T = torch.softmax(prob_xu_weak / self.T, dim=-1)
prob_xu_with_T = prob_xu_with_T.unsqueeze(1).repeat(1, k, 1).reshape(-1, c)
loss_con_f = self.criterion(None, prob_xu_with_T, logits_xu.reshape(-1, c), None, mask_xu)
loss_con_g = torch.tensor(0.0, device=self.default_device)
# Total loss
coeff = self.get_consistency_coeff()
loss = loss_sup + coeff * self.config['loss']['mix'] * loss_con_f
# Prediction
pred_xf = torch.softmax(logits_xl[:, 0].detach(), dim=1)
pred_xg = torch.zeros(len(logits_xl[:,0]), c, device=self.default_device)
return pred_xg, pred_xf, loss, loss_sup, loss_con_g, loss_con_f
def train2_w_mixup(self, xl, yl, xu):
bsl, bsu, k, c = len(xl), len(xu), xl.size(1), self.config['model']['classes']
x = torch.cat([xl, xu], dim=0).reshape(-1, *xl.shape[2:])
fx = self.model.extract_feature(x)
logits_x = self.model.cls(fx)
logits_x = logits_x.reshape(bsl + bsu, k, c)
logits_xl, logits_xu = logits_x[:bsl], logits_x[bsl:]
# Compute pseudo label(xfu)
prob_xu_fake = torch.softmax(logits_xu[:, 0].detach(), dim=1)
prob_xu_fake = prob_xu_fake ** (1. / self.config['transform']['data_augment']['T'])
prob_xu_fake = prob_xu_fake / prob_xu_fake.sum(dim=1, keepdim=True)
prob_xu_fake = prob_xu_fake.unsqueeze(1).repeat(1, k, 1)
#compute label(xfl)
prob_xl_gt = torch.zeros(len(xl), c, device=xl.device) #(576, 10)
prob_xl_gt.scatter_(dim=1, index=yl.unsqueeze(1).repeat(1, k).reshape(-1, 1), value=1.)
#img_for_mixup
xu = xu.reshape(-1, *xu.shape[2:]) #(576, 3, 32, 32)
xl = xl.reshape(-1, *xl.shape[2:]) #(1152, 3, 32, 32)
# pdb.set_trace()
xl_mix, probl_mix, xu_mix, probu_mix = self.data_mixup(xl, prob_xl_gt, xu, prob_xu_fake.reshape(-1, c))
# Forward pass on mixed data
Nl = len(xl_mix)
x_mix = torch.cat([xl_mix, xu_mix], dim=0)
prob_mix = torch.cat([probl_mix, probu_mix], dim=0)
logits_xg_mix, logits_xf_mix, fx, fxg = self.model(x_mix, prob_mix)
logits_xg_mix = logits_xg_mix.reshape(bsl + bsu, k, c)
logits_xf_mix = logits_xf_mix.reshape(bsl + bsu, k, c)
logits_xgl_mix, logits_xgu_mix = logits_xg_mix[:bsl], logits_xg_mix[bsl:]
logits_xfl_mix, logits_xfu_mix = logits_xf_mix[:bsl], logits_xf_mix[bsl:]
# pdb.set_trace()
# CLF1 loss
loss_pred = self.criterion(None, probl_mix, logits_xgl_mix.reshape(-1,c), None)
# Mixup loss
loss_con = self.criterion(None, probu_mix, logits_xgu_mix.reshape(-1,c), None)
# Graph loss
loss_graph = self.criterion(None, probu_mix, logits_xfu_mix.reshape(-1, c), None)
# Total loss
coeff = self.get_consistency_coeff()
loss = loss_pred + coeff * (self.config['loss']['mix'] * loss_con + self.config['loss']['graph'] * loss_graph)
# Prediction
pred_x = torch.softmax(logits_xl[:, 0].detach(), dim=1)
return pred_x, loss, loss_pred, loss_con, loss_graph
def train2(self, xl, yl, xu):
bsl, bsu, k, c = len(xl), len(xu), xl.size(1), self.config['model']['classes']
x = torch.cat([xl, xu], dim=0).reshape(-1, *xl.shape[2:])
logits_xg, logits_xf, fx, fxg = self.model(x, self.fp)
logits_xg = logits_xg.reshape(bsl + bsu, k, c)
logits_xf = logits_xf.reshape(bsl + bsu, k, c)
logits_xgl, logits_xgu = logits_xg[:bsl], logits_xg[bsl:]
logits_xfl, logits_xfu = logits_xf[:bsl], logits_xf[bsl:]
# Compute pseudo label(g)
prob_xgu_fake = torch.softmax(logits_xgu[:, 0].detach(), dim=1)
prob_xgu_fake = prob_xgu_fake ** (1. / self.config['transform']['data_augment']['T'])
prob_xgu_fake = prob_xgu_fake / prob_xgu_fake.sum(dim=1, keepdim=True)
prob_xgu_fake = prob_xgu_fake.unsqueeze(1).repeat(1, k, 1)
# Compute pseudo label(f)
prob_xfu_fake = torch.softmax(logits_xfu[:, 0].detach(), dim=1)
prob_xfu_fake = prob_xfu_fake ** (1. / self.config['transform']['data_augment']['T'])
prob_xfu_fake = prob_xfu_fake / prob_xfu_fake.sum(dim=1, keepdim=True)
prob_xfu_fake = prob_xfu_fake.unsqueeze(1).repeat(1, k, 1)
# pdb.set_trace()
# Mixup perturbation
xu = xu.reshape(-1, *xu.shape[2:])
xl = xl.reshape(-1, *xl.shape[2:])
prob_xl_gt = torch.zeros(len(xl), c, device=xl.device)
prob_xl_gt.scatter_(dim=1, index=yl.unsqueeze(1).repeat(1, k).reshape(-1, 1), value=1.)
xl_mix, probl_mix, xu_mix, probu_mix = self.data_mixup(xl, prob_xl_gt, xu, prob_xfu_fake.reshape(-1, c))
# Forward pass on mixed data
Nl = len(xl_mix)
x_mix = torch.cat([xl_mix, xu_mix], dim=0)
logits_xg_mix, logits_xf_mix, _, _, _ = self.model(x_mix, self.fp)
logits_xgl_mix, logits_xgu_mix = logits_xg_mix[:Nl], logits_xg_mix[Nl:]
logits_xfl_mix, logits_xfu_mix = logits_xf_mix[:Nl], logits_xf_mix[Nl:]
# CLF1 loss
loss_pred = self.criterion(None, probl_mix, logits_xgl_mix, None)
# Mixup loss
loss_con = self.criterion(None, probu_mix, logits_xgu_mix, None)
# Graph loss
loss_graph = self.criterion(None, probu_mix, logits_xfu_mix, None)
# Total loss
coeff = self.get_consistency_coeff()
loss = loss_pred + coeff * (self.config['loss']['mix'] * loss_con + self.config['loss']['graph'] * loss_graph)
# Prediction
pred_x = torch.softmax(logits_xgl[:, 0].detach(), dim=1)
return pred_x, loss, loss_pred, loss_con, loss_graph
def train2_wo_mixup(self, xl, yl, xu):
bsl, bsu, k, c = len(xl), len(xu), xl.size(1), self.config['model']['classes']
x = torch.cat([xl, xu], dim=0).reshape(-1, *xl.shape[2:])
logits_xg, logits_xf, fx, fxg = self.model(x)
logits_xg = logits_xg.reshape(bsl + bsu, k, c)
logits_xf = logits_xf.reshape(bsl + bsu, k, c)
logits_xgl, logits_xgu = logits_xg[:bsl], logits_xg[bsl:]
logits_xfl, logits_xfu = logits_xf[:bsl], logits_xf[bsl:]
#Labeled#
# [Hard & Soft]
if self.hard_labels:
target_xgl_1D = yl.unsqueeze(1).repeat(1, k).reshape(-1)
loss_sup_g = self.hard_ce(logits_xgl.reshape(-1,c), target_xgl_1D)
else:
target_xgl_2D = torch.zeros(bsl*k, c, device=xl.device)
target_xgl_2D.scatter_(dim=1, index=yl.unsqueeze(1).repeat(1, k).reshape(-1, 1), value=1.)
loss_sup_g = self.criterion(None, target_xgl_2D, logits_xgl.reshape(-1, c), None)
#Unlabeled#
# [Hard & Soft]
# Compute pseudo label(g)
prob_xgu_weak = torch.softmax(logits_xgu[:, 0].detach(), dim=1)
#Generate mask (g)
max_xgu_probs, max_xgu_idx = torch.max(prob_xgu_weak, dim=1) # bu
mask_xgu = max_xgu_probs.ge(self.p_cutoff).float() # bu
mask_xgu = mask_xgu.unsqueeze(1).repeat(1, k).reshape(-1)
#Prediction-logit(g & f)
logits_xgu = logits_xgu.reshape(-1, c)
# logits_xfu = logits_xfu.reshape(-1, c)
# reference: https://github.com/LeeDoYup/FixMatch-pytorch/blob/0f22e7f7c63396e0a0839977ba8101f0d7bf1b04/models/fixmatch/fixmatch_utils.py
if self.hard_labels:
#pseudo_labeling
#target:(bsu*k, c)
# target_xgu_2D = torch.zeros(bsu*k, c, device=xu.device)
# target_xgu_2D.scatter_(dim=1, index=max_xgu_idx.unsqueeze(1).repeat(1, k).reshape(-1, 1), value=1.)
#(target_xgu_1D, mask) : bsu
target_xgu_1D = max_xgu_idx.unsqueeze(1).repeat(1, k).reshape(-1)
#(prev)loss_con
loss_con_g = self.hard_ce(logits_xgu, target_xgu_1D, mask_xgu)
# (prev) Graph loss
loss_con_f = torch.tensor(0.0, device=self.default_device)
else:
#To check softmax_temperature value#
# prob_xgu_weak = prob_xgu_weak ** (1. / T)
# prob_xgu_weak = prob_xgu_weak / prob_xgu_weak.sum(dim=1, keepdim=True)
prob_xgu_with_T = torch.softmax(prob_xgu_weak/self.T, dim = -1)
prob_xgu_with_T = prob_xgu_with_T.unsqueeze(1).repeat(1, k, 1).reshape(-1, c)
loss_con_g = self.criterion(None, prob_xgu_with_T, logits_xgu.reshape(-1, c), None, mask_xgu)
# loss_con_f = self.criterion(None, prob_xgu_with_T, logits_xfu.reshape(-1, c), None, mask_xgu)
loss_con_f = torch.tensor(0.0, device=self.default_device)
# Total loss
coeff = self.get_consistency_coeff()
loss = loss_sup_g + coeff * (self.config['loss']['mix'] * loss_con_g + self.config['loss']['graph'] * loss_con_f)
# Prediction
pred_xg = torch.softmax(logits_xgl[:, 0].detach(), dim=1)
pred_xf = torch.softmax(logits_xfl[:, 0].detach(), dim=1)
return pred_xg, pred_xf, loss, loss_sup_g, loss_con_g, loss_con_f
def finetune_wo_mixup(self, xl, yl, xu):
bsl, bsu, k, c = len(xl), len(xu), xl.size(1), self.config['model']['classes']
x = torch.cat([xl, xu], dim=0).reshape(-1, *xl.shape[2:])
logits_xg, logits_xf, fx, fxg = self.model(x)
logits_xg = logits_xg.reshape(bsl + bsu, k, c)
logits_xf = logits_xf.reshape(bsl + bsu, k, c)
logits_xgl, logits_xgu = logits_xg[:bsl], logits_xg[bsl:]
logits_xfl, logits_xfu = logits_xf[:bsl], logits_xf[bsl:]
# target#
# target:(bsl*k, c)
if self.hard_labels:
target_xgl_1D = yl.unsqueeze(1).repeat(1, k).reshape(-1)
loss_sup_g = self.hard_ce(logits_xgl.reshape(-1,c), target_xgl_1D)
else:
target_xgl_2D = torch.zeros(bsl*k, c, device=xl.device)
target_xgl_2D.scatter_(dim=1, index=yl.unsqueeze(1).repeat(1, k).reshape(-1, 1), value=1.)
loss_sup_g = self.criterion(None, target_xgl_2D, logits_xgl.reshape(-1, c), None)
# [Hard & Soft]
# Compute pseudo label(g)
prob_xgu_weak = torch.softmax(logits_xgu[:, 0].detach(), dim=1)
# Generate mask (g)
max_xgu_probs, max_xgu_idx = torch.max(prob_xgu_weak, dim=1) # bu
mask_xgu = max_xgu_probs.ge(self.p_cutoff).float() # bu
mask_xgu = mask_xgu.unsqueeze(1).repeat(1, k).reshape(-1)
# Prediction-logit(g & f)
logits_xgu = logits_xgu.reshape(-1, c)
logits_xfu = logits_xfu.reshape(-1, c)
# reference: https://github.com/LeeDoYup/FixMatch-pytorch/blob/0f22e7f7c63396e0a0839977ba8101f0d7bf1b04/models/fixmatch/fixmatch_utils.py
if self.hard_labels:
# pseudo_labeling
# target:(bsu*k, c)
# target_xgu_2D = torch.zeros(bsu*k, c, device=xu.device)
# target_xgu_2D.scatter_(dim=1, index=max_xgu_idx.unsqueeze(1).repeat(1, k).reshape(-1, 1), value=1.)
# (target_xgu_1D, mask) : bsu
target_xgu_1D = max_xgu_idx.unsqueeze(1).repeat(1, k).reshape(-1)
# (prev) loss_con
loss_con_g = self.hard_ce(logits_xgu, target_xgu_1D, mask_xgu)
loss_con_f = self.hard_ce(logits_xfu, target_xgu_1D, mask_xgu)
else:
# To check softmax_temperature value#
# prob_xgu_weak = prob_xgu_weak ** (1. / T)
# prob_xgu_weak = prob_xgu_weak / prob_xgu_weak.sum(dim=1, keepdim=True)
prob_xgu_with_T = torch.softmax(prob_xgu_weak / self.T, dim=-1)
prob_xgu_with_T = prob_xgu_with_T.unsqueeze(1).repeat(1, k, 1).reshape(-1, c)
loss_con_g = self.criterion(None, prob_xgu_with_T, logits_xgu.reshape(-1, c), None, mask_xgu)
loss_con_f = self.criterion(None, prob_xgu_with_T, logits_xfu.reshape(-1, c), None, mask_xgu)
# Total loss
coeff = self.get_consistency_coeff()
loss = loss_sup_g + coeff * (
self.config['loss']['mix'] * loss_con_g + self.config['loss']['graph'] * loss_con_f)
# Prediction
pred_xg = torch.softmax(logits_xgl[:, 0].detach(), dim=1)
pred_xf = torch.softmax(logits_xfl[:, 0].detach(), dim=1)
return pred_xg, pred_xf, loss, loss_sup_g, loss_con_g, loss_con_f
def eval1(self, x, y):
logits_x = self.model(x)
# Compute pseudo label
prob_fake = torch.softmax(logits_x.detach(), dim=1)
prob_fake = prob_fake ** (1. / self.config['transform']['data_augment']['T'])
prob_fake = prob_fake / prob_fake.sum(dim=1, keepdim=True)
# Mixup perturbation
prob_gt = torch.zeros(len(y), self.config['model']['classes'], device=x.device)
prob_gt.scatter_(dim=1, index=y.unsqueeze(1), value=1.)
x_mix, prob_mix, _, _ = self.data_mixup(x, prob_gt, x, prob_fake)
# Forward pass on mixed data
logits_x_mix = self.model(x_mix)
# CLF loss and Mixup loss
loss_con = loss_pred = self.criterion(None, prob_mix, logits_x_mix, None)
# Graph loss
loss_graph = torch.tensor(0.0, device=self.default_device)
# Total loss
coeff = self.get_consistency_coeff()
loss = loss_pred + coeff*self.config['loss']['mix']*loss_con
# Prediction
pred_x = torch.softmax(logits_x.detach(), dim=1)
return pred_x, loss, loss_pred, loss_con, loss_graph
def eval1_wo_mixup(self, x, y):
logits_x = self.model(x)
# Compute pseudo label
prob_fake = torch.softmax(logits_x.detach(), dim=1)
prob_fake = prob_fake ** (1. / self.config['transform']['data_augment']['T'])
prob_fake = prob_fake / prob_fake.sum(dim=1, keepdim=True)
# CLF loss
# Between (pseudo-label) and (model output)
loss_con = loss_pred = self.criterion(None, prob_fake, logits_x, None)
# Graph loss
loss_graph = torch.tensor(0.0, device=self.default_device)
# Total loss
coeff = self.get_consistency_coeff()
loss = loss_pred + coeff*self.config['loss']['mix']*loss_con
# Prediction
pred_x = torch.softmax(logits_x.detach(), dim=1)
return pred_x, loss, loss_pred, loss_con, loss_graph
def eval2(self, x, y):
logits_xg, logits_xf, _, _, _ = self.model(x, self.fp)
# Compute pseudo label
prob_fake = torch.softmax(logits_xg.detach(), dim=1)
prob_fake = prob_fake ** (1. / self.config['transform']['data_augment']['T'])
prob_fake = prob_fake / prob_fake.sum(dim=1, keepdim=True)
# Mixup perturbation
prob_gt = torch.zeros(len(y), self.config['model']['classes'], device=x.device)
prob_gt.scatter_(dim=1, index=y.unsqueeze(1), value=1.)
x_mix, prob_mix, _, _ = self.data_mixup(x, prob_gt, x, prob_fake)
# Forward pass on mixed data
logits_xg_mix, logits_xf_mix, _, _, _ = self.model(x_mix, self.fp)
# CLF loss and Mixup loss
loss_con = loss_pred = self.criterion(None, prob_mix, logits_xg_mix, None)
# Graph loss
loss_graph = self.criterion(None, prob_mix, logits_xf_mix, None)
# Total loss
coeff = self.get_consistency_coeff()
loss = loss_pred + coeff*(self.config['loss']['mix']*loss_con + self.config['loss']['graph']*loss_graph)
# Prediction
pred_xf = torch.softmax(logits_xf.detach(), dim=1)
pred_xg = torch.softmax(logits_xg.detach(), dim=1)
return pred_xg, pred_xf, loss, loss_pred, loss_con, loss_graph
def eval2_wo_mixup(self, x, y):
# encoder clf output, transformer clf output
# FeatMatch
logits_xg, logits_xf, fx, fxg = self.model(x)
# Compute pseudo label
prob_fake = torch.softmax(logits_xg.detach(), dim=1)
prob_fake = prob_fake ** (1. / self.config['transform']['data_augment']['T'])
prob_fake = prob_fake / prob_fake.sum(dim=1, keepdim=True)
# Forward pass on mixed data
loss_con = loss_pred = self.criterion(None, prob_fake, logits_xg, None)
# Con_f loss
loss_graph = self.criterion(None, prob_fake, logits_xf, None)
# Total loss
coeff = self.get_consistency_coeff()
loss = loss_pred + coeff*(self.config['loss']['mix']*loss_con + self.config['loss']['graph']*loss_graph)
# Prediction
pred_xg = torch.softmax(logits_xg.detach(), dim=1)
pred_xf = torch.softmax(logits_xf.detach(), dim=1)
return pred_xg, pred_xf, loss, loss_pred, loss_con, loss_graph
# caller: trainer.py - def train 406: with amp.autocast
def forward_train(self, data):
self.model.train()
xl = data[0].reshape(-1, *data[0].shape[2:])
xl = self.Tnorm(xl.to(self.default_device)).reshape(data[0].shape)
yl = data[1].to(self.default_device)
xu = data[2].reshape(-1, *data[2].shape[2:])
xu = self.Tnorm(xu.to(self.default_device)).reshape(data[2].shape)
# T = torch.clamp(self.T_origin, 1e-9, 1.0)
# p_cutoff = torch.clamp(self.p_cutoff_origin, 1e-9, 1.0)
# fast-debugging overrides (left commented out so the configured schedule is used)
# self.config['train']['pretrain_iters'] = 20
# self.end_iter = 40
# #hyper_params for update
# T = self.t_fn(self.curr_iter)
# p_cutoff = self.p_fn(self.curr_iter)
# For debugging the training stage: each stage is handled separately below.
# Transformer fixed + wo_mixup fixed.
# (algo_sequence) 1st: set the model mode, 2nd: run the matching train step.
if self.curr_iter < self.config['train']['pretrain_iters']:
self.model.set_mode('pretrain')
pred_xg, pred_xf, loss, loss_sup, loss_con_g, loss_con_f = self.train1_wo_mixup(xl, yl, xu)
elif self.curr_iter < self.end_iter:
self.model.set_mode('finetune')
# Changed for no detach
pred_xg, pred_xf, loss, loss_sup, loss_con_g, loss_con_f = self.finetune_wo_mixup(xl, yl, xu)
else:
self.model.set_mode('finetune')
pred_xg, pred_xf, loss, loss_sup, loss_con_g, loss_con_f = self.finetune_wo_mixup(xl, yl, xu)
results = {
'y_pred': torch.max(pred_xf, dim=1)[1].detach().cpu().numpy(),
'y_pred_agg': torch.max(pred_xg, dim=1)[1].detach().cpu().numpy(),
'y_true': yl.cpu().numpy(),
'loss': {
'all': loss.detach().cpu().item(),
'sup': loss_sup.detach().cpu().item(),
'con_g': loss_con_g.detach().cpu().item(),
'con_f': loss_con_f.detach().cpu().item()
},
}
# if self.mode != 'finetune':
# if self.config['model']['attention'] == "no":
# self.model.set_mode('pretrain')
# if self.config['model']['mixup'] == 'no':
# pred_xf, loss, loss_pred, loss_con, loss_graph = self.train1_wo_mixup(xl, yl, xu)
# elif self.config['model']['mixup'] == 'yes':
# pred_xf, loss, loss_pred, loss_con, loss_graph = self.train1(xl, yl, xu)
# pred_xg = torch.tensor(0.0, device=self.default_device)
#
# elif self.config['model']['attention'] == "Transformer":
# if self.curr_iter < self.config['train']['pretrain_iters']:
# self.model.set_mode('pretrain')
# if self.config['model']['mixup'] == 'yes':
# pred_xf, loss, loss_pred, loss_con, loss_graph = self.train1(xl, yl, xu)
# elif self.config['model']['mixup'] == 'no':
# pred_xf, loss, loss_pred, loss_con, loss_graph = self.train1_wo_mixup(xl, yl, xu)
# pred_xg = torch.tensor(0.0, device=self.default_device)
# # pred_x, (x1_weak_label's softmax)
# # loss, (total loss)
# # loss_pred, (log loss for xl_mixup) loss_con, (log loss for xu_mixup),
# # loss_graph = 0.0
# elif self.curr_iter == self.config['train']['pretrain_iters']:
# self.model.set_mode('train')
# if self.config['model']['mixup'] == 'yes':
# # self.extract_fp()
# pred_xg, pred_xf, loss, loss_pred, loss_con, loss_graph = self.train2(xl, yl, xu)
# elif self.config['model']['mixup'] == 'no':
# pred_xg, pred_xf, loss, loss_pred, loss_con, loss_graph = self.train2_wo_mixup(xl, yl, xu, T,
# p_cutoff)
# else:
# self.model.set_mode('train')
# # if self.curr_iter % self.config['train']['sample_interval'] == 0:
# if self.config['model']['mixup'] == 'yes':
# # self.extract_fp()
# pred_xg, pred_xf, loss, loss_pred, loss_con, loss_graph = self.train2(xl, yl, xu)
# elif self.config['model']['mixup'] == 'no':
# print("train2_wo_mixup")
# pred_xg, pred_xf, loss, loss_pred, loss_con, loss_graph = self.train2_wo_mixup(xl, yl, xu, T,
# p_cutoff)
# else:
# self.model.set_mode('finetune')
# if self.config['model']['mixup'] == 'no':
# print("finetune_wo_mixup")
# # xl (bs, k, c, h, w)
# # yl (bs)
# # xu (bu, k, c, h, w)
# pred_xg, pred_xf, loss, loss_pred, loss_con, loss_graph = self.finetune_wo_mixup(xl, yl, xu, T,
# p_cutoff)
return loss, results
# Deprecated
# def forward_finetune(self, data):
# self.model.train()
# xl = data[0].reshape(-1, *data[0].shape[2:])
# xl = self.Tnorm(xl.to(self.default_device)).reshape(data[0].shape)
#
# yl = data[1].to(self.default_device)
# xu = data[2].reshape(-1, *data[2].shape[2:])
# xu = self.Tnorm(xu.to(self.default_device)).reshape(data[2].shape)
# #fast debugging
#
# self.model.set_mode('finetune')
# if self.config['model']['mixup'] == 'no':
# print("finetune_wo_mixup")
# #xl (bs, k, c, h, w)
# #yl (bs)
# #xu (bu, k, c, h, w)
# pred_xg, pred_xf, loss, loss_pred, loss_con, loss_graph = self.finetune_wo_mixup(xl, yl, xu)
# results = {
# 'y_pred': torch.max(pred_xf, dim=1)[1].detach().cpu().numpy(),
# 'y_pred_agg': torch.max(pred_xg, dim=1)[1].detach().cpu().numpy(),
# 'y_true': yl.cpu().numpy(),
# 'loss': {
# 'all': loss.detach().cpu().item(),
# 'pred': loss_pred.detach().cpu().item(),
# 'con': loss_con.detach().cpu().item(),
# 'graph': loss_graph.detach().cpu().item()
# },
# }
#
# return loss, results
def forward_eval(self, data):
self.model.eval()
x = self.Tnorm(data[0].to(self.default_device))
y = data[1].to(self.default_device)
# if self.config['model']['attention'] == "no":
# self.model.set_mode('pretrain')
# pred_xf, loss, loss_pred, loss_con, loss_graph = self.eval1_wo_mixup(x, y)
# pred_xg = torch.tensor(0.0, device=self.default_device)
# elif self.config['model']['attention'] == "Transformer":
# if self.curr_iter < self.config['train']['pretrain_iters']:
# self.model.set_mode('pretrain')
# if self.config['model']['mixup'] =='yes':
# pred_xf, loss, loss_pred, loss_con, loss_graph = self.eval1(x, y)
# elif self.config['model']['mixup'] =='no':
# pred_xf, loss, loss_pred, loss_con, loss_graph = self.eval1_wo_mixup(x, y)
# # pred_xg: Transformer Output, pred_xf: Encoder Clf Output
# pred_xg = torch.tensor(0.0, device=self.default_device)
# else:
# FIXME: does the model need a test mode?
# self.model.set_mode('train')
# if self.config['model']['mixup'] == 'yes':
# pred_xg, pred_xf, loss, loss_pred, loss_con, loss_graph = self.eval2(x,y)
# elif self.config['model']['mixup'] == 'no':
# pred_xg, pred_xf, loss, loss_pred, loss_con, loss_graph = self.eval2_wo_mixup(x, y)
if self.curr_iter > self.config['train']['pretrain_iters']:
self.model.set_mode('train')
pred_xg, pred_xf, loss, loss_pred, loss_con, loss_graph = self.eval2_wo_mixup(x, y)
elif self.curr_iter <= self.config['train']['pretrain_iters']:
self.model.set_mode('pretrain')
pred_xf, loss, loss_pred, loss_con, loss_graph = self.eval1_wo_mixup(x, y)
pred_xg = torch.tensor(0.0, device=self.default_device)
results = {
'y_pred': torch.max(pred_xf, dim=1)[1].detach().cpu().numpy(),
'y_pred_agg': torch.max(pred_xg, dim=1)[1].detach().cpu().numpy() if not pred_xg.shape==torch.Size([]) else np.zeros_like(pred_xf[:,0].detach().cpu().numpy()),
'y_true': y.cpu().numpy(),
'loss': {
'all': loss.detach().cpu().item(),
'pred': loss_pred.detach().cpu().item(),
'con': loss_con.detach().cpu().item(),
'graph': loss_graph.detach().cpu().item()
}
}
return results
if __name__ == '__main__':
args, config, save_root = command_interface()
r = args.rand_seed
reporter = Reporter(save_root, args)
for i in range(args.iters):
args.rand_seed = r + i
cprint(f'Run iteration [{i+1}/{args.iters}] with random seed [{args.rand_seed}]', attrs=['bold'])
setattr(args, 'save_root', save_root/f'run{i}')
if args.mode == 'resume' and not args.save_root.exists():
args.mode = 'new'
args.save_root.mkdir(parents=True, exist_ok=True)
trainer = FeatMatchTrainer(args, config)
if args.mode != 'test':
trainer.train()
acc_val, acc_test, acc_agg_val, acc_agg_test, acc_loaded = trainer.test() # loaded_Acc
print(f"Val Acc: {acc_val:.4f}, Test Acc: {acc_test:.4f}, Loaded Acc: {acc_loaded:.4f}, Val Agg Acc: {acc_agg_val:.4f}, Test Agg Acc: {acc_agg_test:.4f}")
# elif args.mode == 'finetune':
# trainer.train()
acc_median = metric.median_acc(os.path.join(args.save_root, 'results.txt'))
reporter.record(acc_val, acc_test, acc_median)
with open(args.save_root/'final_result.txt', 'w') as file:
file.write(f'Val acc: {acc_val*100:.2f} %\n')
file.write(f'Test acc: {acc_test*100:.2f} %\n')
file.write(f'Median acc: {acc_median*100:.2f} %\n')
reporter.report()
``` |
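For reference, the `data_mixup` step in `FeatMatchTrainer` is easy to sanity-check in isolation. The sketch below restates its logic on dummy random tensors (no real images), keeping the Beta-sampled mixing weight at `l >= 0.5` so each sample dominates its own mix.

```python
import numpy as np
import torch

def mixup_sketch(xl, pl, xu, pu, alpha=0.75):
    """MixUp over the concatenated labeled+unlabeled batch, as in data_mixup above."""
    n_l = len(xl)
    x = torch.cat([xl, xu], dim=0)
    p = torch.cat([pl, pu], dim=0)
    idx = torch.randperm(len(x))          # random partner for every sample
    lam = np.random.beta(alpha, alpha)
    lam = max(lam, 1.0 - lam)             # keep the original sample dominant
    x_mix = lam * x + (1.0 - lam) * x[idx]
    p_mix = lam * p + (1.0 - lam) * p[idx]
    p_mix = p_mix / p_mix.sum(dim=1, keepdim=True)
    return x_mix[:n_l], p_mix[:n_l], x_mix[n_l:], p_mix[n_l:]

xl, xu = torch.randn(4, 3, 8, 8), torch.randn(8, 3, 8, 8)
pl = torch.softmax(torch.randn(4, 10), dim=1)
pu = torch.softmax(torch.randn(8, 10), dim=1)
print([t.shape for t in mixup_sketch(xl, pl, xu, pu)])
```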
{
"source": "JiwonCocoder/matching1",
"score": 3
} |
#### File: matching1/lib/im_pair_dataset.py
```python
from __future__ import print_function, division
import os
import torch
from torch.autograd import Variable
from torch.utils.data import Dataset
from skimage import io, data
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from lib.transformation import AffineTnf
class ImagePairDataset(Dataset):
"""
Image pair dataset used for weak supervision
Args:
csv_file (string): Path to the csv file with image names and transformations.
training_image_path (string): Directory with the images.
output_size (2-tuple): Desired output size
transform (callable): Transformation for post-processing the training pair (e.g. image normalization)
"""
def __init__(self, dataset_csv_path, dataset_csv_file, dataset_image_path, dataset_size=0,output_size=(240,240),transform=None,random_crop=False):
self.random_crop=random_crop
self.out_h, self.out_w = output_size
self.train_data = pd.read_csv(os.path.join(dataset_csv_path,dataset_csv_file))
if dataset_size is not None and dataset_size!=0:
dataset_size = min((dataset_size,len(self.train_data)))
self.train_data = self.train_data.iloc[0:dataset_size,:]
self.img_A_names = self.train_data.iloc[:,0]
self.img_B_names = self.train_data.iloc[:,1]
self.set = self.train_data.iloc[:,2].values
self.flip = self.train_data.iloc[:, 3].values.astype('int')
self.dataset_image_path = dataset_image_path
self.transform = transform
# no cuda, as the dataset is called from CPU threads in the dataloader and would produce a conflict
self.affineTnf = AffineTnf(out_h=self.out_h, out_w=self.out_w, use_cuda = False)
def __len__(self):
return len(self.img_A_names)
def __getitem__(self, idx):
# get pre-processed images
image_A,im_size_A = self.get_image(self.img_A_names,idx,self.flip[idx])
image_B,im_size_B = self.get_image(self.img_B_names,idx,self.flip[idx])
# test_warpedA = image_A.squeeze(0)
# test_warpedA = image_A.numpy().astype(int).transpose(1,2,0)
# io.imshow(test_warpedA)
# plt.show()
# test_warpedB = image_B.squeeze(0)
# test_warpedB = image_B.numpy().astype(int).transpose(1,2,0)
# io.imshow(test_warpedB)
# plt.show()
image_set = self.set[idx]
sample = {'source_image': image_A, 'target_image': image_B, 'source_im_size': im_size_A, 'target_im_size': im_size_B, 'set':image_set}
if self.transform:
sample = self.transform(sample)
return sample
def get_image(self,img_name_list,idx,flip):
img_name = os.path.join(self.dataset_image_path, img_name_list.iloc[idx])
image = io.imread(img_name)
# if grayscale convert to 3-channel image
if image.ndim==2:
image=np.repeat(np.expand_dims(image,2),axis=2,repeats=3)
# do random crop
if self.random_crop:
h,w,c=image.shape
top=np.random.randint(h/4)
bottom=int(3*h/4+np.random.randint(h/4))
left=np.random.randint(w/4)
right=int(3*w/4+np.random.randint(w/4))
image = image[top:bottom,left:right,:]
# flip horizontally if needed
if flip:
image=np.flip(image,1)
# get image size
im_size = np.asarray(image.shape)
# convert to torch Variable
image = np.expand_dims(image.transpose((2,0,1)),0)
image = torch.Tensor(image.astype(np.float32))
image_var = Variable(image,requires_grad=False)
# Resize image using bilinear sampling with identity affine tnf
image = self.affineTnf(image_var).data.squeeze(0)
im_size = torch.Tensor(im_size.astype(np.float32))
return (image, im_size)
```
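A short usage sketch for the dataset class above, reusing the PF-Pascal paths and the normalization transform that appear in the training script below; it assumes the repository layout and the downloaded PF-Pascal data are in place.

```python
from lib.dataloader import DataLoader          # the repo's modified DataLoader
from lib.im_pair_dataset import ImagePairDataset
from lib.normalization import NormalizeImageDict

dataset = ImagePairDataset(
    dataset_csv_path='datasets/pf-pascal/image_pairs/',
    dataset_csv_file='train_pairs.csv',
    dataset_image_path='datasets/pf-pascal/',
    output_size=(240, 240),
    transform=NormalizeImageDict(['source_image', 'target_image']),
)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=0)
batch = next(iter(loader))
print(batch['source_image'].shape, batch['target_image'].shape)  # (4, 3, 240, 240) each
```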
#### File: JiwonCocoder/matching1/train.py
```python
from __future__ import print_function, division
import os
from os.path import exists, join, basename
from collections import OrderedDict
import numpy as np
import numpy.random
import datetime
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.functional import relu
from torch.utils.data import Dataset
from lib.dataloader import DataLoader # modified dataloader
from lib.model import ImMatchNet
from lib.im_pair_dataset import ImagePairDataset
from lib.normalization import NormalizeImageDict, UnNormalize
from lib.torch_util import save_checkpoint, str_to_bool
from lib.torch_util import BatchTensorToVars, str_to_bool
from image.normalization import normalize_image
import argparse
from torchvision import transforms
from matplotlib import pyplot as plt
from skimage import io,data
# Seed and CUDA
use_cuda = torch.cuda.is_available()
torch.manual_seed(1)
if use_cuda:
torch.cuda.manual_seed(1)
np.random.seed(1)
print('ImMatchNet training script')
# Argument parsing
parser = argparse.ArgumentParser(description='Compute PF Pascal matches')
parser.add_argument('--checkpoint', type=str, default='')
parser.add_argument('--image_size', type=int, default=400)
parser.add_argument('--dataset_image_path', type=str, default='datasets/pf-pascal/', help='path to PF Pascal dataset')
parser.add_argument('--dataset_csv_path', type=str, default='datasets/pf-pascal/image_pairs/', help='path to PF Pascal training csv')
parser.add_argument('--num_epochs', type=int, default=5, help='number of training epochs')
parser.add_argument('--batch_size', type=int, default=16, help='training batch size')
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--ncons_kernel_sizes', nargs='+', type=int, default=[5,5,5], help='kernels sizes in neigh. cons.')
parser.add_argument('--ncons_channels', nargs='+', type=int, default=[16,16,1], help='channels in neigh. cons')
parser.add_argument('--result_model_fn', type=str, default='checkpoint_adam', help='trained model filename')
parser.add_argument('--result-model-dir', type=str, default='trained_models', help='path to trained models folder')
parser.add_argument('--fe_finetune_params', type=int, default=0, help='number of layers to finetune')
args = parser.parse_args()
print(args)
# Create model
print('Creating CNN model...')
model = ImMatchNet(use_cuda=use_cuda,
checkpoint=args.checkpoint,
ncons_kernel_sizes=args.ncons_kernel_sizes,
ncons_channels=args.ncons_channels)
# Set which parts of the model to train
if args.fe_finetune_params>0:
for i in range(args.fe_finetune_params):
for p in model.FeatureExtraction.model[-1][-(i+1)].parameters():
p.requires_grad=True
print('Trainable parameters:')
for i,p in enumerate(filter(lambda p: p.requires_grad, model.parameters())):
print(str(i+1)+": "+str(p.shape))
# Optimizer
print('using Adam optimizer')
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
cnn_image_size=(args.image_size,args.image_size)
Dataset = ImagePairDataset
train_csv = 'train_pairs.csv'
test_csv = 'val_pairs.csv'
normalization_tnf = NormalizeImageDict(['source_image','target_image'])
batch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)
# Dataset and dataloader
dataset = Dataset(transform=normalization_tnf,
dataset_image_path=args.dataset_image_path,
dataset_csv_path=args.dataset_csv_path,
dataset_csv_file = train_csv,
output_size=cnn_image_size)
dataloader = DataLoader(dataset, batch_size=args.batch_size,
shuffle=False,
num_workers=0)
dataset_test = Dataset(transform=normalization_tnf,
dataset_image_path=args.dataset_image_path,
dataset_csv_path=args.dataset_csv_path,
dataset_csv_file=test_csv,
output_size=cnn_image_size)
dataloader_test = DataLoader(dataset_test, batch_size=args.batch_size,
shuffle=True, num_workers=4)
# Define checkpoint name
checkpoint_name = os.path.join(args.result_model_dir,
datetime.datetime.now().strftime("%Y-%m-%d_%H:%M")+'_'+args.result_model_fn + '.pth.tar')
print('Checkpoint name: '+checkpoint_name)
# Train
best_test_loss = float("inf")
def weak_loss(model,batch,normalization='softmax',alpha=30):
if normalization is None:
normalize = lambda x: x
elif normalization=='softmax':
normalize = lambda x: torch.nn.functional.softmax(x,1)
elif normalization=='l1':
normalize = lambda x: x/(torch.sum(x,dim=1,keepdim=True)+0.0001)
b = batch['source_image'].size(0)
# positive
#corr4d = model({'source_image':batch['source_image'], 'target_image':batch['target_image']})
corr4d = model(batch)
inv_normalize = transforms.Normalize(
mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
std=[1/0.229, 1/0.224, 1/0.225]
)
# print(corr4d.shape)
# corr4d_torch = torch.tensor(corr4d).float()
# inv_corr4d = inv_normalize(batch['source_image'][0])
# print(inv_corr4d)
source_image = normalize_image(batch['source_image'], forward=False)
source_image = source_image.data.squeeze(0).transpose(0, 1).transpose(1, 2).cpu().numpy()
print("source_image_shape")
print(batch['source_image'].shape, source_image.shape)
print(source_image)
warped_image = normalize_image(corr4d, forward=False)
warped_image = warped_image.data.squeeze(0).transpose(0, 1).transpose(1, 2).cpu().numpy()
print("warped_image_shape")
print(batch['target_image'].shape, warped_image.shape)
print(warped_image)
target_image = normalize_image(batch['target_image'], forward=False)
target_image = target_image.data.squeeze(0).transpose(0, 1).transpose(1, 2).cpu().numpy()
# check if display is available
exit_val = os.system('python -c "import matplotlib.pyplot as plt;plt.figure()" > /dev/null 2>&1')
display_avail = exit_val == 0
if display_avail:
fig, axs = plt.subplots(1, 3)
axs[0].imshow(source_image)
axs[0].set_title('src')
axs[1].imshow(warped_image)
axs[1].set_title('warped')
axs[2].imshow(target_image)
axs[2].set_title('tgt')
print('Showing results. Close figure window to continue')
plt.show()
loss_fn = lambda model,batch: weak_loss(model,batch,normalization='softmax')
# define epoch function
def process_epoch(mode,epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,use_cuda=True,log_interval=50):
epoch_loss = 0
for batch_idx, batch in enumerate(dataloader):
if mode=='train':
optimizer.zero_grad()
tnf_batch = batch_preprocessing_fn(batch)
loss_fn(model,tnf_batch)
if mode=='train':
# NOTE: weak_loss above only visualizes the batch and returns no loss,
# so no backward pass or optimizer step happens here and epoch_loss stays 0.
pass
return epoch_loss
train_loss = np.zeros(args.num_epochs)
test_loss = np.zeros(args.num_epochs)
print('Starting training...')
model.FeatureExtraction.eval()
for epoch in range(1, args.num_epochs+1):
train_loss[epoch-1] = process_epoch('train',epoch,model,loss_fn,optimizer,dataloader,batch_preprocessing_fn,log_interval=1)
test_loss[epoch-1] = process_epoch('test',epoch,model,loss_fn,optimizer,dataloader_test,batch_preprocessing_fn,log_interval=1)
# remember best loss
is_best = test_loss[epoch-1] < best_test_loss
best_test_loss = min(test_loss[epoch-1], best_test_loss)
save_checkpoint({
'epoch': epoch,
'args': args,
'state_dict': model.state_dict(),
'best_test_loss': best_test_loss,
'optimizer' : optimizer.state_dict(),
'train_loss': train_loss,
'test_loss': test_loss,
}, is_best,checkpoint_name)
print('Done!')
``` |
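The training script above builds an `inv_normalize` transform with the standard ImageNet statistics but ends up visualizing through `normalize_image(..., forward=False)` instead. For reference, a small self-contained sketch of the round trip that inverse transform performs:

```python
import torch
from torchvision import transforms

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
inv_normalize = transforms.Normalize(
    mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
    std=[1 / 0.229, 1 / 0.224, 1 / 0.225])

img = torch.rand(3, 240, 240)                    # fake image in [0, 1]
restored = inv_normalize(normalize(img))         # undoes the normalization
print(torch.allclose(img, restored, atol=1e-5))  # True
```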
{
"source": "jiwoncpark/baobab",
"score": 2
} |
#### File: baobab/bnn_priors/base_cosmo_bnn_prior.py
```python
import numpy as np
import baobab.distributions
from astropy.cosmology import FlatLambdaCDM
from abc import ABC, abstractmethod
class BaseCosmoBNNPrior(ABC):
"""Abstract base class for a cosmology-aware BNN prior
"""
def __init__(self, bnn_omega):
self._check_cosmology_config_validity(bnn_omega)
self._define_cosmology(bnn_omega.cosmology)
for cosmo_comp in ['cosmology', 'redshift', 'kinematics']:
setattr(self, cosmo_comp, bnn_omega[cosmo_comp])
self.sample_redshifts = getattr(self, 'sample_redshifts_from_{:s}'.format(self.redshift.model))
def _raise_config_error(self, missing_key, parent_config_key, bnn_prior_class):
"""Convenience function for raising errors related to config values
"""
raise ValueError("{:s} must be specified in the config inside {:s} for {:s}".format(missing_key, parent_config_key, bnn_prior_class))
def _check_cosmology_config_validity(self, bnn_omega):
"""Check whether the config file specified the hyperparameters for all the fields
required for cosmology-aware BNN priors, e.g. cosmology, redshift, galaxy kinematics
"""
required_keys = ['cosmology', 'redshift', 'kinematics']
for possible_missing_key in required_keys:
if possible_missing_key not in bnn_omega:
self._raise_config_error(possible_missing_key, 'bnn_omega', self.__class__.__name__)
def _define_cosmology(self, cosmology_cfg):
"""Set the cosmology, with which to generate all the training samples, based on the config
Parameters
----------
cosmology_cfg : dict
Copy of `cfg.bnn_omega.cosmology`
"""
self.cosmo = FlatLambdaCDM(**cosmology_cfg)
def sample_param(self, hyperparams):
"""Assigns a sampling distribution
"""
dist = hyperparams.pop('dist')
return getattr(baobab.distributions, 'sample_{:s}'.format(dist))(**hyperparams)
def eval_param_pdf(self, eval_at, hyperparams):
"""Assigns and evaluates the PDF
"""
dist = hyperparams.pop('dist')
return getattr(baobab.distributions, 'eval_{:s}_pdf'.format(dist))(**hyperparams)
def sample_redshifts_from_differential_comoving_volume(self, redshifts_cfg):
"""Sample redshifts from the differential comoving volume,
on a grid with the range and resolution specified in the config
Parameters
----------
redshifts_cfg : dict
Copy of `cfg.bnn_omega.redshift`
Returns
-------
tuple
the tuple of floats that are the realized z_lens, z_src
"""
z_grid = np.arange(**redshifts_cfg.grid)
dVol_dz = self.cosmo.differential_comoving_volume(z_grid).value
dVol_dz_normed = dVol_dz/np.sum(dVol_dz)
sampled_z = np.random.choice(z_grid, 2, replace=False, p=dVol_dz_normed)
z_lens = np.min(sampled_z)
z_src = np.max(sampled_z)
while z_src < z_lens + redshifts_cfg.min_diff:
sampled_z = np.random.choice(z_grid, 2, replace=False, p=dVol_dz_normed)
z_lens = np.min(sampled_z)
z_src = np.max(sampled_z)
return z_lens, z_src
def sample_redshifts_from_independent_dist(self, redshifts_cfg):
"""Sample lens and source redshifts from independent distributions, while enforcing that the lens redshift is smaller than source redshift
Parameters
----------
redshifts_cfg : dict
Copy of `cfg.bnn_omega.redshift`
Returns
-------
tuple
the tuple of floats that are the realized z_lens, z_src
"""
z_lens = self.sample_param(redshifts_cfg.z_lens.copy())
z_src = self.sample_param(redshifts_cfg.z_src.copy())
while z_src < z_lens + redshifts_cfg.min_diff:
z_lens = self.sample_param(redshifts_cfg.z_lens.copy())
z_src = self.sample_param(redshifts_cfg.z_src.copy())
return z_lens, z_src
```
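A standalone sketch of what `sample_redshifts_from_differential_comoving_volume` does: weight a redshift grid by the differential comoving volume and draw lens/source redshifts from it, resampling until they are separated by at least `min_diff`. The cosmology, grid bounds, and `min_diff` value below are illustrative, not taken from any particular config.

```python
import numpy as np
from astropy.cosmology import FlatLambdaCDM

cosmo = FlatLambdaCDM(H0=70.0, Om0=0.3)
z_grid = np.arange(0.01, 4.0, 0.01)
dvol_dz = cosmo.differential_comoving_volume(z_grid).value  # Mpc^3 / sr per unit z
p = dvol_dz / dvol_dz.sum()

rng = np.random.default_rng(0)
min_diff = 0.2
z_lens = z_src = 0.0
while z_src < z_lens + min_diff:
    z_lens, z_src = np.sort(rng.choice(z_grid, size=2, replace=False, p=p))
print(z_lens, z_src)
```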
#### File: baobab/configs/parser.py
```python
import os, sys
from datetime import datetime
import warnings
from importlib import import_module
from addict import Dict
import json
from collections import OrderedDict
import lenstronomy.SimulationAPI.ObservationConfig as obs_cfg
class BaobabConfig:
"""Nested dictionary representing the configuration for Baobab data generation
"""
def __init__(self, user_cfg):
"""
Parameters
----------
user_cfg : dict or Dict
user-defined configuration
"""
self.__dict__ = Dict(user_cfg)
if not hasattr(self, 'out_dir'):
# Default out_dir path if not specified
self.out_dir = os.path.join(self.destination_dir, '{:s}_{:s}_prior={:s}_seed={:d}'.format(self.name, self.train_vs_val, self.bnn_prior_class, self.seed))
self.out_dir = os.path.abspath(self.out_dir)
if not hasattr(self, 'checkpoint_interval'):
self.checkpoint_interval = max(100, self.n_data // 100)
self.get_survey_info(self.survey_info, self.psf.type)
self.interpret_magnification_cfg()
self.interpret_kinematics_cfg()
self.log_filename = datetime.now().strftime("log_%m-%d-%Y_%H:%M_baobab.json")
self.log_path = os.path.join(self.out_dir, self.log_filename)
@classmethod
def from_file(cls, user_cfg_path):
"""Alternative constructor that accepts the path to the user-defined configuration python file
Parameters
----------
user_cfg_path : str or os.path object
path to the user-defined configuration python file
"""
dirname, filename = os.path.split(os.path.abspath(user_cfg_path))
module_name, ext = os.path.splitext(filename)
sys.path.insert(0, dirname)
if ext == '.py':
#user_cfg_file = map(__import__, module_name)
#user_cfg = getattr(user_cfg_file, 'cfg')
user_cfg_script = import_module(module_name)
user_cfg = getattr(user_cfg_script, 'cfg').deepcopy()
return cls(user_cfg)
elif ext == '.json':
with open(user_cfg_path, 'r') as f:
user_cfg_str = f.read()
user_cfg = Dict(json.loads(user_cfg_str)).deepcopy()
return cls(user_cfg)
else:
raise NotImplementedError("This extension is not supported.")
def export_log(self):
"""Export the baobab log to the current working directory
"""
with open(self.log_path, 'w') as f:
json.dump(self.__dict__, f)
print("Exporting baobab log to {:s}".format(self.log_path))
def interpret_magnification_cfg(self):
if 'agn_light' not in self.components:
if len(self.bnn_omega.magnification.frac_error_sigma) != 0: # non-empty dictionary
warnings.warn("`bnn_omega.magnification.frac_error_sigma` field is ignored as the images do not contain AGN.")
self.bnn_omega.magnification.frac_error_sigma = 0.0
else:
if 'magnification' not in self.bnn_omega:
self.bnn_omega.magnification.frac_error_sigma = 0.0
elif self.bnn_omega.magnification is None:
self.bnn_omega.magnification.frac_error_sigma = 0.0
if ('magnification' not in self.bnn_omega) and 'agn_light' in self.components:
self.bnn_omega.magnification.frac_error_sigma = 0.0
def interpret_kinematics_cfg(self):
"""Validate the kinematics config
"""
kinematics_cfg = self.bnn_omega.kinematics
if kinematics_cfg.anisotropy_model == 'analytic':
warnings.warn("Since velocity dispersion computation is analytic, any entry other than `sampling_number` in `kinematics.numerics_kwargs` will be ignored.")
def get_survey_info(self, survey_info, psf_type):
"""Fetch the camera and instrument information corresponding to the survey string identifier
"""
sys.path.insert(0, obs_cfg.__path__[0])
survey_module = import_module(survey_info['survey_name'])
survey_class = getattr(survey_module, survey_info['survey_name'])
coadd_years = survey_info['coadd_years'] if 'coadd_years' in survey_info else None
self.survey_object_dict = OrderedDict()
for bp in survey_info['bandpass_list']:
survey_object = survey_class(band=bp, psf_type=psf_type, coadd_years=coadd_years)
# Overwrite ObservationConfig PSF type with user-configured PSF type
if hasattr(self, 'psf'):
survey_object.obs['psf_type'] = self.psf.type
if survey_object.obs['psf_type'] == 'PIXEL':
if hasattr(self, 'psf'):
if hasattr(self.psf, 'kernel_size'):
survey_object.psf_kernel_size = self.psf.kernel_size
else:
raise ValueError("Observation dictionary must specify PSF kernel size if psf_type is PIXEL.")
if hasattr(self.psf, 'which_psf_maps'):
survey_object.which_psf_maps = self.psf.which_psf_maps
else:
raise ValueError("Observation dictionary must specify indices of PSF kernel maps if psf_type is PIXEL.")
else:
raise ValueError("User must supply PSF kwargs in the Baobab config if PSF type is PIXEL.")
else: # 'GAUSSIAN'
survey_object.psf_kernel_size = None
survey_object.which_psf_maps = None
# Override default survey specs with user-specified kwargs
survey_object.camera.update(survey_info['override_camera_kwargs'])
survey_object.obs.update(survey_info['override_obs_kwargs'])
self.survey_object_dict[bp] = survey_object
# Camera dict is same across bands, so arbitrarily take the last band
self.instrument = survey_object.camera
def get_noise_kwargs(self,bandpass):
"""
Return the noise kwargs defined in the baobab config, e.g. for passing to the noise model for online data augmentation
Returns
-------
(dict): A dict containing the noise kwargs to be passed to the noise
model.
(str): The bandpass to pull the noise information for
"""
# Go through the baobab config and pull out the noise kwargs one by one.
noise_kwargs = {}
noise_kwargs.update(self.instrument)
noise_kwargs.update(self.survey_object_dict[bandpass].kwargs_single_band())
return noise_kwargs
```
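A minimal usage sketch for the config class above; the path is hypothetical (any config module exposing a `cfg` addict `Dict`, or a `.json` export of one, works the same way).

```python
from baobab.configs.parser import BaobabConfig

cfg = BaobabConfig.from_file('path/to/my_baobab_config.py')  # hypothetical path
print(cfg.out_dir)                    # resolved output directory
print(list(cfg.survey_object_dict))   # one survey object per bandpass
bp = cfg.survey_info['bandpass_list'][0]
noise_kwargs = cfg.get_noise_kwargs(bp)  # detector + single-band kwargs for a noise model
# cfg.export_log()                    # writes the JSON log into out_dir (dir must exist)
```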
#### File: baobab/postponed_tests/test_noise_tf.py
```python
import os
import random
import unittest
import numpy as np
import tensorflow as tf
from baobab.data_augmentation import get_noise_sigma2_lenstronomy
from baobab.data_augmentation.noise_tf import NoiseModelTF
from baobab.tests.test_data_augmentation.tf_data_utils import generate_simple_tf_record, parse_example, tf_img_size, tf_y_names, tf_data_size
class TestNoiseTF(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Seed randomness
"""
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
np.random.seed(123)
random.seed(123)
cls.img = np.random.randn(3, 3)*3.0 + 6.0
cls.noise_kwargs = dict(
pixel_scale=0.08,
exposure_time=100.0,
magnitude_zero_point=25.9463,
read_noise=10,
ccd_gain=7.0,
sky_brightness=20.1,
seeing=0.6,
num_exposures=1,
psf_type='GAUSSIAN',
kernel_point_source=None,
truncation=5,
#data_count_unit='ADU',
background_noise=None
)
def test_lenstronomy_vs_tf_ADU(self):
"""Compare the lenstronomy and tf noise variance for ADU units
"""
lens_sigma2 = get_noise_sigma2_lenstronomy(self.img, data_count_unit='ADU', **self.noise_kwargs)
img_tf_tensor = tf.cast(self.img, tf.float32)
noise_model_tf = NoiseModelTF(**self.noise_kwargs)
tf_sigma2 = {}
tf_sigma2['sky'] = noise_model_tf.get_sky_noise_sigma2()
tf_sigma2['readout'] = noise_model_tf.get_readout_noise_sigma2()
tf_sigma2['poisson'] = noise_model_tf.get_poisson_noise_sigma2(img_tf_tensor)
np.testing.assert_array_almost_equal(self.img, img_tf_tensor.numpy(), decimal=5, err_msg="image array")
np.testing.assert_almost_equal(lens_sigma2['sky'], tf_sigma2['sky'], decimal=7, err_msg="sky")
np.testing.assert_almost_equal(lens_sigma2['readout'], tf_sigma2['readout'], decimal=7, err_msg="readout")
np.testing.assert_array_almost_equal(lens_sigma2['poisson'], tf_sigma2['poisson'].numpy(), decimal=7, err_msg="poisson")
def test_lenstronomy_vs_tf_electron(self):
"""Compare the lenstronomy and tf noise variance for electron units
"""
lens_sigma2 = get_noise_sigma2_lenstronomy(self.img, data_count_unit='e-', **self.noise_kwargs)
img_tf_tensor = tf.cast(self.img, tf.float32)
noise_model_tf = NoiseModelTF(data_count_unit='e-', **self.noise_kwargs)
tf_sigma2 = {}
tf_sigma2['sky'] = noise_model_tf.get_sky_noise_sigma2()
tf_sigma2['readout'] = noise_model_tf.get_readout_noise_sigma2()
tf_sigma2['poisson'] = noise_model_tf.get_poisson_noise_sigma2(img_tf_tensor)
np.testing.assert_array_almost_equal(self.img, img_tf_tensor.numpy(), decimal=5, err_msg="image array")
np.testing.assert_almost_equal(lens_sigma2['sky'], tf_sigma2['sky'], decimal=7, err_msg="sky")
np.testing.assert_almost_equal(lens_sigma2['readout'], tf_sigma2['readout'], decimal=7, err_msg="readout")
np.testing.assert_array_almost_equal(lens_sigma2['poisson'], tf_sigma2['poisson'].numpy(), decimal=7, err_msg="poisson")
def test_build_tf_dataset(self):
"""Test whether tf.data.Dataset can be instantiated from tf.data.TFRecordDataset with the data augmentation (noise addition) mapping
"""
tf_record_path = os.path.abspath('test_ADU')
batch_size = 2
n_epochs = 3
noise_model_tf = NoiseModelTF(**self.noise_kwargs)
add_noise_func = getattr(noise_model_tf, 'add_noise')
#print(add_noise_func(tf.ones((3, 3), dtype=tf.float32)))
generate_simple_tf_record(tf_record_path, tf_y_names)
tf_dataset = tf.data.TFRecordDataset(tf_record_path).map(parse_example).map(lambda image, label: (add_noise_func(image), label)).repeat(n_epochs).shuffle(buffer_size=tf_data_size + 1).batch(batch_size, drop_remainder=True)
images = [img for img, label in tf_dataset]
labels = [label for img, label in tf_dataset]
size = len(labels)
np.testing.assert_array_equal(images[0].shape, (batch_size, tf_img_size, tf_img_size, 1))
np.testing.assert_array_equal(labels[0].shape, (batch_size, len(tf_y_names)))
np.testing.assert_equal(size, (tf_data_size*n_epochs//2))
# Delete resulting data
if os.path.exists(tf_record_path):
os.remove(tf_record_path)
if __name__ == '__main__':
unittest.main()
```
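The `test_build_tf_dataset` case above exercises the noise-augmentation `map` against a TFRecord file. The same mapping pattern, sketched below with in-memory arrays and a stand-in Gaussian noise function instead of `NoiseModelTF.add_noise`, is often easier to experiment with:

```python
import numpy as np
import tensorflow as tf

def add_noise(image, sigma=0.05):
    # Stand-in for NoiseModelTF.add_noise: additive Gaussian noise only.
    return image + tf.random.normal(tf.shape(image), stddev=sigma)

images = np.random.rand(8, 32, 32, 1).astype(np.float32)
labels = np.random.rand(8, 4).astype(np.float32)

dataset = (tf.data.Dataset.from_tensor_slices((images, labels))
           .map(lambda img, lab: (add_noise(img), lab))
           .shuffle(buffer_size=9)
           .batch(2, drop_remainder=True)
           .repeat(3))
for batch_images, batch_labels in dataset.take(1):
    print(batch_images.shape, batch_labels.shape)  # (2, 32, 32, 1) (2, 4)
```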
#### File: baobab/sim_utils/image_utils.py
```python
import copy
import sys
import numpy as np
# Lenstronomy modules
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.ImSim.image_model import ImageModel
from baobab.sim_utils import mag_to_amp_extended, mag_to_amp_point, get_lensed_total_flux, get_unlensed_total_flux_numerical
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver
from lenstronomy.SimulationAPI.data_api import DataAPI
from lenstronomy.PointSource.point_source import PointSource
from baobab.sim_utils import psf_utils
__all__ = ['Imager', 'Imager2']
class Imager2:
"""Dev-mode class, more flexible than Imager.
Note
----
Accompanying `generate` script doesn't exist yet.
"""
def __init__(self, lens_model_list, src_model_list,
n_pix, pixel_scale,
psf_type, psf_kernel_size=None, which_psf_maps=None,
kwargs_numerics={'supersampling_factor': 1}):
# Define models
self.lens_model = LensModel(lens_model_list=lens_model_list)
self.src_model = LightModel(light_model_list=src_model_list)
#self.ps_model = ps_model
#self.lens_light_model = lens_light_model
# Set detector specs
self.n_pix = n_pix
self.pixel_scale = pixel_scale
self.psf_type = psf_type
self.psf_kernel_size = psf_kernel_size
self.which_psf_maps = which_psf_maps
self.kwargs_numerics = kwargs_numerics
# Initialize kwargs (must be set using setter)
self._survey = None
self._lens_kwargs = None
self._src_kwargs = None
#self._ps_kwargs = None
#self._lens_light_kwargs = None
@property
def survey_kwargs(self):
"""Ordered dict containing detector information. Length is number of
bandpasses. Should be set before the model kwargs.
"""
return self._survey_kwargs
@survey_kwargs.setter
def survey_kwargs(self, survey_kwargs):
survey_name = survey_kwargs['survey_name']
bandpass_list = survey_kwargs['bandpass_list']
coadd_years = survey_kwargs.get('coadd_years')
override_obs_kwargs = survey_kwargs.get('override_obs_kwargs', {})
override_camera_kwargs = survey_kwargs.get('override_camera_kwargs', {})
import lenstronomy.SimulationAPI.ObservationConfig as ObsConfig
from importlib import import_module
sys.path.insert(0, ObsConfig.__path__[0])
SurveyClass = getattr(import_module(survey_name), survey_name)
self._data_api = [] # init
self._image_model = [] # init
for bp in bandpass_list:
survey_obj = SurveyClass(band=bp,
psf_type=self.psf_type,
coadd_years=coadd_years)
# Override as specified in survey_kwargs
survey_obj.camera.update(override_camera_kwargs)
survey_obj.obs.update(override_obs_kwargs)
# This is what we'll actually use
kwargs_detector = survey_obj.kwargs_single_band()
data_api = DataAPI(self.n_pix, **kwargs_detector)
psf_model = psf_utils.get_PSF_model(self.psf_type,
self.pixel_scale,
seeing=kwargs_detector['seeing'],
kernel_size=self.psf_kernel_size,
which_psf_maps=self.which_psf_maps)
image_model_bp = ImageModel(data_api.data_class,
psf_model,
self.lens_model,
self.src_model,
None,
None,
kwargs_numerics=self.kwargs_numerics)
self._data_api.append(data_api)
self._image_model.append(image_model_bp)
@property
def lens_kwargs(self):
return self._lens_kwargs
@lens_kwargs.setter
def lens_kwargs(self, lens_kwargs):
self._lens_kwargs = lens_kwargs
@property
def src_kwargs(self):
return self._src_kwargs
@src_kwargs.setter
def src_kwargs(self, src_kwargs):
for i, data_api_bp in enumerate(self._data_api):
# Convert magnitude to amp recognized by the profile
if 'magnitude' in src_kwargs[i]:
src_kwargs[i] = mag_to_amp_extended([src_kwargs[i]],
self.src_model,
data_api_bp)[0]
self._src_kwargs = src_kwargs
def generate_image(self):
n_filters = len(self._image_model)
img_canvas = np.empty([n_filters, self.n_pix, self.n_pix])
for i, image_model_bp in enumerate(self._image_model):
img = image_model_bp.image(self.lens_kwargs,
self.src_kwargs,
None, None,
lens_light_add=False,
point_source_add=False)
img = np.maximum(0.0, img) # safeguard against negative pixel values
img_canvas[i, :, :] = img
return img_canvas
class Imager:
"""Deterministic utility class for imaging the objects on a pixel grid
Attributes
----------
bnn_omega : dict
copy of `cfg.bnn_omega`
components : list
list of components, e.g. `lens_mass`
"""
def __init__(self, components, lens_mass_model, src_light_model, lens_light_model=None, ps_model=None, kwargs_numerics={'supersampling_factor': 1}, min_magnification=0.0, for_cosmography=False, magnification_frac_err=0.0):
self.components = components
self.kwargs_numerics = kwargs_numerics
self.lens_mass_model = lens_mass_model
self.src_light_model = src_light_model
self.lens_light_model = lens_light_model
self.ps_model = ps_model
self.unlensed_ps_model = PointSource(point_source_type_list=['SOURCE_POSITION'], fixed_magnification_list=[False])
self.lens_eq_solver = LensEquationSolver(self.lens_mass_model)
self.min_magnification = min_magnification
self.for_cosmography = for_cosmography
self.magnification_frac_err = magnification_frac_err
self.img_features = {} # Initialized to store metadata of images, will get updated for each lens
def _set_sim_api(self, num_pix, kwargs_detector, psf_kernel_size, which_psf_maps):
"""Set the simulation API objects
"""
self.data_api = DataAPI(num_pix, **kwargs_detector)
#self.pixel_scale = data_api.pixel_scale
pixel_scale = kwargs_detector['pixel_scale']
psf_model = psf_utils.get_PSF_model(kwargs_detector['psf_type'], pixel_scale, seeing=kwargs_detector['seeing'], kernel_size=psf_kernel_size, which_psf_maps=which_psf_maps)
# Set the precision level of lens equation solver
self.min_distance = 0.05
self.search_window = pixel_scale*num_pix
self.image_model = ImageModel(self.data_api.data_class, psf_model, self.lens_mass_model, self.src_light_model, self.lens_light_model, self.ps_model, kwargs_numerics=self.kwargs_numerics)
if 'agn_light' in self.components:
self.unlensed_image_model = ImageModel(self.data_api.data_class, psf_model, None, self.src_light_model, None, self.unlensed_ps_model, kwargs_numerics=self.kwargs_numerics)
else:
self.unlensed_image_model = ImageModel(self.data_api.data_class, psf_model, None, self.src_light_model, None, None, kwargs_numerics=self.kwargs_numerics)
def _load_kwargs(self, sample):
"""Generate an image from provided model and model parameters
Parameters
----------
sample : dict
model parameters sampled by a bnn_prior object
"""
self._load_lens_mass_kwargs(sample['lens_mass'], sample['external_shear'])
self._load_src_light_kwargs(sample['src_light'])
if 'lens_light' in self.components:
self._load_lens_light_kwargs(sample['lens_light'])
else:
self.kwargs_lens_light = None
if 'agn_light' in self.components:
self._load_agn_light_kwargs(sample)
else:
self.kwargs_ps = None
self.kwargs_unlensed_unmagnified_amp_ps = None
def _load_lens_mass_kwargs(self, lens_mass_sample, external_shear_sample):
self.kwargs_lens_mass = [lens_mass_sample, external_shear_sample]
def _load_src_light_kwargs(self, src_light_sample):
kwargs_src_light = [src_light_sample]
# Convert from mag to amp
self.kwargs_src_light = mag_to_amp_extended(kwargs_src_light, self.src_light_model, self.data_api)
def _load_lens_light_kwargs(self, lens_light_sample):
kwargs_lens_light = [lens_light_sample]
# Convert lens magnitude into amp
self.kwargs_lens_light = mag_to_amp_extended(kwargs_lens_light, self.lens_light_model, self.data_api)
def _load_agn_light_kwargs(self, sample):
"""Set the point source kwargs to be ingested by Lenstronomy
"""
# When using the image positions for cosmological parameter recovery, the time delays must be computed by evaluating the Fermat potential at these exact positions.
if self.for_cosmography:
x_image = sample['misc']['x_image']
y_image = sample['misc']['y_image']
# When the precision of the lens equation solver doesn't have to be matched between image positions and time delays, simply solve for the image positions using whatever desired precision.
else:
x_image, y_image = self.lens_eq_solver.findBrightImage(self.kwargs_src_light[0]['center_x'],
self.kwargs_src_light[0]['center_y'],
self.kwargs_lens_mass,
min_distance=self.min_distance,
search_window=self.search_window,
numImages=4,
num_iter_max=100, # default is 10 but td_cosmography default is 100
precision_limit=10**(-10) # default for both this and td_cosmography
)
agn_light_sample = sample['agn_light']
unlensed_mag = agn_light_sample['magnitude'] # unlensed agn mag
# Save the unlensed (source-plane) kwargs in amplitude units
kwargs_unlensed_unmagnified_mag_ps = [{'ra_source': self.kwargs_src_light[0]['center_x'], 'dec_source': self.kwargs_src_light[0]['center_y'], 'magnitude': unlensed_mag}]
self.kwargs_unlensed_unmagnified_amp_ps = mag_to_amp_point(kwargs_unlensed_unmagnified_mag_ps, self.unlensed_ps_model, self.data_api) # note
# Compute the lensed (image-plane), magnified kwargs in amplitude units
magnification = self.lens_mass_model.magnification(x_image, y_image, kwargs=self.kwargs_lens_mass)
measured_magnification = np.abs(magnification*(1.0 + self.magnification_frac_err*np.random.randn(len(magnification)))) # Add noise to magnification
magnification = np.abs(magnification)
kwargs_lensed_unmagnified_mag_ps = [{'ra_image': x_image, 'dec_image': y_image, 'magnitude': unlensed_mag}] # note unlensed magnitude
kwargs_lensed_unmagnified_amp_ps = mag_to_amp_point(kwargs_lensed_unmagnified_mag_ps, self.ps_model, self.data_api) # note unmagnified amp
self.kwargs_ps = copy.deepcopy(kwargs_lensed_unmagnified_amp_ps)
for kw in self.kwargs_ps:
kw.update(point_amp=kw['point_amp']*measured_magnification)
# Log the solved image positions
self.img_features.update(x_image=x_image,
y_image=y_image,
magnification=magnification,
measured_magnification=measured_magnification)
def generate_image(self, sample, num_pix, survey_object_dict):
img_canvas = np.empty([len(survey_object_dict), num_pix, num_pix]) # [n_filters, num_pix, num_pix]
# Loop over bands
for i, (bp, survey_object) in enumerate(survey_object_dict.items()):
self._set_sim_api(num_pix, survey_object.kwargs_single_band(), survey_object.psf_kernel_size, survey_object.which_psf_maps)
self._load_kwargs(sample)
# Reject nonsensical number of images (due to insufficient numerical precision)
if ('y_image' in self.img_features) and (len(self.img_features['y_image']) not in [2, 4]):
return None, None
# Compute magnification
lensed_total_flux = get_lensed_total_flux(self.kwargs_lens_mass, self.kwargs_src_light, self.kwargs_ps, self.image_model)
#unlensed_total_flux = get_unlensed_total_flux(self.kwargs_src_light, self.src_light_model, self.kwargs_unlensed_amp_ps, self.ps_model)
unlensed_total_flux = get_unlensed_total_flux_numerical(self.kwargs_src_light, self.kwargs_unlensed_unmagnified_amp_ps, self.unlensed_image_model)
total_magnification = lensed_total_flux/unlensed_total_flux
# Apply magnification cut
if (total_magnification < self.min_magnification) or np.isnan(total_magnification):
return None, None
# Generate image for export
img = self.image_model.image(self.kwargs_lens_mass, self.kwargs_src_light, self.kwargs_lens_light, self.kwargs_ps)
img = np.maximum(0.0, img) # safeguard against negative pixel values
img_canvas[i, :, :] = img
# Save remaining image features
img_features_single_band = {f'total_magnification_{bp}': total_magnification, f'lensed_total_flux_{bp}': lensed_total_flux, f'unlensed_total_flux_{bp}': unlensed_total_flux}
self.img_features.update(img_features_single_band)
return img_canvas, self.img_features
def add_noise(self, image_array):
"""Add noise to the image (deprecated; replaced by the data_augmentation package)
"""
#noise_map = self.data_api.noise_for_model(image_array, background_noise=True, poisson_noise=True, seed=None)
#image_array += noise_map
#return image_array
pass
``` |
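For orientation, here is a minimal usage sketch of `Imager2`. It assumes the package layout above (`baobab.sim_utils.image_utils`), that lenstronomy ships an `LSST` observation config, that a Gaussian PSF needs no kernel file, and a single `SIE` lens with a `SERSIC_ELLIPSE` source; every numerical value is an illustrative placeholder.
```python
# Hypothetical usage sketch; all values are illustrative placeholders.
from baobab.sim_utils.image_utils import Imager2

imager = Imager2(lens_model_list=['SIE'],
                 src_model_list=['SERSIC_ELLIPSE'],
                 n_pix=64, pixel_scale=0.2,
                 psf_type='GAUSSIAN')
# The detector config must be set before the model kwargs, since the
# magnitude-to-amplitude conversion needs the per-band zeropoints.
imager.survey_kwargs = {'survey_name': 'LSST',  # assumes this ObservationConfig exists
                        'bandpass_list': ['i'],
                        'coadd_years': 10}
imager.lens_kwargs = [{'theta_E': 1.2, 'e1': 0.05, 'e2': -0.03,
                       'center_x': 0.0, 'center_y': 0.0}]
imager.src_kwargs = [{'magnitude': 22.5, 'R_sersic': 0.3, 'n_sersic': 1.0,
                      'e1': 0.1, 'e2': 0.0, 'center_x': 0.05, 'center_y': 0.0}]
img = imager.generate_image()  # array of shape [n_filters, n_pix, n_pix]
```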
{
"source": "jiwoncpark/DC2-analysis",
"score": 2
} |
#### File: contributed/hackurdc2_utils/moments.py
```python
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import units
def calculate_total_flux(df):
df = df.set_index(['objectId', 'ccdVisitId'])
df['apFlux'] = df.groupby(['objectId', 'ccdVisitId'])['flux'].sum()
df = df.reset_index()
return df
def calculate_1st_moments(df):
flux_ratio = df['flux'].values/df['apFlux'].values
df['Ix_contrib'] = df['ra'].values*flux_ratio
df['Iy_contrib'] = df['dec'].values*flux_ratio
df = df.set_index(['objectId', 'ccdVisitId'])
df['Ix'] = df.groupby(['objectId', 'ccdVisitId'])['Ix_contrib'].sum()
df['Iy'] = df.groupby(['objectId', 'ccdVisitId'])['Iy_contrib'].sum()
df = df.reset_index()
return df
def calculate_centered_2nd_moments(e, phi, sigma):
sqrt_q = ((1.0 - e)/(1.0 + e))**0.5
lam1 = sigma**2.0/sqrt_q
lam2 = sigma**2.0*sqrt_q
cos = np.cos(phi)
sin = np.sin(phi)
Ixx = lam1*cos**2.0 + lam2*sin**2.0
Iyy = lam1*sin**2.0 + lam2*cos**2.0
Ixy = (lam1 - lam2)*cos*sin
return Ixx, Iyy, Ixy
def calculate_2nd_moments(df):
e = df['e']
phi = df['phi']
gauss_sigma = df['gauss_sigma']
flux_ratio = df['flux']/df['apFlux']
ra = df['ra']
dec = df['dec']
reference_Ix = df['Ix']
reference_Iy = df['Iy']
Ixx, Iyy, Ixy = calculate_centered_2nd_moments(e=e, phi=phi, sigma=gauss_sigma)
df['Ixx_contrib'] = flux_ratio*(Ixx + (ra - reference_Ix)**2.0)
df['Iyy_contrib'] = flux_ratio*(Iyy + (dec - reference_Iy)**2.0)
df['Ixy_contrib'] = flux_ratio*(Ixy + (ra - reference_Ix)*(dec - reference_Iy))
df = df.set_index(['objectId', 'ccdVisitId'])
for mom in ['Ixx', 'Iyy', 'Ixy']:
df[mom] = df.groupby(['objectId', 'ccdVisitId'])['%s_contrib' %mom].sum()
df = df.reset_index()
return df
def apply_environment(df):
    # Convolve with the PSF second moment (assumed isotropic, so Ixx_PSF is
    # added to both axes) and perturb the aperture flux with additive Gaussian
    # noise drawn from its reported error.
    df['Ixx'] += df['Ixx_PSF']
    df['Iyy'] += df['Ixx_PSF']
    df['apFlux'] += np.random.normal(0.0, df['apFluxErr'].values)
return df
def collapse_mog(mog_df):
collapsed = mog_df.groupby(['objectId', 'ccdVisitId',])['apFlux', 'Ix', 'Iy', 'Ixx', 'Iyy', 'Ixy',
'apFluxErr', 'sky', 'Ixx_PSF', 'expMJD',
'num_gal_neighbors',
'num_star_neighbors', 'num_agn_neighbors', 'num_sprinkled_neighbors'].mean()
collapsed[['Ix', 'Iy']] = units.arcsec_to_deg(collapsed[['Ix', 'Iy']])
collapsed = collapsed.reset_index()
return collapsed
```
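As a quick numerical sanity check of the quadrupole formula in `calculate_centered_2nd_moments` (standalone, assuming `hackurdc2_utils/` is on the path): a circular profile gives Ixx = Iyy = sigma^2 and Ixy = 0, while an elongated profile at phi = 0 places the larger moment on the x axis.
```python
import numpy as np
from moments import calculate_centered_2nd_moments  # assumes hackurdc2_utils/ is on sys.path

# Circular Gaussian (e = 0): both eigenvalues equal sigma^2, cross term vanishes
Ixx, Iyy, Ixy = calculate_centered_2nd_moments(e=0.0, phi=0.3, sigma=2.0)
assert np.isclose(Ixx, 4.0) and np.isclose(Iyy, 4.0) and np.isclose(Ixy, 0.0)

# Elongated profile aligned with the x axis (phi = 0): Ixx > Iyy, no cross term
Ixx, Iyy, Ixy = calculate_centered_2nd_moments(e=0.5, phi=0.0, sigma=2.0)
assert Ixx > Iyy and np.isclose(Ixy, 0.0)
```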
#### File: contributed/hackurdc2_utils/units.py
```python
from __future__ import absolute_import, division, print_function
import numpy as np
def flux_to_mag(flux, zeropoint_mag=0.0, from_unit=None):
if from_unit=='nMgy':
zeropoint_mag = 22.5
return zeropoint_mag - 2.5*np.log10(flux)
def mag_to_flux(mag, zeropoint_mag=0.0, to_unit=None):
if to_unit=='nMgy':
zeropoint_mag = 22.5
return np.power(10.0, -0.4*(mag - zeropoint_mag))
def fwhm_to_sigma(fwhm):
return fwhm/np.sqrt(8.0*np.log(2.0))
def deg_to_arcsec(deg):
return 3600.0*deg
def arcsec_to_deg(arcsec):
return arcsec/3600.0
def e1e2_to_phi(e1, e2):
phi = 0.5*np.arctan(e2/e1)
return phi
def e1e2_to_ephi(e1, e2):
e = np.power(np.power(e1, 2.0) + np.power(e2, 2.0), 0.5)
phi = 0.5*np.arctan(e2/e1)
return e, phi
``` |
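The unit converters above are easy to verify end to end; the following standalone check (again assuming `hackurdc2_utils/` is on the path) round-trips an AB magnitude through nanomaggies and spot-checks the FWHM-to-sigma factor.
```python
import numpy as np
from units import flux_to_mag, mag_to_flux, fwhm_to_sigma, e1e2_to_ephi

# AB mag <-> nanomaggy round trip (zeropoint 22.5)
mag = 20.0
flux_nmgy = mag_to_flux(mag, to_unit='nMgy')  # 10^(-0.4*(20.0 - 22.5)) = 10
assert np.isclose(flux_nmgy, 10.0)
assert np.isclose(flux_to_mag(flux_nmgy, from_unit='nMgy'), mag)

# Gaussian FWHM = 2*sqrt(2*ln 2)*sigma ~= 2.3548*sigma
assert np.isclose(fwhm_to_sigma(2.3548), 1.0, atol=1e-3)

# Ellipticity modulus and position angle from the two components
e, phi = e1e2_to_ephi(e1=0.3, e2=0.0)
assert np.isclose(e, 0.3) and np.isclose(phi, 0.0)
```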
{
"source": "jiwoncpark/ex-con",
"score": 3
} |
#### File: n2j/losses/local_global_loss.py
```python
import torch.nn as nn
__all__ = ['MSELoss']
class MSELoss:
def __init__(self):
self.local_mse = nn.MSELoss(reduction='mean')
self.global_mse = nn.MSELoss(reduction='mean')
def __call__(self, pred, target):
pred_local, pred_global = pred
target_local, target_global = target
mse = self.local_mse(pred_local, target_local)
mse += self.global_mse(pred_global, target_global)
return mse
```
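A minimal sketch of calling this combined loss, assuming predictions and targets each arrive as a (local, global) pair of tensors:
```python
import torch
from n2j.losses.local_global_loss import MSELoss  # import path follows the file layout above

loss_fn = MSELoss()
pred_local = torch.randn(8, 2, requires_grad=True)   # e.g. per-node predictions
pred_global = torch.randn(8, 1, requires_grad=True)  # e.g. per-graph predictions
target_local, target_global = torch.randn(8, 2), torch.randn(8, 1)
loss = loss_fn((pred_local, pred_global), (target_local, target_global))
loss.backward()  # gradients flow to both the local and global heads
```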
#### File: n2j/models/flow.py
```python
import torch
from torch import nn, optim
from torch.functional import F
import numpy as np
####
# From Karpathy's MADE implementation
####
DEBUG = False
class MaskedLinear(nn.Linear):
""" same as Linear except has a configurable mask on the weights """
def __init__(self, in_features, out_features, bias=True):
super().__init__(in_features, out_features, bias)
self.register_buffer('mask', torch.ones(out_features, in_features))
def set_mask(self, mask):
self.mask.data.copy_(torch.from_numpy(mask.astype(np.uint8).T))
def forward(self, input):
if DEBUG:
print("masked linear: ", torch.any(torch.isnan(input)), input.mean())
return F.linear(input, self.mask * self.weight, self.bias)
class MADE(nn.Module):
def __init__(self, nin, hidden_sizes,
nout, num_masks=1, natural_ordering=False):
"""
nin: integer; number of inputs
hidden sizes: a list of integers; number of units in hidden layers
nout: integer; number of outputs, which usually collectively parameterize some kind of 1D distribution
note: if nout is e.g. 2x larger than nin (perhaps the mean and std), then the first nin
will be all the means and the second nin will be stds. i.e. output dimensions depend on the
same input dimensions in "chunks" and should be carefully decoded downstream appropriately.
the output of running the tests for this file makes this a bit more clear with examples.
num_masks: can be used to train ensemble over orderings/connections
natural_ordering: force natural ordering of dimensions, don't use random permutations
"""
super().__init__()
self.nin = nin
self.nout = nout
self.hidden_sizes = hidden_sizes
assert self.nout % self.nin == 0, "nout must be integer multiple of nin"
# define a simple MLP neural net
self.net = []
hs = [nin] + hidden_sizes + [nout]
for h0, h1 in zip(hs, hs[1:]):
self.net.extend([
MaskedLinear(h0, h1),
nn.ReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
self.net = nn.Sequential(*self.net)
# seeds for orders/connectivities of the model ensemble
self.natural_ordering = natural_ordering
self.num_masks = num_masks
self.seed = 0 # for cycling through num_masks orderings
self.m = {}
self.update_masks() # builds the initial self.m connectivity
# note, we could also precompute the masks and cache them, but this
# could get memory expensive for large number of masks.
def update_masks(self):
if self.m and self.num_masks == 1: return # only a single seed, skip for efficiency
L = len(self.hidden_sizes)
# fetch the next seed and construct a random stream
rng = np.random.RandomState(self.seed)
self.seed = (self.seed + 1) % self.num_masks
# sample the order of the inputs and the connectivity of all neurons
self.m[-1] = np.arange(self.nin) if self.natural_ordering else rng.permutation(self.nin)
for l in range(L):
self.m[l] = rng.randint(self.m[l-1].min(), self.nin-1, size=self.hidden_sizes[l])
# construct the mask matrices
masks = [self.m[l-1][:,None] <= self.m[l][None,:] for l in range(L)]
masks.append(self.m[L-1][:,None] < self.m[-1][None,:])
# handle the case where nout = nin * k, for integer k > 1
if self.nout > self.nin:
k = int(self.nout / self.nin)
# replicate the mask across the other outputs
masks[-1] = np.concatenate([masks[-1]]*k, axis=1)
# set the masks in all MaskedLinear layers
layers = [l for l in self.net.modules() if isinstance(l, MaskedLinear)]
for l,m in zip(layers, masks):
l.set_mask(m)
def forward(self, x):
return self.net(x)
####
# End Karpathy's code
####
class MAF(nn.Module):
"""x0 only depends on x0, etc"""
def __init__(self, features, context, hidden=100, nlayers=1):
super(self.__class__, self).__init__()
self._fmualpha = MADE(features+context,
[hidden]*nlayers, 2*(features+context),
natural_ordering=True)
self.context_map = nn.Linear(context, context)
self.context = context
self.features = features
def fmualpha(self, x):
# Only return the data parts: (conditioned on whole context vector)
out = self._fmualpha(x)
mu = out[:, self.context:self.context+self.features]
alpha = out[:, 2*self.context+self.features:]
return mu, alpha
def load_context(self, x, context):
return torch.cat((self.context_map(context), x), dim=1)
def invert(self, u, context):
_x = self.load_context(u, context)
mu, alpha = self.fmualpha(_x)
x = u * torch.exp(alpha) + mu
return x
def forward(self, x, context):
# Invert the flow
_x = self.load_context(x, context)
if DEBUG:
print("_x is nan:", torch.any(torch.isnan(_x)), _x.mean())
mu, alpha = self.fmualpha(_x)
if DEBUG:
print("mu is nan:", torch.any(torch.isnan(mu)), mu.mean())
print("alpha is nan:", torch.any(torch.isnan(alpha)), alpha.mean())
u = (x - mu) * torch.exp(-alpha)
log_det = - torch.sum(alpha, dim=1)
return u, log_det
class Perm(nn.Module):
def __init__(self, nvars, perm=None):
super(self.__class__, self).__init__()
# If perm is none, chose some random permutation that gets fixed at initialization
if perm is None:
perm = torch.randperm(nvars)
self.perm = perm
self.reverse_perm = torch.argsort(perm)
def forward(self, x, context):
idx = self.perm.to(x.device)
return x[:, idx], 0
def invert(self, x, context):
rev_idx = self.reverse_perm.to(x.device)
return x[:, rev_idx]
class Flow(nn.Module):
def __init__(self, *layers):
super(self.__class__, self).__init__()
self.layers = nn.ModuleList(layers)
def forward(self, x, context):
log_det = None
for layer in self.layers:
x, _log_det = layer(x, context)
log_det = (log_det if log_det is not None else 0) + _log_det
# Same ordering as input:
for layer in self.layers[::-1]:
if 'Perm' not in str(layer):
continue
x = x[:, layer.reverse_perm]
return x, log_det
def invert(self, u, context):
for layer in self.layers:
if 'Perm' not in str(layer):
continue
u = u[:, layer.perm]
for layer in self.layers[::-1]:
u = layer.invert(u, context)
return u
```
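A short sketch of assembling these pieces into a conditional flow, assuming the classes above are importable. The density direction (`forward`) returns the base-space variables and the log-determinant of the Jacobian, while `invert` maps base noise back to data space for sampling; note that, because each MADE is evaluated in a single pass, the two directions here are not exact inverses of each other.
```python
import torch
from n2j.models.flow import Flow, MAF, Perm  # import path follows the file layout above

features, context_dim, batch = 4, 2, 16
flow = Flow(MAF(features, context_dim, hidden=64, nlayers=2),
            Perm(features),
            MAF(features, context_dim, hidden=64, nlayers=2))

x = torch.randn(batch, features)            # data
context = torch.randn(batch, context_dim)   # conditioning variables

u, log_det = flow(x, context)               # density direction: base variables + log|det J|
assert u.shape == (batch, features) and log_det.shape == (batch,)

samples = flow.invert(torch.randn(batch, features), context)  # sampling direction
assert samples.shape == (batch, features)
```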
#### File: tests/test_inference/test_inference_manager.py
```python
import os
import unittest
import shutil
import numpy as np
import pandas as pd
import scipy.stats
from n2j.inference.inference_manager import InferenceManager
import n2j.data as in_data
class TestInferenceManager(unittest.TestCase):
"""A suite of tests verifying CosmoDC2Graph class methods
"""
@classmethod
def setUpClass(cls):
"""InferenceManager object to test
"""
infer_obj = InferenceManager('cuda',
out_dir='inf_results/E3',
checkpoint_dir='results/E3',
seed=1028)
cls.infer_obj = infer_obj
def test_load_dataset(self):
        if True:  # hard-coded early return skips this data-heavy test
return
features = ['galaxy_id', 'ra', 'dec', 'redshift']
features += ['ra_true', 'dec_true', 'redshift_true']
features += ['ellipticity_1_true', 'ellipticity_2_true']
features += ['bulge_to_total_ratio_i']
features += ['ellipticity_1_bulge_true', 'ellipticity_1_disk_true']
features += ['ellipticity_2_bulge_true', 'ellipticity_2_disk_true']
features += ['shear1', 'shear2', 'convergence']
features += ['size_bulge_true', 'size_disk_true', 'size_true']
features += ['mag_{:s}_lsst'.format(b) for b in 'ugrizY']
# Features to train on
sub_features = ['ra_true', 'dec_true']
# sub_features += ['size_true']
# sub_features += ['ellipticity_1_true', 'ellipticity_2_true']
sub_features += ['mag_{:s}_lsst'.format(b) for b in 'ugrizY']
IN_DIR = in_data.__path__[0] # where raw data lies
TRAIN_HP = [10327]
VAL_HP = [10326]
N_TRAIN = [20000]
N_VAL = 1000
BATCH_SIZE = 1000 # min(N_TRAIN//5, 50)
SUB_TARGET = ['final_kappa', ] # 'final_gamma1', 'final_gamma2']
SUB_TARGET_LOCAL = ['stellar_mass', 'redshift']
norm_obj = scipy.stats.norm(loc=0.01, scale=0.03)
# Training
self.infer_obj.load_dataset(
dict(features=features,
raytracing_out_dirs=[os.path.join(IN_DIR, f'cosmodc2_{hp}/Y_{hp}') for hp in TRAIN_HP],
healpixes=TRAIN_HP,
n_data=N_TRAIN,
aperture_size=1.0,
subsample_pdf_func=norm_obj.pdf,
stop_mean_std_early=False,
in_dir=IN_DIR),
sub_features=sub_features,
sub_target=SUB_TARGET,
sub_target_local=SUB_TARGET_LOCAL,
is_train=True,
batch_size=BATCH_SIZE,
rebin=False,
)
# Test
self.infer_obj.load_dataset(
dict(features=features,
raytracing_out_dirs=[os.path.join(IN_DIR, f'cosmodc2_{hp}/Y_{hp}') for hp in VAL_HP],
healpixes=VAL_HP,
n_data=[N_VAL]*len(VAL_HP),
aperture_size=1.0,
in_dir=IN_DIR),
sub_features=sub_features,
sub_target=SUB_TARGET,
sub_target_local=SUB_TARGET_LOCAL,
is_train=False,
batch_size=N_VAL, # FIXME: must be same as train
)
def test_configure_model(self):
pass
def test_load_checkpoint(self):
pass
def test_get_bnn_kappa(self):
pass
@classmethod
def tearDownClass(cls):
pass
if __name__ == '__main__':
unittest.main()
```
#### File: test_trainval_data/test_utils/test_coord_utils.py
```python
import unittest
import numpy as np
import healpy as hp
from n2j.trainval_data.utils import coord_utils as cu
from scipy import stats
class TestCoordUtils(unittest.TestCase):
"""A suite of tests verifying the raytracing utility methods
"""
@classmethod
def setUpClass(cls):
"""Set global defaults for tests
"""
pass
def test_get_padded_nside(self):
"""Test get_padded_nside for correctness
"""
actual = cu.get_padded_nside(padding=15.0, nside_in=32)
expected = 128
np.testing.assert_equal(actual, expected)
actual = cu.get_padded_nside(padding=30.0, nside_in=32)
expected = 64
np.testing.assert_equal(actual, expected)
def test_get_corners(self):
"""Test get_corners for correctness
"""
# Upgrade by 1 order so there are 4 divisions, the centers
# of which make up the corners
# upgraded_ids = cu.upgrade_healpix(0, False, 2, 4) # nested
# ra_corners, dec_corners = cu.get_healpix_centers(upgraded_ids,
# 4, True)
# https://healpix.jpl.nasa.gov/html/intronode4.htm
actual = cu.get_corners(4, counterclockwise=False)
np.testing.assert_equal(actual, [0, 1, 2, 3])
actual = cu.get_corners(4, counterclockwise=True)
np.testing.assert_equal(actual, [0, 1, 3, 2])
def test_is_inside(self):
"""Test is_inside for correctness
"""
target_nside = cu.get_target_nside(100,
nside_in=32)
sightline_ids = cu.upgrade_healpix(10450, False,
32, target_nside)
ra_grid, dec_grid = cu.get_healpix_centers(sightline_ids,
target_nside,
nest=True)
coords = hp.boundaries(nside=32, pix=10450, step=1)
ra_corners, dec_corners = hp.vec2ang(np.transpose(coords),
lonlat=True)
actual = cu.is_inside(ra_grid, dec_grid, ra_corners, dec_corners)
np.testing.assert_equal(actual,
np.ones_like(actual).astype(bool))
actual = cu.is_inside([66, 67.5, 68.5], [-44, -45, -47],
ra_corners, dec_corners)
np.testing.assert_equal(actual,
[False, True, False])
def test_get_target_nside(self):
"""Test if the correct target NSIDE is returned
"""
# Say we want 17 subsamples of a healpix, close to 2 order diff (16)
# Then we need to choose 3 order diff to sample more than 17
order_diff = 2
n_samples = int(4**order_diff + 1)
order_in = 5
nside_desired = int(2**(order_in + order_diff + 1))
nside_actual = cu.get_target_nside(n_samples, nside_in=2**order_in)
np.testing.assert_equal(nside_actual, nside_desired)
def test_get_skycoord(self):
"""Test if a SkyCoord instance is returned
"""
from astropy.coordinates import SkyCoord
skycoord_actual = cu.get_skycoord(ra=np.array([0, 1, 2]),
dec=np.array([0, 1, 2]))
assert isinstance(skycoord_actual, SkyCoord)
assert skycoord_actual.shape[0] == 3
def test_sample_in_aperture(self):
"""Test uniform distribution of samples
"""
radius = 3.0/60.0 # deg
x, y = cu.sample_in_aperture(10000, radius=radius)
r2 = x**2 + y**2
ang = np.arctan2(y, x)
uniform_rv_r2 = stats.uniform(loc=0, scale=radius**2.0)
D, p = stats.kstest(r2, uniform_rv_r2.cdf)
np.testing.assert_array_less(0.01, p, err_msg='R2 fails KS test')
uniform_rv_ang = stats.uniform(loc=-np.pi, scale=2*np.pi)
D, p = stats.kstest(ang, uniform_rv_ang.cdf)
np.testing.assert_array_less(0.01, p, err_msg='angle fails KS test')
def test_get_healpix_centers(self):
"""Test if correct sky locations are returned in the cosmoDC2 convention
"""
# Correct answers hardcoded with known cosmoDC2 catalog values
# Input i_pix is in nested scheme
ra, dec = cu.get_healpix_centers(hp.ring2nest(32, 10450), 32, nest=True)
np.testing.assert_array_almost_equal(ra, [67.5], decimal=1)
np.testing.assert_array_almost_equal(dec, [-45.0], decimal=1)
# Input i_pix is in ring scheme
ra, dec = cu.get_healpix_centers(10450, 32, nest=False)
np.testing.assert_array_almost_equal(ra, [67.5], decimal=1)
np.testing.assert_array_almost_equal(dec, [-45.0], decimal=1)
def test_upgrade_healpix(self):
"""Test correctness of healpix upgrading
"""
nside_in = 2
nside_out = nside_in*2 # must differ by 1 order for this test
npix_in = hp.nside2npix(nside_in)
npix_out = hp.nside2npix(nside_out)
pix_i = 5
# Upgrade pix_i in NSIDE=1 using cu
# Downgrade all pixels in NSIDE=2 to NSIDE=1
# Check if mappings from NSIDE=1 to NSIDE=2 match
# Output is always NESTED
# Test 1: Input pix_i is in NESTED
# "visual" checks with https://healpix.jpl.nasa.gov/html/intronode4.htm
actual = cu.upgrade_healpix(pix_i, True, nside_in, nside_out)
desired_all = np.arange(npix_out).reshape((npix_in, 4))
desired = np.sort(desired_all[pix_i, :]) # NESTED
np.testing.assert_array_equal(desired, [20, 21, 22, 23], "visual")
np.testing.assert_array_equal(actual, desired, "input in NESTED")
# Test 2: Input pix_i is in RING
actual = cu.upgrade_healpix(pix_i, False, nside_in, nside_out)
# See https://stackoverflow.com/a/56675901
# `reorder` reorders RING IDs in NESTED order
# `reshape` is possible because the ordering is NESTED
# indexing should be done with a NESTED ID because ordering is NESTED
# but the output is in RING ID, which was reordered in the first place
desired_all = hp.reorder(np.arange(npix_out), r2n=True).reshape((npix_in, 4))
desired_ring = desired_all[hp.ring2nest(nside_in, pix_i), :]
np.testing.assert_array_equal(np.sort(desired_ring),
[14, 26, 27, 43],
"visual")
desired_nest = hp.ring2nest(nside_out, desired_ring)
np.testing.assert_array_equal(np.sort(actual),
np.sort(desired_nest),
"input in RING")
def test_match(self):
"""Test correctness of matching
"""
ra_grid = np.array([1, 2, 3])
dec_grid = np.array([1, 2, 3])
ra_cat = np.array([1.1, 10, 20, 1.9, 30])
dec_cat = np.array([1.1, 20, 10, 1.9, 30])
fake_dist = np.sqrt(2*0.1**2.0)
constraint, i_cat, dist = cu.match(ra_grid, dec_grid,
ra_cat, dec_cat, 0.5)
np.testing.assert_array_equal(constraint, [True, True, False])
np.testing.assert_array_equal(i_cat, [0, 3])
np.testing.assert_array_almost_equal(dist,
[fake_dist, fake_dist],
decimal=4)
@classmethod
def tearDownClass(cls):
pass
if __name__ == '__main__':
unittest.main()
```
#### File: test_trainval_data/test_utils/test_halo_utils.py
```python
import os
import unittest
import shutil
import numpy as np
from n2j.trainval_data.utils import halo_utils as hu
class TestHaloUtils(unittest.TestCase):
"""A suite of tests verifying the raytracing utility methods
"""
@classmethod
def setUpClass(cls):
"""Set global defaults for tests
"""
cls.healpix = 10450
cls.out_dir = 'test_out_dir'
os.makedirs(cls.out_dir, exist_ok=True)
cls.halo_mass = np.array([1e12, 5e12, 1e13])
cls.stellar_mass = np.array([1e12, 5e12, 1e13])
cls.halo_z = np.array([1.5, 1.0, 1.2])
cls.z_src = 2.0
cls.halo_ra = np.array([1.0, 2.0, 0.5])/60.0 # deg
cls.halo_dec = np.array([1.0, 0.5, 2.0])/60.0 # deg
def test_get_concentration(self):
"""Test mass-concentration relation at extreme values
"""
c_0 = 3.19
c200_at_stellar_mass = hu.get_concentration(1.0, 1.0, m=-0.10,
A=3.44,
trans_M_ratio=430.49,
c_0=c_0,
add_noise=False)
c200_at_high_halo_mass = hu.get_concentration(10.0**5, 1.0, m=-0.10,
A=3.44,
trans_M_ratio=430.49,
c_0=c_0,
add_noise=False)
np.testing.assert_almost_equal(c200_at_stellar_mass, 6.060380052400085,
err_msg='halo mass at stellar mass')
np.testing.assert_almost_equal(c200_at_high_halo_mass, c_0, decimal=2,
err_msg='at high halo mass')
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.out_dir)
if __name__ == '__main__':
unittest.main()
```
#### File: trainval_data/raytracers/cosmodc2_raytracer.py
```python
import os
import time
import multiprocessing
import numpy as np
import pandas as pd
from tqdm import tqdm
from astropy.cosmology import WMAP7 # WMAP 7-year cosmology
from lenstronomy.LensModel.lens_model import LensModel
from n2j.trainval_data.raytracers.base_raytracer import BaseRaytracer
import n2j.trainval_data.utils.coord_utils as cu
import n2j.trainval_data.utils.halo_utils as hu
__all__ = ['CosmoDC2Raytracer']
column_naming = {'ra_true': 'ra', 'dec_true': 'dec', 'redshift_true': 'z',
'convergence': 'kappa',
'shear1': 'gamma1', 'shear2': 'gamma2'}
class CosmoDC2Raytracer(BaseRaytracer):
"""Raytracing tool for postprocessing the lensing distortion in CosmoDC2
"""
NSIDE = 32
LENSING_NSIDE = 4096
KAPPA_DIFF = 1.0 # arcsec
COLUMN_NAMING = column_naming
TO_200C = 0.85 # multiply FOF (b=0.168) masses by this to get 200c masses
def __init__(self, in_dir, out_dir, fov, n_kappa_samples, healpix, seed,
approx_z_src=2.0, mass_cut=11, n_sightlines=1000,
kappa_sampling_dir=None, debug=False):
"""
Parameters
----------
in_dir : str or os.path
where input data are stored, i.e. the parent folder of `raw`
out_dir : str or os.path
where Y labels will be stored
fov : float
diameter of field of view in arcmin
healpix : int
healpix ID that will be supersampled
approx_z_src : float
approximate redshift of all sources, aka sightline galaxies
(Default: 2.0)
mass_cut : float
log10(minimum halo mass) (Default: 11.0)
n_sightlines : int
number of sightlines to raytrace through (Default: 1000)
"""
self.seed = seed
self.rng = np.random.default_rng(seed=self.seed)
BaseRaytracer.__init__(self, in_dir, out_dir, debug)
self.fov = fov
self.mass_cut = mass_cut
self.approx_z_src = approx_z_src
self.n_sightlines = n_sightlines
self.n_kappa_samples = n_kappa_samples
self.healpix = healpix
if self.n_kappa_samples: # kappa explicitly sampled
self.kappa_sampling_dir = None
else: # kappa interpolated
if os.path.exists(kappa_sampling_dir):
self.kappa_sampling_dir = kappa_sampling_dir
else:
raise OSError("If kappas were not sampled for each sightline,"
" you must generate some pairs of weighted sum of"
" masses and mean of kappas and provide the"
" out_dir of that run.")
self.debug = debug
self._set_column_names()
self._get_pointings()
uncalib_df = pd.DataFrame(columns=self.uncalib_cols)
uncalib_df.to_csv(self.uncalib_path, index=None)
def _set_column_names(self):
"""Set column names to be stored
"""
pointings_cols = ['kappa', 'gamma1', 'gamma2']
pointings_cols += ['galaxy_id', 'ra', 'dec', 'z', 'eps']
pointings_cols.sort()
self.pointings_cols = pointings_cols
halos_cols = ['ra', 'ra_diff', 'dec', 'dec_diff', 'z', 'dist']
halos_cols += ['eff', 'halo_mass', 'stellar_mass', 'Rs', 'alpha_Rs']
halos_cols += ['galaxy_id']
halos_cols.sort()
self.halos_cols = halos_cols
uncalib_cols = ['idx', 'kappa', 'gamma1', 'gamma2']
uncalib_cols += ['weighted_mass_sum']
self.uncalib_cols = uncalib_cols
Y_cols = ['final_kappa', 'final_gamma1', 'final_gamma2', 'mean_kappa']
Y_cols += ['galaxy_id', 'ra', 'dec', 'z']
self.Y_cols = Y_cols
def get_pointings_iterator(self, columns=None, chunksize=100000):
"""Get an iterator over the galaxy catalog defining the pointings
"""
if columns is None:
columns = ['ra_true', 'dec_true', 'redshift_true']
columns += ['convergence', 'shear1', 'shear2', 'galaxy_id']
cat_path = os.path.join(self.in_dir,
'cosmodc2_{:d}'.format(self.healpix), 'raw',
'pointings_{:d}.csv'.format(self.healpix))
cat = pd.read_csv(cat_path, chunksize=chunksize, nrows=None,
usecols=columns)
return cat
def get_halos_iterator(self, columns=None, chunksize=100000):
"""Get an iterator over the halo catalog defining our line-of-sight
halos
"""
if columns is None:
halos_cols = ['halo_mass', 'stellar_mass']
halos_cols += ['ra_true', 'dec_true', 'redshift_true', 'galaxy_id']
cat_path = os.path.join(self.in_dir,
'cosmodc2_{:d}'.format(self.healpix), 'raw',
'halos_{:d}.csv'.format(self.healpix))
cat = pd.read_csv(cat_path, chunksize=chunksize, nrows=None,
usecols=halos_cols)
return cat
def _get_pointings(self):
"""Gather pointings defining our sightlines
"""
if os.path.exists(self.sightlines_path):
pointings_arr = np.load(self.sightlines_path)
pointings = pd.DataFrame(pointings_arr, columns=self.pointings_cols)
self.pointings = pointings
else:
start = time.time()
self.pointings = self._get_pointings_on_grid(self.fov*0.5/60.0)
end = time.time()
print("Generated {:d} sightline(s) in"
" {:.2f} min.".format(self.n_sightlines, (end-start)/60.0))
def _get_pointings_on_grid(self, dist_thres):
"""Get the pointings on a grid of healpixes
Parameters
----------
dist_thres : float
matching threshold between gridpoints and halo positions, in deg
Notes
-----
Currently takes 1.2 min for 1000 sightlines.
Doesn't have to be so rigorous about finding sightlines closest to grid.
Two requirements are that sightlines need to be dominated by cosmic
variance (span a few degrees) and that each sightline has a galaxy.
"""
# Get centroids of D partitions by gridding the sky area and querying a
# galaxy closest to each grid center at redshift z > self.approx_z_src
# Each partition, centered at that galaxy, corresponds to an LOS
        if False:  # disabled alternative: grid the full healpix at the target NSIDE (unused)
target_nside = cu.get_target_nside(self.n_sightlines,
nside_in=self.NSIDE)
sightline_ids = cu.upgrade_healpix(self.healpix, False,
self.NSIDE, target_nside)
ra_grid, dec_grid = cu.get_healpix_centers(sightline_ids, target_nside,
nest=True)
need_more = True
padded_nside = cu.get_padded_nside(self.fov*0.5, self.NSIDE)
while need_more:
sightline_ids = cu.upgrade_healpix(self.healpix, False,
self.NSIDE, padded_nside)
ra_pre, dec_pre = cu.get_healpix_centers(sightline_ids,
padded_nside,
nest=True)
corners_i = np.array(cu.get_corners(len(ra_pre),
counterclockwise=True))
ra_corners, dec_corners = ra_pre[corners_i], dec_pre[corners_i]
inside_mask = cu.is_inside(ra_pre, dec_pre,
ra_corners, dec_corners)
need_more = sum(inside_mask) < self.n_sightlines
if need_more:
padded_nside *= 2 # Increase resolution of target NSIDE
# Slice pointings within padded bounds
ra_grid = ra_pre[inside_mask]
dec_grid = dec_pre[inside_mask]
# Randomly choose number of sightlines requested
rand_i = self.rng.choice(np.arange(len(ra_grid)),
size=self.n_sightlines,
replace=False)
ra_grid, dec_grid = ra_grid[rand_i], dec_grid[rand_i]
close_enough = np.zeros_like(ra_grid).astype(bool) # all gridpoints False
iterator = self.get_pointings_iterator()
sightlines = pd.DataFrame()
for df in iterator:
high_z = df[(df['redshift_true'] > self.approx_z_src)].reset_index(drop=True)
if len(high_z) > 0:
remaining = ~close_enough
passing, i_cat, dist = cu.match(ra_grid[remaining],
dec_grid[remaining],
high_z['ra_true'].values,
high_z['dec_true'].values,
dist_thres
)
more_sightlines = high_z.iloc[i_cat].copy()
more_sightlines['eps'] = dist
sightlines = sightlines.append(more_sightlines,
ignore_index=True)
close_enough[remaining] = passing
if np.all(close_enough):
break
sightlines.reset_index(drop=True, inplace=True)
sightlines.rename(columns=self.COLUMN_NAMING, inplace=True)
sightlines.sort_index(axis=1, inplace=True)
np.save(self.sightlines_path, sightlines.values)
return sightlines
def get_los_halos(self, i, ra_los, dec_los, z_src, galaxy_id_los):
"""Compile halos in the line of sight of a given galaxy
"""
iterator = self.get_halos_iterator()
# Sorted list of stored halo properties
if os.path.exists(self.halo_path_fmt.format(i, galaxy_id_los)):
halos_arr = np.load(self.halo_path_fmt.format(i, galaxy_id_los))
halos = pd.DataFrame(halos_arr, columns=self.halos_cols)
return halos
halos = pd.DataFrame() # neighboring galaxies in LOS
# Iterate through chunks to bin galaxies into the partitions
for df in iterator:
# Get galaxies in the aperture and in foreground of source
# Discard smaller masses, since they won't have a big impact anyway
lower_z = df['redshift_true'].values + 1.e-7 < z_src
if lower_z.any(): # there are still some lower-z halos
pass
else: # z started getting too high, no need to continue
continue
high_mass = df['halo_mass'].values*self.TO_200C > 10.0**self.mass_cut
cut = np.logical_and(high_mass, lower_z)
df = df[cut].reset_index(drop=True)
if len(df) > 0:
d, ra_diff, dec_diff = cu.get_distance(ra_f=df['ra_true'].values,
dec_f=df['dec_true'].values,
ra_i=ra_los,
dec_i=dec_los
)
df['dist'] = d*60.0 # deg to arcmin
df['ra_diff'] = ra_diff # deg
df['dec_diff'] = dec_diff # deg
halos = halos.append(df[df['dist'].values < self.fov*0.5],
ignore_index=True)
else:
continue
#####################
# Define NFW kwargs #
#####################
halos['halo_mass'] *= self.TO_200C
Rs, alpha_Rs, eff = hu.get_nfw_kwargs(halos['halo_mass'].values,
halos['stellar_mass'].values,
halos['redshift_true'].values,
z_src,
seed=i)
halos['Rs'] = Rs
halos['alpha_Rs'] = alpha_Rs
halos['eff'] = eff
halos.reset_index(drop=True, inplace=True)
halos.rename(columns=self.COLUMN_NAMING, inplace=True)
halos.sort_index(axis=1, inplace=True)
np.save(self.halo_path_fmt.format(i, galaxy_id_los), halos.values)
return halos
def single_raytrace(self, i):
"""Raytrace through a single sightline
"""
sightline = self.pointings.iloc[i]
halos = self.get_los_halos(i,
sightline['ra'], sightline['dec'],
sightline['z'], int(sightline['galaxy_id']))
n_halos = halos.shape[0]
# Instantiate multi-plane lens model
lens_model = LensModel(lens_model_list=['NFW']*n_halos,
z_source=sightline['z'],
lens_redshift_list=halos['z'].values,
multi_plane=True,
cosmo=WMAP7,
observed_convention_index=[])
halos['center_x'] = halos['ra_diff']*3600.0 # deg to arcsec
halos['center_y'] = halos['dec_diff']*3600.0
nfw_kwargs = halos[['Rs', 'alpha_Rs', 'center_x', 'center_y']].to_dict('records')
uncalib_kappa = lens_model.kappa(0.0, 0.0, nfw_kwargs,
diff=self.KAPPA_DIFF,
diff_method='square')
uncalib_gamma1, uncalib_gamma2 = lens_model.gamma(0.0, 0.0, nfw_kwargs,
diff=self.KAPPA_DIFF,
diff_method='square')
# Log the uncalibrated shear/convergence and the weighted sum of halo masses
w_mass_sum = np.log10(np.sum(halos['eff'].values*halos['halo_mass'].values))
new_row_data = dict(idx=[i],
kappa=[uncalib_kappa],
gamma1=[uncalib_gamma1],
gamma2=[uncalib_gamma2],
weighted_mass_sum=[w_mass_sum],
)
new_row = pd.DataFrame(new_row_data)
new_row.to_csv(self.uncalib_path, index=None, mode='a', header=None)
# Optionally map the uncalibrated shear and convergence on a grid
if self.debug:
hu.get_kappa_map(lens_model, nfw_kwargs, self.fov,
self.k_map_fmt.format(i),
self.KAPPA_DIFF)
hu.get_gamma_maps(lens_model, nfw_kwargs, self.fov,
(self.g1_map_fmt.format(i),
self.g2_map_fmt.format(i)),
self.KAPPA_DIFF)
if self.n_kappa_samples > 0:
self.sample_kappas(i, lens_model, halos)
def sample_kappas(self, i, lens_model, halos):
"""Render the halos in uniformly random locations within the aperture to
sample the kappas. The mean of sampled kappas will be used as an estimate of
the additional average kappa contribution of our halos
"""
n_halos = halos.shape[0]
# gamma1, gamma2 are not resampled due to symmetry around 0
if os.path.exists(self.k_samples_fmt.format(i)):
return None
kappa_samples = np.empty(self.n_kappa_samples)
S = 0
while S < self.n_kappa_samples:
new_ra, new_dec = cu.sample_in_aperture(n_halos, self.fov*0.5/60.0)
halos['center_x'] = new_ra*3600.0
halos['center_y'] = new_dec*3600.0
nfw_kwargs = halos[['Rs', 'alpha_Rs', 'center_x', 'center_y']].to_dict('records')
resampled_kappa = lens_model.kappa(0.0, 0.0, nfw_kwargs,
diff=self.KAPPA_DIFF,
diff_method='square')
if resampled_kappa < 1.0:
kappa_samples[S] = resampled_kappa
if self.debug:
hu.get_kappa_map(lens_model, nfw_kwargs, self.fov,
self.k_samples_map_fmt.format(i, S))
S += 1
else: # halo fell on top of zeropoint!
continue
np.save(self.k_samples_fmt.format(i), kappa_samples)
def parallel_raytrace(self, n_cores):
"""Raytrace through multiple sightlines in parallel
"""
with multiprocessing.Pool(n_cores) as pool:
return list(tqdm(pool.imap(self.single_raytrace,
range(self.n_sightlines)),
total=self.n_sightlines))
``` |
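A hedged sketch of driving the raytracer: it assumes the raw CosmoDC2 extracts (`pointings_<healpix>.csv` and `halos_<healpix>.csv`) already sit under `<in_dir>/cosmodc2_<healpix>/raw/`, and every path and number below is a placeholder.
```python
# Hypothetical driver; paths and numbers are placeholders.
from n2j.trainval_data.raytracers.cosmodc2_raytracer import CosmoDC2Raytracer

raytracer = CosmoDC2Raytracer(in_dir='data',             # parent of cosmodc2_10450/raw/
                              out_dir='data/Y_10450',    # where the labels are written
                              fov=6.0,                   # diameter in arcmin
                              n_kappa_samples=1000,      # sample kappas explicitly
                              healpix=10450,
                              seed=123,
                              n_sightlines=200)
raytracer.parallel_raytrace(n_cores=4)  # or raytracer.single_raytrace(i) for one sightline
```
Passing `n_kappa_samples=0` instead switches to the interpolation mode, which requires a `kappa_sampling_dir` pointing to the output of a previous sampling run.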
{
"source": "jiwoncpark/fast-forward",
"score": 2
} |
#### File: jiwoncpark/fast-forward/derp_data.py
```python
import numpy as np
import os
import pandas as pd
import torch
from torch.utils.data import Dataset
from sklearn.preprocessing import StandardScaler
import units_utils as units # FIXME: may not be used
import astropy.units as u
from collections import OrderedDict
import json
class DerpData(Dataset):
"""Preprocessed and unnormalized Derp dataset."""
def __init__(self, data_path, data_path2, X_base_cols, Y_base_cols, args, ignore_null_rows=True, save_to_disk=False):
"""
Parameters
----------
truth_path : string
Path to csv file containing the input X
drp_path : string
Path to csv file containing the label Y
ignore_null_rows : Bool
Whether rows with null values will be ignored
"""
if args['data_already_processed']:
self.read_XY_from_disk()
else:
self.verbose = args['verbose']
self.save_to_disk = save_to_disk
#self.mask_val = -1
self.ignore_null_rows = ignore_null_rows
self.X_base_cols = X_base_cols
self.Y_base_cols = Y_base_cols
self.star_cols = list('ugrizy') + ['ra_truth', 'dec_truth', 'redshift', 'star', 'agn']
self.pixel_scale = 0.2 # arcsec/pix
self.train_frac = args['train_frac']
self.scale_flux = 1.0
#if 'truth_id' not in self.X_base_cols + self.Y_base_cols:
# self.Y_base_cols.append('truth_id')
# Initialize new column mapping and names
self.X_col_map = OrderedDict(zip(self.X_base_cols, self.X_base_cols)) # same by default
self.Y_col_map = OrderedDict(zip(self.Y_base_cols, self.Y_base_cols))
XY1 = pd.read_csv(data_path, index_col=None)
cols = XY1.columns.values
XY2 = pd.read_csv(data_path2, index_col=None)[cols]
XY = pd.concat([XY1, XY2], axis=0).reset_index(drop=True)
XY = XY.sample(frac=1, random_state=123).reset_index(drop=True)
assert XY.shape[0] == XY1.shape[0] + XY2.shape[0]
assert XY.shape[1] == XY1.shape[1]
self.X = XY[self.X_base_cols]
self.Y = XY[self.Y_base_cols]
if self.ignore_null_rows:
if 'star' in self.X_base_cols:
self.zero_extragal_cols_for_stars()
self.zero_nan_for_galaxies()
self.delete_null_rows()
#self.delete_negative_fluxes()
else:
raise NotImplementedError
#self.mask_null_values()
# Engineer features
self.engineer_XY()
# Slice features
self.X_cols = list(self.X_col_map.values())
self.Y_cols = list(self.Y_col_map.values())
self.X = self.X[self.X_cols]
self.Y = self.Y[self.Y_cols]
# Save metadata: number of examples, input dim, output dim
self.n_trainval, self.X_dim = self.X.shape
_, self.Y_dim = self.Y.shape
# Categorical data
if 'star' in self.X_base_cols:
self.X_cat_cols = [self.X_col_map['star'],]
else:
self.X_cat_cols = []
# Split train vs. val
#self.val_indices = np.load('val_indices.npy')
self.val_indices = np.arange(int((1.0 - self.train_frac)*self.n_trainval))
self.train_indices = np.array(list(set(range(self.n_trainval)) - set(self.val_indices)))
self.n_val = len(self.val_indices)
self.n_train = len(self.train_indices)
# Normalize features
exclude_from_norm = self.X_cat_cols +\
['mag_true_%s_lsst' %bp for bp in 'ugrizy'] +\
['%s_flux' %bp for bp in 'ugrizy']
self.normalize_XY(exclude_X_cols=exclude_from_norm, normalize_Y=False)
# Some QA
self.abort_if_null()
self.report_star_fraction()
# Convert into numpy array
self.X = self.X.values.astype(np.float32)
self.Y = self.Y.values.astype(np.float32)
# Save processed data to disk
if self.save_to_disk:
if self.verbose:
print("Saving processed data to disk...")
np.save('data/X', self.X)
np.save('data/Y', self.Y)
def read_XY_from_disk(self):
self.X = np.load('data/X.npy')
self.Y = np.load('data/Y.npy')
data_meta = json.load(open("data_meta.txt"))
for key, value in data_meta.items():
setattr(self, key, value)
assert self.X.shape[0] == self.n_train + self.n_val
assert self.X.shape[1] == self.X_dim
assert self.Y.shape[1] == self.Y_dim
def __len__(self):
return self.X.shape[0]
def __getitem__(self, idx):
sample_X = self.X[idx, :]
sample_Y = self.Y[idx, :]
return sample_X, sample_Y
def report_star_fraction(self):
star_colname = self.X_col_map['star']
X_train = self.X.iloc[self.train_indices, :]
X_val = self.X.iloc[self.val_indices, :]
overall_star_frac = self.X[star_colname].sum()/self.n_trainval
train_star_frac = X_train[star_colname].sum()/self.n_train
val_star_frac = X_val[star_colname].sum()/self.n_val
print("Overall star frac: %.2f" %overall_star_frac)
print("Training star frac: %.2f" %train_star_frac)
print("Validation star frac: %.2f" %val_star_frac)
def abort_if_null(self):
X_null_cols = self.X.columns[self.X.isna().any()].tolist()
Y_null_cols = self.Y.columns[self.Y.isna().any()].tolist()
print("X has null columns: ", X_null_cols)
print("Y has null columns: ", Y_null_cols)
if len(X_null_cols) + len(Y_null_cols) > 0:
            raise ValueError("Null values in data. Aborting...")
def export_metadata_for_eval(self, device_type):
import json
data_meta = {
'device_type': device_type,
'scale_flux': self.scale_flux,
'ref_centroid': self.ref_centroid, # arcsec
'X_dim': self.X_dim,
'Y_dim': self.Y_dim,
'n_val': self.n_val,
'n_train': self.n_train,
'X_cat_cols': self.X_cat_cols,
'X_cols': self.X_cols,
'Y_cols': self.Y_cols,
'train_indices': self.train_indices.tolist(),
'val_indices': self.val_indices.tolist(),
'X_mean': self.X_mean.tolist(),
'X_std': self.X_std.tolist(),
}
with open('data_meta.txt', 'w') as fp:
json.dump(data_meta, fp)
def engineer_XY(self):
"""Engineer features in input X and label Y
Note
----
Does not include normalization
"""
self.scale_flux = 1.e5
self.ref_centroid = 350000.0 # arcsec
if 'extendedness' in self.Y_base_cols:
self.Y.loc[:, 'extendedness'] = 1.0 - self.Y['extendedness'].values
# Turn total mag into flux
for mag_name in 'ugrizy':
mag = self.X[mag_name].values
flux = (mag * u.ABmag).to_value(u.Jy)*self.scale_flux
flux_name = mag_name + '_flux'
self.X[flux_name] = flux
self.X_col_map[mag_name] = flux_name
# Turn galaxy mag into flux
for gal_mag_name in ['mag_true_%s_lsst' %bp for bp in 'ugrizy']:
gal_mag = self.X.loc[self.X['star']==False, gal_mag_name].values
gal_flux = (gal_mag * u.ABmag).to_value(u.Jy)*self.scale_flux
self.X.loc[self.X['star']==False, gal_mag_name] = gal_flux
# Calculate positional offset
if 'ra_obs' in self.Y_base_cols:
assert 'ra_truth' in self.X_base_cols
self.Y['ra_offset'] = (self.Y['ra_obs'] - self.X['ra_truth'])*3600.0*1000.0 # mas
self.Y_col_map['ra_obs'] = 'ra_offset'
if 'dec_obs' in self.Y_base_cols:
assert 'dec_truth' in self.X_base_cols
self.Y['dec_offset'] = (self.Y['dec_obs'] - self.X['dec_truth'])*3600.0*1000.0 # mas
self.Y_col_map['dec_obs'] = 'dec_offset'
# Square root the second moments
if 'Ixx' in self.Y_base_cols:
self.Y.loc[:, 'Ixx'] = np.sqrt(self.Y['Ixx']) # as
self.Y.loc[:, 'IxxPSF'] = np.sqrt(self.Y['IxxPSF'])
if 'Iyy' in self.Y_base_cols:
self.Y.loc[:, 'Iyy'] = np.sqrt(self.Y['Iyy']) # as
self.Y.loc[:, 'IyyPSF'] = np.sqrt(self.Y['IyyPSF'])
# Get first moments in asec
#if 'Ix' in self.Y_base_cols:
# self.Y.loc[:, 'Ix'] = (self.Y['Ix']/self.pixel_scale - self.ref_centroid)/3600.0/1000.0 # asec --> deg --> 1000 deg
# self.Y.loc[:, 'Iy'] = (self.Y['Iy']/self.pixel_scale - self.ref_centroid)/3600.0/1000.0 # asec --> deg --> 1000 deg
for col in self.Y_base_cols:
if 'Flux' in col:
self.Y.loc[:, col] = self.Y.loc[:, col]*1.e-9*self.scale_flux # 1.e-5 of Jy
# Define as offset from truth
for bp in 'ugrizy':
self.Y.loc[:, 'psFlux_%s' %bp] = self.Y.loc[:, 'psFlux_%s' %bp] - self.X.loc[:, '%s_flux' %bp]
self.Y.loc[:, 'cModelFlux_%s' %bp] = self.Y.loc[:, 'cModelFlux_%s' %bp] - self.X.loc[:, '%s_flux' %bp]
def zero_extragal_cols_for_stars(self):
"""Zeroes out the extragal columns and
replaces galaxy magnitudes with the duplicate star magnitudes for stars
"""
extragal_mag_cols = ['mag_true_%s_lsst' %bp for bp in 'ugrizy']
star_mag_cols = list('ugrizy')
#self.X.loc[self.X['star']==True, extragal_mag_cols] = self.X.loc[self.X['star']==True, star_mag_cols].values
self.X.loc[self.X['star']==True, extragal_mag_cols] = -1.0
other_extragal_cols = list(set(self.X_base_cols) - set(self.star_cols) - set(extragal_mag_cols))
self.X.loc[self.X['star']==True, other_extragal_cols] = 0.0
def zero_nan_for_galaxies(self):
"""Zeroes out some extragal columns for galaxies
Note
----
Galaxies with bulge ratio = 0.0 have NaNs as ellipticities 1 and 2
"""
self.X.loc[(self.X['bulge_to_total_ratio_i']==0.0), ['ellipticity_1_bulge_true', 'ellipticity_2_bulge_true']] = 0.0
def delete_null_rows(self):
"""Deletes rows with any null value
Note
----
This method assumes self.X has no null value.
"""
n_rows_before = len(self.X)
y_notnull_rows = np.logical_not(self.Y.isna().any(1))
self.X = self.X.loc[y_notnull_rows, :].reset_index(drop=True)
self.Y = self.Y.loc[y_notnull_rows, :].reset_index(drop=True)
n_rows_after = len(self.X)
if self.verbose:
print("Deleting null rows: %d --> %d" %(n_rows_before, n_rows_after))
def delete_negative_fluxes(self, flux_prefixes=['psFlux_%s', 'cModelFlux_%s'], flux_suffixes=None):
"""Deletes rows with any negative flux
"""
n_rows_before = len(self.Y)
row_mask = np.ones(n_rows_before).astype(bool) # initialize as deleting no row
for prefix in flux_prefixes:
for bp in 'ugrizy':
flux_colname = prefix %bp
row_mask = np.logical_and(self.Y[flux_colname]>0, row_mask)
self.X = self.X.loc[row_mask, :].reset_index(drop=True)
self.Y = self.Y.loc[row_mask, :].reset_index(drop=True)
n_rows_after = len(self.Y)
if self.verbose:
print("Deleting negative fluxes: %d --> %d" %(n_rows_before, n_rows_after))
def mask_null_values(self):
"""Replaces null values with a token, self.mask_val
"""
self.X.fillna(self.mask_val, inplace=True)
self.y.fillna(self.mask_val, inplace=True)
def normalize_XY(self, exclude_X_cols=[], exclude_Y_cols=[], normalize_Y=True):
"""Standardizes input X and label Y column-wise except exclude_cols
Note
----
The validation set must be standardized using the parameters of the training set.
"""
X_train = self.X.iloc[self.train_indices, :].copy()
X_val = self.X.iloc[self.val_indices, :].copy()
X_mean = X_train.mean()
X_std = X_train.std()
X_mean.loc[exclude_X_cols] = 0.0
X_std.loc[exclude_X_cols] = 1.0
self.X_mean = X_mean
self.X_std = X_std
self.X.iloc[self.train_indices, :] = (X_train - self.X_mean)/self.X_std
self.X.iloc[self.val_indices, :] = (X_val - self.X_mean)/self.X_std
if normalize_Y:
Y_train = self.Y.iloc[self.train_indices, :].copy()
Y_val = self.Y.iloc[self.val_indices, :].copy()
Y_mean = Y_train.mean()
Y_std = Y_train.std()
Y_mean.loc[exclude_Y_cols] = 0.0
Y_std.loc[exclude_Y_cols] = 1.0
self.Y_mean = Y_mean
self.Y_std = Y_std
self.Y.iloc[self.train_indices, :] = (Y_train - self.Y_mean)/self.Y_std
self.Y.iloc[self.val_indices, :] = (Y_val - self.Y_mean)/self.Y_std
else:
self.Y_mean = np.zeros((self.Y_dim,))
self.Y_std = np.ones((self.Y_dim,))
if self.verbose:
print("Standardized X except: ", exclude_X_cols)
print("Standardized Y except: ", exclude_Y_cols)
# print("Normalizing columns in X: ", self.X_cols)
# print("Normalizing columns in Y: ", self.Y_cols)
if __name__ == "__main__":
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import itertools
# X base columns
truth_cols = list('ugrizy') + ['ra_truth', 'dec_truth', 'redshift', 'star',]
truth_cols += ['size_bulge_true', 'size_minor_bulge_true', 'ellipticity_1_bulge_true', 'ellipticity_2_bulge_true', 'bulge_to_total_ratio_i']
truth_cols += ['size_disk_true', 'size_minor_disk_true', 'ellipticity_1_disk_true', 'ellipticity_2_disk_true',]
opsim_cols = ['m5_flux', 'PSF_sigma2', 'filtSkyBrightness_flux', 'airmass', 'n_obs']
# Y base columns
drp_cols = ['Ix', 'Iy', 'ra_obs', 'dec_obs', 'Ixx', 'Ixy', 'Iyy', 'IxxPSF', 'IxyPSF', 'IyyPSF',] #'extendedness',]
drp_cols_prefix = ['cModelFlux_', 'psFlux_']
drp_cols_suffix = ['_base_CircularApertureFlux_70_0_instFlux','_ext_photometryKron_KronFlux_instFlux',]
drp_cols += [t[0] + t[1] for t in list(itertools.product(drp_cols_prefix, list('ugrizy')))]
drp_cols += [t[1] + t[0] for t in list(itertools.product(drp_cols_suffix, list('ugrizy')))]
# Test constructor
    # Note: `verbose` and the processed-data flag are read from `args`, and the
    # constructor also requires a second catalog path; the path below is a
    # placeholder for your second object table.
    data = DerpData(data_path='raw_data/obj_master.csv',
                    data_path2='raw_data/obj_master2.csv',  # placeholder path
                    X_base_cols=truth_cols+opsim_cols, Y_base_cols=drp_cols,
                    args={'data_already_processed': True, 'verbose': True, 'train_frac': 0.8},
                    ignore_null_rows=True, save_to_disk=False)
#data.export_metadata_for_eval(device_type='cuda')
# Test __getitem__
X_slice, Y_slice = data[0]
print(X_slice.shape, Y_slice.shape)
# Test loader instantiated with DerpData instance
train_sampler = SubsetRandomSampler(data.train_indices)
train_loader = DataLoader(data, batch_size=7, sampler=train_sampler)
for batch_idx, (X_batch, Y_batch) in enumerate(train_loader):
print(X_batch.shape)
print(Y_batch.shape)
break
``` |
{
"source": "jiwoncpark/h0rton",
"score": 3
} |
#### File: h0rton/h0_inference/gaussian_bnn_posterior_cpu.py
```python
from abc import ABC, abstractmethod
import random
import numpy as np
from scipy.stats import multivariate_normal
__all__ = ['BaseGaussianBNNPosteriorCPU', 'DiagonalGaussianBNNPosteriorCPU', 'LowRankGaussianBNNPosteriorCPU', 'DoubleLowRankGaussianBNNPosteriorCPU', 'FullRankGaussianBNNPosteriorCPU', 'DoubleGaussianBNNPosteriorCPU', 'sigmoid', 'logsigmoid']
def sigmoid(x):
    """Sigmoid in a numerically stable piecewise form (the branch selected
    by `np.where` never overflows)"""
    return np.where(x >= 0,
                    1 / (1 + np.exp(-x)),
                    np.exp(x) / (1 + np.exp(x)))
def logsigmoid(x):
    """Log-sigmoid in a numerically stable piecewise form"""
    return np.where(x >= 0,
                    -np.log1p(np.exp(-x)),
                    x - np.log1p(np.exp(x)))
class BaseGaussianBNNPosteriorCPU(ABC):
"""Abstract base class to represent the Gaussian BNN posterior
Gaussian posteriors or mixtures thereof with various forms of the covariance matrix inherit from this class.
"""
def __init__(self, Y_dim, Y_mean=None, Y_std=None):
"""
Parameters
----------
        Y_dim : int
            number of parameters to predict
        Y_mean : np.ndarray
            mean values for the original values of `whitened_Y_cols`
        Y_std : np.ndarray
            std values for the original values of `whitened_Y_cols`
"""
self.Y_dim = Y_dim
        self.Y_mean = Y_mean.reshape(1, -1) if Y_mean is not None else None
        self.Y_std = Y_std.reshape(1, -1) if Y_std is not None else None
self.sigmoid = sigmoid
self.logsigmoid = logsigmoid
def seed_samples(self, sample_seed):
"""Seed the sampling for reproducibility
Parameters
----------
sample_seed : int
"""
np.random.seed(sample_seed)
random.seed(sample_seed)
@abstractmethod
def sample(self, n_samples, sample_seed=None):
"""Sample from the Gaussian posterior. Must be overridden by subclasses.
Parameters
----------
n_samples : int
how many samples to obtain
sample_seed : int
seed for the samples. Default: None
Returns
-------
np.array of shape `[n_samples, self.Y_dim]`
samples
"""
return NotImplemented
@abstractmethod
def get_hpd_interval(self):
"""Get the highest posterior density (HPD) interval
"""
return NotImplemented
def transform_back_mu(self, array):
"""Transform back, i.e. unwhiten, the tensor of central values
Parameters
----------
array : np.array of shape `[batch_size, Y_dim]`
Returns
-------
torch.tensor of shape `[batch_size, Y_dim]`
the original tensor
"""
array = np.expand_dims(array, axis=1)
array = self.unwhiten_back(array)
return array.squeeze()
def unwhiten_back(self, sample):
"""Scale and shift back to the unwhitened state
Parameters
----------
        sample : np.ndarray
            whitened samples of shape `[batch_size, n_samples, self.Y_dim]`
        Returns
        -------
        np.ndarray
            the unwhitened samples
"""
sample = sample*np.expand_dims(self.Y_std, 1) + np.expand_dims(self.Y_mean, 1)
return sample
def sample_low_rank(self, n_samples, mu, logvar, F):
"""Sample from a single Gaussian posterior with a full but low-rank plus diagonal covariance matrix
Parameters
----------
n_samples : int
how many samples to obtain
mu : torch.Tensor of shape `[self.batch_size, self.Y_dim]`
network prediction of the mu (mean parameter) of the BNN posterior
logvar : torch.Tensor of shape `[self.batch_size, self.Y_dim]`
network prediction of the log of the diagonal elements of the covariance matrix
F : torch.Tensor of shape `[self.batch_size, self.Y_dim, self.rank]`
network prediction of the low rank portion of the covariance matrix
Returns
-------
np.array of shape `[self.batch_size, n_samples, self.Y_dim]`
samples
"""
#F = torch.unsqueeze(F, dim=1).repeat(1, n_samples, 1, 1) # [self.batch_size, n_samples, self.Y_dim, self.rank]
F = np.repeat(F, repeats=n_samples, axis=0) # [self.batch_size*n_samples, self.Y_dim, self.rank]
mu = np.repeat(mu, repeats=n_samples, axis=0) # [self.batch_size*n_samples, self.Y_dim]
logvar = np.repeat(logvar, repeats=n_samples, axis=0) # [self.batch_size*n_samples, self.Y_dim]
eps_low_rank = np.random.randn(self.batch_size*n_samples, self.rank, 1)
eps_diag = np.random.randn(self.batch_size*n_samples, self.Y_dim)
half_var = np.exp(0.5*logvar) # [self.batch_size*n_samples, self.Y_dim]
samples = np.matmul(F, eps_low_rank).squeeze() + mu + half_var*eps_diag # [self.batch_size*n_samples, self.Y_dim]
samples = samples.reshape(self.batch_size, n_samples, self.Y_dim)
#samples = samples.transpose((1, 0, 2)) # [self.batch_size, n_samples, self.Y_dim]
return samples
def sample_full_rank(self, n_samples, mu, tril_elements):
"""Sample from a single Gaussian posterior with a full-rank covariance matrix
Parameters
----------
n_samples : int
how many samples to obtain
mu : np.array of shape `[self.batch_size, self.Y_dim]`
network prediction of the mu (mean parameter) of the BNN posterior
tril_elements : np.array of shape `[self.batch_size, tril_len]`
network prediction of the lower-triangular elements in the log-Cholesky decomposition of the precision matrix
Returns
-------
np.array of shape `[self.batch_size, n_samples, self.Y_dim]`
samples
"""
samples = np.zeros([self.batch_size, n_samples, self.Y_dim])
for b in range(self.batch_size):
tril = np.zeros([self.Y_dim, self.Y_dim])
tril[self.tril_idx[0], self.tril_idx[1]] = tril_elements[b, :]
log_diag_tril = np.diagonal(tril, offset=0, axis1=0, axis2=1)
tril[np.eye(self.Y_dim).astype(bool)] = np.exp(log_diag_tril)
prec_mat = np.dot(tril, tril.T) # [Y_dim, Y_dim]
cov_mat = np.linalg.inv(prec_mat)
sample_b = multivariate_normal.rvs(mean=mu[b, :], cov=cov_mat, size=[n_samples,])
samples[b, :, :] = sample_b
samples = self.unwhiten_back(samples)
return samples
class DiagonalGaussianBNNPosteriorCPU(BaseGaussianBNNPosteriorCPU):
"""The negative log likelihood (NLL) for a single Gaussian with diagonal covariance matrix
`BaseGaussianNLL.__init__` docstring for the parameter description.
"""
def __init__(self, Y_dim, Y_mean=None, Y_std=None):
super(DiagonalGaussianBNNPosteriorCPU, self).__init__(Y_dim, Y_mean, Y_std)
self.out_dim = self.Y_dim*2
def set_sliced_pred(self, pred):
d = self.Y_dim # for readability
#pred = pred.cpu().numpy()
self.batch_size = pred.shape[0]
self.mu = pred[:, :d]
self.logvar = pred[:, d:]
self.cov_diag = np.exp(self.logvar)
#F_tran_F = np.matmul(normal.F, np.swapaxes(normal.F, 1, 2))
#cov_mat = np.apply_along_axis(np.diag, -1, np.exp(normal.logvar)) + F_tran_F
#cov_diag = np.exp(normal.logvar) + np.diagonal(F_tran_F, axis1=1, axis2=2)
#assert np.array_equal(cov_mat.shape, [batch_size, self.Y_dim, self.Y_dim])
#assert np.array_equal(cov_diag.shape, [batch_size, self.Y_dim])
#np.apply_along_axis(np.diag, -1, np.exp(logvar)) # for diagonal
def sample(self, n_samples, sample_seed):
"""Sample from a Gaussian posterior with diagonal covariance matrix
Parameters
----------
n_samples : int
how many samples to obtain
sample_seed : int
seed for the samples. Default: None
Returns
-------
np.array of shape `[self.batch_size, n_samples, self.Y_dim]`
samples
"""
self.seed_samples(sample_seed)
eps = np.random.randn(self.batch_size, n_samples, self.Y_dim)
samples = eps*np.exp(0.5*np.expand_dims(self.logvar, 1)) + np.expand_dims(self.mu, 1)
samples = self.unwhiten_back(samples)
return samples
def get_hpd_interval(self):
raise NotImplementedError
class LowRankGaussianBNNPosteriorCPU(BaseGaussianBNNPosteriorCPU):
"""The negative log likelihood (NLL) for a single Gaussian with diagonal covariance matrix
`BaseGaussianNLL.__init__` docstring for the parameter description.
"""
def __init__(self, Y_dim, Y_mean=None, Y_std=None):
super(LowRankGaussianBNNPosteriorCPU, self).__init__(Y_dim, Y_mean, Y_std)
self.out_dim = self.Y_dim*4
self.rank = 2 # FIXME: hardcoded
def set_sliced_pred(self, pred):
d = self.Y_dim # for readability
#pred = pred.cpu().numpy()
self.batch_size = pred.shape[0]
self.mu = pred[:, :d]
self.logvar = pred[:, d:2*d]
self.F = pred[:, 2*d:].reshape([self.batch_size, self.Y_dim, self.rank])
#F_tran_F = np.matmul(self.F, np.swapaxes(self.F, 1, 2))
#self.cov_diag = np.exp(self.logvar) + np.diagonal(F_tran_F, axis1=1, axis2=2)
def sample(self, n_samples, sample_seed):
self.seed_samples(sample_seed)
return self.sample_low_rank(n_samples, self.mu, self.logvar, self.F)
def get_hpd_interval(self):
raise NotImplementedError
class DoubleLowRankGaussianBNNPosteriorCPU(BaseGaussianBNNPosteriorCPU):
"""The negative log likelihood (NLL) for a single Gaussian with diagonal covariance matrix
`BaseGaussianNLL.__init__` docstring for the parameter description.
"""
def __init__(self, Y_dim, Y_mean=None, Y_std=None):
super(DoubleLowRankGaussianBNNPosteriorCPU, self).__init__(Y_dim, Y_mean, Y_std)
self.out_dim = self.Y_dim*8 + 1
self.rank = 2 # FIXME: hardcoded
def set_sliced_pred(self, pred):
d = self.Y_dim # for readability
#pred = pred.cpu().numpy()
self.w2 = 0.5*self.sigmoid(pred[:, -1].reshape(-1, 1))
self.batch_size = pred.shape[0]
self.mu = pred[:, :d]
self.logvar = pred[:, d:2*d]
self.F = pred[:, 2*d:4*d].reshape([self.batch_size, self.Y_dim, self.rank])
#F_tran_F = np.matmul(self.F, np.swapaxes(self.F, 1, 2))
#self.cov_diag = np.exp(self.logvar) + np.diagonal(F_tran_F, axis1=1, axis2=2)
self.mu2 = pred[:, 4*d:5*d]
self.logvar2 = pred[:, 5*d:6*d]
self.F2 = pred[:, 6*d:8*d].reshape([self.batch_size, self.Y_dim, self.rank])
#F_tran_F2 = np.matmul(self.F2, np.swapaxes(self.F2, 1, 2))
#self.cov_diag2 = np.exp(self.logvar2) + np.diagonal(F_tran_F2, axis1=1, axis2=2)
def sample(self, n_samples, sample_seed):
"""Sample from a mixture of two Gaussians, each with a full but constrained as low-rank plus diagonal covariance
Parameters
----------
n_samples : int
how many samples to obtain
sample_seed : int
seed for the samples. Default: None
Returns
-------
np.array of shape `[self.batch_size, n_samples, self.Y_dim]`
samples
"""
self.seed_samples(sample_seed)
samples = np.zeros([self.batch_size, n_samples, self.Y_dim])
# Determine first vs. second Gaussian
unif2 = np.random.rand(self.batch_size, n_samples)
second_gaussian = (self.w2 > unif2)
# Sample from second Gaussian
samples2 = self.sample_low_rank(n_samples, self.mu2, self.logvar2, self.F2)
samples[second_gaussian, :] = samples2[second_gaussian, :]
# Sample from first Gaussian
samples1 = self.sample_low_rank(n_samples, self.mu, self.logvar, self.F)
samples[~second_gaussian, :] = samples1[~second_gaussian, :]
return samples
def get_hpd_interval(self):
raise NotImplementedError
class FullRankGaussianBNNPosteriorCPU(BaseGaussianBNNPosteriorCPU):
"""The negative log likelihood (NLL) for a single Gaussian with diagonal covariance matrix
`BaseGaussianNLL.__init__` docstring for the parameter description.
"""
def __init__(self, Y_dim, Y_mean=None, Y_std=None):
super(FullRankGaussianBNNPosteriorCPU, self).__init__(Y_dim, Y_mean, Y_std)
self.tril_idx = np.tril_indices(self.Y_dim) # lower-triangular indices
self.tril_len = len(self.tril_idx[0])
self.out_dim = self.Y_dim + self.Y_dim*(self.Y_dim + 1)//2
def set_sliced_pred(self, pred):
d = self.Y_dim # for readability
#pred = pred.cpu().numpy()
self.batch_size = pred.shape[0]
self.mu = pred[:, :d]
self.tril_elements = pred[:, d:self.out_dim]
def sample(self, n_samples, sample_seed):
self.seed_samples(sample_seed)
return self.sample_full_rank(n_samples, self.mu, self.tril_elements)
def get_hpd_interval(self):
raise NotImplementedError
class DoubleGaussianBNNPosteriorCPU(BaseGaussianBNNPosteriorCPU):
"""The negative log likelihood (NLL) for a single Gaussian with diagonal covariance matrix
`BaseGaussianNLL.__init__` docstring for the parameter description.
"""
def __init__(self, Y_dim, Y_mean=None, Y_std=None):
super(DoubleGaussianBNNPosteriorCPU, self).__init__(Y_dim, Y_mean, Y_std)
self.tril_idx = np.tril_indices(self.Y_dim) # lower-triangular indices
self.tril_len = len(self.tril_idx[0])
self.out_dim = self.Y_dim**2 + 3*self.Y_dim + 1
def set_sliced_pred(self, pred):
d = self.Y_dim # for readability
#pred = pred.cpu().numpy()
self.batch_size = pred.shape[0]
# First gaussian
self.mu = pred[:, :d]
self.tril_elements = pred[:, d:d+self.tril_len]
self.mu2 = pred[:, d+self.tril_len:2*d+self.tril_len]
self.tril_elements2 = pred[:, 2*d+self.tril_len:-1]
self.w2 = 0.5*self.sigmoid(pred[:, -1].reshape(-1, 1))
def sample(self, n_samples, sample_seed):
"""Sample from a mixture of two Gaussians, each with a full but constrained as low-rank plus diagonal covariance
Parameters
----------
n_samples : int
how many samples to obtain
sample_seed : int
seed for the samples. Default: None
Returns
-------
np.array of shape `[self.batch_size, n_samples, self.Y_dim]`
samples
"""
self.seed_samples(sample_seed)
samples = np.zeros([self.batch_size, n_samples, self.Y_dim])
# Determine first vs. second Gaussian
unif2 = np.random.rand(self.batch_size, n_samples)
second_gaussian = (self.w2 > unif2)
# Sample from second Gaussian
samples2 = self.sample_full_rank(n_samples, self.mu2, self.tril_elements2)
samples[second_gaussian, :] = samples2[second_gaussian, :]
# Sample from first Gaussian
samples1 = self.sample_full_rank(n_samples, self.mu, self.tril_elements)
samples[~second_gaussian, :] = samples1[~second_gaussian, :]
return samples
def get_hpd_interval(self):
raise NotImplementedError
```
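For reference, here is a minimal, hypothetical usage sketch of the CPU posterior classes above (not part of the source); the module path, batch size, and network-output values are assumptions for illustration only.

```python
import numpy as np
# Assumes this module is importable as h0rton.h0_inference.gaussian_bnn_posterior_cpu.
from h0rton.h0_inference.gaussian_bnn_posterior_cpu import DiagonalGaussianBNNPosteriorCPU

Y_dim = 3
Y_mean = np.zeros(Y_dim)  # training-set means used for whitening
Y_std = np.ones(Y_dim)    # training-set stds used for whitening
posterior = DiagonalGaussianBNNPosteriorCPU(Y_dim, Y_mean=Y_mean, Y_std=Y_std)

# Fake network output for a batch of 2 lenses: [mu (3) | logvar (3)]
pred = np.concatenate([np.random.randn(2, Y_dim), np.full((2, Y_dim), -2.0)], axis=1)
posterior.set_sliced_pred(pred)
samples = posterior.sample(n_samples=1000, sample_seed=123)
print(samples.shape)  # (2, 1000, 3): [batch_size, n_samples, Y_dim]
```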
#### File: h0rton/h0_inference/h0_posterior.py
```python
import numpy as np
import mpmath as mp
from astropy.cosmology import FlatLambdaCDM
import baobab.sim_utils.kinematics_utils as kinematics_utils
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.PointSource.point_source import PointSource
from lenstronomy.Analysis.td_cosmography import TDCosmography
from scipy.stats import norm
from h0rton.h0_inference import h0_utils
__all__ = ['H0Posterior']
class H0Posterior:
"""Represents the posterior over H0
"""
required_params = ["lens_mass_center_x", "src_light_center_x", "lens_mass_center_y", "src_light_center_y", "lens_mass_gamma", "lens_mass_theta_E", "lens_mass_e1", "lens_mass_e2", "external_shear_gamma1", "external_shear_gamma2", "src_light_R_sersic"]
def __init__(self, H0_prior, kappa_ext_prior, kwargs_model, baobab_time_delays, Om0, define_src_pos_wrt_lens, exclude_vel_disp=True, aniso_param_prior=None, kinematics=None, kappa_transformed=True, kwargs_lens_eqn_solver={}):
"""
Parameters
----------
H0_prior : scipy rv_continuous object
prior over H0
kappa_ext_prior : scipy rv_continuous object
prior over the external convergence
aniso_param_prior : scipy rv_continuous object
prior over the anisotropy radius, r_ani
kwargs_model : dict
dictionary defining which models (parameterizations) are used to define `lens_mass_dict`, `ext_shear_dict`, `ps_dict`
exclude_vel_disp : bool
whether to exclude the velocity dispersion likelihood. Default: True
"""
self.H0_prior = H0_prior
self.kappa_transformed = kappa_transformed
self.kappa_ext_prior = kappa_ext_prior
if self.kappa_transformed:
self.sample_kappa_ext = self.sample_kappa_ext_transformed
else:
self.sample_kappa_ext = self.sample_kappa_ext_original
self.aniso_param_prior = aniso_param_prior
self.exclude_vel_disp = exclude_vel_disp
self.kwargs_model = kwargs_model
self.baobab_time_delays = baobab_time_delays
self.define_src_pos_wrt_lens = define_src_pos_wrt_lens
self.kinematics = kinematics
self.Om0 = Om0 # Omega matter
self.kwargs_lens_eqn_solver = kwargs_lens_eqn_solver
self.kwargs_model.update(dict(point_source_model_list=['SOURCE_POSITION']))
if not self.exclude_vel_disp:
if self.kinematics is None:
raise ValueError("kinematics is required to calculate velocity dispersion.")
if self.kinematics.anisotropy_model == 'analytic':
self.get_velocity_dispersion = getattr(kinematics_utils, 'velocity_dispersion_analytic')
else:
# TODO: currently not available, as BNN does not predict lens light profile
self.get_velocity_dispersion = getattr(kinematics_utils, 'velocity_dispersion_numerical')
@classmethod
def from_dict(cls, lens_dict):
"""Initialize H0Posterior from a dictionary
Parameters
----------
lens_dict : dict
contains properties required to initialize H0Posterior. See `__init__` method above for the required parameters and their formats.
"""
return cls(**lens_dict)
def set_cosmology_observables(self, z_lens, z_src, measured_td_wrt0, measured_td_err, abcd_ordering_i, true_img_dec, true_img_ra, kappa_ext, measured_vd=None, measured_vd_err=None):
"""Set the cosmology observables for a given lens system, persistent across all the samples for that system
Parameters
----------
z_lens : float
z_src : float
measured_td_wrt0 : np.array of shape `[n_images - 1,]`
the measured time delays in days, relative to the first image
measured_td_err : float
the time delay measurement error in days
abcd_ordering_i : np.array of shape `[n_images,]`
the image ordering followed by `measured_td` in increasing dec. Example: if the `measured_td` are [a, b, c, d] and the corresponding image dec are [0.3, -0.1, 0.8, 0.4], then `abcd_ordering_i` are [1, 0, 3, 2].
true_img_dec : np.array of shape `[n_images, ]`
dec of the true image positions in arcsec
true_img_ra : np.array of shape `[n_images, ]`
ra of the true image positions in arcsec
kappa_ext : float
external convergence
measured_vd : float
measured velocity dispersion
measured_vd_err : float
measurement error of velocity dispersion
"""
self.z_lens = z_lens
self.z_src = z_src
self.measured_vd = measured_vd
self.measured_vd_err = measured_vd_err
self.measured_td_wrt0 = np.array(measured_td_wrt0)
self.measured_td_err = np.array(measured_td_err)
self.abcd_ordering_i = abcd_ordering_i
self.true_img_dec = true_img_dec
self.true_img_ra = true_img_ra
self.kappa_ext = kappa_ext
#self._reorder_measured_td_to_tdlmc()
# Number of AGN images
self.n_img = len(measured_td_wrt0) + 1
def _reorder_measured_td_to_tdlmc(self):
"""Reorder the measured time delays (same for all lens model samples)
Note
----
Unused!
"""
#print(self.measured_td, self.true_img_dec, self.abcd_ordering_i)
reordered_measured_td = h0_utils.reorder_to_tdlmc(self.measured_td, np.argsort(self.true_img_dec), self.abcd_ordering_i)
# Measured time in days (offset from the image with the smallest dec)
self.measured_td_wrt0 = reordered_measured_td[1:] - reordered_measured_td[0]
def format_lens_model(self, sample):
"""Set the lens model parameters for a given lens mass model
Parameters
----------
sample : dict
a sampled set of lens model parameters
"""
# Lens mass
# FIXME: hardcoded for SPEMD
kwargs_spemd = {'theta_E': sample['lens_mass_theta_E'],
'center_x': sample['lens_mass_center_x'],
'center_y': sample['lens_mass_center_y'],
'e1': sample['lens_mass_e1'],
'e2': sample['lens_mass_e2'],
'gamma': sample['lens_mass_gamma'],}
# External shear
kwargs_shear = {'gamma1': sample['external_shear_gamma1'],
'gamma2': sample['external_shear_gamma2'],
'ra_0': sample['lens_mass_center_x'],
'dec_0': sample['lens_mass_center_y']}
# AGN point source
if self.define_src_pos_wrt_lens:
kwargs_ps = {
'ra_source': sample['src_light_center_x'] + sample['lens_mass_center_x'],
'dec_source': sample['src_light_center_y'] + sample['lens_mass_center_y'],
}
else:
kwargs_ps = {
'ra_source': sample['src_light_center_x'],
'dec_source': sample['src_light_center_y'],
}
kwargs_lens = [kwargs_spemd, kwargs_shear]
# Raytrace to get point source kwargs in image plane
#kwargs_img, requires_reordering = self.get_img_pos(kwargs_ps, kwargs_lens)
# Pre-store for reordering image arrays
#dec_image = kwargs_img[0]['dec_image']
#increasing_dec_i = np.argsort(dec_image)
formatted_lens_model = dict(
# TODO: key checking depending on kwargs_model
kwargs_lens=kwargs_lens,
kwargs_ps=[kwargs_ps],
#kwargs_img=kwargs_img,
requires_reordering=True, #FIXME: get edge cases
#increasing_dec_i=increasing_dec_i,
#lens_light_R_sersic=sample['lens_light_R_sersic'],
)
return formatted_lens_model
def get_img_pos(self, ps_dict, kwargs_lens):
"""Sets the kwargs_ps class attribute as those coresponding to the point source model `LENSED_POSITION`
Parameters
----------
ps_dict : dict
point source parameters definitions, either of `SOURCE_POSITION` or `LENSED_POSITION`
"""
if 'ra_source' in ps_dict:
# If the user provided ps_dict in terms of the source position, we precompute the corresponding image positions before we enter the sampling loop.
lens_model_class = LensModel(self.kwargs_model['lens_model_list'])
ps_class = PointSource(['SOURCE_POSITION'], lens_model_class)
kwargs_ps_source = [ps_dict]
ra_image, dec_image = ps_class.image_position(kwargs_ps_source, kwargs_lens)
kwargs_image = [dict(ra_image=ra_image[0],
dec_image=dec_image[0])]
requires_reordering = True # Since the ra_image is coming out of lenstronomy, we need to reorder it to agree with TDLMC
else:
kwargs_image = [ps_dict]
requires_reordering = False # If the user is providing `ra_image` inside `ps_dict`, the order is required to agree with `measured_time_delays`.
return kwargs_image, requires_reordering
def sample_H0(self, random_state):
return self.H0_prior.rvs(random_state=random_state)
def sample_kappa_ext_original(self, random_state):
return self.kappa_ext_prior.rvs(random_state=random_state)
def sample_kappa_ext_transformed(self, random_state):
x = self.kappa_ext_prior.rvs(random_state=random_state)
i = 0
while ~np.isfinite(1.0 - 1.0/x):
x = self.kappa_ext_prior.rvs(random_state=random_state + i)
i += 1
return 1.0 - 1.0/x
def sample_aniso_param(self, random_state):
return self.aniso_param_prior.rvs(random_state=random_state)
def calculate_offset_from_true_image_positions(self, model_ra, model_dec, true_img_ra, true_img_dec, increasing_dec_i, abcd_ordering_i):
"""Calculates the difference in arcsec between the (inferred or fed-in) image positions known to `H0Posterior` and the provided true image positions
Parameters
----------
model_ra : array-like, of length self.n_img
ra of the model image positions
model_dec : array-like, of length self.n_img
dec of the model image positions
true_img_ra : array-like, of length self.n_img
ra of true image positions in TDLMC order
true_img_dec : array-like, of length self.n_img
dec of true image positions in TDLMC order
increasing_dec_i : array-like
indices that sort the model images in increasing dec
abcd_ordering_i : array-like
ABCD in an increasing dec order if the keys ABCD mapped to values 0123, respectively
Returns
-------
array-like
offset in arcsec for each image
"""
if self.requires_reordering:
model_ra = h0_utils.reorder_to_tdlmc(model_ra, increasing_dec_i, abcd_ordering_i)
model_dec = h0_utils.reorder_to_tdlmc(model_dec, increasing_dec_i, abcd_ordering_i)
ra_offset = model_ra - true_img_ra
dec_offset = model_dec - true_img_dec
ra_offset = ra_offset.reshape(-1, 1)
dec_offset = dec_offset.reshape(-1, 1)
offset = np.concatenate([ra_offset, dec_offset], axis=1)
return np.linalg.norm(offset, axis=1)
def get_h0_sample(self, sampled_lens_model_raw, random_state):
"""Get MC samples from the H0Posterior
Parameters
----------
sampled_lens_model_raw : dict
sampled lens model parameters, pre-formatting
random_state : np.random.RandomState object
Returns
-------
tuple of floats
the candidate H0 and its weight
"""
# Samples from the lens posterior are reinterpreted as samples from the lens model prior in the H0 inference stage
lens_prior_sample = self.format_lens_model(sampled_lens_model_raw)
kwargs_lens = lens_prior_sample['kwargs_lens']
#lens_light_R_sersic = lens_prior_sample['lens_light_R_sersic']
#increasing_dec_i = lens_prior_sample['increasing_dec_i']
# Sample from respective predefined priors
h0_candidate = self.sample_H0(random_state)
k_ext = self.sample_kappa_ext(random_state)
# Define cosmology
cosmo = FlatLambdaCDM(H0=h0_candidate, Om0=self.Om0)
# Tool for getting time delays and velocity dispersions
td_cosmo = TDCosmography(self.z_lens, self.z_src, self.kwargs_model, cosmo_fiducial=cosmo, kwargs_lens_eqn_solver=self.kwargs_lens_eqn_solver)
# Velocity dispersion
# TODO: separate sampling function if vel_disp is excluded
if self.exclude_vel_disp:
ll_vd = 0.0
else:
aniso_param = self.sample_aniso_param(random_state)
inferred_vd = self.get_velocity_dispersion(
td_cosmo,
kwargs_lens,
None, #FIXME: only analytic
{'aniso_param': aniso_param},
self.kinematics.kwargs_aperture,
self.kinematics.kwargs_psf,
self.kinematics.anisotropy_model,
None,
self.kinematics.kwargs_numerics,
k_ext
)
ll_vd = h0_utils.gaussian_ll_pdf(inferred_vd, self.measured_vd, self.measured_vd_err)
# Time delays
inferred_td, x_image, y_image = td_cosmo.time_delays(kwargs_lens, lens_prior_sample['kwargs_ps'], kappa_ext=k_ext)
if len(inferred_td) > len(self.measured_td_wrt0) + 1:
inferred_td, x_image, y_image = self.chuck_images(inferred_td, x_image, y_image)
if lens_prior_sample['requires_reordering']:
increasing_dec_i = np.argsort(y_image)
inferred_td = h0_utils.reorder_to_tdlmc(inferred_td, increasing_dec_i, self.abcd_ordering_i)
else:
inferred_td = np.array(inferred_td)
inferred_td_wrt0 = inferred_td[1:] - inferred_td[0]
#print(inferred_td, self.measured_td)
ll_td = np.sum(h0_utils.gaussian_ll_pdf(inferred_td_wrt0, self.measured_td_wrt0, self.measured_td_err))
log_w = ll_vd + ll_td
weight = mp.exp(log_w)
return h0_candidate, weight
def set_truth_lens_model(self, sampled_lens_model_raw):
# Set once per lens
# Samples from the lens posterior are reinterpreted as samples from the lens model prior in the H0 inference stage
self.kwargs_model.update(dict(point_source_model_list=['SOURCE_POSITION']))
self.lens_prior_sample = self.format_lens_model(sampled_lens_model_raw)
cosmo = FlatLambdaCDM(H0=70.0, Om0=self.Om0) # fiducial cosmology, doesn't matter
td_cosmo = TDCosmography(self.z_lens, self.z_src, self.kwargs_model, cosmo_fiducial=cosmo, kwargs_lens_eqn_solver=self.kwargs_lens_eqn_solver)
_, x_image, y_image = td_cosmo.time_delays(self.lens_prior_sample['kwargs_lens'], self.lens_prior_sample['kwargs_ps'], kappa_ext=0.0)
while len(y_image) not in [2, 4]:
_, x_image, y_image = td_cosmo.time_delays(self.lens_prior_sample['kwargs_lens'], self.lens_prior_sample['kwargs_ps'], kappa_ext=0.0)
#raise ValueError("Defective lens?")
self.kwargs_model.update(dict(point_source_model_list=['LENSED_POSITION']))
self.kwargs_image = [dict(ra_image=x_image, dec_image=y_image)]
def get_h0_sample_truth(self, random_state):
"""Get MC samples from the H0Posterior
Parameters
----------
random_state : np.random.RandomState object
Returns
-------
tuple of floats
the candidate H0 and its weight
"""
#increasing_dec_i = lens_prior_sample['increasing_dec_i']
# Sample from respective predefined priors
h0_candidate = self.sample_H0(random_state)
k_ext = self.sample_kappa_ext_transformed(random_state)#self.kappa_ext #self.sample_kappa_ext(random_state) #
# Define cosmology
cosmo = FlatLambdaCDM(H0=h0_candidate, Om0=self.Om0)
# Tool for getting time delays and velocity dispersions
td_cosmo = TDCosmography(self.z_lens, self.z_src, self.kwargs_model, cosmo_fiducial=cosmo, kwargs_lens_eqn_solver=self.kwargs_lens_eqn_solver)
# Velocity dispersion
# TODO: separate sampling function if vel_disp is excluded
# Time delays
inferred_td, x_image, y_image = td_cosmo.time_delays(self.lens_prior_sample['kwargs_lens'], self.kwargs_image, kappa_ext=k_ext)
#print(inferred_td, y_image)
if len(inferred_td) > len(self.measured_td_wrt0) + 1:
inferred_td, x_image, y_image = self.chuck_images(inferred_td, x_image, y_image)
#print("after correct: ", inferred_td, y_image)
if self.lens_prior_sample['requires_reordering']:
increasing_dec_i = np.argsort(y_image)
inferred_td = h0_utils.reorder_to_tdlmc(inferred_td, increasing_dec_i, self.abcd_ordering_i)
else:
inferred_td = np.array(inferred_td)
inferred_td_wrt0 = inferred_td[1:] - inferred_td[0]
#print(inferred_td_wrt0, self.measured_td_wrt0)
ll_td = np.sum(h0_utils.gaussian_ll_pdf(inferred_td_wrt0, self.measured_td_wrt0, self.measured_td_err))
log_w = ll_td
weight = mp.exp(log_w)
return h0_candidate, weight
def chuck_images(self, inferred_td, x_image, y_image):
"""If the number of predicted images are greater than the measured, choose the images that best correspond to the measured.
"""
# Find index or indices that must be removed.
# Candidates 4 choose 2 (=6) or 3 choose 2 (=3)
keep_idx = np.zeros(len(self.true_img_dec))
for i, actual_y_i in enumerate(self.true_img_dec): # FIXME: use measured_img_dec
keep_idx[i] = np.argmin((y_image - actual_y_i)**2.0)
keep_idx = np.sort(keep_idx).astype(int)
inferred_td = inferred_td[keep_idx]
x_image = x_image[keep_idx]
y_image = y_image[keep_idx]
return inferred_td, x_image, y_image
```
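For orientation, below is a hypothetical sketch (not part of the source) of the importance-sampling loop that `get_h0_sample` is designed for. The helper `summarize_h0` is made up; `h0_post` is assumed to be an `H0Posterior` with its observables already set, and `bnn_samples` a list of raw lens-model samples.

```python
import numpy as np

def summarize_h0(h0_post, bnn_samples, n_samples=10000):
    """Hypothetical helper: importance-weighted H0 summary from an H0Posterior
    and a list of raw BNN lens-model samples."""
    h0_samples = np.empty(n_samples)
    weights = np.empty(n_samples)
    for i in range(n_samples):
        rs = np.random.RandomState(i)
        # Each call returns a candidate H0 drawn from the prior and its weight
        h0_samples[i], weights[i] = h0_post.get_h0_sample(bnn_samples[i], rs)
    # Importance-weighted mean and standard deviation of the H0 posterior
    w = weights / np.sum(weights)
    h0_mean = np.sum(w * h0_samples)
    h0_std = np.sum(w * (h0_samples - h0_mean) ** 2) ** 0.5
    return h0_mean, h0_std
```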
#### File: h0rton/h0_inference/h0_utils.py
```python
import os
import numpy as np
from astropy.cosmology import FlatLambdaCDM
from lenstronomy.Cosmo.lens_cosmo import LensCosmo
from hierarc.Sampling.mcmc_sampling import MCMCSampler
import corner
import matplotlib.pyplot as plt
from scipy.stats import norm, median_abs_deviation
__all__ = ["reorder_to_tdlmc", "pred_to_natural_gaussian", "CosmoConverter", "get_lognormal_stats", "get_lognormal_stats_naive", "get_normal_stats", "get_normal_stats_naive", "remove_outliers_from_lognormal", "combine_lenses", "gaussian_ll_pdf"]
MAD_to_sig = 1.0/norm.ppf(0.75) # 1.4826 built into scipy, so not used.
class DeltaFunction:
def __init__(self, true_value=0.0):
self.true_value = true_value
def rvs(self, random_state=None):
return self.true_value
def gaussian_ll_pdf(x, mu, sigma):
"""Evaluates the (unnormalized) log of the normal PDF at point x
Parameters
----------
x : float or array-like
point at which to evaluate the log pdf
mu : float or array-like
mean of the normal on a linear scale
sigma : float or array-like
standard deviation of the normal on a linear scale
"""
log_pdf = -0.5*(x - mu)**2.0/sigma**2.0 #- np.log(sigma) - 0.5*np.log(2.0*np.pi)
return log_pdf
def reorder_to_tdlmc(img_array, increasing_dec_i, abcd_ordering_i):
"""Apply the permutation scheme for reordering the list of ra, dec, and time delays to conform to the order in the TDLMC challenge
Parameters
----------
img_array : array-like
array of properties corresponding to the AGN images
increasing_dec_i : array-like
indices that sort the images in increasing dec
abcd_ordering_i : array-like
ABCD in an increasing dec order if the keys ABCD mapped to values 0123, respectively
Returns
-------
array-like
`img_array` reordered to the TDLMC order
"""
#print(img_array, increasing_dec_i.shape, abcd_ordering_i.shape)
img_array = np.array(img_array)[increasing_dec_i][abcd_ordering_i]
return img_array
def pred_to_natural_gaussian(pred_mu, pred_cov_mat, shift, scale):
"""Convert the BNN-predicted multivariate Gaussian parameters into the natural space counterparts by reverse transformation
Parameters
----------
pred_mu : np.array of shape `[Y_dim,]`
pred_cov_mat : np.array of shape `[Y_dim, Y_dim]`
scale : np.array of shape `[Y_dim,]`
vector by which the features were scaled, e.g. the training-set feature standard deviations
shift : np.array of shape `[Y_dim,]`
vector by which the features were shifted, e.g. the training-set feature means
Note
----
Derive it or go here: https://math.stackexchange.com/questions/332441/affine-transformation-applied-to-a-multivariate-gaussian-random-variable-what
Returns
-------
mu : np.array of shape `[Y_dim,]`
mu in natural space
cov_mat : np.array of shape `[Y_dim, Y_dim]`
covariance matrix in natural space
"""
mu = pred_mu*scale + shift
A = np.diagflat(scale)
cov_mat = np.matmul(np.matmul(A, pred_cov_mat), A.T) # There is a better way to do this...
return mu, cov_mat
class CosmoConverter:
"""Convert the time-delay distance to H0 and vice versa
Note
----
This was modified from lenstronomy.Cosmo.cosmo_solver to handle array types.
"""
def __init__(self, z_lens, z_src, H0=70.0, Om0=0.3):
self.cosmo_fiducial = FlatLambdaCDM(H0=H0, Om0=Om0) # arbitrary
self.h0_fiducial = self.cosmo_fiducial.H0.value
self.lens_cosmo = LensCosmo(z_lens=z_lens, z_source=z_src, cosmo=self.cosmo_fiducial)
self.ddt_fiducial = self.lens_cosmo.ddt
def get_H0(self, D_dt):
H0 = self.h0_fiducial * self.ddt_fiducial / D_dt
return H0
def get_D_dt(self, H0):
D_dt = self.h0_fiducial * self.ddt_fiducial / H0
return D_dt
def get_lognormal_stats(all_samples):
"""Compute lognormal stats robustly, using median stats, assuming the samples are drawn from a lognormal distribution
"""
is_nan_mask = np.logical_or(np.isnan(all_samples), ~np.isfinite(all_samples))
samples = all_samples[~is_nan_mask]
log_samples = np.log(samples)
mu = np.median(log_samples)
sig2 = median_abs_deviation(log_samples, axis=None, scale='normal')**2.0
mode = np.exp(mu - sig2)
std = ((np.exp(sig2) - 1.0)*(np.exp(2*mu + sig2)))**0.5
stats = dict(
mu=mu,
sigma=sig2**0.5,
mode=mode,
std=std
)
return stats
def get_lognormal_stats_naive(all_samples, all_weights=None):
"""Compute lognormal stats assuming the samples are drawn from a lognormal distribution
"""
if all_weights is None:
all_weights = np.ones_like(all_samples)
is_nan_mask = np.logical_or(np.logical_or(np.isnan(all_weights), ~np.isfinite(all_weights)), np.isnan(all_samples))
all_weights[~is_nan_mask] = all_weights[~is_nan_mask]/np.sum(all_weights[~is_nan_mask])
samples = all_samples[~is_nan_mask]
weights = all_weights[~is_nan_mask]
n_samples = len(samples)
log_samples = np.log(samples)
mu = np.average(log_samples, weights=weights)
sig2 = np.average((log_samples - mu)**2.0, weights=weights)*(n_samples/(n_samples - 1))
mode = np.exp(mu - sig2)
std = ((np.exp(sig2) - 1.0)*(np.exp(2*mu + sig2)))**0.5
stats = dict(
mu=mu,
sigma=sig2**0.5,
mode=mode,
std=std
)
return stats
def get_normal_stats(all_samples):
is_nan_mask = np.logical_or(np.isnan(all_samples), ~np.isfinite(all_samples))
samples = all_samples[~is_nan_mask]
mean = np.median(samples)
std = median_abs_deviation(samples, axis=None, scale='normal')
stats = dict(
mean=mean,
std=std
)
return stats
def get_normal_stats_naive(all_samples, all_weights):
is_nan_mask = np.logical_or(np.logical_or(np.isnan(all_weights), ~np.isfinite(all_weights)), np.isnan(all_samples))
all_weights[~is_nan_mask] = all_weights[~is_nan_mask]/np.sum(all_weights[~is_nan_mask])
samples = all_samples[~is_nan_mask]
weights = all_weights[~is_nan_mask]
mean = np.average(samples, weights=weights)
std = np.average((samples - mean)**2.0, weights=weights)**0.5
#print(mean, std)
stats = dict(
mean=mean,
std=std,
samples=samples,
weights=weights
)
return stats
def remove_outliers_from_lognormal(data, level=3):
"""Remove extreme outliers corresponding to level-STD away from the mean
Parameters
----------
data : np.array
data expected to follow a lognormal distribution
"""
# Quantiles are preserved under monotonic transformations
log_data = np.log(data)
robust_mean = np.median(log_data)
robust_std = median_abs_deviation(log_data, scale='normal')
return data[abs(log_data - robust_mean) < level*robust_std]
def combine_lenses(likelihood_type, z_lens, z_src, true_Om0, samples_save_path=None, corner_save_path=None, n_run=100, n_burn=400, n_walkers=10, **posterior_parameters):
"""Combine lenses in the D_dt space
Parameters
----------
true_Om0 : float
true Om0, not inferred
likelihood_type : str
'DdtGaussian', 'DdtLogNorm', 'DdtHistKDE' supported. 'DdtGaussian' must have 'ddt_mean', 'ddt_sigma'. 'DdtLogNorm' must have 'ddt_mu' and 'ddt_sigma'. 'DdtHistKDE' must have 'lens_ids' and 'samples_dir'.
"""
n_test = len(z_lens)
kwargs_posterior_list = []
if likelihood_type in ['DdtLogNorm', 'DdtGaussian']:
for i in range(n_test):
kwargs_posterior = {'z_lens': z_lens[i], 'z_source': z_src[i],
'likelihood_type': likelihood_type}
for param_name, param_value in posterior_parameters.items():
kwargs_posterior.update({param_name: param_value[i]})
kwargs_posterior_list.append(kwargs_posterior)
elif likelihood_type == 'DdtHistKDE':
lens_ids = posterior_parameters['lens_ids']
samples_dir = posterior_parameters['samples_dir']
binning_method = posterior_parameters['binning_method']
for i, lens_i in enumerate(lens_ids):
h0_dict_path = os.path.join(samples_dir, 'D_dt_dict_{:04d}.npy'.format(lens_i))
h0_dict = np.load(h0_dict_path, allow_pickle=True).item() # TODO: Use context manager to prevent memory overload
D_dt_samples = h0_dict['D_dt_samples']
remove = np.isnan(D_dt_samples)
D_dt_samples = D_dt_samples[~remove]
#cosmo_converter = CosmoConverter(z_lens[i], z_src[i])
#D_dt_samples = cosmo_converter.get_D_dt(H0_samples)
kwargs_posterior = {'z_lens': z_lens[i], 'z_source': z_src[i],
'ddt_samples': D_dt_samples, 'ddt_weights': None,
'likelihood_type': 'DdtHist', 'binning_method': binning_method}
kwargs_posterior_list.append(kwargs_posterior)
else:
raise NotImplementedError("This likelihood type is not supported. Please choose from 'DdtGaussian', 'DdtLogNorm', and 'DdtHistKDE'.")
kwargs_lower_cosmo = {'h0': 50.0}
kwargs_lower_lens = {}
kwargs_lower_kin = {}
kwargs_upper_cosmo = {'h0': 90.0}
kwargs_upper_lens = {}
kwargs_upper_kin = {}
kwargs_fixed_cosmo = {'om': true_Om0}
kwargs_fixed_lens = {}
kwargs_fixed_kin = {}
kwargs_mean_start = {'kwargs_cosmo': {'h0': 70.0},
'kwargs_lens': {},
'kwargs_kin': {}}
kwargs_sigma_start = {'kwargs_cosmo': {'h0': 10.0},
'kwargs_lens': {},
'kwargs_kin': {}}
kwargs_bounds = {'kwargs_lower_cosmo': kwargs_lower_cosmo,
'kwargs_lower_lens': kwargs_lower_lens,
'kwargs_lower_kin': kwargs_lower_kin,
'kwargs_upper_cosmo': kwargs_upper_cosmo,
'kwargs_upper_lens': kwargs_upper_lens,
'kwargs_upper_kin': kwargs_upper_kin,
'kwargs_fixed_cosmo': kwargs_fixed_cosmo,
'kwargs_fixed_lens': kwargs_fixed_lens,
'kwargs_fixed_kin': kwargs_fixed_kin}
cosmology = 'FLCDM' # available models: 'FLCDM', "FwCDM", "w0waCDM", "oLCDM"
mcmc_sampler = MCMCSampler(kwargs_likelihood_list=kwargs_posterior_list,
cosmology=cosmology,
kwargs_bounds=kwargs_bounds,
ppn_sampling=False,
lambda_mst_sampling=False,
lambda_mst_distribution='NONE',
anisotropy_sampling=False,
kappa_ext_sampling=False,
kappa_ext_distribution='NONE',
anisotropy_model='NONE',
anisotropy_distribution='NONE',
custom_prior=None,
interpolate_cosmo=True,
num_redshift_interp=100,
cosmo_fixed=None)
mcmc_samples, log_prob_cosmo = mcmc_sampler.mcmc_emcee(n_walkers, n_run, n_burn, kwargs_mean_start, kwargs_sigma_start)
if samples_save_path is not None:
np.save(samples_save_path, mcmc_samples)
if corner_save_path is not None:
corner.corner(mcmc_samples, show_titles=True, labels=mcmc_sampler.param_names(latex_style=True))
plt.savefig(corner_save_path)
plt.show()
plt.close()
return mcmc_samples, log_prob_cosmo
```
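A short, hypothetical sketch (not from the source) of how the helpers above compose: convert lognormally distributed D_dt samples to H0 with `CosmoConverter` and summarize them. The redshifts and lognormal parameters are made up for illustration.

```python
import numpy as np
# Assumes the module above is importable as h0rton.h0_inference.h0_utils.
from h0rton.h0_inference.h0_utils import CosmoConverter, get_lognormal_stats

z_lens, z_src = 0.5, 2.0  # illustrative redshifts
converter = CosmoConverter(z_lens, z_src)

# Fake D_dt samples around exp(8.1) ~ 3300 Mpc
D_dt_samples = np.random.lognormal(mean=8.1, sigma=0.1, size=10000)
H0_samples = converter.get_H0(D_dt_samples)

stats = get_lognormal_stats(D_dt_samples)
print(stats['mu'], stats['sigma'], np.median(H0_samples))
```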
#### File: h0rton/h0rton/infer_h0_forward_modeling.py
```python
import time
import warnings
import os
from tqdm import tqdm
import gc
from ast import literal_eval
import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader
from lenstronomy.Cosmo.lcdm import LCDM
from lenstronomy.Plots.model_plot import ModelPlot
import baobab.sim_utils.metadata_utils as metadata_utils
from baobab import BaobabConfig
from h0rton.configs import TrainValConfig, TestConfig
import h0rton.script_utils as script_utils
from h0rton.h0_inference import h0_utils, plotting_utils, mcmc_utils
from h0rton.h0_inference.forward_modeling_posterior import ForwardModelingPosterior
from h0rton.trainval_data import XYData
def main():
args = script_utils.parse_inference_args()
test_cfg = TestConfig.from_file(args.test_config_file_path)
baobab_cfg = BaobabConfig.from_file(test_cfg.data.test_baobab_cfg_path)
cfg = TrainValConfig.from_file(test_cfg.train_val_config_file_path)
# Set device and default data type
device = torch.device(test_cfg.device_type)
if device.type == 'cuda':
torch.set_default_tensor_type('torch.cuda.' + cfg.data.float_type)
else:
torch.set_default_tensor_type('torch.' + cfg.data.float_type)
script_utils.seed_everything(test_cfg.global_seed)
############
# Data I/O #
############
# Define val data and loader
test_data = XYData(is_train=False,
Y_cols=cfg.data.Y_cols,
float_type=cfg.data.float_type,
define_src_pos_wrt_lens=cfg.data.define_src_pos_wrt_lens,
rescale_pixels=False,
rescale_pixels_type=None,
log_pixels=False,
add_pixel_noise=cfg.data.add_pixel_noise,
eff_exposure_time={"TDLMC_F160W": test_cfg.data.eff_exposure_time},
train_Y_mean=np.zeros((1, len(cfg.data.Y_cols))),
train_Y_std=np.ones((1, len(cfg.data.Y_cols))),
train_baobab_cfg_path=cfg.data.train_baobab_cfg_path,
val_baobab_cfg_path=test_cfg.data.test_baobab_cfg_path,
for_cosmology=True)
master_truth = test_data.Y_df
master_truth = metadata_utils.add_qphi_columns(master_truth)
master_truth = metadata_utils.add_gamma_psi_ext_columns(master_truth)
# Figure out how many lenses BNN will predict on (must be consecutive)
if test_cfg.data.lens_indices is None:
if args.lens_indices_path is None:
# Test on all n_test lenses in the test set
n_test = test_cfg.data.n_test
lens_range = range(n_test)
else:
# Test on the lens indices in a text file at the specified path
lens_range = []
with open(args.lens_indices_path, "r") as f:
for line in f:
lens_range.append(int(line.strip()))
n_test = len(lens_range)
print("Performing H0 inference on {:d} specified lenses...".format(n_test))
else:
if args.lens_indices_path is None:
# Test on the lens indices specified in the test config file
lens_range = test_cfg.data.lens_indices
n_test = len(lens_range)
print("Performing H0 inference on {:d} specified lenses...".format(n_test))
else:
raise ValueError("Specific lens indices were specified in both the test config file and the command-line argument.")
batch_size = max(lens_range) + 1
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, drop_last=True)
# Output directory into which the H0 histograms and H0 samples will be saved
out_dir = test_cfg.out_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print("Destination folder path: {:s}".format(out_dir))
else:
warnings.warn("Destination folder already exists.")
################
# Compile data #
################
# Image data
with torch.no_grad():
for X_, Y_ in test_loader:
X = X_.to(device)
break
X = X.detach().cpu().numpy()
#############
# MCMC loop #
#############
kwargs_lens_eqn_solver = dict(
min_distance=0.05,
search_window=baobab_cfg.instrument['pixel_scale']*baobab_cfg.image['num_pix'],
num_iter_max=200
)
fm_posterior = ForwardModelingPosterior(kwargs_lens_eqn_solver=kwargs_lens_eqn_solver,
astrometric_sigma=test_cfg.image_position_likelihood.sigma,
supersampling_factor=baobab_cfg.numerics.supersampling_factor)
# Get H0 samples for each system
if not test_cfg.time_delay_likelihood.baobab_time_delays:
if 'abcd_ordering_i' not in master_truth:
raise ValueError("If the time delay measurements were not generated using Baobab, the user must specify the order of image positions in which the time delays are listed, in order of increasing dec.")
total_progress = tqdm(total=n_test)
realized_time_delays = pd.read_csv(test_cfg.error_model.realized_time_delays, index_col=None)
# For each lens system...
for i, lens_i in enumerate(lens_range):
###########################
# Relevant data and prior #
###########################
data_i = master_truth.iloc[lens_i].copy()
lcdm = LCDM(z_lens=data_i['z_lens'], z_source=data_i['z_src'], flat=True)
measured_td_wrt0 = np.array(literal_eval(realized_time_delays.iloc[lens_i]['measured_td_wrt0']))
n_img = len(measured_td_wrt0) + 1
#print(baobab_cfg.survey_object_dict)
fm_posterior.set_kwargs_data_joint(
image=X[lens_i, 0, :, :],
measured_td=measured_td_wrt0,
measured_td_sigma=test_cfg.time_delay_likelihood.sigma,
survey_object_dict=baobab_cfg.survey_object_dict,
eff_exposure_time=test_cfg.data.eff_exposure_time,
)
# Update solver according to number of lensed images
if test_cfg.numerics.solver_type == 'NONE':
fm_posterior.kwargs_constraints['solver_type'] = 'NONE'
else:
fm_posterior.kwargs_constraints['solver_type'] = 'PROFILE_SHEAR' if n_img == 4 else 'ELLIPSE'
fm_posterior.kwargs_constraints['num_point_source_list'] = [n_img]
#print(fm_posterior.kwargs_params['point_source_model'][0][0])
true_D_dt = lcdm.D_dt(H_0=data_i['H0'], Om0=0.3)
# Pull truth param values and initialize walkers there
if test_cfg.numerics.initialize_walkers_to_truth:
fm_posterior.kwargs_lens_init = metadata_utils.get_kwargs_lens_mass(data_i)
fm_posterior.kwargs_lens_light_init = metadata_utils.get_kwargs_lens_light(data_i)
fm_posterior.kwargs_source_init = metadata_utils.get_kwargs_src_light(data_i)
fm_posterior.kwargs_ps_init = metadata_utils.get_kwargs_ps_lensed(data_i)
fm_posterior.kwargs_special_init = dict(D_dt=true_D_dt)
###########################
# MCMC posterior sampling #
###########################
lens_i_start_time = time.time()
#with script_utils.HiddenPrints():
chain_list_mcmc, kwargs_result_mcmc = fm_posterior.run_mcmc(test_cfg.numerics.mcmc)
lens_i_end_time = time.time()
inference_time = (lens_i_end_time - lens_i_start_time)/60.0 # min
#############################
# Plotting the MCMC samples #
#############################
# sampler_type : 'EMCEE'
# samples_mcmc : np.array of shape `[n_mcmc_eval, n_params]`
# param_mcmc : list of str of length n_params, the parameter names
sampler_type, samples_mcmc, param_mcmc, _ = chain_list_mcmc[0]
new_samples_mcmc = mcmc_utils.postprocess_mcmc_chain(kwargs_result_mcmc,
samples_mcmc,
fm_posterior.kwargs_model,
fm_posterior.kwargs_params['lens_model'][2],
fm_posterior.kwargs_params['point_source_model'][2],
fm_posterior.kwargs_params['source_model'][2],
fm_posterior.kwargs_params['special'][2],
fm_posterior.kwargs_constraints,
kwargs_fixed_lens_light=fm_posterior.kwargs_params['lens_light_model'][2],
verbose=False
)
#from lenstronomy.Plots import chain_plot
model_plot = ModelPlot(fm_posterior.multi_band_list,
fm_posterior.kwargs_model, kwargs_result_mcmc, arrow_size=0.02, cmap_string="gist_heat")
plotting_utils.plot_forward_modeling_comparisons(model_plot, out_dir)
# Plot D_dt histogram
D_dt_samples = new_samples_mcmc['D_dt'].values # may contain negative values
data_i['D_dt'] = true_D_dt
# Export D_dt samples for this lens
lens_inference_dict = dict(
D_dt_samples=D_dt_samples, # kappa_ext=0 for these samples
inference_time=inference_time,
true_D_dt=true_D_dt,
)
lens_inference_dict_save_path = os.path.join(out_dir, 'D_dt_dict_{0:04d}.npy'.format(lens_i))
np.save(lens_inference_dict_save_path, lens_inference_dict)
# Optionally export the MCMC samples
if test_cfg.export.mcmc_samples:
mcmc_samples_path = os.path.join(out_dir, 'mcmc_samples_{0:04d}.csv'.format(lens_i))
new_samples_mcmc.to_csv(mcmc_samples_path, index=None)
# Optionally export the D_dt histogram
if test_cfg.export.D_dt_histogram:
cleaned_D_dt_samples = h0_utils.remove_outliers_from_lognormal(D_dt_samples, 3)
_ = plotting_utils.plot_D_dt_histogram(cleaned_D_dt_samples, lens_i, true_D_dt, save_dir=out_dir)
# Optionally export the plot of MCMC chain
if test_cfg.export.mcmc_chain:
mcmc_chain_path = os.path.join(out_dir, 'mcmc_chain_{0:04d}.png'.format(lens_i))
plotting_utils.plot_mcmc_chain(chain_list_mcmc, mcmc_chain_path)
# Optionally export posterior cornerplot of select lens model parameters with D_dt
if test_cfg.export.mcmc_corner:
mcmc_corner_path = os.path.join(out_dir, 'mcmc_corner_{0:04d}.png'.format(lens_i))
plotting_utils.plot_mcmc_corner(new_samples_mcmc[test_cfg.export.mcmc_cols], data_i[test_cfg.export.mcmc_cols], test_cfg.export.mcmc_col_labels, mcmc_corner_path)
total_progress.update(1)
gc.collect()
realized_time_delays.to_csv(os.path.join(out_dir, 'realized_time_delays.csv'), index=None)
total_progress.close()
if __name__ == '__main__':
#import cProfile
#pr = cProfile.Profile()
#pr.enable()
main()
#pr.disable()
#pr.print_stats(sort='cumtime')
```
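The script above writes one `D_dt_dict_*.npy` per lens into `test_cfg.out_dir`. A hypothetical snippet for loading and inspecting one of these output dictionaries (the path is illustrative):

```python
import numpy as np

# Illustrative path; the dictionaries are saved by the forward-modeling script above.
lens_dict = np.load('D_dt_dict_0000.npy', allow_pickle=True).item()
print(sorted(lens_dict.keys()))  # ['D_dt_samples', 'inference_time', 'true_D_dt']
print(lens_dict['true_D_dt'], np.median(lens_dict['D_dt_samples']))
```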
#### File: h0rton/losses/gaussian_nll_cpu.py
```python
from abc import ABC, abstractmethod
import numpy as np
from scipy.special import logsumexp
from h0rton.h0_inference.gaussian_bnn_posterior_cpu import sigmoid, logsigmoid
__all__ = ['BaseGaussianNLLCPU', 'DiagonalGaussianNLLCPU', 'FullRankGaussianNLLCPU', 'DoubleGaussianNLLCPU']
log_2_pi = 1.8378770664093453
log_2 = 0.6931471805599453
class BaseGaussianNLLCPU(ABC):
"""Abstract base class to represent the Gaussian negative log likelihood (NLL).
Gaussian NLLs or mixtures thereof with various forms of the covariance matrix inherit from this class.
"""
def __init__(self, Y_dim):
"""
Parameters
----------
Y_dim : int
number of parameters to predict
"""
self.Y_dim = Y_dim
self.sigmoid = sigmoid
self.logsigmoid = logsigmoid
@abstractmethod
def slice(self, pred):
"""Slice the raw network prediction into meaningful Gaussian parameters
Parameters
----------
pred : np.ndarray of shape `[batch_size, self.out_dim]`
the network prediction
"""
return NotImplemented
@abstractmethod
def __call__(self, pred, target):
"""Evaluate the NLL. Must be overridden by subclasses.
Parameters
----------
pred : np.Tensor
raw network output for the predictions
target : np.Tensor
Y labels
"""
return NotImplemented
def nll_diagonal(self, target, mu, logvar):
"""Evaluate the NLL for single Gaussian with diagonal covariance matrix
Parameters
----------
target : np.ndarray of shape [batch_size, Y_dim]
Y labels
mu : np.ndarray of shape [batch_size, Y_dim]
network prediction of the mu (mean parameter) of the BNN posterior
logvar : np.ndarray of shape [batch_size, Y_dim]
network prediction of the log of the diagonal elements of the covariance matrix
Returns
-------
float
mean NLL across the batch
"""
precision = np.exp(-logvar)
# Loss kernel
loss = precision * (target - mu)**2.0 + logvar
# Restore prefactors
loss += np.log(2.0*np.pi)
loss *= 0.5
return np.mean(np.sum(loss, axis=1), axis=0)
def nll_full_rank(self, target, mu, tril_elements, reduce=True):
"""Evaluate the NLL for a single Gaussian with a full-rank covariance matrix
Parameters
----------
target : np.ndarray of shape [batch_size, Y_dim]
Y labels
mu : np.ndarray of shape [batch_size, Y_dim]
network prediction of the mu (mean parameter) of the BNN posterior
tril_elements : np.ndarray of shape [batch_size, Y_dim*(Y_dim + 1)//2]
network prediction of the lower-triangular elements in the log-Cholesky decomposition of the precision matrix
reduce : bool
whether to take the mean across the batch
Returns
-------
float if `reduce` is True, else np.ndarray of shape [batch_size,]
NLL values
"""
batch_size, _ = target.shape
tril = np.zeros([batch_size, self.Y_dim, self.Y_dim])
tril[:, self.tril_idx[0], self.tril_idx[1]] = tril_elements
log_diag_tril = np.diagonal(tril, offset=0, axis1=1, axis2=2) # [batch_size, Y_dim]
logdet_term = -np.sum(log_diag_tril, axis=1) # [batch_size,]
tril[:, np.eye(self.Y_dim).astype(bool)] = np.exp(log_diag_tril)
prec_mat = np.matmul(tril, np.transpose(tril, [0, 2, 1])) # [batch_size, Y_dim, Y_dim]
y_diff = mu - target # [batch_size, Y_dim]
mahalanobis_term = 0.5*np.sum(
y_diff*np.sum(prec_mat*np.expand_dims(y_diff, -1), axis=-2), axis=-1) # [batch_size,]
loss = logdet_term + mahalanobis_term + 0.5*self.Y_dim*log_2_pi
if reduce:
return np.mean(loss, axis=0) # float
else:
return loss # [batch_size,]
def nll_mixture(self, target, mu, tril_elements, mu2, tril_elements2, alpha):
"""Evaluate the NLL for a single Gaussian with a full but low-rank plus diagonal covariance matrix
Parameters
----------
target : np.Tensor of shape [batch_size, Y_dim]
Y labels
mu : np.Tensor of shape [batch_size, Y_dim]
network prediction of the mu (mean parameter) of the BNN posterior for the first Gaussian
tril_elements : np.Tensor of shape [batch_size, self.tril_len]
network prediction of the elements in the precision matrix
mu2 : np.Tensor of shape [batch_size, Y_dim]
network prediction of the mu (mean parameter) of the BNN posterior for the second Gaussian
tril_elements2 : np.Tensor of shape [batch_size, self.tril_len]
network prediction of the elements in the precision matrix for the second Gaussian
alpha : np.Tensor of shape [batch_size, 1]
network prediction of the logit of twice the weight on the second Gaussian
Note
----
The weight on the second Gaussian is required to be less than 0.5, to make the two Gaussians well-defined.
Returns
-------
float
mean NLL across the batch
"""
batch_size, _ = target.shape
log_ll = np.empty([batch_size, 2])
alpha = alpha.reshape(-1)
log_ll[:, 0] = np.log1p(2.0*np.exp(-alpha)) - log_2 - np.log1p(np.exp(-alpha)) - self.nll_full_rank(target, mu, tril_elements, reduce=False) # [batch_size]
log_ll[:, 1] = -log_2 + self.logsigmoid(alpha) - self.nll_full_rank(target, mu2, tril_elements2, reduce=False) # [batch_size], 0.6931471 = np.log(2)
log_nll = -logsumexp(log_ll, axis=1)
return np.mean(log_nll)
class DiagonalGaussianNLLCPU(BaseGaussianNLLCPU):
"""The negative log likelihood (NLL) for a single Gaussian with diagonal covariance matrix
See `BaseGaussianNLLCPU.__init__` docstring for the parameter description.
"""
posterior_name = 'DiagonalGaussianBNNPosterior'
def __init__(self, Y_dim):
super(DiagonalGaussianNLLCPU, self).__init__(Y_dim)
self.out_dim = Y_dim*2
def __call__(self, pred, target):
sliced = self.slice(pred)
return self.nll_diagonal(target, **sliced)
def slice(self, pred):
d = self.Y_dim # for readability
sliced = dict(
mu=pred[:, :d],
logvar=pred[:, d:]
)
return sliced
class FullRankGaussianNLLCPU(BaseGaussianNLLCPU):
"""The negative log likelihood (NLL) for a single Gaussian with a full-rank covariance matrix
See `BaseGaussianNLLCPU.__init__` docstring for the parameter description.
"""
posterior_name = 'FullRankGaussianBNNPosterior'
def __init__(self, Y_dim):
super(FullRankGaussianNLLCPU, self).__init__(Y_dim)
self.tril_idx = np.tril_indices(self.Y_dim) # lower-triangular indices
self.tril_len = len(self.tril_idx[0])
self.out_dim = self.Y_dim + self.Y_dim*(self.Y_dim + 1)//2
def __call__(self, pred, target):
sliced = self.slice(pred)
return self.nll_full_rank(target, **sliced, reduce=True)
def slice(self, pred):
d = self.Y_dim # for readability
sliced = dict(
mu=pred[:, :d],
tril_elements=pred[:, d:d+self.tril_len]
)
return sliced
class DoubleGaussianNLLCPU(BaseGaussianNLLCPU):
"""The negative log likelihood (NLL) for a mixture of two Gaussians, each with a full but constrained as low-rank plus diagonal covariance
Only rank 2 is currently supported. `BaseGaussianNLLCPU.__init__` docstring for the parameter description.
"""
posterior_name = 'DoubleGaussianBNNPosterior'
def __init__(self, Y_dim):
super(DoubleGaussianNLLCPU, self).__init__(Y_dim)
self.tril_idx = np.tril_indices(self.Y_dim) # lower-triangular indices
self.tril_len = len(self.tril_idx[0])
self.out_dim = self.Y_dim**2 + 3*self.Y_dim + 1
def __call__(self, pred, target):
sliced = self.slice(pred)
return self.nll_mixture(target, **sliced)
def slice(self, pred):
d = self.Y_dim # for readability
sliced = dict(
mu=pred[:, :d],
tril_elements=pred[:, d:d+self.tril_len],
mu2=pred[:, d+self.tril_len:2*d+self.tril_len],
tril_elements2=pred[:, 2*d+self.tril_len:-1],
alpha=pred[:, -1]
)
return sliced
```
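As a sanity check, the diagonal NLL above should agree with the average negative log-density of independent Gaussians. A hypothetical test sketch (not part of the source test suite); the module path follows the file header above.

```python
import numpy as np
from scipy.stats import multivariate_normal
from h0rton.losses.gaussian_nll_cpu import DiagonalGaussianNLLCPU

Y_dim, batch_size = 2, 4
rng = np.random.RandomState(0)
target = rng.randn(batch_size, Y_dim)
mu = rng.randn(batch_size, Y_dim)
logvar = rng.randn(batch_size, Y_dim)
pred = np.concatenate([mu, logvar], axis=1)  # [mu | logvar], as slice() expects

nll = DiagonalGaussianNLLCPU(Y_dim)(pred, target)
# Direct evaluation with scipy: average negative log pdf over the batch
direct = -np.mean([multivariate_normal.logpdf(target[i], mean=mu[i],
                                              cov=np.diag(np.exp(logvar[i])))
                   for i in range(batch_size)])
print(np.isclose(nll, direct))  # expected: True
```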
#### File: h0rton/h0rton/summarize.py
```python
import os
import numpy as np
import pandas as pd
import argparse
import scipy.stats
from baobab.configs import BaobabConfig
from h0rton.configs import TestConfig
import h0rton.h0_inference.h0_utils as h0_utils
import h0rton.tdlmc_utils as tdlmc_utils
def parse_args():
"""Parse command-line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('version_id', help='version ID', type=int)
parser.add_argument('sampling_method', help='the sampling method (one of simple_mc_default, mcmc_default, hybrid)', type=str)
parser.add_argument('--rung_idx', help='the TDLMC rung index, if H0rton was run on TDLMC data', type=int, default=None)
args = parser.parse_args()
return args
def main():
args = parse_args()
# Folder where all the H0 samples live
samples_dir = '/home/jwp/stage/sl/h0rton/experiments/v{:d}/{:s}'.format(args.version_id, args.sampling_method)
# Read in test cfg for this version and sampling method
test_cfg_path = os.path.join(samples_dir, '..', '{:s}.json'.format(args.sampling_method))
test_cfg = TestConfig.from_file(test_cfg_path)
if 'mcmc_default' in args.sampling_method:
summarize_mcmc(samples_dir, test_cfg, 'mcmc_default', args.rung_idx)
elif args.sampling_method == 'hybrid':
summarize_mcmc(samples_dir, test_cfg, 'hybrid')
elif args.sampling_method == 'simple_mc_default':
summarize_simple_mc_default(samples_dir, test_cfg)
else:
raise ValueError("This sampling method is not supported. Choose one of [simple_mc_default, mcmc_default, hybrid].")
def summarize_simple_mc_default(samples_dir, test_cfg):
"""Summarize the output of simple_mc_default, i.e. the uniform H0 samples with corresponding weights
"""
H0_dicts = [f for f in os.listdir(samples_dir) if f.startswith('h0_dict')]
H0_dicts.sort()
# Read in the redshift columns of metadata
baobab_cfg = BaobabConfig.from_file(test_cfg.data.test_baobab_cfg_path)
metadata_path = os.path.join(baobab_cfg.out_dir, 'metadata.csv')
meta = pd.read_csv(metadata_path, index_col=None, usecols=['z_lens', 'z_src', 'n_img'])
summary_df = pd.DataFrame() # instantiate empty dataframe for storing summary
for i, f_name in enumerate(H0_dicts):
lens_i = int(os.path.splitext(f_name)[0].split('h0_dict_')[1])
# Slice meta for this lensing system
meta_i = meta.iloc[lens_i]
z_lens = meta_i['z_lens']
z_src = meta_i['z_src']
n_img = meta_i['n_img']
# Read in H0 samples using lens identifier
H0_dict = np.load(os.path.join(samples_dir, f_name), allow_pickle=True).item()
H0_samples = H0_dict['h0_samples']
weights = H0_dict['h0_weights']
H0_normal_stats = h0_utils.get_normal_stats_naive(H0_samples, weights)
n_eff = np.sum(weights)**2.0/(np.sum(weights**2.0))
# Convert H0 H0_samples to D_dt
cosmo_converter = h0_utils.CosmoConverter(z_lens, z_src)
D_dt_samples = cosmo_converter.get_D_dt(H0_samples)
D_dt_stats = h0_utils.get_lognormal_stats_naive(D_dt_samples, weights)
D_dt_normal_stats = h0_utils.get_normal_stats_naive(D_dt_samples, weights)
summary_i = dict(
id=lens_i,
measured_td_wrt0=list(H0_dict['measured_td_wrt0']),
H0_mean=H0_normal_stats['mean'],
H0_std=H0_normal_stats['std'],
D_dt_mu=D_dt_stats['mu'],
D_dt_sigma=D_dt_stats['sigma'],
D_dt_mean=D_dt_normal_stats['mean'],
D_dt_std=D_dt_normal_stats['std'],
n_eff=n_eff,
z_lens=z_lens,
z_src=z_src,
n_img=n_img,
inference_time=H0_dict['inference_time'],
)
summary_df = summary_df.append(summary_i, ignore_index=True)
summary_df.to_csv(os.path.join(samples_dir, '..', 'summary.csv'))
# Output list of problem lens IDs
problem_id = summary_df.loc[(summary_df['n_eff'] < 3) | (summary_df['H0_std'] < 1.0)]['id'].astype(int)
with open(os.path.join(samples_dir, '..', "mcmc_default_candidates.txt"), "w") as f:
for pid in problem_id:
f.write(str(pid) +"\n")
def summarize_mcmc(samples_dir, test_cfg, sampling_method, rung_idx=None):
"""Summarize the output of mcmc_default, i.e. MCMC samples from the D_dt posterior for each lens
"""
true_H0 = 70.0
true_Om0 = 0.3
if 'mcmc_default' in sampling_method:
if rung_idx is None:
# Read in the relevant columns of metadata,
baobab_cfg = BaobabConfig.from_file(test_cfg.data.test_baobab_cfg_path)
metadata_path = os.path.join(baobab_cfg.out_dir, 'metadata.csv')
summary_df = pd.read_csv(metadata_path, index_col=None, usecols=['z_lens', 'z_src', 'n_img'], nrows=500) # FIXME: capped test set size at 500, as the stored dataset may be much larger
else:
summary_df = tdlmc_utils.convert_to_dataframe(rung=rung_idx, save_csv_path=None)
summary_df.sort_values('seed', axis=0, inplace=True)
true_H0 = summary_df.iloc[0]['H0']
true_Om0 = 0.27
summary_df['id'] = summary_df.index
summary_df['D_dt_mu'] = np.nan
summary_df['D_dt_sigma'] = np.nan
summary_df['H0_mean'] = np.nan
summary_df['H0_std'] = np.nan
summary_df['inference_time'] = 0.0
else:
summary_df = pd.read_csv(os.path.join(samples_dir, '..', 'summary.csv'), index_col=None)
D_dt_dicts = [f for f in os.listdir(samples_dir) if f.startswith('D_dt_dict')]
D_dt_dicts.sort()
oversampling = 20
threshold = 1000
# Initialize list for catastrophic lenses not solved by MCMC
lenses_to_rerun = []
lenses_run = []
for i, f_name in enumerate(D_dt_dicts):
lens_i = int(os.path.splitext(f_name)[0].split('D_dt_dict_')[1])
lenses_run.append(lens_i)
meta = summary_df.loc[summary_df['id']==lens_i, ['z_lens', 'z_src']].squeeze()
# Read in D_dt samples using lens identifier
D_dt_dict = np.load(os.path.join(samples_dir, f_name), allow_pickle=True).item()
# Rescale D_dt samples to correct for k_ext
uncorrected_D_dt_samples = D_dt_dict['D_dt_samples'] # [old_n_samples,]
uncorrected_D_dt_samples = h0_utils.remove_outliers_from_lognormal(uncorrected_D_dt_samples, 3).reshape(-1, 1) # [n_samples, 1]
k_ext_rv = getattr(scipy.stats, test_cfg.kappa_ext_prior.dist)(**test_cfg.kappa_ext_prior.kwargs)
k_ext = k_ext_rv.rvs(size=[len(uncorrected_D_dt_samples), oversampling]) # [n_samples, oversampling]
if test_cfg.kappa_ext_prior.transformed:
D_dt_samples = (uncorrected_D_dt_samples*k_ext).flatten()
else:
D_dt_samples = (uncorrected_D_dt_samples/(1.0 - k_ext)).flatten() # [n_samples,]
# Compute lognormal params for D_dt and update summary
try:
D_dt_stats = h0_utils.get_lognormal_stats(D_dt_samples)
D_dt_normal_stats = h0_utils.get_normal_stats(D_dt_samples)
except Exception:
print("lens", lens_i)
print("==========")
lenses_to_rerun.append(lens_i)
continue  # skip the summary update for this catastrophic lens
summary_df.loc[summary_df['id']==lens_i, 'D_dt_mu'] = D_dt_stats['mu']
summary_df.loc[summary_df['id']==lens_i, 'D_dt_sigma'] = D_dt_stats['sigma']
summary_df.loc[summary_df['id']==lens_i, 'D_dt_mean'] = D_dt_normal_stats['mean']
summary_df.loc[summary_df['id']==lens_i, 'D_dt_std'] = D_dt_normal_stats['std']
# Convert D_dt samples to H0
D_dt_samples = scipy.stats.lognorm.rvs(scale=np.exp(D_dt_stats['mu']), s=D_dt_stats['sigma'], size=oversampling*threshold)
D_dt_samples = D_dt_samples[np.isfinite(D_dt_samples)]
cosmo_converter = h0_utils.CosmoConverter(meta['z_lens'], meta['z_src'], H0=true_H0, Om0=true_Om0)
H0_samples = cosmo_converter.get_H0(D_dt_samples)
# Reject H0 samples outside H0 prior
H0_samples = H0_samples[np.isfinite(H0_samples)]
if len(H0_samples) > 0:
H0_samples = H0_samples[np.logical_and(H0_samples > 50.0, H0_samples < 90.0)]
if len(H0_samples) < threshold:
lenses_to_rerun.append(lens_i)
summary_df.loc[summary_df['id']==lens_i, 'H0_mean'] = np.mean(H0_samples)
summary_df.loc[summary_df['id']==lens_i, 'H0_std'] = np.std(H0_samples)
summary_df.loc[summary_df['id']==lens_i, 'inference_time'] += D_dt_dict['inference_time']
# Replace existing summary
summary_df.to_csv(os.path.join(samples_dir, '..', 'summary.csv'))
# Output list of catastrophic/no-good lens IDs
if sampling_method == 'mcmc_default':
# List of lenses that skipped MCMC
total_lenses = np.arange(test_cfg.data.n_test)
lenses_not_run = set(list(total_lenses)) - set(list(lenses_run))
lenses_for_hybrid = list(lenses_not_run.union(set(lenses_to_rerun)))
with open(os.path.join(samples_dir, '..', "hybrid_candidates.txt"), "w") as f:
for lens_i in lenses_for_hybrid:
f.write(str(lens_i) +"\n")
else: # hybrid case
with open(os.path.join(samples_dir, '..', "no_good_candidates.txt"), "w") as f:
for lens_i in lenses_to_rerun:
f.write(str(lens_i) +"\n")
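# Hedged usage sketch (not part of the original script): assuming a test
# configuration object `test_cfg` has already been constructed elsewhere (not
# shown here), the D_dt MCMC output of a default-MCMC run could be summarized
# as below; the samples directory and rung index are made up for illustration.
# summarize_mcmc('experiments/out_dir/mcmc_samples', test_cfg, 'mcmc_default', rung_idx=2)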
if __name__ == '__main__':
main()
```
#### File: h0rton/tdlmc_utils/reorder_images.py
```python
import numpy as np
__all__ = ['reorder_to_tdlmc']
def reorder_to_tdlmc(abcd_ordering_i, ra_img, dec_img, time_delays):
"""Reorder the list of ra, dec, and time delays to conform to the
order in the TDLMC challenge
Parameters
----------
abcd_ordering_i : array-like
indices that map the images, sorted in increasing dec, onto the ABCD order; A, B, C, D correspond to 0, 1, 2, 3, e.g. [3, 1, 0, 2] if D (value 3) has the lowest dec and B (value 1) the second lowest
ra_img : array-like
list of ra from lenstronomy
dec_img : array-like
list of dec from lenstronomy, in the order specified by `ra_img`
time_delays : array-like
list of time delays from lenstronomy, in the order specified by `ra_img`
Returns
-------
tuple
tuple of (reordered ra, reordered_dec, reordered time delays)
"""
ra_img = np.array(ra_img)
dec_img = np.array(dec_img)
time_delays = np.array(time_delays)
# Order ra_pos, dec_pos, time_delays in increasing dec order
increasing_dec_i = np.argsort(dec_img)
ra_img = ra_img[increasing_dec_i]
dec_img = dec_img[increasing_dec_i]
time_delays = time_delays[increasing_dec_i]
# Reorder to get it in ABCD
ra_img = ra_img[abcd_ordering_i]
dec_img = dec_img[abcd_ordering_i]
time_delays = time_delays[abcd_ordering_i]
return (ra_img, dec_img, time_delays)
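if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): reorder made-up
    # lenstronomy outputs into the TDLMC ABCD convention. The positions, time
    # delays, and abcd_ordering_i below are purely illustrative.
    ra = [0.1, -0.2, 0.3, 0.0]
    dec = [0.5, -0.4, 0.1, -0.1]
    dt = [0.0, 1.2, 2.3, 3.1]
    ra_abcd, dec_abcd, dt_abcd = reorder_to_tdlmc([3, 1, 0, 2], ra, dec, dt)
    print(ra_abcd, dec_abcd, dt_abcd)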
```
#### File: tests/test_configs/test_train_val_config.py
```python
import numpy as np
import unittest
import copy
from h0rton.configs import TrainValConfig
class TestTrainValConfig(unittest.TestCase):
"""A suite of tests for TrainValConfig
"""
@classmethod
def setUpClass(cls):
cls.train_val_dict = dict(
data=dict(
),
monitoring=dict(
n_plotting=20
),
model=dict(
likelihood_class='DoubleGaussianNLL'
),
optim=dict(
batch_size=100
)
)
def test_train_val_config_constructor(self):
"""Test the instantiation of TrainValConfig from a dictionary with minimum required keys
"""
train_val_dict = copy.deepcopy(self.train_val_dict)
train_val_dict['data']['train_baobab_cfg_path'] = 'some_path'
train_val_dict['data']['val_baobab_cfg_path'] = 'some_other_path'
train_val_cfg = TrainValConfig(train_val_dict)
def test_train_val_absent(self):
"""Test if an error is raised when either the train or val baobab config is not passed in
"""
train_val_dict = copy.deepcopy(self.train_val_dict)
train_val_dict['data']['val_baobab_cfg_path'] = 'some_path'
with np.testing.assert_raises(ValueError):
train_val_cfg = TrainValConfig(train_val_dict)
train_val_dict = copy.deepcopy(self.train_val_dict)
train_val_dict['data']['train_baobab_cfg_path'] = 'some_path'
with np.testing.assert_raises(ValueError):
train_val_cfg = TrainValConfig(train_val_dict)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/test_trainval_data/test_data_utils.py
```python
import os
import shutil
import unittest
import numpy as np
import torch
import pandas as pd
from addict import Dict
from torch.utils.data import DataLoader
from h0rton.trainval_data import XYData
import h0rton.trainval_data.data_utils as data_utils
class TestDataUtils(unittest.TestCase):
""""A suite of tests for data utility functions
"""
@classmethod
def setUpClass(cls):
cls.Y_cols = ["lens_mass_center_x", "src_light_center_x","lens_mass_center_y", "src_light_center_y", "external_shear_gamma_ext", "external_shear_psi_ext"]
cls.train_Y_mean = np.random.randn(len(cls.Y_cols))
cls.train_Y_std = np.abs(np.random.randn(len(cls.Y_cols))) + 1.0
cls.img_numpy = np.abs(np.random.randn(50*50)*2.0).reshape([1, 50, 50])
cls.img_torch = torch.from_numpy(cls.img_numpy)
n_data = 2
cls.metadata = pd.DataFrame.from_dict({"lens_mass_center_x": np.random.randn(n_data),
"lens_mass_center_y": np.random.randn(n_data),
"src_light_center_x": np.random.randn(n_data),
"src_light_center_y": np.random.randn(n_data),
"external_shear_gamma_ext": np.random.randn(n_data),
"external_shear_psi_ext": np.random.randn(n_data)
})
def test_whiten_pixels(self):
"""Test the torch pixel whitening vs. numpy
"""
actual = data_utils.whiten_pixels(self.img_torch)
expected = (self.img_numpy - np.mean(self.img_numpy))/np.std(self.img_numpy, ddof=1)
np.testing.assert_array_almost_equal(actual, expected, err_msg='test_whiten_pixels')
def test_asinh(self):
"""Test the torch asinh approximation vs. numpy
"""
actual = data_utils.asinh(self.img_torch)
expected = np.arcsinh(self.img_numpy)
np.testing.assert_array_almost_equal(actual, expected, err_msg='test_asinh')
def test_plus_1_log(self):
"""Test the torch log(1+X) vs. numpy
"""
actual = data_utils.plus_1_log(self.img_torch)
expected = np.log1p(self.img_numpy)
np.testing.assert_array_almost_equal(actual, expected, err_msg='test_plus_1_log')
def test_rescale_01(self):
"""Test the torch minmax stretching vs. numpy
"""
actual = data_utils.rescale_01(self.img_torch)
expected = (self.img_numpy - self.img_numpy.min())/(self.img_numpy.max() - self.img_numpy.min())
np.testing.assert_array_almost_equal(actual, expected, err_msg='test_rescale_01')
def test_whiten_Y_cols(self):
"""Test the Y whitening in pandas vs. numpy
"""
# All columns
actual = self.metadata.copy()
data_utils.whiten_Y_cols(actual, self.train_Y_mean, self.train_Y_std, self.Y_cols)
expected = (self.metadata[self.Y_cols].values - self.train_Y_mean.reshape([1, -1]))/self.train_Y_std.reshape([1, -1])
np.testing.assert_array_almost_equal(actual[self.Y_cols].values, expected, err_msg='test_whiten_Y_cols')
# Subset of columns
actual = self.metadata.copy()
subset_train_Y_mean = self.train_Y_mean[:3]
subset_train_Y_std = self.train_Y_std[:3]
subset_Y_cols = self.Y_cols[:3]
data_utils.whiten_Y_cols(actual, subset_train_Y_mean, subset_train_Y_std, subset_Y_cols)
expected = (self.metadata[subset_Y_cols].values - subset_train_Y_mean.reshape([1, -1]))/subset_train_Y_std.reshape([1, -1])
np.testing.assert_array_almost_equal(actual[subset_Y_cols].values, expected, err_msg='test_whiten_Y_cols with a subset of the columns')
if __name__ == '__main__':
unittest.main()
```
#### File: h0rton/train_utils/checkpoint_utils.py
```python
import os
import numpy as np
import random
import datetime
import torch
__all__ = ['save_state_dict', 'load_state_dict', 'load_state_dict_test']
def save_state_dict(model, optimizer, lr_scheduler, train_loss, val_loss, checkpoint_dir, model_architecture, epoch_idx):
"""Save the state dict of the current training to disk
Parameters
----------
model : torch model
trained model to save
optimizer : torch.optim object
lr_scheduler: torch.optim.lr_scheduler object
train_loss : float
current training loss to record in the checkpoint
val_loss : float
current validation loss to record in the checkpoint
checkpoint_dir : str or os.path object
directory into which to save the model
model_architecture : str
type of architecture
epoch_idx : int
epoch index
Returns
-------
str or os.path object
path to the saved model
"""
state = dict(
model=model.state_dict(),
optimizer=optimizer.state_dict(),
lr_scheduler=lr_scheduler.state_dict(),
epoch=epoch_idx,
train_loss=train_loss,
val_loss=val_loss,
)
time_stamp = datetime.datetime.now().strftime("epoch={:d}_%m-%d-%Y_%H:%M".format(epoch_idx))
model_fname = '{:s}_{:s}.mdl'.format(model_architecture, time_stamp)
model_path = os.path.join(checkpoint_dir, model_fname)
torch.save(state, model_path)
return model_path
def load_state_dict(checkpoint_path, model, optimizer, n_epochs, device, lr_scheduler=None):
"""Load the state dict of the past training
Parameters
----------
checkpoint_path : str or os.path object
path of the state dict to load
model : torch model
trained model to save
optimizer : torch.optim object
lr_scheduler: torch.optim.lr_scheduler object
n_epochs : int
total number of epochs to train
device : torch.device object
device on which to load the model
Returns
-------
tuple
(epoch index, model, optimizer, training loss, validation loss) restored from the checkpoint
"""
state = torch.load(checkpoint_path, map_location=device)  # load tensors onto the requested device
model.load_state_dict(state['model'])
model.to(device)
optimizer.load_state_dict(state['optimizer'])
if lr_scheduler is not None:
lr_scheduler.load_state_dict(state['lr_scheduler'])
epoch = state['epoch']
train_loss = state['train_loss']
val_loss = state['val_loss']
print("Loaded weights at {:s}".format(checkpoint_path))
print("Epoch [{}/{}]: TRAIN Loss: {:.4f}".format(epoch+1, n_epochs, train_loss))
print("Epoch [{}/{}]: VALID Loss: {:.4f}".format(epoch+1, n_epochs, val_loss))
return epoch, model, optimizer, train_loss, val_loss
def load_state_dict_test(checkpoint_path, model, n_epochs, device):
"""Load the state dict of the past training
Parameters
----------
checkpoint_path : str or os.path object
path of the state dict to load
model : torch model
model into which to load the trained weights
n_epochs : int
total number of epochs to train
device : torch.device object
device on which to load the model
Returns
-------
tuple
(model with the loaded weights, epoch index of the checkpoint)
"""
state = torch.load(checkpoint_path, map_location=device)  # load tensors onto the requested device
model.load_state_dict(state['model'])
model.to(device)
epoch = state['epoch']
train_loss = state['train_loss']
val_loss = state['val_loss']
print("Loaded weights at {:s}".format(checkpoint_path))
print("Epoch [{}/{}]: TRAIN Loss: {:.4f}".format(epoch+1, n_epochs, train_loss))
print("Epoch [{}/{}]: VALID Loss: {:.4f}".format(epoch+1, n_epochs, val_loss))
return model, epoch
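if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): exercise the
    # save/load round trip on a throwaway model. The architecture label,
    # losses, and './checkpoints' directory are illustrative assumptions.
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
    os.makedirs('./checkpoints', exist_ok=True)
    path = save_state_dict(model, optimizer, lr_scheduler, train_loss=0.5,
                           val_loss=0.6, checkpoint_dir='./checkpoints',
                           model_architecture='toy_linear', epoch_idx=0)
    epoch, model, optimizer, train_loss, val_loss = load_state_dict(
        path, model, optimizer, n_epochs=1, device=torch.device('cpu'),
        lr_scheduler=lr_scheduler)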
```
#### File: h0rton/trainval_data/data_utils.py
```python
import numpy as np
import torch
__all__ = ['rescale_01', 'whiten_Y_cols', 'plus_1_log', 'asinh', 'whiten_pixels', 'log_parameterize_Y_cols']
def whiten_pixels(pixels):
return (pixels - torch.mean(pixels))/torch.std(pixels)
def asinh(x):
return torch.log(x+(x**2+1)**0.5)
def plus_1_log(linear):
"""Add 1 and take the log10 of an image
Parameters
----------
linear : torch.Tensor of shape `[X_dim, X_dim]`
Returns
-------
torch.Tensor
the image of the same input shape, with values now logged
"""
return torch.log1p(linear)
def rescale_01(unscaled):
"""Rescale an image of unknown range to values between 0 and 1
Parameters
----------
unscaled : torch.Tensor of shape `[X_dim, X_dim]`
Returns
-------
torch.Tensor
the image of the same input shape, with values now scaled between 0 and 1
"""
return (unscaled - unscaled.min())/(unscaled.max() - unscaled.min())
def whiten_Y_cols(df, mean, std, col_names):
"""Whiten (in place) select columns in the given dataframe, i.e. shift and scale then so that they have the desired mean and std
Parameters
----------
df : pd.DataFrame
mean : array-like
target mean
std : array-like
target std
col_names : list
names of columns to whiten
"""
df.loc[:, col_names] = (df.loc[:, col_names].values - mean)/std
#return df
def log_parameterize_Y_cols(df, col_names):
"""Whiten (in place) select columns in the given dataframe, i.e. shift and scale then so that they have the desired mean and std
Parameters
----------
df : pd.DataFrame
mean : array-like
target mean
std : array-like
target std
col_names : list
names of columns to whiten
"""
df.loc[:, col_names] = np.log(df.loc[:, col_names].values)
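if __name__ == '__main__':
    import pandas as pd
    # Hedged usage sketch (not part of the original module): log-parameterize
    # a strictly positive column, then whiten both columns in place. Column
    # names and the toy values below are made up for illustration.
    df = pd.DataFrame({'theta_E': [1.0, 1.2, 0.9], 'gamma': [1.9, 2.0, 2.1]})
    log_parameterize_Y_cols(df, col_names=['theta_E'])
    whiten_Y_cols(df,
                  mean=df[['theta_E', 'gamma']].mean().values,
                  std=df[['theta_E', 'gamma']].std().values,
                  col_names=['theta_E', 'gamma'])
    print(df)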
``` |
{
"source": "jiwoncpark/helio-ai",
"score": 4
} |
#### File: aracle/toy_data/toy_squares.py
```python
import numpy as np
import matplotlib.pyplot as plt
class ToySquares:
"""A set of squares that grow and shift to the right over time
Parameters
----------
canvas_size : int
size of the canvas on which the toy squares fall, in pixels
n_objects : int
number of toy squares to spawn
"""
def __init__(self, canvas_size, n_objects):
self.canvas_size = canvas_size
self.n_objects = n_objects
self.initialize_positions()
self.initialize_sizes()
self.set_growth_rates()
self.rightward_shift = 2 # pixels
def initialize_positions(self):
"""Initialize the initial positions of the squares, with respect to the lower left of the square
"""
# Initialize x on the left half, so it doesn't fall out of bounds too quickly as it moves rightward across the canvas
self.x_pos = (np.random.rand(self.n_objects)*self.canvas_size*0.5).astype(int)
self.y_pos = (np.random.rand(self.n_objects)*self.canvas_size).astype(int)
self.in_canvas = np.ones(self.n_objects).astype(bool)  # tracks which squares remain within the canvas
def initialize_sizes(self):
"""Initialize the initial sizes of the squares, as the number of pixels per edge
"""
allowed_sizes = np.arange(1, 5)
prob = np.ones(len(allowed_sizes))
prob /= np.sum(prob)
sizes = np.random.choice(allowed_sizes, size=self.n_objects, p=prob, replace=True)
self.x_sizes = sizes
self.y_sizes = sizes
def set_growth_rates(self):
"""Randomly set the size increase that is applied every time step
"""
allowed_growth_rates = np.arange(1, 3)
prob = np.ones(len(allowed_growth_rates))
prob /= np.sum(prob)
self.growth_rates = np.random.choice(allowed_growth_rates, size=self.n_objects, p=prob, replace=True)
def increment_time_step(self):
"""Advance one time step, updating object properties accordingly
"""
self.grow()
self.shift_right()
self.update_in_canvas()
def grow(self):
"""Grow the sizes of the objects by their respective growth rates
"""
self.x_sizes += self.growth_rates
self.y_sizes += self.growth_rates
def shift_right(self):
"""Shift the objects to the right by two pixels
"""
self.x_pos += self.rightward_shift
def update_in_canvas(self):
"""Evaluate whether the objects fall within the canvas and, if they get truncated by the canvas bounds, what the effective sizes are
"""
self.x_sizes = np.minimum(self.x_sizes, self.canvas_size - self.x_pos)
self.y_sizes = np.minimum(self.y_sizes, self.canvas_size - self.y_pos)
x_in_canvas = (self.x_sizes > 0.0)
y_in_canvas = (self.y_sizes > 0.0)
self.in_canvas = np.logical_and(x_in_canvas, y_in_canvas)
def export_image(self, img_path):
"""Export the current object states to disk as an npy file
Parameters
----------
img_path : str or os.path object
path of image file to be saved
"""
canvas = np.zeros((self.canvas_size, self.canvas_size))
for obj in range(self.n_objects):
canvas[self.x_pos[obj]:self.x_pos[obj] + self.x_sizes[obj],
self.y_pos[obj]:self.y_pos[obj] + self.y_sizes[obj]] = 1.0
np.save(img_path, canvas.T) # transpose b/c numpy indexing conventions
if __name__ == '__main__':
toy_squares = ToySquares(canvas_size=224, n_objects=3)
toy_squares.increment_time_step()
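# Hedged extension of the demo above (not in the original): advance one more
# step and write the current frame to disk; the output filename is made up.
toy_squares.increment_time_step()
toy_squares.export_image('toy_frame.npy')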
``` |
{
"source": "jiwoncpark/hierArc",
"score": 3
} |
#### File: Likelihood/LensLikelihood/mag_likelihood.py
```python
import numpy as np
class MagnificationLikelihood(object):
"""
likelihood of an unlensed apparent source magnification given a measurement of the magnified brightness
This can, e.g., be applied to lensed SNIa at the population level
"""
def __init__(self, amp_measured, cov_amp_measured, mag_model, cov_model):
"""
:param amp_measured: array, amplitudes of measured fluxes of image positions
:param cov_amp_measured: 2d array, error covariance matrix of the measured amplitudes
:param mag_model: mean magnification of the model prediction
:param cov_model: 2d array (image amplitudes); model lensing magnification covariances
"""
self._data_vector = amp_measured
self._cov_amp_measured = np.array(cov_amp_measured)
# check sizes of covariances matches
n_tot = len(self._data_vector)
assert n_tot == len(cov_model)
self._cov_data = self._cov_amp_measured
self._model_tot = np.array(mag_model)
self._cov_model = cov_model
self.num_data = n_tot
def log_likelihood(self, mu_intrinsic):
"""
:param mu_intrinsic: intrinsic brightness of the source (already incorporating the inverse MST transform)
:return: log likelihood of the measured magnified images given the source brightness
"""
model_vector, cov_tot = self._scale_model(mu_intrinsic)
# invert matrix
try:
cov_tot_inv = np.linalg.inv(cov_tot)
except:
return -np.inf
# difference to data vector
delta = self._data_vector - model_vector
# evaluate likelihood
lnlikelihood = -delta.dot(cov_tot_inv.dot(delta)) / 2.
sign_det, lndet = np.linalg.slogdet(cov_tot)
lnlikelihood -= 1 / 2. * (self.num_data * np.log(2 * np.pi) + lndet)
return lnlikelihood
def _scale_model(self, mu_intrinsic):
"""
:param mu_intrinsic: intrinsic brightness of the source (already incorporating the inverse MST transform)
:return:
"""
# compute model predicted magnified image amplitude and time delay
model_vector = mu_intrinsic * self._model_tot
# scale model covariance matrix with model_scale vector (in quadrature)
cov_model = self._cov_model * mu_intrinsic ** 2
# combine data and model covariance matrix
cov_tot = self._cov_data + cov_model
return model_vector, cov_tot
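if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): evaluate the
    # likelihood for a doubly imaged source. All numbers below are made up.
    amp_measured = np.array([12.0, 8.0])
    cov_amp_measured = np.diag([0.5, 0.3]) ** 2
    mag_model = np.array([6.0, 4.0])
    cov_model = np.diag([0.2, 0.2]) ** 2
    likelihood = MagnificationLikelihood(amp_measured, cov_amp_measured,
                                         mag_model, cov_model)
    print(likelihood.log_likelihood(mu_intrinsic=2.0))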
``` |
{
"source": "jiwoncpark/laggy-light",
"score": 2
} |
#### File: laggylight/utils/units_utils.py
```python
from __future__ import absolute_import, division, print_function
import numpy as np
__all__ = ['scale_mag_as_flux', 'flux_to_mag', 'mag_to_flux',]
def scale_mag_as_flux(mag, flux_scale=1.0):
"""
Identical to flux_to_mag(mag_to_flux(mag)*flux_scale)
"""
return mag - 2.5*np.log10(flux_scale)
def flux_to_mag(flux, zeropoint_mag=0.0, from_unit=None, to_unit=None):
if from_unit=='nMgy':
zeropoint_mag=22.5
return zeropoint_mag-2.5*np.log10(flux)
def mag_to_flux(mag, zeropoint_mag=0.0, from_unit=None, to_unit=None):
if to_unit=='nMgy':
zeropoint_mag=22.5
return np.power(10.0, -0.4*(mag - zeropoint_mag))
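if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): round-trip a
    # magnitude through nanomaggies and rescale a magnitude by a flux factor.
    mag = 20.0
    flux_nmgy = mag_to_flux(mag, to_unit='nMgy')  # 10 nMgy
    print(flux_to_mag(flux_nmgy, from_unit='nMgy'))  # recovers 20.0
    print(scale_mag_as_flux(mag, flux_scale=2.0))  # ~19.25, i.e. 2x brighter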
``` |
{
"source": "jiwoncpark/lenstronomy",
"score": 2
} |
#### File: lenstronomy/Analysis/image_reconstruction.py
```python
import copy
import numpy as np
import lenstronomy.Util.class_creator as class_creator
from lenstronomy.ImSim.MultiBand.single_band_multi_model import SingleBandMultiModel
class MultiBandImageReconstruction(object):
"""
this class manages the output/results of a fitting process and can conveniently access image reconstruction
properties in multi-band fitting.
In particular, the fitting result does not come with linear inversion parameters (which may or may not be joint
or different for multiple bands) and this class performs the linear inversion for the surface brightness amplitudes
and stores them for each individual band to be accessible by the user.
This class is the backbone of the ModelPlot routine that provides the interface of this class with plotting and
illustration routines.
"""
def __init__(self, multi_band_list, kwargs_model, kwargs_params, multi_band_type='multi-linear',
kwargs_likelihood=None):
"""
:param multi_band_list: list of imaging data configuration [[kwargs_data, kwargs_psf, kwargs_numerics], [...]]
:param kwargs_model: model keyword argument list
:param kwargs_params: keyword arguments of the model parameters, same as output of FittingSequence() 'kwargs_result'
:param multi_band_type: string, option when having multiple imaging data sets modelled simultaneously. Options are:
- 'multi-linear': linear amplitudes are inferred on single data set
- 'joint-linear': linear amplitudes are jointly inferred
- 'single-band': single band
:param kwargs_likelihood: likelihood keyword arguments as supported by the Likelihood() class
"""
# here we retrieve those settings in the likelihood keyword arguments that are relevant for the image reconstruction
if kwargs_likelihood is None:
kwargs_likelihood = {}
image_likelihood_mask_list = kwargs_likelihood.get('image_likelihood_mask_list', None)
source_marg = kwargs_likelihood.get('source_marg', False)
linear_prior = kwargs_likelihood.get('linear_prior', None)
bands_compute = kwargs_likelihood.get('bands_compute', None)
if bands_compute is None:
bands_compute = [True] * len(multi_band_list)
if multi_band_type == 'single-band':
multi_band_type = 'multi-linear' # this makes sure that the linear inversion outputs are coming in a list
self._imageModel = class_creator.create_im_sim(multi_band_list, multi_band_type, kwargs_model,
bands_compute=bands_compute,
likelihood_mask_list=image_likelihood_mask_list)
# here we perform the (joint) linear inversion with all data
model, error_map, cov_param, param = self._imageModel.image_linear_solve(inv_bool=True, **kwargs_params)
check_solver_error(param)
logL = self._imageModel.likelihood_data_given_model(source_marg=source_marg, linear_prior=linear_prior, **kwargs_params)
n_data = self._imageModel.num_data_evaluate
if n_data > 0:
print(logL * 2 / n_data, 'reduced X^2 of all evaluated imaging data combined.')
self.model_band_list = []
self._index_list = []
index = 0
for i in range(len(multi_band_list)):
if bands_compute[i] is True:
if multi_band_type == 'joint-linear':
param_i = param
cov_param_i = cov_param
else:
param_i = param[index]
cov_param_i = cov_param[index]
model_band = ModelBand(multi_band_list, kwargs_model, model[index], error_map[index], cov_param_i,
param_i, copy.deepcopy(kwargs_params),
image_likelihood_mask_list=image_likelihood_mask_list, band_index=i)
self.model_band_list.append(model_band)
self._index_list.append(index)
else:
self._index_list.append(-1)
index += 1
def band_setup(self, band_index=0):
"""
ImageModel() instance and keyword arguments of the model components to execute all the options of the ImSim
core modules.
:param band_index: integer (>=0) of imaging band in order of multi_band_list input to this class
:return: ImageModel() instance and keyword arguments of the model
"""
i = self._index_list[band_index]
if i == -1:
raise ValueError("band %s is not computed or out of range." % band_index)
i = int(i)
return self.model_band_list[i].image_model_class, self.model_band_list[i].kwargs_model
class ModelBand(object):
"""
class to plot a single band given the full modeling results
This class has its specific role when the linear inference is performed on the joint-band level and/or when only
a subset of model components is used for this specific band in the modeling.
"""
def __init__(self, multi_band_list, kwargs_model, model, error_map, cov_param, param, kwargs_params,
image_likelihood_mask_list=None, band_index=0):
"""
:param multi_band_list: list of imaging data configuration [[kwargs_data, kwargs_psf, kwargs_numerics], [...]]
:param kwargs_model: model keyword argument list for the full multi-band modeling
:param model: 2d numpy array of modeled image for the specified band
:param error_map: 2d numpy array of size of the image, additional error in the pixels coming from PSF uncertainties
:param cov_param: covariance matrix of the linear inversion
:param param: 1d numpy array of the linear coefficients of this imaging band
:param kwargs_params: keyword argument of keyword argument lists of the different model components selected for
the imaging band, NOT including linear amplitudes (not required as being overwritten by the param list)
:param image_likelihood_mask_list: list of 2d numpy arrays of likelihood masks (for all bands)
:param band_index: integer of the band to be considered in this class
"""
self._bandmodel = SingleBandMultiModel(multi_band_list, kwargs_model, likelihood_mask_list=image_likelihood_mask_list,
band_index=band_index)
self._kwargs_special_partial = kwargs_params.get('kwargs_special', None)
kwargs_lens_partial, kwargs_source_partial, kwargs_lens_light_partial, kwargs_ps_partial, self._kwargs_extinction_partial = self._bandmodel.select_kwargs(**kwargs_params)
self._kwargs_lens_partial, self._kwargs_source_partial, self._kwargs_lens_light_partial, self._kwargs_ps_partial = self._bandmodel.update_linear_kwargs(param, kwargs_lens_partial, kwargs_source_partial, kwargs_lens_light_partial, kwargs_ps_partial)
# this is an (out-commented) example of how to re-create the model in this band
#model_new = self.bandmodel.image(self._kwargs_lens_partial, self._kwargs_source_partial, self._kwargs_lens_light_partial, self._kwargs_ps_partial, self._kwargs_special_partial, self._kwargs_extinction_partial)
self._norm_residuals = self._bandmodel.reduced_residuals(model, error_map=error_map)
self._reduced_x2 = self._bandmodel.reduced_chi2(model, error_map=error_map)
print("reduced chi^2 of data ", band_index, "= ", self._reduced_x2)
self._model = model
self._cov_param = cov_param
self._param = param
@property
def image_model_class(self):
"""
ImageModel() class instance of the single band with only the model components applied to this band
:return: SingleBandMultiModel() instance, which inherits the ImageModel instance
"""
return self._bandmodel
@property
def kwargs_model(self):
"""
:return: keyword argument of keyword argument lists of the different model components selected for the imaging
band, including linear amplitudes. This format matches the image_model_class() return
"""
kwargs_return = {'kwargs_lens': self._kwargs_lens_partial, 'kwargs_source': self._kwargs_source_partial,
'kwargs_lens_light': self._kwargs_lens_light_partial, 'kwargs_ps': self._kwargs_ps_partial,
'kwargs_special': self._kwargs_special_partial,
'kwargs_extinction': self._kwargs_extinction_partial}
return kwargs_return
def check_solver_error(image):
"""
:param image: numpy array of modelled image from linear inversion
:return: bool, True if solver could not find a unique solution, False if solver works
"""
result = np.all(image == 0)
if result is True:
warnings.warn('Linear inversion of surface brightness components did not result in a unique solution.'
'All linear amplitude parameters are set =0 instead. Please check whether '
'a) there are too many basis functions in the model, '
'or b) some linear basis sets are outside of the image/likelihood mask.')
return result
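if __name__ == '__main__':
    # Hedged sketch (not part of the original module): check_solver_error
    # flags an all-zero model image, which signals a degenerate inversion.
    print(check_solver_error(np.zeros((10, 10))))  # True
    print(check_solver_error(np.random.rand(10, 10)))  # False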
```
#### File: lenstronomy/Analysis/light2mass.py
```python
import numpy as np
from lenstronomy.Util import util
from lenstronomy.LightModel.light_model import LightModel
def light2mass_interpol(lens_light_model_list, kwargs_lens_light, numPix=100, deltaPix=0.05, subgrid_res=5,
center_x=0, center_y=0):
"""
takes a lens light model and turns it numerically in a lens model
(with all lensmodel quantities computed on a grid). Then provides an interpolated grid for the quantities.
:param kwargs_lens_light: lens light keyword argument list
:param numPix: number of pixels per axis for the return interpolation
:param deltaPix: interpolation/pixel size
:param center_x: center of the grid
:param center_y: center of the grid
:param subgrid_res: subgrid for the numerical integrals
:return: keyword arguments for 'INTERPOL' lens model
"""
# make super-sampled grid
x_grid_sub, y_grid_sub = util.make_grid(numPix=numPix * 5, deltapix=deltaPix, subgrid_res=subgrid_res)
import lenstronomy.Util.mask_util as mask_util
mask = mask_util.mask_sphere(x_grid_sub, y_grid_sub, center_x, center_y, r=1)
x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix)
# compute light on the subgrid
lightModel = LightModel(light_model_list=lens_light_model_list)
flux = lightModel.surface_brightness(x_grid_sub, y_grid_sub, kwargs_lens_light)
flux_norm = np.sum(flux[mask == 1]) / np.sum(mask)
flux /= flux_norm
from lenstronomy.LensModel import convergence_integrals as integral
# compute lensing quantities with subgrid
convergence_sub = util.array2image(flux)
f_x_sub, f_y_sub = integral.deflection_from_kappa_grid(convergence_sub, grid_spacing=deltaPix / float(subgrid_res))
f_sub = integral.potential_from_kappa_grid(convergence_sub, grid_spacing=deltaPix / float(subgrid_res))
# interpolation function on lensing quantities
x_axes_sub, y_axes_sub = util.get_axes(x_grid_sub, y_grid_sub)
from lenstronomy.LensModel.Profiles.interpol import Interpol
interp_func = Interpol()
interp_func.do_interp(x_axes_sub, y_axes_sub, f_sub, f_x_sub, f_y_sub)
# compute lensing quantities on sparser grid
x_axes, y_axes = util.get_axes(x_grid, y_grid)
f_ = interp_func.function(x_grid, y_grid)
f_x, f_y = interp_func.derivatives(x_grid, y_grid)
# numerical differentials for second order differentials
from lenstronomy.LensModel.lens_model import LensModel
lens_model = LensModel(lens_model_list=['INTERPOL'])
kwargs = [{'grid_interp_x': x_axes_sub, 'grid_interp_y': y_axes_sub, 'f_': f_sub,
'f_x': f_x_sub, 'f_y': f_y_sub}]
f_xx, f_xy, f_yx, f_yy = lens_model.hessian(x_grid, y_grid, kwargs, diff=0.00001)
kwargs_interpol = {'grid_interp_x': x_axes, 'grid_interp_y': y_axes, 'f_': util.array2image(f_),
'f_x': util.array2image(f_x), 'f_y': util.array2image(f_y), 'f_xx': util.array2image(f_xx),
'f_xy': util.array2image(f_xy), 'f_yy': util.array2image(f_yy)}
return kwargs_interpol
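if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): convert a single
    # Sersic light profile into an 'INTERPOL' lens model. The Sersic keyword
    # values are arbitrary, and the call may take a few seconds to run.
    kwargs_light = [{'amp': 10., 'R_sersic': 1., 'n_sersic': 3.,
                     'center_x': 0., 'center_y': 0.}]
    kwargs_interpol = light2mass_interpol(['SERSIC'], kwargs_light,
                                          numPix=50, deltaPix=0.1)
    print(sorted(kwargs_interpol.keys()))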
```
#### File: ImSim/Numerics/numerics.py
```python
from lenstronomy.ImSim.Numerics.grid import RegularGrid, AdaptiveGrid
from lenstronomy.ImSim.Numerics.convolution import SubgridKernelConvolution, PixelKernelConvolution, MultiGaussianConvolution
from lenstronomy.ImSim.Numerics.point_source_rendering import PointSourceRendering
from lenstronomy.Util import util
from lenstronomy.Util import kernel_util
import numpy as np
class Numerics(PointSourceRendering):
"""
this classes manages the numerical options and computations of an image.
The class has two main functions, re_size_convolve() and coordinates_evaluate()
"""
def __init__(self, pixel_grid, psf, supersampling_factor=1, compute_mode='regular', supersampling_convolution=False,
supersampling_kernel_size=5, flux_evaluate_indexes=None, supersampled_indexes=None,
compute_indexes=None, point_source_supersampling_factor=1, convolution_kernel_size=None,
convolution_type='fft_static', truncation=4):
"""
:param pixel_grid: PixelGrid() class instance
:param psf: PSF() class instance
:param compute_mode: options are: 'regular', 'adaptive'
:param supersampling_factor: int, factor of higher resolution sub-pixel sampling of surface brightness
:param supersampling_convolution: bool, if True, performs (part of) the convolution on the super-sampled
grid/pixels
:param supersampling_kernel_size: int (odd number), size (in regular pixel units) of the super-sampled
convolution
:param flux_evaluate_indexes: boolean 2d array of size of image (or None, then initiated as a grid of True's).
Pixels indicated with True will be used to perform the surface brightness computation (and possible lensing
ray-shooting). Pixels marked as False will be assigned a flux value of zero (or ignored in the adaptive
convolution)
:param supersampled_indexes: 2d boolean array (only used in mode='adaptive') of pixels to be supersampled (in
surface brightness and if supersampling_convolution=True also in convolution)
:param compute_indexes: 2d boolean array (only used in mode='adaptive'), marks pixels for which the response after
convolution is computed (all others =0). This can be set to likelihood_mask in the Likelihood module for
consistency.
:param point_source_supersampling_factor: super-sampling resolution of the point source placing
:param convolution_kernel_size: int, odd number, size of convolution kernel. If None, takes size of point_source_kernel
:param convolution_type: string, 'fft', 'grid', 'fft_static' mode of 2d convolution
"""
if compute_mode not in ['regular', 'adaptive']:
raise ValueError('compute_mode specified as %s not valid. Options are "adaptive", "regular"' % compute_mode)
# if no super sampling, turn the supersampling convolution off
self._psf_type = psf.psf_type
if not isinstance(supersampling_factor, int):
raise TypeError('supersampling_factor needs to be an integer! Current type is %s' % type(supersampling_factor))
if supersampling_factor == 1:
supersampling_convolution = False
self._pixel_width = pixel_grid.pixel_width
nx, ny = pixel_grid.num_pixel_axes
transform_pix2angle = pixel_grid.transform_pix2angle
ra_at_xy_0, dec_at_xy_0 = pixel_grid.radec_at_xy_0
if supersampled_indexes is None:
supersampled_indexes = np.zeros((nx, ny), dtype=bool)
if compute_mode == 'adaptive': # or (compute_mode == 'regular' and supersampling_convolution is False and supersampling_factor > 1):
self._grid = AdaptiveGrid(nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, supersampled_indexes,
supersampling_factor, flux_evaluate_indexes)
else:
self._grid = RegularGrid(nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0, supersampling_factor,
flux_evaluate_indexes)
if self._psf_type == 'PIXEL':
if compute_mode == 'adaptive' and supersampling_convolution is True:
from lenstronomy.ImSim.Numerics.adaptive_numerics import AdaptiveConvolution
kernel_super = psf.kernel_point_source_supersampled(supersampling_factor)
kernel_super = self._supersampling_cut_kernel(kernel_super, convolution_kernel_size, supersampling_factor)
self._conv = AdaptiveConvolution(kernel_super, supersampling_factor,
conv_supersample_pixels=supersampled_indexes,
supersampling_kernel_size=supersampling_kernel_size,
compute_pixels=compute_indexes, nopython=True, cache=True, parallel=False)
elif compute_mode == 'regular' and supersampling_convolution is True:
kernel_super = psf.kernel_point_source_supersampled(supersampling_factor)
if convolution_kernel_size is not None:
kernel_super = psf.kernel_point_source_supersampled(supersampling_factor)
kernel_super = self._supersampling_cut_kernel(kernel_super, convolution_kernel_size,
supersampling_factor)
self._conv = SubgridKernelConvolution(kernel_super, supersampling_factor,
supersampling_kernel_size=supersampling_kernel_size,
convolution_type=convolution_type)
else:
kernel = psf.kernel_point_source
kernel = self._supersampling_cut_kernel(kernel, convolution_kernel_size,
supersampling_factor=1)
self._conv = PixelKernelConvolution(kernel, convolution_type=convolution_type)
elif self._psf_type == 'GAUSSIAN':
pixel_scale = pixel_grid.pixel_width
fwhm = psf.fwhm # FWHM in units of angle
sigma = util.fwhm2sigma(fwhm)
sigma_list = [sigma]
fraction_list = [1]
self._conv = MultiGaussianConvolution(sigma_list, fraction_list, pixel_scale, supersampling_factor,
supersampling_convolution, truncation=truncation)
elif self._psf_type == 'NONE':
self._conv = None
else:
raise ValueError('psf_type %s not valid! Choose either NONE, GAUSSIAN or PIXEL.' % self._psf_type)
super(Numerics, self).__init__(pixel_grid=pixel_grid, supersampling_factor=point_source_supersampling_factor,
psf=psf)
if supersampling_convolution is True:
self._high_res_return = True
else:
self._high_res_return = False
def re_size_convolve(self, flux_array, unconvolved=False):
"""
:param flux_array: 1d array, flux values corresponding to coordinates_evaluate
:param unconvolved: bool, if True, returns the unconvolved surface brightness on the regular grid
:return: convolved image on regular pixel grid, 2d array
"""
# add supersampled region to lower resolution on
image_low_res, image_high_res_partial = self._grid.flux_array2image_low_high(flux_array,
high_res_return=self._high_res_return)
if unconvolved is True or self._psf_type == 'NONE':
image_conv = image_low_res
else:
# convolve low res grid and high res grid
image_conv = self._conv.re_size_convolve(image_low_res, image_high_res_partial)
return image_conv * self._pixel_width ** 2
@property
def coordinates_evaluate(self):
"""
:return: 1d array of all coordinates being evaluated to perform the image computation
"""
return self._grid.coordinates_evaluate
@staticmethod
def _supersampling_cut_kernel(kernel_super, convolution_kernel_size, supersampling_factor):
"""
:param kernel_super: super-sampled kernel
:param convolution_kernel_size: size of convolution kernel in units of regular pixels (odd)
:param supersampling_factor: super-sampling factor of convolution kernel
:return: cut out kernel in super-sampling size
"""
if convolution_kernel_size is not None:
size = convolution_kernel_size * supersampling_factor
if size % 2 == 0:
size += 1
kernel_cut = kernel_util.cut_psf(kernel_super, size)
return kernel_cut
else:
return kernel_super
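if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module), assuming the
    # standard lenstronomy PixelGrid and PSF data classes: set up regular-grid
    # numerics without a PSF and evaluate a flat surface brightness.
    from lenstronomy.Data.pixel_grid import PixelGrid
    from lenstronomy.Data.psf import PSF
    num_pix, delta_pix = 20, 0.1
    transform = np.array([[delta_pix, 0], [0, delta_pix]])
    pixel_grid = PixelGrid(nx=num_pix, ny=num_pix, transform_pix2angle=transform,
                           ra_at_xy_0=-1., dec_at_xy_0=-1.)
    numerics = Numerics(pixel_grid=pixel_grid, psf=PSF(psf_type='NONE'))
    x, y = numerics.coordinates_evaluate
    image = numerics.re_size_convolve(np.ones_like(x))
    print(image.shape)  # (20, 20)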
```
#### File: lenstronomy/LensModel/lens_model_extensions.py
```python
import numpy as np
import lenstronomy.Util.util as util
import lenstronomy.Util.mask_util as mask_util
import lenstronomy.Util.param_util as param_util
class LensModelExtensions(object):
"""
class with extension routines not part of the LensModel core routines
"""
def __init__(self, lensModel):
"""
:param lensModel: instance of the LensModel() class, or with same functionalities.
In particular, the following definitions are required to execute all functionalities presented in this class:
def ray_shooting()
def magnification()
def kappa()
def alpha()
def hessian()
"""
self._lensModel = lensModel
def magnification_finite(self, x_pos, y_pos, kwargs_lens, source_sigma=0.003, window_size=0.1, grid_number=100,
shape="GAUSSIAN", polar_grid=False, aspect_ratio=0.5):
"""
returns the magnification of an extended source with Gaussian light profile
:param x_pos: x-axis positions of point sources
:param y_pos: y-axis positions of point sources
:param kwargs_lens: lens model kwargs
:param source_sigma: Gaussian sigma in arcsec in the source plane
:param window_size: size of the window within which to compute the finite flux
:param grid_number: number of grid cells per axis in the window to numerically compute the flux
:param shape: string, shape of the source, supports 'GAUSSIAN' and 'TORUS'
:param polar_grid: bool, if True, evaluates the flux on an elliptical (rather than square) grid
:param aspect_ratio: aspect ratio of the elliptical grid (only used if polar_grid=True)
:return: numerically computed brightness of the sources
"""
mag_finite = np.zeros_like(x_pos)
deltaPix = float(window_size)/grid_number
if shape == 'GAUSSIAN':
from lenstronomy.LightModel.Profiles.gaussian import Gaussian
quasar = Gaussian()
elif shape == 'TORUS':
import lenstronomy.LightModel.Profiles.ellipsoid as quasar
else:
raise ValueError("shape %s not valid for finite magnification computation!" % shape)
x_grid, y_grid = util.make_grid(numPix=grid_number, deltapix=deltaPix, subgrid_res=1)
if polar_grid is True:
a = window_size*0.5
b = window_size*0.5*aspect_ratio
ellipse_inds = (x_grid*a**-1) **2 + (y_grid*b**-1) **2 <= 1
x_grid, y_grid = x_grid[ellipse_inds], y_grid[ellipse_inds]
for i in range(len(x_pos)):
ra, dec = x_pos[i], y_pos[i]
center_x, center_y = self._lensModel.ray_shooting(ra, dec, kwargs_lens)
if polar_grid is True:
theta = np.arctan2(dec,ra)
xcoord, ycoord = util.rotate(x_grid, y_grid, theta)
else:
xcoord, ycoord = x_grid, y_grid
betax, betay = self._lensModel.ray_shooting(xcoord + ra, ycoord + dec, kwargs_lens)
I_image = quasar.function(betax, betay, 1., source_sigma, center_x, center_y)
mag_finite[i] = np.sum(I_image) * deltaPix**2
return mag_finite
def zoom_source(self, x_pos, y_pos, kwargs_lens, source_sigma=0.003, window_size=0.1, grid_number=100,
shape="GAUSSIAN"):
"""
computes the surface brightness on an image with a zoomed window
:param x_pos: angular coordinate of center of image
:param y_pos: angular coordinate of center of image
:param kwargs_lens: lens model parameter list
:param source_sigma: source size (in angular units)
:param window_size: window size in angular units
:param grid_number: number of grid points per axis
:param shape: string, shape of source, supports 'GAUSSIAN' and 'TORUS'
:return: 2d numpy array
"""
deltaPix = float(window_size) / grid_number
if shape == 'GAUSSIAN':
from lenstronomy.LightModel.Profiles.gaussian import Gaussian
quasar = Gaussian()
elif shape == 'TORUS':
import lenstronomy.LightModel.Profiles.ellipsoid as quasar
else:
raise ValueError("shape %s not valid for finite magnification computation!" % shape)
x_grid, y_grid = util.make_grid(numPix=grid_number, deltapix=deltaPix, subgrid_res=1)
center_x, center_y = self._lensModel.ray_shooting(x_pos, y_pos, kwargs_lens)
betax, betay = self._lensModel.ray_shooting(x_grid + x_pos, y_grid + y_pos, kwargs_lens)
image = quasar.function(betax, betay, 1., source_sigma, center_x, center_y)
return util.array2image(image)
def critical_curve_tiling(self, kwargs_lens, compute_window=5, start_scale=0.5, max_order=10):
"""
:param kwargs_lens: lens model keyword argument list
:param compute_window: total window in the image plane where to search for critical curves
:param start_scale: float, angular scale on which to start the tiling from (if there are two distinct curves in
a region, it might only find one)
:param max_order: int, maximum order in the tiling to compute critical curve triangles
:return: list of positions representing coordinates of the critical curve (in RA and DEC)
"""
numPix = int(compute_window / start_scale)
x_grid_init, y_grid_init = util.make_grid(numPix, deltapix=start_scale, subgrid_res=1)
mag_init = util.array2image(self._lensModel.magnification(x_grid_init, y_grid_init, kwargs_lens))
x_grid_init = util.array2image(x_grid_init)
y_grid_init = util.array2image(y_grid_init)
ra_crit_list = []
dec_crit_list = []
# iterate through original triangles and return ra_crit, dec_crit list
for i in range(numPix-1):
for j in range(numPix-1):
edge1 = [x_grid_init[i, j], y_grid_init[i, j], mag_init[i, j]]
edge2 = [x_grid_init[i+1, j+1], y_grid_init[i+1, j+1], mag_init[i+1, j+1]]
edge_90_1 = [x_grid_init[i, j+1], y_grid_init[i, j+1], mag_init[i, j+1]]
edge_90_2 = [x_grid_init[i+1, j], y_grid_init[i+1, j], mag_init[i+1, j]]
ra_crit, dec_crit = self._tiling_crit(edge1, edge2, edge_90_1, max_order=max_order,
kwargs_lens=kwargs_lens)
ra_crit_list += ra_crit # list addition
dec_crit_list += dec_crit # list addition
ra_crit, dec_crit = self._tiling_crit(edge1, edge2, edge_90_2, max_order=max_order,
kwargs_lens=kwargs_lens)
ra_crit_list += ra_crit # list addition
dec_crit_list += dec_crit # list addition
return np.array(ra_crit_list), np.array(dec_crit_list)
def _tiling_crit(self, edge1, edge2, edge_90, max_order, kwargs_lens):
"""
tiles a rectangular triangle and compares the signs of the magnification
:param edge1: [ra_coord, dec_coord, magnification]
:param edge2: [ra_coord, dec_coord, magnification]
:param edge_90: [ra_coord, dec_coord, magnification]
:param max_order: maximal order to fold triangle
:param kwargs_lens: lens model keyword argument list
:return:
"""
ra_1, dec_1, mag_1 = edge1
ra_2, dec_2, mag_2 = edge2
ra_3, dec_3, mag_3 = edge_90
sign_list = np.sign([mag_1, mag_2, mag_3])
if sign_list[0] == sign_list[1] and sign_list[0] == sign_list[2]: # if all signs are the same
return [], []
else:
# split triangle along the long axis
# execute tiling twice
# add ra_crit and dec_crit together
# if max depth has been reached, return the mean value in the triangle
max_order -= 1
if max_order <= 0:
return [(ra_1 + ra_2 + ra_3)/3], [(dec_1 + dec_2 + dec_3)/3]
else:
# split triangle
ra_90_ = (ra_1 + ra_2)/2 # find point in the middle of the long axis to split triangle
dec_90_ = (dec_1 + dec_2)/2
mag_90_ = self._lensModel.magnification(ra_90_, dec_90_, kwargs_lens)
edge_90_ = [ra_90_, dec_90_, mag_90_]
ra_crit, dec_crit = self._tiling_crit(edge1=edge_90, edge2=edge1, edge_90=edge_90_, max_order=max_order,
kwargs_lens=kwargs_lens)
ra_crit_2, dec_crit_2 = self._tiling_crit(edge1=edge_90, edge2=edge2, edge_90=edge_90_, max_order=max_order,
kwargs_lens=kwargs_lens)
ra_crit += ra_crit_2
dec_crit += dec_crit_2
return ra_crit, dec_crit
def critical_curve_caustics(self, kwargs_lens, compute_window=5, grid_scale=0.01):
"""
:param kwargs_lens: lens model kwargs
:param compute_window: window size in arcsec where the critical curve is computed
:param grid_scale: numerical grid spacing of the computation of the critical curves
:return: lists of ra and dec arrays corresponding to different disconnected critical curves and their caustic counterparts
"""
numPix = int(compute_window / grid_scale)
x_grid_high_res, y_grid_high_res = util.make_grid(numPix, deltapix=grid_scale, subgrid_res=1)
mag_high_res = util.array2image(self._lensModel.magnification(x_grid_high_res, y_grid_high_res, kwargs_lens))
ra_crit_list = []
dec_crit_list = []
ra_caustic_list = []
dec_caustic_list = []
import matplotlib.pyplot as plt
cs = plt.contour(util.array2image(x_grid_high_res), util.array2image(y_grid_high_res), mag_high_res, [0],
alpha=0.0)
paths = cs.collections[0].get_paths()
for i, p in enumerate(paths):
v = p.vertices
ra_points = v[:, 0]
dec_points = v[:, 1]
ra_crit_list.append(ra_points)
dec_crit_list.append(dec_points)
ra_caustics, dec_caustics = self._lensModel.ray_shooting(ra_points, dec_points, kwargs_lens)
ra_caustic_list.append(ra_caustics)
dec_caustic_list.append(dec_caustics)
plt.cla()
return ra_crit_list, dec_crit_list, ra_caustic_list, dec_caustic_list
def hessian_eigenvectors(self, x, y, kwargs_lens, diff=None):
"""
computes magnification eigenvectors at position (x, y)
:param x: x-position
:param y: y-position
:param kwargs_lens: lens model keyword arguments
:param diff: float or None, finite average differential scale
:return: eigenvalues and eigenvectors of the lensing Jacobian A = 1 - Hessian: w1, w2, v11, v12, v21, v22
"""
f_xx, f_xy, f_yx, f_yy = self._lensModel.hessian(x, y, kwargs_lens, diff=diff)
if isinstance(x, int) or isinstance(x, float):
A = np.array([[1-f_xx, f_xy], [f_yx, 1-f_yy]])
w, v = np.linalg.eig(A)
v11, v12, v21, v22 = v[0, 0], v[0, 1], v[1, 0], v[1, 1]
w1, w2 = w[0], w[1]
else:
w1, w2, v11, v12, v21, v22 = np.empty(len(x), dtype=float), np.empty(len(x), dtype=float), np.empty_like(x), np.empty_like(x), np.empty_like(x), np.empty_like(x)
for i in range(len(x)):
A = np.array([[1 - f_xx[i], f_xy[i]], [f_yx[i], 1 - f_yy[i]]])
w, v = np.linalg.eig(A)
w1[i], w2[i] = w[0], w[1]
v11[i], v12[i], v21[i], v22[i] = v[0, 0], v[0, 1], v[1, 0], v[1, 1]
return w1, w2, v11, v12, v21, v22
def radial_tangential_stretch(self, x, y, kwargs_lens, diff=None, ra_0=0, dec_0=0,
coordinate_frame_definitions=False):
"""
computes the radial and tangential stretches at a given position
:param x: x-position
:param y: y-position
:param kwargs_lens: lens model keyword arguments
:param diff: float or None, finite average differential scale
:return: radial stretch, tangential stretch
"""
w1, w2, v11, v12, v21, v22 = self.hessian_eigenvectors(x, y, kwargs_lens, diff=diff)
v_x, v_y = x - ra_0, y - dec_0
prod_v1 = v_x*v11 + v_y*v12
prod_v2 = v_x*v21 + v_y*v22
if isinstance(x, int) or isinstance(x, float):
if (coordinate_frame_definitions is True and abs(prod_v1) >= abs(prod_v2)) or (coordinate_frame_definitions is False and w1 >= w2):
#if w1 > w2:
#if abs(prod_v1) > abs(prod_v2): # radial vector has larger scalar product to the zero point
lambda_rad = 1. / w1
lambda_tan = 1. / w2
v1_rad, v2_rad = v11, v12
v1_tan, v2_tan = v21, v22
prod_r = prod_v1
else:
lambda_rad = 1. / w2
lambda_tan = 1. / w1
v1_rad, v2_rad = v21, v22
v1_tan, v2_tan = v11, v12
prod_r = prod_v2
if prod_r < 0: # if radial eigenvector points towards the center
v1_rad, v2_rad = -v1_rad, -v2_rad
if v1_rad * v2_tan - v2_rad * v1_tan < 0: # cross product defines orientation of the tangential eigenvector
v1_tan *= -1
v2_tan *= -1
else:
lambda_rad, lambda_tan, v1_rad, v2_rad, v1_tan, v2_tan = np.empty(len(x), dtype=float), np.empty(len(x), dtype=float), np.empty_like(x), np.empty_like(x), np.empty_like(x), np.empty_like(x)
for i in range(len(x)):
if (coordinate_frame_definitions is True and abs(prod_v1[i]) >= abs(prod_v2[i])) or (
coordinate_frame_definitions is False and w1[i] >= w2[i]):
#if w1[i] > w2[i]:
lambda_rad[i] = 1. / w1[i]
lambda_tan[i] = 1. / w2[i]
v1_rad[i], v2_rad[i] = v11[i], v12[i]
v1_tan[i], v2_tan[i] = v21[i], v22[i]
prod_r = prod_v1[i]
else:
lambda_rad[i] = 1. / w2[i]
lambda_tan[i] = 1. / w1[i]
v1_rad[i], v2_rad[i] = v21[i], v22[i]
v1_tan[i], v2_tan[i] = v11[i], v12[i]
prod_r = prod_v2[i]
if prod_r < 0: # if radial eigenvector points towards the center
v1_rad[i], v2_rad[i] = -v1_rad[i], -v2_rad[i]
if v1_rad[i] * v2_tan[i] - v2_rad[i] * v1_tan[i] < 0: # cross product defines orientation of the tangential eigenvector
v1_tan[i] *= -1
v2_tan[i] *= -1
return lambda_rad, lambda_tan, v1_rad, v2_rad, v1_tan, v2_tan
def radial_tangential_differentials(self, x, y, kwargs_lens, center_x=0, center_y=0, smoothing_3rd=0.001,
smoothing_2nd=None):
"""
computes the differentials in stretches and directions
:param x: x-position
:param y: y-position
:param kwargs_lens: lens model keyword arguments
:param center_x: x-coord of center towards which the rotation direction is defined
:param center_y: x-coord of center towards which the rotation direction is defined
:param smoothing_3rd: finite differential length of third order in units of angle
:param smoothing_2nd: float or None, finite average differential scale of Hessian
:return: lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan
"""
lambda_rad, lambda_tan, v1_rad, v2_rad, v1_tan, v2_tan = self.radial_tangential_stretch(x, y, kwargs_lens,
diff=smoothing_2nd,
ra_0=center_x, dec_0=center_y,
coordinate_frame_definitions=True)
x0 = x - center_x
y0 = y - center_y
# computing angle of tangential vector in regard to the defined coordinate center
cos_angle = (v1_tan * x0 + v2_tan * y0) / np.sqrt((x0 ** 2 + y0 ** 2) * (v1_tan ** 2 + v2_tan ** 2))# * np.sign(v1_tan * y0 - v2_tan * x0)
orientation_angle = np.arccos(cos_angle) - np.pi / 2
# computing differentials in tangential and radial directions
dx_tan = x + smoothing_3rd * v1_tan
dy_tan = y + smoothing_3rd * v2_tan
lambda_rad_dtan, lambda_tan_dtan, v1_rad_dtan, v2_rad_dtan, v1_tan_dtan, v2_tan_dtan = self.radial_tangential_stretch(dx_tan, dy_tan, kwargs_lens, diff=smoothing_2nd,
ra_0=center_x, dec_0=center_y, coordinate_frame_definitions=True)
dx_rad = x + smoothing_3rd * v1_rad
dy_rad = y + smoothing_3rd * v2_rad
lambda_rad_drad, lambda_tan_drad, v1_rad_drad, v2_rad_drad, v1_tan_drad, v2_tan_drad = self.radial_tangential_stretch(
dx_rad, dy_rad, kwargs_lens, diff=smoothing_2nd, ra_0=center_x, dec_0=center_y, coordinate_frame_definitions=True)
# eigenvalue differentials in tangential and radial direction
dlambda_tan_dtan = (lambda_tan_dtan - lambda_tan) / smoothing_3rd# * np.sign(v1_tan * y0 - v2_tan * x0)
dlambda_tan_drad = (lambda_tan_drad - lambda_tan) / smoothing_3rd# * np.sign(v1_rad * x0 + v2_rad * y0)
dlambda_rad_drad = (lambda_rad_drad - lambda_rad) / smoothing_3rd# * np.sign(v1_rad * x0 + v2_rad * y0)
dlambda_rad_dtan = (lambda_rad_dtan - lambda_rad) / smoothing_3rd# * np.sign(v1_rad * x0 + v2_rad * y0)
# eigenvector direction differentials in tangential and radial direction
cos_dphi_tan_dtan = v1_tan * v1_tan_dtan + v2_tan * v2_tan_dtan #/ (np.sqrt(v1_tan**2 + v2_tan**2) * np.sqrt(v1_tan_dtan**2 + v2_tan_dtan**2))
norm = np.sqrt(v1_tan**2 + v2_tan**2) * np.sqrt(v1_tan_dtan**2 + v2_tan_dtan**2)
cos_dphi_tan_dtan /= norm
arc_cos_dphi_tan_dtan = np.arccos(np.abs(np.minimum(cos_dphi_tan_dtan, 1)))
dphi_tan_dtan = arc_cos_dphi_tan_dtan / smoothing_3rd
cos_dphi_tan_drad = v1_tan * v1_tan_drad + v2_tan * v2_tan_drad # / (np.sqrt(v1_tan ** 2 + v2_tan ** 2) * np.sqrt(v1_tan_drad ** 2 + v2_tan_drad ** 2))
norm = np.sqrt(v1_tan ** 2 + v2_tan ** 2) * np.sqrt(v1_tan_drad ** 2 + v2_tan_drad ** 2)
cos_dphi_tan_drad /= norm
arc_cos_dphi_tan_drad = np.arccos(np.abs(np.minimum(cos_dphi_tan_drad, 1)))
dphi_tan_drad = arc_cos_dphi_tan_drad / smoothing_3rd
cos_dphi_rad_drad = v1_rad * v1_rad_drad + v2_rad * v2_rad_drad #/ (np.sqrt(v1_rad**2 + v2_rad**2) * np.sqrt(v1_rad_drad**2 + v2_rad_drad**2))
norm = np.sqrt(v1_rad**2 + v2_rad**2) * np.sqrt(v1_rad_drad**2 + v2_rad_drad**2)
cos_dphi_rad_drad /= norm
cos_dphi_rad_drad = np.minimum(cos_dphi_rad_drad, 1)
dphi_rad_drad = np.arccos(cos_dphi_rad_drad) / smoothing_3rd
cos_dphi_rad_dtan = v1_rad * v1_rad_dtan + v2_rad * v2_rad_dtan # / (np.sqrt(v1_rad ** 2 + v2_rad ** 2) * np.sqrt(v1_rad_dtan ** 2 + v2_rad_dtan ** 2))
norm = np.sqrt(v1_rad ** 2 + v2_rad ** 2) * np.sqrt(v1_rad_dtan ** 2 + v2_rad_dtan ** 2)
cos_dphi_rad_dtan /= norm
cos_dphi_rad_dtan = np.minimum(cos_dphi_rad_dtan, 1)
dphi_rad_dtan = np.arccos(cos_dphi_rad_dtan) / smoothing_3rd
return lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan
def curved_arc_estimate(self, x, y, kwargs_lens, smoothing=None, smoothing_3rd=0.001):
"""
performs the estimation of the curved arc description at a particular position of an arbitrary lens profile
:param x: float, x-position where the estimate is provided
:param y: float, y-position where the estimate is provided
:param kwargs_lens: lens model keyword arguments
:param smoothing: float or None, finite average differential scale of the Hessian evaluation
:param smoothing_3rd: finite differential length of third order in units of angle
:return: keyword argument list corresponding to a CURVED_ARC profile at (x, y) given the initial lens model
"""
radial_stretch, tangential_stretch, v_rad1, v_rad2, v_tang1, v_tang2 = self.radial_tangential_stretch(x, y, kwargs_lens, diff=smoothing)
dx_tang = x + smoothing_3rd * v_tang1
dy_tang = y + smoothing_3rd * v_tang2
rad_dt, tang_dt, v_rad1_dt, v_rad2_dt, v_tang1_dt, v_tang2_dt = self.radial_tangential_stretch(dx_tang, dy_tang,
kwargs_lens,
diff=smoothing)
d_tang1 = v_tang1_dt - v_tang1
d_tang2 = v_tang2_dt - v_tang2
delta = np.sqrt(d_tang1**2 + d_tang2**2)
if delta > 1:
d_tang1 = v_tang1_dt + v_tang1
d_tang2 = v_tang2_dt + v_tang2
delta = np.sqrt(d_tang1 ** 2 + d_tang2 ** 2)
curvature = delta / smoothing_3rd
direction = np.arctan2(v_rad2 * np.sign(v_rad1 * x + v_rad2 * y), v_rad1 * np.sign(v_rad1 * x + v_rad2 * y))
#direction = np.arctan2(v_rad2, v_rad1)
kwargs_arc = {'radial_stretch': radial_stretch,
'tangential_stretch': tangential_stretch,
'curvature': curvature,
'direction': direction,
'center_x': x, 'center_y': y}
return kwargs_arc
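if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module), assuming the
    # standard lenstronomy LensModel API with a singular isothermal sphere.
    from lenstronomy.LensModel.lens_model import LensModel
    lens_model = LensModel(lens_model_list=['SIS'])
    kwargs_lens = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.}]
    ext = LensModelExtensions(lens_model)
    lambda_rad, lambda_tan, _, _, _, _ = ext.radial_tangential_stretch(1.1, 0., kwargs_lens)
    print(lambda_rad, lambda_tan)
    print(ext.curved_arc_estimate(1.1, 0., kwargs_lens))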
```
#### File: LightModel/Profiles/profile_base.py
```python
import numpy as np
class LightProfileBase(object):
"""
base class of all light profiles
"""
def __init__(self):
pass
def function(self, *args, **kwargs):
"""
:param x: x-coordinate
:param y: y-coordinate
:param kwargs: keyword arguments of profile
:return: surface brightness; raises an error, as the profile is not defined in the base class
"""
raise ValueError('function definition not defined in the light profile.')
def light_3d(self, *args, **kwargs):
"""
:param r: 3d radius
:param kwargs: keyword arguments of profile
:return: 3d light profile; raises an error, as the profile is not defined in the base class
"""
raise ValueError('light_3d definition not defined in the light profile.')
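if __name__ == '__main__':
    # Hedged sketch (not part of the original module): a minimal subclass
    # illustrating the interface that concrete light profiles are expected to fill in.
    class FlatProfile(LightProfileBase):
        """Toy profile with constant surface brightness."""
        def function(self, x, y, amp=1.):
            return amp * np.ones_like(x)
    profile = FlatProfile()
    print(profile.function(np.linspace(0., 1., 5), np.zeros(5), amp=2.))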
```
#### File: test_LightModel/test_Profiles/test_interpolation.py
```python
import pytest
import numpy.testing as npt
from lenstronomy.LightModel.Profiles.interpolation import Interpol
from lenstronomy.LightModel.Profiles.gaussian import Gaussian
import lenstronomy.Util.util as util
class TestInterpol(object):
"""
class to test the Interpol light profile
"""
def setup(self):
pass
def test_function(self):
"""
:return:
"""
x, y = util.make_grid(numPix=20, deltapix=1.)
gauss = Gaussian()
flux = gauss.function(x, y, amp=1., center_x=0., center_y=0., sigma=1.)
image = util.array2image(flux)
interp = Interpol()
kwargs_interp = {'image': image, 'scale': 1., 'phi_G': 0., 'center_x': 0., 'center_y': 0.}
output = interp.function(x, y, **kwargs_interp)
npt.assert_almost_equal(output, flux, decimal=0)
flux = gauss.function(x-1., y, amp=1., center_x=0., center_y=0., sigma=1.)
kwargs_interp = {'image': image, 'scale': 1., 'phi_G': 0., 'center_x': 1., 'center_y': 0.}
output = interp.function(x, y, **kwargs_interp)
npt.assert_almost_equal(output, flux, decimal=0)
flux = gauss.function(x - 1., y - 1., amp=1, center_x=0., center_y=0., sigma=1.)
kwargs_interp = {'image': image, 'scale': 1., 'phi_G': 0., 'center_x': 1., 'center_y': 1.}
output = interp.function(x, y, **kwargs_interp)
npt.assert_almost_equal(output, flux, decimal=0)
out = interp.function(x=1000, y=0, **kwargs_interp)
assert out == 0
def test_delete_cache(self):
x, y = util.make_grid(numPix=20, deltapix=1.)
gauss = Gaussian()
flux = gauss.function(x, y, amp=1., center_x=0., center_y=0., sigma=1.)
image = util.array2image(flux)
interp = Interpol()
kwargs_interp = {'image': image, 'scale': 1., 'phi_G': 0., 'center_x': 0., 'center_y': 0.}
output = interp.function(x, y, **kwargs_interp)
assert hasattr(interp, '_image_interp')
interp.delete_cache()
assert not hasattr(interp, '_image_interp')
if __name__ == '__main__':
pytest.main()
``` |
{
"source": "jiwoncpark/magnificat",
"score": 3
} |
#### File: magnificat/samplers/dc2subset_sampler.py
```python
import numpy as np
from magnificat.samplers.dc2_sampler import DC2Sampler
class DC2SubsetSampler:
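"""Sampler that draws each DC2 catalog parameter from an independent Gaussian,
whose mean and standard deviation (shrunk by `std_factor`) are measured from
the DC2 catalog
"""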
def __init__(self, seed, bandpasses, std_factor=0.5):
self.seed = seed
self.rng = np.random.default_rng(self.seed)
self.bandpasses = bandpasses
self.std_factor = std_factor
dc2_sampler = DC2Sampler(self.seed, bandpasses)
cat = dc2_sampler.cat.copy().drop('galaxy_id', axis=1)
self.mean = cat.mean()
self.std = cat.std()*self.std_factor
self.n_params = len(self.mean)
def sample(self):
eps = self.rng.standard_normal(self.n_params)
return eps*self.std + self.mean
if __name__ == '__main__':
sampler = DC2SubsetSampler(123, list('ugri'))
s = sampler.sample()
print(s)
s = sampler.sample()
print(s)
``` |
{
"source": "jiwoncpark/magnify",
"score": 3
} |
#### File: magnify/attentive_neural_process/context_target_sampler.py
```python
import torch
import numpy as np
class GetRandomObservations:
"""
Return random subset of indices corresponding to observations
"""
def __init__(self, out_dir, n_pointings: int, seed: int = 123):
from numpy.random import default_rng
from magnificat.cadence import LSSTCadence
self.out_dir = out_dir
self.n_pointings = n_pointings
self.seed = seed
self.rng = default_rng(self.seed)
self.cadence_obj = LSSTCadence(out_dir, self.seed)
ra, dec = self.cadence_obj.get_pointings(self.n_pointings)
self.cadence_obj.get_obs_info(ra, dec)
def __call__(self, batch_size, n_possible_points):
# Randomly sample observation
obs_i = self.rng.choice(self.n_pointings)
obs_idx = self.cadence_obj.get_mjd_single_pointing(obs_i, rounded=True).astype(np.int32)
# All the available indices, with batching
idx_all = (
np.arange(n_possible_points)
.reshape(1, n_possible_points)
.repeat(batch_size, axis=0)
)
idx = torch.from_numpy(idx_all[:, obs_idx])
return idx
def collate_fn_opsim(batch, rng, cadence_obj, n_pointings, frac_context=0.9,
exclude_ddf=True):
x, y, meta = zip(*batch) # batch ~ list of (x, y, param) tuples
x = torch.stack(x, axis=0) # [batch_size, n_points, n_filters]
y = torch.stack(y, axis=0) # [batch_size, n_points, n_filters]
meta = torch.stack(meta, axis=0).float() # [batch_size, n_params]
# Log-parameterize some params
# n_full_x = x.shape[1]
obs_i = rng.choice(n_pointings)
if exclude_ddf:
while len(cadence_obj.get_mjd_single_pointing(obs_i, rounded=True)) > 1500:
obs_i = rng.choice(n_pointings)
target_i = cadence_obj.get_mjd_i_band_pointing(obs_i, rounded=True).astype(np.int32) # [n_points,]
sub_i = rng.choice(np.arange(len(target_i)),
size=int(len(target_i)*frac_context),
replace=False) # [n_points*frac_context,]
context_i = target_i[sub_i]
context_i.sort()
# every_other_10 = np.arange(0, n_full_x, every_other)
# target_i = np.union1d(context_i, every_other_10)
# target_i.sort()
mask = torch.from_numpy(cadence_obj.get_mask_single_pointing(obs_i)).bool() # [n_points, n_filters]
return (x[:, context_i, :], y[:, context_i, :],
x[:, target_i, :], y[:, target_i, :],
meta,
mask,
torch.from_numpy(sub_i).long())
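# Illustrative usage sketch (not part of the original module): bind
# `collate_fn_opsim` to a cadence object with functools.partial before handing
# it to a DataLoader, mirroring how the training scripts wire it up.
# `dataset` and `cadence_obj` are assumed to be an already-built light-curve
# dataset and an LSSTCadence instance whose pointings have been generated.
def _example_opsim_loader(dataset, cadence_obj, n_pointings, seed=123):
    from functools import partial
    from torch.utils.data import DataLoader
    collate = partial(collate_fn_opsim,
                      rng=np.random.default_rng(seed),
                      cadence_obj=cadence_obj,
                      n_pointings=n_pointings,
                      frac_context=0.9,
                      exclude_ddf=True)
    return DataLoader(dataset, batch_size=20, collate_fn=collate, shuffle=True)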
def collate_fn_opsim_v1(batch, rng, cadence_obj, n_pointings, frac_context=0.9,
exclude_ddf=True):
x, y, meta = zip(*batch) # batch ~ list of (x, y, param) tuples
x = torch.stack(x, axis=0) # [batch_size, n_points, n_filters]
y = torch.stack(y, axis=0) # [batch_size, n_points, n_filters]
meta = torch.stack(meta, axis=0).float() # [batch_size, n_params]
# Log-parameterize some params
n_full_x = x.shape[1]
obs_i = rng.choice(n_pointings)
if exclude_ddf:
while len(cadence_obj.get_mjd_single_pointing(obs_i, rounded=True)) > 1500:
obs_i = rng.choice(n_pointings)
context_i = cadence_obj.get_mjd_single_pointing(obs_i, rounded=True).astype(np.int32) # [n_points,]
every_other_10 = np.arange(0, n_full_x, 20)
target_i = np.union1d(context_i, every_other_10)
target_i.sort()
#mask = torch.from_numpy(cadence_obj.get_mask_single_pointing(obs_i)).bool() # [n_points, n_filters]
return (x[:, context_i, :], y[:, context_i, :],
x[:, target_i, :], y[:, target_i, :],
meta,
None, None)
def collate_fn_sdss(batch, device, pred_frac=0.1):
x, y, meta = zip(*batch) # batch ~ list of (x, y, param) tuples
x = [x_i.unsqueeze(0).to(device) for x_i in x]
y = [y_i.unsqueeze(0).to(device) for y_i in y]
meta = [m.unsqueeze(0).to(device) for m in meta]
n_obs = [x_i.shape[1] for x_i in x]
obs_frac = 1.0 - pred_frac
# Sorted random idx of observed times
# Note that sort() returns a named tuple of values, indices
context_i = [torch.randint(low=0,
high=n_obs_i,
size=[int(n_obs_i*obs_frac)],
dtype=torch.int64).sort().values for n_obs_i in n_obs]
x_context = [x_i[:, context_i[i], :] for i, x_i in enumerate(x)] # length batch_size
y_context = [y_i[:, context_i[i], :] for i, y_i in enumerate(y)] # length batch_size
return (x_context, y_context,
x, y,
meta)
def collate_fn_baseline(batch, rng, cadence_obj, n_pointings, every_other=10,
exclude_ddf=True, pointings_band=3):
x, y, meta = zip(*batch) # batch ~ list of (x, y, param) tuples
x = torch.stack(x, axis=0) # [batch_size, n_points, n_filters]
y = torch.stack(y, axis=0) # [batch_size, n_points, n_filters]
meta = torch.stack(meta, axis=0).float() # [batch_size, n_params]
# Log-parameterize some params
obs_i = rng.choice(n_pointings)
if exclude_ddf:
while len(cadence_obj.get_mjd_single_pointing(obs_i, rounded=True)) > 1500:
obs_i = rng.choice(n_pointings)
context_i = cadence_obj.get_mjd_single_pointing(obs_i, rounded=True).astype(np.int32) # [n_points,]
# Compute summary stats
flux_mean = torch.mean(y[:, context_i, :], dim=1) # [batch_size, n_filters]
flux_std = torch.std(y[:, context_i, :], dim=1) # [batch_size, n_filters]
return (flux_mean, flux_std, meta)
def collate_fn_multi_filter(batch, rng, cadence_obj, n_pointings, every_other=10,
exclude_ddf=True):
x, y, meta = zip(*batch) # batch ~ list of (x, y, param) tuples
x = torch.stack(x, axis=0) # [batch_size, n_points, n_filters]
y = torch.stack(y, axis=0) # [batch_size, n_points, n_filters]
meta = torch.stack(meta, axis=0) # [batch_size, n_params]
# Log-parameterize some params
obs_i = rng.choice(n_pointings)
if exclude_ddf:
while len(cadence_obj.get_mjd_single_pointing(obs_i, rounded=True)) > 1500:
obs_i = rng.choice(n_pointings)
mjd = torch.from_numpy(cadence_obj.get_mjd_single_pointing(obs_i, rounded=True).astype(np.int32))
mask_c = torch.from_numpy(cadence_obj.get_mask_single_pointing(obs_i)) # [n_points, n_filters]
mask_t = torch.from_numpy(rng.choice([True, False], size=mask_c.shape, p=[0.1, 0.9]))
return (x, y, mjd, mask_c, mask_t,
meta)
if __name__ == '__main__':
from magnificat.sdss_dr7_dataset import SDSSDR7Dataset
from torch.utils.data import DataLoader
from functools import partial
agn_params = ['M_i', 'BH_mass', 'redshift']
bp_params = ['log_rf_tau', 'log_sf_inf']
bandpasses = list('ugriz')
val_frac = 0.1
seed = 123
dataset = SDSSDR7Dataset(out_dir='sdss_dr7',
agn_params=agn_params,
bp_params=bp_params,
bandpasses=bandpasses,
num_samples=10,
metadata_kwargs=dict(keep_agn_mode='max_obs'),
light_curve_kwargs=dict(),)
# Define collate fn that samples context points based on pointings
collate_fn = partial(collate_fn_sdss,
pred_frac=0.1,
device=torch.device('cpu')
)
loader = DataLoader(dataset,
batch_size=7,
collate_fn=collate_fn,
)
dataset.get_normalizing_metadata(loader)
print(dataset.mean_params)
x, y, meta = dataset[0]
print(x.shape, y.shape, meta.shape)
print(loader.batch_size)
for d in loader:
print("length of d: ", len(d))
print("batch size: ", len(d[0]))
print("params of first example: ", d[-1][0])
```
#### File: magnify/attentive_neural_process/preprocess.py
```python
import torch as t
import torchvision.transforms
import numpy as np
def collate_fn(batch):
# Puts each data field into a tensor with outer dimension batch size
assert isinstance(batch[0], tuple)
trans = torchvision.transforms.ToTensor()
batch_size = len(batch)
max_num_context = 784
num_context = np.random.randint(10, 784) # extract random number of contexts
num_target = np.random.randint(0, max_num_context - num_context)
num_total_points = num_context + num_target # this num should be # of target points
# num_total_points = max_num_context
context_x, context_y, target_x, target_y = list(), list(), list(), list()
for d, _ in batch:
d = trans(d)
total_idx = np.random.choice(range(784), num_total_points, replace=False)
total_idx = list(map(lambda x: (x//28, x%28), total_idx))
c_idx = total_idx[:num_context]
c_x, c_y, total_x, total_y = list(), list(), list(), list()
for idx in c_idx:
c_y.append(d[:, idx[0], idx[1]])
c_x.append((idx[0] / 27., idx[1] / 27.))
for idx in total_idx:
total_y.append(d[:, idx[0], idx[1]])
total_x.append((idx[0] / 27., idx[1] / 27.))
c_x, c_y, total_x, total_y = list(map(lambda x: t.FloatTensor(x), (c_x, c_y, total_x, total_y)))
context_x.append(c_x)
context_y.append(c_y)
target_x.append(total_x)
target_y.append(total_y)
context_x = t.stack(context_x, dim=0)
context_y = t.stack(context_y, dim=0).unsqueeze(-1)
target_x = t.stack(target_x, dim=0)
target_y = t.stack(target_y, dim=0).unsqueeze(-1)
return context_x, context_y, target_x, target_y
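# Illustrative usage sketch (not part of the original module): the collate
# function above expects batch items of (PIL image, label), e.g. from
# torchvision's MNIST; the root path below is a placeholder.
def _example_mnist_loader(root='./mnist_data', batch_size=16):
    import torchvision
    from torch.utils.data import DataLoader
    train_set = torchvision.datasets.MNIST(root=root, train=True, download=True)
    # Each batch yields (context_x, context_y, target_x, target_y) with pixel
    # coordinates scaled to [0, 1] and pixel intensities as the y values.
    return DataLoader(train_set, batch_size=batch_size, shuffle=True,
                      collate_fn=collate_fn)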
def collate_fn_test(batch):
# Puts each data field into a tensor with outer dimension batch size
assert isinstance(batch[0], tuple)
trans = torchvision.transforms.ToTensor()
batch_size = len(batch)
num_total_points = 784
    num_context = 10  # number of context points to keep
context_x, context_y, target_x, target_y = list(), list(), list(), list()
for d, _ in batch:
d = trans(d)
total_idx = range(784)
total_idx = list(map(lambda x: (x//28, x%28), total_idx))
c_idx = np.random.choice(range(784), num_total_points, replace=False)
c_idx = list(map(lambda x: (x//28, x%28), c_idx))
c_idx = c_idx[:num_context]
c_x, c_y, total_x, total_y = list(), list(), list(), list()
for idx in c_idx:
c_y.append(d[:, idx[0], idx[1]])
c_x.append((idx[0] / 27., idx[1] / 27.))
for idx in total_idx:
total_y.append(d[:, idx[0], idx[1]])
total_x.append((idx[0] / 27., idx[1] / 27.))
c_x, c_y, total_x, total_y = list(map(lambda x: t.FloatTensor(x), (c_x, c_y, total_x, total_y)))
context_x.append(c_x)
context_y.append(c_y)
target_x.append(total_x)
target_y.append(total_y)
context_x = t.stack(context_x, dim=0)
context_y = t.stack(context_y, dim=0).unsqueeze(-1)
target_x = t.stack(target_x, dim=0)
target_y = t.stack(target_y, dim=0).unsqueeze(-1)
return context_x, context_y, target_x, target_y
```
#### File: magnify/data/drw_utils.py
```python
import numpy as np
import torch
from magnificat.drw_dataset import DRWDataset
from magnificat.samplers.dc2_sampler import DC2Sampler
import magnify.data.processing_utils as putils
def get_data_min_max(records, device, predefined=True):
"""Get minimum and maximum for each feature (bandpass)
across the whole dataset
Parameters
----------
records : iterable
Each element is the dictionary of x, y, trimmed_mask, and params
Returns
-------
tuple
min and max values in y across the dataset
"""
data_min, data_max = None, None
inf = torch.Tensor([float("Inf")])[0].to(device)
n_samples = len(records)
print(f"Computing global min and max of {n_samples} training examples...")
for data_i in range(n_samples):
data = records[data_i]
mask = data['trimmed_mask']
vals = data['y']
n_features = vals.size(-1)
if predefined:
break
batch_min = []
batch_max = []
for i in range(n_features):
non_missing_vals = vals[:, i][mask[:, i] == 1]
if len(non_missing_vals) == 0:
batch_min.append(inf)
batch_max.append(-inf)
else:
batch_min.append(torch.min(non_missing_vals))
batch_max.append(torch.max(non_missing_vals))
batch_min = torch.stack(batch_min)
batch_max = torch.stack(batch_max)
if (data_min is None) and (data_max is None):
data_min = batch_min
data_max = batch_max
else:
data_min = torch.min(data_min, batch_min)
data_max = torch.max(data_max, batch_max)
if predefined:
data_min = 15*torch.ones(n_features).to(device)
data_max = 30*torch.ones(n_features).to(device)
return data_min, data_max
else:
return data_min.to(device), data_max.to(device)
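# Minimal self-contained sketch (not part of the original module) showing the
# record structure get_data_min_max expects; shapes and values are made up.
def _example_data_min_max():
    device = torch.device('cpu')
    records = []
    for _ in range(3):
        y = 20.0 + 5.0*torch.rand(100, 2)  # [T, n_bandpasses] fake magnitudes
        mask = torch.rand(100, 2) > 0.3    # True where "observed"
        records.append({'x': torch.linspace(0., 3650., 100),
                        'y': y,
                        'trimmed_mask': mask,
                        'params': torch.zeros(4)})
    # predefined=False forces an actual scan instead of the fixed (15, 30) bounds
    return get_data_min_max(records, device, predefined=False)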
def variable_time_collate_fn(batch, args, device=torch.device("cpu"),
data_type="train",
data_min=None, data_max=None):
"""
Expects a batch of time series data in the form of (
record_id, tt, vals, mask, labels) where
- record_id is a patient id
- tt is a 1-dimensional tensor containing T time values of
observations.
- vals is a (T, D) tensor containing observed values for D variables.
- mask is a (T, D) tensor containing 1 where values were
observed and 0 otherwise.
- labels is a list of labels for the current patient,
if labels are available. Otherwise None.
Returns:
combined_tt: The union of all time observations.
combined_vals: (M, T, D) tensor containing the observed values.
combined_mask: (M, T, D) tensor containing 1 where values
were observed and 0 otherwise.
"""
D = batch[0]['y'].shape[-1] # first example (0), get y values (2),
# and dim 1 of shape is Y_dim
common_times = batch[0]['x']
batch_size = len(batch)
# Whether each time was observed in any filter in the batch
observed = torch.zeros_like(common_times).to(bool) # init unobserved
for b in range(batch_size):
# Was it observed in any of the filters, for this example?
obs_example = batch[b]['trimmed_mask'].any(dim=1) # [len(common_times),]
observed = torch.logical_or(observed, obs_example)
combined_tt = common_times[observed].to(device)
inv_indices = torch.arange(len(combined_tt)-1, -1, -1)
combined_vals = torch.zeros([batch_size, len(combined_tt), D])
combined_mask = torch.zeros([batch_size, len(combined_tt), D])
combined_labels = None
N_labels = len(batch[0]['params'])
combined_labels = torch.zeros(len(batch), N_labels) + torch.tensor(float('nan')) # [B, N_labels]
for b, data in enumerate(batch):
# Slice observed times only
vals = data['y'][observed, :]
mask = data['trimmed_mask'][observed, :]
if 'params' in data:
labels = data['params']
# Populate batch y, mask in inverse time
combined_vals[b, inv_indices] = vals
combined_mask[b, inv_indices] = mask.to(torch.float32)
if labels is not None:
combined_labels[b, :] = labels
# Put on device
combined_tt = combined_tt.to(device)
combined_vals = combined_vals.to(device)
combined_mask = combined_mask.to(device)
combined_labels = combined_labels.to(device)
combined_vals, _, _ = putils.normalize_masked_data(combined_vals,
combined_mask,
att_min=data_min,
att_max=data_max)
if torch.max(combined_tt) != 0.:
combined_tt = combined_tt / 3650.0 # out of 10 years, in days
data_dict = {"data": combined_vals,
"time_steps": combined_tt,
"mask": combined_mask,
"labels": combined_labels}
data_dict = putils.split_and_subsample_batch(data_dict,
args,
data_type=data_type)
return data_dict
def get_drw_datasets(train_seed, val_seed, n_pointings, bandpasses,
t_transform, y_transform,
train_dir, val_dir):
train_params = [f'tau_{bp}' for bp in bandpasses]
train_params += [f'SF_inf_{bp}' for bp in bandpasses]
train_params += ['BH_mass', 'M_i', 'redshift']
train_params += [f'mag_{bp}' for bp in bandpasses]
log_params = [True for bp in bandpasses]
log_params += [True for bp in bandpasses]
log_params += [False, False, False]
log_params += [False for bp in bandpasses]
n_pointings = n_pointings
train_cat_idx = np.load('/home/jwp/stage/sl/magnify/train_idx.npy') # 11227
val_cat_idx = np.load('/home/jwp/stage/sl/magnify/val_idx.npy') # 114
n_train = len(train_cat_idx)
n_val = len(val_cat_idx)
train_dataset = DRWDataset(DC2Sampler(train_seed, bandpasses, train_cat_idx),
out_dir=train_dir,
num_samples=n_train,
seed=train_seed,
is_training=True,
transform_x_func=t_transform,
transform_y_func=y_transform,
err_y=0.01,
obs_kwargs={'n_pointings_init': n_pointings,
'obs_dir': '/home/jwp/stage/sl/magnify/latent_ode_data/gr_obs',
'bandpasses': bandpasses},
prestored_bandpasses=bandpasses)
train_dataset.slice_params = [train_dataset.param_names.index(n) for n in train_params]
train_dataset.log_params = log_params
train_dataset.get_normalizing_metadata(set_metadata=True)
# Validation data
val_dataset = DRWDataset(DC2Sampler(val_seed, bandpasses, val_cat_idx),
out_dir=val_dir,
num_samples=n_val,
seed=val_seed,
is_training=False,
transform_x_func=t_transform,
transform_y_func=y_transform,
err_y=0.01,
obs_kwargs={'n_pointings_init': n_pointings,
'obs_dir': '/home/jwp/stage/sl/magnify/latent_ode_data/gr_obs',
'bandpasses': bandpasses},
prestored_bandpasses=bandpasses)
val_dataset.slice_params = train_dataset.slice_params
val_dataset.log_params = log_params
val_dataset.mean_params = train_dataset.mean_params
val_dataset.std_params = train_dataset.std_params
return train_dataset, val_dataset
```
#### File: latent_ode/lib/parse_datasets.py
```python
from functools import partial
import torch
import magnify.latent_ode.lib.utils as utils
from magnify.latent_ode.periodic_utils import Periodic_1d, Periodic1dDataset
import magnify.latent_ode.drw_utils as drw_utils
from torch.distributions import uniform
from torch.utils.data import DataLoader
def parse_datasets(args, device):
def basic_collate_fn(batch, time_steps, args = args, device=device, data_type="train"):
tseries, labels = map(list, zip(*batch))
tseries = torch.stack(tseries, dim=0)
labels = torch.stack(labels, dim=0)
tseries = tseries.to(device) # [n_samples, n_times, input_dim]
labels = labels.to(device) # [n_samples, n_labels]
# batch = torch.stack(batch) # [B, n_times, 2, 1]
data_dict = {
"data": tseries,
"time_steps": time_steps}
# physionet did this before calling split_and_subsample_batch
data_dict["labels"] = labels
data_dict = utils.split_and_subsample_batch(data_dict, args,
data_type=data_type)
return data_dict
dataset_name = args.dataset
n_total_tp = args.timepoints + args.extrap
max_t_extrap = args.max_t / args.timepoints * n_total_tp
##################################################################
if dataset_name == 'drw':
train_seed = 123
val_seed = 456
train_dataset, test_dataset = drw_utils.get_drw_datasets(train_seed,
val_seed)
# record_id, tt, y_vals, labels, mask = train_dataset[0]
input_dim = train_dataset[0]['y'].shape[-1] # [n_filters]
n_labels = len(train_dataset.get_sliced_params())
batch_size = args.batch_size
print("batch size", batch_size)
# record_id, tt, vals, mask, labels = train_data[0]
# n_samples = len(total_dataset)
data_min, data_max = drw_utils.get_data_min_max(train_dataset,
device)
print("Data min: ", data_min)
print("Data max: ", data_max)
train_dataloader = DataLoader(train_dataset,
batch_size=batch_size,
shuffle=False,
# num_workers=4,
collate_fn=partial(drw_utils.variable_time_collate_fn,
args=args,
device=device,
data_type="train",
data_min=data_min,
data_max=data_max))
test_dataloader = DataLoader(test_dataset,
batch_size=len(test_dataset),
shuffle=False,
collate_fn=partial(drw_utils.variable_time_collate_fn,
args=args,
device=device,
data_type="test",
data_min=data_min,
data_max=data_max))
attr_names = train_dataset.get_sliced_params()
data_objects = {"dataset_obj": train_dataset,
"train_dataloader": utils.inf_generator(train_dataloader),
"test_dataloader": utils.inf_generator(test_dataloader),
"input_dim": input_dim,
"n_train_batches": len(train_dataloader),
"n_test_batches": len(test_dataloader),
"attr": attr_names, # optional
"classif_per_tp": False, # optional
"n_labels": n_labels} # optional
return data_objects
########### 1d datasets ###########
# Sampling args.timepoints time points in the interval [0, args.max_t]
# Sample points for both training sequence and explapolation (test)
distribution = uniform.Uniform(torch.Tensor([0.0]),torch.Tensor([max_t_extrap]))
time_steps_extrap = distribution.sample(torch.Size([n_total_tp-1]))[:,0]
time_steps_extrap = torch.cat((torch.Tensor([0.0]), time_steps_extrap))
time_steps_extrap = torch.sort(time_steps_extrap)[0]
dataset_obj = None
##################################################################
# Sample a periodic function
if dataset_name == "periodic":
dataset_obj = Periodic_1d(
init_freq = None, init_amplitude = 1.,
final_amplitude = 1., final_freq = None,
z0 = 1.)
##################################################################
if dataset_obj is None:
raise Exception("Unknown dataset: {}".format(dataset_name))
print("n_samples", args.n)
dataset = dataset_obj.sample_traj(time_steps_extrap, n_samples=args.n,
noise_weight=args.noise_weight)
# Process small datasets
time_steps_extrap = time_steps_extrap.to(device)
train_y, test_y = utils.split_train_test(dataset, train_frac=0.8)
train_data = Periodic1dDataset(train_y)
test_data = Periodic1dDataset(test_y)
# first example (0), first in tuple for tseries (0), 2nd dim of each tseries
input_dim = train_y[0].size(-1) # which-dimensional time series?
batch_size = min(args.batch_size, args.n)
print("batch size", batch_size)
train_dataloader = DataLoader(train_data,
batch_size=batch_size,
shuffle=False,
collate_fn=lambda b: basic_collate_fn(b, time_steps_extrap, data_type="train"))
test_dataloader = DataLoader(test_data,
batch_size=args.n,
shuffle=False,
collate_fn=lambda b: basic_collate_fn(b, time_steps_extrap, data_type = "test"))
print("number of train batches", len(train_dataloader))
print("number of test batches", len(test_dataloader))
data_objects = {"train_dataloader": utils.inf_generator(train_dataloader),
"test_dataloader": utils.inf_generator(test_dataloader),
"input_dim": input_dim,
"n_train_batches": len(train_dataloader),
"n_test_batches": len(test_dataloader),
"n_labels": 1,
"classif_per_tp": False, }
return data_objects
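# Illustrative sketch (not part of the original module): the argparse
# attributes read directly by parse_datasets for the "periodic" toy dataset.
# Downstream utilities (e.g. utils.split_and_subsample_batch) may require
# additional fields, so treat this namespace as a starting point only.
def _example_periodic_args():
    from argparse import Namespace
    return Namespace(dataset='periodic',
                     timepoints=100,     # number of observed time points
                     extrap=0,           # extra points reserved for extrapolation
                     max_t=5.0,          # maximum time of the sampled interval
                     n=1000,             # number of sampled trajectories
                     noise_weight=0.01,  # observation noise amplitude
                     batch_size=50)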
```
#### File: magnify/magnify/script_utils.py
```python
import os
import torch
def save_state(model, optim, lr_scheduler, kl_scheduler, epoch,
train_dir, param_w_scheduler, epoch_i):
"""Save the state dict of the current training to disk
Parameters
----------
train_loss : float
current training loss
val_loss : float
current validation loss
"""
state = dict(
model=model.state_dict(),
optimizer=optim.state_dict(),
lr_scheduler=lr_scheduler.state_dict(),
kl_scheduler=kl_scheduler.__dict__,
param_w_scheduler=param_w_scheduler.__dict__,
epoch=epoch,
)
model_path = os.path.join(train_dir, f'model_{epoch_i}.mdl')
torch.save(state, model_path)
def load_state(model, train_dir, device,
optim=None, lr_scheduler=None, kl_scheduler=None,
param_w_scheduler=None,
epoch_i=0,
):
"""Load the state dict to resume training or infer
"""
model_path = os.path.join(train_dir, f'model_{epoch_i}.mdl')
state = torch.load(model_path)
model.load_state_dict(state['model'])
model.to(device)
if optim is not None:
optim.load_state_dict(state['optimizer'])
if lr_scheduler is not None:
lr_scheduler.load_state_dict(state['lr_scheduler'])
if kl_scheduler is not None:
kl_scheduler.__dict__ = state['kl_scheduler']
if param_w_scheduler is not None:
param_w_scheduler.__dict__ = state['param_w_scheduler']
print(f"Loaded model at epoch {state['epoch']}")
```
#### File: magnify/magnify/train_anp_dc2.py
```python
import os
from functools import partial
import random
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from tensorboardX import SummaryWriter
import torch
from magnificat.drw_dataset import DRWDataset
from magnificat.samplers.dc2_sampler import DC2Sampler
from magnificat.cadence import LSSTCadence
from torch.utils.data import DataLoader
from magnify.attentive_neural_process.network import NeuralProcess
from magnify.attentive_neural_process.context_target_sampler import collate_fn_opsim
def train(run_dir, train_params, bandpasses, log_params,
train_cat_idx, val_cat_idx, n_pointings=1000,
checkpoint_path=None):
torch.cuda.empty_cache()
os.makedirs(run_dir, exist_ok=True)
train_seed = 123
n_train = len(train_cat_idx)
n_val = len(val_cat_idx)
train_dataset = DRWDataset(DC2Sampler(train_seed, bandpasses, train_cat_idx), 'train_drw',
num_samples=n_train,
seed=train_seed,
shift_x=-3650*0.5,
rescale_x=1.0/(3650*0.5)*4.0,
delta_x=1.0,
max_x=3650.0,
err_y=0.01,
bandpasses=bandpasses)
train_dataset.slice_params = [train_dataset.param_names.index(n) for n in train_params]
train_dataset.log_params = log_params
train_dataset.get_normalizing_metadata(set_metadata=True)
print(train_dataset.slice_params)
print(train_dataset.mean_params, train_dataset.std_params)
# Generate pointings
cadence_obj = LSSTCadence('obs', train_seed)
ra, dec = cadence_obj.get_pointings(n_pointings)
cadence_obj.get_obs_info(ra, dec)
cadence_obj.set_bandpasses(bandpasses)
# Define collate fn that samples context points based on pointings
collate_fn = partial(collate_fn_opsim,
rng=np.random.default_rng(train_seed),
cadence_obj=cadence_obj,
n_pointings=n_pointings,
frac_context=0.9,
exclude_ddf=True,)
train_loader = DataLoader(train_dataset, batch_size=20, collate_fn=collate_fn,
shuffle=True)
# Validation data
val_seed = 456
val_dataset = DRWDataset(DC2Sampler(val_seed, bandpasses, val_cat_idx), 'val_drw',
num_samples=n_val,
seed=val_seed,
shift_x=-3650*0.5,
rescale_x=1.0/(3650*0.5)*4.0,
delta_x=1.0,
max_x=3650.0,
err_y=0.01,
bandpasses=bandpasses)
val_dataset.slice_params = train_dataset.slice_params
val_dataset.log_params = log_params
val_dataset.mean_params = train_dataset.mean_params
val_dataset.std_params = train_dataset.std_params
collate_fn_val = partial(collate_fn_opsim,
rng=np.random.default_rng(val_seed),
cadence_obj=cadence_obj,
n_pointings=n_pointings,
frac_context=0.9,
exclude_ddf=True,)
val_loader = DataLoader(val_dataset, batch_size=n_val, collate_fn=collate_fn_val,
shuffle=False)
epochs = 200
model = NeuralProcess(x_dim=len(bandpasses),
y_dim=len(bandpasses),
use_self_attn=False,
hidden_dim=32, latent_dim=32, weight_y_loss=1.0,
n_target=len(train_dataset.slice_params),
).cuda()
total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Trainable params: ", total_params)
optim = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optim, factor=0.5,
patience=5, verbose=True)
if checkpoint_path is not None:
state = torch.load(checkpoint_path)
model.load_state_dict(state['model'])
optim.load_state_dict(state['optimizer'])
scheduler.load_state_dict(state['scheduler'])
scheduler.patience = 10
def get_lr(gamma, optimizer):
return [group['lr'] * gamma
for group in optimizer.param_groups]
for param_group, lr in zip(optim.param_groups, get_lr(0.2, optim)):
param_group['lr'] = lr
# print(scheduler.__dict__)
model.train()
min_val_loss = np.inf
writer = SummaryWriter(run_dir)
for epoch in tqdm(range(epochs)):
train_single_epoch(model, train_loader, val_loader, optim, epoch, writer)
val_loss = eval(model, val_loader, epoch, writer, log=False)
scheduler.step(val_loss)
# Save model if validation loss decreased
if val_loss < min_val_loss:
torch.save({'model': model.state_dict(),
'optimizer': optim.state_dict(),
'scheduler': scheduler.state_dict()},
os.path.join(run_dir, 'checkpoint.pth.tar'))
min_val_loss = val_loss
def train_single_epoch(model, train_loader, val_loader, optim, epoch, writer):
total_loss, mse_loss, meta_loss = 0.0, 0.0, 0.0
for i, data in enumerate(train_loader):
model.train()
optim.zero_grad()
context_x, context_y, target_x, target_y, meta, mask, sub_i = data
context_x = context_x.cuda()
context_y = context_y.cuda()
target_x = target_x.cuda()
target_y = target_y.cuda()
meta = meta.cuda()
mask = mask.cuda()
sub_i = sub_i.cuda()
# pass through the latent model
y_pred, losses, extra = model(context_x, context_y, target_x, target_y, meta, mask, sub_i)
loss = losses['loss']
# Training step
loss.backward()
optim.step()
# Logging
total_loss += (loss - total_loss)/(i+1)
mse_loss += (losses['loss_mse'] - mse_loss)/(i+1)
meta_loss += (losses['loss_meta'] - meta_loss)/(i+1)
if i % 100 == 0:
eval(model, val_loader, epoch*len(train_loader)+i, writer)
writer.add_scalars('training_loss',
{'loss': total_loss,
'meta': losses['loss_meta']},
epoch)
def eval(model, val_loader, epoch, writer, log=True):
total_loss, mse_loss, meta_loss = 0.0, 0.0, 0.0
model.eval()
with torch.no_grad():
for i, data in enumerate(val_loader):
context_x, context_y, target_x, target_y, meta, mask, sub_i = data
context_x = context_x.cuda()
context_y = context_y.cuda()
target_x = target_x.cuda()
target_y = target_y.cuda()
meta = meta.cuda()
mask = mask.cuda()
sub_i = sub_i.cuda()
# pass through the latent model
pred_y, losses, extra = model(context_x, context_y, target_x, target_y,
meta, mask, sub_i)
loss = losses['loss']
# Logging
total_loss += (loss - total_loss)/(i+1)
mse_loss += (losses['loss_mse'] - mse_loss)/(i+1)
meta_loss += (losses['loss_meta'] - meta_loss)/(i+1)
if False:
for p in range(meta.shape[1]):
fig = get_params_fig(extra['mean_meta'].cpu().numpy()[:, p],
extra['log_sigma_meta'].cpu().numpy()[:, p],
meta.cpu().numpy()[:, p])
writer.add_figure(f'param {p} recovery', fig, global_step=epoch)
# Get histogram of errors
if log:
model.param_loss.set_trained_pred(extra['pred_meta'])
sample = model.param_loss.sample(mean=torch.zeros([1, meta.shape[1]]).cuda(),
std=torch.ones([1, meta.shape[1]]).cuda(),
n_samples=100).mean(1) # [n_batch, Y_dim]
error = np.mean(sample - meta.cpu().numpy(), axis=-1)
writer.add_histogram('Mean error', error, epoch)
# Visualize fit on first light curve
if log:
bp_i = 0
pred_y, _, extra = model(context_x[0:1], context_y[0:1],
target_x[0:1], None, None, mask, sub_i)
pred_y = pred_y.cpu().numpy()[0, :, bp_i]
std_y = extra['y_dist'].scale.cpu().numpy()[0, :, bp_i]
target_x = target_x.cpu().numpy()[0, :, bp_i]
target_y = target_y.cpu().numpy()[0, :, bp_i]
context_x = context_x.cpu().numpy()[0, :, bp_i]
context_y = context_y.cpu().numpy()[0, :, bp_i]
fig = get_light_curve_fig(pred_y, std_y, context_x, context_y, target_x, target_y)
writer.add_figure('fit', fig, global_step=epoch)
if log:
writer.add_scalars('val_loss',
{'loss': total_loss,
'meta': losses['loss_meta']},
epoch)
return total_loss
def get_params_fig(pred_mean, pred_log_sigma, truth):
fig, ax = plt.subplots()
truth_grid = np.linspace(truth.min(), truth.max(), 20)
ax.errorbar(truth, pred_mean, np.exp(pred_log_sigma), fmt='o',
color='tab:blue')
ax.plot(truth_grid, truth_grid, linestyle='--', color='tab:gray')
return fig
def get_light_curve_fig(pred_y, std_y, context_x, context_y, target_x, target_y):
fig, ax = plt.subplots(figsize=(10, 5))
target_x = target_x
pred_y = pred_y
std_y = std_y
ax.scatter(target_x, pred_y, marker='.', color='tab:blue')
ax.fill_between(target_x,
pred_y - std_y,
pred_y + std_y,
alpha=0.2,
facecolor="tab:blue",
interpolate=True,
label="uncertainty",
)
ax.scatter(target_x, target_y, marker='.', color='k', label='target')
ax.scatter(context_x, context_y, marker='*', color='tab:orange', label='context')
#ax.legend()
return fig
if __name__ == '__main__':
run_dir = os.path.join('results', 'E3')
bandpasses = list('ugrizy')
train_params = [f'tau_{bp}' for bp in bandpasses]
train_params += [f'SF_inf_{bp}' for bp in bandpasses]
train_params += ['BH_mass', 'M_i', 'redshift']
train_params += [f'mag_{bp}' for bp in bandpasses]
log_params = [True for bp in bandpasses]
log_params += [True for bp in bandpasses]
log_params += [False, False, False]
log_params += [False for bp in bandpasses]
train_cat_idx = np.load('train_idx.npy') # 11227
val_cat_idx = np.load('val_idx.npy') # 114
train(run_dir, train_params, bandpasses,
train_cat_idx=train_cat_idx, val_cat_idx=val_cat_idx,
n_pointings=1000, log_params=log_params,
#checkpoint_path=os.path.join(run_dir, 'checkpoint.pth.tar')
)
# test(os.path.join(run_dir, 'checkpoint.pth.tar'))
``` |
{
"source": "jiwoncpark/node-to-joy",
"score": 2
} |
#### File: trainval_data/graphs/cosmodc2_graph.py
```python
import os
import os.path as osp
import multiprocessing
from functools import cached_property
import bisect
import numpy as np
import pandas as pd
from scipy.spatial import cKDTree
import scipy.stats
from tqdm import tqdm
import torch
from torch.utils.data.dataset import ConcatDataset
from torch_geometric.data import DataLoader
from n2j.trainval_data.graphs.base_graph import BaseGraph, Subgraph
from n2j.trainval_data.utils import coord_utils as cu
from n2j.trainval_data.utils.running_stats import RunningStats
from torch.utils.data.sampler import SubsetRandomSampler # WeightedRandomSampler,
class CosmoDC2Graph(ConcatDataset):
"""Concatenation of multiple CosmoDC2GraphHealpix instances,
with an added data transformation functionality
"""
def __init__(self, in_dir, healpixes, raytracing_out_dirs, aperture_size,
n_data, features, subsample_pdf_func=None,
n_subsample=None, subsample_with_replacement=True,
stop_mean_std_early=False, n_cores=20, num_workers=4,
out_dir=None):
"""Summary
Parameters
----------
in_dir : TYPE
Description
healpixes : TYPE
Description
raytracing_out_dirs : TYPE
Description
aperture_size : TYPE
Description
n_data : TYPE
Description
features : TYPE
Description
subsample_pdf_func : callable, optional
Function that evaluates the target subsampling PDF
n_subsample : int, optional
How many examples to subsample, to form the final effective
dataset size. Required if subsample_pdf_func is not None.
stop_mean_std_early : bool, optional
Description
n_cores : int, optional
Description
"""
self.stop_mean_std_early = stop_mean_std_early
self.n_datasets = len(healpixes)
self.n_cores = n_cores
self.num_workers = num_workers
self.subsample_pdf_func = subsample_pdf_func
if out_dir is None:
out_dir = in_dir
else:
out_dir = out_dir
if self.subsample_pdf_func is not None:
assert n_subsample is not None
self.n_subsample = n_subsample
self.replace = False
datasets = []
Y_list = []
for i in range(self.n_datasets):
graph_hp = CosmoDC2GraphHealpix(healpixes[i],
in_dir,
raytracing_out_dirs[i],
aperture_size,
n_data[i],
features,
n_cores=self.n_cores,
out_dir=out_dir
)
datasets.append(graph_hp)
Y_list.append(graph_hp.Y)
self.Y = pd.concat(Y_list, ignore_index=True).reset_index(drop=True)
ConcatDataset.__init__(self, datasets)
self.transform_X_Y_local = None
self.transform_Y = None
@cached_property
def data_stats(self):
"""Statistics of the X, Y data used for standardizing
"""
loader_dict = dict(X=lambda b: b.x, # node features x
Y_local=lambda b: b.y_local, # node labels y_local
Y=lambda b: b.y,
X_meta=lambda b: b.x_meta) # graph labels y
rs = RunningStats(loader_dict)
y_class_counts = 0 # [n_classes,] where n_classes = number of bins
y_class = torch.zeros(len(self), dtype=torch.long) # [n_train,]
if self.subsample_pdf_func is None:
subsample_weight = None
else:
subsample_weight = np.zeros(len(self)) # [n_train,]
y_values_orig = np.zeros(len(self))
batch_size = 2000 if self.n_cores < 8 else 10000
dummy_loader = DataLoader(dataset=self,
batch_size=batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=False)
print("Generating standardizing metadata...")
for i, b in enumerate(dummy_loader):
# Update running stats for this new batch
rs.update(b, i)
# Update running bin count for kappa
y_class_b = b.y_class
y_class_b[y_class_b > 3] = 3
y_class_counts += torch.bincount(y_class_b, minlength=4)[:4]
y_class[i*batch_size:(i+1)*batch_size] = y_class_b
# Log original kappa values
k_values_orig_batch = b.y[:, 0].cpu().numpy()
y_values_orig[i*batch_size:(i+1)*batch_size] = k_values_orig_batch
# Compute subsampling weights
if self.subsample_pdf_func is not None:
subsample_weight[i*batch_size:(i+1)*batch_size] = self.subsample_pdf_func(k_values_orig_batch)
if self.stop_mean_std_early and i > 100:
break
print("Y_mean without resampling: ", rs.stats['Y_mean'])
print("Y_std without resampling: ", rs.stats['Y_var']**0.5)
# Each bin is weighted by the inverse frequency
class_weight = torch.sum(y_class_counts)/y_class_counts # [n_bins,]
y_weight = class_weight[y_class] # [n_train]
subsample_idx = None
# Recompute mean, std if subsampling according to a distribution
if self.subsample_pdf_func is not None:
print("Re-generating standardizing metadata for subsampling dist...")
# Re-initialize mean, std
rs = RunningStats(loader_dict)
# Define SubsetRandomSampler to follow dist in subsample_pdf_func
print("Subsampling with replacement to follow provided subsample_pdf_func...")
# See https://github.com/pytorch/pytorch/issues/11201
torch.multiprocessing.set_sharing_strategy('file_system')
rng = np.random.default_rng(123)
kde = scipy.stats.gaussian_kde(y_values_orig, bw_method='scott')
p = subsample_weight/kde.pdf(y_values_orig)
p /= np.sum(p)
subsample_idx = rng.choice(np.arange(len(y_values_orig)),
p=p, replace=True,
size=self.n_subsample)
subsample_idx = subsample_idx.tolist()
sampler = SubsetRandomSampler(subsample_idx)
sampling_loader = DataLoader(self,
batch_size=batch_size,
sampler=sampler,
num_workers=self.num_workers,
drop_last=False)
for i, b in enumerate(sampling_loader):
# Update running stats for this new batch
rs.update(b, i)
if self.stop_mean_std_early and i > 100:
break
class_weight = None
y_weight = None
print("Y_mean with resampling: ", rs.stats['Y_mean'])
print("Y_std with resampling: ", rs.stats['Y_var']**0.5)
print("X_meta_mean with resampling: ", rs.stats['X_meta_mean'])
print("X_meta_std with resampling: ", rs.stats['X_meta_var']**0.5)
stats = dict(X_mean=rs.stats['X_mean'], X_std=rs.stats['X_var']**0.5,
Y_mean=rs.stats['Y_mean'], Y_std=rs.stats['Y_var']**0.5,
Y_local_mean=rs.stats['Y_local_mean'],
Y_local_std=rs.stats['Y_local_var']**0.5,
X_meta_mean=rs.stats['X_meta_mean'],
X_meta_std=rs.stats['X_meta_var']**0.5,
y_weight=y_weight, # [n_train,] or None
subsample_idx=subsample_idx,
class_weight=class_weight, # [n_classes,] or None
)
return stats
@cached_property
def data_stats_valtest(self):
"""Statistics of the X, Y data on validation set used for
resampling to mimic training dist.
Mean, std computation skipped.
"""
print("Computing resampling stats for val/test set...")
B = 1000
dummy_loader = DataLoader(self, # val_dataset
batch_size=B,
shuffle=False,
num_workers=self.num_workers,
drop_last=False)
# If subsample_pdf_func is None, don't need this attribute
assert self.subsample_pdf_func is not None
assert self.n_subsample is not None
torch.multiprocessing.set_sharing_strategy('file_system')
y_values_orig = np.zeros(len(self)) # [n_val,]
subsample_weight = np.zeros(len(self)) # [n_val,]
# Evaluate target density on all validation examples
for i, b in enumerate(dummy_loader):
# Log original kappa values
k_batch = b.y[:, 0].cpu().numpy()
y_values_orig[i*B:(i+1)*B] = k_batch
# Compute subsampling weights
subsample_weight[i*B:(i+1)*B] = self.subsample_pdf_func(k_batch)
rng = np.random.default_rng(456)
kde = scipy.stats.gaussian_kde(y_values_orig, bw_method='scott')
p = subsample_weight/kde.pdf(y_values_orig)
p /= np.sum(p)
subsample_idx = rng.choice(np.arange(len(y_values_orig)),
p=p, replace=self.replace,
size=self.n_subsample)
subsample_idx = subsample_idx.tolist()
stats_val = dict(subsample_idx=subsample_idx)
return stats_val
def __getitem__(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed"
" dataset length")
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
data = self.datasets[dataset_idx][sample_idx]
if self.transform_X_Y_local is not None:
data.x, data.y_local, data.x_meta = self.transform_X_Y_local(data.x,
data.y_local,
data.x_meta)
if self.transform_Y is not None:
data.y = self.transform_Y(data.y)
return data
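# Illustrative sketch (not part of the original module): one way the
# transform_X_Y_local/transform_Y hooks applied in __getitem__ above can be
# populated from the standardizing statistics that data_stats computes. The
# transforms actually used in the n2j training code may differ.
def _example_set_standardizing_transforms(dataset):
    stats = dataset.data_stats  # cached property; triggers a scan on first use

    def transform_x_y_local(x, y_local, x_meta):
        x = (x - stats['X_mean'])/stats['X_std']
        y_local = (y_local - stats['Y_local_mean'])/stats['Y_local_std']
        x_meta = (x_meta - stats['X_meta_mean'])/stats['X_meta_std']
        return x, y_local, x_meta

    dataset.transform_X_Y_local = transform_x_y_local
    dataset.transform_Y = lambda y: (y - stats['Y_mean'])/stats['Y_std']
    return dataset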
class CosmoDC2GraphHealpix(BaseGraph):
"""Set of graphs representing a single healpix of the CosmoDC2 field
"""
columns = ['ra', 'dec', 'galaxy_id', 'redshift']
columns += ['ra_true', 'dec_true', 'redshift_true']
columns += ['ellipticity_1_true', 'ellipticity_2_true']
columns += ['bulge_to_total_ratio_i']
columns += ['ellipticity_1_bulge_true', 'ellipticity_1_disk_true']
columns += ['ellipticity_2_bulge_true', 'ellipticity_2_disk_true']
# columns += ['shear1', 'shear2', 'convergence']
columns += ['size_bulge_true', 'size_disk_true', 'size_true']
columns += ['mag_{:s}_lsst'.format(b) for b in 'ugrizY']
def __init__(self, healpix, in_dir, raytracing_out_dir,
aperture_size, n_data, features,
n_cores=20,
out_dir=None,
debug=False,):
"""Graph dataset for a single healpix
Parameters
----------
healpix : int
Healpix ID of NSIDE=32 from CosmoDC2
in_dir : str
Directory from which to read input. Catalogs for this healpix
should be placed in `in_dir/cosmodc2_{healpix}/raw`
raytracing_out_dir : str
Directory containing the raytraced labels, which should live
in `raytracing_out_dir/Y_{healpix}`
aperture_size : float
Radius of aperture in arcmin
n_data : int
Number of sightlines
features : list
Input features per node
n_cores : int, optional
Number of cores to parallelize across. Only used when generating
the data.
out_dir : str, optional
Directory to store the generated graphs. Graphs will go to
`out_dir/cosmodc2_{healpix}/processed`.
debug : bool, optional
Debug mode. Default: False
"""
self.in_dir = in_dir
if out_dir is None:
self.out_dir = in_dir
else:
self.out_dir = out_dir
self.healpix = healpix
self.features = features
self.n_cores = n_cores
self.closeness = 0.5/60.0 # deg, edge criterion between neighbors
self.mag_lower = -np.inf # lower magnitude cut, excludes stars
# LSST gold sample i-band mag (Gorecki et al 2014) = 25.3
# LSST 10-year coadded 5-sigma depth = 26.8
self.mag_upper = 26.8 # upper magnitude cut, excludes small halos
# Store output in <root>/processed for processed_dir
# Read input from in_dir/cosmodc2_{healpix}/raw
root = osp.join(self.out_dir, f'cosmodc2_{self.healpix}')
BaseGraph.__init__(self, root, raytracing_out_dir, aperture_size,
n_data, debug)
@property
def n_features(self):
return len(self.features)
@property
def raw_dir(self) -> str:
return osp.join(self.in_dir, f'cosmodc2_{self.healpix}', 'raw')
@property
def raw_file_name(self):
if self.debug:
return 'debug_gals.csv'
else:
return 'gals_{:d}.csv'.format(self.healpix)
@property
def raw_file_names(self):
return [self.raw_file_name]
@property
def processed_file_fmt(self):
if self.debug:
return 'debug_subgraph_{:d}.pt'
else:
return 'subgraph_{:d}.pt'
@property
def processed_file_path_fmt(self):
return osp.join(self.processed_dir, self.processed_file_fmt)
@property
def processed_file_names(self):
"""A list of files relative to self.processed_dir which needs to be
found in order to skip the processing
"""
return [self.processed_file_fmt.format(n) for n in range(self.n_data)]
def get_los_node(self):
"""Properties of the sightline galaxy, with unobservable features
(everything other than position) appropriately masked out.
Parameters
----------
ra_los : ra of sightline, in arcmin
dec_los : dec of sightline, in arcmin
"""
node = dict(zip(self.features, [[0]]*len(self.features)))
return node
def download(self):
"""Called when `raw_file_names` aren't found
"""
pass
def get_gals_iterator(self, healpix, columns, chunksize=100000):
"""Get an iterator over the galaxy catalog defining the line-of-sight
galaxies
"""
# dtype = dict(zip(columns, [np.float32]*len(columns)))
# if 'galaxy_id' in columns:
# dtype['galaxy_id'] = np.int64
if self.debug:
cat = pd.read_csv(self.raw_paths[0],
chunksize=50, nrows=1000,
usecols=columns, dtype=np.float32)
else:
cat = pd.read_csv(self.raw_paths[0],
chunksize=chunksize, nrows=None,
usecols=columns, dtype=np.float32)
return cat
def get_edges(self, ra_dec):
"""Get the edge indices from the node positions
Parameters
----------
ra_dec : `np.ndarray`
ra and dec of nodes, of shape `[n_nodes, 2]`
Returns
-------
`torch.LongTensor`
edge indices, of shape `[2, n_edges]`
"""
n_nodes = ra_dec.shape[0]
kd_tree = cKDTree(ra_dec)
# Pairs of galaxies that are close enough
edges_close = kd_tree.query_pairs(r=self.closeness, p=2,
eps=self.closeness/5.0,
output_type='set')
edges_close_reverse = [(b, a) for a, b in edges_close] # bidirectional
# All neighboring gals have edge to central LOS gal
edges_to_center = set(zip(np.arange(n_nodes), np.zeros(n_nodes)))
edge_index = edges_to_center.union(edges_close)
edge_index = edge_index.union(edges_close_reverse)
edge_index = torch.LongTensor(list(edge_index)).transpose(0, 1)
return edge_index
def _save_graph_to_disk(self, i):
los_info = self.Y.iloc[i]
# Init with central galaxy containing masked-out features
# Back when central galaxy was given a node
# nodes = pd.DataFrame(self.get_los_node())
nodes = pd.DataFrame(columns=self.features + ['halo_mass', 'stellar_mass'])
gals_iter = self.get_gals_iterator(self.healpix,
self.features + ['halo_mass', 'stellar_mass'])
for gals_df in gals_iter:
# Query neighboring galaxies within 3' to sightline
dist, ra_diff, dec_diff = cu.get_distance(gals_df['ra_true'].values,
gals_df['dec_true'].values,
los_info['ra'],
los_info['dec'])
gals_df['ra_true'] = ra_diff # deg
gals_df['dec_true'] = dec_diff # deg
gals_df['r'] = dist
dist_keep = np.logical_and(dist < self.aperture_size/60.0,
dist > 1.e-7) # exclude LOS gal
mag_keep = np.logical_and(gals_df['mag_i_lsst'].values > self.mag_lower,
gals_df['mag_i_lsst'].values < self.mag_upper)
keep = np.logical_and(dist_keep, mag_keep)
            nodes = pd.concat([nodes, gals_df.loc[keep, :]], ignore_index=True)
x = torch.from_numpy(nodes[self.features].values).to(torch.float32)
y_local = torch.from_numpy(nodes[['halo_mass', 'stellar_mass', 'redshift_true']].values).to(torch.float32)
y_global = torch.FloatTensor([[los_info['final_kappa'],
los_info['final_gamma1'],
los_info['final_gamma2']]]) # [1, 3]
x_meta = torch.FloatTensor([[x.shape[0],
np.sum(1.0/(nodes['r'].values + 1.e-5))]]) # [1, 2]
# Vestiges of adhoc edge definitions
# edge_index = self.get_edges(nodes[['ra_true', 'dec_true']].values)
# data = Subgraph(x, global_y, edge_index)
y_class = self._get_y_class(y_global)
data = Subgraph(x=x, y=y_global, y_local=y_local, x_meta=x_meta,
y_class=y_class)
if self.pre_transform is not None:
data = self.pre_transform(data)
torch.save(data, self.processed_file_path_fmt.format(i))
def _get_y_class(self, y):
y_class = torch.bucketize(y[:, 0], # only kappa
boundaries=torch.Tensor([0.0, 0.03, 0.05, 1.e6]))
return y_class
def process_single(self, i):
"""Process a single sightline indexed i
"""
if not osp.exists(self.processed_file_path_fmt.format(i)):
self._save_graph_to_disk(i)
# else:
# self._save_graph_to_disk(i)
# else:
# data = torch.load(self.processed_file_path_fmt.format(i))
# data.y_class = self._get_y_class(data.y)
# torch.save(data, self.processed_file_path_fmt.format(i))
def process(self):
"""Process multiple sightline in parallel
"""
print("Parallelizing across {:d} cores...".format(self.n_cores))
with multiprocessing.Pool(self.n_cores) as pool:
return list(tqdm(pool.imap(self.process_single,
range(self.n_data)),
total=self.n_data))
def len(self):
return len(self.processed_file_names)
def get(self, idx):
data = torch.load(self.processed_file_path_fmt.format(idx))
return data
``` |
{
"source": "jiwoncpark/sims_GCRCatSimInterface",
"score": 2
} |
#### File: sims_GCRCatSimInterface/bin.src/generateInstCat.py
```python
import argparse
import warnings
import os
import copy
import time
import multiprocessing
import numbers
import json
from astropy._erfa import ErfaWarning
with warnings.catch_warnings():
warnings.filterwarnings('ignore', '\nThis call', UserWarning)
warnings.filterwarnings('ignore', 'Duplicate object type', UserWarning)
warnings.filterwarnings('ignore', 'No md5 sum', UserWarning)
warnings.filterwarnings('ignore', 'ERFA function', ErfaWarning)
warnings.filterwarnings('ignore', 'Numpy has detected', FutureWarning)
warnings.filterwarnings('ignore', 'divide by zero', RuntimeWarning)
warnings.filterwarnings('ignore', 'invalid value', RuntimeWarning)
from desc.sims.GCRCatSimInterface import InstanceCatalogWriter
def generate_instance_catalog(args=None, lock=None):
with warnings.catch_warnings():
if args.suppress_warnings:
warnings.filterwarnings('ignore', '\nThis call', UserWarning)
warnings.filterwarnings('ignore', 'Duplicate object type', UserWarning)
warnings.filterwarnings('ignore', 'No md5 sum', UserWarning)
warnings.filterwarnings('ignore', 'ERFA function', ErfaWarning)
warnings.filterwarnings('ignore', 'Numpy has detected', FutureWarning)
warnings.filterwarnings('ignore', 'divide by zero', RuntimeWarning)
warnings.filterwarnings('ignore', 'invalid value', RuntimeWarning)
if not hasattr(generate_instance_catalog, 'instcat_writer'):
config_dict = {}
config_dict.update(args.__dict__)
instcat_writer = InstanceCatalogWriter(args.db, args.descqa_catalog,
dither=not args.disable_dithering,
min_mag=args.min_mag,
minsource=args.minsource,
proper_motion=args.enable_proper_motion,
protoDC2_ra=args.protoDC2_ra,
protoDC2_dec=args.protoDC2_dec,
star_db_name=args.star_db_name,
sed_lookup_dir=args.sed_lookup_dir,
agn_db_name=args.agn_db_name,
agn_threads=args.agn_threads,
sn_db_name=args.sn_db_name,
host_image_dir=args.host_image_dir,
host_data_dir=args.host_data_dir,
sprinkler=args.enable_sprinkler,
gzip_threads=args.gzip_threads,
config_dict=config_dict)
generate_instance_catalog.instcat_writer = instcat_writer
for obsHistID in args.ids:
if args.job_log is not None:
if lock is not None:
lock.acquire()
with open(args.job_log, 'a') as out_file:
out_file.write('starting %d at time %.0f\n' % (obsHistID, time.time()))
if lock is not None:
lock.release()
pickup_file = None
if args.pickup_dir is not None:
pickup_file = os.path.join(args.pickup_dir, 'job_log_%.8d.txt' % obsHistID)
config_dict['pickup_file'] = pickup_file
status_file_name = generate_instance_catalog.instcat_writer.write_catalog(obsHistID,
out_dir=args.out_dir,
fov=args.fov,
status_dir=args.out_dir,
pickup_file=pickup_file)
if args.job_log is not None:
if lock is not None:
lock.acquire()
with open(args.job_log, 'a') as out_file:
out_file.write('ending %d at time %.0f\n' % (obsHistID, time.time()))
if lock is not None:
lock.release()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Instance catalog generator')
parser.add_argument('--config_file', type=str, default=None,
help='config file containing all of the arguments for this method. '
'Arguments are in a json-ized dict')
parser.add_argument('--db', type=str,
help='path to the OpSim database to query')
parser.add_argument('--star_db_name', type=str,
help='Sqlite file containing DC2 stellar sources')
parser.add_argument('--agn_db_name', type=str,
help='File of AGN parameters generated by create_agn_db.py')
parser.add_argument('--sed_lookup_dir', type=str,
default='/global/projecta/projectdirs/lsst/groups/SSim/DC2/sedLookup',
help='Directory where the SED lookup tables reside')
parser.add_argument('--agn_threads', type=int, default=1,
help='Number of threads to use when simulating AGN variability')
parser.add_argument('--sn_db_name', type=str, default=None,
help='File of SN parameters generated by create_sne_db.py')
parser.add_argument('--host_image_dir', type=str,
help='Location of FITS stamps of lensed host images produced by generate_lensed_hosts_***.py',
default=os.path.join(os.environ['SIMS_GCRCATSIMINTERFACE_DIR'], 'data', 'outputs'))
parser.add_argument('--host_data_dir', type=str,
help='Name of csv file of lensed host data created by the sprinkler.',
default=os.path.join(os.environ['SIMS_GCRCATSIMINTERFACE_DIR'],'data'))
parser.add_argument('--descqa_catalog', type=str, default='protoDC2',
help='the desired DESCQA catalog')
parser.add_argument('--out_dir', type=str, help='directory where output will be written',
default=os.path.join(os.environ['SIMS_GCRCATSIMINTERFACE_DIR'],'data', 'outputs'))
parser.add_argument('--ids', type=int, nargs='+',
default=None,
help='obsHistID to generate InstanceCatalog for (a list)')
parser.add_argument('--disable_dithering', default=False,
action='store_true',
help='flag to disable dithering')
parser.add_argument('--min_mag', type=float, default=10.0,
                        help='the minimum magnitude for stars')
parser.add_argument('--fov', type=float, default=2.0,
help='field of view radius in degrees')
parser.add_argument('--enable_proper_motion', default=False,
action='store_true',
help='flag to enable proper motion')
parser.add_argument('--minsource', type=int, default=100,
                        help='minimum number of objects in a trimmed instance catalog')
parser.add_argument('--protoDC2_ra', type=float, default=0,
help='RA (J2000 degrees) of the new protoDC2 center')
parser.add_argument('--protoDC2_dec', type=float, default=0,
help='Dec (J2000 degrees) of the new protoDC2 center')
parser.add_argument('--enable_sprinkler', default=False, action='store_true',
help='flag to enable the sprinkler')
parser.add_argument('--suppress_warnings', default=False, action='store_true',
help='flag to suppress warnings')
parser.add_argument('--n_jobs', type=int, default=1,
help='Number of jobs to run in parallel with multiprocessing')
parser.add_argument('--gzip_threads', type=int, default=3,
help="number of parallel gzip jobs any one "
"InstanceCatalogWriter can start in parallel "
"at a time")
parser.add_argument('--job_log', type=str, default=None,
help="file where we will write 'job started/completed' messages")
parser.add_argument('--pickup_dir', type=str, default=None,
help='directory to check for aborted job logs')
args = parser.parse_args()
if args.config_file is not None:
with open(args.config_file, 'r') as in_file:
config_dict = json.load(in_file)
args.__dict__.update(config_dict)
print('args ',args.n_jobs,args.ids)
if args.n_jobs==1 or isinstance(args.ids, numbers.Number) or len(args.ids)==1:
generate_instance_catalog(args=args)
else:
print('trying multi processing')
lock = multiprocessing.Lock()
job_list = []
n_id = len(args.ids)//args.n_jobs # number of ids per job
print('n_id is %d' % n_id)
for i_start in range(0, len(args.ids), n_id):
local_args = copy.deepcopy(args)
local_args.ids = args.ids[i_start:i_start+n_id]
print('local_ids ',local_args.ids)
p = multiprocessing.Process(target=generate_instance_catalog,
kwargs={'args':local_args, 'lock':lock})
p.start()
job_list.append(p)
for p in job_list:
p.join()
if args.job_log is not None:
with open(args.job_log, 'a') as out_file:
out_file.write('%s should be completed\n' % str(args.ids))
```
#### File: sims/GCRCatSimInterface/AGNModule.py
```python
import numpy as np
import os
import numbers
from lsst.utils import getPackageDir
from lsst.sims.photUtils import Sed, BandpassDict
__all__ = ["log_Eddington_ratio", "M_i_from_L_Mass", "k_correction",
"tau_from_params", "SF_from_params"]
def log_Eddington_ratio(bhmass, accretion_rate):
"""
Parameters
----------
bhmass is in solar masses
accretion_rate is in solar masses per Gyr
Returns
-------
log10(L/L_Eddington)
"""
# reference for expressions defining Eddington luminosity
# http://www-astro.physics.ox.ac.uk/~garret/teaching/lecture7-2012.pdf
log_m_sun = np.log10(1.98855) + 33.0 # in grams
log_G = np.log10(6.674) - 8.0 # in cgs units
log_m_proton = log_m_sun - np.log10(1.189) - 57.0 # see A.2 of Kolb and Turner
log_sigma_T = np.log10(6.6524) - 25.0 # in cm^2 -- A.1.2 of Kolb and Turner
log_c = np.log10(2.9979) + 10.0 # in cm/sec
log_sec_per_yr = np.log10(3600.0*24.0*365.25)
log_Const = np.log10(4.0) + np.log10(np.pi)
log_Const += log_G + log_c + log_m_proton - log_sigma_T
log_L_Eddington = np.log10(bhmass) + log_m_sun + log_Const
log_epsilon = -1.0
log_L = np.log10(accretion_rate)
log_L += log_m_sun - 9.0 - log_sec_per_yr
log_L += log_epsilon
log_L += 2.0*log_c
output = log_L-log_L_Eddington
return output
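# Illustrative call (not part of the original module); the numbers are
# placeholders: a 1e8 M_sun black hole accreting 1e7 M_sun/Gyr (0.01 M_sun/yr).
def _example_log_eddington_ratio():
    bhmass = 1.0e8          # solar masses
    accretion_rate = 1.0e7  # solar masses per Gyr
    return log_Eddington_ratio(bhmass, accretion_rate)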
def M_i_from_L_Mass(Ledd_ratio, bhmass):
"""
Parameters
----------
Ledd_ratio is the log10(L/L_Eddington) ratio
bhmass is the log10(mass of the blackhole in solar masses)
Returns
-------
    Absolute i-band magnitude, read off from the relationships
    apparent in Figure 15 of MacLeod et al 2010 (ApJ, 721, 1014)
    via a least-squares plane fit in (log Eddington ratio, log M_BH)
"""
if not hasattr(M_i_from_L_Mass, '_initialized'):
print('initializing M_i')
M_i_from_L_Mass._initialized = True
# example points taken from Figure 15 of MacLeod et al (2010)
l_edd = [-0.5, -0.5,
-0.1, -0.1,
-1.1, -1.1,
-1.5, -1.5]
mbh = [9.8, 7.8,
9.0, 7.7,
10.1, 8.3,
10.1, 8.85]
m_i = [-28.3, -23.2,
-27.6, -24.4,
-27.7, -23.2,
-26.3, -23.1]
l_edd = np.array(l_edd)
mbh = np.array(mbh)
m_i = np.array(m_i)
theta_best = None
l_edd_0_best = None
mbh_0_best = None
err_best = None
mm = np.zeros((3,3), dtype=float)
bb = np.zeros(3, dtype=float)
nn = len(m_i)
mm[0][0] = (l_edd**2).sum()
mm[0][1] = (l_edd*mbh).sum()
mm[0][2] = l_edd.sum()
mm[1][0] = mm[0][1]
mm[1][1] = (mbh**2).sum()
mm[1][2] = mbh.sum()
mm[2][0] = l_edd.sum()
mm[2][1] = mbh.sum()
mm[2][2] = nn
bb[0] = (l_edd*m_i).sum()
bb[1] = (mbh*m_i).sum()
bb[2] = m_i.sum()
vv = np.linalg.solve(mm, bb)
M_i_from_L_Mass._coeffs = vv
return (M_i_from_L_Mass._coeffs[0]*Ledd_ratio +
M_i_from_L_Mass._coeffs[1]*bhmass +
M_i_from_L_Mass._coeffs[2])
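# Illustrative sketch (not part of the original module) chaining the two
# helpers above; input values are arbitrary placeholders. Note that
# M_i_from_L_Mass expects log10(M_BH/M_sun), not the mass itself.
def _example_M_i_from_physical_params():
    log_edd_ratio = log_Eddington_ratio(bhmass=1.0e8,          # M_sun
                                        accretion_rate=1.0e7)  # M_sun/Gyr
    return M_i_from_L_Mass(log_edd_ratio, np.log10(1.0e8))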
def k_correction(sed_obj, bp, redshift):
"""
Parameters
----------
sed_obj is an instantiation of Sed representing the observed
spectral energy density of the source
bp is an instantiation of Bandpass representing the bandpass
in which we are calculating the magnitudes
redshift is a float representing the redshift of the source
Returns
-------
K correction in magnitudes according to equation (12) of
Hogg et al. 2002 (arXiv:astro-ph/0210394)
"""
if sed_obj.fnu is None:
sed_obj.flambdaTofnu()
dilation = 1.0 + redshift
restframe_wavelen_grid = bp.wavelen*dilation
if not hasattr(k_correction, '_valid_dex_dict'):
k_correction._valid_dex_dict = {}
if bp not in k_correction._valid_dex_dict:
print('calculating valid dexes')
valid_bp_dex = np.where(np.abs(bp.sb)>0.0)
k_correction._valid_dex_dict[bp] = valid_bp_dex
else:
valid_bp_dex = k_correction._valid_dex_dict[bp]
restframe_min_wavelen = restframe_wavelen_grid[valid_bp_dex[0][0]]
restframe_max_wavelen = restframe_wavelen_grid[valid_bp_dex[0][-1]]
if (restframe_min_wavelen < sed_obj.wavelen[0] or
restframe_max_wavelen > sed_obj.wavelen[-1]):
msg = '\nBP/(1+z) range '
msg += '%.6e < lambda < %.6e\n' % (restframe_min_wavelen,
restframe_max_wavelen)
msg += 'SED range '
        msg += '%.6e < lambda < %.6e\n' % (sed_obj.wavelen.min(),
sed_obj.wavelen.max())
raise RuntimeError(msg)
restframe_fnu = np.interp(restframe_wavelen_grid,
sed_obj.wavelen,
sed_obj.fnu,
left=0.0,
right=0.0)
observed_fnu = np.interp(bp.wavelen,
sed_obj.wavelen,
sed_obj.fnu,
left=0.0,
right=0.0)
d_wavelen = bp.wavelen[1:]-bp.wavelen[:-1]
bf_over_w = bp.sb*restframe_fnu/bp.wavelen
restframe_integral = (0.5*(bf_over_w[1:] + bf_over_w[:-1]) *
d_wavelen).sum()
bf_over_w = bp.sb*observed_fnu/bp.wavelen
observer_integral = (0.5*(bf_over_w[1:] + bf_over_w[:-1]) *
d_wavelen).sum()
return -2.5*np.log10((1.0+redshift)*observer_integral/restframe_integral)
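# Illustrative sketch (not part of the original module). It assumes the LSST
# sims throughput files are installed so that loadTotalBandpassesFromFiles()
# can find them, and builds a toy flat-F_lambda SED (wavelengths in nm,
# following the sims convention) purely to exercise the interface.
def _example_k_correction():
    bp_dict = BandpassDict.loadTotalBandpassesFromFiles()
    wavelen = np.arange(100.0, 3000.0, 1.0)  # nm; wide enough for z ~ 1
    flambda = np.ones_like(wavelen)          # arbitrary flat SED
    sed = Sed(wavelen=wavelen, flambda=flambda)
    return k_correction(sed, bp_dict['i'], redshift=1.0)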
def tau_from_params(redshift, M_i, mbh, eff_wavelen, rng=None):
"""
Use equation (7) and Table 1 (last row) of MacLeod et al.
to get tau from black hole parameters
Parameters
----------
redshift of the black hole (will be used to calculate
the rest-frame effective wavelength of the i bandpass)
M_i is the absolute magnitude of the AGN in the i-band
mbh is the mass of the blackhole in solar masses
eff_wavelen is the observer-frame effective
wavelength of the band in Angstroms
    rng is an optional np.random.RandomState instance
    which will introduce scatter into the coefficients
    of the MacLeod et al. fit expression
Returns
-------
tau -- the characteristic timescale of the AGN light curve
in the i-band in days
"""
#if not hasattr(tau_from_params, '_eff_wavelen_i'):
# bp_dict = BandpassDict.loadTotalBandpassesFromFiles()
# eff_wav_nm = bp_dict['i'].calcEffWavelen()
# tau_from_params._eff_wavelen_i = 10.0*eff_wav_nm[0] # use phi; not sb
AA = 2.4
BB = 0.17
CC = 0.03
DD = 0.21
if rng is not None:
if isinstance(redshift, numbers.Number):
n_obj = 1
else:
n_obj = len(redshift)
AA += rng.normal(0.0, 0.2, size=n_obj)
BB += rng.normal(0.0, 0.02, size=n_obj)
CC += rng.normal(0.0, 0.04, size=n_obj)
DD += rng.normal(0.0, 0.07, size=n_obj)
eff_wavelen_rest = eff_wavelen/(1.0+redshift)
log_tau = AA + BB*np.log10(eff_wavelen_rest/4000.0)
log_tau += CC*(M_i+23.0) + DD*(np.log10(mbh)-9.0)
return np.power(10.0, log_tau)
def SF_from_params(redshift, M_i, mbh, eff_wavelen, rng=None):
"""
Use equation (7) and Table 1 (5th row) of MacLeod et al.
to get the structure function from black hole parameters
Parameters
----------
redshift of the black hole (will be used to calculate
the rest-frame effective wavelength of the i bandpass)
M_i is the absolute magnitude of the AGN in the i-band
mbh is the mass of the blackhole in solar masses
eff_wavelen is the observer-frame effective
wavelength of the band in Angstroms
    rng is an optional np.random.RandomState instance
    which will introduce scatter into the coefficients
    of the MacLeod et al. fit expression
Returns
-------
    SF -- the structure function of the light curve at infinite
    time lag at the effective wavelength specified
"""
AA = -0.51
BB = -0.479
CC = 0.131
DD = 0.18
if rng is not None:
if isinstance(redshift, numbers.Number):
n_obj = 1
else:
n_obj = len(redshift)
AA += rng.normal(0.0, 0.02, size=n_obj)
BB += rng.normal(0.0, 0.005, size=n_obj)
CC += rng.normal(0.0, 0.008, size=n_obj)
DD += rng.normal(0.0, 0.03, size=n_obj)
eff_wavelen_rest = eff_wavelen/(1.0+redshift)
log_sf = AA + BB*np.log10(eff_wavelen_rest/4000.0)
log_sf += CC*(M_i+23.0) + DD*(np.log10(mbh)-9.0)
return np.power(10.0, log_sf)
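# Illustrative sketch only (all input values below are assumptions): the two
# fits above give the damped-random-walk timescale and asymptotic structure
# function for an AGN light curve from the same MacLeod et al. (2010)
# parameterization, optionally with scatter on the fit coefficients.
def _demo_macleod_fit():
    rng = np.random.RandomState(42)
    redshift = np.array([0.5, 1.0])
    M_i = np.array([-24.0, -26.0])
    mbh = np.array([1.0e8, 1.0e9])        # black hole masses in solar masses
    eff_wavelen = 7500.0                  # observer-frame wavelength in Angstroms
    tau = tau_from_params(redshift, M_i, mbh, eff_wavelen)         # days
    sf = SF_from_params(redshift, M_i, mbh, eff_wavelen, rng=rng)  # magnitudes
    return tau, sf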
```
#### File: sims_GCRCatSimInterface/tests/test_full_instanceCatalogs.py
```python
import unittest
import os
import tempfile
import shutil
from lsst.utils import getPackageDir
from desc.sims.GCRCatSimInterface import diskDESCQAObject_protoDC2
from desc.sims.GCRCatSimInterface import PhoSimDESCQA
from lsst.sims.utils import ObservationMetaData
from lsst.sims.catalogs.definitions import InstanceCatalog
mag_grid = os.path.join(getPackageDir('sims_GCRCatSimInterface'), 'data',
'CatSimMagGrid.txt')
class BulgePhoSimCatalogTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.out_dir = tempfile.mkdtemp(prefix='full_instanceCatalog')
cls.field_ra = 82.3
cls.field_dec = -81.4
@classmethod
def tearDownClass(cls):
if os.path.exists(cls.out_dir):
list_of_files = os.listdir(cls.out_dir)
for file_name in list_of_files:
os.unlink(os.path.join(cls.out_dir, file_name))
shutil.rmtree(cls.out_dir)
@unittest.skipIf(not os.path.exists(mag_grid),
'Have not created SED magnitude grid, yet')
def test_disk_phosim_catalog(self):
"""
Just try producing a PhoSim InstanceCatalog from a fake
ObservationMetaData, using protoDC2 (to make sure we don't
break the whole interface)
"""
db = diskDESCQAObject_protoDC2(yaml_file_name='protoDC2')
db.field_ra = self.field_ra
db.field_dec = self.field_dec
obs = ObservationMetaData(pointingRA=self.field_ra+0.2,
pointingDec=self.field_dec-0.2,
mjd=59580.0, rotSkyPos=112.0,
bandpassName='z',
boundType='circle', boundLength=0.01)
cat = PhoSimDESCQA(db, obs_metadata=obs, cannot_be_null=['hasDisk'])
cat.phoSimHeaderMap = {}
cat_name = os.path.join(self.out_dir, 'disk_phosim_cat.txt')
self.assertTrue(os.path.exists(self.out_dir))
cat.write_catalog(cat_name)
with open(cat_name, 'r') as in_file:
cat_lines = in_file.readlines()
self.assertGreater(len(cat_lines), 50)
if os.path.exists(cat_name):
os.unlink(cat_name)
def test_default_varParamStr(self):
"""
Test that DESCQAObjects now return varParamStr='None' by default
"""
db = diskDESCQAObject_protoDC2(yaml_file_name='protoDC2')
db.field_ra = self.field_ra
db.field_dec = self.field_dec
obs = ObservationMetaData(pointingRA=self.field_ra-0.7,
pointingDec=self.field_dec+1.0)
class VarParamStrTestClass(InstanceCatalog):
column_outputs = ['raJ2000', 'decJ2000', 'varParamStr']
cat = VarParamStrTestClass(db, obs_metadata=obs)
cat_name = os.path.join(self.out_dir, 'varParamStr_cat.txt')
cat.write_catalog(cat_name)
line_ct = 0
with open(cat_name, 'r') as in_file:
for line in in_file:
if line[0] == '#':
continue
cols = line.strip().split()
self.assertEqual(cols[2],'None')
line_ct += 1
self.assertGreater(line_ct, 100)
if os.path.exists(cat_name):
os.unlink(cat_name)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "JiwooKimAR/MWP-solver-with-pretrained-language-model",
"score": 3
} |
#### File: MWP-solver-with-pretrained-language-model/base/BaseModel.py
```python
import torch.nn as nn
class BaseModel(nn.Module):
def __init__(self, dataset, model_conf):
super(BaseModel, self).__init__()
"""
Initialize model configuration.
Parameters:
:param Dataset dataset: dataset to use
:param Parameters model_conf: model configurations such as hidden dimension
"""
pass
def forward(self, *input):
"""
Pytorch forward path.
return output
"""
raise NotImplementedError
def train_model(self, dataset, evaluator, early_stop, saver, logger, config):
"""
Train model following given config.
"""
raise NotImplementedError
def predict(self, dataset):
"""
Make prediction on eval data which is stored in dataset.
evaluation data is stored at dataset.eval_input as matrix form.
:param Dataset dataset: dataset to use
:returns eval_output: (num_users, num_items) shaped matrix with predicted scores
"""
raise NotImplementedError
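# A minimal illustrative subclass (an assumption for demonstration only, not
# part of the original project) showing how the abstract interface above is
# meant to be filled in; the layer sizes are arbitrary.
class _ToyModel(BaseModel):
    def __init__(self, dataset, model_conf):
        super(_ToyModel, self).__init__(dataset, model_conf)
        self.linear = nn.Linear(16, 4)
    def forward(self, x):
        return self.linear(x)
    def train_model(self, dataset, evaluator, early_stop, saver, logger, config):
        # a concrete model would loop over epochs/mini-batches here and
        # report validation scores through `evaluator` and `logger`
        pass
    def predict(self, dataset):
        # a concrete model would score dataset.eval_input and return the
        # (num_users, num_items) matrix described above
        pass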
```
#### File: MWP-solver-with-pretrained-language-model/dataloader/DataBatcher.py
```python
import numpy as np
class BatchSampler:
def __init__(self, data_size, batch_size, drop_remain=False, shuffle=False):
self.data_size = data_size
self.batch_size = batch_size
self.drop_remain = drop_remain
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
perm = np.random.permutation(self.data_size)
else:
perm = range(self.data_size)
batch_idx = []
for idx in perm:
batch_idx.append(idx)
if len(batch_idx) == self.batch_size:
yield batch_idx
batch_idx = []
if len(batch_idx) > 0 and not self.drop_remain:
yield batch_idx
def __len__(self):
if self.drop_remain:
return self.data_size // self.batch_size
else:
return int(np.ceil(self.data_size / self.batch_size))
class DataBatcher:
def __init__(self, *data_source, batch_size, drop_remain=False, shuffle=False):
self.data_source = list(data_source)
self.batch_size = batch_size
self.drop_remain = drop_remain
self.shuffle = shuffle
for i, d in enumerate(self.data_source):
if isinstance(d, list):
self.data_source[i] = np.array(d)
self.data_size = len(self.data_source[0])
if len(self.data_source)> 1:
flag = np.all([len(src) == self.data_size for src in self.data_source])
if not flag:
raise ValueError("All elements in data_source should have same lengths")
self.sampler = BatchSampler(self.data_size, self.batch_size, self.drop_remain, self.shuffle)
self.iterator = iter(self.sampler)
self.n=0
def __next__(self):
batch_idx = next(self.iterator)
batch_data = tuple([data[batch_idx] for data in self.data_source])
if len(batch_data) == 1:
batch_data = batch_data[0]
return batch_data
def __iter__(self):
return self
def __len__(self):
return len(self.sampler)
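# A small self-contained usage sketch (illustrative only): DataBatcher wraps
# one or more equally sized arrays and yields aligned mini-batches; with a
# single source it yields the batch directly instead of a 1-tuple.
if __name__ == "__main__":
    xs = np.arange(10)
    ys = np.arange(10) * 2
    for batch_x, batch_y in DataBatcher(xs, ys, batch_size=4, shuffle=True):
        print(batch_x, batch_y)      # batches of 4, 4 and 2 items
    for batch_x in DataBatcher(xs, batch_size=4, drop_remain=True):
        print(batch_x)               # the remainder of 2 items is dropped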
```
#### File: MWP-solver-with-pretrained-language-model/dataloader/Dataset.py
```python
import os
import re
import json
import numpy as np
from tqdm import tqdm
from time import time
from pythonds.basic import Stack
from IPython import embed
string_list = [
    # Enumeration markers (가) through (하)
    '(가)', '(나)', '(다)', '(라)', '(마)', '(바)', '(사)', '(아)', '(자)', '(차)', '(카)', '(타)', '(파)', '(하)',
    # Names of people appearing in the problems
'남준', '석진', '윤기', '호석', '지민', '태형', '정국', '민영', '유정', '은지', '유나', '경수', '미라', '민주', '현지',
    '상민', '윤정', '현정', '예원', '영표', '재선', '승연', '승기', '혜수', '가은', '미애', '효리', '준수', '예림', '찬우', '슬기',
    # Words for family relations
'손자', '손녀', '조카', '이모', '삼촌', '동생', '누나', '오빠', '아버지', '어머니', '할머니', '할아버지', '엄마', '아빠', '나', '저', '형', '언니',
    # Words for body parts
'손가락', '발가락', '팔', '다리',
    # Words that indicate gender
'암컷', '수컷', '암탉', '수탉', '여학생', '남학생', '여자', '남자',
    # Words for colors
'흰색', '검은색', '파란색', '노란색', '초록색', '보라색', '노란색', '빨간색', '주황색', '남색', '검정색',
    # Words for school subjects
'영어', '수학', '국어', '사회', '과학', '음악', '미술', '체육',
    # Words for animals
'오리', '닭', '토끼', '물고기', '고래', '거위', '달팽이', '개구리', '강아지', '고양이', '비둘기', '병아리', '개', '강아지', '달팽이', '염소', '홍학', '두루미', '꿩', '돼지',
    # Words for flowers
'장미', '백합', '튤립', '카네이션', '국화', '화분', '화단', '꽃병',
    # Sports-related words
'배구공', '농구공', '축구공', '탁구공', '야구공', '줄넘기', '달리기', '수영', '시합',
    # Food-related words
'사과', '배', '감', '귤', '포도', '수박', '참외', '딸기', '복숭아', '바나나', '오렌지',
'토마토', '무', '당근', '오이', '배추', '상추', '양상추', '감자', '양파',
'사탕', '김밥', '빵', '라면', '과자', '음료수', '주스', '우유', '달걀', '계란',
    # Words for school and study supplies
'연필', '색연필', '지우개', '공책', '도화지', '색종이', '풀', '테이프', '바둑돌', '구슬', '상자', '나무토막', '장난감', '책장', '책꽂이',
    # Words for common places
'서점', '마트', '문구점', '집', '학교', '수영장', '교실', '도서관', '박물관', '운동장', '주차장', '정류장', '아파트', '농장', '강당', '경찰서', '소방서', '병원', '약국', '공원',
    # Words for means of transportation
'비행기', '자동차', '트럭', '배', '자전거', '오토바이', '기차', '버스', '엘리베이터',
    # Building-related terms
'페인트', '벽', '천장', '문', '울타리',
    # Others added from the training set
'초코우유', '딸기우유', '바나나우유', '커피우유', '흰우유', '우산', '지팡이', '수조', '양동이', '접시', '사과파이',
]
class Dataset:
def __init__(self, model_name, data_dir, dataset, add_kor_number, testsets, use_ixc, use_iec, use_isc):
self.model_name = model_name
self.data_dir = data_dir
self.data_name = dataset
self.testsets = testsets
if 'chall' in self.data_name:
self.load_data_chall(model_name, data_dir, dataset, add_kor_number, testsets, use_ixc, use_iec, use_isc)
# For final submission (dataset/problemsheet.json)
if 'dataset' in self.data_name:
self.load_data_submit(model_name, data_dir, dataset, add_kor_number, use_ixc, use_iec, use_isc)
def load_data_chall(self, model_name, data_dir, dataset, add_kor_number, testsets, use_ixc, use_iec, use_isc):
# read_json
train_path = os.path.join(data_dir, self.data_name, 'questions_train.json')
valid_path = os.path.join(data_dir, self.data_name, 'questions_valid.json')
with open(train_path, 'r', encoding='utf-8-sig') as f:
train_json = json.load(f)
with open(valid_path, 'r', encoding='utf-8-sig') as f:
valid_json = json.load(f)
test_paths = [os.path.join(data_dir, self.data_name, f'{test_name}.json') for test_name in testsets]
test_jsons = []
for test_path in test_paths:
with open(test_path, 'r', encoding='utf-8-sig') as f:
test_jsons.append(json.load(f))
# initializing
self.idx2question = dict()
        self.idx2solution = dict()
self.idx2qtype = dict()
self.idx2isstring = dict()
self.idx2INC = dict()
self.idx2IXC = dict()
self.idx2IEC = dict()
self.idx2ISC = dict()
self.idx2IMQ = dict()
self.idx2NET = dict()
self.idx2postfix = dict()
self.idx2template = dict()
        # TODO: handle person names and (가)(나)(다)(라)-style enumeration markers,
self.netvocab2netidx = {'[PAD]': 0, '[BOS]': 1, '[EOS]': 2, '[OP]': 3}
self.netidx2netvocab = {0: '[PAD]', 1: '[BOS]', 2: '[EOS]', 3: '[OP]'}
self.operator2idx = {'[PAD]': 0}
self.idx2operator = {0: '[PAD]'}
self.templatetoken2idx = {'[PAD]': 0, '[BOS]': 1, '[EOS]': 2}
self.idx2templatetoken = {0: '[PAD]', 1: '[BOS]', 2: '[EOS]'}
self.kornum2num = {'하나': 1, '둘': 2, '셋': 3, '넷': 4, '다섯': 5, '여섯': 6, '일곱': 7, '여덟': 8, '아홉': 9, '열': 10,
'한': 1, '두': 2, '세': 3, '네': 4}
self.string1_list = [s for s in string_list if len(s) == 1]
self.string2_list = [s for s in string_list if len(s) == 2]
self.string3_list = [s for s in string_list if len(s) == 3]
self.string4_list = [s for s in string_list if len(s) == 4]
self.string5_list = [s for s in string_list if len(s) == 5]
def set_values(json, start_idx):
idxes = []
for json_idx in json.keys():
idx = int(json_idx) + start_idx
idxes.append(idx)
# question, postfix
question = json[json_idx]['question']
postfix = json[json_idx]['equation_op']
self.idx2question[idx] = question
self.idx2postfix[idx] = postfix
self.idx2isstring[idx] = (len(re.sub(r'[0-9\[\]A-Za-z_ ]', '', postfix)) > 0)
try:
qtype = json[json_idx]['qtype']
except:
qtype = '타입미지정'
self.idx2qtype[idx] = qtype
try:
solution = json[json_idx]['answer'][0]
self.idx2solution[idx] = solution
except:
pass
# Check if value already exists
if json[json_idx].get('checked') is True:
INC = json[json_idx]['INC']
IXC = json[json_idx]['IXC']
IEC = json[json_idx]['IEC']
ISC = json[json_idx]['ISC']
IMQ = json[json_idx]['IMQ']
NET = json[json_idx]['NET']
template = json[json_idx]['template']
self.idx2INC[idx] = INC
self.idx2IXC[idx] = IXC
self.idx2IEC[idx] = IEC
self.idx2ISC[idx] = ISC
self.idx2IMQ[idx] = IMQ.strip()
self.idx2NET[idx] = NET
self.idx2template[idx] = template
continue
else:
json[json_idx]['checked'] = False
                # Sentence preprocessing
new_question = []
for word in question.strip().split():
                    # If a Korean numeral word appears, also insert its numeric value
if add_kor_number and (word in self.kornum2num.keys()):
new_question.append(str(self.kornum2num[word]))
new_question.append(word)
question = ' '.join(new_question)
# INC, IMQ, IXC, IEC, ISC
IMQ = ''
self.idx2INC[idx] = dict()
num2INC = dict()
self.idx2IXC[idx] = dict()
alpha2IXC = dict()
self.idx2IEC[idx] = dict()
eq2IEC = dict()
self.idx2ISC[idx] = dict()
str2ISC = dict()
for word in question.split():
                    # Assign an IEC token when an equation appears
if '=' in word and use_iec:
eq = ''
for c in word:
if c in '1234567890+-*%./-=ABCDEFGHIJKLMNOPQRSTUVWXYZ':
eq += c
else: # 88점입니다.
break
IEC = '[E' + str(len(self.idx2IEC[idx])) + ']'
self.idx2IEC[idx][IEC] = eq
eq2IEC[eq] = IEC
IMQ += IEC + ' '
IMQ += word + ' '
                    # Assign an INC token when a number appears
elif word[0].isdigit() or ((word[0] == '-' and len(word) != 1) and word[1].isdigit()):
num = ''
                        # "1,000원" -> "1000원" (strip thousands separators)
for c in re.sub('[,]', '', word):
if c in '1234567890./-': # 소수, 분수, 음수 고려
num += c
else: # 88점입니다.
break
INC = '[N' + str(len(self.idx2INC[idx])) + ']'
self.idx2INC[idx][INC] = num
num2INC[num] = INC
IMQ += INC + ' '
IMQ += word + ' '
                    # Assign an IXC token when an uppercase Latin letter appears
elif use_ixc and (word[0] in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
if alpha2IXC.get(word[0]) is not None:
IXC = alpha2IXC[word[0]]
else:
IXC = '[X' + str(len(self.idx2IXC[idx])) + ']'
self.idx2IXC[idx][IXC] = word[0]
alpha2IXC[word[0]] = IXC
IMQ += IXC + ' '
IMQ += word + ' '
                    # Assign an ISC token when one of the predefined strings appears (in the question and the answer equation)
elif use_isc and ((re.sub('[,을를이가은는로]', '', word) in self.string1_list) or (word[:2] in self.string2_list) or (word[:3] in self.string3_list) or (word[:4] in self.string4_list) or (word[:5] in self.string5_list)):
tmp_str = ''
if word[:5] in self.string5_list:
tmp_str = word[:5]
elif word[:4] in self.string4_list:
tmp_str = word[:4]
elif word[:3] in self.string3_list:
tmp_str = word[:3]
elif word[:2] in self.string2_list:
tmp_str = word[:2]
elif re.sub('[,을를이가은는로]', '', word) in self.string1_list:
tmp_str = re.sub('[,을를이가은는로]', '', word)
if str2ISC.get(tmp_str) is not None:
ISC = str2ISC[tmp_str]
else:
ISC = '[S' + str(len(self.idx2ISC[idx])) + ']'
self.idx2ISC[idx][ISC] = tmp_str
str2ISC[tmp_str] = ISC
IMQ += ISC + ' '
IMQ += word + ' '
else:
IMQ += word + ' '
self.idx2IMQ[idx] = IMQ.strip()
# postfix -> NET (For TM-generation)
NET = postfix.split()
# Number -> INC
for k, v in self.idx2INC[idx].items():
for NET_idx, token in enumerate(NET):
if v == token:
NET[NET_idx] = k
# 미지수 -> IXC
for k, v in self.idx2IXC[idx].items():
for NET_idx, token in enumerate(NET):
if v == token:
NET[NET_idx] = k
                # Equation -> IEC
for k, v in self.idx2IEC[idx].items():
for NET_idx, token in enumerate(NET):
if v == token:
NET[NET_idx] = k
                # String constant -> ISC
for k, v in self.idx2ISC[idx].items():
for NET_idx, token in enumerate(NET):
if v == token:
NET[NET_idx] = k
# Constant -> C
for NET_idx, token in enumerate(NET):
if token[0].isdigit() or (token[0] == '-' and token[1].isdigit()) or token in '><':
NET[NET_idx] = '[C' + token + ']'
# Operation -> OP & Constant 처리
for NET_idx, token in enumerate(NET):
if token.startswith('[OP'):
if self.operator2idx.get(token) is None:
self.operator2idx[token] = len(self.operator2idx)
self.idx2operator[self.operator2idx[token]] = token
NET[NET_idx] = '[OP]'
else:
if self.netvocab2netidx.get(token) is None:
self.netvocab2netidx[token] = len(self.netvocab2netidx)
self.netidx2netvocab[self.netvocab2netidx[token]] = token
# for NET_idx, token in enumerate(NET):
# if self.netvocab2netidx.get(token) is None:
# self.netvocab2netidx[token] = len(self.netvocab2netidx)
# self.netidx2netvocab[self.netvocab2netidx[token]] = token
# if token.startswith('[OP'):
# if self.operator2idx.get(token) is None:
# self.operator2idx[token] = len(self.operator2idx)
# self.idx2operator[self.operator2idx[token]] = token
# NET[NET_idx] = token
self.idx2NET[idx] = ' '.join(NET)
# postfix -> template (For GEO)
template = postfix.split()
for k, v in self.idx2INC[idx].items():
for template_idx, token in enumerate(template):
if v == token:
template[template_idx] = k
                # Unknown variable -> IXC
for k, v in self.idx2IXC[idx].items():
for template_idx, token in enumerate(template):
if v == token:
template[template_idx] = k
                # Equation -> IEC
for k, v in self.idx2IEC[idx].items():
for template_idx, token in enumerate(template):
if v == token:
template[template_idx] = k
                # String constant -> ISC
for k, v in self.idx2ISC[idx].items():
for template_idx, token in enumerate(template):
if v == token:
template[template_idx] = k
# Constant -> C
for template_idx, token in enumerate(template):
if token[0].isdigit() or (token[0] == '-' and token[1].isdigit()) or token in '><':
template[template_idx] = '[C' + token + ']'
                # Add tokens to the template-token dictionary
for template_idx, token in enumerate(template):
if self.templatetoken2idx.get(token) is None:
self.templatetoken2idx[token] = len(self.templatetoken2idx)
self.idx2templatetoken[self.templatetoken2idx[token]] = token
self.idx2template[idx] = ' '.join(template)
return np.array(idxes)
# Set train/valid/test ids
self.train_ids = set_values(train_json, start_idx=0)
self.valid_ids = set_values(valid_json, start_idx=1000000)
self.test_ids = []
for i, test_json in enumerate(test_jsons):
test_ids = set_values(test_json, start_idx=10000000*(i+1))
self.test_ids.append(test_ids)
# Set question type ids
self.idx2qtype_id = dict()
map_qtype_id = dict()
for idx, qtype in self.idx2qtype.items():
if map_qtype_id.get(qtype) is None:
map_qtype_id[qtype] = len(map_qtype_id)
self.idx2qtype_id[idx] = map_qtype_id[qtype]
# save file for debugging
self.save_dataloader_to_file(train_json, train_path, start_idx=0)
self.save_dataloader_to_file(valid_json, valid_path, start_idx=1000000)
for i, (test_json, test_path) in enumerate(zip(test_jsons, test_paths)):
self.save_dataloader_to_file(test_json, test_path, start_idx=10000000*(i+1))
def load_data_submit(self, model_name, data_dir, dataset, add_kor_number, use_ixc, use_iec, use_isc):
# read_json (dataset/problemsheet.json)
test_path = os.path.join(self.data_name, 'problemsheet_5_00.json')
with open(test_path, 'r', encoding='utf-8-sig') as f:
test_json = json.load(f)
# initializing
self.idx2question = dict()
self.idx2INC = dict()
self.idx2IXC = dict()
self.idx2IEC = dict()
self.idx2ISC = dict()
self.idx2IMQ = dict()
        # TODO: handle person names, (가)(나)(다)(라)-style enumeration markers, A, B, C, D, ...
self.netvocab2netidx = {'[PAD]': 0, '[BOS]': 1, '[EOS]': 2, '[OP]': 3}
self.netidx2netvocab = {0: '[PAD]', 1: '[BOS]', 2: '[EOS]', 3: '[OP]'}
self.operator2idx = {'[PAD]': 0}
self.idx2operator = {0: '[PAD]'}
self.templatetoken2idx = {'[PAD]': 0, '[BOS]': 1, '[EOS]': 2}
self.idx2templatetoken = {0: '[PAD]', 1: '[BOS]', 2: '[EOS]'}
self.kornum2num = {'하나': 1, '둘': 2, '셋': 3, '넷': 4, '다섯': 5, '여섯': 6, '일곱': 7, '여덟': 8, '아홉': 9, '열': 10,
'한': 1, '두': 2, '세': 3, '네': 4}
self.string1_list = [s for s in string_list if len(s) == 1]
self.string2_list = [s for s in string_list if len(s) == 2]
self.string3_list = [s for s in string_list if len(s) == 3]
self.string4_list = [s for s in string_list if len(s) == 4]
self.string5_list = [s for s in string_list if len(s) == 5]
def set_values(json, start_idx):
idxes = []
for json_idx in json.keys():
idx = int(json_idx) + start_idx
idxes.append(idx)
# question
question = json[json_idx]['question']
self.idx2question[idx] = question
                # Sentence preprocessing
new_question = []
for word in question.strip().split():
                    # If a Korean numeral word appears, also insert its numeric value
if add_kor_number and (word in self.kornum2num.keys()):
new_question.append(str(self.kornum2num[word]))
new_question.append(word)
question = ' '.join(new_question)
# INC, IMQ, IXC, IEC, ISC
IMQ = ''
self.idx2INC[idx] = dict()
num2INC = dict()
self.idx2IXC[idx] = dict()
alpha2IXC = dict()
self.idx2IEC[idx] = dict()
eq2IEC = dict()
self.idx2ISC[idx] = dict()
str2ISC = dict()
for word in question.split():
                    # Assign an IEC token when an equation appears
if '=' in word and use_iec:
eq = ''
for c in word:
if c in '1234567890+-*%./-=ABCDEFGHIJKLMNOPQRSTUVWXYZ':
eq += c
else: # 88점입니다.
break
IEC = '[E' + str(len(self.idx2IEC[idx])) + ']'
self.idx2IEC[idx][IEC] = eq
eq2IEC[eq] = IEC
IMQ += IEC + ' '
IMQ += word + ' '
                    # Assign an INC token when a number appears
elif word[0].isdigit() or ((word[0] == '-' and len(word) != 1) and word[1].isdigit()):
num = ''
                        # "1,000원" -> "1000원" (strip thousands separators)
for c in re.sub('[,]', '', word):
if c in '1234567890./-': # 소수, 분수, 음수 고려
num += c
else: # 88점입니다.
break
INC = '[N' + str(len(self.idx2INC[idx])) + ']'
self.idx2INC[idx][INC] = num
num2INC[num] = INC
IMQ += INC + ' '
IMQ += word + ' '
                    # Assign an IXC token when an uppercase Latin letter appears
elif use_ixc and (word[0] in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
if alpha2IXC.get(word[0]) is not None:
IXC = alpha2IXC[word[0]]
else:
IXC = '[X' + str(len(self.idx2IXC[idx])) + ']'
self.idx2IXC[idx][IXC] = word[0]
alpha2IXC[word[0]] = IXC
IMQ += IXC + ' '
IMQ += word + ' '
                    # Assign an ISC token when one of the predefined strings appears
elif use_isc and ((re.sub('[,을를이가은는로]', '', word) in self.string1_list) or (word[:2] in self.string2_list) or (word[:3] in self.string3_list) or (word[:4] in self.string4_list) or (word[:5] in self.string5_list)):
tmp_str = ''
if word[:5] in self.string5_list:
tmp_str = word[:5]
elif word[:4] in self.string4_list:
tmp_str = word[:4]
elif word[:3] in self.string3_list:
tmp_str = word[:3]
elif word[:2] in self.string2_list:
tmp_str = word[:2]
elif re.sub('[,을를이가은는로]', '', word) in self.string1_list:
tmp_str = re.sub('[,을를이가은는로]', '', word)
if str2ISC.get(tmp_str) is not None:
ISC = str2ISC[tmp_str]
else:
ISC = '[S' + str(len(self.idx2ISC[idx])) + ']'
self.idx2ISC[idx][ISC] = tmp_str
str2ISC[tmp_str] = ISC
IMQ += ISC + ' '
IMQ += word + ' '
else:
IMQ += word + ' '
self.idx2IMQ[idx] = IMQ.strip()
return np.array(idxes)
# Set train/valid/test ids
self.test_ids = set_values(test_json, start_idx=0)
def load_data_CC(self, model_name, data_dir, dataset):
# read_json
data_path = os.path.join(data_dir, self.data_name, 'questions.json')
with open(data_path, 'r') as f:
all_json = json.load(f)
# Set train/valid/test ids
all_ids = np.arange(len(all_json))
np.random.shuffle(all_ids)
self.train_ids = all_ids[:int(0.7 * len(all_ids))]
self.valid_ids = all_ids[int(0.7 * len(all_ids)): int(0.8 * len(all_ids))]
self.test_ids = all_ids[int(0.8 * len(all_ids)):]
# initializing
self.idx2question = dict()
self.idx2alignment = dict()
self.idx2solution = dict()
self.idx2equation = dict()
self.idx2INC = dict()
self.idx2IMQ = dict()
self.idx2NET = dict()
self.idx2postfix = dict()
        # TODO: constants still need to be handled (planned)
self.netvocab2netidx = {'[PAD]': 0, '[BOS]': 1, '[EOS]': 2, 'OP': 3}
self.netidx2netvocab = {0: '[PAD]', 1: '[BOS]', 2: '[EOS]', 3: 'OP'}
self.operator2idx = {'[PAD]': 0}
self.idx2operator = {0: '[PAD]'}
# Set Values using json
for i in range(len(all_json)):
idx = all_json[i]['iIndex']
# question, alignment, solution, equation
question = all_json[i]['sQuestion']
alignments = all_json[i]['lAlignments']
solution = all_json[i]['lSolutions'][0]
equation = all_json[i]['lEquations'][0]
self.idx2question[idx] = question
self.idx2alignment[idx] = alignments
self.idx2solution[idx] = solution
self.idx2equation[idx] = equation
# INC, IMQ
self.idx2INC[idx] = dict()
IMQ = ''
num2INC = dict()
for word in question.split():
                # Assign an INC token when a number appears
re_word = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', word)
if re_word.isdigit():
INC = 'N' + str(len(self.idx2INC[idx]))
self.idx2INC[idx][INC] = re_word
num2INC[re_word] = INC
IMQ += INC + ' '
IMQ += word + ' '
self.idx2IMQ[idx] = IMQ.strip()
# infix -> postfix
postfix = self.infixToPostfix(self.make_space_eq(equation[2:]))
self.idx2postfix[idx] = postfix
# postfix -> NET
NET = postfix.split()
for k, v in self.idx2INC[idx].items():
for NET_idx, token in enumerate(NET):
if v+'.0' == token:
NET[NET_idx] = k
break
for NET_idx, token in enumerate(NET):
if token in '+-*/':
if self.operator2idx.get(token) is None:
self.operator2idx[token] = len(self.operator2idx)
self.idx2operator[self.operator2idx[token]] = token
NET[NET_idx] = 'OP'
else:
if self.netvocab2netidx.get(token) is None:
self.netvocab2netidx[token] = len(self.netvocab2netidx)
self.netidx2netvocab[self.netvocab2netidx[token]] = token
self.idx2NET[idx] = ' '.join(NET)
def infixToPostfix(self, infixexpr):
prec = {}
prec["*"] = 3
prec["/"] = 3
prec["+"] = 2
prec["-"] = 2
prec["("] = 1
opStack = Stack()
postfixList = []
tokenList = infixexpr.split()
for token in tokenList:
if re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', token).isdigit():
postfixList.append(token)
elif token == '(':
opStack.push(token)
elif token == ')':
topToken = opStack.pop()
while topToken != '(':
postfixList.append(topToken)
topToken = opStack.pop()
else:
while (not opStack.isEmpty()) and (prec[opStack.peek()] >= prec[token]):
postfixList.append(opStack.pop())
opStack.push(token)
while not opStack.isEmpty():
postfixList.append(opStack.pop())
return " ".join(postfixList)
def make_space_eq(self, infix):
new_infix = ''
for c in infix:
if c in '+-*/()=%':
new_infix += ' ' + c + ' '
else:
new_infix += c
return new_infix
def __str__(self):
ret_str = '\n'
ret_str += 'Dataset: %s\n' % self.data_name
# ret_str += '# of docs_data: %d\n' % len(self.docs_data)
# ret_str += '# of rels_data: %d(%d+%d)\n' % (self.num_rels_train + self.num_rels_test, self.num_rels_train, self.num_rels_test)
return ret_str
def save_dataloader_to_file(self, orig_json, data_path, start_idx=0):
for json_idx in orig_json.keys():
idx = int(json_idx)+start_idx
orig_json[json_idx]['INC'] = self.idx2INC.get(idx)
orig_json[json_idx]['IXC'] = self.idx2IXC.get(idx)
orig_json[json_idx]['IEC'] = self.idx2IEC.get(idx)
orig_json[json_idx]['ISC'] = self.idx2ISC.get(idx)
orig_json[json_idx]['IMQ'] = self.idx2IMQ.get(idx)
orig_json[json_idx]['NET'] = self.idx2NET.get(idx)
orig_json[json_idx]['template'] = self.idx2template.get(idx)
with open(data_path, 'w', encoding='UTF-8') as f:
f.write(json.dumps(orig_json, ensure_ascii=False, indent=4))
print(f"> Successfullly saved processed file at {data_path}")
``` |
{
"source": "jiwoong-choi/SiamMask",
"score": 2
} |
#### File: conversion/siammask_sharp/resnet.py
```python
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from models.features import Features
__all__ = ['ResNet', 'resnet50']
class Bottleneck(Features):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
# padding = (2 - stride) + (dilation // 2 - 1)
padding = 2 - stride
        assert stride == 1 or dilation == 1, "at least one of stride and dilation must be 1"
if dilation > 1:
padding = dilation
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=padding, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if out.size() != residual.size():
print(out.size(), residual.size())
out += residual
out = self.relu(out)
return out
def make_downsample_layer(expansion, inplanes, planes, stride, dilation):
if stride == 1 and dilation == 1:
return nn.Sequential(
nn.Conv2d(inplanes, planes * expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * expansion),
), dilation
else:
if dilation > 1:
dd = dilation // 2
padding = dd
else:
dd = 1
padding = 0
return nn.Sequential(
nn.Conv2d(inplanes, planes * expansion,
kernel_size=3, stride=stride, bias=False,
padding=padding, dilation=dd),
nn.BatchNorm2d(planes * expansion),
), dd
class BottleneckWithDownSample(Features):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BottleneckWithDownSample, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
# padding = (2 - stride) + (dilation // 2 - 1)
padding = 2 - stride
self.downsample, dilation = make_downsample_layer(4, inplanes, planes, stride, dilation)
        assert stride == 1 or dilation == 1, "at least one of stride and dilation must be 1"
if dilation > 1:
padding = dilation
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=padding, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
residual = self.downsample(x)
if out.size() != residual.size():
print(out.size(), residual.size())
out += residual
out = self.relu(out)
return out
def make_layer(inplanes, block1, block2, planes, blocks, stride=1, dilation=1):
layers = []
layers.append(block2(inplanes, planes, stride, dilation))
inplanes = planes * block2.expansion
for i in range(1, blocks):
layers.append(block1(inplanes, planes, dilation=dilation))
return nn.Sequential(*layers), inplanes
class ResNet(nn.Module):
def __init__(self, block1, block2, layers, layer4=False, layer3=False):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, # 3
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1, self.inplanes = make_layer(self.inplanes, block1, block2, 64, layers[0])
self.layer2, self.inplanes = make_layer(self.inplanes, block1, block2, 128, layers[1], stride=2) # 31x31, 15x15
self.feature_size = 128 * block2.expansion
if layer3:
self.layer3, self.inplanes = make_layer(self.inplanes, block1, block2, 256, layers[2], stride=1, dilation=2) # 15x15, 7x7
self.feature_size = (256 + 128) * block2.expansion
else:
self.layer3 = lambda x: x # identity
if layer4:
self.layer4, self.inplanes = make_layer(self.inplanes, block1, block2, 512, layers[3], stride=1, dilation=4) # 7x7, 3x3
self.feature_size = 512 * block2.expansion
else:
self.layer4 = lambda x: x # identity
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
p0 = self.relu(x)
x = self.maxpool(p0)
p1 = self.layer1(x)
p2 = self.layer2(p1)
p3 = self.layer3(p2)
return p0, p1, p2, p3
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, BottleneckWithDownSample, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/resnet50-19c8e357.pth'))
return model
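# A brief usage sketch (illustrative, not from the original repo): with
# layer3=True the backbone returns the stem activation plus three stage
# outputs, which is the form SiamMask-style heads consume.
if __name__ == '__main__':
    import torch
    net = resnet50(pretrained=False, layer3=True)
    dummy = torch.randn(1, 3, 127, 127)      # template-sized input (assumed)
    p0, p1, p2, p3 = net(dummy)
    print(p0.shape, p1.shape, p2.shape, p3.shape)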
``` |
{
"source": "jiwoong-choi/SupContrast",
"score": 2
} |
#### File: jiwoong-choi/SupContrast/main_linear.py
```python
from __future__ import print_function
import sys
import argparse
import time
import math
import torch
import poptorch
from main_ce import set_loader
from util import AverageMeter
from util import adjust_learning_rate, warmup_learning_rate, accuracy
from util import set_optimizer
from networks.resnet_big import SupConResNet, LinearClassifier
try:
import apex
from apex import amp, optimizers
except ImportError:
pass
def parse_option():
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('--print_freq', type=int, default=10,
help='print frequency')
parser.add_argument('--save_freq', type=int, default=50,
help='save frequency')
parser.add_argument('--batch_size', type=int, default=256,
help='batch_size')
parser.add_argument('--num_workers', type=int, default=16,
help='num of workers to use')
parser.add_argument('--epochs', type=int, default=100,
help='number of training epochs')
# optimization
parser.add_argument('--optimizer', default='SGD', choices=['SGD', 'Adam', 'RMSprop'],
help='optimizer for training')
parser.add_argument('--loss_scaling', type=float, default=1.0, help="Loss scaling factor")
parser.add_argument('--learning_rate', type=float, default=0.1,
help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default='60,75,90',
help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.2,
help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=0,
help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
# model dataset
parser.add_argument('--model', type=str, default='resnet50')
parser.add_argument('--dataset', type=str, default='cifar10',
choices=['cifar10', 'cifar100'], help='dataset')
# other setting
parser.add_argument('--cosine', action='store_true',
help='using cosine annealing')
parser.add_argument('--warm', action='store_true',
help='warm-up for large batch training')
parser.add_argument('--ckpt', type=str, default='',
help='path to pre-trained model')
opt = parser.parse_args()
# set the path according to the environment
opt.data_folder = './datasets/'
iterations = opt.lr_decay_epochs.split(',')
opt.lr_decay_epochs = list([])
for it in iterations:
opt.lr_decay_epochs.append(int(it))
opt.model_name = '{}_{}_lr_{}_decay_{}_bsz_{}'.\
format(opt.dataset, opt.model, opt.learning_rate, opt.weight_decay,
opt.batch_size)
if opt.cosine:
opt.model_name = '{}_cosine'.format(opt.model_name)
# warm-up for large-batch training,
if opt.warm:
opt.model_name = '{}_warm'.format(opt.model_name)
opt.warmup_from = 0.01
opt.warm_epochs = 10
if opt.cosine:
eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)
opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (
1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2
else:
opt.warmup_to = opt.learning_rate
if opt.dataset == 'cifar10':
opt.n_cls = 10
elif opt.dataset == 'cifar100':
opt.n_cls = 100
else:
raise ValueError('dataset not supported: {}'.format(opt.dataset))
return opt
def set_model(opt):
model = SupConResNet(name=opt.model)
criterion = torch.nn.CrossEntropyLoss()
classifier = LinearClassifier(name=opt.model, num_classes=opt.n_cls)
ckpt = torch.load(opt.ckpt, map_location='cpu')
state_dict = ckpt['model']
model.load_state_dict(state_dict)
return model, classifier, criterion
class ModelWithLoss(torch.nn.Module):
def __init__(self, encoder, classifier, loss):
super().__init__()
self.encoder = encoder
self.classifier = classifier
self.loss = loss
def forward(self, images, labels):
features = self.encoder(images)
output = self.classifier(features)
loss = self.loss(output, labels)
return output, loss
def train(train_loader, training_model, optimizer, epoch, opt):
"""one epoch training"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
end = time.time()
for idx, (images, labels) in enumerate(train_loader):
data_time.update(time.time() - end)
bsz = labels.shape[0]
# warm-up learning rate
warmup_learning_rate(opt, epoch, idx, len(train_loader), optimizer)
training_model.setOptimizer(optimizer)
# compute loss
output, loss = training_model(images, labels)
# update metric
losses.update(loss.item(), bsz)
# acc1, acc5 = accuracy(output, labels, topk=(1, 5))
# top1.update(acc1[0], bsz)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print info
if (idx + 1) % opt.print_freq == 0:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1))
sys.stdout.flush()
return losses.avg, top1.avg
def validate(val_loader, model, classifier, criterion, opt):
"""validation"""
model.eval()
classifier.eval()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
with torch.no_grad():
end = time.time()
for idx, (images, labels) in enumerate(val_loader):
bsz = labels.shape[0]
# forward
output = classifier(model.encoder(images))
loss = criterion(output, labels)
# update metric
losses.update(loss.item(), bsz)
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
top1.update(acc1[0], bsz)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % opt.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
idx, len(val_loader), batch_time=batch_time,
loss=losses, top1=top1))
print(' * Acc@1 {top1.avg:.3f}'.format(top1=top1))
return losses.avg, top1.avg
def main():
best_acc = 0
opt = parse_option()
# build data loader
train_loader, val_loader = set_loader(opt)
# build model and criterion
model, classifier, criterion = set_model(opt)
model.eval()
classifier.train()
model_with_loss = ModelWithLoss(model.encoder, classifier, criterion)
# build optimizer
optimizer = set_optimizer(opt, model_with_loss.classifier)
poptorch_model = poptorch.trainingModel(model_with_loss, optimizer=optimizer)
# training routine
for epoch in range(1, opt.epochs + 1):
adjust_learning_rate(opt, optimizer, epoch)
poptorch_model.setOptimizer(optimizer)
# train for one epoch
time1 = time.time()
loss, acc = train(train_loader, poptorch_model, optimizer, epoch, opt)
time2 = time.time()
print('Train epoch {}, total time {:.2f}, accuracy:{:.2f}'.format(
epoch, time2 - time1, acc))
# eval for one epoch
loss, val_acc = validate(val_loader, model, classifier, criterion, opt)
if val_acc > best_acc:
best_acc = val_acc
print('best accuracy: {:.2f}'.format(best_acc))
if __name__ == '__main__':
main()
```
#### File: jiwoong-choi/SupContrast/main_supcon.py
```python
from __future__ import print_function
import os
import sys
import argparse
import time
import math
import tensorboard_logger as tb_logger
import torch
import popart
import poptorch
from torchvision import transforms, datasets
from util import TwoCropTransform, AverageMeter
from util import adjust_learning_rate, warmup_learning_rate
from util import pipeline_model, replace_bn, set_optimizer, save_model
from networks.resnet_big import SupConResNet
from losses import SupConLoss
def parse_option():
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('--print_freq', type=int, default=10,
help='print frequency')
parser.add_argument('--save_freq', type=int, default=50,
help='save frequency')
parser.add_argument('--batch_size', type=int, default=256,
help='batch_size')
parser.add_argument('--num_workers', type=int, default=16,
help='num of workers to use')
parser.add_argument('--epochs', type=int, default=1000,
help='number of training epochs')
# IPU options
parser.add_argument('--pipeline_splits', nargs='+', default=[], help='pipeline splits')
parser.add_argument('--enable_pipeline_recompute', action='store_true',
help='Enable the recomputation of network activations during backward pass instead of caching them during forward pass')
parser.add_argument('--gradient_accumulation', type=int, default=1,
help='gradient accumulation')
parser.add_argument('--replication_factor', type=int, default=1,
help='replication factor')
parser.add_argument('--memory_proportion', type=float, default=0.6,
help='available memory proportion for conv and matmul')
parser.add_argument('--norm_type', default='batch', choices=['batch', 'group', 'none'],
help='normalization layer type')
parser.add_argument('--norm_num_group', type=int, default=32,
help='number of groups for group normalization layers')
parser.add_argument('--precision', default='32.32', choices=['16.16', '16.32', '32.32'],
help='Precision of Ops(weights/activations/gradients) and Master data types: 16.16, 16.32, 32.32')
parser.add_argument('--half_partial', action='store_true',
help='Accumulate matrix multiplication partials in half precision')
# optimization
parser.add_argument('--optimizer', default='SGD', choices=['SGD', 'Adam', 'RMSprop'],
help='optimizer for training')
parser.add_argument('--loss_scaling', type=float, default=1.0, help="Loss scaling factor")
parser.add_argument('--learning_rate', type=float, default=0.05,
help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default='700,800,900',
help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.1,
help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
parser.add_argument('--betas', type=float, nargs=2, default=[0.9, 0.999],
help='betas for Adam optimizer')
# model dataset
parser.add_argument('--model', type=str, default='resnet50')
parser.add_argument('--dataset', type=str, default='cifar10',
choices=['cifar10', 'cifar100', 'path'], help='dataset')
parser.add_argument('--mean', type=str, help='mean of dataset in path in form of str tuple')
parser.add_argument('--std', type=str, help='std of dataset in path in form of str tuple')
parser.add_argument('--data_folder', type=str, default=None, help='path to custom dataset')
parser.add_argument('--size', type=int, default=32, help='parameter for RandomResizedCrop')
# method
parser.add_argument('--method', type=str, default='SupCon',
choices=['SupCon', 'SimCLR'], help='choose method')
# temperature
parser.add_argument('--temp', type=float, default=0.07,
help='temperature for loss function')
# other setting
parser.add_argument('--cosine', action='store_true',
help='using cosine annealing')
parser.add_argument('--syncBN', action='store_true',
help='using synchronized batch normalization')
parser.add_argument('--warm', action='store_true',
help='warm-up for large batch training')
parser.add_argument('--trial', type=str, default='0',
help='id for recording multiple runs')
opt = parser.parse_args()
# check if dataset is path that passed required arguments
if opt.dataset == 'path':
assert opt.data_folder is not None \
and opt.mean is not None \
and opt.std is not None
# set the path according to the environment
if opt.data_folder is None:
opt.data_folder = './datasets/'
opt.model_path = './save/SupCon/{}_models'.format(opt.dataset)
opt.tb_path = './save/SupCon/{}_tensorboard'.format(opt.dataset)
iterations = opt.lr_decay_epochs.split(',')
opt.lr_decay_epochs = list([])
for it in iterations:
opt.lr_decay_epochs.append(int(it))
opt.model_name = '{}_{}_{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'.\
format(opt.method, opt.dataset, opt.model, opt.learning_rate,
opt.weight_decay, opt.batch_size, opt.temp, opt.trial)
if opt.cosine:
opt.model_name = '{}_cosine'.format(opt.model_name)
# warm-up for large-batch training,
if opt.batch_size > 256:
opt.warm = True
if opt.warm:
opt.model_name = '{}_warm'.format(opt.model_name)
opt.warmup_from = 0.01
opt.warm_epochs = 10
if opt.cosine:
eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)
opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (
1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2
else:
opt.warmup_to = opt.learning_rate
opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)
if not os.path.isdir(opt.tb_folder):
os.makedirs(opt.tb_folder)
opt.save_folder = os.path.join(opt.model_path, opt.model_name)
if not os.path.isdir(opt.save_folder):
os.makedirs(opt.save_folder)
opt.profiling = 'POPLAR_ENGINE_OPTIONS' in os.environ
assert len(opt.pipeline_splits) in (0, 1, 3, 7, 15)
return opt
def set_loader(opt, poptorch_opts: poptorch.Options):
# construct data loader
if opt.dataset == 'cifar10':
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
elif opt.dataset == 'cifar100':
mean = (0.5071, 0.4867, 0.4408)
std = (0.2675, 0.2565, 0.2761)
elif opt.dataset == 'path':
mean = eval(opt.mean)
        std = eval(opt.std)
else:
raise ValueError('dataset not supported: {}'.format(opt.dataset))
normalize = transforms.Normalize(mean=mean, std=std)
train_transform = transforms.Compose([
transforms.RandomResizedCrop(size=opt.size, scale=(0.2, 1.)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
normalize,
])
if opt.dataset == 'cifar10':
train_dataset = datasets.CIFAR10(root=opt.data_folder,
transform=TwoCropTransform(train_transform),
download=True)
elif opt.dataset == 'cifar100':
train_dataset = datasets.CIFAR100(root=opt.data_folder,
transform=TwoCropTransform(train_transform),
download=True)
elif opt.dataset == 'path':
train_dataset = datasets.ImageFolder(root=opt.data_folder,
transform=TwoCropTransform(train_transform))
else:
raise ValueError(opt.dataset)
train_sampler = None
train_loader = poptorch.DataLoader(
poptorch_opts, train_dataset, batch_size=opt.batch_size, shuffle=(train_sampler is None),
num_workers=opt.num_workers, pin_memory=True, sampler=train_sampler)
train_loader.__len__ = lambda: len(train_dataset)
return train_loader
class ModelWithLoss(torch.nn.Module):
def __init__(self, model, loss, method):
super().__init__()
self.model = model
self.loss = loss
self.method = method
def forward(self, x, labels):
bsz = labels.shape[0]
features = self.model(x)
f1, f2 = torch.split(features, [bsz, bsz], dim=0)
features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1)
if self.method == 'SupCon':
loss = self.loss(features, labels)
elif self.method == 'SimCLR':
loss = self.loss(features)
else:
raise ValueError('contrastive method not supported: {}'.
format(self.method))
return features, loss
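# Shape walk-through for the contrastive head above (illustrative): the two
# augmented crops are concatenated along the batch dimension before the call,
# so with a per-sample batch size bsz
#   x        : [2*bsz, C, H, W]  ->  features : [2*bsz, feat_dim]
#   f1, f2   : [bsz, feat_dim] each (first / second crop of every sample)
#   features : [bsz, 2, feat_dim], the "n_views" layout SupConLoss expects.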
def set_model(opt):
model = SupConResNet(name=opt.model)
criterion = SupConLoss(temperature=opt.temp)
if opt.norm_type in ['group', 'none']:
replace_bn(model, 'group', opt.norm_num_group)
pipeline_model(model, opt.pipeline_splits)
model_with_loss = ModelWithLoss(model, criterion, opt.method).train()
if opt.precision[-3:] == ".16":
model.half()
return model_with_loss
def train(train_loader, model, optimizer, epoch, opt):
"""one epoch training"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
for idx, (images, labels) in enumerate(train_loader):
if opt.profiling and idx > 0:
break
data_time.update(time.time() - end)
images = torch.cat([images[0], images[1]], dim=0)
if opt.precision[:2] == "16":
images = images.half()
bsz = labels.shape[0]
labels = labels.int()
# warm-up learning rate
warmup_learning_rate(opt, epoch, idx, len(train_loader), optimizer)
model.setOptimizer(optimizer)
# compute loss
features, loss = model(images, labels)
# update metric
losses.update(loss.item(), bsz)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print info
if (idx + 1) % opt.print_freq == 0:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.6f} ({batch_time.avg:.6f})\t'
'DT {data_time.val:.6f} ({data_time.avg:.6f})\t'
'loss {loss.val:.6f} ({loss.avg:.6f})'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
sys.stdout.flush()
return losses.avg
def main():
opt = parse_option()
# poptorch options
poptorch_opts = poptorch.Options()
poptorch_opts.Training.gradientAccumulation(opt.gradient_accumulation)
poptorch_opts.replicationFactor(opt.replication_factor)
poptorch_opts.Training.accumulationReductionType(poptorch.ReductionType.Mean)
poptorch_opts.setAvailableMemoryProportion({
f'IPU{ipu_id}': opt.memory_proportion for ipu_id in range(len(opt.pipeline_splits))
})
if opt.half_partial:
poptorch_opts.Popart.set("partialsTypeMatMuls", "half")
poptorch_opts.Popart.set("convolutionOptions", {'partialsType': 'half'})
if opt.enable_pipeline_recompute and len(opt.pipeline_splits) > 0:
poptorch_opts.Popart.set("autoRecomputation", int(popart.RecomputationType.Pipeline))
strategy = poptorch.ParallelPhasedExecution(
*[poptorch.Phase([str(ipu_id)]) for ipu_id in range(len(opt.pipeline_splits) + 1)]
)
for ipu_id in range(len(opt.pipeline_splits) + 1):
strategy.phase(ipu_id).ipus(ipu_id)
poptorch_opts.setExecutionStrategy(strategy)
poptorch_opts.TensorLocations.setWeightLocation(
poptorch.TensorLocationSettings().useOnChipStorage(False)
)
poptorch_opts.TensorLocations.setAccumulatorLocation(
poptorch.TensorLocationSettings().useOnChipStorage(False)
)
poptorch_opts.TensorLocations.setOptimizerLocation(
poptorch.TensorLocationSettings().useOnChipStorage(False)
)
poptorch_opts.TensorLocations.setActivationLocation(
poptorch.TensorLocationSettings().useOnChipStorage(False)
)
# build data loader
train_loader = set_loader(opt, poptorch_opts)
# build model and criterion
model = set_model(opt)
# build optimizer
optimizer = set_optimizer(opt, model)
# poptorch wrapper
poptorch_model = poptorch.trainingModel(model, options=poptorch_opts, optimizer=optimizer)
# tensorboard
logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)
# training routine
for epoch in range(1, opt.epochs + 1):
if opt.profiling and epoch > 1:
break
adjust_learning_rate(opt, optimizer, epoch)
poptorch_model.setOptimizer(optimizer)
# train for one epoch
time1 = time.time()
loss = train(train_loader, poptorch_model, optimizer, epoch, opt)
time2 = time.time()
print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))
# tensorboard logger
logger.log_value('loss', loss, epoch)
logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)
if epoch % opt.save_freq == 0:
save_file = os.path.join(
opt.save_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
poptorch_model.copyWeightsToHost()
save_model(model.model, optimizer, opt, epoch, save_file)
# save the last model
save_file = os.path.join(
opt.save_folder, 'last.pth')
poptorch_model.copyWeightsToHost()
save_model(model.model, optimizer, opt, opt.epochs, save_file)
if __name__ == '__main__':
main()
``` |
{
"source": "jiwoongim/ft-SNE",
"score": 2
} |
#### File: jiwoongim/ft-SNE/core.py
```python
import os, sys
import theano.tensor as T
import theano
import numpy as np
from utils import dist2hy
import theano.sandbox.rng_mrg as RNG_MRG
import theano.tensor.shared_randomstreams as RNG_TRG
from theano.tensor.shared_randomstreams import RandomStreams
RNG = np.random.RandomState(0)
MRG = RNG_MRG.MRG_RandomStreams(RNG.randint(2 ** 30))
TRG = RNG_TRG.RandomStreams(seed=1234)
epsilon = 1e-6
floath = np.float32
def sqeuclidean_var(X):
N = X.shape[0]
ss = (X ** 2).sum(axis=1)
return ss.reshape((N, 1)) + ss.reshape((1, N)) - 2*X.dot(X.T)
def discrete_sample(preds, num_sam, temperature=1.0):
# function to sample an index from a probability array
probas = TRG.choice(a=np.arange(3), size=[num_sam,], p=preds)
return np.argmax(probas, axis=1)
def euclidean2_np(X):
N = X.shape[0]
ss = np.sum(X**2, axis=1)
dist = np.reshape(ss, [N, 1]) + np.reshape(ss, [1, N]) - 2*np.dot(X, X.T)
dist = dist * np.asarray(dist>0,'float32')
return dist
def p_Xp_given_X_np(X, sigma, metric, approxF=0):
N = X.shape[0]
if metric == 'euclidean':
sqdistance = euclidean2_np(X)
elif metric == 'precomputed':
sqdistance = X**2
else:
raise Exception('Invalid metric')
euc_dist = np.exp(-sqdistance / (np.reshape(2*(sigma**2), [N, 1])))
np.fill_diagonal(euc_dist, 0.0 )
if approxF > 0:
sorted_euc_dist = euc_dist[:,:]
np.sort(sorted_euc_dist, axis=1)
row_sum = np.reshape(np.sum(sorted_euc_dist[:,1:approxF+1], axis=1), [N, 1])
else:
row_sum = np.reshape(np.sum(euc_dist, axis=1), [N, 1])
return euc_dist/row_sum # Possibly dangerous
def p_Xp_given_X_var(X, sigma, metric):
N = X.shape[0]
if metric == 'euclidean':
sqdistance = sqeuclidean_var(X)
elif metric == 'precomputed':
sqdistance = X**2
else:
raise Exception('Invalid metric')
esqdistance = T.exp(-sqdistance / ((2 * (sigma**2)).reshape((N, 1))))
esqdistance_zd = T.fill_diagonal(esqdistance, 0)
row_sum = T.sum(esqdistance_zd, axis=1).reshape((N, 1))
return esqdistance_zd/row_sum
def p_Xp_X_var(p_Xp_given_X):
return (p_Xp_given_X + p_Xp_given_X.T) / 2.0
def p_Yp_Y_var(Y):
N = Y.shape[0]
sqdistance = sqeuclidean_var(Y)
one_over = T.fill_diagonal(1/(sqdistance + 1), 0)
p_Yp_given_Y = one_over/one_over.sum(axis=1).reshape((N, 1))
return p_Yp_given_Y
def p_Yp_Y_var_np(Y):
N = Y.shape[0]
sqdistance = euclidean2_np(Y)
one_over = 1./(sqdistance + 1)
p_Yp_given_Y = one_over/one_over.sum(axis=1).reshape((N, 1))
return p_Yp_given_Y
def kl_cost_var(X, Y, sigma, metric):
p_Xp_given_X = p_Xp_given_X_var(X, sigma, metric)
PX = p_Xp_X_var(p_Xp_given_X)
PY = p_Yp_Y_var(Y)
PXc = T.maximum(PX, epsilon)
PYc = T.maximum(PY, epsilon)
return T.mean(T.sum(PX * T.log(PXc / PYc),-1))
def reverse_kl_cost_var(X, Y, sigma, metric):
p_Xp_given_X = p_Xp_given_X_var(X, sigma, metric)
PX = p_Xp_X_var(p_Xp_given_X)
PY = p_Yp_Y_var(Y)
PXc = T.maximum(PX, epsilon)
PYc = T.maximum(PY, epsilon)
return -T.mean(T.sum(PY * T.log(PXc / PYc),-1))
def js_cost_var(X, Y, sigma, metric):
return kl_cost_var(X, Y, sigma, metric) * 0.5 + \
reverse_kl_cost_var(X, Y, sigma, metric) * 0.5
def chi_square_cost_var(X, Y, sigma, metric):
p_Xp_given_X = p_Xp_given_X_var(X, sigma, metric)
PX = p_Xp_X_var(p_Xp_given_X)
PY = p_Yp_Y_var(Y)
PXc = T.maximum(PX, epsilon)
PYc = T.maximum(PY, epsilon)
return T.mean(T.sum(PY * (PXc / PYc - 1.)**2, -1))
def hellinger_cost_var(X, Y, sigma, metric):
p_Xp_given_X = p_Xp_given_X_var(X, sigma, metric)
PX = p_Xp_X_var(p_Xp_given_X)
PY = p_Yp_Y_var(Y)
PXc = T.maximum(PX, epsilon)
PYc = T.maximum(PY, epsilon)
return T.mean(T.sum(PY * (T.sqrt(PXc / PYc) - 1.)**2,-1))
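# The five costs above are f-divergences between the high-dimensional
# neighbour distribution P and the low-dimensional Student-t neighbour
# distribution Q, averaged over rows:
#   kl: sum P*log(P/Q)      rkl: sum Q*log(Q/P)      js: (kl + rkl)/2
#   ch: sum Q*(P/Q - 1)^2   hl:  sum Q*(sqrt(P/Q) - 1)^2
# A NumPy-only sanity sketch (illustrative; the theano functions above work
# on symbolic variables):
def _demo_divergences_np(P, Q, eps=1e-6):
    P = np.maximum(P, eps)
    Q = np.maximum(Q, eps)
    kl = np.mean(np.sum(P * np.log(P / Q), axis=-1))
    hl = np.mean(np.sum(Q * (np.sqrt(P / Q) - 1.0) ** 2, axis=-1))
    return kl, hl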
def find_sigma(X_shared, sigma_shared, N, perplexity, sigma_iters,
metric, verbose=0):
"""Binary search on sigma for a given perplexity."""
X = T.fmatrix('X')
sigma = T.fvector('sigma')
target = np.log(perplexity)
P = T.maximum(p_Xp_given_X_var(X, sigma, metric), epsilon)
entropy = -T.sum(P*T.log(P), axis=1)
# Setting update for binary search interval
sigmin_shared = theano.shared(np.full(N, np.sqrt(epsilon), dtype=floath))
sigmax_shared = theano.shared(np.full(N, np.inf, dtype=floath))
sigmin = T.fvector('sigmin')
sigmax = T.fvector('sigmax')
upmin = T.switch(T.lt(entropy, target), sigma, sigmin)
upmax = T.switch(T.gt(entropy, target), sigma, sigmax)
givens = {X: X_shared, sigma: sigma_shared, sigmin: sigmin_shared,
sigmax: sigmax_shared}
updates = [(sigmin_shared, upmin), (sigmax_shared, upmax)]
update_intervals = theano.function([], entropy, givens=givens,
updates=updates)
# Setting update for sigma according to search interval
upsigma = T.switch(T.isinf(sigmax), sigma*2, (sigmin + sigmax)/2.)
givens = {sigma: sigma_shared, sigmin: sigmin_shared,
sigmax: sigmax_shared}
updates = [(sigma_shared, upsigma)]
update_sigma = theano.function([], sigma, givens=givens, updates=updates)
for i in range(sigma_iters):
e = update_intervals()
update_sigma()
if verbose:
print('Iteration: {0}.'.format(i+1))
print('Perplexities in [{0:.4f}, {1:.4f}].'.format(np.exp(e.min()),
np.exp(e.max())))
if np.any(np.isnan(np.exp(e))):
raise Exception('Invalid sigmas. The perplexity is probably too low.')
def find_sigma_np(X, sigma, N, perplexity, sigma_iters, metric, verbose=1, approxF=0):
"""Binary search on sigma for a given perplexity."""
target = np.log(perplexity)
# Setting update for binary search interval
sigmin = np.full(N, np.sqrt(epsilon), dtype='float32')
sigmax = np.full(N, np.inf, dtype='float32')
for i in range(sigma_iters):
P = np.maximum(p_Xp_given_X_np(X, sigma, metric, approxF), epsilon)
entropy = -np.sum(P*np.log(P), axis=1)
minind = np.argwhere(entropy < target).flatten()
maxind = np.argwhere(entropy > target).flatten()
sigmin[minind] = sigma[minind]
sigmax[maxind] = sigma[maxind]
infmask = np.argwhere(np.isinf(sigmax)).flatten()
old_sigma = sigma[infmask]
sigma = (sigmin + sigmax)/2.
sigma[infmask] = old_sigma*2
if verbose:
print('Iteration: {0}.'.format(i+1))
print('Perplexities in [{0:.4f}, {1:.4f}].'.format(np.exp(entropy.min()), np.exp(entropy.max())))
if np.any(np.isnan(np.exp(entropy))):
raise Exception('Invalid sigmas. The perplexity is probably too low.')
return sigma
if __name__ == '__main__':
asdf = discrete_sample(np.asarray([0.3,0.2,0.5]), 1000)
import pdb; pdb.set_trace()
```
#### File: jiwoongim/ft-SNE/main.py
```python
import os, sys, gzip, pickle, cPickle, argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from tsne import tsne
from utils import unpickle, plot_map
from utils_sne import precision_K, K_neighbours
from sklearn.decomposition import PCA
RNG = np.random.RandomState(0)
def parse_args():
desc = "Pytorch implementation of AAE collections"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--datatype', type=str, default='mnist', \
choices=['mnist','mnist1','face','news'],
help='The name of dataset')
parser.add_argument('--dataset_path', type=str, \
default='./data/',\
help='Dataset directory')
parser.add_argument('--divtypet', type=str, default='kl', \
choices=['kl','rkl','js','hl', 'ch'],
help='Choose your f-divergence')
parser.add_argument('--perplexity_tsne', type=int, default=100, \
help='Perplexity')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
divtypet = args.divtypet
dataset_path = args.dataset_path
perplexity_tsne = args.perplexity_tsne
if args.datatype == 'mnist':
dataset_path = dataset_path + '/mnist.pkl.gz'
f = gzip.open(dataset_path, 'rb')
train_set_np, valid_set_np, test_set_np = cPickle.load(f)
ind0 = np.argwhere(train_set_np[1] == 0).flatten()
ind1 = np.argwhere(train_set_np[1] == 1).flatten()
ind2 = np.argwhere(train_set_np[1] == 2).flatten()
ind3 = np.argwhere(train_set_np[1] == 4).flatten()
ind4 = np.argwhere(train_set_np[1] == 5).flatten()
ind = np.concatenate([ind0, ind1, ind2, ind3, ind4])
data = train_set_np[0][ind]
label= train_set_np[1][ind]
pca = PCA(n_components=30)
pcastr = 'pca30_5class'
data = pca.fit(data).transform(data)
perm = RNG.permutation(data.shape[0])
data = data [perm][:6000]
color= label[perm][:6000]
initial_momentum=0.5
n_epochs_tsne=2000;
if divtypet=='hl':
initial_lr_tsne=300
momentum_switch=200
lrDecay=100
elif divtypet=='ch':
initial_lr_tsne=10;
momentum_switch=200
lrDecay=100
elif divtypet=='rkl':
initial_lr_tsne=1000;
momentum_switch=200
lrDecay=100
elif divtypet=='js':
initial_lr_tsne=1000;
momentum_switch=200
lrDecay=100
else:
initial_lr_tsne=2500
momentum_switch=200
lrDecay=100
elif args.datatype == 'mnist1':
dataset_path = dataset_path + '/MNIST/mnist.pkl.gz'
f = gzip.open(dataset_path, 'rb')
train_set_np, valid_set_np, test_set_np = cPickle.load(f)
ind = np.argwhere(train_set_np[1] == 1).flatten()
data = train_set_np[0][ind]
label= train_set_np[1][ind]
pca = PCA(n_components=30)
pcastr = 'pca30_1class'
data = pca.fit(data).transform(data)
perm = RNG.permutation(data.shape[0])
data = data [perm][:5000]
color= label[perm][:5000]
initial_momentum=0.5; momentum_switch=200
n_epochs_tsne=200;
if divtypet=='hl':
initial_lr_tsne=300
lrDecay=100
elif divtypet=='ch':
initial_lr_tsne=5;
momentum_switch=1
lrDecay=100
elif divtypet=='rkl':
initial_lr_tsne=1000;
lrDecay=100
elif divtypet=='js':
initial_lr_tsne=1000;
lrDecay=100
else:
initial_lr_tsne=1000
lrDecay=100
elif args.datatype == 'face':
import scipy.io as sio
mat_contents = sio.loadmat(dataset_path+'/face_data.mat')
data = mat_contents['images'].T
light = (mat_contents['lights'].T - mat_contents['lights'].T.min()) / mat_contents['lights'].T.max()
poses = (mat_contents['poses'].T - mat_contents['poses'].T.min()) / (mat_contents['poses'].T.max() - mat_contents['poses'].T.min())
color = poses[:,0]
n_epochs_tsne=1000;
pcastr = 'pose1'
if divtypet=='hl':
initial_momentum=0.5
initial_lr_tsne=100
momentum_switch=100
lrDecay=10.0
elif divtypet=='ch':
initial_momentum=0.5
initial_lr_tsne=100
momentum_switch=100
lrDecay=10
elif divtypet=='rkl':
initial_momentum=0.5
initial_lr_tsne=1000;
momentum_switch=25
lrDecay=50
elif divtypet=='js':
initial_momentum=0.5
initial_lr_tsne=1000;
momentum_switch=200
lrDecay=100
else:
initial_momentum=0.5
initial_lr_tsne=1000
momentum_switch=200
lrDecay=100
elif args.datatype == 'news':
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
categories = ['rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey', \
'sci.crypt', 'sci.electronics', 'sci.med', 'sci.space', 'soc.religion.christian', \
'talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc']
newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)
vectorizer = TfidfVectorizer()
data = vectorizer.fit_transform(newsgroups_train.data).todense().astype('float32')
color = newsgroups_train.target
pca = PCA(n_components=30)
pcastr = '_pca30_3hier'
data = pca.fit(data).transform(data)
data, color = data[:6000], color[:6000]
data = data / (data.max()-data.min())
n_epochs_tsne=300;
if divtypet=='hl':
initial_momentum=0.5
initial_lr_tsne=100
momentum_switch=200
lrDecay=5
elif divtypet=='ch':
initial_momentum=0.5
initial_lr_tsne=100
momentum_switch=200
lrDecay=100
elif divtypet=='rkl':
initial_momentum=0.5
initial_lr_tsne=1000
momentum_switch=100
lrDecay=25
elif divtypet=='js':
initial_momentum=0.5
initial_lr_tsne=3000;
momentum_switch=200
lrDecay=100
else:
initial_momentum=0.5
initial_lr_tsne=1500
momentum_switch=200
lrDecay=100
print 'Divtype %s, Perplexity %d' % (divtypet, perplexity_tsne)
fname = args.datatype+'/'+divtypet+'/tsne_'+str(perplexity_tsne)+'perp'+str(n_epochs_tsne)+'epoch_initlr'+str(initial_lr_tsne)+pcastr
projX = tsne(data,
initial_lr=initial_lr_tsne, \
final_lr=initial_lr_tsne,\
lrDecay=lrDecay,\
initial_momentum=initial_momentum,\
momentum_switch=momentum_switch,\
perplexity=perplexity_tsne, \
n_epochs=n_epochs_tsne, fname=fname, \
color=color, divtype=divtypet, datatype=args.datatype)
print(fname)
pass
```
#### File: jiwoongim/ft-SNE/utils_sne.py
```python
import os, sys, gzip, pickle, cPickle
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from utils import unpickle
from core import p_Xp_given_X_np, p_Yp_Y_var_np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot_map_news(xx, colors, color_dict, fname):
plt.figure()
ax = plt.subplot(111)
area = np.pi * 4 #* (15 * np.random.rand(N))**2 # 0 to 15 point radii
#jfor i, x in enumerate(xx):
#j plt.scatter(xx[i,0], xx[i,1], s=area, c=colors[i], alpha=0.5, cmap=plt.cm.Spectral)
for i, x in enumerate(xx):
plt.scatter(x[0], x[1], s=area, c=color_dict[colors[i]], alpha=0.7, facecolor='0.8', lw = 0)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 1., box.height])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),
fancybox=True, shadow=True, ncol=3)
plt.axis('off')
plt.savefig(fname, bbox_inches='tight', format='pdf')
def plot_map_c(xx, colors, fname):
plt.figure()
ax = plt.subplot(111)
area = np.pi * 4 #* (15 * np.random.rand(N))**2 # 0 to 15 point radii
#jfor i, x in enumerate(xx):
#j plt.scatter(xx[i,0], xx[i,1], s=area, c=colors[i], alpha=0.5, cmap=plt.cm.Spectral)
plt.scatter(xx[:,0], xx[:,1], s=area, c=colors, alpha=1.0, cmap=plt.cm.Spectral, \
facecolor='0.5', lw = 0)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 1., box.height])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),
fancybox=True, shadow=True, ncol=3)
plt.axis('off')
plt.savefig(fname, bbox_inches='tight', format='pdf')
def plot1D(xx, colors, fname):
plt.figure()
ax = plt.subplot(111)
area = np.pi * 5 #* (15 * np.random.rand(N))**2 # 0 to 15 point radii
#jfor i, x in enumerate(xx):
#j plt.scatter(xx[i,0], xx[i,1], s=area, c=colors[i], alpha=0.5, cmap=plt.cm.Spectral)
#plt.plot(xx, c=colorVal, alpha=0.9, lw = 0)
dummy = np.zeros_like(xx)
plt.scatter(xx, dummy, s=area, c=colors, alpha=0.9, cmap=plt.cm.Spectral, facecolor='0.5', lw = 0)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 1., box.height])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),
fancybox=True, shadow=True, ncol=3)
plt.savefig(fname, bbox_inches='tight', format='pdf')
def plot3D(xx, colors, fname):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
area = np.pi *5 #* (15 * np.random.rand(N))**2 # 0 to 15 point radii
ax.scatter(xx[:,0], xx[:,1], xx[:,2], c=colors, s=area, alpha=0.5, cmap=plt.cm.Spectral, \
facecolor='0.5', lw = 0)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 1., box.height])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),
fancybox=True, shadow=True, ncol=3)
plt.axis('off')
plt.savefig(fname, bbox_inches='tight', format='pdf', transparent=True)
def precision_K(p_sorted_ind, q_sorted_ind, Ks, K=3):
p_sorted_ind = p_sorted_ind[:, :K]
q_sorted_ind = q_sorted_ind[:, :K]
N = p_sorted_ind.shape[0]
accuracy = np.zeros((N,len(Ks)))
# For each point in x compute the distance of K points in P and Q
for j,kk in enumerate(Ks):
for i in xrange(N):
for k in xrange(kk):
ind_k = q_sorted_ind[i, k]
tmp_k = np.argwhere(ind_k == p_sorted_ind[i,:kk]).flatten()
if tmp_k.shape[0] > 0:
accuracy[i,j] += 1.0
# Count the number of correct indices
outputs = []
for jj in xrange(len(Ks)):
outputs += [[np.mean(accuracy[:,jj]), np.std(accuracy[:,jj])]]
return outputs
def K_neighbours(data, maxK=10, revF=False, sigma=None):
from utils import dist2hy_np
#dists = dist2hy_np(data, data)
if sigma is not None:
dists = p_Xp_given_X_np(data, sigma, 'euclidean')
else:
dists = p_Yp_Y_var_np(data)
N, _ = dists.shape
sorted_ind_p = np.zeros((N,maxK), dtype='int32')
for i in xrange(N):sorted_ind_p[i,:] = np.argsort(dists[i,:])[1:maxK+1]
if revF: sorted_ind_p = sorted_ind_p[:,::-1]
return sorted_ind_p, dists
def neighbour_accuracy_K(data, labels, Ks, maxK=10):
#from utils import dist2hy_np
#dists = dist2hy_np(data, data)
N, _ = data.shape
fractions = []
for i in xrange(N):
#ind_sort = np.argsort(dists[i,:])[1:maxK+1]
ind_sort = data[i,:]
label = labels[i]
neighbor_labels = labels[ind_sort]
fraction = np.asarray(neighbor_labels == label) * 1.0
fractions.append(fraction)
fractions = np.asarray(fractions)
output = []
for K in Ks:
output += [np.mean(np.sum(fractions[:,:K], axis=1) / K), \
np.std(np.sum(fractions[:,:K], axis=1) / K)]
return output
def get_iris_data():
data, label = [], []
f = open('/groups/branson/home/imd/Documents/data/embedding_data/iris.txt', 'r')
line = f.readline()
data.append(line[:-1])
label.append(line[-1])
while line.strip() != '':
line = f.readline()
data.append(line[:-1])
label.append(line[-1])
    return np.asarray(data), np.asarray(label)
``` |
{
"source": "jiwoongim/IMsML",
"score": 2
} |
#### File: code/model_kit/layer_norm.py
```python
import os, sys
import numpy as np
import tensorflow as tf
import theano
import theano.tensor as T
from utils.nn_utils import *
from utils.tf_utils import *
TINY = 1e-5
class Layer_Norm(object):
def __init__(self, D, M, name, numpy_rng):
self.W = initialize_weight(D, M, name, numpy_rng, 'uniform')
self.eta = theano.shared(np.ones((M,), dtype=theano.config.floatX), name='eta')
self.beta = theano.shared(np.zeros((M,), dtype=theano.config.floatX), name='beta')
self.params = [self.W, self.eta, self.beta]
def propagate(self, X, atype='sigmoid'):
H = self.pre_activation(X)
H = activation_fn_th(H, atype=atype)
return H
def pre_activation(self, X):
        Z = self.post_batch_norm(X)
H = self.eta * Z + self.beta
return H
def post_batch_norm(self, X):
Z = T.dot(X, self.W)
mean = Z.mean(axis=-1)
std = Z.std( axis=-1)
Z = (Z - mean) / (std + TINY)
return Z
def layer_norm_fn(Z, beta, eta):
mean, var = tf.nn.moments(Z,axes=[1])
Z = (Z - tf.expand_dims(mean, 1)) / \
tf.sqrt(tf.expand_dims(var,1) + TINY)
H = tf.expand_dims(eta, 0) * Z + tf.expand_dims(beta, 0)
return H
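# A quick shape check for layer_norm_fn; the sizes below are arbitrary assumptions,
# not taken from the original repository (TF 1.x graph-mode style, matching this file).
if __name__ == '__main__':
    Z = tf.random_normal([32, 128])   # [batch, features] pre-activations
    beta = tf.zeros([128])
    eta = tf.ones([128])
    H = layer_norm_fn(Z, beta, eta)   # normalized over the feature axis, then scaled/shifted
    with tf.Session() as sess:
        print(sess.run(H).shape)      # (32, 128)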
```
#### File: code/model_kit/rez_block_layer.py
```python
import math
import tensorflow as tf
from model_kits.bn_layer import bn
from model_kits.conv_layer import conv2d
from utils.utils import base_name
from utils.nn_utils import init_weights, activation_fn
"This code is from https://github.com/ry/tensorflow-resnet/"
def stack(x, c):
for n in range(c['num_blocks']):
s = c['stack_stride'] if n == 0 else 1
c['block_stride'] = s
with tf.variable_scope('block%d' % (n + 1)):
x = block(x, c)
return x
def block(x, c, atype='relu'):
filters_in = x.get_shape()[-1]
# Note: filters_out isn't how many filters are outputed.
# That is the case when bottleneck=False but when bottleneck is
# True, filters_internal*4 filters are outputted. filters_internal is how many filters
# the 3x3 convs output internally.
m = 4 if c['bottleneck'] else 1
filters_out = m * c['block_filters_internal']
shortcut = x # branch 1
c['conv_filters_out'] = c['block_filters_internal']
if c['bottleneck']:
with tf.variable_scope('a'):
c['ksize'] = 1
c['stride'] = c['block_stride']
x = conv2d(x, c)
x = bn(x, c)
x = activation_fn(x, atype)
with tf.variable_scope('b'):
x = conv2d(x, c)
x = bn(x, c)
x = activation_fn(x, atype)
with tf.variable_scope('c'):
c['conv_filters_out'] = filters_out
c['ksize'] = 1
assert c['stride'] == 1
x = conv2d(x, c)
x = bn(x, c)
else:
with tf.variable_scope('A'):
c['stride'] = c['block_stride']
x = conv2d(x, c)
x = bn(x, c)
x = activation_fn(x, atype)
with tf.variable_scope('B'):
c['conv_filters_out'] = filters_out
#assert c['stride'] == 1
x = conv2d(x, c)
x = bn(x, c)
with tf.variable_scope('shortcut'):
if filters_out != filters_in or c['block_stride'] != 1:
c['ksize'] = 1
c['stride'] = c['block_stride']
c['conv_filters_out'] = filters_out
shortcut = conv2d(shortcut, c)
shortcut = bn(shortcut, c)
return activation_fn(x + shortcut, atype)
```
#### File: code/utils/img_utils.py
```python
import os, sys, math, time, inspect, zipfile, urllib
import numpy as np
import tensorflow as tf
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before importing pyplot
import matplotlib.pyplot as plt
from skimage.transform import resize as imresize
plt.style.use('ggplot')
def resize_image(img, ptype='vgg'):
# Note that in the lecture, I used a slightly different inception
# model, and this one requires us to subtract the mean from the input image.
# The preprocess function will also crop/resize the image to 299x299
    og = img  # keep a reference to the original input image
    if ptype == 'vgg':
        img = vgg_preprocess(og)
    elif ptype == 'inception':
        img = inception_preprocess(og)
    print(og.shape)
print(img.shape)
return img
def inception_preprocess(img, crop=True, resize=True, dsize=(299, 299)):
if img.dtype != np.uint8:
img *= 255.0
if crop:
crop = np.min(img.shape[:2])
r = (img.shape[0] - crop) // 2
c = (img.shape[1] - crop) // 2
cropped = img[r: r + crop, c: c + crop]
else:
cropped = img
if resize:
rsz = imresize(cropped, dsize, preserve_range=True)
else:
rsz = cropped
if rsz.ndim == 2:
rsz = rsz[..., np.newaxis]
rsz = rsz.astype(np.float32)
# subtract imagenet mean
return (rsz - 117)
def inception_deprocess(img):
return np.clip(img + 117, 0, 255).astype(np.uint8)
def vgg_preprocess(img, crop=True, resize=True, dsize=(224, 224)):
if img.dtype == np.uint8:
img = img / 255.0
if crop:
short_edge = min(img.shape[:2])
yy = int((img.shape[0] - short_edge) / 2)
xx = int((img.shape[1] - short_edge) / 2)
crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
else:
crop_img = img
if resize:
norm_img = imresize(crop_img, dsize, preserve_range=True)
else:
norm_img = crop_img
return (norm_img).astype(np.float32)
def vgg_deprocess(img):
return np.clip(img * 255, 0, 255).astype(np.uint8)
# return ((img / np.max(np.abs(img))) * 127.5 +
# 127.5).astype(np.uint8)
"""Utilities used in the Kadenze Academy Course on Deep Learning w/ Tensorflow.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
<NAME>
Copyright <NAME>, June 2016.
"""
def imcrop_tosquare(img):
"""Make any image a square image.
Parameters
----------
img : np.ndarray
Input image to crop, assumed at least 2d.
Returns
-------
crop : np.ndarray
Cropped image.
"""
size = np.min(img.shape[:2])
extra = img.shape[:2] - size
crop = img
for i in np.flatnonzero(extra):
crop = np.take(crop, extra[i] // 2 + np.r_[:size], axis=i)
return crop
def montage(images, saveto='montage.png'):
"""Draw all images as a montage separated by 1 pixel borders.
Also saves the file to the destination specified by `saveto`.
Parameters
----------
images : numpy.ndarray
Input array to create montage of. Array should be:
batch x height x width x channels.
saveto : str
Location to save the resulting montage image.
Returns
-------
m : numpy.ndarray
Montage image.
"""
if isinstance(images, list):
images = np.array(images)
img_h = images.shape[1]
img_w = images.shape[2]
n_plots = int(np.ceil(np.sqrt(images.shape[0])))
if len(images.shape) == 4 and images.shape[3] == 3:
m = np.ones(
(images.shape[1] * n_plots + n_plots + 1,
images.shape[2] * n_plots + n_plots + 1, 3)) * 0.5
else:
m = np.ones(
(images.shape[1] * n_plots + n_plots + 1,
images.shape[2] * n_plots + n_plots + 1)) * 0.5
for i in range(n_plots):
for j in range(n_plots):
this_filter = i * n_plots + j
if this_filter < images.shape[0]:
this_img = images[this_filter]
m[1 + i + i * img_h:1 + i + (i + 1) * img_h,
1 + j + j * img_w:1 + j + (j + 1) * img_w] = this_img
plt.imsave(arr=m, fname=saveto)
return m
def gauss(mean, stddev, ksize):
"""Uses Tensorflow to compute a Gaussian Kernel.
Parameters
----------
mean : float
Mean of the Gaussian (e.g. 0.0).
stddev : float
Standard Deviation of the Gaussian (e.g. 1.0).
ksize : int
Size of kernel (e.g. 16).
Returns
-------
kernel : np.ndarray
Computed Gaussian Kernel using Tensorflow.
"""
g = tf.Graph()
with tf.Session(graph=g):
x = tf.linspace(-3.0, 3.0, ksize)
z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
(2.0 * tf.pow(stddev, 2.0)))) *
(1.0 / (stddev * tf.sqrt(2.0 * 3.1415))))
return z.eval()
def gauss2d(mean, stddev, ksize):
"""Uses Tensorflow to compute a 2D Gaussian Kernel.
Parameters
----------
mean : float
Mean of the Gaussian (e.g. 0.0).
stddev : float
Standard Deviation of the Gaussian (e.g. 1.0).
ksize : int
Size of kernel (e.g. 16).
Returns
-------
kernel : np.ndarray
Computed 2D Gaussian Kernel using Tensorflow.
"""
z = gauss(mean, stddev, ksize)
g = tf.Graph()
with tf.Session(graph=g):
z_2d = tf.matmul(tf.reshape(z, [ksize, 1]), tf.reshape(z, [1, ksize]))
return z_2d.eval()
def convolve(img, kernel):
"""Uses Tensorflow to convolve a 4D image with a 4D kernel.
Parameters
----------
img : np.ndarray
4-dimensional image shaped N x H x W x C
kernel : np.ndarray
4-dimensional image shape K_H, K_W, C_I, C_O corresponding to the
kernel's height and width, the number of input channels, and the
number of output channels. Note that C_I should = C.
Returns
-------
result : np.ndarray
Convolved result.
"""
g = tf.Graph()
with tf.Session(graph=g):
convolved = tf.nn.conv2d(img, kernel, strides=[1, 1, 1, 1], padding='SAME')
res = convolved.eval()
return res
def gabor(ksize=32):
"""Uses Tensorflow to compute a 2D Gabor Kernel.
Parameters
----------
ksize : int, optional
Size of kernel.
Returns
-------
gabor : np.ndarray
Gabor kernel with ksize x ksize dimensions.
"""
g = tf.Graph()
with tf.Session(graph=g):
z_2d = gauss2d(0.0, 1.0, ksize)
ones = tf.ones((1, ksize))
ys = tf.sin(tf.linspace(-3.0, 3.0, ksize))
ys = tf.reshape(ys, [ksize, 1])
wave = tf.matmul(ys, ones)
gabor = tf.mul(wave, z_2d)
return gabor.eval()
"""Utilities used in the Kadenze Academy Course on Deep Learning w/ Tensorflow.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
<NAME>
Copyright <NAME>, June 2016.
"""
def get_celeb_files(datapath='/shared/imd/data/img_align_celeba/'):
"""Downloads the first 100 images of the celeb dataset.
Files will be placed in a directory 'img_align_celeba' if one
doesn't exist.
Returns
-------
files : list of strings
Locations to the first 100 images of the celeb net dataset.
"""
# Create a directory
if not os.path.exists(datapath):
os.mkdir(datapath)
# Now perform the following 100 times:
for img_i in range(1, 101):
# create a string using the current loop counter
f = '000%03d.jpg' % img_i
if os.path.exists(datapath+f):
continue
# and get the url with that string appended the end
url = 'https://s3.amazonaws.com/cadl/celeb-align/' + f
# We'll print this out to the console so we can see how far we've gone
print(url), # end='\r')
# And now download the url to a location inside our new directory
urllib.urlretrieve(url, os.path.join(datapath, f))
files = [os.path.join(datapath, file_i)
for file_i in os.listdir(datapath)
if '.jpg' in file_i]
return files
def get_celeb_imgs():
"""Loads the first 100 images of the celeb dataset.
Returns
-------
imgs : list of np.ndarray
List of the first 100 images from the celeb dataset
"""
return [plt.imread(f_i) for f_i in get_celeb_files()]
```
#### File: utils/preprocessors/sanitizer.py
```python
from abc_processor import Base
import re
# this will filter out data to the regexp input into config.
class Processor(Base):
def run(self, data):
temp = re.sub(self.config, "", data)
return temp
```
#### File: code/utils/processor_chain.py
```python
class ProcessorChain():
def __init__(self):
self.processors = []
def load(self, processor):
self.processors.append(processor)
def run(self, data):
temp = data
for processor in self.processors:
temp = processor.run(data)
return temp
``` |
{
"source": "jixer/mpg-prediction-api",
"score": 3
} |
#### File: src/watcher/WatchdogHandler.py
```python
from watchdog.events import FileSystemEventHandler
import os
from data import model
class WatchdogHandler(FileSystemEventHandler):
def on_created(self, event):
"""
event.event_type
'modified' | 'created' | 'moved' | 'deleted'
event.is_directory
True | False
event.src_path
path/to/observed/file
"""
model.train_model(event.src_path)
os.remove(event.src_path)
print(event.src_path)
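# A minimal sketch of wiring this handler to a watchdog Observer; the watched
# directory './incoming' is an assumption, not taken from the original project.
if __name__ == '__main__':
    import time
    from watchdog.observers import Observer

    observer = Observer()
    observer.schedule(WatchdogHandler(), path='./incoming', recursive=False)
    observer.start()              # watches in a background thread
    try:
        while True:
            time.sleep(1)         # keep the main thread alive
    except KeyboardInterrupt:
        observer.stop()
    observer.join()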
``` |
{
"source": "jixiaojie/CarND-Advanced-Lane-Lines",
"score": 3
} |
#### File: jixiaojie/CarND-Advanced-Lane-Lines/functions.py
```python
import matplotlib.image as mpimg
import numpy as np
import cv2
import os
#debug mode
debug = False
def calibrateCam():
#Get the images' name
images = os.listdir("camera_cal/")
objpoints = [] # original points
imgpoints = [] # img points
w = 9 # widht of chessboard corners
h = 6 # height of chessboard corners
#Get objpoints
objp = np.zeros((w*h,3), np.float32)
num = 0
for h1 in range(h):
for w1 in range(w):
objp[num] = [w1, h1, 0]
num += 1
#Get calibrate parameters
for fname in images:
img = mpimg.imread('camera_cal/' + fname)
gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
size = gray.shape[::-1]
ret, corners = cv2.findChessboardCorners(gray, (w,h), None)
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, size, None, None)
#return calibrate parameters
return ret, mtx, dist, rvecs, tvecs
def abs_sobel_thresh(img, orient='x', sobel_kernel = 3, thresh = (0, 255)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the derivative in x or y given orient = 'x' or 'y'
sobel = None
if orient == 'x':
sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize = sobel_kernel)
else:
sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize = sobel_kernel)
# 3) Take the absolute value of the derivative or gradient
abs_sobel = np.absolute(sobel)
# 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# 5) Create a mask of 1's where the scaled gradient magnitude
# is > thresh_min and < thresh_max
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
# 6) Return this mask as your binary_output image
return binary_output
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
# 3) Calculate the magnitude
abs_sobel = np.sqrt(sobel_x ** 2 + sobel_y ** 2)
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# 5) Create a binary mask where mag thresholds are met
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= mag_thresh[0]) & (scaled_sobel <= mag_thresh[1])] = 1
# 6) Return this mask as your binary_output image
return binary_output
def dir_thresh(img, sobel_kernel=3, thresh=(0, np.pi/2)):
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize = sobel_kernel)
sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize = sobel_kernel)
# 3) Take the absolute value of the x and y gradients
abs_sobelx = np.absolute(sobel_x)
abs_sobely = np.absolute(sobel_y)
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
dir_sobel = np.arctan2(abs_sobely, abs_sobelx)
# 5) Create a binary mask where direction thresholds are met
binary_output = np.uint8(np.zeros_like(dir_sobel))
binary_output[(dir_sobel > thresh[0]) & (dir_sobel < thresh[1])] = 1
# 6) Return this mask as your binary_output image
return binary_output
def hsv_filter(img):
#Copy the img
imgtemp = np.copy(img)
#Blur the img
blur = cv2.GaussianBlur(imgtemp,(5,5),5)
#Get hsv values from the img
hsv = cv2.cvtColor(blur, cv2.COLOR_RGB2HSV)
h = hsv[:,:,0]
s = hsv[:,:,1]
v = hsv[:,:,2]
#Create a binary mask
mask = np.uint8(np.zeros_like(h))
mask[(h > 11) & (h < 34) & (s > 100) & (s < 255) & (v > 150) & (v < 255)] = 1 #yellow
mask[(h > 0) & (h < 180) & (s > 0) & (s < 30) & (v > 220) & (v <= 255)] = 1 #white
#Crop the mask
mask[:mask.shape[0] // 2, :] = 0
mask[670:, :] = 0
if debug:
mpimg.imsave('output_images/mask.jpg', mask, cmap = 'gray')
mpimg.imsave('output_images/hsv_h.jpg', h, cmap = 'gray')
mpimg.imsave('output_images/hsv_s.jpg', s, cmap = 'gray')
mpimg.imsave('output_images/hsv_v.jpg', v, cmap = 'gray')
return mask
def rgb_filter(img):
#Copy the img
imgtemp = np.copy(img)
#Get rgb values from img
r = imgtemp[:,:,0]
g = imgtemp[:,:,1]
b = imgtemp[:,:,2]
#Create a binary mask
mask = np.uint8(np.zeros_like(r))
mask[(r >= 180) & (r <= 255) & (b >= 30) & (b <= 120) & ((g < 120) | (g > 180))] = 1 # yellow
mask[(r >= 230) & (r <= 255) & (g >=230) & (g <= 255) & (b >= 230) & (b <= 255)] = 1 #white
mask[:mask.shape[0] // 2, :] = 0
mask[670:, :] = 0
if debug:
mpimg.imsave('output_images/rgb_r.jpg', r, cmap = 'gray')
mpimg.imsave('output_images/rgb_g.jpg', g, cmap = 'gray')
mpimg.imsave('output_images/rgb_b.jpg', b, cmap = 'gray')
#mpimg.imsave('output_images/rgb_mask.jpg', mask, cmap = 'gray')
return mask
def get_perspective(grayimg, src, dst):
# For destination points, I'm arbitrarily choosing some points to be
# a nice fit for displaying our warped result
# again, not exact, but close enough for our purposes
# Given src and dst points, calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(src, dst)
# Warp the image using OpenCV warpPerspective()
img_size = (grayimg.shape[1], grayimg.shape[0])
warped = cv2.warpPerspective(grayimg, M, img_size)
return warped
#class Line():
# def __init__(self):
# # was the line detected in the last iteration?
# self.detected = False
# # x values of the last n fits of the line
# self.recent_xfitted = []
# #average x values of the fitted line over the last n iterations
# self.bestx = None
# #polynomial coefficients averaged over the last n iterations
# self.best_fit = None
# #polynomial coefficients for the most recent fit
# self.current_fit = [np.array([False])]
# #radius of curvature of the line in some units
# self.radius_of_curvature = None
# #distance in meters of vehicle center from the line
# self.line_base_pos = None
# #difference in fit coefficients between last and new fits
# self.diffs = np.array([0,0,0], dtype='float')
# #x values for detected line pixels
# self.allx = None
# #y values for detected line pixels
# self.ally = None
``` |
{
"source": "Ji-Xin/bovina",
"score": 2
} |
#### File: bovina/common/evaluate.py
```python
from common.evaluators.diff_token_evaluator import DiffTokenEvaluator
from common.evaluators.paired_token_evaluator import PairedTokenEvaluator
class EvaluatorFactory(object):
"""
Get the corresponding Evaluator class for a particular dataset.
"""
evaluator_map = {
'ApacheDiffToken': DiffTokenEvaluator,
'ApachePairedToken': PairedTokenEvaluator,
'SpringDiffToken': DiffTokenEvaluator
}
@staticmethod
def get_evaluator(dataset_cls, model, embedding, data_loader, batch_size, device, keep_results=False):
if data_loader is None:
return None
if dataset_cls.NAME not in EvaluatorFactory.evaluator_map:
raise ValueError('{} is not implemented.'.format(dataset_cls))
return EvaluatorFactory.evaluator_map[dataset_cls.NAME](
dataset_cls, model, embedding, data_loader, batch_size, device, keep_results
)
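# Hypothetical usage sketch (the dataset class, model, loader and the evaluator's
# method names are assumptions, not taken from this repository):
#
#   evaluator = EvaluatorFactory.get_evaluator(
#       ApacheDiffTokenDataset, model, embedding, test_loader,
#       batch_size=32, device='cuda:0')
#   if evaluator is not None:
#       metrics = evaluator.get_scores()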
```
#### File: embeddings/code2vec/model.py
```python
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
NINF = - 3.4 * math.pow(10, 38) # -Inf
class Code2Vec(nn.Module):
"""the code2vec model"""
def __init__(self, option):
super(Code2Vec, self).__init__()
self.option = option
self.terminal_embedding = nn.Embedding(
option.terminal_count, option.terminal_embed_size)
self.path_embedding = nn.Embedding(
option.path_count, option.path_embed_size)
self.input_linear = nn.Linear(
option.terminal_embed_size * 2 + option.path_embed_size,
option.encode_size, bias=False)
self.input_layer_norm = nn.LayerNorm(option.encode_size)
if 0.0 < option.dropout_prob < 1.0:
self.input_dropout = nn.Dropout(p=option.dropout_prob)
else:
self.input_dropout = None
self.attention_parameter = Parameter(
torch.nn.init.xavier_normal_(
torch.zeros(option.encode_size, 1, dtype=torch.float32,
requires_grad=True)
).view(-1), requires_grad=True)
self.output_linear = nn.Linear(
option.encode_size*2, option.label_count, bias=True)
self.output_linear.bias.data.fill_(0.0)
def forward(self, starts_prev, paths_prev, ends_prev,
starts_curr, paths_curr, ends_curr):
option = self.option
def get_code_vector(starts, paths, ends):
# embedding
embed_starts = self.terminal_embedding(starts)
embed_paths = self.path_embedding(paths)
embed_ends = self.terminal_embedding(ends)
combined_context_vectors = torch.cat(
(embed_starts, embed_paths, embed_ends), dim=2)
# FNN, Layer Normalization, tanh
combined_context_vectors = self.input_linear(combined_context_vectors)
ccv_size = combined_context_vectors.size()
combined_context_vectors = self.input_layer_norm(
combined_context_vectors.view(-1, option.encode_size)).view(ccv_size)
combined_context_vectors = torch.tanh(combined_context_vectors)
# dropout
if self.input_dropout is not None:
combined_context_vectors = self.input_dropout(combined_context_vectors)
# attention
attn_mask = (starts > 0).float()
attention = self.get_attention(combined_context_vectors, attn_mask)
# code vector
expanded_attn = attention.unsqueeze(-1).expand_as(combined_context_vectors)
code_vector = torch.sum(
torch.mul(combined_context_vectors, expanded_attn), dim=1)
return code_vector
code_vector_prev = get_code_vector(starts_prev, paths_prev, ends_prev)
code_vector_curr = get_code_vector(starts_curr, paths_curr, ends_curr)
code_vector = torch.cat([code_vector_prev, code_vector_curr], 1)
# FNN
outputs = self.output_linear(code_vector)
# if opt.training and opt.dropout_prob < 1.0:
# outputs = F.dropout(outputs, p=opt.dropout_prob, training=opt.training)
return outputs, code_vector, 0 #attention
def get_attention(self, vectors, mask):
"""calculate the attention of the (masked) context vetors. mask=1: meaningful value, mask=0: padded."""
expanded_attn_param = self.attention_parameter.unsqueeze(0).expand_as(vectors)
attn_ca = torch.mul(torch.sum(vectors * expanded_attn_param, dim=2), mask) \
+ (1 - mask) * NINF
# attn_ca = torch.sum(vectors * expanded_attn_param, dim=2)
# attn_ca[mask == 0] = NINF
attention = F.softmax(attn_ca, dim=1)
# expanded_attn_param = self.attention_parameter.unsqueeze(0).expand_as(vectors)
# attn_ca = torch.mul(torch.sum(vectors * expanded_attn_param, dim=2), mask)
# attn_max, _ = torch.max(attn_ca, dim=1, keepdim=True)
# attn_exp = torch.mul(torch.exp(attn_ca - attn_max), mask)
# attn_sum = torch.sum(attn_exp, dim=1, keepdim=True)
# attention = torch.div(attn_exp, attn_sum.expand_as(attn_exp) + eps)
return attention
```
#### File: diff_token/reg_cnn/args.py
```python
import os
import models.diff_token.args
def get_args():
parser = models.diff_token.args.get_args()
parser.add_argument('--dataset', type=str, default='ApacheDiffToken', choices=['ApacheDiffToken', 'SpringDiffToken'])
parser.add_argument('--mode', type=str, default='multichannel', choices=['rand', 'static', 'non-static', 'multichannel'])
parser.add_argument('--output-channel', type=int, default=100)
parser.add_argument('--words-dim', type=int, default=300)
parser.add_argument('--embed-dim', type=int, default=300)
parser.add_argument('--epoch-decay', type=int, default=15)
parser.add_argument('--weight-decay', type=float, default=0)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--dropblock', type=float, default=0.0)
parser.add_argument('--dropblock-size', type=int, default=7)
parser.add_argument('--beta-ema', type=float, default=0, help="for temporal averaging")
parser.add_argument('--embed-droprate', type=float, default=0.0, help="for embedded dropout")
parser.add_argument('--batchnorm', action='store_true')
parser.add_argument('--attention', action='store_true')
parser.add_argument('--dynamic-pool', action='store_true')
parser.add_argument('--dynamic-pool-length', type=int, default=8)
parser.add_argument('--bottleneck-layer', action='store_true')
parser.add_argument('--bottleneck-units', type=int, default=100)
parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'omniocular-data', 'embeddings'))
parser.add_argument('--word-vectors-file', default='java1k_size300_min10.txt')
parser.add_argument('--save-path', type=str, default=os.path.join('models', 'diff_token', 'reg_cnn', 'saves'))
parser.add_argument('--resume-snapshot', type=str)
parser.add_argument('--trained-model', type=str)
args = parser.parse_args()
return args
``` |
{
"source": "jixintong813/2021-march-bootcamp",
"score": 5
} |
#### File: week2-python/assignment2/Assignment2_xintong.py
```python
def is_prime(n:int)->bool:
    if not isinstance(n, int) or n <= 1:
        return False
    for i in range(2, n):
        if n % i == 0:
            return False
    return True
# DO NOT ALTER BELOW.
assert is_prime(2)
assert not is_prime(15)
assert is_prime(7907)
assert not is_prime(-1)
assert not is_prime(0)
# In[ ]:
# Q2 Write a function rotate(ar[], d) that rotates arr[] of size n by d elements.
# Input ar = [1,2,3,4,5,6,7], d = 2
# Output [3,4,5,6,7,1,2]
def rotate(ar:[int], d:int)->list:
    n = len(ar)
    if n == 0 or d % n == 0:
        return ar
    d = d % n
    ar[:] = ar[d:] + ar[:d]
    return ar
# DO NOT ALTER BELOW.
assert rotate([1,2,3,4,5,6,7], 4) == [3,4,5,6,7,1,2]
assert rotate([1,2,3], 4) == [2,3,1]
# In[ ]:
#Q3 Selection sort - implement a workable selection sort algorithm
# https://www.runoob.com/w3cnote/selection-sort.html for reference
# Input students would be a list of [student #, score], sort by score ascending order.
def selection_sort(arr:[[int]])->list:
b=sorted(arr,key=lambda x:x[1])
return b
# DO NOT ALTER BELOW.
assert selection_sort([]) == []
assert selection_sort([[1, 100], [2, 70], [3, 95], [4, 66], [5, 98]]) == [[4, 66], [2, 70], [3, 95], [5, 98], [1, 100]]
# In[ ]:
# Q4. Convert a list of Tuples into Dictionary
# tips: copy operation - copy by value, copy by reference
# 123, String, immutable (copy by value)
# mutable, list, dict... (copy by reference)
def convert(tup:(any), di:{any,any}) -> None:
for i in range(0,len(tup),2):
di[tup[i]] = tup[i+1]
pass
# DO NOT ALTER BELOW.
expected_dict = {}
convert((), expected_dict)
assert expected_dict == {}
convert(('key1', 'val1', 'key2', 'val2'), expected_dict)
assert expected_dict == {'key1': 'val1', 'key2': 'val2'}
# In[ ]:
# Q5. Research why a Python dict achieves constant-time lookup; write the answer in Assignment2-{firstname}.ipynb
# A dict is stored as a hash table: a collection of key-value pairs in which each key is
# hashed to locate its slot. Lookups therefore take constant time on average, and the
# elements do not need to be kept in any sorted order.
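# In[ ]:
# A small illustration (added note, not part of the original answer): the lookup cost
# does not grow with the number of stored keys, because the key is hashed once and
# mapped directly to a bucket.
big = {i: i * i for i in range(100000)}
assert big[99999] == 99999 ** 2   # one hash + bucket probe, O(1) on average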
```
#### File: week3-numpy_pandas/assignment3/assignment3-ruoxi.py
```python
"""
Trade transmission orders often need an integrity check, for example:
{
request :
{
order# : 1,
Execution_details: ['a', 'b', 'c'],
request_time: "2020-10-10T10:00EDT"
},
checksum:1440,
...
}
Integrity can be verified in many ways. Suppose we decide whether an order is valid by
checking the brackets in the whole text, e.g. '{}', '[]', '()'.
For example, {{[],[]}} is valid, while []{[}]( is invalid.
Write a Python program to perform this validation.
d = {'}': '{', ']': '[', ')': '(', '>': '<'}
l,r = d.values(), d.keys()
def checkOrders(orders: str) -> bool:
arr = []
for c in orders:
if c in l:
arr.append(c)
elif c in r:
if arr and arr[-1] == d[c]:
arr.pop()
return arr == []
def checkList(list:[str]) -> [bool]:
n = []
for i in list:
n.append(checkOrders(i))
return n
assert checkList(["()", "(", "{}[]", "[][][]", "[{]{]"]) == [True, False, True, True, False]
"""
# Q3
"""
When trading, we usually go through a broker rather than trading directly with the exchange.
Suppose there are 20 brokers to choose from (broker id is 0...19). Based on their order
performance over a period of time (the time taken to complete each trade), we want to find
the slowest broker and consider terminating its contract.
We represent brokers and order times with a simple data structure: [[broker id, timestamp in seconds]]
[[0, 2], [1, 5], [2, 7], [0, 16], [3, 19], [4, 25], [2, 35]]
Interpretation:
Broker 0 took 2 - 0 = 2s
Broker 1 took 5 - 2 = 3s
Broker 2 took 7 - 5 = 2s
Broker 0 took 16 - 7 = 9s
Broker 3 took 19 - 16 = 3s
Broker 4 took 25 - 19 = 6s
Broker 2 took 35 - 25 = 10s
Overall, broker 2 showed the slowest trade performance.
def slowest(orders: [[int]]) -> int:
d = {}
for i in range(len(orders)):
if i == 0:
d[orders[i][0]] = orders[i][1]
else:
d[orders[i][0]] = orders[i][1] - orders[i - 1][1]
for i,j in d.items():
if j == max(d.values()):
return i
assert slowest([[0, 2], [1, 5], [2, 7], [0, 16], [3, 19], [4, 25], [2, 35]]) == 2
"""
# Q4
"""
Determine whether a robot returns to the origin.
A robot starts at (0, 0) on a plane; it can move one cell U (up), L (left), R (right), or D (down).
Given a sequence of moves, decide whether the robot ends up back at the origin.
Examples
1. moves = "UD", return True.
2. moves = "LL", return False.
3. moves = "RRDD", return False.
4. moves = "LDRRLRUULR", return False.
def judgeRobotMove(moves: str) -> bool:
return moves.count("U") == moves.count("D") and moves.count("L") == moves.count("R")
assert judgeRobotMove("UD")
assert not judgeRobotMove("LL")
assert not judgeRobotMove("RRDD")
assert not judgeRobotMove("LDRRLRUULR")
"""
# Q5
"""
Write a program that validates email format. For a given string, check whether it is an email address:
1. It must contain only lowercase letters, "-", "/", ".", "_" and digits
2. It contains exactly one "@"
3. The parts before and after "@" must not be empty
4. It ends with ".edu" or ".com"
You may use regex or the Python standard library.
"""
``` |
{
"source": "Ji-Xinyou/DIP-proj-DepthEstimation",
"score": 3
} |
#### File: Ji-Xinyou/DIP-proj-DepthEstimation/load_data.py
```python
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from PIL import Image
from load_data_utils import nyu2_paired_path
import torch.nn as nn
import random
import cv2
class nyu2_dataset(Dataset):
'''
nyu2_dataset:
used to train one shot depth estimation
'''
def __init__(self,
pairs,
_transforms=None):
# self.paths has item like [path_of_xtr, path_of_ytr]
self.path_pairs = pairs
self.transforms = _transforms
def __getitem__(self, index):
path_xtr, path_ytr = self.path_pairs[index]
x_tr = cv2.imread(path_xtr)
y_tr = cv2.imread(path_ytr)
y_tr = cv2.cvtColor(y_tr, cv2.COLOR_BGR2GRAY)
if self.transforms:
x_tr = self.transforms(x_tr)
y_tr = self.transforms(y_tr)
normalize_by_imagenet = transforms.Compose([
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])
x_tr = normalize_by_imagenet(x_tr)
return (x_tr, y_tr)
def __len__(self):
return len(self.path_pairs)
def nyu2_dataloaders(batchsize=64, nyu2_path='./nyu2_train'):
'''
split and return training set, validation set and testing test
all in format of torch.util.data.Dataloader
Args:
batchsize (int): the # of entry to be used in one batch of training
(or testing)
nyu2path (str) : the path of nyu2_train dataset
'''
print("Entering nyu2_dataloaders()")
print("---------------- Loading Dataloaders ----------------")
# used for trainingset and validation set
train_val_transforms = transforms.Compose([
# output is a (224, 224, 3) tensor
transforms.ToPILImage(),
transforms.Scale(size=[320, 240]),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(5),
transforms.RandomCrop([224, 224]),
transforms.ToTensor(),
]
)
# used for testing set
test_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Scale(size=[320, 240]),
transforms.RandomCrop([224, 224]),
transforms.ToTensor(),
]
)
# preparing the pathpairs for different parts of data
all_pairs = nyu2_paired_path(nyu2_path=nyu2_path)
# train: val: test = 7: 2: 1
total_size = len(all_pairs)
train_size = int(total_size * 0.7)
ttl_sz_left = total_size - train_size
val_size = int(total_size * 0.2)
# shuffle the list and assign them to datasets
random.shuffle(all_pairs)
train_pair = all_pairs[: train_size]
val_pair = all_pairs[train_size: train_size + val_size]
test_pair = all_pairs[train_size + val_size: ]
# from pairs -> to datasets
train_dataset = nyu2_dataset(pairs=train_pair,
_transforms=train_val_transforms)
val_dataset = nyu2_dataset(pairs=val_pair,
_transforms=train_val_transforms)
test_dataset = nyu2_dataset(pairs=test_pair,
_transforms=test_transforms)
print("-------- Datasets are ready, preparing Dataloaders --------")
# datalodaers, to be enumerated
train_loader = DataLoader (dataset=train_dataset,
shuffle=True,
batch_size=batchsize,
num_workers=4)
val_loader = DataLoader (dataset=val_dataset,
shuffle=True,
batch_size=batchsize,
num_workers=4)
test_loader = DataLoader (dataset=test_dataset,
shuffle=True,
batch_size=batchsize,
num_workers=4)
print("----------------- DataLoaders Ready ----------------")
print("Exit nyu2_dataloaders()")
return (train_loader, val_loader, test_loader)
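# A minimal usage sketch (batch size and dataset path are assumptions; running it
# requires the nyu2_train images to be present on disk).
if __name__ == '__main__':
    train_loader, val_loader, test_loader = nyu2_dataloaders(batchsize=16,
                                                             nyu2_path='./nyu2_train')
    images, depths = next(iter(train_loader))
    print(images.shape, depths.shape)   # expected: (16, 3, 224, 224) and (16, 1, 224, 224)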
```
#### File: Ji-Xinyou/DIP-proj-DepthEstimation/loss.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.linalg as linalg
class Sobel(nn.Module):
'''
Edge detection using Sobel operator:
input: depth image
output:
out[:, 0, :, :] = dx
out[:, 1, :, :] = dy
The output of Sobel operator will be used to
compute terms **loss_grad** and **loss_normal**
'''
def __init__(self):
super(Sobel, self).__init__()
self.edge_conv = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1, bias=False)
edge_kx = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
edge_ky = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
edge_k = np.stack((edge_kx, edge_ky))
# 2(dx, dy) x 1(depth) x (3 x 3) (filter size)
edge_k = torch.from_numpy(edge_k).float().view(2, 1, 3, 3)
self.edge_conv.weight = nn.Parameter(edge_k)
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
out = self.edge_conv(x)
out = out.contiguous().view(-1, 2, x.size(2), x.size(3))
return out
def compute_loss(pred, truth, device, **kwargs):
'''
Compute the loss of the model
Inputs:
pred: output depth of the model
truth: ground truth depth
device: cuda or cpu
kwargs:
'alpha': constant added in the logarithm
default: 0.5
'lambda': constant multiplied on loss_grad for bounding
default: 1
'mu': constant multiplied on loss_normal for bounding
default: 1
Logic:
There are three parts of losses
loss_depth, loss_grad, loss_normal
diff = truth - pred
loss_depth: logarithm of L1/L2 norm of diff
loss_grad: sum of logarithm of L1/L2 norm of diff_dx and diff_dy
        loss_normal: mean of |1 - cosine similarity| between the predicted and ground-truth surface normals, where each normal is built from the depth gradients as (-dx, -dy, 1)
'''
_alpha = kwargs.get('_alpha', 0.5) # Logarithm
_lambda = kwargs.get('_lambda', 1) # Loss_grad
_mu = kwargs.get('_mu', 1) # Loss_normal
_gamma = kwargs.get('_gamma', 1) # Loss_scale
# first term of loss
d = torch.abs(truth - pred)
loss_depth = torch.log(d.norm() + _alpha).mean()
d_mean = d.mean()
loss_scale = -1 * _gamma * d_mean * d_mean
    grad_of = Sobel().to(device=device)
pred_grad, truth_grad = grad_of(pred), grad_of(truth)
pred_dx = pred_grad[:, 0, :, :].contiguous().view_as(truth)
pred_dy = pred_grad[:, 1, :, :].contiguous().view_as(truth)
truth_dx = truth_grad[:, 0, :, :].contiguous().view_as(truth)
truth_dy = truth_grad[:, 1, :, :].contiguous().view_as(truth)
# second term of loss
loss_grad = _lambda * \
(torch.log(torch.abs(truth_dx - pred_dx) + _alpha).mean() + \
torch.log(torch.abs(truth_dy - pred_dy) + _alpha).mean())
# (B, 1, H, W)
normal_z_shape = [truth.size(0), 1, truth.size(2), truth.size(3)]
z_grad = torch.ones(*normal_z_shape).to(device=device)
pred_normal = torch.cat((-pred_dx, -pred_dy, z_grad), 1)
truth_normal = torch.cat((-truth_dx, -truth_dy, z_grad), 1)
# similarity computed in the depth_derivative channel (dim 1)
cos_sim = nn.CosineSimilarity(dim=1, eps=1e-8)
loss_normal = _mu * \
(torch.abs(1 - cos_sim(truth_normal, pred_normal)).mean())
print(loss_depth, loss_grad, loss_normal, loss_scale)
loss = loss_depth + \
loss_grad + \
loss_normal + \
loss_scale
return loss
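# A quick self-contained check of compute_loss; tensor sizes and kwargs are assumptions,
# not taken from the original training script.
if __name__ == '__main__':
    pred = torch.rand(2, 1, 64, 64, requires_grad=True)
    truth = torch.rand(2, 1, 64, 64)
    loss = compute_loss(pred, truth, device='cpu', _lambda=1, _mu=1)
    loss.backward()
    print('total loss:', loss.item())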
```
#### File: model/Res_Unet/Res_Unet.py
```python
from .modules import *
class Residual_Encoder(nn.Module):
'''
The Residual Encoder of the Depth Estimator
Args:
inp: 3 for three channels RGB images
mid_planes: the inp and outp for residual convs in between
outp: output planes of the encoder, **need to be matched** with unet decoder
leaky_alpha: the negative slope of leaky ReLU
'''
def __init__(self,
inp,
mid_planes,
outp,
leaky_alpha=0.02):
super().__init__()
# inp -> mid_planes[0], mp[0] -> mp[1], ..., mp[l - 2] -> mp[l - 1], mp[l - 1] -> outp
self.inconv = Conv_Norm_ReLU(inp=inp,
outp=mid_planes[0],
leaky_alpha=0.02,
stride=1)
# dim = mp[0] now
# each block: residualdouble conv + conv to next dim
# also needs some downsampling to reduce the ram cost
self.blocks = nn.ModuleList()
for i in range(len(mid_planes) - 1):
in_plane = mid_planes[i]
out_plane = mid_planes[i + 1]
self.blocks.append(ResidualDoubleConv(inp=in_plane,
outp=in_plane,
leaky_alpha=leaky_alpha))
self.blocks.append(Conv_Norm_ReLU(inp=in_plane,
outp=out_plane,
leaky_alpha=0.02,
stride=1))
self.outconv = Conv_Norm_ReLU(inp=mid_planes[-1],
outp=outp,
leaky_alpha=leaky_alpha)
# Optional: downsample and upsample to regular resolution
def forward(self, x):
x = self.inconv(x)
for block in self.blocks:
x = block(x)
x = self.outconv(x)
return x
class UNet(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=True):
super(UNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
self.inc = DoubleConv(n_channels, 64)
self.down1 = DownSampling(64, 128)
self.down2 = DownSampling(128, 256)
self.down3 = DownSampling(256, 512)
factor = 2 if bilinear else 1
self.down4 = DownSampling(512, 1024 // factor)
self.up1 = UpSampling(1024, 512 // factor, bilinear)
self.up2 = UpSampling(512, 256 // factor, bilinear)
self.up3 = UpSampling(256, 128 // factor, bilinear)
self.up4 = UpSampling(128, 64, bilinear)
self.conv1 = nn.Conv2d(64, 64, kernel_size=1)
self.conv2 = nn.Conv2d(64, n_classes, kernel_size=1)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.conv1(x)
x = self.conv2(x)
return x
class Encoder_Decoder_Net(nn.Module):
def __init__(self,
e_inp=3,
e_midps=[128],
e_outp=64,
d_outp=1,
leaky_alpha=0.02):
super().__init__()
self.encoder = Residual_Encoder(inp=e_inp,
mid_planes=e_midps,
outp=e_outp,
leaky_alpha=leaky_alpha)
# self.encoder = get_resnet50_encoder()
# output: 512 x 4
# encoder's output channel = decoder's input channel
self.decoder = UNet(n_channels=e_outp,
n_classes=d_outp,
bilinear=True)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
```
#### File: Ji-Xinyou/DIP-proj-DepthEstimation/utils.py
```python
import torch
def save_param(model, pth_path):
'''
save the parameters of the model
Args:
model: the model to which the params belong
pth_path: the path where .pth file is saved
'''
torch.save(model.state_dict(), pth_path)
def load_param(model, pth_path):
'''
load the parameters of the model
Args:
model: the model where the params go into
pth_path: the path where .pth (to be loaded) is saved
'''
model.load_state_dict(torch.load(pth_path))
``` |
{
"source": "Ji-Xinyou/SJTU-networking-labs",
"score": 3
} |
#### File: lab3/clientServer/server.py
```python
import socket
import os
import math
import threading
import json
with open("../macro.json", 'r') as f:
params = json.load(f)
FILESIZE = params["FILESIZE"]
PACKETSIZE = params["PACKETSIZE"]
PORT = params["PORT"]
NR_THREAD = params["NR_THREAD"]
MAX_BACKLOG = params["MAX_BACKLOG"]
def createfile(reps=10):
'''
n reps === 2^n lines
means 2^n+1 * 32bytes
reps: 10 -> 32KB
15 -> 1MB
17 -> 4MB
20 -> 32MB
22 -> 128MB
'''
os.system("cat /dev/null > file.txt")
os.system("printf \"HelloHelloHello\nWorldWorldWorld\n\" -> file.txt;")
for i in range(reps):
os.system("cat file.txt file.txt > file2.txt && mv file2.txt file.txt")
def countreps(FILESIZE):
n = int(math.log2(FILESIZE))
return 15 + n
# This function is used by multiple threads, be cautious about synchronization
def accept_transmit():
count = 1
while(1):
if count == 1:
print("\n\nThis is the server, request your file please!\n")
# with each client's request, generate a new socket to handle the request
# Described in https://www.scottklement.com/rpg/socktut/selectserver.html
sock_trans, _ = sock_listn.accept()
# default FILESIZE is 8MB, change it in macro.py
TOTALBYTES = FILESIZE * 1024 * 1024
with open("file.txt", "rb") as f:
while TOTALBYTES != 0:
content = f.read(PACKETSIZE)
sock_trans.send(content)
TOTALBYTES -= len(content)
sock_trans.close()
print("sent %d MB\n" % ( FILESIZE ))
count += 1
class myThread(threading.Thread):
def __init__(self, nr):
threading.Thread.__init__(self)
self.nr = nr
def run(self):
print("Thread # %d starts" % self.nr)
accept_transmit()
if __name__ == '__main__':
rep = countreps(FILESIZE)
createfile(reps=rep)
# server needs two sockets
# one for listening to requests, another for transmission
# In this lab, we choose both to be TCP socket
sock_listn = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock_listn.bind(("10.0.0.1", PORT)) # bind to localhost
sock_listn.listen(MAX_BACKLOG)
# start multithread serving the clients
# default # of threads is 4, change it in macro.py
# (more threads than this does not necessarily mean more throughput)
threads = []
for i in range(NR_THREAD):
newthread = myThread(i + 1)
threads.append(newthread)
for thread in threads:
thread.start()
```
#### File: lab3/P2P/client.py
```python
import socket
from time import sleep
import os
import json
import threading
with open("../macro.json", 'r') as f:
params = json.load(f)
FILESIZE = params["FILESIZE"]
PACKETSIZE = params["PACKETSIZE"]
NR_HOST = params["NR_HOST"]
MAX_BACKLOG = params["MAX_BACKLOG"]
MAX_FILE = params["MAX_FILE"]
FILE_OVER_FLOW = params["FILE_OVER_FLOW"]
# serverport is for requesting local chunk from server
SERVERPORT = params["PORT"]
#* clientport is for
#* listening from other clients and sent local chunk
#* request remote chunk from other clients
CLIENTPORT = params["P2P_CLIENT_PORT"]
# NEED TO ACQUIRE ALL CHUNKS FROM OTHER CLIENTS
NR_CHUNK = params["NR_HOST"]
def getips():
baseip = "10.0.0."
ips = []
for i in range(NR_HOST):
# i = 0 -> h1 -> 10.0.0.2
postfix = str(i + 2)
ip = baseip + postfix
ips.append(ip)
return ips
# get the valid filename to save in local directory
def getfilename():
for i in range(MAX_FILE):
if os.path.exists("save%d.txt" % i):
i += 1
else:
return "save%d.txt" % i
return FILE_OVER_FLOW
# get local ip through udp
def get_local_ip():
try:
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
while s.connect_ex(("10.0.0.1", 80)) != 0:
sleep(1)
ip= s.getsockname()[0]
finally:
s.close()
return ip
def calc_size_each_chunk():
totalsize = FILESIZE * 1024 * 1024
totalchunk = NR_CHUNK
each_chunksize_int = totalsize // totalchunk
chunk_size = []
for i in range(NR_CHUNK - 1):
chunk_size.append(each_chunksize_int)
totalsize -= each_chunksize_int
chunk_size.append(totalsize)
return chunk_size
class listenThread(threading.Thread):
'''
ListenThread not only just serve the clients by transmitting the local chunk
#! it first downloads the local chunk from server!
#! remember to set the is_local_rdy to True after local chunk is ready
'''
def __init__(self, localip, ips, chunk_size, chunks):
threading.Thread.__init__(self)
self.localip = localip
self.ips = ips
self.chunk_size = chunk_size
self.chunks = chunks
def run(self):
print("I am the listening thread of %s", self.localip)
#! first, request the local chunk
sock_download = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
while sock_download.connect_ex(("10.0.0.1", SERVERPORT)) != 0:
sleep(1)
print("connected")
selfchunkidx = self.ips.index(self.localip)
totalsize = self.chunk_size[selfchunkidx]
while totalsize != 0:
content = sock_download.recv(PACKETSIZE).decode()
if content != "":
self.chunks[selfchunkidx] += content
totalsize -= len(content)
# local chunk transfer done, set the global variable
global is_local_rdy
is_local_rdy = True
sock_download.close()
print("OVER")
#! second, listen and transmit local chunk
#TODO: now serial, maybe parallel?
sock_listn = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock_listn.bind((self.localip, CLIENTPORT)) # bind to localhost
sock_listn.listen(MAX_BACKLOG)
count = 0
served = []
while(1):
if count == 1:
print("Localrchunk download rdy, start serving other clients")
sock_trans, client_ip = sock_listn.accept()
local_chunk_size = self.chunk_size[selfchunkidx]
local_chunk = self.chunks[selfchunkidx]
count = 0
while local_chunk_size != 0:
content = local_chunk[count * PACKETSIZE: (count + 1) * PACKETSIZE]
content = content.encode('utf8')
sock_trans.send(content)
local_chunk_size -= len(content)
count += 1
sock_trans.close()
print("local chunk sent to %s", client_ip)
served.append(client_ip)
count += 1
if len(served) == NR_HOST - 1:
sock_listn.close()
break
return
class downloadThread(threading.Thread):
'''
localip: ip of the host running this thread
ips: all ips within the p2p topology
nr_chunk: the number of chunks needed
chunks: chunks to be saved in order
'''
def __init__(self, localip, ips, NR_CHUNK, chunk_size, chunks):
threading.Thread.__init__(self)
# self.chunks is the chunks to be saves
self.localip = localip
# ips is all host's ips
self.ips = ips
self.chunks = chunks
self.chunk_size = chunk_size
def run(self):
print("I am the downloading thread of %s " % self.localip)
chunkidx_needed = [i for i in range(NR_CHUNK)]
selfchunkidx = self.ips.index(self.localip)
chunkidx_needed.pop(selfchunkidx)
# serial download
#TODO: maybe upgrade it to parallel download?
for idx in chunkidx_needed:
sock_download = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
ip_to_connect = self.ips[idx]
while sock_download.connect_ex((ip_to_connect, CLIENTPORT)) != 0:
sleep(1)
# print("trying to connect to %s" % ip_to_connect)
print("Connected to %s" % ip_to_connect)
totalsize = self.chunk_size[idx]
while totalsize != 0:
content = sock_download.recv(PACKETSIZE).decode()
if content != "":
self.chunks[idx] += content
totalsize -= len(content)
sock_download.close()
print("Chunk %d downloaded to local %d" % (idx, selfchunkidx))
filename = getfilename()
# blocked here, until all chunks are ready
global is_local_rdy
while (is_local_rdy is False):
pass
if filename != FILE_OVER_FLOW:
with open(filename, "w", encoding="utf-8") as f:
for chunk in self.chunks:
f.write(chunk)
print("My name is %s, all chunks are saved in local directory!" % localip)
return
# two jobs, give out local chunks, download remote chunk
if __name__ == '__main__':
localip = get_local_ip()
ips = getips()
chunk_size = calc_size_each_chunk()
#! chunks list is modified by threads!!!!!!!!!!!!!!
#TODO remember to pass the chunks list to both threads!!!!!!!!
chunks = ["" for _ in range(NR_CHUNK)]
# listen thread works permanently (after the chunk is ready)
# download thread works until all chunks are downloaded
is_local_rdy = False
listen_thread = listenThread(localip, ips, chunk_size, chunks)
download_thread = downloadThread(localip, ips, NR_CHUNK, chunk_size, chunks)
download_thread.start()
listen_thread.start()
```
#### File: SJTU-networking-labs/ryulab/ryutest.py
```python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import OVSBridge, OVSSwitch, OVSKernelSwitch
from mininet.node import CPULimitedHost
from mininet.node import RemoteController
from mininet.link import TCLink
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from sys import argv
# It would be nice if we didn't have to do this:
# # pylint: disable=arguments-differ
def Test():
"Create network and run simple performance test"
net = Mininet( switch=OVSSwitch,
host=CPULimitedHost,
link=TCLink,
autoStaticArp=False, controller=RemoteController)
switch1 = net.addSwitch('s1')
switch2 = net.addSwitch('s2')
switch3 = net.addSwitch('s3')
switch4 = net.addSwitch('s4')
host1 = net.addHost('h1', cpu=.25, mac='00:00:00:00:00:01')
host2 = net.addHost('h2', cpu=.25, mac='00:00:00:00:00:02')
net.addLink(host1, switch1, bw=10, delay='5ms', loss=0, use_htb=True)
net.addLink(host2, switch2, bw=10, delay='5ms', loss=0, use_htb=True)
net.addLink(switch1, switch3, bw=10, delay='5ms', loss=0, use_htb=True)
net.addLink(switch1, switch4, bw=10, delay='5ms', loss=0, use_htb=True)
net.addLink(switch2, switch3, bw=10, delay='5ms', loss=0, use_htb=True)
net.addLink(switch2, switch4, bw=10, delay='5ms', loss=0, use_htb=True)
c1 = net.addController('c1', controller=RemoteController, ip="127.0.0.1", port=6653)
net.build()
c1.start()
s1, s2, s3, s4 = net.getNodeByName('s1', 's2', 's3', 's4')
# s1, s2, s3= net.getNodeByName('s1', 's2', 's3')
s1.start([c1])
s2.start([c1])
s3.start([c1])
s4.start([c1])
net.start()
info( "Dumping host connections\n" )
dumpNodeConnections(net.hosts)
h1, h2 = net.getNodeByName('h1', 'h2')
CLI(net)
net.stop()
if __name__ == '__main__':
# setLogLevel( 'debug' )
setLogLevel('info')
Test()
``` |
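The `Topo` import in ryutest.py is unused; the topology is built imperatively inside `Test()`. For reuse from the `mn` command line, the same wiring can be packaged as a `Topo` subclass. The sketch below is not part of the lab code; it only assumes Mininet's standard custom-topology interface (`mn --custom <file> --topo diamond --link tc --controller remote`).
```python
# Sketch (assumption, not lab code): the ryutest.py wiring as a reusable Topo subclass.
from mininet.topo import Topo

class DiamondTopo(Topo):
    "Two hosts, four switches, redundant s3/s4 core links (same wiring as Test())."
    def build(self):
        s1, s2, s3, s4 = [self.addSwitch('s%d' % i) for i in range(1, 5)]
        h1 = self.addHost('h1', mac='00:00:00:00:00:01')
        h2 = self.addHost('h2', mac='00:00:00:00:00:02')
        self.addLink(h1, s1, bw=10, delay='5ms')
        self.addLink(h2, s2, bw=10, delay='5ms')
        for core in (s3, s4):
            self.addLink(s1, core, bw=10, delay='5ms')
            self.addLink(s2, core, bw=10, delay='5ms')

# bw/delay take effect when the net is built with TCLink (e.g. `mn --link tc`).
topos = {'diamond': (lambda: DiamondTopo())}
```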
{
"source": "jixishi/python-Practice",
"score": 2
} |
#### File: jixishi/python-Practice/BE6php.net.py
```python
import sys
import thread
import tool
sys.path.append(r'.\ipy\BE6')
from bottle import route, run, request, static_file, error, hook, response
import mc
def load_plugin():
    print('[BE6CLOUD] BE6php plugin loaded')
    print('[BE6CLOUD] ========================')
    print('[BE6CLOUD] Default port is 8080')
    print('[BE6CLOUD] Change the last line of this file if you need another port')
    print('[BE6CLOUD] Remote console configuration:')
    print('[BE6CLOUD] console address is')
    print('[BE6CLOUD] ip:8080')
    print('[BE6CLOUD] credentials are set on lines 40 and 41')
print (sys.path)
thread.start_new_thread(be6, ())
@route('/help')
def helps():
return static_file('help.html', root='./BE6')
@route('/sq', method='POST')
def sq():
qq = request.forms.get('contact_qq')
email = request.forms.get('contact_email')
boxed = request.forms.get('contact_id')
print(qq, email, boxed)
if qq != "":
if email != "":
if boxed != "":
mc.runcmd('whitelist add ' + boxed)
import time
sj = time.strftime('%Y-%m-%d %H:%M:%S')
work = tool.WorkingPath()
                path = work + '/plugins/BE6php/whitesq.txt'
f = open(path, "a")
f.write("申请时间->{}\nQQ:{},邮箱:{},Xbox ID:{},\n>-------------------------<\n".format(sj, qq, email, boxed))
f.close()
# Change the username and password below
# Do not leak them
@route('/cmd', method='POST')
def cmd():
name = request.forms.get('name')
passwd = request.forms.get('pass')
mccmd = request.forms.get('cmd')
mm = "jxs"
wd = "be6"
if name == mm:
if passwd == wd:
mc.runcmd(mccmd)
def pfile():
if not tool.IfDir('./plugins/BE6php'):
tool.CreateDir('./plugins/BE6php')
        print('[INFO] BE6php is creating its data folder...')
    else:
        print('[INFO] BE6php: plugin folders look good')
if not tool.IfFile('./plugins/BE6php/whitesq.txt'):
tool.WriteAllText('./plugins/BE6php/whitesq.txt', '')
@route('/cmd')
def login():
return static_file('login.html', root='./BE6')
@route('/favicon.ico')
def favicon():
return static_file('favicon.ico', root='./BE6')
@route('/')
def index():
return static_file('index.html', root='./BE6')
@hook('after_request')
def enable_cors():
response.headers['Access-Control-Allow-Origin'] = '*'
@error(404)
def error404():
return "我找不到目标了,我发生错误了"
def be6():
run(host='0.0.0.0', port=17849, debug=True)
```
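For reference, the two POST routes above can be exercised from any HTTP client. A minimal sketch with `requests`, assuming the server is reachable on the port passed to `run()` (17849) and using the credentials hard-coded in `cmd()`:
```python
# Quick client-side check of the '/sq' and '/cmd' routes (host/port assumed).
import requests

BASE = 'http://127.0.0.1:17849'

# Whitelist application form handled by sq().
requests.post(BASE + '/sq', data={
    'contact_qq': '12345678',
    'contact_email': 'player@example.com',
    'contact_id': 'SomeXboxGamertag',
})

# Remote console handled by cmd(); name/pass must match the values hard-coded in the plugin.
requests.post(BASE + '/cmd', data={'name': 'jxs', 'pass': 'be6', 'cmd': 'list'})
```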
#### File: jixishi/python-Practice/be6requist.py
```python
from fake_useragent import UserAgent  # install with: pip install fake-useragent
ua = UserAgent()  # instantiating needs network access; the backing site can be flaky, so this may take a while
headers = {
    'User-Agent': ua.random  # random UA string to disguise the client
}
import requests
def get_proxy():
return requests.get("http://127.0.0.1:5010/get/").json()
def delete_proxy(proxy):
requests.get("http://127.0.0.1:5010/delete/?proxy={}".format(proxy))
def getHtml(path):
# ....
retry_count = 5
proxy = get_proxy().get("proxy")
while retry_count > 0:
try:
html = requests.get(path, headers=headers, proxies={"http": "http://{}".format(proxy)})
            # fetch the page through the proxy
return html
except Exception:
retry_count -= 1
            # remove the dead proxy from the proxy pool
delete_proxy(proxy)
```
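A small usage sketch for `getHtml`, assuming the proxy_pool service queried above is running locally on port 5010 and that this snippet is appended to the same script:
```python
# Usage sketch (assumes the proxy pool at 127.0.0.1:5010 is up).
if __name__ == '__main__':
    resp = getHtml('http://httpbin.org/ip')
    if resp is not None:
        print(resp.status_code)
        print(resp.text)   # should show the proxy's IP, not the local one
    else:
        print('all retries failed; the proxy pool may be empty')
```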
#### File: jixishi/python-Practice/tool.py
```python
import ctypes
import json
import os
import progressbar
import requests
import urllib.parse
import urllib.request
import requests.packages.urllib3
import win32api
import win32con
def IfDir(path):  # check whether a directory exists
    return os.path.isdir(path)
def CreateDir(path):  # create a directory (and its parents)
    os.makedirs(path)
def IfFile(path):  # check whether a file exists
    return os.path.isfile(path)
def WriteAllText(path, text):  # overwrite (or create) the file with the given text
file = open(path, mode="w", encoding="utf-8")
file.write(text)
file.close()
def WriteCapText(path, text):  # append text to the file
file = open(path, mode="a", encoding="utf-8")
file.write(text)
file.close()
# Read or write a file in the given mode ("w", "a" or "r")
def WriteText(path, mode, text):
    if mode in ("w", "a"):
        file = open(path, mode=mode, encoding="utf-8")
        file.write(text)
        file.close()
    elif mode == "r":
        # return a generator that yields the file contents in 1 MiB blocks
        def read_blocks():
            file = open(path, mode="r", encoding="utf-8")
            while True:
                output = file.read(1024 * 1024)
                if not output:
                    break
                yield output
            file.close()
        return read_blocks()
def WorkingPath():  # return the current working directory
    return os.getcwd()
def FileLine(path):  # return the lines of a file as a list (newlines stripped)
ls = []
for line in open(path, 'r', encoding='utf-8'):
ls.append(line.strip('\n'))
return ls
# Split the file's lines into `amount` interleaved lists and return them, e.g.
# lt1, lt2, lt3, ..., ltn = CreateList(path, n)
# lt_i is the list built from every n-th line starting at line i
def CreateList(path, amount):
    lt = []
    for line in open(path, 'r', encoding='utf-8'):
        lt.append(line.strip('\n'))
    return tuple(lt[i::amount] for i in range(amount))
# Pop up a dialog box (title, content, mode) using win32api.MessageBox
def WinTitleA(title, con, mode):
if mode == "o": # 提醒OK消息框
win32api.MessageBox(0, con, title, win32con.MB_OK)
elif mode == "yn": # 是否信息框
win32api.MessageBox(0, con, title, win32con.MB_YESNO)
elif mode == "s": # 说明信息框
win32api.MessageBox(0, con, title, win32con.MB_HELP)
elif mode == "!": # 警告信息框
win32api.MessageBox(0, con, title, win32con.MB_ICONWARNING)
elif mode == "q": # 疑问信息框
win32api.MessageBox(0, con, title, win32con.MB_ICONQUESTION)
elif mode == "t": # 提示信息框
win32api.MessageBox(0, con, title, win32con.MB_ICONASTERISK)
elif mode == "y": # 确认信息框
win32api.MessageBox(0, con, title, win32con.MB_OKCANCEL)
elif mode == "r": # 重试信息框
win32api.MessageBox(0, con, title, win32con.MB_RETRYCANCEL)
elif mode == "ynb": # 是否取消信息框
win32api.MessageBox(0, con, title, win32con.MB_YESNOCANCEL)
# Pop up a dialog box (title, content, mode) using ctypes MessageBoxW
def WinTitleUI(title, con, mode):
if mode == "o": # 提醒OK消息框
ctypes.WinDLL("user32.dll").MessageBoxW(0, con, title, win32con.MB_OK)
elif mode == "yn": # 是否信息框
ctypes.WinDLL("user32.dll").MessageBoxW(0, con, title, win32con.MB_YESNO)
elif mode == "s": # 说明信息框
ctypes.WinDLL("user32.dll").MessageBoxW(0, con, title, win32con.MB_HELP)
elif mode == "!": # 警告信息框
ctypes.WinDLL("user32.dll").MessageBoxW(0, con, title, win32con.MB_ICONWARNING)
elif mode == "q": # 疑问信息框
ctypes.WinDLL("user32.dll").MessageBoxW(0, con, title, win32con.MB_ICONQUESTION)
elif mode == "t": # 提示信息框
ctypes.WinDLL("user32.dll").MessageBoxW(0, con, title, win32con.MB_ICONASTERISK)
elif mode == "y": # 确认信息框
ctypes.WinDLL("user32.dll").MessageBoxW(0, con, title, win32con.MB_OKCANCEL)
elif mode == "r": # 重试信息框
ctypes.WinDLL("user32.dll").MessageBoxW(0, con, title, win32con.MB_RETRYCANCEL)
elif mode == "ynb": # 是否取消信息框
ctypes.WinDLL("user32.dll").MessageBoxW(0, con, title, win32con.MB_YESNOCANCEL)
def TLYoudao(text):
url_youdao = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=' \
'http://www.youdao.com/'
datatext = {'type': 'AUTO', 'doctype': 'json', 'xmlVersion': '1.8', 'keyfrom': 'fanyi.web', 'ue': 'UTF-8',
'action': 'FY_BY_CLICKBUTTON', 'typoResult': 'true', 'i': text}
data = urllib.parse.urlencode(datatext).encode('utf-8')
response = urllib.request.urlopen(url_youdao, data)
content = response.read().decode('utf-8')
data = json.loads(content)
result = data['translateResult'][0][0]['tgt']
return result
def DownLoad(save, url):  # download a file to `save`, showing a progress bar
    requests.packages.urllib3.disable_warnings()
    response = requests.request("GET", url, stream=True, data=None, headers=None)
    save_path = save
    total_length = int(response.headers.get("Content-Length"))
    with open(save_path, 'wb') as f:
        widgets = ['Progress: ', progressbar.Percentage(), ' ',
                   progressbar.Bar(marker='#', left='[', right=']'),
                   ' ', progressbar.ETA(), ' ', progressbar.FileTransferSpeed()]
        pbar = progressbar.ProgressBar(widgets=widgets, maxval=total_length).start()
        downloaded = 0
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                f.flush()
                downloaded += len(chunk)
                pbar.update(min(downloaded, total_length))
        pbar.finish()
``` |
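A short usage sketch for the file helpers above (the file name is made up; note that the module imports `win32api`/`win32con`, so it only loads on Windows):
```python
# Demonstrates FileLine and CreateList on a throwaway two-column file.
from tool import WriteAllText, FileLine, CreateList

WriteAllText('demo.txt', 'a1\nb1\na2\nb2\na3\nb3\n')
print(FileLine('demo.txt'))      # ['a1', 'b1', 'a2', 'b2', 'a3', 'b3']
col_a, col_b = CreateList('demo.txt', 2)
print(col_a)                     # ['a1', 'a2', 'a3']
print(col_b)                     # ['b1', 'b2', 'b3']
```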
{
"source": "jixj5/python-redfish-lenovo",
"score": 3
} |
#### File: python-redfish-lenovo/examples/get_psu_inventory.py
```python
import sys
import redfish
import json
import traceback
import lenovo_utils as utils
def get_psu_inventory(ip, login_account, login_password):
"""Get power supply unit inventory
:params ip: BMC IP address
:type ip: string
:params login_account: BMC user name
:type login_account: string
    :params login_password: BMC user password
:type login_password: string
:returns: returns power supply unit inventory when succeeded or error message when failed
"""
result = {}
psu_details = []
login_host = 'https://' + ip
try:
# Connect using the BMC address, account name, and password
# Create a REDFISH object
        REDFISH_OBJ = redfish.redfish_client(base_url=login_host, username=login_account, timeout=utils.g_timeout,
                                             password=login_password, default_prefix='/redfish/v1', cafile=utils.g_CAFILE)
# Login into the server and create a session
REDFISH_OBJ.login(auth=utils.g_AUTH)
except Exception as e:
traceback.print_exc()
result = {'ret': False, 'msg': "Error_message: %s. Please check if username, password and IP are correct" % repr(e)}
return result
try:
response_base_url = REDFISH_OBJ.get('/redfish/v1', None)
# Get response_base_url
if response_base_url.status == 200:
chassis_url = response_base_url.dict['Chassis']['@odata.id']
else:
error_message = utils.get_extended_error(response_base_url)
result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
'/redfish/v1', response_base_url.status, error_message)}
return result
response_chassis_url = REDFISH_OBJ.get(chassis_url, None)
if response_chassis_url.status == 200:
for request in response_chassis_url.dict['Members']:
request_url = request['@odata.id']
response_url = REDFISH_OBJ.get(request_url, None)
if response_url.status == 200:
# if chassis is not normal skip it
if len(response_chassis_url.dict['Members']) > 1 and ("Links" not in response_url.dict or "ComputerSystems" not in response_url.dict["Links"]):
continue
if 'PowerSubsystem' in response_url.dict:
# Get the powersubsystem resources
powersubsystem_url = response_url.dict['PowerSubsystem']['@odata.id']
response_powersubsystem_url = REDFISH_OBJ.get(powersubsystem_url, None)
if response_powersubsystem_url.status == 200:
if 'PowerSupplies' not in response_powersubsystem_url.dict:
result = {'ret': False, 'msg': "There is no PowerSupplies data in %s" % powersubsystem_url}
REDFISH_OBJ.logout()
return result
# Get PowerSupplies resources
powersupplies_url = response_powersubsystem_url.dict['PowerSupplies']['@odata.id']
response_powersupplies_url = REDFISH_OBJ.get(powersupplies_url, None)
                            for i in range(response_powersupplies_url.dict["Members@odata.count"]):
members_url = response_powersupplies_url.dict['Members'][i]['@odata.id']
response_members_url = REDFISH_OBJ.get(members_url, None)
psu = response_members_url.dict
for property in ["@odata.id", "@odata.context", "@odata.type", "@odata.etag"]:
if property in psu:
del psu[property]
if 'Metrics' in response_members_url.dict:
# Get Metrics resources of each PSU
metrics_url = response_members_url.dict['Metrics']['@odata.id']
response_metrics_url = REDFISH_OBJ.get(metrics_url, None)
metrics = response_metrics_url.dict
for property in ["@odata.id", "@odata.context", "@odata.type", "@odata.etag"]:
if property in metrics:
del metrics[property]
psu["Metrics"] = metrics
psu_details.append(psu)
else:
error_message = utils.get_extended_error(response_powersubsystem_url)
result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
powersubsystem_url, response_powersubsystem_url.status, error_message)}
return result
else:
# Get the power resources
power_url = response_url.dict['Power']['@odata.id']
response_power_url = REDFISH_OBJ.get(power_url, None)
if response_power_url.status == 200:
if 'PowerSupplies' not in response_power_url.dict:
result = {'ret': False, 'msg': "There is no PowerSupplies data in %s" % power_url}
REDFISH_OBJ.logout()
return result
power_supply_list = response_power_url.dict['PowerSupplies']
for PowerSupplies in power_supply_list:
entry = {}
for property in ['Name', 'SerialNumber', 'PowerOutputWatts', 'EfficiencyPercent', 'LineInputVoltage',
'PartNumber', 'FirmwareVersion', 'PowerCapacityWatts', 'PowerInputWatts', 'Model',
'PowerSupplyType', 'Status', 'Manufacturer', 'HotPluggable', 'LastPowerOutputWatts',
'InputRanges', 'LineInputVoltageType', 'Location']:
if property in PowerSupplies:
entry[property] = PowerSupplies[property]
if 'Oem' in PowerSupplies and 'Lenovo' in PowerSupplies['Oem']:
entry['Oem'] = {'Lenovo':{}}
for oemprop in ['FruPartNumber', 'ManufactureDate', 'ManufacturerName']:
if oemprop in PowerSupplies['Oem']['Lenovo']:
entry['Oem']['Lenovo'][oemprop] = PowerSupplies['Oem']['Lenovo'][oemprop]
psu_details.append(entry)
else:
error_message = utils.get_extended_error(response_power_url)
result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
power_url, response_power_url.status, error_message)}
return result
else:
error_message = utils.get_extended_error(response_url)
result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
request_url, response_url.status, error_message)}
return result
else:
error_message = utils.get_extended_error(response_chassis_url)
result = {'ret': False, 'msg': "Url '%s' response Error code %s\nerror_message: %s" % (
chassis_url, response_chassis_url.status, error_message)}
return result
if len(psu_details) > 0:
result['ret'] = True
result['entry_details'] = psu_details
else:
result['ret'] = False
result['entry_details'] = []
result['msg'] = "PowerSupplies are empty."
# Logout of the current session
except Exception as e:
traceback.print_exc()
result = {'ret': False, 'msg': "error_message: %s" % e}
finally:
# Logout of the current session
try:
REDFISH_OBJ.logout()
except:
pass
return result
if __name__ == '__main__':
# Get parameters from config.ini and/or command line
argget = utils.create_common_parameter_list()
args = argget.parse_args()
parameter_info = utils.parse_parameter(args)
# Get connection info from the parameters user specified
ip = parameter_info['ip']
login_account = parameter_info["user"]
    login_password = parameter_info["passwd"]
# Get power supply unit inventory and check result
result = get_psu_inventory(ip, login_account, login_password)
if result['ret'] is True:
del result['ret']
sys.stdout.write(json.dumps(result['entry_details'], sort_keys=True, indent=2))
else:
sys.stderr.write(result['msg'] + '\n')
sys.exit(1)
``` |
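Besides the CLI entry point, `get_psu_inventory` can be called directly. A sketch with placeholder BMC address and credentials:
```python
# Direct call of the helper above (BMC address and credentials are placeholders).
import json
from get_psu_inventory import get_psu_inventory

result = get_psu_inventory('10.240.0.100', 'USERID', 'PASSW0RD')
if result['ret']:
    print(json.dumps(result['entry_details'], sort_keys=True, indent=2))
else:
    print(result['msg'])
```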
{
"source": "jix/sby",
"score": 3
} |
#### File: examples/indinv/example.py
```python
from collections import defaultdict
import inspect
N = 32
f = lambda x: (2*x-1) ^ (x&7)
table = [f(i) & (N-1) for i in range(N)]
rtable = [table.count(i) for i in range(N)]
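# table[i] is f(i) mod N, i.e. the functional graph of f on {0..N-1}; rtable[i] is the
# in-degree of node i (how many values map onto it). getPath(v) walks v -> f(v) -> ...
# until it revisits a node, temporarily marking visited entries with None so the
# recursion stops when the walk closes a loop. getPaths() starts one walk from every
# source node (in-degree 0) and then from any node not yet visited, so together the
# returned paths cover the whole graph.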
def getPath(v):
if table[v] is None:
return [v]
bak = table[v]
table[v] = None
r = [v] + getPath(bak)
table[v] = bak
return r
def getPaths():
visited = set()
paths = list()
for i in range(N):
if rtable[i] == 0:
paths.append(getPath(i))
for path in paths:
for i in path:
visited.add(i)
for i in range(N):
if i not in visited:
paths.append(getPath(i))
for j in paths[-1]:
visited.add(j)
return paths
pathsByLidx = defaultdict(set)
loopsByIdx = dict()
loopsByLidx = dict()
for path in getPaths():
i = path.index(path[-1])+1
head, loop, lidx = tuple(path[:i]), tuple(path[i:]), max(path[i:])
pathsByLidx[lidx].add((head, loop))
print()
print("The %d-bit function f(x) produces %d loops:" % (N.bit_length()-1, len(pathsByLidx)))
print(" ", inspect.getsource(f).strip())
for lidx, paths in pathsByLidx.items():
loop = None
for path in paths:
for i in path[0] + path[1]:
loopsByIdx[i] = lidx
if loop is None or path[1][0] > loop[0]:
loop = path[1]
loopsByLidx[lidx] = loop
print()
print("%d-Element Loop:" % len(loop))
print(" ", " ->- ".join(["%2d" % i for i in loop + (loop[0],)]))
lines = []
lastPath = []
for path in sorted([tuple(reversed(p[0])) for p in paths]):
line = ""
for i in range(len(path)):
if i < len(lastPath) and lastPath[i] == path[i]:
line += " %s " % (" " if i == 0 else "| ")
else:
line += " %s %2d" % (" " if i == 0 else "`<-" if len(lastPath) else "-<-", path[i])
lastPath = []
lastPath = path
lines.append(line)
for i in range(len(lines)-1, -1, -1):
line, nextline = list(lines[i]), "" if i == len(lines)-1 else lines[i+1]
if len(nextline) < len(line): nextline = nextline.ljust(len(line))
for k in range(len(line)):
if line[k] == "|" and nextline[k] in " -":
line[k] = " "
lines[i] = "".join(line)
print("%d Lead-Ins:" % len(lines))
for line in lines:
print(line)
print()
print("Loop Membership:")
for lidx in pathsByLidx:
print("%18s |" % (loopsByLidx[lidx],), end="")
for i in range(N):
print("*" if loopsByIdx[i] == lidx else ".", end="")
print("|")
print()
``` |
{
"source": "jixuan-wang/Grad2Task",
"score": 2
} |
#### File: jixuan-wang/Grad2Task/meta_dataset.py
```python
import itertools
import os
from transformers.data.processors.utils import InputFeatures
from utils import get_device_name
import torch
import numpy as np
from torch.utils.data import (DataLoader, TensorDataset, dataloader)
from torch.utils.data.sampler import Sampler, WeightedRandomSampler
from transformers import glue_output_modes
from transformers import glue_processors
from transformers import glue_convert_examples_to_features
from logging_utils import get_logger
logger = get_logger('Meta-Data-Loader')
TASK_TEXT_LABELS = {}
TASK_TEXT_LABELS['MNLI'] = ["contradiction", "entailment", "neutral"]
TASK_TEXT_LABELS['MRPC'] = ["not paraphase", "paraphase"]
TASK_TEXT_LABELS['SST-2'] = ["negative movie review", "positive movie review"]
TASK_TEXT_LABELS['QQP'] = ["not paraphase", "paraphase"]
TASK_TEXT_LABELS['QNLI'] = ["entailment", "not entailment"]
TASK_TEXT_LABELS['RTE'] = ["entailment", "not entailment"]
TASK_TEXT_LABELS['SNLI'] = ["contradiction", "entailment", "neutral"]
TASK_TEXT_LABELS['CoLA'] = ['not grammatically acceptable', 'grammatically acceptable']
TASK_TEXT_LABELS['SciTail'] = ["entailment", "neutral"]
def load_and_cache_examples(args, data_dir, task, tokenizer, split):
processor = glue_processors[task]()
output_mode = glue_output_modes[task]
# Load data features from cache or dataset file
cached_features_file = os.path.join(data_dir, 'cached_{}_{}_{}_{}'.format(
split,
args.lm_type,
str(args.max_seq_length),
str(task)))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", data_dir)
label_list = processor.get_labels()
if split == 'val':
examples = processor.get_dev_examples(data_dir)
elif split == 'train':
examples = processor.get_train_examples(data_dir)
elif split == 'test':
pass
else:
raise ValueError(f'Unsupported split: {split}')
features = glue_convert_examples_to_features(examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=False,
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=0,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
logger.info('Convert to Tensors and build dataset')
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
logger.info(f'Finish loading {task}')
return dataset
class ClassBalancedRandomSampler(Sampler):
"""Samples elements randomly with class balance.
Attributes:
data_source_labels (list): List of dataset labels.
strict_balance (bool): If true, every batch with the size of N * num_class
will be class balanced. For example:
disturb: Whether to disturb the dataset.
Class 1: * * * * * * * *
Class 2: # # # # # # # # #
if the batch size is 3 * 2 = 6, only the last batch is discarded since
it's not class balanced:
Class 1: |* * *|* * *|* *
Class 2: |# # #|# # #|# # #
|-> discarded
"""
def __init__(self, data_source_labels, strict_balance=False, disturb=True):
self.data_source_labels = data_source_labels
self.strict_balance = strict_balance
        self.disturb = disturb
def __iter__(self):
"""
        Here we permute the indices of each class separately and then merge
        them so that the corresponding label sequence looks like:
            # * # * # * # * # * # * # * # * # * ...
        Sampling batches sequentially with a size of N * num_class then yields
        class-balanced batches, except for the last few batches depending on how
        balanced the dataset is.
"""
unique_labels = list(set(self.data_source_labels))
label_list = np.array(self.data_source_labels)
perm_list = []
label_idx = {}
for l in unique_labels:
idx = np.where(label_list == l)[0]
            if self.disturb:
idx = np.random.permutation(idx)
label_idx[l] = idx.tolist()
# use min to make sure every class is include in each batch with size of N * num_class
min_or_max = min if self.strict_balance else max
size = min_or_max([len(label_idx[l]) for l in label_idx])
for _ in range(size):
for l in label_idx:
if len(label_idx[l]) > 0:
perm_list.append(label_idx[l].pop())
return iter(perm_list)
def __len__(self):
return len(self.data_source_labels)
class MetaBatch:
"""Used for parallel episodic training with DataParallel.
One episode on each device. Note we can't simply feed a dictionary into
DataParallel because each value will be split along the first dimension. The
attributes of an object will not be split.
"""
def __init__(self, device_batch_dict, task_name_list=None):
for device_name in device_batch_dict:
setattr(self, device_name, device_batch_dict[device_name])
self.task_name_list = task_name_list
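# Note on MetaBatch: torch.nn.DataParallel scatters tensor (and dict-of-tensor) inputs
# along dim 0, but it leaves arbitrary object attributes untouched. Storing each
# device's episode list as an attribute named after that device therefore lets every
# replica recover exactly its own episodes, e.g. getattr(meta_batch, get_device_name(device)).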
class DatasetProcessor:
"""Abstract class for dataset processor.
Attributes:
args
tokenizer: e.g. BERT tokenizer
train_task_list (list): List of tasks for meta-training.
val_task_list (list): List of tasks for meta-validation.
test_task_list (list): List of tasks for meta-testing.
"""
def __init__(self, args, tokenizer, train_task_list, val_task_list, test_task_list):
self.args = args
self.tokenizer = tokenizer
self.split_task_list = {
'train': train_task_list,
'val': val_task_list,
# 'test': test_task_list
}
self.init_dataloader()
def init_dataloader(self):
raise NotImplementedError()
def features_to_tensors(self, features):
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
return (all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
def text_to_features(self, text, label):
mask_padding_with_zero = True
max_length=self.args.max_seq_length
pad_on_left = False
pad_token=self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0]
pad_token_segment_id=0
inputs = self.tokenizer.encode_plus(
text,
add_special_tokens=True,
max_length=max_length
)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
return InputFeatures(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label)
def _episode_generator(self, dataloader, infinite_loop=True):
if infinite_loop:
while True:
for episode in dataloader:
yield episode
else:
for episode in dataloader:
yield episode
def _task_generator(self, task_list, sample_weights=None):
if sample_weights is None:
while True:
yield np.random.choice(task_list)
else:
if len(sample_weights) != len(task_list):
raise ValueError('Count of sampling weights and tasks must match.')
if abs(sum(sample_weights) - 1) > 0.0001:
raise ValueError('Sampling weights need to be normalized.')
while True:
for i in WeightedRandomSampler(sample_weights, 100, replacement=True):
yield task_list[i]
def _prepare_episode(self, batch, task_id=None, label_features=None, text_labels=None, device=None):
""" Batch -> Episode
Args:
batch (tuple<torch.Tensor>): First half is the support set; second
half is the query set.
Returns:
dict: Data for this episode.
"""
if task_id is not None:
task_id = torch.tensor(task_id, dtype=torch.int)
if device is not None:
batch = tuple(t.to(device) for t in batch)
# num_examples = batch[0].shape[0]
# total_num_batches = num_query_batches + num_support_batches
# num_support = num_examples * num_support_batches // total_num_batches
if batch[3].max() + 1 != batch[3].unique().shape[0]:
raise ValueError('Largest class id should match number of classes.')
episode = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2],
'labels': batch[3] }
if task_id is not None:
episode['task_id'] = task_id.to(device) if device is not None else task_id
if label_features is not None:
label_features = tuple(t.to(device) for t in label_features) if device is not None else label_features
episode['label_features'] = label_features
if text_labels is not None:
episode['text_labels'] = text_labels
return episode
class MetaDatasetProcessor(DatasetProcessor):
""" Dataset processor for meta-training on GLUE tasks. """
def __init__(self, args, tokenizer, train_task_list, val_task_list, test_task_list):
super().__init__(args, tokenizer, train_task_list, val_task_list, test_task_list)
def _init_dataloader_of_split(self, task_name, split):
logger.info(f'***** Loading data for {split} *****')
if task_name.lower() not in glue_processors:
raise ValueError("Task not found: %s" % (task_name))
processor = glue_processors[task_name.lower()]()
# output_mode = glue_output_modes[task_name.lower()]
batch_size = (self.args.num_shots_support+self.args.num_shots_query) * len(processor.get_labels())
data_dir = os.path.join(self.args.data_dir, task_name)
dataset = load_and_cache_examples(self.args, data_dir, task_name.lower(),
self.tokenizer, split)
dataloader = DataLoader(dataset,
sampler=ClassBalancedRandomSampler(dataset.tensors[-1].tolist(), strict_balance=True),
batch_size=batch_size,
drop_last=True)
return dataloader, len(dataset)
def init_dataloader(self):
self.train_episode_gen_dict = {}
self._train_task_id_dict = {}
for i, task in enumerate(self.split_task_list['train']):
self._train_task_id_dict[task] = i
self._val_task_id_dict = {}
for i, task in enumerate(self.split_task_list['val']):
self._val_task_id_dict[task] = i
val_dataloader_dict = {}
test_dataloader_dict = {}
task_train_size = []
total_num_batches_each_episode = self.args.num_query_batches + self.args.num_support_batches
train_total_episode = 0
val_total_episode = 0
test_total_episode = 0
# label feature generation
logger.info("Generating task label features")
self.task_label_features = {}
all_tasks = set(itertools.chain.from_iterable(self.split_task_list.values()))
for task_name in set(all_tasks):
self.task_label_features[task_name] = self.features_to_tensors([
self.text_to_features(' # '.join(TASK_TEXT_LABELS[task_name]), -1)
])
self.split_dataloader = {}
train_task_size_list = []
for split, task_list in self.split_task_list.items():
logger.info(f'***** Loading split: {split} *****')
dataloader_dict = {}
total_episode = 0
for task_name in task_list:
dataloader_dict[task_name], task_size = self._init_dataloader_of_split(task_name, split)
total_episode += len(dataloader_dict[task_name])
if split == 'train':
self.train_episode_gen_dict[task_name] = self._episode_generator(dataloader_dict[task_name])
train_task_size_list.append(task_size)
setattr(self, f'{split}_num_episode_per_epoch', total_episode)
self.split_dataloader[split] = dataloader_dict
if split == 'train':
# Sample dataset according sqrt of data size.
train_task_size_list = np.array(train_task_size_list)
train_task_size_list = np.sqrt(train_task_size_list)
train_task_size_list = train_task_size_list / np.sum(train_task_size_list)
self._train_task_gen = self._task_generator(task_list,
sample_weights=train_task_size_list.tolist())
def get_train_episode(self, num_episodes_per_device, device_list):
""" Get data of one episode. """
task_index = 0
device_batch_dict = {}
task_name_list = []
for device in device_list:
episode_list = []
for _ in range(num_episodes_per_device):
task_name = next(self._train_task_gen)
batch = next(self.train_episode_gen_dict[task_name])
episode = self._prepare_episode(batch,
task_id=self._train_task_id_dict[task_name],
label_features=self.task_label_features[task_name],
text_labels=TASK_TEXT_LABELS[task_name],
device=device)
task_index += 1
episode_list.append(episode)
task_name_list.append(task_name)
device_batch_dict[get_device_name(device)] = episode_list
# device_batch_dict[get_device_name(device)] = {k: torch.cat([episode[k] for episode in episode_list], dim=0)
# for k in episode_list[0]}
return MetaBatch(device_batch_dict, task_name_list=task_name_list)
def get_train_episode_different_task_on_each_device(self, num_episodes_per_device, device_list):
""" Get data of one episode. """
task_index = 0
device_batch_dict = {}
for device in device_list:
episode_list = []
saved_task = []
for _ in range(num_episodes_per_device):
while True:
task_name = next(self._train_task_gen)
if task_name not in saved_task:
saved_task.append(task_name)
break
                batch = next(self.train_episode_gen_dict[task_name])
episode = self._prepare_episode(batch,
task_id=self._train_task_id_dict[task_name],
label_features=self.task_label_features[task_name],
text_labels=TASK_TEXT_LABELS[task_name],
device=device)
task_index += 1
episode_list.append(episode)
device_batch_dict[get_device_name(device)] = episode_list
# device_batch_dict[get_device_name(device)] = {k: torch.cat([episode[k] for episode in episode_list], dim=0)
# for k in episode_list[0]}
return MetaBatch(device_batch_dict)
def val_episode_loop(self, num_episodes_per_device, device_list, max_num_episode=-1):
device_batch_dict = {}
task_index = 0
task_name_list = []
for task_name in self.split_task_list['val']:
count = 0
for batch in self.split_dataloader['val'][task_name]:
device_idx = task_index // num_episodes_per_device
device_name = get_device_name(device_list[device_idx])
episode = self._prepare_episode(batch,
task_id=self._val_task_id_dict[task_name],
label_features=self.task_label_features[task_name],
text_labels=TASK_TEXT_LABELS[task_name],
device=device_list[device_idx])
if not device_name in device_batch_dict:
device_batch_dict[device_name] = [episode]
else:
device_batch_dict[device_name].append(episode)
task_name_list.append(task_name)
task_index += 1
if task_index == num_episodes_per_device * len(device_list):
yield MetaBatch(device_batch_dict, task_name_list=task_name_list)
device_batch_dict.clear()
task_index = 0
task_name_list = []
count += 1
if max_num_episode > 0 and count == max_num_episode:
break
if task_index > 1:
yield MetaBatch(device_batch_dict, task_name_list=task_name_list)
def val_episode_loop_different_task_on_each_device(self, num_episodes_per_device, device_list):
epi_count = 0
max_epi_count = 50
        task_batch_iter_dict = {task: iter(loader) for task, loader in self.split_dataloader['val'].items()}
while True:
device_batch_dict = {}
task_name_list = []
for device_idx in range(len(device_list)):
device_task_list = []
device_task_batch_dict = {}
                for task in self.split_task_list['val']:
if len(device_task_list) < num_episodes_per_device:
batch = next(task_batch_iter_dict[task], None)
if batch is not None:
device_task_list.append(task)
device_task_batch_dict[task] = batch
if len(device_task_list) == num_episodes_per_device:
for task_name in device_task_list:
device_name = get_device_name(device_list[device_idx])
batch = device_task_batch_dict[task_name]
episode = self._prepare_episode(batch,
task_id=self._val_task_id_dict[task_name],
label_features=self.task_label_features[task_name],
text_labels=TASK_TEXT_LABELS[task_name],
device=device_list[device_idx])
if not device_name in device_batch_dict:
device_batch_dict[device_name] = [episode]
else:
device_batch_dict[device_name].append(episode)
task_name_list.append(task_name)
if len(device_batch_dict) > 0:
epi_count += 1
yield MetaBatch(device_batch_dict, task_name_list=task_name_list)
if epi_count == max_epi_count:
break
else:
break
class RegularDatasetProcessor(DatasetProcessor):
def __init__(self, args, tokenizer, train_task_list, val_task_list, test_task_list):
super().__init__(args, tokenizer, train_task_list, val_task_list, test_task_list)
def _init_dataloader_of_split(self, task_name, split):
logger.info(f'***** Loading data for {split} *****')
if task_name.lower() not in glue_processors:
raise ValueError("Task not found: %s" % (task_name))
processor = glue_processors[task_name.lower()]()
# output_mode = glue_output_modes[task_name.lower()]
batch_size = (self.args.num_shots_support+self.args.num_shots_query) * len(processor.get_labels())
data_dir = os.path.join(self.args.data_dir, task_name)
dataset = load_and_cache_examples(self.args, data_dir, task_name.lower(),
self.tokenizer, split)
dataloader = DataLoader(dataset,
sampler=ClassBalancedRandomSampler(dataset.tensors[-1].tolist(), strict_balance=True),
batch_size=batch_size,
drop_last=True)
return dataloader, len(dataset)
def init_dataloader(self):
# label feature generation
logger.info("Generating task label features")
self.task_label_features = {}
all_tasks = set(itertools.chain.from_iterable(self.split_task_list.values()))
for task_name in all_tasks:
self.task_label_features[task_name] = self.features_to_tensors([
self.text_to_features(' # '.join(TASK_TEXT_LABELS[task_name]), -1)
])
self.split_dataloader = {}
self.train_task_gen = None
self.train_episode_gen_dict = {}
train_task_size_list = []
for split, task_list in self.split_task_list.items():
logger.info(f'Loading split: {split}')
dataloader_dict = {}
total_batches = 0
for task_name in task_list:
dataloader, task_size = self._init_dataloader_of_split(task_name, split)
dataloader_dict[task_name] = dataloader
total_batches += len(dataloader)
logger.info(f'Loaded dataset {task_name}. Batches # per epoch is {len(dataloader)}')
if split == 'train':
self.train_episode_gen_dict[task_name] = self._episode_generator(dataloader)
train_task_size_list.append(task_size)
self.split_dataloader[split] = dataloader_dict
if split == 'train':
# Sample dataset according sqrt of data size
train_task_size_list = np.array(train_task_size_list)
train_task_size_list = np.sqrt(train_task_size_list)
train_task_size_list = train_task_size_list / np.sum(train_task_size_list)
self.train_task_gen = self._task_generator(task_list,
sample_weights=train_task_size_list.tolist())
def get_episodes_from_different_tasks_on_each_device(self, num_episodes_per_device, device_list):
""" Get data of one episode. """
task_index = 0
device_batch_dict = {}
task_name_list = []
for device in device_list:
episode_list = []
saved_task = set()
for _ in range(num_episodes_per_device):
while True:
task_name = next(self.train_task_gen)
if task_name not in saved_task:
saved_task.add(task_name)
task_name_list.append(task_name)
break
batch = next(self.train_episode_gen_dict[task_name])
episode = self._prepare_episode(batch,
label_features=self.task_label_features[task_name],
text_labels=TASK_TEXT_LABELS[task_name],
device=device)
task_index += 1
episode_list.append(episode)
device_batch_dict[get_device_name(device)] = episode_list
return MetaBatch(device_batch_dict, task_name_list)
class TaskDataset(DatasetProcessor):
""" Dataset of tasks.
Each datapoint is a task. Each task is a mini-dataset, consisting of N
classes and a few examples per class.
"""
def __init__(self, args, tokenizer, train_task_list, val_task_list, test_task_list):
super().__init__(args, tokenizer, train_task_list, val_task_list, test_task_list)
def _init_dataloader_of_split(self, task_name, split):
logger.info(f'***** Loading data for {split} *****')
if task_name.lower() not in glue_processors:
raise ValueError("Task not found: %s" % (task_name))
processor = glue_processors[task_name.lower()]()
# output_mode = glue_output_modes[task_name.lower()]
# Each batch is actually a pair of tasks with the same size.
batch_size = len(processor.get_labels())
data_dir = os.path.join(self.args.data_dir, task_name)
dataset = load_and_cache_examples(self.args, data_dir, task_name.lower(),
self.tokenizer, split)
dataloader = DataLoader(dataset,
sampler=ClassBalancedRandomSampler(dataset.tensors[-1].tolist(), strict_balance=True),
batch_size=batch_size,
drop_last=True)
return dataloader, len(dataset)
def init_dataloader(self):
self.train_episode_gen_dict = {}
self._train_task_id_dict = {}
for i, task in enumerate(self.split_task_list['train']):
self._train_task_id_dict[task] = i
self._val_task_id_dict = {}
for i, task in enumerate(self.split_task_list['val']):
self._val_task_id_dict[task] = i
# label feature generation
logger.info("Generating task label features")
self.task_label_features = {}
all_tasks = set(itertools.chain.from_iterable(self.split_task_list.values()))
for task_name in set(all_tasks):
self.task_label_features[task_name] = self.features_to_tensors([
self.text_to_features(' # '.join(TASK_TEXT_LABELS[task_name]), -1)
])
self.split_dataloader = {}
train_task_size_list = []
for split, task_list in self.split_task_list.items():
logger.info(f'***** Loading split: {split} *****')
dataloader_dict = {}
total_episode = 0
for task_name in task_list:
dataloader_dict[task_name], task_size = self._init_dataloader_of_split(task_name, split)
total_episode += len(dataloader_dict[task_name])
if split == 'train':
self.train_episode_gen_dict[task_name] = self._episode_generator(dataloader_dict[task_name])
train_task_size_list.append(task_size)
setattr(self, f'{split}_num_episode_per_epoch', total_episode)
self.split_dataloader[split] = dataloader_dict
if split == 'train':
# Sample dataset according sqrt of data size.
train_task_size_list = np.array(train_task_size_list)
train_task_size_list = np.sqrt(train_task_size_list)
train_task_size_list = train_task_size_list / np.sum(train_task_size_list)
self._train_task_gen = self._task_generator(task_list,
sample_weights=train_task_size_list.tolist())
def get_train_episode_different_task_on_each_device(self, num_episodes_per_device, device_list, min_shots, max_shots, num_per_task=2):
""" Get data of one episode. """
task_index = 0
device_batch_dict = {}
for device in device_list:
episode_list = []
            saved_task = []  # Make sure the tasks on one device are distinct from each other.
for _ in range(num_episodes_per_device):
# Make sure not sampling the same task.
while True:
task_name = next(self._train_task_gen)
if task_name not in saved_task:
saved_task.append(task_name)
break
episode_cur_task = []
for _ in range(num_per_task):
num_shots = np.random.randint(min_shots, max_shots+1)
batch_list = []
for _ in range(num_shots):
# Each batch is a one shot task.
batch_list.append(next(self.train_episode_gen_dict[task_name]))
batch = self._merge_batches(batch_list)
episode = self._prepare_episode(batch,
task_id=self._train_task_id_dict[task_name],
label_features=self.task_label_features[task_name],
text_labels=TASK_TEXT_LABELS[task_name],
device=device)
episode_cur_task.append(episode)
task_index += 1
episode_list.append(episode_cur_task)
device_batch_dict[get_device_name(device)] = episode_list
return MetaBatch(device_batch_dict)
def _merge_batches(self, batches):
return [torch.cat([b[i] for b in batches], dim=0) for i in range(len(batches[0]))]
```
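A minimal, self-contained sketch of the class-balanced batching described in the `ClassBalancedRandomSampler` docstring (the module name `meta_dataset` is taken from the file path above; importing it also pulls in `transformers` and the repo's `utils`/`logging_utils`):
```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from meta_dataset import ClassBalancedRandomSampler

# Toy dataset: five examples of class 0 and three of class 1.
features = torch.arange(8).float().unsqueeze(1)
labels = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1])
dataset = TensorDataset(features, labels)

loader = DataLoader(dataset,
                    sampler=ClassBalancedRandomSampler(labels.tolist(), strict_balance=True),
                    batch_size=2,   # 1 shot per class * 2 classes
                    drop_last=True)
for _, y in loader:
    print(y.tolist())  # every printed batch contains exactly one 0 and one 1
```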
#### File: jixuan-wang/Grad2Task/modeling_bert.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import numbers
import os
import sys
import torch
from torch import embedding, nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.modeling_utils import PreTrainedModel, prune_linear_layer
from transformers.configuration_bert import BertConfig
from transformers.file_utils import add_start_docstrings
from transformers.modeling_bert import (BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
load_tf_weights_in_bert, ACT2FN, BertLayerNorm)
logger = logging.getLogger('Modeling BERT')
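# Note on the `params` argument used throughout this file: instead of reading weights
# from their own nn.Parameters, these re-implemented BERT modules receive a flat dict
# of tensors keyed by prefixed names (e.g. 'embeddings_word_embeddings_weight',
# 'encoder_layer_0_attention_self_query_weight') and apply them with the functional
# ops F.embedding / F.linear / F.layer_norm. This makes the forward pass a pure
# function of (inputs, params), so externally adapted weights -- e.g. produced by a
# meta-learner -- can be plugged in without mutating the module state.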
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.layer_norm_n_shape = (config.hidden_size, )
self.layer_norm_eps=config.layer_norm_eps
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, params, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, params_prefix='embeddings'):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = F.embedding(input_ids,
params[f'{params_prefix}_word_embeddings_weight'],
self.word_embeddings.padding_idx, self.word_embeddings.max_norm,
self.word_embeddings.norm_type, self.word_embeddings.scale_grad_by_freq,
self.word_embeddings.sparse)
position_embeddings = F.embedding(position_ids,
params[f'{params_prefix}_position_embeddings_weight'],
self.position_embeddings.padding_idx, self.position_embeddings.max_norm,
self.position_embeddings.norm_type, self.position_embeddings.scale_grad_by_freq,
self.position_embeddings.sparse)
token_type_embeddings = F.embedding(token_type_ids,
params[f'{params_prefix}_token_type_embeddings_weight'],
self.token_type_embeddings.padding_idx, self.token_type_embeddings.max_norm,
self.token_type_embeddings.norm_type, self.token_type_embeddings.scale_grad_by_freq,
self.token_type_embeddings.sparse)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
# embeddings = self.LayerNorm(embeddings)
embeddings = F.layer_norm(
embeddings, self.layer_norm_n_shape,
params[f'{params_prefix}_LayerNorm_weight'],
params[f'{params_prefix}_LayerNorm_bias'],
self.layer_norm_eps)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, params, params_prefix, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
mixed_query_layer = F.linear(hidden_states,
params[f'{params_prefix}_query_weight'],
params[f'{params_prefix}_query_bias'])
if encoder_hidden_states is not None:
mixed_key_layer = F.linear(encoder_hidden_states,
params[f'{params_prefix}_key_weight'],
params[f'{params_prefix}_key_bias'])
mixed_value_layer = F.linear(encoder_hidden_states,
params[f'{params_prefix}_value_weight'],
params[f'{params_prefix}_value_bias'])
attention_mask = encoder_attention_mask
else:
mixed_key_layer = F.linear(hidden_states,
params[f'{params_prefix}_key_weight'],
params[f'{params_prefix}_key_bias'])
mixed_value_layer = F.linear(hidden_states,
params[f'{params_prefix}_value_weight'],
params[f'{params_prefix}_value_bias'])
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.layer_norm_n_shape = (config.hidden_size, )
self.layer_norm_eps=config.layer_norm_eps
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, params, params_prefix, hidden_states, input_tensor):
hidden_states = F.linear(hidden_states,
params[f'{params_prefix}_dense_weight'],
params[f'{params_prefix}_dense_bias'])
hidden_states = self.dropout(hidden_states)
hidden_states = F.layer_norm(hidden_states + input_tensor,
self.layer_norm_n_shape,
params[f'{params_prefix}_LayerNorm_weight'],
params[f'{params_prefix}_LayerNorm_bias'],
self.layer_norm_eps)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def forward(self,params, params_prefix, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
self_outputs = self.self(params, f'{params_prefix}_self', hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask)
attention_output = self.output(params, f'{params_prefix}_output', self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, params, params_prefix, hidden_states):
hidden_states = F.linear(hidden_states,
params[f'{params_prefix}_dense_weight'],
params[f'{params_prefix}_dense_bias'])
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.layer_norm_n_shape = (config.hidden_size, )
self.layer_norm_eps=config.layer_norm_eps
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, params, params_prefix, hidden_states, input_tensor):
hidden_states = F.linear(hidden_states,
params[f'{params_prefix}_dense_weight'],
params[f'{params_prefix}_dense_bias'])
hidden_states = self.dropout(hidden_states)
hidden_states = F.layer_norm(hidden_states + input_tensor,
self.layer_norm_n_shape,
params[f'{params_prefix}_LayerNorm_weight'],
params[f'{params_prefix}_LayerNorm_bias'],
self.layer_norm_eps)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, params_prefix=''):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
if self.is_decoder:
self.crossattention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
self.params_prefix = params_prefix
def forward(self, params, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
self_attention_outputs = self.attention(params, f'{self.params_prefix}_attention', hidden_states, attention_mask, head_mask)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
            cross_attention_outputs = self.crossattention(params, f'{self.params_prefix}_crossattention', attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
intermediate_output = self.intermediate(params, f'{self.params_prefix}_intermediate', attention_output)
layer_output = self.output(params, f'{self.params_prefix}_output', intermediate_output, attention_output)
outputs = (layer_output,) + outputs
return outputs
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config, f'encoder_layer_{i}') for i in range(config.num_hidden_layers)])
def forward(self, params, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(params, hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.activation = nn.Tanh()
def forward(self, params, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = F.linear(first_token_tensor,
params['pooler_dense_weight'],
params['pooler_dense_bias'])
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = BertConfig
pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
class BertModel(BertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.dummy_param = nn.Parameter(torch.ones(1))
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None,
head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None,
params=None):
""" Forward pass on the Model.
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
        the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
        Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
        To behave as a decoder the model needs to be initialized with the
`is_decoder` argument of the configuration set to `True`; an
`encoder_hidden_states` is expected as an input to the forward pass.
.. _`Attention is all you need`:
https://arxiv.org/abs/1706.03762
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if attention_mask.dim() == 2:
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
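                # Build a lower-triangular (causal) mask so that position i can only attend
                # to positions j <= i, then combine it with the user-supplied padding mask.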
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
if params is None:
raise ValueError('Parameters should be specified in forward() for this class.')
else:
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids,
token_type_ids=token_type_ids, inputs_embeds=inputs_embeds,
params=params)
encoder_outputs = self.encoder(params, embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(params, sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
``` |
{
"source": "jixu-invn/veterinary_clinic_database",
"score": 3
} |
#### File: veterinary_clinic_database/appli_python/affichage_tables.py
```python
import psycopg2 as psy
from prettytable import PrettyTable
from datetime import date
def printClients(connexion):
t = PrettyTable(['id','Nom','Prenom','Naissance','Adresse', 'Tel'])
resultat = connexion.cursor()
resultat.execute("SELECT * FROM Clients")
for ligne in resultat.fetchall():
t.add_row(ligne)
print(t)
def printEspeces(connexion):
t = PrettyTable(["Nom de l'espece",'Classe'])
resultat = connexion.cursor()
resultat.execute("SELECT * FROM Espece")
for ligne in resultat.fetchall():
t.add_row(ligne)
print(t)
def printAnimaux(connexion):
t = PrettyTable(['ID Animal','Nom','Poids', 'Taille', 'Année de naissance','Espèce', 'ID Propriétaire'])
resultat = connexion.cursor()
resultat.execute("SELECT id, nom, dernier_poids, derniere_taille, annee_naissance, espece,proprietaire FROM Animaux")
for ligne in resultat.fetchall():
t.add_row(ligne)
print(t)
def printMedocPrescrits(connexion):
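    # The "traitements" column holds a JSON array of treatments, each with its own
    # "medicaments" array; JSON_ARRAY_ELEMENTS unnests both levels so that one row
    # is printed per prescribed drug.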
t = PrettyTable(['ID Animal','ID proprietaire','Date de début','Date de fin de traitement','Medicament prescrit', 'Quantité journalière'])
resultat = connexion.cursor()
resultat.execute("SELECT a.id, a.proprietaire, t->>'debut', t->>'fin', m->>'molecule', m->>'qte' \
FROM animaux a, JSON_ARRAY_ELEMENTS(a.traitements) t, JSON_ARRAY_ELEMENTS(t->'medicaments') m")
for ligne in resultat.fetchall():
t.add_row(ligne)
print(t)
def printStatMoyennesEspeces(connexion):
print("Statistique : Caractéristiques moyennes de chaque espèce")
t = PrettyTable(['Espece','Poids moyen','Taille moyenne'])
resultat = connexion.cursor()
resultat.execute("SELECT A.espece, AVG(A.dernier_poids) PoidsMoyen,AVG(A.derniere_taille) TailleMoyenne \
FROM Animaux A \
GROUP BY A.espece")
for ligne in resultat.fetchall():
t.add_row(ligne)
print(t)
def printStatGlobalesMedoc(connexion):
print("Statistique : Quantité totale prescrite de chaque médicament")
t = PrettyTable(['Molecule','Quantité totale prescrite'])
resultat = connexion.cursor()
resultat.execute("SELECT m->>'molecule', SUM(CAST(m->>'qte' AS INTEGER)*(TO_DATE(t->>'fin','YYYY-MM-DD')-TO_DATE(t->>'debut','YYYY-MM-DD')+1)) \
FROM animaux a, JSON_ARRAY_ELEMENTS(a.traitements) t, JSON_ARRAY_ELEMENTS(t->'medicaments') m\
GROUP BY m->>'molecule'")
for ligne in resultat.fetchall():
t.add_row(ligne)
print(t)
def printStatQteMedocParAnimal(connexion):
print("Statistique : Quantité de chaque médicament prescrite à chaque animal")
t = PrettyTable(['ID Animal','Molecule','Quantité totale prescrite'])
resultat = connexion.cursor()
resultat.execute("SELECT a.id, m->>'molecule', SUM(CAST(m->>'qte' AS INTEGER)*(TO_DATE(t->>'fin','YYYY-MM-DD')-TO_DATE(t->>'debut','YYYY-MM-DD')+1)) \
FROM animaux a, JSON_ARRAY_ELEMENTS(a.traitements) t, JSON_ARRAY_ELEMENTS(t->'medicaments') m\
GROUP BY a.id, m->>'molecule'")
for ligne in resultat.fetchall():
t.add_row(ligne)
print(t)
def printStatBonus(connexion):
print("Statistique bonus: Nombre de vétérinaires consultés par chaque client")
t = PrettyTable(['Nom','Prenom','Naissance','Nombre de vétérinaires consultés'])
resultat = connexion.cursor()
resultat.execute("SELECT C.nom, C.prenom, C.naissance, COUNT(DISTINCT t->>'veto') AS Nb_veto_consultes \
FROM Animaux A JOIN Clients C ON A.proprietaire = C.id, JSON_ARRAY_ELEMENTS(a.traitements) t \
GROUP BY C.nom, C.prenom, C.naissance")
for ligne in resultat.fetchall():
t.add_row(ligne)
print(t)
if __name__ == '__main__':
conn = psy.connect("dbname = 'dbbdd0p050' user='bdd0p050' host='tuxa.sme.utc' password='<PASSWORD>'")
print("OK")
printClients(conn)
printAnimaux(conn)
printMedocPrescrits(conn)
printStatBonus(conn)
```
#### File: veterinary_clinic_database/appli_python/creationTraitementDict.py
```python
from datetime import datetime
def creationVetoJSON() :
nom = str(input("Nom du vétérinaire : "))
prenom = str(input("Prenom du vétérinaire : "))
return({"nom":nom, "pnom":prenom})
def creationMedicamentJSON() :
molecule = str(input("Nom du médicament : "))
while True :
try :
qte = int(input("Quantité journalière du médicament : "))
break
except :
print("Le quantité doit être un entier!")
return({"molecule" : molecule, "qte" : qte})
def creerDate() :
while True :
try :
dateInput = str(input("Date (YYYY-MM-DD) : "))
date = datetime.strptime(dateInput, '%Y-%m-%d')
break
except :
print("Entrez une date correcte, sous le format indiqué")
date = date.date().strftime('%Y-%m-%d')
return date
def creationTraitementJSON() :
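    # Assembles a treatment dict matching the JSON stored in the "traitements" column:
    # {"debut": "YYYY-MM-DD", "fin": "YYYY-MM-DD", "veto": {...}, "medicaments": [{"molecule": ..., "qte": ...}, ...]}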
print("Entrez une date de debut de traitement. ", end=""); dateDebut = creerDate()
print("Entrez une date de fin de traitement. ", end=""); dateFin = creerDate()
while datetime.strptime(dateFin, '%Y-%m-%d') < datetime.strptime(dateDebut, '%Y-%m-%d') :
print("La date de fin doit être plus ancienne que la date de début")
dateFin = creerDate()
veto = creationVetoJSON()
while True :
try :
nbMedicaments = int(input("Combien de médicaments sont inclus dans le traitement? : "))
break
except :
print("Entrez un nombre entier")
listeMedocs = []
for i in range(0, nbMedicaments, 1) :
medicament = creationMedicamentJSON()
listeMedocs.append(medicament)
traitement = {"debut" : dateDebut,
"fin" : dateFin,
"veto" : veto,
"medicaments" : listeMedocs}
return(traitement)
if __name__ == "__main__" :
pass
#print(creerDate())
#print(creationVetoJSON())
#print(creationMedicamentJSON())
#print(creationTraitementJSON())
```
#### File: veterinary_clinic_database/appli_python/main.py
```python
import psycopg2 as psy
import affichage_tables as aff
import addTraitement as trait
import json
# add an animal
def InsertionAnimal(connexion):
print("Vous avez choisi d'inserer un animal")
id = input('Id: ')
while not id.isdigit():
id = input('Input invalide, veuillez entrer de nouveau:')
nom = input('Nom d\'animal:')
der_poids = input("Son dernier poids:")
while (der_poids != '' and float(der_poids) < 0):
der_poids = input("Le poids ne doit pas etre negative, veillez entrer de nouveau: \n")
der_taille = input("Son dernier taille: ")
while (der_taille != '' and float(der_taille) < 0):
der_taille = input("La taille ne doit pas etre negative, veillez entrer de nouveau: \n")
annee = input("Son annee de naissance: ")
while (annee != '' and ((int(annee) < 1800) or (int(annee) > 2019))):
annee = input("L\'annee de naissance est invalide, veuillez entrer de nouveau:\n")
propri = input("Id du proprietaire: ")
while not propri.isdigit():
propri = input('Input invalide, veuillez entrer de nouveau: \n')
espece = input("Son espece: ")
sql = "INSERT INTO Animaux VALUES("
sql = sql + id + ",'" + nom + "',"
sql = sql+ der_poids + "," + der_taille + ","
sql = sql + annee + ",NULL,'"
sql = sql + propri + "','" + espece + "');"
print(sql)
connexion.cursor().execute(sql)
# add a client
def insertionClient(connexion):
print("Vous avez choisi d'inserer un client")
sql = "INSERT INTO Clients VALUES("
id = input("ID: ")
sql = sql+id+",'"
nom = input("Nom: ")
sql = sql+nom+"','"
prenom = input("Prenom: ")
sql = sql+prenom+"','"
naissance = input("Date de naissance: (YYYY-MM-DD)")
sql = sql+naissance+"','"
adresse = input("Adresse: ")
sql = sql+adresse+"','"
telephone = input("Telephone(10 chiffres sans espace): ")
sql = sql+telephone+"')"
connexion.cursor().execute(sql)
# main menu
def menu_p(connexion) :
print("\n\n+++++++++++++\nBienvenue dans le menu\n+++++++++++++")
print("Vos choix")
print(" 1 : Voir le contenu de toutes les tables ")
print(" 2 : Ajouter des enregistrements dans la base de données")
print(" 3 : Voir le résultat des requetes statistiques")
print(" 0 pour quitter\n+++++++++++++")
choix = False
while not choix:
choix = input("Entrer le nombre correspondant au choix : ")
if(choix != "0" and choix != "1" and choix != "2" and choix != "3" and choix != "4"):
choix = False
return choix
# secondary menu
def menu_s(connexion, c):
if(c == "1"):
menu_1(connexion)
if(c == "2"):
menu_2(connexion)
if(c == "3"):
menu_3(connexion)
# menu 1: view the contents of all tables
def menu_1(connexion):
print("\n\n+++++++++++++\nQuelle table voulez vous voir?")
print("Tables disponibles:")
print("1 : Clients")
print("2 : Especes Animales")
print("3 : Animaux")
print("4 : Medicaments prescrits")
print("0 pour quitter\n+++++++++++++")
choix = False
while not choix:
choix = input("Entrer le nombre correspondant au choix : ")
if( choix != "0" and choix != "1" and choix != "2" and choix != "3" and choix != "4"):
choix = False
if(choix == "1"):
print("Affichage de la table: Clients")
aff.printClients(connexion)
if(choix == "2"):
print("Affichage de la table: Especes animales")
aff.printEspeces(connexion)
if(choix == "3"):
print("Affichage de la table: Animaux")
aff.printAnimaux(connexion)
if(choix == "4"):
print("Affichage de la table: Medicaments prescrits")
aff.printMedocPrescrits(connexion)
# menu 2: add records to the database
def menu_2(connexion):
print("\n\n+++++++++++++\nQue voulez vous ajouter?")
print("1 : Ajouter des Clients")
print("2 : Ajouter des animaux")
print("3 : Ajouter des traitements")
print("0 pour quitter\n+++++++++++++")
choix = False
while not choix:
choix = input("Entrer le nombre correspondant au choix : ")
if( choix != "0" and choix != "1" and choix != "2" and choix != "3"):
choix = False
if(choix == "1"):
print("Affichage de la table: Clients")
aff.printClients(connexion)
print("\n")
insertionClient(connexion)
if(choix == "2"):
print("Affichage de la table: Animaux")
aff.printAnimaux(connexion)
print("\n")
InsertionAnimal(connexion)
if(choix == "3"):
print("Affichage de la table: Animaux")
aff.printAnimaux(connexion)
print("\n")
trait.addTraitement(connexion)
# menu 3: view the results of the statistical queries
def menu_3(connexion):
print("\n\n+++++++++++++\n Quelles statistiques voulez vous afficher?")
print(" 1 : poids et taille moyenne des animaux d'une espèce traitée ")
print(" 2 : quantité d'un médicament prescrit au total dans la clinique")
print(" 3 : quantité de chaque type de médicament prescrit pour un animal donné")
print(" 4 : Statistique bonus: Nombre de vétérinaires consultés par chaque client")
print("0 pour quitter\n+++++++++++++")
choix = False
while not choix:
choix = input("Entrer le nombre correspondant au choix : ")
if( choix != "0" and choix != "1" and choix != "2" and choix != "3" and choix != "4"):
choix = False
if(choix == "1"):
aff.printStatMoyennesEspeces(connexion)
if(choix == "2"):
aff.printStatGlobalesMedoc(connexion)
if(choix == "3"):
aff.printStatQteMedocParAnimal(connexion)
if(choix == "4"):
aff.printStatBonus(connexion)
if __name__ == "__main__":
conn = psy.connect("dbname = 'dbbdd0p050' user='bdd0p050' host='tuxa.sme.utc' password='<PASSWORD>'")
while True :
c = menu_p(conn)
print("Vous avez selectionne le choix " + c + "\n")
if(c == "0"):
break
menu_s(conn,c)
conn.commit()
print("+++++++++++++\nFermeture de la clinique\n+++++++++++++")
``` |
{
"source": "JixunMoe/ContraNES1TranslationPatch",
"score": 3
} |
#### File: p65-py3/Ophis/Passes.py
```python
from __future__ import nested_scopes
import Ophis.Errors as Err
import Ophis.IR as IR
import Ophis.Opcodes as Ops
import Ophis.CmdLine as Cmd
import Ophis.Macro as Macro
# The passes themselves
class Pass:
"""Superclass for all assembler passes. Automatically handles IR
    types that modify the environment's structure, and by default
raises an error on anything else. Override visitUnknown in your
extension pass to produce a pass that accepts everything."""
name = "Default Pass"
def __init__(self):
self.writeOK = 1
def visitNone(self, node, env):
pass
def visitSEQUENCE(self, node, env):
Err.currentpoint = node.ppt
for n in node.data:
n.accept(self, env)
def visitDataSegment(self, node, env):
self.writeOK = 0
env.setsegment(node.data[0])
def visitTextSegment(self, node, env):
self.writeOK = 1
env.setsegment(node.data[0])
def visitScopeBegin(self, node, env):
env.newscope()
def visitScopeEnd(self, node, env):
env.endscope()
def visitUnknown(self, node, env):
Err.log("Internal error! "+self.name+" cannot understand node type "+node.nodetype)
def prePass(self):
pass
def postPass(self):
pass
def go(self, node, env):
"""Prepares the environment and runs this pass, possibly
printing debugging information."""
if Err.count == 0:
if Cmd.verbose > 1: print("Running: "+self.name)
env.reset()
self.prePass()
node.accept(self, env)
self.postPass()
env.reset()
if Cmd.verbose > 3:
print("Current labels:")
print(env)
if Cmd.verbose > 2:
print("Current IR:")
print(node)
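# Illustrative sketch (not part of Ophis itself): a custom pass following the visitor
# convention above only needs to override the node types it cares about and accept
# everything else via visitUnknown.
#
#   class CountLabels(Pass):
#       name = "Label counting pass"
#       def prePass(self):
#           self.count = 0
#       def visitLabel(self, node, env):
#           self.count += 1
#       def visitUnknown(self, node, env):
#           pass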
class FixPoint:
"""A specialized class that is not a pass but can be run like one.
This class takes a list of passes and a "fixpoint" function."""
def __init__(self, name, passes, fixpoint):
self.name = name
self.passes = passes
self.fixpoint = fixpoint
def go(self, node, env):
"""Runs this FixPoint's passes, in order, until the fixpoint
is true. Always runs the passes at least once."""
for i in range(100):
if Err.count != 0: break
for p in self.passes:
p.go(node, env)
if Err.count != 0: break
if self.fixpoint(): break
if Cmd.verbose > 1: print("Fixpoint failed, looping back")
else:
Err.log("Can't make %s converge! Maybe there's a recursive dependency somewhere?" % self.name)
class DefineMacros(Pass):
"Extract macro definitions and remove them from the IR"
name = "Macro definition pass"
def prePass(self):
self.inDef = 0
self.nestedError = 0
def postPass(self):
if self.inDef:
Err.log("Unmatched .macro")
elif Cmd.verbose > 2:
print("Macro definitions:")
Macro.dump()
def visitMacroBegin(self, node, env):
if self.inDef:
Err.log("Nested macro definition")
self.nestedError = 1
else:
Macro.newMacro(node.data[0])
node.nodetype = "None"
node.data = []
self.inDef = 1
def visitMacroEnd(self, node, env):
if self.inDef:
Macro.endMacro()
node.nodetype = "None"
node.data = []
self.inDef = 0
elif not self.nestedError:
Err.log("Unmatched .macend")
def visitUnknown(self, node, env):
if self.inDef:
Macro.registerNode(node)
node.nodetype = "None"
node.data = []
class ExpandMacros(Pass):
"Replace macro invocations with the appropriate text"
name = "Macro expansion pass"
def prePass(self):
self.changed = 0
def visitMacroInvoke(self, node, env):
replacement = Macro.expandMacro(node.ppt, node.data[0], node.data[1:])
node.nodetype = replacement.nodetype
node.data = replacement.data
self.changed = 1
def visitUnknown(self, node, env):
pass
class InitLabels(Pass):
"Finds all reachable labels"
name = "Label initialization pass"
def __init__(self):
Pass.__init__(self)
self.labelmap = {}
def prePass(self):
self.changed = 0
self.PCvalid = 1
def visitAdvance(self, node, env):
self.PCvalid=node.data[0].valid(env, self.PCvalid)
def visitSetPC(self, node, env):
self.PCvalid=node.data[0].valid(env, self.PCvalid)
def visitLabel(self, node, env):
(label, val) = node.data
fulllabel = "%d:%s" % (env.stack[0], label)
if fulllabel in self.labelmap and self.labelmap[fulllabel] is not node:
Err.log("Duplicate label definition '%s'" % label)
if fulllabel not in self.labelmap:
self.labelmap[fulllabel] = node
if val.valid(env, self.PCvalid) and label not in env:
env[label]=0
self.changed=1
def visitUnknown(self, node, env):
pass
class CircularityCheck(Pass):
"Checks for circular label dependencies"
name = "Circularity check pass"
def prePass(self):
self.changed=0
self.PCvalid=1
def visitAdvance(self, node, env):
PCvalid = self.PCvalid
self.PCvalid=node.data[0].valid(env, self.PCvalid)
if not node.data[0].valid(env, PCvalid):
Err.log("Undefined or circular reference on .advance")
def visitSetPC(self, node, env):
PCvalid = self.PCvalid
self.PCvalid=node.data[0].valid(env, self.PCvalid)
if not node.data[0].valid(env, PCvalid):
Err.log("Undefined or circular reference on program counter set")
def visitCheckPC(self, node, env):
if not node.data[0].valid(env, self.PCvalid):
Err.log("Undefined or circular reference on program counter check")
def visitLabel(self, node, env):
(label, val) = node.data
if not val.valid(env, self.PCvalid):
Err.log("Undefined or circular dependency for label '%s'" % label)
def visitUnknown(self, node, env):
pass
class CheckExprs(Pass):
"Ensures all expressions can resolve"
name = "Expression checking pass"
def visitUnknown(self, node, env):
for i in [x for x in node.data if isinstance(x, IR.Expr)]:
i.value(env) # Throw away result, just confirm validity of all expressions
class EasyModes(Pass):
"Assigns address modes to hardcoded and branch instructions"
name = "Easy addressing modes pass"
def visitMemory(self, node, env):
if Ops.opcodes[node.data[0]][11] is not None:
node.nodetype = "Relative"
return
if node.data[1].hardcoded:
if not collapse_no_index(node, env):
node.nodetype = "Absolute"
def visitMemoryX(self, node, env):
if node.data[1].hardcoded:
if not collapse_x(node, env):
node.nodetype = "AbsoluteX"
def visitMemoryY(self, node, env):
if node.data[1].hardcoded:
if not collapse_y(node, env):
node.nodetype = "AbsoluteY"
def visitUnknown(self, node, env):
pass
class UpdateLabels(Pass):
"Computes the new values for all entries in the symbol table"
name = "Label Update Pass"
def prePass(self):
self.changed = 0
def visitSetPC(self, node, env): env.setPC(node.data[0].value(env))
def visitAdvance(self, node, env): env.setPC(node.data[0].value(env))
def visitImplied(self, node, env): env.incPC(1)
def visitImmediate(self, node, env): env.incPC(2)
def visitIndirectX(self, node, env): env.incPC(2)
def visitIndirectY(self, node, env): env.incPC(2)
def visitZeroPage(self, node, env): env.incPC(2)
def visitZeroPageX(self, node, env): env.incPC(2)
def visitZeroPageY(self, node, env): env.incPC(2)
def visitRelative(self, node, env): env.incPC(2)
def visitIndirect(self, node, env): env.incPC(3)
def visitAbsolute(self, node, env): env.incPC(3)
def visitAbsoluteX(self, node, env): env.incPC(3)
def visitAbsoluteY(self, node, env): env.incPC(3)
def visitMemory(self, node, env): env.incPC(3)
def visitMemoryX(self, node, env): env.incPC(3)
def visitMemoryY(self, node, env): env.incPC(3)
def visitCheckPC(self, node, env): pass
def visitLabel(self, node, env):
(label, val) = node.data
old = env[label]
env[label] = val.value(env)
if old != env[label]:
self.changed = 1
def visitByte(self, node, env): env.incPC(len(node.data))
def visitWord(self, node, env): env.incPC(len(node.data)*2)
def visitDword(self, node, env): env.incPC(len(node.data)*4)
def visitWordBE(self, node, env): env.incPC(len(node.data)*2)
def visitDwordBE(self, node, env): env.incPC(len(node.data)*4)
class Collapse(Pass):
"""Selects as many zero-page instructions to convert as
possible, and tracks how many instructions have been
converted this pass."""
name = "Instruction Collapse Pass"
def prePass(self):
self.collapsed = 0
def visitMemory(self, node, env):
if collapse_no_index(node, env): self.collapsed += 1
def visitMemoryX(self, node, env):
if collapse_x(node, env): self.collapsed += 1
def visitMemoryY(self, node, env):
if collapse_y(node, env): self.collapsed += 1
def visitUnknown(self, node, env):
pass
def collapse_no_index(node, env):
"""Transforms a Memory node into a ZeroPage one if possible.
    Returns 1 if it made the collapse, 0 otherwise."""
if node.data[1].value(env) < 0x100 and Ops.opcodes[node.data[0]][2] is not None:
node.nodetype = "ZeroPage"
return 1
else:
return 0
def collapse_x(node, env):
"""Transforms a MemoryX node into a ZeroPageX one if possible.
    Returns 1 if it made the collapse, 0 otherwise."""
if node.data[1].value(env) < 0x100 and Ops.opcodes[node.data[0]][3] is not None:
node.nodetype = "ZeroPageX"
return 1
else:
return 0
def collapse_y(node, env):
"""Transforms a MemoryY node into a ZeroPageY one if possible.
    Returns 1 if it made the collapse, 0 otherwise."""
if node.data[1].value(env) < 0x100 and Ops.opcodes[node.data[0]][4] is not None:
node.nodetype = "ZeroPageY"
return 1
else:
return 0
class NormalizeModes(Pass):
"""Eliminates the intermediate "Memory" nodes, converting them
to "Absolute"."""
name = "Mode Normalization pass"
def visitMemory(self, node, env): node.nodetype = "Absolute"
def visitMemoryX(self, node, env): node.nodetype = "AbsoluteX"
def visitMemoryY(self, node, env): node.nodetype = "AbsoluteY"
def visitUnknown(self, node, env): pass
class Assembler(Pass):
"""Converts the IR into a list of bytes, suitable for writing to
a file."""
name = "Assembler"
def prePass(self):
self.output = []
self.code = 0
self.data = 0
self.filler = 0
def postPass(self):
if Cmd.verbose > 0 and Err.count == 0:
print("Assembly complete: %s bytes output (%s code, %s data, %s filler)" \
% (len(self.output), self.code, self.data, self.filler))
def outputbyte(self, expr, env):
'Outputs a byte, with range checking'
if self.writeOK:
val = expr.value(env)
if val < 0x00 or val > 0xff:
Err.log("Byte constant "+str(expr)+" out of range")
val = 0
self.output.append(int(val))
else:
Err.log("Attempt to write to data segment")
def outputword(self, expr, env):
'Outputs a little-endian word, with range checking'
if self.writeOK:
val = expr.value(env)
if val < 0x0000 or val > 0xFFFF:
Err.log("Word constant "+str(expr)+" out of range")
val = 0
self.output.append(int(val & 0xFF))
self.output.append(int((val >> 8) & 0xFF))
else:
Err.log("Attempt to write to data segment")
def outputdword(self, expr, env):
'Outputs a little-endian dword, with range checking'
if self.writeOK:
val = expr.value(env)
if val < 0x00000000 or val > 0xFFFFFFFF:
Err.log("DWord constant "+str(expr)+" out of range")
val = 0
self.output.append(int(val & 0xFF))
self.output.append(int((val >> 8) & 0xFF))
self.output.append(int((val >> 16) & 0xFF))
self.output.append(int((val >> 24) & 0xFF))
else:
Err.log("Attempt to write to data segment")
def outputword_be(self, expr, env):
'Outputs a big-endian word, with range checking'
if self.writeOK:
val = expr.value(env)
if val < 0x0000 or val > 0xFFFF:
Err.log("Word constant "+str(expr)+" out of range")
val = 0
self.output.append(int((val >> 8) & 0xFF))
self.output.append(int(val & 0xFF))
else:
Err.log("Attempt to write to data segment")
def outputdword_be(self, expr, env):
'Outputs a big-endian dword, with range checking'
if self.writeOK:
val = expr.value(env)
if val < 0x00000000 or val > 0xFFFFFFFF:
Err.log("DWord constant "+str(expr)+" out of range")
val = 0
self.output.append(int((val >> 24) & 0xFF))
self.output.append(int((val >> 16) & 0xFF))
self.output.append(int((val >> 8) & 0xFF))
self.output.append(int(val & 0xFF))
else:
Err.log("Attempt to write to data segment")
def assemble(self, node, mode, env):
"A generic instruction called by the visitor methods themselves"
(opcode, expr) = node.data
bin_op = Ops.opcodes[opcode][mode]
if bin_op is None:
Err.log('%s does not have mode "%s"' % (opcode.upper(), Ops.modes[mode]))
return
self.outputbyte(IR.ConstantExpr(bin_op), env)
arglen = Ops.lengths[mode]
if mode == 11: # Special handling for relative mode
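            # 6502 relative branches are encoded as a signed one-byte offset from the
            # address immediately after the two-byte instruction, hence env.getPC()+2.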
arg = expr.value(env)
arg = arg-(env.getPC()+2)
if arg < -128 or arg > 127:
Err.log("Branch target out of bounds")
arg = 0
if arg < 0: arg += 256
expr = IR.ConstantExpr(arg)
if arglen == 1: self.outputbyte(expr, env)
if arglen == 2: self.outputword(expr, env)
env.incPC(1+arglen)
self.code += 1+arglen
def visitImplied(self, node, env): self.assemble(node, 0, env)
def visitImmediate(self, node, env): self.assemble(node, 1, env)
def visitZeroPage(self, node, env): self.assemble(node, 2, env)
def visitZeroPageX(self, node, env): self.assemble(node, 3, env)
def visitZeroPageY(self, node, env): self.assemble(node, 4, env)
def visitAbsolute(self, node, env): self.assemble(node, 5, env)
def visitAbsoluteX(self, node, env): self.assemble(node, 6, env)
def visitAbsoluteY(self, node, env): self.assemble(node, 7, env)
def visitIndirect(self, node, env): self.assemble(node, 8, env)
def visitIndirectX(self, node, env): self.assemble(node, 9, env)
def visitIndirectY(self, node, env): self.assemble(node, 10, env)
def visitRelative(self, node, env): self.assemble(node, 11, env)
def visitLabel(self, node, env): pass
def visitByte(self, node, env):
for expr in node.data:
self.outputbyte(expr, env)
env.incPC(len(node.data))
self.data += len(node.data)
def visitWord(self, node, env):
for expr in node.data:
self.outputword(expr, env)
env.incPC(len(node.data)*2)
self.data += len(node.data)*2
def visitDword(self, node, env):
for expr in node.data:
self.outputdword(expr, env)
env.incPC(len(node.data)*4)
self.data += len(node.data)*4
def visitWordBE(self, node, env):
for expr in node.data:
self.outputword_be(expr, env)
env.incPC(len(node.data)*2)
self.data += len(node.data)*2
def visitDwordBE(self, node, env):
for expr in node.data:
self.outputdword_be(expr, env)
env.incPC(len(node.data)*4)
self.data += len(node.data)*4
def visitSetPC(self, node, env):
env.setPC(node.data[0].value(env))
def visitCheckPC(self, node, env):
pc = env.getPC()
target = node.data[0].value(env)
if (pc > target):
Err.log(".checkpc assertion failed: $%x > $%x" % (pc, target))
def visitAdvance(self, node, env):
pc = env.getPC()
target = node.data[0].value(env)
if (pc > target):
Err.log("Attempted to .advance backwards: $%x to $%x" % (pc, target))
else:
zero = IR.ConstantExpr(0)
for i in range(target-pc): self.outputbyte(zero, env)
self.filler += target-pc
env.setPC(target)
``` |
{
"source": "JixunMoe/drone-minio-uploader",
"score": 3
} |
#### File: drone-minio-uploader/uploader/upload.py
```python
from os import getenv as env, path
from uploader.args import args
from uploader.const import S3_SERVER, UPLOAD_BUCKET, UPLOAD_DIR, FILE_PREFIX, ACCESS_KEY, SECRET_KEY
from minio import Minio
class Uploader(Minio):
def __init__(self):
super().__init__(
S3_SERVER,
access_key=ACCESS_KEY,
secret_key=SECRET_KEY,
secure=True,
)
def upload(self, file, name):
name = name if name else self.get_renamed(file)
etag = self.fput_object(bucket_name=UPLOAD_BUCKET, object_name=f'{UPLOAD_DIR}/{name}', file_path=file)
print(f'upload ok! etag: {etag}')
@staticmethod
def get_renamed(file):
name, ext = path.splitext(file)
if '.tar.' in file:
ext = '.tar' + ext
return FILE_PREFIX + ext
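        # Illustrative examples (assumed inputs): "dist/app.tar.gz" uploads as
        # FILE_PREFIX + ".tar.gz", while "dist/app.zip" uploads as FILE_PREFIX + ".zip".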
def main():
uploader = Uploader()
uploader.upload(args.file, args.name)
if __name__ == '__main__':
main()
``` |
{
"source": "JixunMoe/netease-cloud-music-api",
"score": 3
} |
#### File: JixunMoe/netease-cloud-music-api/index.py
```python
from flask import *
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from random import randint
import binascii, os, json
import yaml, requests
from redis_session import RedisSessionInterface
# Load and parse config file
config = yaml.load(file('config.yaml', 'r'))
encrypt = config['encrypt']
app = Flask(__name__, static_url_path='/static')
app.config['recaptcha'] = config['recaptcha']
app.debug = config['debug']
app.session_interface = RedisSessionInterface(config['redis'])
def aesEncrypt(text, secKey):
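    # Pad to a 16-byte boundary (PKCS#7 style), encrypt with AES in ECB mode
    # (mode value 1 in PyCrypto), and return the uppercase hex ciphertext.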
pad = 16 - len(text) % 16
text = text + pad * chr(pad)
encryptor = AES.new(secKey, 1)
cipherText = encryptor.encrypt(text)
cipherText = binascii.b2a_hex(cipherText).upper()
return cipherText
def encrypted_request(jsonDict):
jsonStr = json.dumps(jsonDict, separators = (",", ":"))
encText = aesEncrypt(jsonStr, secretKey)
data = {
'eparams': encText,
}
return data
nonce = encrypt['nonce']
n, e = int(encrypt["n"], 16), int(encrypt["e"], 16)
def req_netease(url, payload):
data = encrypted_request(payload)
r = requests.post(url, data = data, headers = headers)
result = json.loads(r.text)
if result['code'] != 200:
return None
return result
def req_netease_detail(songId):
payload = {"method": "POST", "params": {"c": "[{id:%d}]" % songId}, "url": "http://music.163.com/api/v3/song/detail"}
data = req_netease('http://music.163.com/api/linux/forward', payload)
if data is None or data['songs'] is None or len(data['songs']) != 1:
return None
song = data['songs'][0]
return song
def req_netease_url(songId, rate):
payload = {"method": "POST", "params": {"ids": [songId],"br": rate}, "url": "http://music.163.com/api/song/enhance/player/url"}
data = req_netease('http://music.163.com/api/linux/forward', payload)
if data is None or data['data'] is None or len(data['data']) != 1:
return None
song = data['data'][0]
if song['code'] != 200 or song['url'] is None:
return None
# song['url'] = song['url'].replace('http:', '')
return song
def req_recaptcha(response, remote_ip):
r = requests.post('https://www.google.com/recaptcha/api/siteverify', data = {
'secret': config['recaptcha']['secret'],
'response': response,
'remoteip': remote_ip
});
result = json.loads(r.text);
print("req_recaptcha from %s, result: %s" % (remote_ip, r.text))
return result['success']
print("Generating secretKey for current session...")
secretKey = binascii.a2b_hex(encrypt['secret'])
headers = {
'Referer': 'http://music.163.com',
'X-Real-IP': '192.168.127.12',
'Cookie': 'os=linux; appver=1.0.0.1026; osver=Ubuntu%2016.10',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
}
def sign_request(songId, rate):
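    # SHA-256 over the song id, bitrate and a server-side salt; the resulting digest
    # must match the <sign> path segment before a download URL is resolved.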
h = SHA256.new()
h.update(str(songId))
h.update(str(rate))
h.update(config["sign_salt"])
return h.hexdigest()
def is_verified(session):
if not config['recaptcha']:
return True
return 'verified' in session and session['verified'] > 0
def set_verified(session):
if config['recaptcha']:
session['verified'] = randint(10, 20)
def decrease_verified(session):
if config['recaptcha']:
session['verified'] -= 1;
@app.route("/")
def index():
verified = is_verified(session)
return render_template('index.j2', verified = verified)
@app.route("/backdoor")
def backdoor():
if app.debug:
set_verified(session)
return 'ok!'
@app.route('/s/<path:path>')
def static_route(path):
return app.send_static_file(path)
@app.route("/sign/<int:songId>/<int:rate>", methods=['POST'])
def generate_sign(songId, rate):
if not is_verified(session):
        # first check the Google reCAPTCHA response
if 'g-recaptcha-response' not in request.form \
or not req_recaptcha(
request.form['g-recaptcha-response'],
request.headers[config['ip_header']] if config['ip_header'] else request.remote_addr
):
#
return jsonify({"verified": is_verified(session), "errno": 2})
set_verified(session)
    # fetch the song details, then sign the request
decrease_verified(session)
song = req_netease_detail(songId)
if song is None:
return jsonify({"verified": is_verified(session), "errno": 1})
return jsonify({
"verified": True,
"sign": sign_request(songId, rate),
"song": {
"id": song['id'],
"name": song['name'],
"artist": [{"id": a['id'], "name": a['name']} for a in song['ar']]
}
})
@app.route("/<int:songId>/<int:rate>/<sign>")
def get_song_url(songId, rate, sign):
if sign_request(songId, rate) != sign:
return abort(403)
song = req_netease_url(songId, rate)
if song is None:
return abort(404)
response = redirect(song['url'], code=302)
response.headers["max-age"] = song['expi']
return response
if __name__ == "__main__":
print("Running...")
app.run()
``` |
{
"source": "JiyanBlack/synthpop_silo",
"score": 3
} |
#### File: silodemos/csynthpop/zone_synthesizer.py
```python
from functools import partial
import multiprocessing
import pandas as pd
from .synthesizer import synthesize, enable_logging
from . import categorizer as cat
def load_data(hh_marginal_file, person_marginal_file,
hh_sample_file, person_sample_file):
"""
Load and process data inputs from .csv files on disk
Parameters
----------
hh_marginal_file : string
path to a csv file of household marginals
person_marginal_file : string
path to a csv file of person marginals
hh_sample_file : string
path to a csv file of sample household records to be drawn from
person_sample_file : string
path to a csv file of sample person records
Returns
-------
hh_marg : pandas.DataFrame
processed and properly indexed household marginals table
p_marg : pandas.DataFrame
processed and properly indexed person marginals table
hh_sample : pandas.DataFrame
household sample table
p_sample : pandas.DataFrame
person sample table
xwalk : list of tuples
list of marginal-to-sample geography crosswalks to iterate over
"""
print("Custom Synthpop!")
hh_sample = pd.read_csv(hh_sample_file)
p_sample = pd.read_csv(person_sample_file)
hh_marg = pd.read_csv(hh_marginal_file, header=[0, 1], index_col=0)
hh_marg.columns.levels[0].name = 'cat_name'
hh_marg.columns.levels[1].name = 'cat_values'
xwalk = list(zip(hh_marg.index, hh_marg.sample_geog.unstack().values))
hh_marg = hh_marg.drop('sample_geog', axis=1, level=0)
p_marg = pd.read_csv(person_marginal_file, header=[0, 1], index_col=0)
p_marg.columns.levels[0].name = 'cat_name'
p_marg.columns.levels[1].name = 'cat_values'
return hh_marg, p_marg, hh_sample, p_sample, xwalk
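# Illustrative usage sketch (file names are assumptions, not part of the original repo):
#   hh_marg, p_marg, hh_s, p_s, xwalk = load_data('hh_marginals.csv', 'p_marginals.csv',
#                                                 'hh_sample.csv', 'p_sample.csv')
#   households, persons, stats = synthesize_all_zones(hh_marg, p_marg, hh_s, p_s, xwalk)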
def synthesize_all_zones(hh_marg, p_marg, hh_sample, p_sample, xwalk):
"""
Iterate over a geography crosswalk list and synthesize in-line
Parameters
----------
hh_marg : pandas.DataFrame
processed and properly indexed household marginals table
p_marg : pandas.DataFrame
processed and properly indexed person marginals table
hh_sample : pandas.DataFrame
household sample table
p_sample : pandas.DataFrame
person sample table
xwalk : list of tuples
list of marginal-to-sample geography crosswalks to iterate over
Returns
-------
all_households : pandas.DataFrame
synthesized household records
all_persons : pandas.DataFrame
synthesized person records
all_stats : pandas.DataFrame
chi-square and p-score values for each marginal geography drawn
"""
hh_list = []
people_list = []
stats_list = []
hh_index_start = 1
for geogs in xwalk:
households, people, stats = synthesize_zone(hh_marg, p_marg,
hh_sample, p_sample, geogs)
stats_list.append(stats)
hh_list.append(households)
people_list.append(people)
if len(households) > 0:
hh_index_start = households.index.values[-1] + 1
all_households = pd.concat(hh_list)
all_persons = pd.concat(people_list)
all_households, all_persons = synch_hhids(all_households, all_persons)
all_stats = pd.DataFrame(stats_list)
return all_households, all_persons, all_stats
def synch_hhids(households, persons):
"""
Synchronize household ids with corresponding person records
Parameters
----------
households : pandas.DataFrame
full households table with id values sequential by geog
persons : pandas.DataFrame
full persons table with id values sequential by geog
Returns
-------
households : pandas.DataFrame
households table with reindexed sequential household ids
persons : pandas.DataFrame
persons table synchronized with updated household ids
"""
households['hh_id'] = households.index
households['household_id'] = range(1, len(households.index)+1)
persons = pd.merge(
persons, households[['household_id', 'geog', 'hh_id']],
how='left', left_on=['geog', 'hh_id'], right_on=['geog', 'hh_id'],
suffixes=('', '_x')).drop('hh_id', axis=1)
households.set_index('household_id', inplace=True)
households.drop('hh_id', axis=1, inplace=True)
return households, persons
def synthesize_zone(hh_marg, p_marg, hh_sample, p_sample, xwalk):
"""
Synthesize a single zone (Used within multiprocessing synthesis)
Parameters
----------
hh_marg : pandas.DataFrame
processed and properly indexed household marginals table
p_marg : pandas.DataFrame
processed and properly indexed person marginals table
hh_sample : pandas.DataFrame
household sample table
p_sample : pandas.DataFrame
person sample table
xwalk : tuple
tuple of marginal-to-sample geography crosswalk
Returns
-------
households : pandas.DataFrame
synthesized household records
people : pandas.DataFrame
synthesized person records
stats : pandas.DataFrame
chi-square and p-score values for marginal geography drawn
"""
hhs, hh_jd = cat.joint_distribution(
hh_sample[hh_sample.sample_geog == xwalk[1]],
cat.category_combinations(hh_marg.columns))
ps, p_jd = cat.joint_distribution(
p_sample[p_sample.sample_geog == xwalk[1]],
cat.category_combinations(p_marg.columns))
households, people, people_chisq, people_p = synthesize(
hh_marg.loc[xwalk[0]], p_marg.loc[xwalk[0]], hh_jd, p_jd, hhs, ps,
hh_index_start=1)
households['geog'] = xwalk[0]
people['geog'] = xwalk[0]
stats = {'geog': xwalk[0], 'chi-square': people_chisq, 'p-score': people_p}
return households, people, stats
def multiprocess_synthesize(hh_marg, p_marg, hh_sample,
p_sample, xwalk, cores=False):
"""
Synthesize for a set of marginal geographies via multiprocessing
Parameters
----------
hh_marg : pandas.DataFrame
processed and properly indexed household marginals table
p_marg : pandas.DataFrame
processed and properly indexed person marginals table
hh_sample : pandas.DataFrame
household sample table
p_sample : pandas.DataFrame
person sample table
xwalk : list of tuples
list of marginal-to-sample geography crosswalks to iterate over
cores : integer, optional
number of cores to use in the multiprocessing pool. defaults to
multiprocessing.cpu_count() - 1
Returns
-------
all_households : pandas.DataFrame
synthesized household records
all_persons : pandas.DataFrame
synthesized person records
all_stats : pandas.DataFrame
chi-square and p-score values for each marginal geography drawn
"""
cores = cores if cores else (multiprocessing.cpu_count()-1)
part = partial(synthesize_zone, hh_marg, p_marg, hh_sample, p_sample)
p = multiprocessing.Pool(cores)
results = p.map(part, list(xwalk))
p.close()
p.join()
hh_list = [result[0] for result in results]
people_list = [result[1] for result in results]
all_stats = pd.DataFrame([result[2] for result in results])
all_households = pd.concat(hh_list)
all_persons = pd.concat(people_list)
all_households, all_persons = synch_hhids(all_households, all_persons)
return all_persons, all_households, all_stats
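# Illustrative usage sketch (note the return order: persons, households, stats):
#   persons, households, stats = multiprocess_synthesize(hh_marg, p_marg, hh_s, p_s,
#                                                        xwalk, cores=4)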
``` |
{
"source": "jiyangchen/benchmarks",
"score": 2
} |
#### File: rbe_benchmarks/tools/generate_yml.py
```python
import argparse
import logging
import os
from string import maketrans
import k8s_tensorflow_lib
import yaml
_TEST_NAME_ENV_VAR = 'TF_DIST_BENCHMARK_NAME'
_PORT = 5000
def _ConvertToValidName(name):
"""Converts to name that we can use as a kubernetes job prefix.
Args:
name: benchmark name.
Returns:
Benchmark name that can be used as a kubernetes job prefix.
"""
return name.translate(maketrans('/:_', '---'))
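  # Illustrative example (assumed input): "resnet50/gpu:synthetic_data" -> "resnet50-gpu-synthetic-data".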
def _GetGpuVolumeMounts(flags):
"""Get volume specs to add to Kubernetes config.
Args:
flags: flags
Returns:
Volume specs in the format: volume_name: (hostPath, podPath).
"""
volume_specs = {}
if flags.nvidia_lib_dir:
volume_specs['nvidia-libraries'] = (flags.nvidia_lib_dir, '/usr/lib/nvidia')
if flags.cuda_lib_dir:
cuda_library_files = ['libcuda.so', 'libcuda.so.1', 'libcudart.so']
for cuda_library_file in cuda_library_files:
lib_name = cuda_library_file.split('.')[0]
volume_specs['cuda-libraries-%s' % lib_name] = (
os.path.join(flags.cuda_lib_dir, cuda_library_file),
os.path.join('/usr/lib/cuda/', cuda_library_file))
return volume_specs
def main():
parser = argparse.ArgumentParser()
parser.register(
'type', 'bool', lambda v: v.lower() in ('true', 't', 'y', 'yes'))
parser.add_argument(
'--benchmark_configs_file', type=str, default=None, required=True,
help='YAML file with benchmark configs.')
parser.add_argument(
'--benchmark_config_output', type=str, default=None, required=True,
help='YAML file to store final config.')
parser.add_argument(
'--docker_image', type=str, default=None, required=True,
      help='Docker image to use on K8S to run test.')
parser.add_argument(
'--cuda_lib_dir', type=str, default=None, required=False,
help='Directory where cuda library files are located on gcloud node.')
parser.add_argument(
'--nvidia_lib_dir', type=str, default=None, required=False,
help='Directory where nvidia library files are located on gcloud node.')
flags, _ = parser.parse_known_args()
logging.basicConfig(level=logging.DEBUG)
config_base_path = os.path.dirname(__file__)
config_text = open(
os.path.join(config_base_path, flags.benchmark_configs_file), 'r').read()
configs = yaml.load(config_text)
# TODO(annarev): run benchmarks in parallel instead of sequentially.
for config in configs:
name = _ConvertToValidName(str(config['benchmark_name']))
env_vars = {
_TEST_NAME_ENV_VAR: name
}
gpu_count = (0 if 'gpus_per_machine' not in config
else config['gpus_per_machine'])
volumes = {}
if gpu_count > 0:
volumes = _GetGpuVolumeMounts(flags)
env_vars['LD_LIBRARY_PATH'] = (
'/usr/lib/cuda:/usr/lib/nvidia:/usr/lib/x86_64-linux-gnu')
env_vars.update(config.get('env_vars', {}))
args = config.get('args', {})
kubernetes_config = k8s_tensorflow_lib.GenerateConfig(
config['worker_count'],
config['ps_count'],
_PORT,
request_load_balancer=False,
docker_image=flags.docker_image,
name_prefix=name,
additional_args=args,
env_vars=env_vars,
volumes=volumes,
use_shared_volume=False,
use_cluster_spec=False,
gpu_limit=gpu_count)
with open(flags.benchmark_config_output, 'w') as output_config_file:
output_config_file.write(kubernetes_config)
if __name__ == '__main__':
main()
``` |
{
"source": "JiYangE/All-IT-eBooks-Spider",
"score": 3
} |
#### File: JiYangE/All-IT-eBooks-Spider/crawler.py
```python
import re
import time
import urllib.request
import conf as cf
BASE_URL = 'http://www.allitebooks.com'
class MyCrawler:
def __init__(self, base_url=cf.BASE_URL, header=cf.FAKE_HEADER, start_page=1):
self.base_url = base_url
self.start_page = start_page
self.headers = header
# 链接代理
def build_proxy(self):
proxy = cf.PROXY
proxy_support = urllib.request.ProxyHandler(proxy)
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)
def fetch_book_name_list(self):
while True:
try:
req = urllib.request.Request(
self.base_url + '/page/{}'.format(self.start_page), headers=self.headers)
html = urllib.request.urlopen(req)
doc = html.read().decode('utf8')
alist = list(set(re.findall(cf.BOOK_LINK_PATTERN, doc)))
print('Now working on page {}\n'.format(self.start_page))
time.sleep(20)
self.start_page += 1
self.fetch_download_link(alist)
except urllib.error.HTTPError as err:
print(err.msg)
break
def fetch_download_link(self, alist):
f = open('result.txt', 'a')
for item in alist:
req = urllib.request.Request(item)
html = urllib.request.urlopen(req)
doc = html.read().decode('utf8')
url = re.findall(cf.DOWNLOAD_LINK_PATTERN, doc)[0]
print('Storing {}'.format(url))
f.write(url + '\n')
time.sleep(7)
f.close()
def run(self):
self.fetch_book_name_list()
if __name__ == '__main__':
mc = MyCrawler()
# mc.build_proxy()
mc.run()
``` |
{
"source": "jiyanggao/CTAP",
"score": 2
} |
#### File: CTAP/TAR/dataset.py
```python
import numpy as np
from math import sqrt
import os
import random
import pickle
def calculate_IoU(i0,i1):
union=(min(i0[0],i1[0]) , max(i0[1],i1[1]))
inter=(max(i0[0],i1[0]) , min(i0[1],i1[1]))
iou=1.0*(inter[1]-inter[0])/(union[1]-union[0])
return iou
'''
A class that handles the training set
'''
class TrainingDataSet(object):
def __init__(self,flow_feat_dir,appr_feat_dir,clip_gt_path,background_path,batch_size,movie_length_info,ctx_num,central_num, unit_feature_size,unit_size,
pos_neg_ratio=10.0):
#it_path: image_token_file path
self.ctx_num=ctx_num
self.unit_feature_size=unit_feature_size
self.unit_size=unit_size
self.batch_size=batch_size
self.movie_length_info=movie_length_info
self.visual_feature_dim=self.unit_feature_size
self.flow_feat_dir=flow_feat_dir
self.appr_feat_dir=appr_feat_dir
self.training_samples=[]
self.central_num=central_num
print "Reading training data list from "+clip_gt_path+" and "+background_path
db_size = 0
with open(clip_gt_path) as f:
db_size += len(f.readlines())
with open(background_path) as f:
db_size += len(f.readlines())
with open(clip_gt_path) as f:
for l in f:
movie_name=l.rstrip().split(" ")[0]
clip_start=float(l.rstrip().split(" ")[1])
clip_end=float(l.rstrip().split(" ")[2])
gt_start=float(l.rstrip().split(" ")[3])
gt_end=float(l.rstrip().split(" ")[4])
round_gt_start=np.round(gt_start/unit_size)*self.unit_size+1
round_gt_end=np.round(gt_end/unit_size)*self.unit_size+1
self.training_samples.append((movie_name,clip_start,clip_end,gt_start,gt_end,round_gt_start,round_gt_end,1))
print str(len(self.training_samples))+" training samples are read"
positive_num=len(self.training_samples)*1.0
with open(background_path) as f:
for l in f:
# control the ratio between background samples and positive samples to be 10:1
if random.random()>pos_neg_ratio*positive_num/db_size: continue
movie_name=l.rstrip().split(" ")[0]
clip_start=float(l.rstrip().split(" ")[1])
clip_end=float(l.rstrip().split(" ")[2])
self.training_samples.append((movie_name,clip_start,clip_end,0,0,0,0,0))
self.num_samples=len(self.training_samples)
print str(len(self.training_samples))+" training samples are read"
def calculate_regoffset(self,clip_start,clip_end,round_gt_start,round_gt_end):
start_offset=(round_gt_start-clip_start)/self.unit_size
end_offset=(round_gt_end-clip_end)/self.unit_size
return start_offset, end_offset
'''
Get the central features
'''
def get_pooling_feature(self,flow_feat_dir,appr_feat_dir,movie_name,start,end):
swin_step=self.unit_size
all_feat=np.zeros([0,self.unit_feature_size],dtype=np.float32)
current_pos=start
while current_pos<end:
swin_start=current_pos
swin_end=swin_start+swin_step
flow_feat=np.load(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
appr_feat=np.load(appr_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
flow_feat=flow_feat/np.linalg.norm(flow_feat)
appr_feat=appr_feat/np.linalg.norm(appr_feat)
#feat=flow_feat
feat=np.hstack((flow_feat,appr_feat))
all_feat=np.vstack((all_feat,feat))
current_pos+=swin_step
pool_feat=all_feat
return pool_feat
'''
Get the past (on the left of the central unit) context features
'''
def get_left_context_feature(self,flow_feat_dir,appr_feat_dir,movie_name,start,end):
swin_step=self.unit_size
all_feat=np.zeros([0,self.unit_feature_size],dtype=np.float32)
count=0
current_pos=start
context_ext=False
while count<self.ctx_num/2:
swin_start=current_pos-swin_step
swin_end=current_pos
if os.path.exists(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy"):
flow_feat=np.load(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
appr_feat=np.load(appr_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
flow_feat=flow_feat/np.linalg.norm(flow_feat)
appr_feat=appr_feat/np.linalg.norm(appr_feat)
#feat=flow_feat
feat=np.hstack((flow_feat,appr_feat))
all_feat=np.vstack((all_feat,feat))
context_ext=True
current_pos-=swin_step
count+=1
count=0
current_pos=start
while count<self.ctx_num/2:
swin_start=current_pos
swin_end=current_pos+swin_step
if os.path.exists(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy"):
flow_feat=np.load(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
appr_feat=np.load(appr_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
flow_feat=flow_feat/np.linalg.norm(flow_feat)
appr_feat=appr_feat/np.linalg.norm(appr_feat)
#feat=flow_feat
feat=np.hstack((flow_feat,appr_feat))
all_feat=np.vstack((all_feat,feat))
context_ext=True
current_pos+=swin_step
count+=1
if context_ext:
pool_feat=all_feat
else:
# print "no left "+str(start)
pool_feat=np.zeros([0,self.unit_feature_size],dtype=np.float32)
#print pool_feat.shape
return pool_feat
'''
Get the future (on the right of the central unit) context features
'''
def get_right_context_feature(self,flow_feat_dir,appr_feat_dir,movie_name,start,end):
swin_step=self.unit_size
all_feat=np.zeros([0,self.unit_feature_size],dtype=np.float32)
count=0
current_pos=end
context_ext=False
while count<self.ctx_num/2:
swin_start=current_pos
swin_end=current_pos+swin_step
if os.path.exists(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy"):
flow_feat=np.load(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
appr_feat=np.load(appr_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
flow_feat=flow_feat/np.linalg.norm(flow_feat)
appr_feat=appr_feat/np.linalg.norm(appr_feat)
#feat=flow_feat
feat=np.hstack((flow_feat,appr_feat))
all_feat=np.vstack((all_feat,feat))
context_ext=True
current_pos+=swin_step
count+=1
count=0
current_pos=end
while count<self.ctx_num/2:
swin_start=current_pos-swin_step
swin_end=current_pos
if os.path.exists(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy"):
flow_feat=np.load(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
appr_feat=np.load(appr_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
flow_feat=flow_feat/np.linalg.norm(flow_feat)
appr_feat=appr_feat/np.linalg.norm(appr_feat)
#feat=flow_feat
feat=np.hstack((flow_feat,appr_feat))
all_feat=np.vstack((all_feat,feat))
context_ext=True
current_pos-=swin_step
count+=1
if context_ext:
pool_feat=all_feat
else:
# print "no right "+str(end)
pool_feat=np.zeros([0,self.unit_feature_size],dtype=np.float32)
#print pool_feat.shape
return pool_feat
def sample_to_number(self,all_feats,num):
sampled_feats=np.zeros([num,all_feats.shape[1]],dtype=np.float32)
if all_feats.shape[0]==0: return sampled_feats
if all_feats.shape[0]==num: return all_feats
else:
for k in range(num):
sampled_feats[k]=all_feats[all_feats.shape[0]/num*k,:]
return sampled_feats
def next_batch(self):
random_batch_index=random.sample(range(self.num_samples),self.batch_size)
central_batch=np.zeros([self.batch_size,self.central_num, self.visual_feature_dim])
left_batch=np.zeros([self.batch_size, self.ctx_num, self.visual_feature_dim])
right_batch=np.zeros([self.batch_size, self.ctx_num, self.visual_feature_dim])
label_batch=np.zeros([self.batch_size],dtype=np.int32)
offset_batch=np.zeros([self.batch_size,2],dtype=np.float32)
index=0
while index < self.batch_size:
k=random_batch_index[index]
movie_name=self.training_samples[k][0]
if self.training_samples[k][7]==1:
clip_start=self.training_samples[k][1]
clip_end=self.training_samples[k][2]
round_gt_start=self.training_samples[k][5]
round_gt_end=self.training_samples[k][6]
start_offset,end_offset=self.calculate_regoffset(clip_start,clip_end,round_gt_start,round_gt_end)
featmap=self.get_pooling_feature(self.flow_feat_dir, self.appr_feat_dir, movie_name,clip_start,clip_end)
left_feat=self.get_left_context_feature(self.flow_feat_dir, self.appr_feat_dir, movie_name,clip_start,clip_end)
right_feat=self.get_right_context_feature(self.flow_feat_dir, self.appr_feat_dir, movie_name,clip_start,clip_end)
featmap=self.sample_to_number(featmap,self.central_num)
right_feat=self.sample_to_number(right_feat,self.ctx_num)
left_feat=self.sample_to_number(left_feat,self.ctx_num)
central_batch[index,:,:]=featmap
left_batch[index,:,:]=left_feat
right_batch[index,:,:]=right_feat
label_batch[index]=1
offset_batch[index,0]=start_offset
offset_batch[index,1]=end_offset
#print str(clip_start)+" "+str(clip_end)+" "+str(round_gt_start)+" "+str(round_gt_end)+" "+str(start_offset)+" "+str(end_offset)
index+=1
else:
clip_start=self.training_samples[k][1]
clip_end=self.training_samples[k][2]
left_feat=self.get_left_context_feature(self.flow_feat_dir, self.appr_feat_dir,movie_name,clip_start,clip_end)
right_feat=self.get_right_context_feature(self.flow_feat_dir, self.appr_feat_dir,movie_name,clip_start,clip_end)
featmap=self.get_pooling_feature(self.flow_feat_dir, self.appr_feat_dir,movie_name,clip_start,clip_end)
featmap=self.sample_to_number(featmap,self.central_num)
right_feat=self.sample_to_number(right_feat,self.ctx_num)
left_feat=self.sample_to_number(left_feat,self.ctx_num)
central_batch[index,:,:]=featmap
left_batch[index,:,:]=left_feat
right_batch[index,:,:]=right_feat
label_batch[index]=0
offset_batch[index,0]=0
offset_batch[index,1]=0
index+=1
return central_batch, left_batch, right_batch, label_batch,offset_batch
'''
A class that handles the test set
'''
class TestingDataSet(object):
def __init__(self,flow_feat_dir,appr_feat_dir,test_clip_path,batch_size,ctx_num):
self.ctx_num=ctx_num
#il_path: image_label_file path
self.batch_size=batch_size
self.flow_feat_dir=flow_feat_dir
self.appr_feat_dir=appr_feat_dir
print "Reading testing data list from "+test_clip_path
self.test_samples=[]
with open(test_clip_path) as f:
for l in f:
movie_name=l.rstrip().split(" ")[0]
clip_start=float(l.rstrip().split(" ")[1])
clip_end=float(l.rstrip().split(" ")[2])
self.test_samples.append((movie_name,clip_start,clip_end))
self.num_samples=len(self.test_samples)
print "test clips number: "+str(len(self.test_samples))
``` |
{
"source": "JiyangJiang/CNS",
"score": 2
} |
#### File: asl/gui/calib_tab.py
```python
import wx
import wx.grid
from .widgets import TabPage
class AslCalibration(TabPage):
"""
Tab page containing calibration options
"""
def __init__(self, parent, idx, n):
TabPage.__init__(self, parent, "Calibration", idx, n)
self.calib_cb = self.checkbox("Enable Calibration", bold=True, handler=self.calib_changed)
self.calib_image_picker = self.file_picker("Calibration Image")
self.m0_type_ch = self.choice("M0 Type", choices=["Proton Density (long TR)", "Saturation Recovery"])
self.seq_tr_num = self.number("Sequence TR (s)", min=0,max=10,initial=6)
self.calib_gain_num = self.number("Calibration Gain", min=0,max=5,initial=1)
self.calib_mode_ch = self.choice("Calibration mode", choices=["Reference Region", "Voxelwise"])
self.section("Reference tissue")
self.ref_tissue_type_ch = self.choice("Type", choices=["CSF", "WM", "GM", "None"], handler=self.ref_tissue_type_changed)
self.ref_tissue_mask_picker = self.file_picker("Mask", optional=True)
self.ref_t1_num = self.number("Reference T1 (s)", min=0,max=5,initial=4.3)
self.seq_te_num = self.number("Sequence TE (ms)", min=0,max=30,initial=0)
self.ref_t2_num = self.number("Reference T2 (ms)", min=0,max=1000,initial=750, step=10)
self.blood_t2_num = self.number("Blood T2 (ms)", min=0,max=1000,initial=150, step=10)
self.coil_image_picker = self.file_picker("Coil Sensitivity Image", optional=True)
self.sizer.AddGrowableCol(2, 1)
self.SetSizer(self.sizer)
self.next_prev()
def calib(self): return self.calib_cb.IsChecked()
def m0_type(self): return self.m0_type_ch.GetSelection()
def seq_tr(self): return self.seq_tr_num.GetValue()
def seq_te(self): return self.seq_te_num.GetValue()
def calib_image(self): return self.calib_image_picker.GetPath()
def calib_gain(self): return self.calib_gain_num.GetValue()
def calib_mode(self): return self.calib_mode_ch.GetSelection()
def ref_tissue_type(self): return self.ref_tissue_type_ch.GetSelection()
def ref_tissue_type_name(self): return self.ref_tissue_type_ch.GetString(self.ref_tissue_type())
def ref_tissue_mask(self):
if self.ref_tissue_mask_picker.checkbox.IsChecked():
return self.ref_tissue_mask_picker.GetPath()
else:
return None
def ref_t1(self): return self.ref_t1_num.GetValue()
def ref_t2(self): return self.ref_t2_num.GetValue()
def blood_t2(self): return self.blood_t2_num.GetValue()
def coil_image(self):
if self.coil_image_picker.checkbox.IsChecked(): return self.coil_image_picker.GetPath()
else: return None
def ref_tissue_type_changed(self, event):
if self.ref_tissue_type() == 0: # CSF
self.ref_t1_num.SetValue(4.3)
self.ref_t2_num.SetValue(750)
elif self.ref_tissue_type() == 1: # WM
self.ref_t1_num.SetValue(1.0)
self.ref_t2_num.SetValue(50)
elif self.ref_tissue_type() == 2: # GM
self.ref_t1_num.SetValue(1.3)
self.ref_t2_num.SetValue(100)
self.update()
def calib_changed(self, event):
self.distcorr.calib_changed(self.calib())
self.update()
def wp_changed(self, wp):
self.update()
def update(self, event=None):
enable = self.calib()
self.m0_type_ch.Enable(enable)
self.seq_tr_num.Enable(enable and self.m0_type() == 0)
self.calib_image_picker.Enable(enable)
self.calib_gain_num.Enable(enable)
self.coil_image_picker.checkbox.Enable(enable)
if self.analysis.wp(): self.calib_mode_ch.SetSelection(1)
self.calib_mode_ch.Enable(enable and not self.analysis.wp())
self.ref_tissue_type_ch.Enable(enable and self.calib_mode() == 0)
if self.ref_tissue_type() == 3:
# Ref tissue = None - enforce mask
self.ref_tissue_mask_picker.checkbox.Enable(False)
self.ref_tissue_mask_picker.checkbox.SetValue(enable and self.calib_mode() == 0)
self.ref_tissue_mask_picker.Enable(enable and self.calib_mode() == 0)
else:
self.ref_tissue_mask_picker.checkbox.Enable(enable and self.calib_mode() == 0)
self.ref_tissue_mask_picker.Enable(enable and self.ref_tissue_mask_picker.checkbox.IsChecked() and self.calib_mode() == 0)
self.coil_image_picker.checkbox.Enable(enable and self.calib_mode() == 0)
self.coil_image_picker.Enable(enable and self.calib_mode() == 0 and self.coil_image_picker.checkbox.IsChecked())
self.seq_te_num.Enable(enable and self.calib_mode() == 0)
self.blood_t2_num.Enable(enable and self.calib_mode() == 0)
self.ref_t1_num.Enable(enable and self.calib_mode() == 0)
self.ref_t2_num.Enable(enable and self.calib_mode() == 0)
TabPage.update(self)
```
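The `ref_tissue_type_changed` handler above hard-codes default T1/T2 values for each reference tissue. As a hedged, standalone sketch (not part of the repository), the same defaults can be kept in a small lookup table; the dictionary below only restates the values visible in the handler:

```python
# Hypothetical helper restating the defaults used in AslCalibration.ref_tissue_type_changed.
# T1 is in seconds, T2 in milliseconds, matching the GUI fields above.
REF_TISSUE_DEFAULTS = {
    "CSF": {"t1": 4.3, "t2": 750},
    "WM":  {"t1": 1.0, "t2": 50},
    "GM":  {"t1": 1.3, "t2": 100},
}

def default_ref_values(tissue: str):
    """Return (T1 s, T2 ms) for a reference tissue, or None for 'None'/unknown tissues."""
    entry = REF_TISSUE_DEFAULTS.get(tissue)
    return (entry["t1"], entry["t2"]) if entry else None

print(default_ref_values("CSF"))  # (4.3, 750)
```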
#### File: asl/gui/run_box.py
```python
import sys
import os
import traceback
import tempfile
import shutil
import shlex
import subprocess
from threading import Thread
import nibabel as nib
import wx
from wx.lib.pubsub import pub
class OptionError(RuntimeError):
pass
class Mkdir:
def __init__(self, dirname):
self.dirname = dirname
def run(self):
if not os.path.exists(self.dirname):
os.makedirs(self.dirname)
return 0
class FslCmd:
def __init__(self, cmd):
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
fsldevdir = os.path.join(os.environ.get("FSLDEVDIR", ""), "bin")
fsldir = os.path.join(os.environ.get("FSLDIR", ""), "bin")
self.cmd = cmd
for d in (script_dir, fsldevdir, fsldir):
if os.path.exists(os.path.join(d, cmd)):
self.cmd = os.path.join(d, cmd)
break
def add(self, opt, val=None):
if val is not None:
self.cmd += " %s=%s" % (opt, str(val))
else:
self.cmd += " %s" % opt
def write_output(self, line):
wx.CallAfter(pub.sendMessage, "run_stdout", line=line)
def run(self):
self.write_output(self.cmd + "\n")
args = shlex.split(self.cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while 1:
retcode = p.poll() #returns None while subprocess is running
line = p.stdout.readline()
self.write_output(line)
if retcode is not None: break
self.write_output("\nReturn code: %i\n\n" % retcode)
return retcode
def __str__(self): return self.cmd
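# Usage note (added, hedged): FslCmd resolves the executable against the script directory,
# then $FSLDEVDIR/bin, then $FSLDIR/bin, and simply accumulates a command string. A sketch:
#   cmd = FslCmd("bet")
#   cmd.add('"struc.nii.gz"')
#   cmd.add('"struc_brain"')
#   cmd.add("-f", 0.3)   # appends ' -f=0.3', because add() joins opt and val with '='
#   cmd.run()            # streams stdout to the GUI via the "run_stdout" pubsub topic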
class CmdRunner(Thread):
def __init__(self, cmds, done_cb):
Thread.__init__(self)
self.cmds = cmds
self.done_cb = done_cb
def run(self):
ret = -1
try:
for cmd in self.cmds:
ret = -1
ret = cmd.run()
if ret != 0:
break
finally:
wx.CallAfter(pub.sendMessage, "run_finished", retcode=ret)
class AslRun(wx.Frame):
"""
Determines the commands to run and displays them in a window
"""
# The options we need to pass to oxford_asl for various data orderings
    order_opts = {"trp" : "--ibf=tis --iaf=diff",
                  "trp,tc" : "--ibf=tis --iaf=tcb",
                  "trp,ct" : "--ibf=tis --iaf=ctb",
                  "rtp" : "--ibf=rpt --iaf=diff",
                  "rtp,tc" : "--ibf=rpt --iaf=tcb",
                  "rtp,ct" : "--ibf=rpt --iaf=ctb",
                  "ptr,tc" : "--ibf=tis --iaf=tc",
                  "ptr,ct" : "--ibf=tis --iaf=ct",
                  "prt,tc" : "--ibf=rpt --iaf=tc",
                  "prt,ct" : "--ibf=rpt --iaf=ct"}
def __init__(self, parent, run_btn, run_label):
wx.Frame.__init__(self, parent, title="Run", size=(600, 400), style=wx.DEFAULT_FRAME_STYLE)
self.run_seq = None
self.run_btn = run_btn
self.run_btn.Bind(wx.EVT_BUTTON, self.dorun)
self.run_label = run_label
self.preview_data = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.output_text = wx.TextCtrl(self, style=wx.TE_READONLY | wx.TE_MULTILINE)
font = wx.Font(8, wx.TELETYPE, wx.NORMAL, wx.NORMAL)
self.output_text.SetFont(font)
self.sizer.Add(self.output_text, 1, flag=wx.EXPAND)
self.SetSizer(self.sizer)
self.Bind(wx.EVT_CLOSE, self.close)
pub.subscribe(self.write_output, "run_stdout")
pub.subscribe(self.finished, "run_finished")
def write_output(self, line):
self.output_text.AppendText(line)
def close(self, _):
self.Hide()
def finished(self, retcode):
if retcode != 0:
self.write_output("\nWARNING: command failed\n")
self.update()
def dorun(self, _):
if self.run_seq:
self.Show()
self.Raise()
self.output_text.Clear()
self.run_btn.Enable(False)
self.run_label.SetForegroundColour(wx.Colour(0, 0, 128))
self.run_label.SetLabel("Running - Please Wait")
runner = CmdRunner(self.run_seq, self.finished)
runner.start()
def update(self):
"""
Get the sequence of commands and enable the run button if options are valid. Otherwise
display the first error in the status label
"""
self.run_seq = None
try:
self.run_seq = self.get_run_sequence()
self.run_label.SetForegroundColour(wx.Colour(0, 128, 0))
self.run_label.SetLabel("Ready to Go")
self.run_btn.Enable(True)
except (OptionError, nib.filebasedimages.ImageFileError) as e:
self.run_btn.Enable(False)
self.run_label.SetForegroundColour(wx.Colour(255, 0, 0))
self.run_label.SetLabel(str(e))
except:
# Any other exception is a program bug - report it to STDERR
self.run_btn.Enable(False)
self.run_label.SetForegroundColour(wx.Colour(255, 0, 0))
self.run_label.SetLabel("Unexpected error - see console and report as a bug")
            traceback.print_exc()
def check_exists(self, label, fname):
if not os.path.exists(fname):
raise OptionError("%s - no such file or directory" % label)
def get_preview_data(self):
"""
Run ASL_FILE for perfusion weighted image - just for the preview
"""
infile = self.input.data()
if infile == "":
            # Don't bother if we have no input file yet!
return None
tempdir = tempfile.mkdtemp()
self.preview_data = None
try:
meanfile = "%s/mean.nii.gz" % tempdir
cmd = FslCmd("asl_file")
cmd.add('--data="%s"' % self.input.data())
cmd.add("--ntis=%i" % self.input.ntis())
cmd.add('--mean="%s"' % meanfile)
cmd.add(" ".join(self.get_data_order_options()))
cmd.run()
img = nib.load(meanfile)
return img.get_data()
except:
traceback.print_exc()
return None
finally:
shutil.rmtree(tempdir)
def get_data_order_options(self):
"""
Check data order is supported and return the relevant options
"""
order, tagfirst = self.input.data_order()
diff_opt = ""
if self.input.tc_pairs():
if tagfirst: order += ",tc"
else: order += ",ct"
diff_opt = "--diff"
if order not in self.order_opts:
raise OptionError("This data ordering is not supported by ASL_FILE")
else:
return self.order_opts[order], diff_opt
def get_run_sequence(self):
"""
Get the sequence of commands for the selected options, throwing exception
if any problems are found (e.g. files don't exist, mandatory options not specified)
Exception text is reported by the GUI
"""
run = []
# Check input file exists, is an image and the TIs/repeats/TC pairs is consistent
self.check_exists("Input data", self.input.data())
img = nib.load(self.input.data())
if len(img.shape) != 4:
raise OptionError("Input data is not a 4D image")
nvols = img.shape[3]
N = self.input.ntis()
if self.input.tc_pairs(): N *= 2
if nvols % N != 0:
self.input.nrepeats_label.SetLabel("<Invalid>")
raise OptionError("Input data contains %i volumes - not consistent with %i TIs and TC pairs=%s" % (img.shape[3], self.input.ntis(), self.input.tc_pairs()))
else:
self.input.nrepeats_label.SetLabel("%i" % (nvols / N))
self.preview.order_preview.n_tis = self.input.ntis()
self.preview.order_preview.n_repeats = nvols / N
self.preview.order_preview.tc_pairs = self.input.tc_pairs()
self.preview.order_preview.tagfirst = self.input.tc_ch.GetSelection() == 0
self.preview.order_preview.Refresh()
# Build OXFORD_ASL command
outdir = self.analysis.outdir()
if os.path.exists(outdir) and not os.path.isdir(outdir):
raise OptionError("Output directory already exists and is a file")
run.append(Mkdir(outdir))
# Input data
cmd = FslCmd("oxford_asl")
cmd.add(' -i "%s"' % self.input.data())
cmd.add(self.get_data_order_options()[0])
cmd.add("--tis %s" % ",".join(["%.2f" % v for v in self.input.tis()]))
cmd.add("--bolus %s" % ",".join(["%.2f" % v for v in self.input.bolus_dur()]))
if self.input.labelling() == 1:
cmd.add("--casl")
if self.input.readout() == 1:
# 2D multi-slice readout - must give dt in seconds
cmd.add("--slicedt %.5f" % (self.input.time_per_slice() / 1000))
if self.input.multiband():
cmd.add("--sliceband %i" % self.input.slices_per_band())
# Structure - may require FSL_ANAT to be run
fsl_anat_dir = self.structure.existing_fsl_anat()
struc_image = self.structure.struc_image()
if fsl_anat_dir is not None:
# Have an existing FSL_ANAT directory
self.check_exists("FSL_ANAT", fsl_anat_dir)
cmd.add('--fslanat="%s"' % fsl_anat_dir)
elif self.structure.run_fsl_anat():
# FIXME set this up and pass in the dir using --fslanat
self.check_exists("Structural image", struc_image)
fsl_anat = FslCmd("fsl_anat")
fsl_anat.add('-i "%s"' % struc_image)
fsl_anat.add('-o "%s/struc"' % outdir)
run.append(fsl_anat)
cmd.add('--fslanat="%s/struc.anat"' % outdir)
elif struc_image is not None:
# Providing independent structural data
self.check_exists("Structural image", struc_image)
cp = FslCmd("imcp")
cp.add('"%s"' % struc_image)
cp.add('"%s/structural_head"' % outdir)
run.append(cp)
cmd.add('-s "%s/structural_head"' % outdir)
# Brain image can be provided or can use BET
brain_image = self.structure.struc_image_brain()
if brain_image is not None:
self.check_exists("Structural brain image", brain_image)
cp = FslCmd("imcp")
cp.add('"%s"' % brain_image)
cp.add('"%s/structural_brain"' % outdir)
run.append(cp)
else:
bet = FslCmd("bet")
bet.add('"%s"' % struc_image)
bet.add('"%s/structural_brain"' % outdir)
run.append(bet)
cmd.add('--sbrain "%s/structural_brain"' % outdir)
else:
# No structural data
pass
# Structure transform
if self.structure.transform():
if self.structure.transform_type() == self.structure.TRANS_MATRIX:
self.check_exists("Transformation matrix", self.structure.transform_file())
cmd.add('--asl2struc "%s"' % self.structure.transform_file())
elif self.structure.transform_type() == self.structure.TRANS_IMAGE:
self.check_exists("Warp image", self.structure.transform_file())
cmd.add('--regfrom "%s"' % self.structure.transform_file())
else:
# This implies that FSLANAT output is being used, and hence
# --fslanat is already specified
pass
# Calibration - do this via oxford_asl rather than calling asl_calib separately
if self.calibration.calib():
self.check_exists("Calibration image", self.calibration.calib_image())
cmd.add('-c "%s"' % self.calibration.calib_image())
if self.calibration.m0_type() == 0:
#calib.add("--mode longtr")
cmd.add("--tr %.2f" % self.calibration.seq_tr())
else:
raise OptionError("Saturation recovery not supported by oxford_asl")
#calib.add("--mode satrevoc")
#calib.add("--tis %s" % ",".join([str(v) for v in self.input.tis()]))
# FIXME change -c option in sat recov mode?
cmd.add("--cgain %.2f" % self.calibration.calib_gain())
if self.calibration.calib_mode() == 0:
cmd.add("--cmethod single")
cmd.add("--tissref %s" % self.calibration.ref_tissue_type_name().lower())
cmd.add("--te %.2f" % self.calibration.seq_te())
cmd.add("--t1csf %.2f" % self.calibration.ref_t1())
cmd.add("--t2csf %.2f" % self.calibration.ref_t2())
cmd.add("--t2bl %.2f" % self.calibration.blood_t2())
if self.calibration.ref_tissue_mask() is not None:
self.check_exists("Calibration reference tissue mask", self.calibration.ref_tissue_mask())
cmd.add('--csf "%s"' % self.calibration.ref_tissue_mask())
if self.calibration.coil_image() is not None:
self.check_exists("Coil sensitivity reference image", self.calibration.coil_image())
cmd.add('--cref "%s"' % self.calibration.coil_image())
else:
cmd.add("--cmethod voxel")
# Distortion correction
if self.distcorr.distcorr():
if self.distcorr.distcorr_type() == self.distcorr.FIELDMAP:
# Fieldmap image
fmap = self.distcorr.fmap()
self.check_exists("Fieldmap image", fmap)
cmd.add('--fmap="%s"' % fmap)
fmap_mag = self.distcorr.fmap_mag()
self.check_exists("Fieldmap magnitude image", fmap_mag)
cmd.add('--fmapmag="%s"' % fmap_mag)
fmap_be = self.distcorr.fmap_mag_be()
if fmap_be is not None:
self.check_exists("Brain-extracted fieldmap magnitude image", fmap_be)
cmd.add('--fmapmagbrain="%s"' % fmap_be)
else:
# Calibration image
calib = self.distcorr.calib()
self.check_exists("Phase encode reversed calibration image", calib)
cmd.add('--cblip="%s"' % calib)
# Generic options
cmd.add("--echospacing=%.5f" % self.distcorr.echosp())
cmd.add("--pedir=%s" % self.distcorr.pedir())
# Analysis options
if self.analysis.wp():
cmd.add("--wp")
else:
cmd.add("--t1 %.2f" % self.analysis.t1())
cmd.add("--bat %.2f" % self.analysis.bat())
cmd.add("--t1b %.2f" % self.analysis.t1b())
cmd.add("--alpha %.2f" % self.analysis.ie())
if self.analysis.fixbolus(): cmd.add("--fixbolus")
if self.analysis.spatial(): cmd.add("--spatial")
if self.analysis.mc(): cmd.add("--mc")
if self.analysis.infer_t1(): cmd.add("--infert1")
if self.analysis.pv(): cmd.add("--pvcorr")
if not self.analysis.macro(): cmd.add("--artoff")
if self.analysis.mask() is not None:
self.check_exists("Analysis mask", self.analysis.mask())
cmd.add('-m "%s"' % self.analysis.mask())
# Output dir
if outdir == "":
raise OptionError("Output directory not specified")
cmd.add('-o "%s"' % outdir)
run.append(cmd)
return run
``` |
{
"source": "jiyanglii/OpenTransformer",
"score": 3
} |
#### File: otrans/module/conformer.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConformerConvolutionModule(nn.Module):
def __init__(self, channels, kernel_size, bias=True, dropout=0.0):
super(ConformerConvolutionModule, self).__init__()
assert kernel_size % 2 == 1
self.pointwise_conv1 = nn.Linear(channels, 2 * channels, bias=bias)
self.depthwise_conv = nn.Conv1d(
channels,
channels,
kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
groups=channels,
bias=bias
)
self.batch_norm = nn.BatchNorm1d(channels)
self.pointwise_conv2 = nn.Linear(channels, channels, bias=bias)
self.dropout = nn.Dropout(dropout)
def forward(self, x, mask):
"""
Args:
x: [batch_size, time, channels]
mask: [batch_size, time]
"""
mask = mask.unsqueeze(2).repeat([1, 1, x.size(-1)])
x = self.pointwise_conv1(x)
x = F.glu(x)
x.masked_fill_(~mask, 0.0)
x = x.transpose(1, 2)
x = self.depthwise_conv(x)
x = self.batch_norm(x)
x = x * torch.sigmoid(x) # swish
x = x.transpose(1, 2)
x = self.pointwise_conv2(x)
x.masked_fill_(~mask, 0.0)
return x
```
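A short, hedged usage sketch of the convolution module above, assuming `torch` is installed and the package is importable as `otrans.module.conformer` (the path used in this repository). It only checks shapes; note that `self.dropout` is constructed but never applied inside `forward` as shown:

```python
import torch
from otrans.module.conformer import ConformerConvolutionModule  # path as in this repo

module = ConformerConvolutionModule(channels=256, kernel_size=15, dropout=0.1)

x = torch.randn(2, 100, 256)                 # [batch, time, channels]
mask = torch.ones(2, 100, dtype=torch.bool)  # True = valid frame
mask[1, 80:] = False                         # second utterance is padded after frame 80

y = module(x, mask)
print(y.shape)                       # torch.Size([2, 100, 256]) - time/channel dims preserved
print(y[1, 80:].abs().sum().item())  # 0.0 - padded frames are zeroed by the final masked_fill_
```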
#### File: otrans/recognize/speech2text.py
```python
import torch
from otrans.data import EOS, BOS
from otrans.recognize.base import Recognizer
from packaging import version
class SpeechToTextRecognizer(Recognizer):
def __init__(self, model, lm=None, lm_weight=0.1, ctc_weight=0.0, beam_width=5, nbest=1,
max_len=50, idx2unit=None, penalty=0, lamda=5, ngpu=1, apply_cache=False):
super(SpeechToTextRecognizer, self).__init__(model, idx2unit, lm, lm_weight, ngpu)
self.beam_width = beam_width
self.max_len = max_len
self.nbest = nbest
self.penalty = penalty
self.lamda = lamda
self.ctc_weight = ctc_weight
self.lm_weight = lm_weight
self.attn_weights = {}
self.apply_cache = False
def encode(self, inputs, inputs_mask, cache=None):
new_cache = {}
inputs, inputs_mask, fe_cache = self.model.frontend.inference(inputs, inputs_mask, cache['frontend'] if cache is not None else None)
new_cache['frontend'] = fe_cache
# memory, memory_mask, enc_cache, enc_attn_weights = self.model.encoder.inference(inputs, inputs_mask, cache['encoder'] if cache is not None else None)
memory, memory_mask, enc_attn_weights = self.model.encoder(inputs, inputs_mask)
# new_cache['encoder'] = enc_cache
return memory, memory_mask, new_cache, enc_attn_weights
def decode(self, preds, memory, memory_mask, cache=None):
log_probs, dec_cache, dec_attn_weights = self.model.decoder.inference(preds, memory, memory_mask, cache)
return log_probs, dec_cache, dec_attn_weights
def recognize(self, inputs, inputs_mask):
        cache = {'frontend': None, 'encoder': None, 'decoder': None, 'lm': None}
self.attn_weights = {}
memory, memory_mask, _, enc_attn_weights = self.encode(inputs, inputs_mask)
self.attn_weights['encoder'] = enc_attn_weights
self.attn_weights['decoder'] = []
b, t, v = memory.size()
beam_memory = memory.unsqueeze(1).repeat([1, self.beam_width, 1, 1]).view(b * self.beam_width, t, v)
beam_memory_mask = memory_mask.unsqueeze(1).repeat([1, self.beam_width, 1]).view(b * self.beam_width, t)
preds = torch.ones([b * self.beam_width, 1], dtype=torch.long, device=memory.device) * BOS
scores = torch.FloatTensor([0.0] + [-float('inf')] * (self.beam_width - 1))
scores = scores.to(memory.device).repeat([b]).unsqueeze(1)
ending_flag = torch.zeros_like(scores, dtype=torch.bool)
with torch.no_grad():
for _ in range(1, self.max_len+1):
preds, cache, scores, ending_flag = self.decode_step(
preds, beam_memory, beam_memory_mask, cache, scores, ending_flag)
# whether stop or not
if ending_flag.sum() == b * self.beam_width:
break
scores = scores.view(b, self.beam_width)
preds = preds.view(b, self.beam_width, -1)
lengths = torch.sum(torch.ne(preds, EOS).float(), dim=-1)
# length penalty
if self.penalty:
lp = torch.pow((self.lamda + lengths) /
(self.lamda + 1), self.penalty)
scores /= lp
sorted_scores, offset_indices = torch.sort(scores, dim=-1, descending=True)
base_indices = torch.arange(b, dtype=torch.long, device=offset_indices.device) * self.beam_width
base_indices = base_indices.unsqueeze(1).repeat([1, self.beam_width]).view(-1)
preds = preds.view(b * self.beam_width, -1)
indices = offset_indices.view(-1) + base_indices
# remove BOS
sorted_preds = preds[indices].view(b, self.beam_width, -1)
nbest_preds = sorted_preds[:, :min(self.beam_width, self.nbest), 1:]
nbest_scores = sorted_scores[:, :min(self.beam_width, self.nbest)]
return self.nbest_translate(nbest_preds), nbest_scores
def decode_step(self, preds, memory, memory_mask, cache, scores, flag):
""" decode an utterance in a stepwise way"""
batch_size = int(scores.size(0) / self.beam_width)
batch_log_probs, dec_cache, dec_attn_weights = self.decode(preds, memory, memory_mask, cache['decoder'])
if self.lm is not None:
batch_lm_log_probs, lm_hidden = self.lm_decode(preds, cache['lm'])
batch_lm_log_probs = batch_lm_log_probs.squeeze(1)
batch_log_probs = batch_log_probs + self.lm_weight * batch_lm_log_probs
else:
lm_hidden = None
if batch_log_probs.dim() == 3:
batch_log_probs = batch_log_probs.squeeze(1)
last_k_scores, last_k_preds = batch_log_probs.topk(self.beam_width)
last_k_scores = mask_finished_scores(last_k_scores, flag)
last_k_preds = mask_finished_preds(last_k_preds, flag)
# update scores
scores = scores + last_k_scores
scores = scores.view(batch_size, self.beam_width * self.beam_width)
# pruning
scores, offset_k_indices = torch.topk(scores, k=self.beam_width)
scores = scores.view(-1, 1)
device = scores.device
base_k_indices = torch.arange(batch_size, device=device).view(-1, 1).repeat([1, self.beam_width])
base_k_indices *= self.beam_width ** 2
best_k_indices = base_k_indices.view(-1) + offset_k_indices.view(-1)
# update predictions
best_k_preds = torch.index_select(
last_k_preds.view(-1), dim=-1, index=best_k_indices)
if version.parse(torch.__version__) < version.parse('1.6.0'):
preds_index = best_k_indices.div(self.beam_width)
else:
preds_index = best_k_indices.floor_divide(self.beam_width)
preds_symbol = torch.index_select(
preds, dim=0, index=preds_index)
preds_symbol = torch.cat(
(preds_symbol, best_k_preds.view(-1, 1)), dim=1)
# dec_hidden = reselect_hidden_list(dec_hidden, self.beam_width, best_k_indices)
# lm_hidden = reselect_hidden_list(lm_hidden, self.beam_width, best_k_indices)
# finished or not
end_flag = torch.eq(preds_symbol[:, -1], EOS).view(-1, 1)
# hidden = {
# 'decoder': dec_hidden,
# 'lm': lm_hidden
# }
return preds_symbol, cache, scores, end_flag
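    # Note (added, hedged): in decode_step() the candidate scores are flattened to shape
    # [batch, beam*beam] before the top-k pruning, so base_k_indices is scaled by
    # beam_width ** 2 to index into last_k_preds.view(-1), while preds_index = best // beam
    # maps each surviving hypothesis back to the [batch*beam] rows of `preds`.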
def mask_finished_scores(score, flag):
"""
If a sequence is finished, we only allow one alive branch. This function aims to give one branch a zero score
and the rest -inf score.
Args:
score: A real value array with shape [batch_size * beam_size, beam_size].
flag: A bool array with shape [batch_size * beam_size, 1].
Returns:
A real value array with shape [batch_size * beam_size, beam_size].
"""
beam_width = score.size(-1)
zero_mask = torch.zeros_like(flag, dtype=torch.bool)
if beam_width > 1:
unfinished = torch.cat(
(zero_mask, flag.repeat([1, beam_width - 1])), dim=1)
finished = torch.cat(
(flag.bool(), zero_mask.repeat([1, beam_width - 1])), dim=1)
else:
unfinished = zero_mask
finished = flag.bool()
score.masked_fill_(unfinished, -float('inf'))
score.masked_fill_(finished, 0)
return score
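# Worked example (added, hedged): with beam_width = 2, score = [[0.1, 0.3]] and
# flag = [[True]] (the hypothesis is finished), `unfinished` becomes [[False, True]] and
# `finished` becomes [[True, False]], so the returned score is [[0.0, -inf]]: only the
# first branch of a finished beam stays alive, and its score is frozen at zero.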
def mask_finished_preds(pred, flag):
"""
If a sequence is finished, all of its branch should be </S> (3).
Args:
pred: A int array with shape [batch_size * beam_size, beam_size].
flag: A bool array with shape [batch_size * beam_size, 1].
Returns:
A int array with shape [batch_size * beam_size].
"""
beam_width = pred.size(-1)
finished = flag.repeat([1, beam_width])
return pred.masked_fill_(finished.bool(), EOS)
def reselect_hidden(tensor, beam_width, indices):
n_layers, batch_size, hidden_size = tensor.size()
tensor = tensor.transpose(0, 1).unsqueeze(1).repeat([1, beam_width, 1, 1])
tensor = tensor.reshape(batch_size * beam_width, n_layers, hidden_size)
new_tensor = torch.index_select(tensor, dim=0, index=indices)
new_tensor = new_tensor.transpose(0, 1).contiguous()
return new_tensor
def reselect_hidden_list(tensor_list, beam_width, indices):
if tensor_list is None:
return None
new_tensor_list = []
for tensor in tensor_list:
if isinstance(tensor, tuple):
h = reselect_hidden(tensor[0], beam_width, indices)
c = reselect_hidden(tensor[1], beam_width, indices)
new_tensor_list.append((h, c))
else:
new_tensor_list.append(reselect_hidden(tensor, beam_width, indices))
return new_tensor_list
``` |
{
"source": "JIYANG-PLUS/JDjango",
"score": 2
} |
#### File: djangotools/basetools/content.py
```python
from ..common import *
__all__ = [
    'get_list_patt_content', # extract the body of a list literal via regex + bracket matching (brackets excluded)
    'get_list_patt_content_contain_code', # the same, but with the surrounding brackets included
    'add_oneline_to_listattr', # use a regex to append one line to a list
    'add_lines_to_listattr', # use a regex to append several lines to a list
    'pop_oneline_to_listattr', # use a regex to remove one line from a list
    'pop_lines_to_listattr', # use a regex to remove several lines from a list
]
def get_list_patt_content(patt, path: str, leftCode: str='[', rightCode: str=']', mode=0, content="")->str:
    """Extract the body of a list literal via regex + bracket matching (surrounding brackets excluded).
    mode: 0 means `path` points to a file to read, 1 means `content` already holds the text.
    """
if 0 == mode:
content = read_file(path)
obj = patt.search(content)
if obj:
complex_content = patt.findall(content)[0]
return cut_content_by_doublecode(complex_content, leftCode=leftCode, rightCode=rightCode)
else:
return ''
def get_list_patt_content_contain_code(patt, content: str, leftCode: str='[', rightCode: str=']')->str:
    """Extract a list literal via regex + bracket matching (surrounding brackets included)."""
obj = patt.search(content)
if obj:
complex_content = patt.findall(content)[0]
return leftCode + cut_content_by_doublecode(complex_content, leftCode=leftCode, rightCode=rightCode) + rightCode
else:
return leftCode + rightCode
def add_oneline_to_listattr(setting_path: str, patt, idata: str, indent: int=4, position:int = -1)->None:
    """Append a single line to a list variable."""
content = get_list_patt_content(patt, setting_path)
insert_data = " " * indent + f"{idata},\n"
    if -1 == position: # append at the tail
        new_content = f"{content}{insert_data}"
    else: # insert at the head
        new_content = f"\n{insert_data[:-1]}{content}"
write_file(setting_path, read_file(setting_path).replace(content, new_content))
def add_lines_to_listattr(setting_path: str, patt, idatas: List[str], indent: int=4)->None:
    """Append multiple lines to a list variable."""
for idata in idatas:
add_oneline_to_listattr(setting_path, patt, idata, indent)
def pop_oneline_to_listattr(setting_path: str, patt, idata: str, indent: int=4)->None:
    """Remove one specified line from a list-type variable in settings.py."""
content = get_list_patt_content(patt, setting_path)
insert_data = " " * indent + f"{idata},\n"
new_content = content.replace(insert_data, '')
write_file(setting_path, read_file(setting_path).replace(content, new_content))
def pop_lines_to_listattr(setting_path: str, patt, idatas: List[str], indent: int=4)->None:
    """Remove multiple specified lines from a list-type variable in settings.py."""
for idata in idatas:
pop_oneline_to_listattr(setting_path, patt, idata, indent)
```
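The helpers above locate a Python list literal (for example `INSTALLED_APPS` in `settings.py`) with a regex from `retools` plus bracket matching, and then splice lines into or out of it. A hedged, self-contained sketch of the same idea follows; the pattern and file content are made up for illustration and are not the project's actual `retools` patterns (which also handle nested brackets):

```python
import re

SETTINGS = """INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
]
"""

# Illustrative pattern only; the real project keeps its patterns in retools.
PATT_INSTALLED_APPS = re.compile(r"INSTALLED_APPS\s*=\s*\[(.*?)\]", re.S)

def add_app(content: str, app: str, indent: int = 4) -> str:
    """Append one entry at the tail of INSTALLED_APPS, mirroring add_oneline_to_listattr."""
    body = PATT_INSTALLED_APPS.search(content).group(1)
    new_body = body + " " * indent + f"'{app}',\n"
    return content.replace(body, new_body)

print(add_app(SETTINGS, "myapp"))
```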
#### File: djangotools/basetools/file.py
```python
from ..common import *
__all__ = [
    'django_file_path', # build the full path of a Django template file
    'read_file_lists', # read a template file as a list of lines (relies on the ${} placeholder regex)
    'get_content', # wrapper around django_file_path and read_file_lists
    'append_content' , # append template text to the end of an existing file
]
def django_file_path(file_name: str, concat: List[str]=None)->str:
    """Build the full path of a Django template file."""
    # `concat` fills in the missing middle path segments (files may live in different
    # sub-folders, but they all share the same root directory)
    if None == concat:
        concat = []
    return os.path.join(TEMPLATE_DIR, *concat, file_name) # template path
def read_file_lists(r_path: str, *args, **kwargs)->List[str]:
    """Read a file as a list of lines."""
with open(r_path, 'r', encoding='utf-8') as f:
lines = f.readlines()
    if 'replace' in kwargs and kwargs['replace']: # substitution enabled
lines = [retools.PATT_REPLACE.sub(lambda x:kwargs[x.group(1)], _) for _ in lines]
return lines
def get_content(file_name: str, *args, **kwargs)->List[str]:
    """Return a template's lines after rule-based placeholder substitution."""
return read_file_lists(django_file_path(file_name, concat=kwargs.get('concat')), *args, **kwargs)
def append_content(path: str, name: str, *args, **kwargs)->None:
    """Append another template's rule-substituted content to the end of an existing file (template substitution syntax)."""
    # Example call: append_content(alias_paths[0], 'renameHeader.django', concat=['admin'], replace=True, model_name=k, site_name=site_name)
content = get_content(name, *args, **kwargs)
append_file(path, content)
```
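`read_file_lists` substitutes `${name}` style placeholders through `retools.PATT_REPLACE`, looking each captured name up in the keyword arguments. Below is a hedged, standalone sketch of that mechanism; the pattern is an assumed equivalent, not the project's own definition:

```python
import re

# Assumed equivalent of retools.PATT_REPLACE: captures the name inside ${...}
PATT_REPLACE = re.compile(r"\$\{(\w+)\}")

def render_line(line: str, **kwargs) -> str:
    """Replace every ${name} placeholder with the matching keyword argument."""
    return PATT_REPLACE.sub(lambda m: kwargs[m.group(1)], line)

template = "class ${app_name}Config(AppConfig):"
print(render_line(template, app_name="blog"))
# -> class blogConfig(AppConfig):
```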
#### File: djangotools/templates/write.py
```python
from ..common import *
from ..basetools import *
__all__ = [
    'startproject', # create a new project
    'startapp', # create a new application
]
def startproject(path: str, project_name: str)->int:
    """Create a new project; return 0 on success, 1 on failure."""
if retools.PATT_CHARSNUMBER.match(project_name) and not os.path.exists(os.path.join(path, project_name)):
"""project_name"""
os.mkdir(os.path.join(path, project_name))
path = os.path.join(path, project_name)
os.mkdir(os.path.join(path, project_name))
PDir = os.path.join(path, project_name)
new_file(os.path.join(PDir, '__init__.py'))
new_file(os.path.join(PDir, 'urls.py'), content=get_content('urls.django', concat=['project']))
new_file(os.path.join(PDir, 'asgi.py'), content=get_content('asgi.django', concat=['project'], replace=True, project_name=project_name))
new_file(os.path.join(PDir, 'wsgi.py'), content=get_content('wsgi.django', concat=['project'], replace=True, project_name=project_name))
new_file(os.path.join(PDir, 'settings.py'), content=get_content('settings.django', concat=['project'], replace=True, project_name=project_name, secret_key=generate_secret_key()))
"""templates"""
# os.mkdir(os.path.join(path, 'templates'))
# os.mkdir(os.path.join(path, 'templates', 'includes'))
# new_file(os.path.join(path, 'templates', 'base.html'), content=get_content('baseHtml.django'))
"""static"""
# os.mkdir(os.path.join(path, 'static'))
# os.mkdir(os.path.join(path, 'static', 'js'))
# os.mkdir(os.path.join(path, 'static', 'img'))
# os.mkdir(os.path.join(path, 'static', 'css'))
"""manage.py"""
new_file(os.path.join(path, 'manage.py'), content=get_content('manage.django', concat=['project'], replace=True, project_name=project_name))
return 0
else:
return 1
def startapp(app_name: str)->int:
    """Create a new application; return 0 on success, 1 on failure."""
configs = get_configs(CONFIG_PATH)
PROJECT_BASE_DIR = configs['dirname']
if retools.PATT_CHARSNUMBER.match(app_name) and not os.path.exists(os.path.join(PROJECT_BASE_DIR, app_name)):
"""""""""main"""
""""""
os.mkdir(os.path.join(PROJECT_BASE_DIR, app_name))
APP_DIR = os.path.join(PROJECT_BASE_DIR, app_name)
new_file(os.path.join(APP_DIR, '__init__.py'))
new_file(os.path.join(APP_DIR, 'admin.py'), content=get_content('admin.django'))
new_file(os.path.join(APP_DIR, 'apps.py'), content=get_content('apps.django', replace=True, app_name=app_name))
new_file(os.path.join(APP_DIR, 'forms.py'), content=get_content('forms.django'))
new_file(os.path.join(APP_DIR, 'models.py'), content=get_content('models.django'))
new_file(os.path.join(APP_DIR, 'controller.py'), content=get_content('controller.django'))
new_file(os.path.join(APP_DIR, 'tests.py'), content=get_content('tests.django'))
new_file(os.path.join(APP_DIR, 'urls.py'), content=get_content('urls.django', replace=True, app_name=app_name))
new_file(os.path.join(APP_DIR, 'views.py'), content=get_content('views.django'))
""""""
"""""""""templates"""
""""""
# os.mkdir(os.path.join(APP_DIR, 'templates'))
# os.mkdir(os.path.join(APP_DIR, 'templates', app_name))
# os.mkdir(os.path.join(APP_DIR, 'templates', app_name, 'includes'))
# TEMP_DIR = os.path.join(APP_DIR, 'templates', app_name)
# new_file(os.path.join(TEMP_DIR, 'base.html'), content=get_content('baseHtml.django'))
# new_file(os.path.join(TEMP_DIR, 'includes', 'paginator.html'), content=get_content('paginator.django'))
""""""
"""""""""static"""
""""""
# os.mkdir(os.path.join(APP_DIR, 'static'))
# os.mkdir(os.path.join(APP_DIR, 'static', app_name))
# os.mkdir(os.path.join(APP_DIR, 'static', app_name, 'js'))
# os.mkdir(os.path.join(APP_DIR, 'static', app_name, 'img'))
# os.mkdir(os.path.join(APP_DIR, 'static', app_name, 'css'))
""""""
"""""""""templatetags"""
""""""
# os.mkdir(os.path.join(APP_DIR, 'templatetags'))
# new_file(os.path.join(APP_DIR, 'templatetags', '__init__.py'))
# new_file(os.path.join(APP_DIR, 'templatetags', 'filter.py'), content=get_content('filter.django'))
""""""
"""""""""migrations"""
""""""
os.mkdir(os.path.join(APP_DIR, 'migrations'))
new_file(os.path.join(APP_DIR, 'migrations', '__init__.py'))
""""""
return 0
else:
return 1
```
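A hedged usage sketch of the two generators above. The import path is a placeholder for however the package is laid out on disk; `startapp` reads the target project directory from the tool's saved configuration (`get_configs(CONFIG_PATH)`), so it only works after a project has been registered there:

```python
# Hypothetical calls; the module path, directory and names are placeholders.
from djangotools.templates.write import startproject, startapp

ret = startproject("/tmp/workspace", "mysite")  # 0 on success, 1 if the name is invalid or exists
if ret == 0:
    # startapp() resolves the project directory from the saved configuration,
    # then writes admin.py, apps.py, models.py, urls.py, views.py, ... into the new app.
    startapp("blog")
```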
#### File: JDjangoDemo/docs/models.py
```python
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import User
from django.utils import timezone
from .manage import MenuManage, ArticleManage
LEVELS = [
('a', 'A级'),
('b', 'B级'),
('c', 'C级'),
('d', 'D级'),
('e', 'E级'),
('f', 'F级'),
('g', 'G级'),
('h', 'H级'),
('i', 'I级'),
] # nine grades: A is the lowest, I is the highest
# A "menu" here can be understood as a site section/board
class Menu(models.Model):
name = models.CharField(_("节点名"), max_length=30)
description = models.TextField(_("节点描述"))
isroot = models.BooleanField(_("是否根节点"), default=False)
isvisible = models.BooleanField(_("是否可见"), default=True)
order = models.PositiveIntegerField(_("节点顺序"), default=0)
parent_menu = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='sub_menu', verbose_name=_('上级节点'))
url_path = models.CharField(_("页面绑定"), max_length=60, default='', blank=True)
add_time = models.DateTimeField(_("添加时间"), auto_now_add=True)
    # managers
    objects = models.Manager() # default manager
    menuManage = MenuManage() # custom manager (all menus)
def __str__(self):
return self.name
class Meta:
verbose_name = '菜单'
verbose_name_plural = '菜单'
class Article(models.Model):
title = models.CharField(_("标题"), max_length=100)
content = models.TextField(_("内容"))
abstract = models.CharField(_("摘要"), max_length=100)
label = models.CharField(_("标签"), max_length=100)
level = models.CharField(_("文章评级"), max_length=1, choices=LEVELS, default='a')
version = models.CharField(_("版本号"), default='1.0.0', max_length=30)
create_time = models.DateTimeField(_("创建时间"), auto_now_add=True)
modify_time = models.DateTimeField(_("修改时间"), auto_now=True)
isvisible = models.BooleanField(_("是否可见"), default=True)
iswrite = models.BooleanField(_("是否已撰写示例"), default=False)
url_path = models.CharField(_("页面绑定"), max_length=60, default='', blank=True)
menu = models.ForeignKey(Menu, on_delete=models.CASCADE, related_name='marticles', verbose_name=_('所属菜单'))
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='warticles', verbose_name=_('作者'))
auditor = models.ForeignKey(User, on_delete=models.CASCADE, related_name='aarticles', verbose_name=_('审核人'), null=True, blank=True)
    # managers
    objects = models.Manager() # default manager
    articleManage = ArticleManage() # custom manager
def __str__(self):
return self.title
class Meta:
verbose_name = '接口文章'
verbose_name_plural = '接口文章'
# Interaction extension of Article (feature shelved for now)
class ArticleA(models.Model):
use_time = models.PositiveIntegerField(_("调用次数"), default=0)
votes = models.PositiveIntegerField(_("支持数"), default=0)
article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name='articleA', verbose_name=_('接口'))
def __str__(self):
return 'articleA'
class Meta:
verbose_name = '接口互动'
verbose_name_plural = '接口互动'
# Comments on Article (feature shelved for now)
class Remark(models.Model):
class Meta:
verbose_name = '评论'
verbose_name_plural = '评论'
# API (plug-in) model
class PlugIn(models.Model):
url = models.URLField(_("接口"), unique=True)
only_code = models.CharField(_("接口唯一标识"), max_length=32, unique=True)
isvalid = models.BooleanField(_("可用"), default=True)
create_time = models.DateTimeField(_("创建时间"), auto_now_add=True)
generator = models.ForeignKey(User, on_delete=models.CASCADE, related_name='urls', verbose_name=_('创作者'))
article = models.OneToOneField(Article, on_delete=models.CASCADE, related_name='aurls', verbose_name=_('关联文章'))
def __str__(self):
return self.url
class Meta:
verbose_name = '接口注册'
verbose_name_plural = '接口注册'
# API access code
class LimitLinkPlugIn(models.Model):
access_code = models.CharField(_("授权码"), max_length=8)
times = models.PositiveIntegerField(_("剩余调用次数"), default=100)
continue_times = models.PositiveIntegerField(_("续约次数"), default=0)
islegal = models.BooleanField(_("禁用"), default=False)
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='ulimits', verbose_name=_('使用者'))
plugin = models.ForeignKey(PlugIn, on_delete=models.CASCADE, related_name='plimits', verbose_name=_('接口'))
def __str__(self):
return self.access_code
class Meta:
verbose_name = '授权码'
verbose_name_plural = '授权码'
```
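A hedged sketch of how the relations and managers declared above are typically queried from a Django shell. It assumes a configured project with these models installed under the `docs` app (as the file path suggests); the behaviour of the custom `MenuManage` / `ArticleManage` managers is defined in the app's `manage.py`, which is not shown here:

```python
# Example ORM usage (assumes `python manage.py shell` inside the project).
from docs.models import Menu, Article

root_menus = Menu.objects.filter(isroot=True, isvisible=True).order_by("order")

for menu in root_menus:
    # `marticles` is the related_name declared on Article.menu
    visible = menu.marticles.filter(isvisible=True, level__gte="c")
    print(menu.name, visible.count())

# Menu.menuManage and Article.articleManage are alternative entry points; their exact
# querysets depend on the MenuManage/ArticleManage classes imported at the top of models.py.
```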
#### File: dialogs/dialogModels/dialogModels.py
```python
from ..common import *
"""
There is a layout BUG on Mac, presumably caused by combining RadioBox with scrolledpanel; it is still just about usable on Mac, so it is left as-is for now. (Fixed: the widget creation order affected the layout.)
###1 Steps to add a new parameter:
1. Register the parameter in constant.py; the relevant variables are CON_MODELSCREATEDIALOG_COLS and CON_ARGS_NAME_DICT;
2. Match the parameter to its widget and add it to the designated place on the page (i.e. the layout);
3. Register the widget in self.allArgs (only the single core input control is needed);
4. Depending on whether it is a common or a special parameter, add it to self.commonArgs / self.specialArgs (everything related to the core control, including layout items, must be added);
5. Add the field description and the field label to self.readmeStaticTexts and self.labelStaticTexts respectively;
6. Add the row data in onBtnAddFieldToArea() (with appropriate validation where needed);
7. If it is a special parameter, show it only when the corresponding field type is selected;
8. Finally, render it in the preview in onBtnPreview().
###2 Steps to add a new field type:
1. Add a new variable in constant.py, assign it in the format '<type>--<field type name>--<field details>', and insert it at the most suitable position in the list CON_FIELD_TYPES;
2. Implement the selection handler in onChoiceFieldType().
"""
"""
### Notes on relation fields:
1. OneToOneField differs from the other relation field types: its default reverse name is '<model_name>', whereas ManyToManyField and ForeignKey default to '<model_name>_set';
(the reverse name can of course be specified explicitly.)
"""
"""
### Usage notes:
1. After an application is selected, the model creation code is written to the first model file path found by the program. (If no such path exists, manually create a model file in the application and register an alias for it in environment.xml.)
"""
STATIC_TEXT_WIDTH = -1 # StaticText width
class ModelsCreateDialog(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, id = wx.ID_ANY, title = '新增模型', size=(730, 666), style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.MINIMIZE_BOX|wx.RESIZE_BORDER)
        # required bookkeeping containers
        self.allArgs = [] # all parameter widgets
        self.commonArgs = [] # widgets for the shared parameters
        self.specialArgs = [] # widgets for the type-specific parameters
        self.afterBtns = [] # all buttons that are only enabled afterwards
        self.allRows = [] # all pending fields and their parameters
        self.readmeStaticTexts = [] # all footnote/hint StaticText widgets
        self.labelStaticTexts = [] # all label StaticText widgets
self._init_UI()
self._disable_all_args()
self._init_all_args_value()
self._init_input_args()
self._disable_all_afterBtns()
        # lay out the panels in order
        self._init_table() # the table layout is added last by default
        self._init_Meta_panel() # initialise the Meta options panel
        # default font settings
self._init_readme_font()
self._init_label_font()
    def _init_readme_font(self):
        """Initialise the font of the footnote/hint texts."""
for _ in self.readmeStaticTexts:
_.SetFont(wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
_.SetForegroundColour(CON_COLOR_MAIN)
    def _init_label_font(self):
        """Initialise the font of the label texts."""
for _ in self.labelStaticTexts:
_.SetFont(wx.Font(16, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD))
    def _init_UI(self):
        """Initialise the UI layout."""
        # main panel
self.panel = wx.Panel(self)
self.panelSizer = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(self.panelSizer)
# self.panel.SetBackgroundColour(CON_COLOR_MAIN)
        # choose the file output path (changed here to choosing an application)
self.selectFilePanel = wx.Panel(self.panel)
selectFilePanelSizer = wx.BoxSizer(wx.HORIZONTAL)
self.selectFilePanel.SetSizer(selectFilePanelSizer)
self.panelSizer.Add(self.selectFilePanel, 0, wx.EXPAND | wx.ALL, 2)
self.selectFilePanel.SetBackgroundColour(CON_COLOR_MAIN) # CON_COLOR_PURE_WHITE
self.labelSelectFile = wx.StaticText(self.selectFilePanel, -1, "请在右侧下拉列表选择模型所属的应用程序")
self.choiceSelectFile = wx.Choice(self.selectFilePanel, -1, choices=[' ',] + djangotools.SCONFIGS.app_names())
selectFilePanelSizer.Add(self.labelSelectFile, 0, wx.EXPAND | wx.ALL, 2)
selectFilePanelSizer.Add(self.choiceSelectFile, 1, wx.EXPAND | wx.ALL, 2)
self.labelSelectFile.SetFont(wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
self.labelSelectFile.SetForegroundColour(CON_COLOR_PURE_WHITE)
        # custom toolbar
self.toolPanel = wx.Panel(self.panel)
toolPanelSizer = wx.BoxSizer(wx.HORIZONTAL)
self.toolPanel.SetSizer(toolPanelSizer)
self.panelSizer.Add(self.toolPanel, 0, wx.EXPAND | wx.ALL, 2)
self.toolPanel.SetBackgroundColour(CON_COLOR_MAIN)
self.btnAddNew = buttons.GenButton(self.toolPanel, -1, '新增字段')
self.btnResetInput = buttons.GenButton(self.toolPanel, -1, '重置字段')
self.btnAddFieldToArea = buttons.GenButton(self.toolPanel, -1, '添加至待新增区')
# self.btnModifyFieldArgs = buttons.GenButton(self.toolPanel, -1, '修改')
self.btnPreview = buttons.GenButton(self.toolPanel, -1, '代码预览')
self.btnExecSave = buttons.GenButton(self.toolPanel, -1, '保存')
self.btnExit = buttons.GenButton(self.toolPanel, -1, '退出')
self.autoRegister = wx.CheckBox(self.toolPanel, -1, label = '自动注册后台') # 是否自动注册后台单选框
self.autoRegister.SetForegroundColour(CON_COLOR_PURE_WHITE)
self.btnWhite = buttons.GenButton(self.toolPanel, -1, ' ') # 空白区域补全按钮
toolPanelSizer.Add(self.btnAddNew, 0, wx.EXPAND | wx.ALL, 2)
toolPanelSizer.Add(self.btnResetInput, 0, wx.EXPAND | wx.ALL, 2)
toolPanelSizer.Add(self.btnAddFieldToArea, 0, wx.EXPAND | wx.ALL, 2)
# toolPanelSizer.Add(self.btnModifyFieldArgs, 0, wx.EXPAND | wx.ALL, 2)
toolPanelSizer.Add(self.btnPreview, 0, wx.EXPAND | wx.ALL, 2)
toolPanelSizer.Add(self.btnExecSave, 0, wx.EXPAND | wx.ALL, 2)
toolPanelSizer.Add(self.btnExit, 0, wx.EXPAND | wx.ALL, 2)
toolPanelSizer.Add(self.autoRegister, 0, wx.EXPAND | wx.ALL, 2)
toolPanelSizer.Add(self.btnWhite, 1, wx.EXPAND | wx.ALL, 2)
self.btnWhite.Enable(False)
        # field type selection (this row stays fixed outside the scroll area)
self.selectFieldTypeStaticBox = wx.StaticBox(self.panel, -1, '')
self.selectFieldTypePanel = wx.StaticBoxSizer(self.selectFieldTypeStaticBox, wx.HORIZONTAL)
self.panelSizer.Add(self.selectFieldTypePanel, 0, wx.EXPAND | wx.ALL, 2)
self.choiceFieldTypeLabel = wx.StaticText(self.panel, -1, "1、字段类型:")
self.choiceFieldType = wx.Choice(self.panel, -1, choices = [' ']+CON_FIELD_TYPES) # , style = wx.CB_SORT
self.readmeChoiceFieldType = wx.StaticText(self.panel, -1, "【字段类型】** 新增字段前,必须先选择字段类型,选择后即可填写详细的参数数据。") # 选项说明
self.selectFieldTypePanel.Add(self.choiceFieldTypeLabel, 0, wx.EXPAND | wx.ALL, 2)
self.selectFieldTypePanel.Add(self.choiceFieldType, 1, wx.EXPAND | wx.ALL, 2)
self.panelSizer.Add(self.readmeChoiceFieldType, 0, wx.EXPAND | wx.ALL, 2)
        # scrollable panel (wraps all the parameter rows)
self.scollPanel = scrolledpanel.ScrolledPanel(self.panel, -1)
self.scollPanel.SetupScrolling()
scollPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.scollPanel.SetSizer(scollPanelSizer)
self.panelSizer.Add(self.scollPanel, 3, wx.EXPAND | wx.ALL, 2)
        # field attribute name
self.modelsNameStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.modelsNamePanel = wx.StaticBoxSizer(self.modelsNameStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.modelsNamePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelFieldModelName = wx.StaticText(self.scollPanel, -1, "2、字段属性名:")
self.inputFieldModelName = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputFieldModelName = wx.StaticText(self.scollPanel, -1, "【字段属性名】** 字段属性名,是代码中的字段名称,并非数据库中实际存储的列名。")
self.modelsNamePanel.Add(self.labelFieldModelName, 0, wx.EXPAND | wx.ALL, 2)
self.modelsNamePanel.Add(self.inputFieldModelName, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputFieldModelName, 0, wx.EXPAND | wx.ALL, 2)
        # database column name (db_column)
self.dbColumnNameStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.dbColumnNamePanel = wx.StaticBoxSizer(self.dbColumnNameStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.dbColumnNamePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelFieldDatabaseName = wx.StaticText(self.scollPanel, -1, "3、数据库列名(db_column):")
self.inputFieldDatabaseName = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputFieldDatabaseName = wx.StaticText(self.scollPanel, -1, "【数据库列名(db_column)】** 实际存储在数据库中的列名,若不指定默认取【字段属性名】。")
self.dbColumnNamePanel.Add(self.labelFieldDatabaseName, 0, wx.EXPAND | wx.ALL, 2)
self.dbColumnNamePanel.Add(self.inputFieldDatabaseName, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputFieldDatabaseName, 0, wx.EXPAND | wx.ALL, 2)
        # field remark / verbose name
self.fieldRemarkStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.fieldRemarkPanel = wx.StaticBoxSizer(self.fieldRemarkStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.fieldRemarkPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelFieldRemarkName = wx.StaticText(self.scollPanel, -1, "4、字段备注:", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputFieldRemarkName = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputFieldRemarkName = wx.StaticText(self.scollPanel, -1, "【字段备注】** 字段备注默认取【字段属性名】,下划线将自动转换成空格。")
self.fieldRemarkPanel.Add(self.labelFieldRemarkName, 0, wx.EXPAND | wx.ALL, 2)
self.fieldRemarkPanel.Add(self.inputFieldRemarkName, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputFieldRemarkName, 0, wx.EXPAND | wx.ALL, 2)
        # default value (default)
self.inputDefaultValueStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputDefaultValuePanel = wx.StaticBoxSizer(self.inputDefaultValueStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputDefaultValuePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputDefaultValue = wx.StaticText(self.scollPanel, -1, "5、默认值(default)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputDefaultValue = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputDefaultValue = wx.StaticText(self.scollPanel, -1, "【默认值(default)】** 字段默认值,可以是常量,也可以是一个函数。字符串用''括起来。")
self.inputDefaultValuePanel.Add(self.labelInputDefaultValue, 0, wx.EXPAND | wx.ALL, 2)
self.inputDefaultValuePanel.Add(self.inputDefaultValue, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputDefaultValue, 0, wx.EXPAND | wx.ALL, 2)
        # unique together with a date (unique_for_date)
self.choicesFiledUniqueForDateStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.choicesFiledUniqueForDatePanel = wx.StaticBoxSizer(self.choicesFiledUniqueForDateStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.choicesFiledUniqueForDatePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelChoicesFiledUniqueForDate = wx.StaticText(self.scollPanel, -1, "6、与日期组合唯一(unique_for_date)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.choicesFiledUniqueForDate = wx.Choice(self.scollPanel, -1, choices=[' ',])
self.readmeChoicesFiledUniqueForDate = wx.StaticText(self.scollPanel, -1, "【与日期组合唯一(unique_for_date)】** 当前字段与当前选择日期字段的值组合唯一。")
self.choicesFiledUniqueForDatePanel.Add(self.labelChoicesFiledUniqueForDate, 0, wx.EXPAND | wx.ALL, 2)
self.choicesFiledUniqueForDatePanel.Add(self.choicesFiledUniqueForDate, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeChoicesFiledUniqueForDate, 0, wx.EXPAND | wx.ALL, 2)
        # unique together with a month (unique_for_month)
self.choicesFiledUniqueForMonthStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.choicesFiledUniqueForMonthPanel = wx.StaticBoxSizer(self.choicesFiledUniqueForMonthStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.choicesFiledUniqueForMonthPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelChoicesFiledUniqueForMonth = wx.StaticText(self.scollPanel, -1, "7、与月份组合唯一(unique_for_month)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.choicesFiledUniqueForMonth = wx.Choice(self.scollPanel, -1, choices=[' ',])
self.readmeChoicesFiledUniqueForMonth = wx.StaticText(self.scollPanel, -1, "【与月份组合唯一(unique_for_month)】** 当前字段与当前选择月份字段的值组合唯一。")
self.choicesFiledUniqueForMonthPanel.Add(self.labelChoicesFiledUniqueForMonth, 0, wx.EXPAND | wx.ALL, 2)
self.choicesFiledUniqueForMonthPanel.Add(self.choicesFiledUniqueForMonth, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeChoicesFiledUniqueForMonth, 0, wx.EXPAND | wx.ALL, 2)
        # unique together with a year (unique_for_year)
self.choicesFiledUniqueForYearStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.choicesFiledUniqueForYearPanel = wx.StaticBoxSizer(self.choicesFiledUniqueForYearStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.choicesFiledUniqueForYearPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelChoicesFiledUniqueForYear = wx.StaticText(self.scollPanel, -1, "8、与年份组合唯一(unique_for_year)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.choicesFiledUniqueForYear = wx.Choice(self.scollPanel, -1, choices=[' ',])
self.readmeChoicesFiledUniqueForYear = wx.StaticText(self.scollPanel, -1, "【与年份组合唯一(unique_for_year)】** 当前字段与当前选择年份字段的值组合唯一。")
self.choicesFiledUniqueForYearPanel.Add(self.labelChoicesFiledUniqueForYear, 0, wx.EXPAND | wx.ALL, 2)
self.choicesFiledUniqueForYearPanel.Add(self.choicesFiledUniqueForYear, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeChoicesFiledUniqueForYear, 0, wx.EXPAND | wx.ALL, 2)
        # primary key (primary_key)
self.radiosFiledPrimaryStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosFiledPrimaryPanel = wx.StaticBoxSizer(self.radiosFiledPrimaryStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosFiledPrimaryPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosFiledPrimary = wx.StaticText(self.scollPanel, -1, "9、主键(primary_key):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosFiledPrimary = wx.RadioBox(self.scollPanel, -1, "", choices=['是', '否'])
self.readmeRadiosFiledPrimary = wx.StaticText(self.scollPanel, -1, "【主键(primary_key)】** 数据库主键唯一字段。")
self.radiosFiledPrimaryPanel.Add(self.labelRadiosFiledPrimary, 0, wx.EXPAND | wx.ALL, 2)
self.radiosFiledPrimaryPanel.Add(self.radiosFiledPrimary, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosFiledPrimary, 0, wx.EXPAND | wx.ALL, 2)
        # unique value (unique)
self.radiosFiledUniqueStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosFiledUniquePanel = wx.StaticBoxSizer(self.radiosFiledUniqueStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosFiledUniquePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosFiledUnique = wx.StaticText(self.scollPanel, -1, "10、值唯一(unique):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosFiledUnique = wx.RadioBox(self.scollPanel, -1, "", choices=['唯一', '不唯一'])
self.readmeRadiosFiledUnique = wx.StaticText(self.scollPanel, -1, "【值唯一(unique)】** 数据库字段值唯一。")
self.radiosFiledUniquePanel.Add(self.labelRadiosFiledUnique, 0, wx.EXPAND | wx.ALL, 2)
self.radiosFiledUniquePanel.Add(self.radiosFiledUnique, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosFiledUnique, 0, wx.EXPAND | wx.ALL, 2)
        # allow blank (blank)
self.radiosFiledBlankStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosFiledBlankPanel = wx.StaticBoxSizer(self.radiosFiledBlankStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosFiledBlankPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosFiledBlank = wx.StaticText(self.scollPanel, -1, "11、允许为空(blank):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosFiledBlank = wx.RadioBox(self.scollPanel, -1, "", choices=['允许', '不允许'])
self.readmeRadiosFiledBlank = wx.StaticText(self.scollPanel, -1, "【允许为空(blank)】** 数据库表字段允许为空,表单验证允许为空。")
self.radiosFiledBlankPanel.Add(self.labelRadiosFiledBlank, 0, wx.EXPAND | wx.ALL, 2)
self.radiosFiledBlankPanel.Add(self.radiosFiledBlank, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosFiledBlank, 0, wx.EXPAND | wx.ALL, 2)
        # store NULL when empty (null)
self.radiosFiledNullStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosFiledNullPanel = wx.StaticBoxSizer(self.radiosFiledNullStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosFiledNullPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosFiledNull = wx.StaticText(self.scollPanel, -1, "12、为空时赋NULL(null):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosFiledNull = wx.RadioBox(self.scollPanel, -1, "", choices=['赋', '不赋'])
self.readmeRadiosFiledNull = wx.StaticText(self.scollPanel, -1, "【为空时赋NULL(null)】** 数据库表字段为空时,用NULL作默认值。")
self.radiosFiledNullPanel.Add(self.labelRadiosFiledNull, 0, wx.EXPAND | wx.ALL, 2)
self.radiosFiledNullPanel.Add(self.radiosFiledNull, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosFiledNull, 0, wx.EXPAND | wx.ALL, 2)
        # create a database index (db_index)
self.radiosFiledDbIndexStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosFiledDbIndexPanel = wx.StaticBoxSizer(self.radiosFiledDbIndexStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosFiledDbIndexPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosFiledDbIndex = wx.StaticText(self.scollPanel, -1, "13、创建索引(db_index):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosFiledDbIndex = wx.RadioBox(self.scollPanel, -1, "", choices=['创建', '不创建'])
self.readmeRadiosFiledDbIndex = wx.StaticText(self.scollPanel, -1, "【创建索引(db_index)】** 创建数据库的字段索引。")
self.radiosFiledDbIndexPanel.Add(self.labelRadiosFiledDbIndex, 0, wx.EXPAND | wx.ALL, 2)
self.radiosFiledDbIndexPanel.Add(self.radiosFiledDbIndex, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosFiledDbIndex, 0, wx.EXPAND | wx.ALL, 2)
        # show in forms (editable)
self.radiosFiledEditableStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosFiledEditablePanel = wx.StaticBoxSizer(self.radiosFiledEditableStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosFiledEditablePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosFiledEditable = wx.StaticText(self.scollPanel, -1, "14、表单显示(editable):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosFiledEditable = wx.RadioBox(self.scollPanel, -1, "", choices=['显示', '不显示'])
self.readmeRadiosFiledEditable = wx.StaticText(self.scollPanel, -1, "【表单显示(editable)】** 表单页面提供交互式控件。")
self.radiosFiledEditablePanel.Add(self.labelRadiosFiledEditable, 0, wx.EXPAND | wx.ALL, 2)
self.radiosFiledEditablePanel.Add(self.radiosFiledEditable, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosFiledEditable, 0, wx.EXPAND | wx.ALL, 2)
        # form help text (help_text)
self.inputFormHelpTextStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputFormHelpTextPanel = wx.StaticBoxSizer(self.inputFormHelpTextStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputFormHelpTextPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputFormHelpText = wx.StaticText(self.scollPanel, -1, "15、表单帮助信息(help_text)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputFormHelpText = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputFormHelpText = wx.StaticText(self.scollPanel, -1, "【表单帮助信息(help_text)】** 表单填写时的提示信息。")
self.inputFormHelpTextPanel.Add(self.labelInputFormHelpText, 0, wx.EXPAND | wx.ALL, 2)
self.inputFormHelpTextPanel.Add(self.inputFormHelpText, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputFormHelpText, 0, wx.EXPAND | wx.ALL, 2)
        # form error messages (error_messages)
self.inputFormErrorMessageStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputFormErrorMessagePanel = wx.StaticBoxSizer(self.inputFormErrorMessageStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputFormErrorMessagePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputFormErrorMessage = wx.StaticText(self.scollPanel, -1, "16、表单错误提醒(error_messages)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputFormErrorMessage = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputFormErrorMessage = wx.StaticText(self.scollPanel, -1, "【表单错误提醒(error_messages)】** 表单填写错误时的提示信息。")
self.inputFormErrorMessagePanel.Add(self.labelInputFormErrorMessage, 0, wx.EXPAND | wx.ALL, 2)
self.inputFormErrorMessagePanel.Add(self.inputFormErrorMessage, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputFormErrorMessage, 0, wx.EXPAND | wx.ALL, 2)
        # maximum length (max_length)
self.inputMaxLengthStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputMaxLengthPanel = wx.StaticBoxSizer(self.inputMaxLengthStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputMaxLengthPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputMaxLength = wx.StaticText(self.scollPanel, -1, "17、长度上限(max_length):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputMaxLength = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputMaxLength = wx.StaticText(self.scollPanel, -1, "【长度上限(max_length)】** 数据库允许存储的最大长度。")
self.inputMaxLengthPanel.Add(self.labelInputMaxLength, 0, wx.EXPAND | wx.ALL, 2)
self.inputMaxLengthPanel.Add(self.inputMaxLength, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputMaxLength, 0, wx.EXPAND | wx.ALL, 2)
        # total number of digits (max_digits)
self.inputMaxDigitsStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputMaxDigitsPanel = wx.StaticBoxSizer(self.inputMaxDigitsStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputMaxDigitsPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputMaxDigits = wx.StaticText(self.scollPanel, -1, "18、实数总位数(max_digits)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputMaxDigits = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputMaxDigits = wx.StaticText(self.scollPanel, -1, "【实数总位数(max_digits)】** 整数位数和小数位数的总和,不包括小数点。")
self.inputMaxDigitsPanel.Add(self.labelInputMaxDigits, 0, wx.EXPAND | wx.ALL, 2)
self.inputMaxDigitsPanel.Add(self.inputMaxDigits, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputMaxDigits, 0, wx.EXPAND | wx.ALL, 2)
        # number of decimal places (decimal_places, defaults to 0)
self.inputDecimalPlacesStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputDecimalPlacesPanel = wx.StaticBoxSizer(self.inputDecimalPlacesStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputDecimalPlacesPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputDecimalPlaces = wx.StaticText(self.scollPanel, -1, "19、小数总位数(decimal_places)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputDecimalPlaces = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputDecimalPlaces = wx.StaticText(self.scollPanel, -1, "【小数总位数(decimal_places)】** 小数点后保留的位数,不包括小数点。")
self.inputDecimalPlacesPanel.Add(self.labelInputDecimalPlaces, 0, wx.EXPAND | wx.ALL, 2)
self.inputDecimalPlacesPanel.Add(self.inputDecimalPlaces, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputDecimalPlaces, 0, wx.EXPAND | wx.ALL, 2)
# save调用更新日期(auto_now)
self.radiosAutoNowStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosAutoNowPanel = wx.StaticBoxSizer(self.radiosAutoNowStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosAutoNowPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosAutoNow = wx.StaticText(self.scollPanel, -1, "20、保存更新日期(auto_now):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosAutoNow = wx.RadioBox(self.scollPanel, -1, "", choices=['启用', '不启用'])
self.readmeRadiosAutoNow = wx.StaticText(self.scollPanel, -1, "【保存更新日期(auto_now)】** 仅在调用模型实例的save()方法时自动将该日期字段更新为当前时间。")
self.radiosAutoNowPanel.Add(self.labelRadiosAutoNow, 0, wx.EXPAND | wx.ALL, 2)
self.radiosAutoNowPanel.Add(self.radiosAutoNow, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosAutoNow, 0, wx.EXPAND | wx.ALL, 2)
# 仅创建时一次赋值日期(auto_now_add)
self.radiosAutoNowAddStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosAutoNowAddPanel = wx.StaticBoxSizer(self.radiosAutoNowAddStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosAutoNowAddPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosAutoNowAdd = wx.StaticText(self.scollPanel, -1, "21、仅创建时赋值日期(auto_now_add):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosAutoNowAdd = wx.RadioBox(self.scollPanel, -1, "", choices=['启用', '不启用'])
self.readmeRadiosAutoNowAdd = wx.StaticText(self.scollPanel, -1, "【创建赋值日期(auto_now_add)】** 仅在创建记录时一次赋值该日期,赋值后不允许修改。")
self.radiosAutoNowAddPanel.Add(self.labelRadiosAutoNowAdd, 0, wx.EXPAND | wx.ALL, 2)
self.radiosAutoNowAddPanel.Add(self.radiosAutoNowAdd, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosAutoNowAdd, 0, wx.EXPAND | wx.ALL, 2)
# 文件上传路径(upload_to)
self.inputUploadToStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputUploadToPanel = wx.StaticBoxSizer(self.inputUploadToStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputUploadToPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputUploadTo = wx.StaticText(self.scollPanel, -1, "22、文件上传路径(upload_to)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputUploadTo = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputUploadTo = wx.StaticText(self.scollPanel, -1, "【文件上传路径(upload_to)】** 指定文件上传路径。")
self.inputUploadToPanel.Add(self.labelInputUploadTo, 0, wx.EXPAND | wx.ALL, 2)
self.inputUploadToPanel.Add(self.inputUploadTo, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputUploadTo, 0, wx.EXPAND | wx.ALL, 2)
# 关联关系--模型下拉列表选择(多对一的一)
self.choiceSelectModelStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.choiceSelectModelPanel = wx.StaticBoxSizer(self.choiceSelectModelStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.choiceSelectModelPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelChoiceSelectModel = wx.StaticText(self.scollPanel, -1, "A、关联关系模型【外键关联模型】", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
# self.choiceSelectModel = wx.Choice(self.scollPanel, -1, choices = [' ']+['self'])
self.choiceSelectModel = wx.TextCtrl(self.scollPanel, -1)
self.readmeChoiceSelectModel = wx.StaticText(self.scollPanel, -1, " ** 多对一的一、一对一的一、多对多的多。如:Person、'Person'、'other_app.Person'。")
self.choiceSelectModelPanel.Add(self.labelChoiceSelectModel, 0, wx.EXPAND | wx.ALL, 2)
self.choiceSelectModelPanel.Add(self.choiceSelectModel, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeChoiceSelectModel, 0, wx.EXPAND | wx.ALL, 2)
# 删除规则【on_delete】
self.choiceSelectDelRuleStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.choiceSelectDelRulePanel = wx.StaticBoxSizer(self.choiceSelectDelRuleStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.choiceSelectDelRulePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelChoiceSelectDelRule = wx.StaticText(self.scollPanel, -1, "B、删除规则(on_delete)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.choiceSelectDelRule = wx.Choice(self.scollPanel, -1, choices = [' ']+['models.CASCADE','models.SET_NULL','models.PROTECT','models.SET_DEFAULT','models.DO_NOTHING',])
self.readmeChoiceSelectDelRule = wx.StaticText(self.scollPanel, -1, " ** 默认选择models.CASCADE(级联删除);Django 2.0起on_delete为必填参数。")
self.choiceSelectDelRulePanel.Add(self.labelChoiceSelectDelRule, 0, wx.EXPAND | wx.ALL, 2)
self.choiceSelectDelRulePanel.Add(self.choiceSelectDelRule, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeChoiceSelectDelRule, 0, wx.EXPAND | wx.ALL, 2)
# 备注名【verbose_name】
self.inputRelationRemarkStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputRelationRemarkPanel = wx.StaticBoxSizer(self.inputRelationRemarkStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputRelationRemarkPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputRelationRemark = wx.StaticText(self.scollPanel, -1, "C、关联字段备注名(verbose_name)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputRelationRemark = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputRelationRemark = wx.StaticText(self.scollPanel, -1, " ** 后台显示的关联字段的可读名称。")
self.inputRelationRemarkPanel.Add(self.labelInputRelationRemark, 0, wx.EXPAND | wx.ALL, 2)
self.inputRelationRemarkPanel.Add(self.inputRelationRemark, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputRelationRemark, 0, wx.EXPAND | wx.ALL, 2)
# 筛选关联字段【limit_choices_to】
self.inputLimitChoicesToStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputLimitChoicesToPanel = wx.StaticBoxSizer(self.inputLimitChoicesToStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputLimitChoicesToPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputLimitChoicesTo = wx.StaticText(self.scollPanel, -1, "D、筛选关联字段【limit_choices_to】", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputLimitChoicesTo = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputLimitChoicesTo = wx.StaticText(self.scollPanel, -1, " ** 如:{'is_staff': True}。也可为一个Q对象,或返回字典/Q对象的可调用对象。")
self.inputLimitChoicesToPanel.Add(self.labelInputLimitChoicesTo, 0, wx.EXPAND | wx.ALL, 2)
self.inputLimitChoicesToPanel.Add(self.inputLimitChoicesTo, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputLimitChoicesTo, 0, wx.EXPAND | wx.ALL, 2)
# 反向名称(related_name)
self.inputRelatedNameStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputRelatedNamePanel = wx.StaticBoxSizer(self.inputRelatedNameStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputRelatedNamePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputRelatedName = wx.StaticText(self.scollPanel, -1, "E、反向名称(related_name)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputRelatedName = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputRelatedName = wx.StaticText(self.scollPanel, -1, " ** 被关联模型对象找到本模型对象的名称。赋值'+'关闭反向查找功能。抽象类必需。")
self.inputRelatedNamePanel.Add(self.labelInputRelatedName, 0, wx.EXPAND | wx.ALL, 2)
self.inputRelatedNamePanel.Add(self.inputRelatedName, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputRelatedName, 0, wx.EXPAND | wx.ALL, 2)
# 反向过滤器名称(related_query_name)
self.inputRelatedQueryNameStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputRelatedQueryNamePanel = wx.StaticBoxSizer(self.inputRelatedQueryNameStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputRelatedQueryNamePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputRelatedQueryName = wx.StaticText(self.scollPanel, -1, "F、反向过滤器名称(related_query_name)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputRelatedQueryName = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputRelatedQueryName = wx.StaticText(self.scollPanel, -1, " ** 默认取related_name的值。用于:tag__name='important'之类的反向过滤前缀。")
self.inputRelatedQueryNamePanel.Add(self.labelInputRelatedQueryName, 0, wx.EXPAND | wx.ALL, 2)
self.inputRelatedQueryNamePanel.Add(self.inputRelatedQueryName, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputRelatedQueryName, 0, wx.EXPAND | wx.ALL, 2)
# 指定关联外键(to_field)
self.inputToFieldStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputToFieldPanel = wx.StaticBoxSizer(self.inputToFieldStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputToFieldPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputToField = wx.StaticText(self.scollPanel, -1, "G、指定关联外键(to_field)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputToField = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputToField = wx.StaticText(self.scollPanel, -1, " ** 默认取primary_key=True的字段。若要改变,必须是设置unique=True的字段。")
self.inputToFieldPanel.Add(self.labelInputToField, 0, wx.EXPAND | wx.ALL, 2)
self.inputToFieldPanel.Add(self.inputToField, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputToField, 0, wx.EXPAND | wx.ALL, 2)
# 外键约束(db_constraint)
self.radiosDBConstraintStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.radiosDBConstraintPanel = wx.StaticBoxSizer(self.radiosDBConstraintStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.radiosDBConstraintPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelRadiosDBConstraint = wx.StaticText(self.scollPanel, -1, "H、外键约束(db_constraint)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.radiosDBConstraint = wx.RadioBox(self.scollPanel, -1, "", choices=['开启', '关闭'])
self.readmeRadiosDBConstraint = wx.StaticText(self.scollPanel, -1, " ** 当有无效冗余数据或为共享数据库时可关闭,否则不建议关闭。")
self.radiosDBConstraintPanel.Add(self.labelRadiosDBConstraint, 0, wx.EXPAND | wx.ALL, 2)
self.radiosDBConstraintPanel.Add(self.radiosDBConstraint, 0, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeRadiosDBConstraint, 0, wx.EXPAND | wx.ALL, 2)
# 多对多中间表名(db_table)
self.inputDBTableStaticBox = wx.StaticBox(self.scollPanel, -1, '')
self.inputDBTablePanel = wx.StaticBoxSizer(self.inputDBTableStaticBox, wx.HORIZONTAL)
scollPanelSizer.Add(self.inputDBTablePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputDBTable = wx.StaticText(self.scollPanel, -1, "I、多对多中间表名(db_table)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.inputDBTable = wx.TextCtrl(self.scollPanel, -1)
self.readmeInputDBTable = wx.StaticText(self.scollPanel, -1, " ** Django默认按模型表名和字段名生成中间表名(过长时截断并附加哈希以保证唯一),也可自己命名。")
self.inputDBTablePanel.Add(self.labelInputDBTable, 0, wx.EXPAND | wx.ALL, 2)
self.inputDBTablePanel.Add(self.inputDBTable, 1, wx.EXPAND | wx.ALL, 2)
scollPanelSizer.Add(self.readmeInputDBTable, 0, wx.EXPAND | wx.ALL, 2)
# all 交换类型(swappable)
# 多对多 指定多对多模型(through)
# 多对多 指定多对多模型外键(through_fields)
# 一对一 父类链接(parent_link)
# 暂时不开放上述参数
# 后触发按钮
self.afterBtns.extend([
self.btnResetInput, self.btnAddFieldToArea,
# self.btnExecSave,
])
# 所有的参数
self.allArgs.extend([
self.choiceFieldType, # 字段类型选择放这里不合理【暂时不调整】
self.inputFieldModelName, self.inputFieldDatabaseName, self.inputFieldRemarkName,
self.radiosFiledBlank, self.radiosFiledNull, self.radiosFiledPrimary, # 英文拼错了,不改了
self.radiosFiledUnique, self.radiosFiledDbIndex, self.radiosFiledEditable,
self.choicesFiledUniqueForDate, self.choicesFiledUniqueForMonth, self.choicesFiledUniqueForYear,
self.inputDefaultValue, self.inputFormHelpText, self.inputFormErrorMessage,
self.inputMaxLength, self.inputMaxDigits, self.inputDecimalPlaces,
self.radiosAutoNow, self.radiosAutoNowAdd, self.inputUploadTo,
self.choiceSelectModel, self.choiceSelectDelRule, self.inputRelationRemark,
self.inputLimitChoicesTo, self.inputRelatedName, self.inputRelatedQueryName,
self.inputToField, self.radiosDBConstraint, self.inputDBTable,
])
# 共用参数
self.commonArgs.extend([
self.inputFieldModelName, self.inputFieldDatabaseName, self.inputFieldRemarkName,
self.radiosFiledBlank, self.radiosFiledNull, self.radiosFiledPrimary,
self.radiosFiledUnique, self.radiosFiledDbIndex, self.radiosFiledEditable,
self.choicesFiledUniqueForDate, self.choicesFiledUniqueForMonth, self.choicesFiledUniqueForYear,
self.inputDefaultValue, self.inputFormHelpText, self.inputFormErrorMessage,
])
# 私有参数
self.specialArgs.extend([
# 一行表示一组私有参数
self.inputMaxLengthStaticBox, self.inputMaxLength, self.labelInputMaxLength, self.readmeInputMaxLength,
self.inputMaxDigitsStaticBox, self.inputMaxDigits, self.labelInputMaxDigits, self.readmeInputMaxDigits,
self.inputDecimalPlacesStaticBox, self.inputDecimalPlaces, self.labelInputDecimalPlaces, self.readmeInputDecimalPlaces,
self.radiosAutoNowStaticBox, self.radiosAutoNow, self.labelRadiosAutoNow, self.readmeRadiosAutoNow,
self.radiosAutoNowAddStaticBox, self.radiosAutoNowAdd, self.labelRadiosAutoNowAdd, self.readmeRadiosAutoNowAdd,
self.inputUploadToStaticBox, self.inputUploadTo, self.labelInputUploadTo, self.readmeInputUploadTo,
# 关联字段
self.choiceSelectModelStaticBox, self.choiceSelectModel, self.labelChoiceSelectModel, self.readmeChoiceSelectModel,
self.choiceSelectDelRuleStaticBox, self.choiceSelectDelRule, self.labelChoiceSelectDelRule, self.readmeChoiceSelectDelRule,
self.inputRelationRemarkStaticBox, self.inputRelationRemark, self.labelInputRelationRemark, self.readmeInputRelationRemark,
self.inputLimitChoicesToStaticBox, self.inputLimitChoicesTo, self.labelInputLimitChoicesTo, self.readmeInputLimitChoicesTo,
self.inputRelatedNameStaticBox, self.inputRelatedName, self.labelInputRelatedName, self.readmeInputRelatedName,
self.inputRelatedQueryNameStaticBox, self.inputRelatedQueryName, self.labelInputRelatedQueryName, self.readmeInputRelatedQueryName,
self.inputToFieldStaticBox, self.inputToField, self.labelInputToField, self.readmeInputToField,
self.radiosDBConstraintStaticBox, self.radiosDBConstraint, self.labelRadiosDBConstraint, self.readmeRadiosDBConstraint,
self.inputDBTableStaticBox, self.inputDBTable, self.labelInputDBTable, self.readmeInputDBTable,
])
# 字体初始化控件录入
self.readmeStaticTexts.extend([
self.readmeChoiceFieldType,self.readmeInputFieldModelName,
self.readmeInputFieldDatabaseName,self.readmeInputFieldRemarkName,
self.readmeRadiosFiledBlank,self.readmeRadiosFiledNull,
self.readmeRadiosFiledPrimary,self.readmeRadiosFiledUnique,
self.readmeRadiosFiledDbIndex,self.readmeRadiosFiledEditable,
self.readmeInputMaxLength,self.readmeRadiosAutoNow,
self.readmeRadiosAutoNowAdd,self.readmeInputDefaultValue,
self.readmeInputFormHelpText,self.readmeInputFormErrorMessage,
self.readmeInputUploadTo,self.readmeInputMaxDigits,
self.readmeInputDecimalPlaces,self.readmeChoicesFiledUniqueForDate,
self.readmeChoicesFiledUniqueForMonth,self.readmeChoicesFiledUniqueForYear,
self.readmeChoiceSelectModel,self.readmeChoiceSelectDelRule,
self.readmeInputRelationRemark,self.readmeInputLimitChoicesTo,
self.readmeInputRelatedName,self.readmeInputRelatedQueryName,
self.readmeInputToField,self.readmeRadiosDBConstraint,
self.readmeInputDBTable,
])
self.labelStaticTexts.extend([
self.choiceFieldTypeLabel,self.labelFieldModelName,
self.labelFieldDatabaseName,self.labelFieldRemarkName,
self.labelRadiosFiledBlank,self.labelRadiosFiledNull,
self.labelRadiosFiledPrimary,self.labelRadiosFiledUnique,
self.labelRadiosFiledDbIndex,self.labelRadiosFiledEditable,
self.labelInputMaxLength,self.labelRadiosAutoNow,
self.labelRadiosAutoNowAdd,self.labelInputDefaultValue,
self.labelInputFormHelpText,self.labelInputFormErrorMessage,
self.labelInputUploadTo,self.labelInputMaxDigits,
self.labelInputDecimalPlaces,self.labelChoicesFiledUniqueForDate,
self.labelChoicesFiledUniqueForMonth,self.labelChoicesFiledUniqueForYear,
self.labelChoiceSelectModel,self.labelChoiceSelectDelRule,
self.labelInputRelationRemark,self.labelInputLimitChoicesTo,
self.labelInputRelatedName,self.labelInputRelatedQueryName,
self.labelInputToField,self.labelRadiosDBConstraint,
self.labelInputDBTable,
])
# 按钮点击事件
self.Bind(wx.EVT_BUTTON, self.onExit, self.btnExit)
self.Bind(wx.EVT_BUTTON, self.onBtnAddNew, self.btnAddNew)
self.Bind(wx.EVT_BUTTON, self.onBtnResetInput, self.btnResetInput)
self.Bind(wx.EVT_BUTTON, self.onBtnAddFieldToArea, self.btnAddFieldToArea)
self.Bind(wx.EVT_BUTTON, self.onBtnExecSave, self.btnExecSave)
self.Bind(wx.EVT_BUTTON, self.onBtnPreview, self.btnPreview)
# 下拉框选择事件
self.Bind(wx.EVT_CHOICE, self.onChoiceFieldType, self.choiceFieldType)
self.Bind(wx.EVT_CHOICE, self.onChoiceSelectDelRule, self.choiceSelectDelRule)
# 文本实时监听事件
self.Bind(wx.EVT_TEXT, self.onInputFieldModelName, self.inputFieldModelName)
self.Bind(wx.EVT_TEXT, self.onInputMaxLength, self.inputMaxLength)
self.Bind(wx.EVT_TEXT, self.onInputMaxDigits, self.inputMaxDigits)
self.Bind(wx.EVT_TEXT, self.onInputDecimalPlaces, self.inputDecimalPlaces)
self.Bind(wx.EVT_TEXT, self.onInputRelatedName, self.inputRelatedName)
# 单选框事件
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledBlank)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledNull)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledPrimary)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledUnique)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledDbIndex)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledEditable)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosAutoNow)
self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosAutoNowAdd)
def _init_Meta_panel(self):
"""初始化Meta选项面板"""
# 显示和隐藏Meta按钮,用于空间的合理布局
self.btnShowUnshowMeta = buttons.GenButton(self.panel, -1, '【显示】Meta元数据(表级参数设置)')
self.panelSizer.Add(self.btnShowUnshowMeta, 0, wx.EXPAND | wx.ALL, 2)
self.btnShowUnshowMeta.SetBackgroundColour(CON_COLOR_MAIN)
self.btnShowUnshowMeta.SetForegroundColour(CON_COLOR_WHITE)
self.metaScollPanel = scrolledpanel.ScrolledPanel(self.panel, -1, size=(730,444))
self.metaScollPanel.SetupScrolling()
metaScollPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.metaScollPanel.SetSizer(metaScollPanelSizer)
self.panelSizer.Add(self.metaScollPanel, 0, wx.EXPAND | wx.ALL, 2)
# Meta的各种选项
# 抽象类(abstract)
self.metaAbstractOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaAbstractOptionPanel = wx.StaticBoxSizer(self.metaAbstractOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaAbstractOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaAbstractOption = wx.StaticText(self.metaScollPanel, -1, "1、抽象类(abstract):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaAbstractOption = wx.RadioBox(self.metaScollPanel, -1, "", choices=['是', '否'])
self.readmeMetaAbstractOption = wx.StaticText(self.metaScollPanel, -1, " ** 该模型声明为抽象模型后,不会在数据库中建表。")
self.metaAbstractOptionPanel.Add(self.labelMetaAbstractOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaAbstractOptionPanel.Add(self.metaAbstractOption, 0, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaAbstractOption, 0, wx.EXPAND | wx.ALL, 2)
# 模型归属应用程序(app_label)
# 可以用model._meta.label或model._meta.label_lower获取模型名称
self.metaAppLabelOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaAppLabelOptionPanel = wx.StaticBoxSizer(self.metaAppLabelOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaAppLabelOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaAppLabelOption = wx.StaticText(self.metaScollPanel, -1, "2、模型归属应用程序(app_label):", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaAppLabelOption = wx.Choice(self.metaScollPanel, -1, choices=[' ',]+get_configs(CONFIG_PATH)['app_names'])
self.readmeMetaAppLabelOption = wx.StaticText(self.metaScollPanel, -1, " ** 不指定,则默认归属于当前模型文件所在的应用程序。")
self.metaAppLabelOptionPanel.Add(self.labelMetaAppLabelOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaAppLabelOptionPanel.Add(self.metaAppLabelOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaAppLabelOption, 0, wx.EXPAND | wx.ALL, 2)
# 模型管理器名称(base_manager_name)
self.metaObjectsOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaObjectsOptionPanel = wx.StaticBoxSizer(self.metaObjectsOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaObjectsOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaObjectsOption = wx.StaticText(self.metaScollPanel, -1, "3、模型管理器名称(base_manager_name)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaObjectsOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaObjectsOption = wx.StaticText(self.metaScollPanel, -1, " ** 默认为objects。可用model.objects调出管理器。")
self.metaObjectsOptionPanel.Add(self.labelMetaObjectsOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaObjectsOptionPanel.Add(self.metaObjectsOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaObjectsOption, 0, wx.EXPAND | wx.ALL, 2)
# 数据表名(db_table)
# 在mysql中均小写,Oracle中数据库表名要用双引号括起来
self.metaDBTableOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaDBTableOptionPanel = wx.StaticBoxSizer(self.metaDBTableOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaDBTableOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaDBTableOption = wx.StaticText(self.metaScollPanel, -1, "4、数据表名(db_table)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaDBTableOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaDBTableOption = wx.StaticText(self.metaScollPanel, -1, " ** 默认为应用程序名+模型名,全小写。如:app_model。")
self.metaDBTableOptionPanel.Add(self.labelMetaDBTableOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaDBTableOptionPanel.Add(self.metaDBTableOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaDBTableOption, 0, wx.EXPAND | wx.ALL, 2)
# 表空间名(db_tablespace)
self.metaDBTableSpaceOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaDBTableSpaceOptionPanel = wx.StaticBoxSizer(self.metaDBTableSpaceOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaDBTableSpaceOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaDBTableSpaceOption = wx.StaticText(self.metaScollPanel, -1, "5、表空间名(db_tablespace)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaDBTableSpaceOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaDBTableSpaceOption = wx.StaticText(self.metaScollPanel, -1, " ** 默认使用settings.py中的DEFAULT_TABLESPACE值。")
self.metaDBTableSpaceOptionPanel.Add(self.labelMetaDBTableSpaceOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaDBTableSpaceOptionPanel.Add(self.metaDBTableSpaceOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaDBTableSpaceOption, 0, wx.EXPAND | wx.ALL, 2)
# 指定默认解析管理器(default_manager_name)
self.metaDefaultManagerNameOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaDefaultManagerNameOptionPanel = wx.StaticBoxSizer(self.metaDefaultManagerNameOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaDefaultManagerNameOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaDefaultManagerNameOption = wx.StaticText(self.metaScollPanel, -1, "6、指定默认解析管理器(default_manager_name)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaDefaultManagerNameOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaDefaultManagerNameOption = wx.StaticText(self.metaScollPanel, -1, " ** 用于Django的默认行为,防止查询集缺失导致的错误。常用于一个模型定义多个管理器的情况。")
self.metaDefaultManagerNameOptionPanel.Add(self.labelMetaDefaultManagerNameOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaDefaultManagerNameOptionPanel.Add(self.metaDefaultManagerNameOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaDefaultManagerNameOption, 0, wx.EXPAND | wx.ALL, 2)
# 默认关联名称(default_related_name)
self.metaDefaultRelatedNameOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaDefaultRelatedNameOptionPanel = wx.StaticBoxSizer(self.metaDefaultRelatedNameOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaDefaultRelatedNameOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaDefaultRelatedNameOption = wx.StaticText(self.metaScollPanel, -1, "7、反向名称(default_related_name)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaDefaultRelatedNameOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaDefaultRelatedNameOption = wx.StaticText(self.metaScollPanel, -1, " ** 外键关联反向名称,默认<model_name>_set。")
self.metaDefaultRelatedNameOptionPanel.Add(self.labelMetaDefaultRelatedNameOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaDefaultRelatedNameOptionPanel.Add(self.metaDefaultRelatedNameOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaDefaultRelatedNameOption, 0, wx.EXPAND | wx.ALL, 2)
# 取最新的一条记录(get_latest_by)
# 配合latest()函数使用
self.metaGetLatestByOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaGetLatestByOptionPanel = wx.StaticBoxSizer(self.metaGetLatestByOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaGetLatestByOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaGetLatestByOption = wx.StaticText(self.metaScollPanel, -1, "8、取最新的一条记录(get_latest_by)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaGetLatestByOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaGetLatestByOption = wx.StaticText(self.metaScollPanel, -1, " ** 推荐指定日期字段,加前缀'-'表示倒序,可组合,用英文逗号隔开。配合latest()使用。")
self.metaGetLatestByOptionPanel.Add(self.labelMetaGetLatestByOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaGetLatestByOptionPanel.Add(self.metaGetLatestByOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaGetLatestByOption, 0, wx.EXPAND | wx.ALL, 2)
# 托管模型(managed)
self.metaManagedOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaManagedOptionPanel = wx.StaticBoxSizer(self.metaManagedOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaManagedOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaManagedOption = wx.StaticText(self.metaScollPanel, -1, "9、托管模型(managed)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaManagedOption = wx.RadioBox(self.metaScollPanel, -1, "", choices=['是', '否'])
self.readmeMetaManagedOption = wx.StaticText(self.metaScollPanel, -1, " ** 托管意味着由Django掌控模型的所有生命周期,这也是Django的默认行为。")
self.metaManagedOptionPanel.Add(self.labelMetaManagedOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaManagedOptionPanel.Add(self.metaManagedOption, 0, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaManagedOption, 0, wx.EXPAND | wx.ALL, 2)
# 指定排序字段(ordering)
# ordering = [F('author').asc(nulls_last=True)]
self.metaOrderingOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaOrderingOptionPanel = wx.StaticBoxSizer(self.metaOrderingOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaOrderingOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaOrderingOption = wx.StaticText(self.metaScollPanel, -1, "10、指定排序字段(ordering)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaOrderingOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaOrderingOption = wx.StaticText(self.metaScollPanel, -1, " ** 前缀'-'表示倒序,可多字段组合,中间用英文逗号隔开。")
self.metaOrderingOptionPanel.Add(self.labelMetaOrderingOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaOrderingOptionPanel.Add(self.metaOrderingOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaOrderingOption, 0, wx.EXPAND | wx.ALL, 2)
# 默认权限(default_permissions)
self.metaDefaultPermissionsOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaDefaultPermissionsOptionPanel = wx.StaticBoxSizer(self.metaDefaultPermissionsOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaDefaultPermissionsOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaDefaultPermissionsOption = wx.StaticText(self.metaScollPanel, -1, "11、默认权限(default_permissions)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaDefaultPermissionsOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaDefaultPermissionsOption = wx.StaticText(self.metaScollPanel, -1, " ** 默认值('add', 'change', 'delete', 'view'),view为Django2.1版本后添加。")
self.metaDefaultPermissionsOptionPanel.Add(self.labelMetaDefaultPermissionsOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaDefaultPermissionsOptionPanel.Add(self.metaDefaultPermissionsOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaDefaultPermissionsOption, 0, wx.EXPAND | wx.ALL, 2)
# 额外权限(permissions)
# (permission_code, human_readable_permission_name)
self.metaPermissionsOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaPermissionsOptionPanel = wx.StaticBoxSizer(self.metaPermissionsOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaPermissionsOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaPermissionsOption = wx.StaticText(self.metaScollPanel, -1, "12、额外权限(permissions)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaPermissionsOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaPermissionsOption = wx.StaticText(self.metaScollPanel, -1, " ** 默认添加增删改查权限,可新增权限,用二元组列表表示。如[('code', 'name'),]")
self.metaPermissionsOptionPanel.Add(self.labelMetaPermissionsOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaPermissionsOptionPanel.Add(self.metaPermissionsOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaPermissionsOption, 0, wx.EXPAND | wx.ALL, 2)
# 代理模型(proxy)
self.metaProxyOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaProxyOptionPanel = wx.StaticBoxSizer(self.metaProxyOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaProxyOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaProxyOption = wx.StaticText(self.metaScollPanel, -1, "13、代理模型(proxy)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaProxyOption = wx.RadioBox(self.metaScollPanel, -1, "", choices=['是', '否'])
self.readmeMetaProxyOption = wx.StaticText(self.metaScollPanel, -1, " ** 为原模型创建一个代理,用于扩展排序或管理器,与原模型共用一个表。")
self.metaProxyOptionPanel.Add(self.labelMetaProxyOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaProxyOptionPanel.Add(self.metaProxyOption, 0, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaProxyOption, 0, wx.EXPAND | wx.ALL, 2)
# 保存旧算法(select_on_save)
self.metaSelectOnSaveOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaSelectOnSaveOptionPanel = wx.StaticBoxSizer(self.metaSelectOnSaveOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaSelectOnSaveOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaSelectOnSaveOption = wx.StaticText(self.metaScollPanel, -1, "14、保存旧算法(select_on_save)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaSelectOnSaveOption = wx.RadioBox(self.metaScollPanel, -1, "", choices=['是', '否'])
self.readmeMetaSelectOnSaveOption = wx.StaticText(self.metaScollPanel, -1, " ** 旧算法先查询后更新,新算法直接尝试更新。")
self.metaSelectOnSaveOptionPanel.Add(self.labelMetaSelectOnSaveOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaSelectOnSaveOptionPanel.Add(self.metaSelectOnSaveOption, 0, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaSelectOnSaveOption, 0, wx.EXPAND | wx.ALL, 2)
# 指定后端数据库类型(required_db_vendor)
self.metaRequiredDBVendorOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaRequiredDBVendorOptionPanel = wx.StaticBoxSizer(self.metaRequiredDBVendorOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaRequiredDBVendorOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaRequiredDBVendorOption = wx.StaticText(self.metaScollPanel, -1, "15、指定后端数据库类型(required_db_vendor)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaRequiredDBVendorOption = wx.Choice(self.metaScollPanel, -1, choices=[' ',]+env.getDjangoSupportDatabase())
self.readmeMetaRequiredDBVendorOption = wx.StaticText(self.metaScollPanel, -1, " ** 不指定则默认支持所有。")
self.metaRequiredDBVendorOptionPanel.Add(self.labelMetaRequiredDBVendorOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaRequiredDBVendorOptionPanel.Add(self.metaRequiredDBVendorOption, 0, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaRequiredDBVendorOption, 0, wx.EXPAND | wx.ALL, 2)
# 索引集合(indexes)
self.metaIndexesOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaIndexesOptionPanel = wx.StaticBoxSizer(self.metaIndexesOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaIndexesOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaIndexesOption = wx.StaticText(self.metaScollPanel, -1, "16、索引集合(indexes)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaIndexesOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaIndexesOption = wx.StaticText(self.metaScollPanel, -1, " ** 示例:[models.Index(fields=['first_name',], name='first_name_idx'),]")
self.metaIndexesOptionPanel.Add(self.labelMetaIndexesOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaIndexesOptionPanel.Add(self.metaIndexesOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaIndexesOption, 0, wx.EXPAND | wx.ALL, 2)
# 值唯一组合(unique_together)
self.metaUniqueTogetherOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaUniqueTogetherOptionPanel = wx.StaticBoxSizer(self.metaUniqueTogetherOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaUniqueTogetherOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaUniqueTogetherOption = wx.StaticText(self.metaScollPanel, -1, "17、值唯一组合(unique_together)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaUniqueTogetherOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaUniqueTogetherOption = wx.StaticText(self.metaScollPanel, -1, " ** 示例:[['driver', 'restaurant',],]。将来可能被弃用。")
self.metaUniqueTogetherOptionPanel.Add(self.labelMetaUniqueTogetherOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaUniqueTogetherOptionPanel.Add(self.metaUniqueTogetherOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaUniqueTogetherOption, 0, wx.EXPAND | wx.ALL, 2)
# 索引组合(index_together)
self.metaIndexTogetherOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaIndexTogetherOptionPanel = wx.StaticBoxSizer(self.metaIndexTogetherOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaIndexTogetherOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaIndexTogetherOption = wx.StaticText(self.metaScollPanel, -1, "18、索引组合(index_together)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaIndexTogetherOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaIndexTogetherOption = wx.StaticText(self.metaScollPanel, -1, " ** 示例:[['pub_date', 'deadline'],]。将来可能被弃用。")
self.metaIndexTogetherOptionPanel.Add(self.labelMetaIndexTogetherOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaIndexTogetherOptionPanel.Add(self.metaIndexTogetherOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaIndexTogetherOption, 0, wx.EXPAND | wx.ALL, 2)
# 约束条件(constraints)
self.metaConstraintsOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaConstraintsOptionPanel = wx.StaticBoxSizer(self.metaConstraintsOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaConstraintsOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaConstraintsOption = wx.StaticText(self.metaScollPanel, -1, "19、约束条件(constraints)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaConstraintsOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaConstraintsOption = wx.StaticText(self.metaScollPanel, -1, " ** 示例:[models.CheckConstraint(check=models.Q(age__gte=18), name='age_gte_18'),]。")
self.metaConstraintsOptionPanel.Add(self.labelMetaConstraintsOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaConstraintsOptionPanel.Add(self.metaConstraintsOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaConstraintsOption, 0, wx.EXPAND | wx.ALL, 2)
# 模型可读单数名称(verbose_name)
self.metaVerboseNameOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaVerboseNameOptionPanel = wx.StaticBoxSizer(self.metaVerboseNameOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaVerboseNameOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaVerboseNameOption = wx.StaticText(self.metaScollPanel, -1, "20、模型可读单数名称(verbose_name)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaVerboseNameOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaVerboseNameOption = wx.StaticText(self.metaScollPanel, -1, " ** 用于后台展示模型的可读名称。")
self.metaVerboseNameOptionPanel.Add(self.labelMetaVerboseNameOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaVerboseNameOptionPanel.Add(self.metaVerboseNameOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaVerboseNameOption, 0, wx.EXPAND | wx.ALL, 2)
# 模型可读复数名称(verbose_name_plural)
self.metaVerboseNamePluralOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')
self.metaVerboseNamePluralOptionPanel = wx.StaticBoxSizer(self.metaVerboseNamePluralOptionStaticBox, wx.HORIZONTAL)
metaScollPanelSizer.Add(self.metaVerboseNamePluralOptionPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelMetaVerboseNamePluralOption = wx.StaticText(self.metaScollPanel, -1, "21、模型可读复数名称(verbose_name_plural)", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)
self.metaVerboseNamePluralOption = wx.TextCtrl(self.metaScollPanel, -1)
self.readmeMetaVerboseNamePluralOption = wx.StaticText(self.metaScollPanel, -1, " ** 默认是verbose_name+s。")
self.metaVerboseNamePluralOptionPanel.Add(self.labelMetaVerboseNamePluralOption, 0, wx.EXPAND | wx.ALL, 2)
self.metaVerboseNamePluralOptionPanel.Add(self.metaVerboseNamePluralOption, 1, wx.EXPAND | wx.ALL, 2)
metaScollPanelSizer.Add(self.readmeMetaVerboseNamePluralOption, 0, wx.EXPAND | wx.ALL, 2)
# order_with_respect_to暂不放出
# 标签显示优化
self.readmeStaticTexts.extend([
self.readmeMetaAbstractOption,
self.readmeMetaAppLabelOption,
self.readmeMetaObjectsOption,
self.readmeMetaDBTableOption,
self.readmeMetaDBTableSpaceOption,
self.readmeMetaDefaultManagerNameOption,
self.readmeMetaDefaultRelatedNameOption,
self.readmeMetaGetLatestByOption,
self.readmeMetaManagedOption,
self.readmeMetaOrderingOption,
self.readmeMetaPermissionsOption,
self.readmeMetaDefaultPermissionsOption,
self.readmeMetaProxyOption,
self.readmeMetaSelectOnSaveOption,
self.readmeMetaRequiredDBVendorOption,
self.readmeMetaIndexesOption,
self.readmeMetaUniqueTogetherOption,
self.readmeMetaIndexTogetherOption,
self.readmeMetaConstraintsOption,
self.readmeMetaVerboseNameOption,
self.readmeMetaVerboseNamePluralOption,
])
self.labelStaticTexts.extend([
self.labelMetaAbstractOption,
self.labelMetaAppLabelOption,
self.labelMetaObjectsOption,
self.labelMetaDBTableOption,
self.labelMetaDBTableSpaceOption,
self.labelMetaDefaultManagerNameOption,
self.labelMetaDefaultRelatedNameOption,
self.labelMetaGetLatestByOption,
self.labelMetaManagedOption,
self.labelMetaOrderingOption,
self.labelMetaPermissionsOption,
self.labelMetaDefaultPermissionsOption,
self.labelMetaProxyOption,
self.labelMetaSelectOnSaveOption,
self.labelMetaRequiredDBVendorOption,
self.labelMetaIndexesOption,
self.labelMetaUniqueTogetherOption,
self.labelMetaIndexTogetherOption,
self.labelMetaConstraintsOption,
self.labelMetaVerboseNameOption,
self.labelMetaVerboseNamePluralOption,
])
# 按钮事件
self.Bind(wx.EVT_BUTTON, self.onBtnShowUnshowMeta, self.btnShowUnshowMeta)
# 单选框事件
self.Bind(wx.EVT_RADIOBOX, self.onMetaRadioChanged, self.metaAbstractOption)
self.metaScollPanel.Show(False) # 默认不显示
self._init_meta_data()
def _init_meta_data(self):
"""初始化Meta选项数据"""
self.metaAbstractOption.SetSelection(1)
self.metaAppLabelOption.SetSelection(0)
self.metaObjectsOption.SetValue('objects')
self.metaDBTableOption.SetValue('')
self.metaDBTableSpaceOption.SetValue('')
self.metaDefaultManagerNameOption.SetValue('')
self.metaDefaultRelatedNameOption.SetValue('')
self.metaGetLatestByOption.SetValue('')
self.metaManagedOption.SetSelection(0)
self.metaOrderingOption.SetValue('')
self.metaDefaultPermissionsOption.SetValue("('add', 'change', 'delete', 'view')")
self.metaPermissionsOption.SetValue('')
self.metaProxyOption.SetSelection(1)
self.metaSelectOnSaveOption.SetSelection(1)
self.metaRequiredDBVendorOption.SetSelection(0)
self.metaIndexesOption.SetValue('')
self.metaUniqueTogetherOption.SetValue('')
self.metaIndexTogetherOption.SetValue('')
self.metaConstraintsOption.SetValue('')
self.metaVerboseNameOption.SetValue('')
self.metaVerboseNamePluralOption.SetValue('')
def onMetaRadioChanged(self, e):
"""单选框值更新事件"""
fid = e.GetId() # 控件id
status_abstract = self.metaAbstractOption.GetSelection()
if fid == self.metaAbstractOption.GetId():
if 0 == status_abstract:
RichMsgDialog.showOkMsgDialog(self, '抽象模型不会在数据库中建表,并且表级的一些参数设置将对子类无效。', '警告')
def onBtnShowUnshowMeta(self, e):
"""显示和隐藏Meta按钮,用于空间的合理布局"""
if '【显示】Meta元数据(表级参数设置)' == self.btnShowUnshowMeta.Label:
self.metaScollPanel.Show(True)
self.btnShowUnshowMeta.SetLabel('【隐藏】Meta元数据(表级参数设置)')
self.panel.Layout() # 重新计算布局
else:
self.metaScollPanel.Show(False)
self.btnShowUnshowMeta.SetLabel('【显示】Meta元数据(表级参数设置)')
self.panel.Layout()
def _generate_create_code(self, mode: str='A'):
"""生成创建模型代码"""
# A: 预览模式
# B: 写入模式
pre_fields = self._get_fields_attrs() # 字段详细定义列表
meta_attrs = self._get_meta_attrs() # Meta参数
if len(pre_fields) > 0:
fields_code = '\n'.join([f' {_}' for _ in pre_fields])
else:
fields_code = ' pass'
# Meta元数据定义
if len(meta_attrs) > 0:
meta_code = '\n'.join([f' {_}' for _ in meta_attrs])
else:
meta_code = ' pass'
# __str__()返回值
str_msg = " # return ''"  # 预留的__str__返回值占位,当前模板中未使用
# 如果没有设置主键,则自动增加主键【预览界面有效,实际代码无此行】
if len([_ for _ in self.allRows if CON_YES==_['primary_key']]) <= 0: # 用户无主动设置主键
if 'A' == mode:
auto_primary = ' id = models.AutoField(primary_key=True)'
else:
auto_primary = ''
else:
auto_primary = ''
return f"""\
class <model_name>(models.Model):
{auto_primary}
{fields_code}
class Meta:
{meta_code}
def __str__(self):
return '如:self.name'
"""
def onBtnPreview(self, e):
"""预览待插入代码"""
model_code = self._generate_create_code()
RichMsgDialog.showScrolledMsgDialog(self, model_code, "代码预览")
def _get_fields_attrs(self):
"""获取字段参数输出字符串"""
pre_fields = []
for _ in self.allRows:
# 若和默认值一致,则不显式显示参数
args = []
field_name = _['field_name']
field_type = _['field_type']
# 位置参数
if field_type not in CON_FOREIGN_FIELDS and _['remarker'] != _['field_name'].replace('_', ' '): # 备注名默认为字段名中下划线换成空格,不一致时才显式输出
t = _['remarker']
args.append(f"'{t}'")
if field_type in CON_FOREIGN_FIELDS and '' != _["relate_model"]:
t = _['relate_model']
args.append(f"{t}")
# 关键字参数
if _['field_name'] != _['db_column']: # 默认一致,不一致则新增
t = _['db_column']
args.append(f"db_column='{t}'")
if CON_YES == _['primary_key']:
args.append("primary_key=True")
if CON_YES == _['blank']:
args.append("blank=True")
if CON_YES == _['null']:
args.append("null=True")
if CON_YES == _['unique']:
args.append("unique=True")
if CON_YES == _['db_index'] and field_type not in CON_FOREIGN_FIELDS:
args.append("db_index=True")
if CON_NO == _['db_index'] and field_type in CON_FOREIGN_FIELDS:
args.append("db_index=False")
if CON_YES == _['auto_now']:
args.append("auto_now=True")
if CON_YES == _['auto_now_add']:
args.append("auto_now_add=True")
if CON_NO == _['editable']:
args.append("editable=False")
if '' != _['default']:
t = _['default']
args.append(f"default={t}")
if '' != _['unique_for_date']:
t = _['unique_for_date']
args.append(f"unique_for_date='{t}'")
if '' != _['unique_for_month']:
t = _['unique_for_month']
args.append(f"unique_for_month='{t}'")
if '' != _['unique_for_year']:
t = _['unique_for_year']
args.append(f"unique_for_year='{t}'")
if '' != _['error_messages']:
t = _['error_messages']
args.append(f"error_messages='{t}'")
if '' != _['help_text']:
t = _['help_text']
args.append(f"help_text='{t}'")
if '' != _['max_length']:
t = _['max_length']
args.append(f"max_length={t}")
if 'DecimalField' == field_type:
if '' != _['max_digits']:
t = _['max_digits']
args.append(f"max_digits={t}")
if '' != _['decimal_places']:
t = _['decimal_places']
args.append(f"decimal_places={t}")
if '' != _['upload_to']:
t = _['upload_to']
args.append(f"upload_to={t}")
# 关联字段专属
if field_type in CON_FOREIGN_FIELDS:
if '' != _["on_delete"] and 'ManyToManyField' != field_type:
t = _['on_delete']
args.append(f"on_delete={t}")
if '' != _["verbose_name"]:
t = _['verbose_name']
args.append(f"verbose_name='{t}'")
if '' != _["limit_choices_to"]:
t = _['limit_choices_to']
args.append(f"limit_choices_to={t}")
if '' != _["related_name"]:
t = _['related_name']
args.append(f"related_name='{t}'")
if '' != _["related_query_name"]:
t = _['related_query_name']
args.append(f"related_query_name='{t}'")
if '' != _["to_field"]:
t = _['to_field']
args.append(f"to_field='{t}'")
if CON_NO == _['db_constraint']:
args.append(f"db_constraint=False")
if '' != _['db_table']:
t = _['db_table']
args.append(f"db_table='{t}'")
pre_fields.append(f"{field_name} = models.{field_type}({', '.join(args)})")
return pre_fields
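# _get_fields_attrs 输出示例(仅示意,假设某行录入为CharField、备注"姓名"、unique=是、max_length=50):
#   "name = models.CharField('姓名', unique=True, max_length=50)"
# 与默认值一致的参数(blank、null、editable等)不会显式写入生成代码。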
def _get_meta_attrs(self):
"""获取Meta参数输出字符串"""
meta_str = []
if 0 == self.metaAbstractOption.GetSelection():
meta_str.append("abstract = True")
app_label = self.metaAppLabelOption.GetString(self.metaAppLabelOption.GetSelection()).strip()
if app_label:
meta_str.append(f"app_label = '{app_label}'")
base_manager_name = self.metaObjectsOption.GetValue().strip()
if base_manager_name and 'objects' != base_manager_name:
meta_str.append(f"base_manager_name = '{base_manager_name}'")
db_table = self.metaDBTableOption.GetValue().strip()
if db_table:
meta_str.append(f"db_table = '{db_table}'")
db_tablespace = self.metaDBTableSpaceOption.GetValue().strip()
if db_tablespace:
meta_str.append(f"db_tablespace = '{db_tablespace}'")
default_manager_name = self.metaDefaultManagerNameOption.GetValue().strip()
if default_manager_name:
meta_str.append(f"default_manager_name = '{default_manager_name}'")
default_related_name = self.metaDefaultRelatedNameOption.GetValue().strip()
if default_related_name:
meta_str.append(f"default_related_name = '{default_related_name}'")
get_latest_by = self.metaGetLatestByOption.GetValue().strip()
if get_latest_by:
temp = ", ".join([f"'{_}'" for _ in get_latest_by.split(',') if _])
meta_str.append(f"get_latest_by = [{temp}]")
if 1 == self.metaManagedOption.GetSelection():
meta_str.append("managed = False")
ordering = self.metaOrderingOption.GetValue().strip()
if ordering:
temp = ", ".join([f"'{_}'" for _ in ordering.split(',') if _])
meta_str.append(f"ordering = [{temp}]")
default_permissions = self.metaDefaultPermissionsOption.GetValue().strip()
if default_permissions and "('add', 'change', 'delete', 'view')" != default_permissions:
meta_str.append(f"default_permissions = {default_permissions}")
permissions = self.metaPermissionsOption.GetValue().strip()
if permissions:
meta_str.append(f"permissions = {permissions}")
if 0 == self.metaProxyOption.GetSelection():
meta_str.append("proxy = True")
if 0 == self.metaSelectOnSaveOption.GetSelection():
meta_str.append("select_on_save = True")
required_db_vendor = self.metaRequiredDBVendorOption.GetString(self.metaRequiredDBVendorOption.GetSelection()).strip()
if required_db_vendor:
meta_str.append(f"required_db_vendor = '{required_db_vendor}'")
indexes = self.metaIndexesOption.GetValue().strip()
if indexes:
meta_str.append(f"indexes = {indexes}")
unique_together = self.metaUniqueTogetherOption.GetValue().strip()
if unique_together:
meta_str.append(f"unique_together = {unique_together}")
index_together = self.metaIndexTogetherOption.GetValue().strip()
if index_together:
meta_str.append(f"index_together = {index_together}")
constraints = self.metaConstraintsOption.GetValue().strip()
if constraints:
meta_str.append(f"constraints = {constraints}")
verbose_name = self.metaVerboseNameOption.GetValue().strip()
if verbose_name:
meta_str.append(f"verbose_name = '{verbose_name}'")
verbose_name_plural = self.metaVerboseNamePluralOption.GetValue().strip()
if verbose_name_plural:
meta_str.append(f"verbose_name_plural = '{verbose_name_plural}'")
return meta_str
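# _get_meta_attrs 输出示例(仅示意,假设勾选了抽象类并填写了db_table与ordering):
#   ["abstract = True", "db_table = 'person_info'", "ordering = ['-id']"]
# 与默认值一致的选项不会出现在返回列表中。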
def _show_special_args(self):
"""显示特殊参数"""
for _ in self.specialArgs:
_.Show(True)
def _unshow_special_args(self):
"""隐藏特殊参数"""
for _ in self.specialArgs:
_.Show(False)
def onRadioChanged(self, e):
"""单选框值更新事件"""
fid = e.GetId() # 控件id
field_type = con_getFieldTypeName(self.choiceFieldType.GetString(self.choiceFieldType.GetSelection()).strip()) # 当前字段类型
status_null = self.radiosFiledNull.GetSelection()
status_blank = self.radiosFiledBlank.GetSelection()
status_unique = self.radiosFiledUnique.GetSelection()
status_primary_key = self.radiosFiledPrimary.GetSelection()
status_editable = self.radiosFiledEditable.GetSelection()
status_autonow = self.radiosAutoNow.GetSelection()
status_autonowadd = self.radiosAutoNowAdd.GetSelection()
if fid == self.radiosFiledPrimary.GetId():
# 同时只能有一个显式主键存在
if len([_ for _ in self.allRows if CON_YES==_['primary_key']]) > 0:
self.radiosFiledPrimary.SetSelection(1)
RichMsgDialog.showOkMsgDialog(self, '一个模型只能拥有一个显式主键,若想对此字段设置主键,请使用隐式方式:null=False且unique=True。', '警告')
return
# 自动赋值默认值None
if 0 == status_primary_key: # 主键
self.inputDefaultValue.SetValue('None')
self.inputDefaultValue.Enable(False)
# 自动锁定null blank unique db_index
self.radiosFiledNull.Enable(False)
self.radiosFiledBlank.Enable(False)
self.radiosFiledUnique.Enable(False)
self.radiosFiledDbIndex.Enable(False)
# 初始状态
self.radiosFiledBlank.SetSelection(1) # 不允许为空
self.radiosFiledNull.SetSelection(1) # 字段为空不赋值NULL
self.radiosFiledUnique.SetSelection(1) # 值不唯一
self.radiosFiledDbIndex.SetSelection(1) # 不创建索引
else: # 反向操作,状态复原
self.inputDefaultValue.SetValue('')
self.inputDefaultValue.Enable(True)
self.radiosFiledNull.Enable(True)
self.radiosFiledBlank.Enable(True)
self.radiosFiledUnique.Enable(True)
self.radiosFiledDbIndex.Enable(True)
elif fid == self.radiosFiledNull.GetId():
# 避免在CharField之类的字段中使用null=True【用户选中时给予提示】
# 例外:当CharField同时设置unique=True和blank=True时,必须设置null=True
if field_type in CON_CHAR_FIELDS and 0 == status_null:
RichMsgDialog.showOkMsgDialog(self, '字符类型的字段设置null=True会出现两种可能的值,如非必要,请勿选择。', '警告')
if 'BooleanField' == field_type and 0 == status_null:
RichMsgDialog.showOkMsgDialog(self, 'BooleanField字段在2.1版本之前不支持设置null=True,新版本可以。不建议使用NullBooleanField。', '警告')
elif fid == self.radiosFiledBlank.GetId():
if field_type in CON_CHAR_FIELDS and 0 == status_unique and 0 == status_blank:
self.radiosFiledNull.SetSelection(0)
self.radiosFiledNull.Enable(False) # 同时锁定无法修改
RichMsgDialog.showOkMsgDialog(self, '字符类型的字段同时设置unique=True和blank=True时,必须设置null=True。', '警告')
if 0 != status_blank:
self.radiosFiledNull.Enable(True) # 不是同时选中的状态,解锁null字段
elif fid == self.radiosFiledUnique.GetId():
if field_type in CON_CHAR_FIELDS and 0 == status_unique and 0 == status_blank:
self.radiosFiledNull.SetSelection(0)
self.radiosFiledNull.Enable(False) # 同时锁定无法修改
RichMsgDialog.showOkMsgDialog(self, '字符类型的字段同时设置unique=True和blank=True时,必须设置null=True。', '警告')
if 0 != status_unique:
self.radiosFiledNull.Enable(True) # 不是同时选中的状态,解锁null字段
elif fid == self.radiosFiledEditable.GetId():
# BinaryField字段在2.1版本之前不支持editable=True
if 'BinaryField' == field_type and 0 == status_editable:
RichMsgDialog.showOkMsgDialog(self, 'Django2.1版本之前(不包括2.1),不支持设置editable=True。', '警告')
elif fid == self.radiosAutoNow.GetId():
if 0 == status_autonow:
self.radiosAutoNowAdd.SetSelection(1)
self.inputDefaultValue.SetValue('')
self.inputDefaultValue.Enable(False)
# 当设置auto_now_add=True或auto_now=True时,默认同时设置editable=False和blank=True
self.radiosFiledEditable.SetSelection(1)
self.radiosFiledBlank.SetSelection(0)
self.radiosFiledEditable.Enable(False)
self.radiosFiledBlank.Enable(False)
else:
if 1 == status_autonowadd:
self.inputDefaultValue.SetValue('date.today')
# 反向操作
self.inputDefaultValue.Enable(True)
self.radiosFiledEditable.SetSelection(0)
self.radiosFiledBlank.SetSelection(1)
self.radiosFiledEditable.Enable(True)
self.radiosFiledBlank.Enable(True)
elif fid == self.radiosAutoNowAdd.GetId():
if 0 == status_autonowadd:
self.radiosAutoNow.SetSelection(1)
self.inputDefaultValue.SetValue('')
self.inputDefaultValue.Enable(False)
# 当设置auto_now_add=True或auto_now=True时,默认同时设置editable=False和blank=True
self.radiosFiledEditable.SetSelection(1)
self.radiosFiledBlank.SetSelection(0)
self.radiosFiledEditable.Enable(False)
self.radiosFiledBlank.Enable(False)
else:
if 1 == status_autonow:
self.inputDefaultValue.SetValue('date.today')
self.inputDefaultValue.Enable(True)
self.radiosFiledEditable.SetSelection(0)
self.radiosFiledBlank.SetSelection(1)
self.radiosFiledEditable.Enable(True)
self.radiosFiledBlank.Enable(True)
def onInputFieldModelName(self, e):
"""模型字段名设置时自动触发值更新"""
field_name = self.inputFieldModelName.GetValue().strip()
# 每次取最新的一次输入字符
if retools.PATT_CHARS.match(field_name):
self.inputFieldDatabaseName.SetValue(field_name)
self.inputFieldRemarkName.SetValue(field_name.replace('_', ' '))
else:
self.inputFieldModelName.SetValue(retools.PATT_CHARS_REVERSED.sub('', field_name))
self.inputFieldModelName.SetInsertionPointEnd() # 光标定位到最后
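# 行为示例:输入字段名 "first_name" 时,数据库列名自动填充为 "first_name",备注名自动填充为 "first name"。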
def onInputMaxLength(self, e):
"""长度上限属性填写时自动触发值更新"""
v = str(self.inputMaxLength.GetValue().strip())
if '0' == v:
self.inputMaxLength.SetValue('')
return
if v and isinstance(v, str): # 此处条件分支解决递归错误问题
if not retools.PATT_DIGITS_WHOLE.match(v):
self.inputMaxLength.SetValue(retools.PATT_DIGITS_REVERSED.sub('', v))
self.inputMaxLength.SetInsertionPointEnd()
def onInputMaxDigits(self, e):
"""实数总位数自动触发值更新"""
v = str(self.inputMaxDigits.GetValue().strip())
if '0' == v:
self.inputMaxDigits.SetValue('')
return
if v and isinstance(v, str):
if not retools.PATT_DIGITS_WHOLE.match(v):
self.inputMaxDigits.SetValue(retools.PATT_DIGITS_REVERSED.sub('', v))
self.inputMaxDigits.SetInsertionPointEnd()
def onInputRelatedName(self, e):
"""反向名称->反向过滤器名称"""
v = str(self.inputRelatedName.GetValue().strip())
self.inputRelatedQueryName.SetValue(v)
def onInputDecimalPlaces(self, e):
"""小数总位数自动触发值更新"""
v = str(self.inputDecimalPlaces.GetValue().strip())
if '0' == v:
self.inputDecimalPlaces.SetValue('')
return
if v and isinstance(v, str):
if not retools.PATT_DIGITS_WHOLE.match(v):
self.inputDecimalPlaces.SetValue(retools.PATT_DIGITS_REVERSED.sub('', v))
self.inputDecimalPlaces.SetInsertionPointEnd()
def _disable_all_args(self):
"""关闭所有的参数填写入口"""
for _ in self.allArgs:
_.Enable(False)
def _init_all_args_value(self):
"""初始化参数默认值"""
self.radiosFiledBlank.SetSelection(1) # 不允许为空
self.radiosFiledNull.SetSelection(1) # 字段为空不赋值NULL
self.radiosFiledPrimary.SetSelection(1) # 不是主键
self.radiosFiledUnique.SetSelection(1) # 值不唯一
self.radiosFiledDbIndex.SetSelection(1) # 不创建索引
self.radiosFiledEditable.SetSelection(0) # 菜单默认可编辑
self.choicesFiledUniqueForDate.SetSelection(0) # 无组合唯一
self.choicesFiledUniqueForMonth.SetSelection(0) # 无组合唯一
self.choicesFiledUniqueForYear.SetSelection(0) # 无组合唯一
self.radiosAutoNow.SetSelection(1)
self.radiosAutoNowAdd.SetSelection(1)
# self.choiceSelectModel.SetSelection(0)
self.choiceSelectDelRule.SetSelection(1)
self.radiosDBConstraint.SetSelection(0)
def _init_input_args(self):
"""初始化输入框"""
self.choiceFieldType.SetSelection(0)
self.inputFieldModelName.SetValue('')
self.inputFieldRemarkName.SetValue('')
self.inputFieldDatabaseName.SetValue('')
self.inputDefaultValue.SetValue('')
self.inputFormHelpText.SetValue('')
self.inputFormErrorMessage.SetValue('')
self.inputMaxLength.SetValue('')
self.inputMaxDigits.SetValue('')
self.inputDecimalPlaces.SetValue('')
self.inputUploadTo.SetValue('')
self.inputRelationRemark.SetValue('')
self.choiceSelectModel.SetValue('') # 后期类型改动(暂不更改)
self.inputLimitChoicesTo.SetValue('')
self.inputRelatedName.SetValue('')
self.inputRelatedQueryName.SetValue('')
self.inputToField.SetValue('')
def _disable_all_afterBtns(self):
"""关闭所有的后触发按钮"""
for _ in self.afterBtns:
_.Enable(False)
def _init_table(self):
"""初始化表格控件"""
# 显示和隐藏按钮,用于空间的合理布局
self.btnShowUnshowTable = buttons.GenButton(self.panel, -1, '【显示】待新增字段表格数据')
self.panelSizer.Add(self.btnShowUnshowTable, 0, wx.EXPAND | wx.ALL, 2)
self.btnShowUnshowTable.SetBackgroundColour(CON_COLOR_MAIN)
self.btnShowUnshowTable.SetForegroundColour(CON_COLOR_WHITE)
# 表格
self.tableObjPanel = wx.Panel(self.panel, size=(730, 222))
tableObjPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.tableObjPanel.SetSizer(tableObjPanelSizer)
self.panelSizer.Add(self.tableObjPanel, 0, wx.EXPAND | wx.ALL, 2)
self.tableObjPanel.SetBackgroundColour('#000000')
# 表头
self.gridToolsPanel = wx.Panel(self.tableObjPanel)
gridToolsPanelSizer = wx.BoxSizer(wx.HORIZONTAL)
self.gridToolsPanel.SetSizer(gridToolsPanelSizer)
tableObjPanelSizer.Add(self.gridToolsPanel, 0, wx.EXPAND | wx.ALL, 2)
self.gridBtnDelete = buttons.GenButton(self.gridToolsPanel, -1, '删除选中行')
self.gridBtnOther = buttons.GenButton(self.gridToolsPanel, -1, ' ')
self.gridBtnOther.Enable(False)
gridToolsPanelSizer.Add(self.gridBtnDelete, 0, wx.EXPAND | wx.ALL, 2)
gridToolsPanelSizer.Add(self.gridBtnOther, 1, wx.EXPAND | wx.ALL, 2)
# 表体
self.infoGrid = wx.grid.Grid( self.tableObjPanel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
self.infoGrid.CreateGrid( 0, len(CON_MODELSCREATEDIALOG_COLS) ) # row col
self.infoGrid.EnableEditing( False )
self.infoGrid.EnableGridLines( True )
self.infoGrid.EnableDragGridSize( True )
self.infoGrid.SetMargins( 0, 0 )
self.infoGrid.EnableDragColMove( False )
self.infoGrid.EnableDragColSize( True )
self.infoGrid.SetColLabelSize( 30 )
self.infoGrid.SetColLabelAlignment( wx.ALIGN_CENTER, wx.ALIGN_CENTER )
self.infoGrid.EnableDragRowSize( True )
self.infoGrid.SetRowLabelSize( 70 )
self.infoGrid.SetRowLabelAlignment( wx.ALIGN_CENTER, wx.ALIGN_CENTER )
self.infoGrid.SetDefaultCellAlignment( wx.ALIGN_LEFT, wx.ALIGN_TOP )
tableObjPanelSizer.Add( self.infoGrid, 1, wx.EXPAND | wx.ALL, 2 ) # 表格默认加最后
self._init_header()
# 事件
self.Bind(wx.EVT_BUTTON, self.onGridBtnDelete, self.gridBtnDelete)
self.Bind(wx.EVT_BUTTON, self.onBtnShowUnshowTable, self.btnShowUnshowTable)
self.tableObjPanel.Show(False) # 默认隐藏
def onBtnShowUnshowTable(self, e):
"""显示和隐藏按钮,用于空间的合理布局"""
if '【显示】待新增字段表格数据' == self.btnShowUnshowTable.Label:
self.tableObjPanel.Show(True)
self.btnShowUnshowTable.SetLabel('【隐藏】待新增字段表格数据')
self.panel.Layout() # 重新计算布局
else:
self.tableObjPanel.Show(False)
self.btnShowUnshowTable.SetLabel('【显示】待新增字段表格数据')
self.panel.Layout()
def onGridBtnDelete(self, e):
"""删除行"""
row_indexs = self.infoGrid.GetSelectedRows()
t = '、'.join([str(_+1) for _ in row_indexs])
if len(row_indexs) > 0:
dlg_tip = wx.MessageDialog(self, f"确认删除第{t}行?一旦删除不可恢复。", CON_TIPS_COMMON, wx.CANCEL | wx.OK)
if dlg_tip.ShowModal() == wx.ID_OK:
result = self.removeRows(row_indexs)
if not result:
RichMsgDialog.showOkMsgDialog(self, '删除成功!', '提示')
else:
if isinstance(result, list):
RichMsgDialog.showOkMsgDialog(self, f"{'、'.join(result)}删除失败!", '提示')
else:
RichMsgDialog.showOkMsgDialog(self, '未知错误,删除失败。', '提示')
dlg_tip.Close(True)
else:
RichMsgDialog.showOkMsgDialog(self, '无选择行可删除。', '警告')
def _init_header(self):
"""初始化列名"""
for i,v in enumerate(CON_MODELSCREATEDIALOG_COLS):
self.infoGrid.SetColLabelValue(i, v)
def onChoiceSelectDelRule(self, e):
"""on_delete选项监听"""
delete_type = e.GetString().strip()
if 'models.SET_NULL' == delete_type:
self.radiosFiledBlank.SetSelection(0)
self.radiosFiledNull.SetSelection(0)
self.radiosFiledBlank.Enable(False)
self.radiosFiledNull.Enable(False)
else:
self.radiosFiledBlank.SetSelection(1)
self.radiosFiledNull.SetSelection(1)
self.radiosFiledBlank.Enable(True)
self.radiosFiledNull.Enable(True)
def onChoiceFieldType(self, e):
"""选择要新建的字段类型"""
field_type = e.GetString().strip(string.whitespace+'-')
if not field_type:
return
# try:
# if self.record != field_type: # 值未更新
# # 每次更新时均初始化状态
# self._init_all_args_value()
# self._init_input_args()
# except: ...
# self.record = field_type # 记录上一次的状态
self._open_required_args() # 共用参数开启
self._unshow_special_args() # 先隐藏所有的特殊参数,后按需开启
if CON_BINARYFIELD == field_type:
self.selectBinaryField()
elif CON_SMALLINTEGERFIELD == field_type:
self.selectSmallIntegerField()
elif CON_POSITIVESMALLINTEGERFIELD == field_type:
self.selectPositiveSmallIntegerField()
elif CON_INTEGERFIELD == field_type:
self.selectIntegerField()
elif CON_POSITIVEINTEGERFIELD == field_type:
self.selectPositiveIntegerField()
elif CON_BIGINTEGERFIELD == field_type:
self.selectBigIntegerField()
elif CON_AUTOFIELD == field_type:
self.selectAutoField()
elif CON_BIGAUTOFIELD == field_type:
self.selectBigAutoField()
elif CON_FLOATFIELD == field_type:
self.selectFloatField()
elif CON_DECIMALFIELD == field_type:
self.selectDecimalField()
elif CON_BOOLEANFIELD == field_type:
self.selectBooleanField()
elif CON_CHARFIELD == field_type:
self.selectCharField()
elif CON_TEXTFIELD == field_type:
self.selectTextField()
elif CON_EMAILFIELD == field_type:
self.selectEmailField()
elif CON_IPADRESSFIELD == field_type:
self.selectGenericIPAddressField()
elif CON_SLUGFIELD == field_type:
self.selectSlugField()
elif CON_URLFIELD == field_type:
self.selectURLField()
elif CON_UUIDFIELD == field_type:
self.selectUUIDField()
elif CON_DATEFIELD == field_type:
self.selectDateField()
elif CON_DATETIMEFIELD == field_type:
self.selectDateTimeField()
elif CON_DURATIONFIELD == field_type:
self.selectDurationField()
elif CON_TIMEFIELD == field_type:
self.selectTimeField()
elif CON_FILEFIELD == field_type:
self.selectFileField()
elif CON_IMAGEFIELD == field_type:
self.selectImageField()
elif CON_FILEPATHFIELD == field_type:
self.selectFilePathField()
elif CON_FOREIGNFIELD == field_type:
self.selectForeignKey()
# RichMsgDialog.showOkMsgDialog(self, '在创建关联字段时,默认在【被关联模型】数据库表中新增<当前模型名小写>_id列。', '提示')
elif CON_MANYTOMANYFIELD == field_type:
self.selectManyToManyField()
# RichMsgDialog.showOkMsgDialog(self, '在创建关联字段时,默认在【被关联模型】数据库表中新增<当前模型名小写>_id列。', '提示')
elif CON_ONETOONEFIELD == field_type:
self.selectOneToOneField()
# RichMsgDialog.showOkMsgDialog(self, '在创建关联字段时,默认在【被关联模型】数据库表中新增<当前模型名小写>_id列。', '提示')
self.choiceFieldType.Enable(False) # 一旦选择将锁定字段的重新选择,可点击【重置字段】解锁
self.panelSizer.Layout() # 重要!!! 重新计算布局
def onBtnAddNew(self, e):
"""新增字段"""
self.choiceFieldType.Enable(True) # 开放字段下拉选择框
self._show_special_args() # 显示所有的可选参数
# 开放 后触发 按钮
for _ in self.afterBtns:
_.Enable(True)
# 锁定新增按钮
self.btnAddNew.Enable(False)
self.panel.Layout()
def onBtnResetInput(self, e):
"""恢复字段默认值"""
dlg_tip = wx.MessageDialog(self, f"确认重置字段?重置后将丢失界面所有已填数据。(待新增区不受影响)", CON_TIPS_COMMON, wx.CANCEL | wx.OK)
if dlg_tip.ShowModal() == wx.ID_OK:
self._init_all_args_value()
self._init_input_args()
# 参数重新选定,开放类型选择按钮
self._disable_all_args()
self._show_special_args() # 显示所有的可选参数
self.choiceFieldType.Enable(True)
self.panel.Layout()
dlg_tip.Close(True)
def onBtnAddFieldToArea(self, e):
"""添加至待生成区"""
dlg_tip = wx.MessageDialog(self, f"确认添加?", CON_TIPS_COMMON, wx.CANCEL | wx.OK)
if dlg_tip.ShowModal() == wx.ID_OK:
# 添加操作
# 获取界面的所有值
vchoiceFieldType = self.choiceFieldType.GetString(self.choiceFieldType.GetSelection()).strip()
vinputFieldModelName = self.inputFieldModelName.GetValue().strip()
vinputFieldDatabaseName = self.inputFieldDatabaseName.GetValue().strip()
vinputDefaultValue = self.inputDefaultValue.GetValue().strip()
vinputFormHelpText = self.inputFormHelpText.GetValue().strip()
vinputFormErrorMessage = self.inputFormErrorMessage.GetValue().strip()
vinputFieldRemarkName = self.inputFieldRemarkName.GetValue().strip()
vinputMaxLength = self.inputMaxLength.GetValue().strip()
vinputMaxDigits = self.inputMaxDigits.GetValue().strip()
vinputDecimalPlaces = self.inputDecimalPlaces.GetValue().strip()
vinputUploadTo = self.inputUploadTo.GetValue().strip()
vradiosFiledBlank = self.radiosFiledBlank.GetSelection()
vradiosFiledNull = self.radiosFiledNull.GetSelection()
vradiosFiledPrimary = self.radiosFiledPrimary.GetSelection()
vradiosFiledUnique = self.radiosFiledUnique.GetSelection()
vradiosFiledDbIndex = self.radiosFiledDbIndex.GetSelection()
vradiosFiledEditable = self.radiosFiledEditable.GetSelection()
vradiosAutoNow = self.radiosAutoNow.GetSelection()
vradiosAutoNowAdd = self.radiosAutoNowAdd.GetSelection()
vchoicesFiledUniqueForDate = self.choicesFiledUniqueForDate.GetString(self.choicesFiledUniqueForDate.GetSelection()).strip()
vchoicesFiledUniqueForMonth = self.choicesFiledUniqueForMonth.GetString(self.choicesFiledUniqueForMonth.GetSelection()).strip()
vchoicesFiledUniqueForYear = self.choicesFiledUniqueForYear.GetString(self.choicesFiledUniqueForYear.GetSelection()).strip()
# 二次添加
vchoiceSelectModel = self.choiceSelectModel.GetValue().strip()
vchoiceSelectDelRule = self.choiceSelectDelRule.GetString(self.choiceSelectDelRule.GetSelection()).strip()
vinputRelationRemark = self.inputRelationRemark.GetValue().strip()
vinputLimitChoicesTo = self.inputLimitChoicesTo.GetValue().strip()
vinputRelatedName = self.inputRelatedName.GetValue().strip()
vinputRelatedQueryName = self.inputRelatedQueryName.GetValue().strip()
vinputToField = self.inputToField.GetValue().strip()
vradiosDBConstraint = self.radiosDBConstraint.GetSelection()
vinputDBTable = self.inputDBTable.GetValue().strip()
# 先校验,后操作
# 字段属性名+数据库列名+字段备注,三者只要有一个重复,便不允许新增该字段
tfield_name, tfield_dbname, tfieldremark = [], [], []
for _ in self.allRows:
tfield_name.append(_['field_name'])
                tfield_dbname.append(_['db_column'])
                tfieldremark.append(_['remarker'])
            if vinputFieldModelName in tfield_name or ('' != vinputFieldDatabaseName and vinputFieldDatabaseName in tfield_dbname) or ('' != vinputFieldRemarkName and vinputFieldRemarkName in tfieldremark):
RichMsgDialog.showOkMsgDialog(self, '字段属性名、数据库列名、字段备注均不能重复。', '警告')
return
# 必填项检测
if not vchoiceFieldType: # 字段类型必选
RichMsgDialog.showOkMsgDialog(self, '请选择字段类型!', '错误')
return
if not vinputFieldModelName: # 字段属性名必填
RichMsgDialog.showOkMsgDialog(self, '请填写【字段属性名】!', '错误')
return
if (con_getFieldTypeName(vchoiceFieldType) in CON_OWN_MAX_LENGTH_FILEDS) and (not vinputMaxLength): # 所有有max_length属性的字段,必填max_length
RichMsgDialog.showOkMsgDialog(self, '【长度上限】max_length必填!', '错误')
return
if 'DecimalField' == con_getFieldTypeName(vchoiceFieldType):
if not vinputMaxDigits:
RichMsgDialog.showOkMsgDialog(self, '【实数总位数】必填!', '错误')
return
else:
maxdigits = int(vinputMaxDigits)
dicimalplaces = int(vinputDecimalPlaces if vinputDecimalPlaces else '0')
if maxdigits < dicimalplaces:
RichMsgDialog.showOkMsgDialog(self, '【实数总位数】必需大于等于【小数总位数】!', '错误')
return
if con_getFieldTypeName(vchoiceFieldType) in CON_FOREIGN_FIELDS:
if not vchoiceSelectModel:
RichMsgDialog.showOkMsgDialog(self, '【A、关联关系模型】必填!', '错误')
return
if not vchoiceSelectDelRule:
RichMsgDialog.showOkMsgDialog(self, '【B、删除规则(on_delete)】必选!', '错误')
return
# 待插入的行
insertRow = {}
insertRow['field_name'] = vinputFieldModelName
insertRow['db_column'] = vinputFieldDatabaseName
insertRow['remarker'] = vinputFieldRemarkName
insertRow['field_type'] = con_getFieldTypeName(vchoiceFieldType)
insertRow['primary_key'] = self._replace01_to_bool(vradiosFiledPrimary)
insertRow['blank'] = self._replace01_to_bool(vradiosFiledBlank)
insertRow['null'] = self._replace01_to_bool(vradiosFiledNull)
insertRow['default'] = vinputDefaultValue
insertRow['unique'] = self._replace01_to_bool(vradiosFiledUnique)
insertRow['db_index'] = self._replace01_to_bool(vradiosFiledDbIndex)
insertRow['choices'] = '' # 前端暂未放出
insertRow['unique_for_date'] = vchoicesFiledUniqueForDate
insertRow['unique_for_month'] = vchoicesFiledUniqueForMonth
insertRow['unique_for_year'] = vchoicesFiledUniqueForYear
insertRow['error_messages'] = vinputFormErrorMessage
insertRow['editable'] = self._replace01_to_bool(vradiosFiledEditable)
insertRow['help_text'] = vinputFormHelpText
insertRow['max_length'] = vinputMaxLength
insertRow['max_digits'] = vinputMaxDigits
insertRow['decimal_places'] = vinputDecimalPlaces if vinputDecimalPlaces else '0'
insertRow['auto_now'] = self._replace01_to_bool(vradiosAutoNow)
insertRow['auto_now_add'] = self._replace01_to_bool(vradiosAutoNowAdd)
insertRow['upload_to'] = vinputUploadTo
# 二次添加
insertRow['relate_model'] = vchoiceSelectModel
insertRow['on_delete'] = vchoiceSelectDelRule
insertRow['verbose_name'] = vinputRelationRemark
insertRow['limit_choices_to'] = vinputLimitChoicesTo
insertRow['related_name'] = vinputRelatedName
insertRow['related_query_name'] = vinputRelatedQueryName
insertRow['to_field'] = vinputToField
insertRow['db_constraint'] = self._replace01_to_bool(vradiosDBConstraint)
insertRow['db_table'] = vinputDBTable
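            # Illustration (an assumption - _generate_create_code() is defined elsewhere and not shown here):
            # a row such as {'field_name': 'title', 'field_type': 'CharField', 'max_length': '255', ...}
            # is expected to be rendered later as something roughly like:
            #     title = models.CharField(max_length=255, blank=False, null=False)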
self.allRows.append(insertRow)
# 插入待新增数据区域
self.infoGrid.AppendRows(1)
row = self.infoGrid.GetNumberRows() - 1
for col, _ in enumerate(CON_MODELSCREATEDIALOG_COLS):
self.infoGrid.SetCellValue(row, col, str(insertRow.get(CON_ARGS_NAME_DICT[_])))
# 界面数据全部初始化【全部参数暂时不放,只显示上一个字段相关的参数锁定界面】
self._disable_all_args()
self._init_all_args_value()
self._init_input_args()
self.choiceFieldType.SetSelection(0) # 单独拎出来初始化【不影响大体功能】
# 重新开放新增按钮 锁定后触发按钮
self.btnAddNew.Enable(True)
self._disable_all_afterBtns()
# 更新日期组合唯一的三个相关下拉框【只给日期字段相关的字段属性名】
self.choicesFiledUniqueForDate.Clear()
self.choicesFiledUniqueForMonth.Clear()
self.choicesFiledUniqueForYear.Clear()
# 日期选择
self.choicesFiledUniqueForDate.Append(' ')
self.choicesFiledUniqueForMonth.Append(' ')
self.choicesFiledUniqueForYear.Append(' ')
for _ in self.allRows:
if _['field_type'] in CON_DATE_FIELDS:
self.choicesFiledUniqueForDate.Append(_['field_name'])
self.choicesFiledUniqueForMonth.Append(_['field_name'])
self.choicesFiledUniqueForYear.Append(_['field_name'])
self.panel.Layout()
RichMsgDialog.showOkMsgDialog(self, '字段添加成功,可在(待新增字段表格数据)中查看已添加字段信息。', '成功')
dlg_tip.Close(True)
def _replace01_to_bool(self, v):
if 0 == v: return CON_YES
else: return CON_NO
def removeRows(self, row_indexs):
"""同步删除界面和数据包里的数据"""
errors = []
for i in sorted(row_indexs, reverse=True): # 倒序
try:
temp = self.infoGrid.GetCellValue(i, 0) # 字段属性名
self.infoGrid.DeleteRows(i)
except:
errors.append(str(i+1))
else:
self._removeRowsByFieldName(temp)
return errors
def _removeRowsByFieldName(self, field_name):
""""根据字段属性名删除"""
for i,_ in enumerate(self.allRows):
if field_name == _['field_name']:
self.allRows.pop(i)
break
    def _checkFiledsNameIsConflict(self) -> tuple:
"""检查字段名是否与内置API名称冲突"""
# 取所有的模型内置API名
modelAPINames = env.getConflictFieldsName()
c_l = []
for _ in self.allRows:
if _['field_name'].lower() in modelAPINames:
c_l.append(_['field_name'])
if len(c_l) > 0: # 冲突返回True
return True, c_l
else:
return False, c_l
def _auto_register_model(self, appName, model_name):
"""自动注册模型到后台"""
if self.autoRegister.GetValue():
# models 存储模型类名
# modelFiles 是无后缀名的存储模型的文件名
modelFiles, models = [], [] # modelFiles 无后缀名
models.extend([model_name,])
modelfile_alias = os.path.basename(env.getModelsAlias()[0]).split('.')[0] # 默认取models.py的第一个别名
modelFiles.extend([modelfile_alias,])
classify = set(modelFiles) # 将所有的模型文件名称去重
importData = {} # 构建插入数据包
for _ in classify:
importData[_] = []
for _ in zip(models, modelFiles):
importData[_[1]].append(_[0]) # 以文件名为分组依据,将模型归类到对应的文件下
alias = env.getAdminAlias() # 读取admin.py的别名
for _ in alias:
# 下面将在所有的模块别名路径中写入注册数据【可能有点不合理】
insert_path = os.path.join(get_configs(CONFIG_PATH)['dirname'], appName, _) # 因为 _ 别名是包含紧邻app路径之后的路径,所以理论上不管层级有多深,都可以找的到
djangotools.write_admin_base(insert_path, importData) # 写入注册代码
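    # Illustration (an assumption - write_admin_base() lives in djangotools and is not shown here):
    # for importData == {'models': ['Book']} it is expected to append registration code to admin.py
    # roughly equivalent to:
    #     from .models import Book
    #     admin.site.register(Book)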
def onBtnExecSave(self, e):
"""保存"""
if len(self.allRows) <= 0:
dlg_tip = wx.MessageDialog(self, f"未添加任何字段,是否创建空模型?", CON_TIPS_COMMON, wx.CANCEL | wx.OK)
if dlg_tip.ShowModal() == wx.ID_OK:
dlg = wx.TextEntryDialog(self, u"模型命名:", u"保存模型", u"")
if dlg.ShowModal() == wx.ID_OK:
model_name = dlg.GetValue().strip() # 获取要创建的模型名称
if model_name:
model_code = self._generate_create_code(mode='B').replace('<model_name>', model_name)
# 将代码追加到对应的应用程序中
app_name = self.choiceSelectFile.GetString(self.choiceSelectFile.GetSelection()).strip()
if app_name:
temp_path = djangotools.get_models_path_by_appname(app_name)
if len(temp_path) > 0:
append_file_whole(temp_path[0], model_code) # 默认写入第一个模型文件
self._auto_register_model(app_name, model_name) # 自动注册
RichMsgDialog.showOkMsgDialog(self, '保存成功', '成功')
else:
RichMsgDialog.showOkMsgDialog(self, '程序缺失模型文件', '错误')
else:
RichMsgDialog.showOkMsgDialog(self, '请先选择模型所属的应用程序。', '错误')
else:
RichMsgDialog.showOkMsgDialog(self, '未输入模型名称', '错误')
dlg.Close(True)
dlg_tip.Close(True)
else:
check_result = self._checkFiledsNameIsConflict()
conflict_info = '、'.join(check_result[1])
if check_result[0]:
RichMsgDialog.showOkMsgDialog(self, f'{conflict_info} 字段名称与模型内置API名称冲突,请删除后重新新增字段。', '错误')
return
dlg = wx.TextEntryDialog(self, u"模型命名:", u"保存模型", u"")
if dlg.ShowModal() == wx.ID_OK:
model_name = dlg.GetValue().strip() # 获取要创建的模型名称
if model_name:
model_code = self._generate_create_code(mode='B').replace('<model_name>', model_name)
# 将代码追加到对应的应用程序中
app_name = self.choiceSelectFile.GetString(self.choiceSelectFile.GetSelection()).strip()
if app_name:
temp_path = djangotools.get_models_path_by_appname(app_name)
if len(temp_path) > 0:
append_file_whole(temp_path[0], model_code) # 默认写入第一个模型文件
self._auto_register_model(app_name, model_name) # 自动注册
RichMsgDialog.showOkMsgDialog(self, '保存成功', '成功')
else:
RichMsgDialog.showOkMsgDialog(self, '程序缺失模型文件', '错误')
else:
RichMsgDialog.showOkMsgDialog(self, '请先选择模型所属的应用程序', '错误')
else:
RichMsgDialog.showOkMsgDialog(self, '未输入模型名称', '错误')
dlg.Close(True)
def _open_required_args(self):
"""所有字段必须同步开启的参数"""
for _ in self.commonArgs:
_.Enable(True)
def _open_max_length_field(self):
"""开启max_length字段"""
self.inputMaxLengthStaticBox.Show(True)
self.inputMaxLength.Show(True)
self.labelInputMaxLength.Show(True)
self.readmeInputMaxLength.Show(True)
self.inputMaxLength.Enable(True)
def selectBinaryField(self):
"""字节型字段"""
self.radiosFiledEditable.SetSelection(1)
self._open_max_length_field()
def selectSmallIntegerField(self):
...
def selectPositiveSmallIntegerField(self):
...
def selectIntegerField(self):
...
def selectPositiveIntegerField(self):
...
def selectBigIntegerField(self):
...
def selectAutoField(self):
"""32位自增型字段"""
def selectBigAutoField(self):
...
def selectFloatField(self):
...
def selectDecimalField(self):
"""高精度浮点型字段"""
self.inputMaxDigitsStaticBox.Show(True)
self.inputMaxDigits.Show(True)
self.labelInputMaxDigits.Show(True)
self.readmeInputMaxDigits.Show(True)
self.inputDecimalPlacesStaticBox.Show(True)
self.inputDecimalPlaces.Show(True)
self.labelInputDecimalPlaces.Show(True)
self.readmeInputDecimalPlaces.Show(True)
self.inputMaxDigits.Enable(True)
self.inputDecimalPlaces.Enable(True)
def selectBooleanField(self):
"""布尔类型字段"""
self.inputDefaultValue.SetValue('None')
def selectCharField(self):
"""字符型字段"""
self._open_max_length_field()
self.inputMaxLength.SetValue('255') # 默认长度255
def selectTextField(self):
...
def selectEmailField(self):
"""电子邮件字段"""
self._open_max_length_field()
self.inputMaxLength.SetValue('254')
def selectGenericIPAddressField(self):
...
def selectSlugField(self):
"""字母、数字、连字符字段"""
self._open_max_length_field()
self.inputMaxLength.SetValue('50')
def selectURLField(self):
"""url字段"""
self._open_max_length_field()
self.inputMaxLength.SetValue('200')
def selectUUIDField(self):
...
def _open_autonow_add(self):
"""开启日期相关的特殊参数"""
self.radiosAutoNowStaticBox.Show(True)
self.radiosAutoNow.Show(True)
self.labelRadiosAutoNow.Show(True)
self.readmeRadiosAutoNow.Show(True)
self.radiosAutoNowAddStaticBox.Show(True)
self.radiosAutoNowAdd.Show(True)
self.labelRadiosAutoNowAdd.Show(True)
self.readmeRadiosAutoNowAdd.Show(True)
self.radiosAutoNow.Enable(True)
self.radiosAutoNowAdd.Enable(True)
def selectDateField(self):
"""日期型字段"""
self._open_autonow_add()
self.inputDefaultValue.SetValue('date.today')
def selectDateTimeField(self):
"""长日期字段"""
self._open_autonow_add()
self.inputDefaultValue.SetValue('timezone.now')
def selectDurationField(self):
"""时间戳字段"""
def selectTimeField(self):
"""时间字段"""
self._open_autonow_add()
def selectFileField(self):
"""文件字段"""
self._open_max_length_field()
self.inputMaxLength.SetValue('100')
self.inputUploadToStaticBox.Show(True)
self.inputUploadTo.Show(True)
self.labelInputUploadTo.Show(True)
self.readmeInputUploadTo.Show(True)
self.inputUploadTo.Enable(True)
self.inputUploadTo.SetValue(r"'uploads/%Y/%m/%d/'")
def selectImageField(self):
...
def selectFilePathField(self):
...
def selectForeignKey(self):
"""多对一字段"""
self.radiosFiledDbIndex.SetSelection(0)
self.inputFieldRemarkName.Enable(False) # 锁定位置参数备注名,使用关键字参数备注名
self.choiceSelectModelStaticBox.Show(True)
self.choiceSelectModel.Show(True)
self.labelChoiceSelectModel.Show(True)
self.readmeChoiceSelectModel.Show(True)
self.choiceSelectDelRuleStaticBox.Show(True)
self.choiceSelectDelRule.Show(True)
self.labelChoiceSelectDelRule.Show(True)
self.readmeChoiceSelectDelRule.Show(True)
self.inputRelationRemarkStaticBox.Show(True)
self.inputRelationRemark.Show(True)
self.labelInputRelationRemark.Show(True)
self.readmeInputRelationRemark.Show(True)
self.inputLimitChoicesToStaticBox.Show(True)
self.inputLimitChoicesTo.Show(True)
self.labelInputLimitChoicesTo.Show(True)
self.readmeInputLimitChoicesTo.Show(True)
self.inputRelatedNameStaticBox.Show(True)
self.inputRelatedName.Show(True)
self.labelInputRelatedName.Show(True)
self.readmeInputRelatedName.Show(True)
self.inputRelatedQueryNameStaticBox.Show(True)
self.inputRelatedQueryName.Show(True)
self.labelInputRelatedQueryName.Show(True)
self.readmeInputRelatedQueryName.Show(True)
self.inputToFieldStaticBox.Show(True)
self.inputToField.Show(True)
self.labelInputToField.Show(True)
self.readmeInputToField.Show(True)
self.radiosDBConstraintStaticBox.Show(True)
self.radiosDBConstraint.Show(True)
self.labelRadiosDBConstraint.Show(True)
self.readmeRadiosDBConstraint.Show(True)
self.choiceSelectModel.Enable(True)
self.choiceSelectDelRule.Enable(True)
self.inputRelationRemark.Enable(True)
self.inputLimitChoicesTo.Enable(True)
self.inputRelatedName.Enable(True)
self.inputRelatedQueryName.Enable(True)
self.inputToField.Enable(True)
self.radiosDBConstraint.Enable(True)
def selectManyToManyField(self):
"""多对多字段"""
self.radiosFiledDbIndex.SetSelection(0)
self.inputFieldRemarkName.Enable(False) # 锁定位置参数备注名,使用关键字参数备注名
self.choiceSelectModelStaticBox.Show(True)
self.choiceSelectModel.Show(True)
self.labelChoiceSelectModel.Show(True)
self.readmeChoiceSelectModel.Show(True)
self.inputRelatedNameStaticBox.Show(True)
self.inputRelatedName.Show(True)
self.labelInputRelatedName.Show(True)
self.readmeInputRelatedName.Show(True)
self.inputRelatedQueryNameStaticBox.Show(True)
self.inputRelatedQueryName.Show(True)
self.labelInputRelatedQueryName.Show(True)
self.readmeInputRelatedQueryName.Show(True)
self.inputLimitChoicesToStaticBox.Show(True)
self.inputLimitChoicesTo.Show(True)
self.labelInputLimitChoicesTo.Show(True)
self.readmeInputLimitChoicesTo.Show(True)
self.radiosDBConstraintStaticBox.Show(True)
self.radiosDBConstraint.Show(True)
self.labelRadiosDBConstraint.Show(True)
self.readmeRadiosDBConstraint.Show(True)
self.inputDBTableStaticBox.Show(True)
self.inputDBTable.Show(True)
self.labelInputDBTable.Show(True)
self.readmeInputDBTable.Show(True)
self.choiceSelectModel.Enable(True)
self.inputLimitChoicesTo.Enable(True)
self.inputRelatedName.Enable(True)
self.inputRelatedQueryName.Enable(True)
self.radiosDBConstraint.Enable(True)
self.inputDBTable.Enable(True)
# 多对多字段不支持 validators、null
self.radiosFiledNull.Enable(False)
def selectOneToOneField(self):
"""一对一字段"""
self.radiosFiledDbIndex.SetSelection(0)
self.inputFieldRemarkName.Enable(False) # 锁定位置参数备注名,使用关键字参数备注名
self.choiceSelectModelStaticBox.Show(True)
self.choiceSelectModel.Show(True)
self.labelChoiceSelectModel.Show(True)
self.readmeChoiceSelectModel.Show(True)
self.choiceSelectDelRuleStaticBox.Show(True)
self.choiceSelectDelRule.Show(True)
self.labelChoiceSelectDelRule.Show(True)
self.readmeChoiceSelectDelRule.Show(True)
self.inputRelationRemarkStaticBox.Show(True)
self.inputRelationRemark.Show(True)
self.labelInputRelationRemark.Show(True)
self.readmeInputRelationRemark.Show(True)
self.inputLimitChoicesToStaticBox.Show(True)
self.inputLimitChoicesTo.Show(True)
self.labelInputLimitChoicesTo.Show(True)
self.readmeInputLimitChoicesTo.Show(True)
self.inputRelatedNameStaticBox.Show(True)
self.inputRelatedName.Show(True)
self.labelInputRelatedName.Show(True)
self.readmeInputRelatedName.Show(True)
self.inputRelatedQueryNameStaticBox.Show(True)
self.inputRelatedQueryName.Show(True)
self.labelInputRelatedQueryName.Show(True)
self.readmeInputRelatedQueryName.Show(True)
self.inputToFieldStaticBox.Show(True)
self.inputToField.Show(True)
self.labelInputToField.Show(True)
self.readmeInputToField.Show(True)
self.radiosDBConstraintStaticBox.Show(True)
self.radiosDBConstraint.Show(True)
self.labelRadiosDBConstraint.Show(True)
self.readmeRadiosDBConstraint.Show(True)
self.choiceSelectModel.Enable(True)
self.choiceSelectDelRule.Enable(True)
self.inputRelationRemark.Enable(True)
self.inputLimitChoicesTo.Enable(True)
self.inputRelatedName.Enable(True)
self.inputRelatedQueryName.Enable(True)
self.inputToField.Enable(True)
self.radiosDBConstraint.Enable(True)
def onExit(self, e):
"""退出窗口"""
dlg_tip = wx.MessageDialog(self, f"确认退出?退出后界面数据将丢失。", CON_TIPS_COMMON, wx.CANCEL | wx.OK)
if dlg_tip.ShowModal() == wx.ID_OK:
self.Close(True)
dlg_tip.Close(True)
```
#### File: dialogs/dialogViews/dialogViews.py
```python
from ..common import *
"""
### 使用者自定义视图模板并为此模板编辑逻辑的步骤:【后期补全】
"""
###
"""
### 路由默认写入根urls.py文件,也可用新的别名替代第一个名字(不推荐这么做,因为随之改动的地方会很多,除非你很熟悉Django,否则不建议更改urls.py在别名中的位置)。
### 存在隐患,在用户创建路由时应当判断路由明是否冲突,反向名称是否冲突。这将在未来版本中修复。
### urlpatterns参数必须要有一个空行,否则会错误处理。这将在未来版本中修复。
"""
LABEL_COL_LEN = 200
class ViewGenerateDialog(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, id = wx.ID_ANY, title = '新增视图', size=(920, 600), style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.MINIMIZE_BOX|wx.RESIZE_BORDER)
# 一些控制容器
self.labelStaticTexts = []
self.allCtrlsWithoutType = [] # 选择参数
self._init_UI()
# 布局后,美化界面
self._init_label_font()
self._init_all_args()
self._unshow_allctrls_withouttype()
def _init_UI(self):
"""初始化界面布局"""
# 总面板
self.panel = wx.Panel(self)
self.panelSizer = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(self.panelSizer)
# 分割面板(左右分割)
self.splitWindow = wx.SplitterWindow(self.panel, -1)
self.panelSizer.Add(self.splitWindow, 1, wx.EXPAND | wx.ALL, 2)
# 左子面板
self.leftPanel = wx.Panel(self.splitWindow, style=wx.SUNKEN_BORDER)
self.leftPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.leftPanel.SetSizer(self.leftPanelSizer )
# 右子面板
self.rightPanel = wx.Panel(self.splitWindow, style=wx.SUNKEN_BORDER)
self.rightPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.rightPanel.SetSizer(self.rightPanelSizer)
self.splitWindow.Initialize(self.leftPanel)
self.splitWindow.SplitVertically(self.leftPanel, self.rightPanel, 520)
self._init_left_panel()
self._init_right_panel()
# 模板变量
self.views_template = ''
self.argsStruct = {} #存放模板内容替换的所有内容
def _unshow_allctrls_withouttype(self):
"""隐藏所有的非类型选择交互式控件"""
for _ in self.allCtrlsWithoutType:
_.Show(False)
def _show_allctrls_withouttype(self):
"""显示所有的非类型选择交互式控件"""
for _ in self.allCtrlsWithoutType:
_.Show(True)
def _init_left_panel(self):
"""初始化左子面板"""
# 滚动面板
self.leftScrollPanel = scrolledpanel.ScrolledPanel(self.leftPanel, -1)
self.leftScrollPanel.SetupScrolling()
leftScrollPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.leftScrollPanel.SetSizer(leftScrollPanelSizer)
self.leftPanelSizer.Add(self.leftScrollPanel, 1, wx.EXPAND | wx.ALL, 2)
# 选择文件写入路径【此处更改为选择App】
self.selectFilePanelStaticBox = wx.StaticBox(self.leftScrollPanel, -1, '')
self.selectFilePanel = wx.StaticBoxSizer(self.selectFilePanelStaticBox, wx.HORIZONTAL)
leftScrollPanelSizer.Add(self.selectFilePanel, 0, wx.EXPAND | wx.ALL, 2)
# self.selectFilePanel.SetBackgroundColour(CON_COLOR_BLACK) # CON_COLOR_PURE_WHITE
self.labelSelectFile = wx.StaticText(self.leftScrollPanel, -1, "选择视图所属的应用程序", size=(LABEL_COL_LEN, -1))
self.choiceSelectFile = wx.Choice(self.leftScrollPanel, -1, choices=[' ',] + djangotools.SCONFIGS.app_names())
self.selectFilePanel.Add(self.labelSelectFile, 0, wx.EXPAND | wx.ALL, 2)
self.selectFilePanel.Add(self.choiceSelectFile, 1, wx.EXPAND | wx.ALL, 2)
# self.labelSelectFile.SetFont(wx.Font(16, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD))
# 选择视图类型
self.choiceViewTypeStaticBox = wx.StaticBox(self.leftScrollPanel, -1, '')
self.viewTypePanel = wx.StaticBoxSizer(self.choiceViewTypeStaticBox, wx.HORIZONTAL)
leftScrollPanelSizer.Add(self.viewTypePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelChoiceViewType = wx.StaticText(self.leftScrollPanel, -1, "选择要创建的视图类型:", style=wx.ALIGN_CENTRE_HORIZONTAL, size=(LABEL_COL_LEN, -1))
self.choiceViewType = wx.Choice(self.leftScrollPanel, -1, choices=[' ',] + CON_VIEW_CHOICES)
self.viewTypePanel.Add(self.labelChoiceViewType, 0, wx.EXPAND | wx.ALL, 2)
self.viewTypePanel.Add(self.choiceViewType, 1, wx.EXPAND | wx.ALL, 2)
# 视图名称
self.inputViewNameStaticBox = wx.StaticBox(self.leftScrollPanel, -1, '')
self.inputViewNamePanel = wx.StaticBoxSizer(self.inputViewNameStaticBox, wx.HORIZONTAL)
leftScrollPanelSizer.Add(self.inputViewNamePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputViewName = wx.StaticText(self.leftScrollPanel, -1, "视图名称:", style=wx.ALIGN_CENTRE_HORIZONTAL, size=(LABEL_COL_LEN, -1))
self.inputViewName = wx.TextCtrl(self.leftScrollPanel, -1, style = wx.ALIGN_LEFT)
self.inputViewNamePanel.Add(self.labelInputViewName, 0, wx.EXPAND | wx.ALL, 2)
self.inputViewNamePanel.Add(self.inputViewName, 1, wx.EXPAND | wx.ALL, 2)
# 路由反向解析名称【默认取 视图名称】
self.inputReverseViewNameStaticBox = wx.StaticBox(self.leftScrollPanel, -1, '')
self.inputReverseViewNamePanel = wx.StaticBoxSizer(self.inputReverseViewNameStaticBox, wx.HORIZONTAL)
leftScrollPanelSizer.Add(self.inputReverseViewNamePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputReverseViewName = wx.StaticText(self.leftScrollPanel, -1, "反向解析名称:", style=wx.ALIGN_CENTRE_HORIZONTAL, size=(LABEL_COL_LEN, -1))
self.inputReverseViewName = wx.TextCtrl(self.leftScrollPanel, -1, style = wx.ALIGN_LEFT)
self.inputReverseViewNamePanel.Add(self.labelInputReverseViewName, 0, wx.EXPAND | wx.ALL, 2)
self.inputReverseViewNamePanel.Add(self.inputReverseViewName, 1, wx.EXPAND | wx.ALL, 2)
# 路由路径指定
self.inputUrlPathStaticBox = wx.StaticBox(self.leftScrollPanel, -1, '')
self.inputUrlPathPanel = wx.StaticBoxSizer(self.inputUrlPathStaticBox, wx.HORIZONTAL)
leftScrollPanelSizer.Add(self.inputUrlPathPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputUrlPath = wx.StaticText(self.leftScrollPanel, -1, "路径和参数:", style=wx.ALIGN_CENTRE_HORIZONTAL, size=(LABEL_COL_LEN, -1))
self.inputUrlPath = wx.TextCtrl(self.leftScrollPanel, -1, style = wx.ALIGN_LEFT)
self.inputUrlPathPanel.Add(self.labelInputUrlPath, 0, wx.EXPAND | wx.ALL, 2)
self.inputUrlPathPanel.Add(self.inputUrlPath, 1, wx.EXPAND | wx.ALL, 2)
# 路由预览
self.inputUrlPreviewStaticBox = wx.StaticBox(self.leftScrollPanel, -1, '')
self.inputUrlPreviewPanel = wx.StaticBoxSizer(self.inputUrlPreviewStaticBox, wx.HORIZONTAL)
leftScrollPanelSizer.Add(self.inputUrlPreviewPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelInputUrlPreview = wx.StaticText(self.leftScrollPanel, -1, "路由预览:", style=wx.ALIGN_CENTRE_HORIZONTAL, size=(LABEL_COL_LEN, -1))
self.inputUrlPreview = wx.TextCtrl(self.leftScrollPanel, -1, style = wx.ALIGN_LEFT)
self.inputUrlPreviewPanel.Add(self.labelInputUrlPreview, 0, wx.EXPAND | wx.ALL, 2)
self.inputUrlPreviewPanel.Add(self.inputUrlPreview, 1, wx.EXPAND | wx.ALL, 2)
self.inputUrlPreview.Enable(False)
# 响应对象
self.choiceReturnTypeStaticBox = wx.StaticBox(self.leftScrollPanel, -1, '')
self.choiceReturnTypePanel = wx.StaticBoxSizer(self.choiceReturnTypeStaticBox, wx.HORIZONTAL)
leftScrollPanelSizer.Add(self.choiceReturnTypePanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelChoiceReturnType = wx.StaticText(self.leftScrollPanel, -1, "响应对象:", style=wx.ALIGN_CENTRE_HORIZONTAL, size=(LABEL_COL_LEN, -1))
self.choiceReturnType = wx.Choice(self.leftScrollPanel, -1, choices=[' ',] + CON_VIEWS_RETURN_TYPE)
self.choiceReturnTypePanel.Add(self.labelChoiceReturnType, 0, wx.EXPAND | wx.ALL, 2)
self.choiceReturnTypePanel.Add(self.choiceReturnType, 1, wx.EXPAND | wx.ALL, 2)
# 快捷响应对象
self.choiceShortcutsStaticBox = wx.StaticBox(self.leftScrollPanel, -1, '')
self.choiceShortcutsPanel = wx.StaticBoxSizer(self.choiceShortcutsStaticBox, wx.HORIZONTAL)
leftScrollPanelSizer.Add(self.choiceShortcutsPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelChoiceShortcuts = wx.StaticText(self.leftScrollPanel, -1, "快捷响应对象:", style=wx.ALIGN_CENTRE_HORIZONTAL, size=(LABEL_COL_LEN, -1))
self.choiceShortcuts = wx.Choice(self.leftScrollPanel, -1, choices=[' ',] + CON_VIEWS_SHORTCUTS)
self.choiceShortcutsPanel.Add(self.labelChoiceShortcuts, 0, wx.EXPAND | wx.ALL, 2)
self.choiceShortcutsPanel.Add(self.choiceShortcuts, 1, wx.EXPAND | wx.ALL, 2)
# 装饰器
self.choiceDecoratorsStaticBox = wx.StaticBox(self.leftScrollPanel, -1, '')
self.choiceDecoratorsPanel = wx.StaticBoxSizer(self.choiceDecoratorsStaticBox, wx.HORIZONTAL)
leftScrollPanelSizer.Add(self.choiceDecoratorsPanel, 0, wx.EXPAND | wx.ALL, 2)
self.labelChoiceDecorators = wx.StaticText(self.leftScrollPanel, -1, "装饰器:", style=wx.ALIGN_CENTRE_HORIZONTAL, size=(LABEL_COL_LEN, -1))
self.choiceDecorators = wx.Choice(self.leftScrollPanel, -1, choices=[' ',] + CON_VIEWS_DECORATORS)
self.choiceDecoratorsPanel.Add(self.labelChoiceDecorators, 0, wx.EXPAND | wx.ALL, 2)
self.choiceDecoratorsPanel.Add(self.choiceDecorators, 1, wx.EXPAND | wx.ALL, 2)
# 按钮
# self.btnRetrySelect = buttons.GenButton(self.leftScrollPanel, -1, '重新选择视图类型')
self.btnSubmit = buttons.GenButton(self.leftScrollPanel, -1, '创建')
# leftScrollPanelSizer.Add(self.btnRetrySelect, 0, wx.EXPAND | wx.ALL, 2)
leftScrollPanelSizer.Add(self.btnSubmit, 0, wx.EXPAND | wx.ALL, 2)
# self.btnRetrySelect.SetBackgroundColour(CON_COLOR_BLUE)
# self.btnRetrySelect.SetForegroundColour(CON_COLOR_WHITE)
self.btnSubmit.SetBackgroundColour(CON_COLOR_BLUE)
self.btnSubmit.SetForegroundColour(CON_COLOR_WHITE)
# 标签美化
self.labelStaticTexts.extend([
self.labelSelectFile,
self.labelChoiceViewType, self.labelInputUrlPath,
self.labelInputViewName, self.labelInputReverseViewName,
self.labelInputUrlPreview, self.labelChoiceReturnType,
self.labelChoiceShortcuts, self.labelChoiceDecorators,
])
# 隐藏控制
self.allCtrlsWithoutType.extend([
self.inputViewNameStaticBox,self.labelInputViewName,self.inputViewName,
self.inputReverseViewNameStaticBox,self.labelInputReverseViewName,self.inputReverseViewName,
self.inputUrlPathStaticBox,self.labelInputUrlPath,self.inputUrlPath,
self.inputUrlPreviewStaticBox,self.labelInputUrlPreview,self.inputUrlPreview,
self.choiceReturnTypeStaticBox,self.labelChoiceReturnType,self.choiceReturnType,
self.choiceShortcutsStaticBox,self.labelChoiceShortcuts,self.choiceShortcuts,
self.choiceDecoratorsStaticBox,self.labelChoiceDecorators,self.choiceDecorators,
# self.btnRetrySelect,
self.btnSubmit,
])
# 文本实时监听事件
self.Bind(wx.EVT_TEXT, self.onInputViewName, self.inputViewName)
self.Bind(wx.EVT_TEXT, self.onInputUrlPath, self.inputUrlPath)
# 下拉框选择事件
self.Bind(wx.EVT_CHOICE, self.onChoiceViewType, self.choiceViewType)
# 选择框选择事件
self.Bind(wx.EVT_CHOICE, self.onChoiceDecorators, self.choiceDecorators)
self.Bind(wx.EVT_CHOICE, self.onChoiceReturnType, self.choiceReturnType)
self.Bind(wx.EVT_CHOICE, self.onChoiceShortcuts, self.choiceShortcuts)
# 按钮点击事件
self.Bind(wx.EVT_BUTTON, self.onBtnSubmit, self.btnSubmit)
def onBtnSubmit(self, e):
"""创建视图"""
# 获取所有的值
vchoiceSelectFile = self.choiceSelectFile.GetString(self.choiceSelectFile.GetSelection()).strip()
vchoiceViewType = self.choiceViewType.GetString(self.choiceViewType.GetSelection()).strip()
vinputViewName = self.inputViewName.GetValue().strip()
vinputReverseViewName = self.inputReverseViewName.GetValue().strip()
vinputUrlPath = self.inputUrlPath.GetValue().strip()
vinputUrlPreview = self.inputUrlPreview.GetValue().strip()
vchoiceReturnType = self.choiceReturnType.GetString(self.choiceReturnType.GetSelection()).strip()
vchoiceShortcuts = self.choiceShortcuts.GetString(self.choiceShortcuts.GetSelection()).strip()
vchoiceDecorators = self.choiceDecorators.GetString(self.choiceDecorators.GetSelection()).strip()
vinputCodeReview = self.inputCodeReview.GetValue().strip()
if not vchoiceSelectFile:
RichMsgDialog.showOkMsgDialog(self, '请选择视图即将写入的应用程序', '错误')
return
if not vchoiceViewType:
RichMsgDialog.showOkMsgDialog(self, '无法写入空数据', '错误')
return
if not vinputViewName:
RichMsgDialog.showOkMsgDialog(self, '视图名称不允许为空', '错误')
return
if not vinputReverseViewName:
RichMsgDialog.showOkMsgDialog(self, '反向名称不允许为空', '错误')
return
if not vinputUrlPath or '/' == vinputUrlPath: # 后期增加
RichMsgDialog.showOkMsgDialog(self, '请正确填写路由路径', '错误')
return
vinputUrlPath = vinputUrlPath if '/' == vinputUrlPath[-1] else vinputUrlPath+'/' # 必须以 / 结尾
# 默认取路由urls.py别名
op_url = env.getUrlsAlias()[0] # 这里不做路径处理,路径默认是以应用程路目录为根目录向内延伸
views = env.getViewsAlias() # 取views.py别名
CONFIG = get_configs(CONFIG_PATH) # 项目路径
op_path = os.path.join(CONFIG['dirname'], vchoiceSelectFile, op_url) # 应用程序路由路径
view_path = os.path.join(CONFIG['dirname'], vchoiceSelectFile, views[0]) # 应用程序视图路径
views = [os.path.basename(_) for _ in views][0] # 取view文件名(带后缀)
content = djangotools.get_list_patt_content(retools.PATT_URLPATTERNS, op_path) # 截取 urlpatterns 参数内容
if '函数视图' == vchoiceViewType:
temp = views.split('.')[0] + '.' + vinputViewName # 函数视图
else:
temp = views.split('.')[0] + '.' + vinputViewName + '.as_view()' # 类视图
new_content = content + f" path('{vinputUrlPath}', {temp}, name='{vinputReverseViewName}'),\n" # 即将写入的路由
append_file_whole(view_path, vinputCodeReview+'\n') # 写入视图
write_file(op_path, read_file(op_path).replace(content, new_content)) # 注册路由
RichMsgDialog.showOkMsgDialog(self, '路由添加成功', '成功')
def onChoiceReturnType(self, e):
"""视图返回对象"""
return_obj = self.choiceReturnType.GetString(self.choiceReturnType.GetSelection()).strip()
if 'HttpResponse(200)' == return_obj:
self.argsStruct['return_obj'] = "return HttpResponse('<h1>Hello World!<h1>', content_type='text/plain')"
elif 'HttpResponseNotFound(404)' == return_obj:
self.argsStruct['return_obj'] = "return HttpResponseNotFound('<p>404 Not Found.<p>')"
        elif 'HttpResponseRedirect(302)' == return_obj:
            # HttpResponseRedirect takes a redirect URL, not page content
            self.argsStruct['return_obj'] = "return HttpResponseRedirect('/')  # 302重定向"
        elif 'HttpResponseNotModified(304)' == return_obj:
            # a 304 response must not carry a body
            self.argsStruct['return_obj'] = "return HttpResponseNotModified()  # 304未改变"
elif 'HttpResponseBadRequest(400)' == return_obj:
self.argsStruct['return_obj'] = "return HttpResponseBadRequest('<p>400.<p>')"
elif 'HttpResponseForbidden(403)' == return_obj:
self.argsStruct['return_obj'] = "return HttpResponseForbidden('<p>403拒绝连接.<p>')"
        elif 'HttpResponseNotAllowed(405)' == return_obj:
            # HttpResponseNotAllowed expects the list of permitted methods as its first argument
            self.argsStruct['return_obj'] = "return HttpResponseNotAllowed(['GET', 'POST'])  # 405无法访问"
elif 'HttpResponseGone(410)' == return_obj:
self.argsStruct['return_obj'] = "return HttpResponseGone('<p>410.<p>')"
elif 'HttpResponseServerError(500)' == return_obj:
self.argsStruct['return_obj'] = "return HttpResponseServerError('<p>500服务器错误.<p>')"
elif 'JsonResponse' == return_obj:
self.argsStruct['return_obj'] = "return JsonResponse({'status':200,'datas':[1,2,3]})"
else:
self.argsStruct['return_obj'] = ""
self._insert_data_to_template_by_argstruct()
def onChoiceShortcuts(self, e):
"""视图快捷返回对象"""
shortcut_obj = self.choiceShortcuts.GetString(self.choiceShortcuts.GetSelection()).strip()
self.argsStruct['shortcut_obj'] = shortcut_obj
self._insert_data_to_template_by_argstruct()
def onChoiceDecorators(self, e):
"""选择函数装饰器"""
decorator_type = e.GetString().strip()
self.argsStruct['decorator'] = f'@{decorator_type}' if decorator_type and '(无)' != decorator_type else ''
self._insert_data_to_template_by_argstruct()
def _init_all_args(self):
"""初始化所有的交互式控件值"""
self.choiceSelectFile.SetSelection(0)
self.choiceViewType.SetSelection(0)
self.inputViewName.SetValue('')
self.inputReverseViewName.SetValue('')
self.inputUrlPath.SetValue('')
self.inputUrlPreview.SetValue('')
self.choiceReturnType.SetSelection(0)
self.choiceShortcuts.SetSelection(0)
self.choiceDecorators.SetSelection(0)
def onInputUrlPath(self, e):
"""路由路径指定"""
path = self.inputUrlPath.GetValue().strip()
if retools.PATT_CAPTURE_URLSPATH_ARGS.search(path):
args = [
_ if -1 == _.find(':') else _[_.find(':')+1:]
for _ in retools.PATT_CAPTURE_URLSPATH_ARGS.findall(path)
]
self.argsStruct['func_args'] = args
else:
self.argsStruct['func_args'] = []
self._insert_data_to_template_by_argstruct()
# 路由预览
# get_app_rooturl_config_by_appname
app_name = self.choiceSelectFile.GetString(self.choiceSelectFile.GetSelection()).strip()
if app_name:
root_name = djangotools.get_app_rooturl_config_by_appname(app_name)
if root_name:
if '/' != root_name[-1]:
root_name += '/'
# 显示
self.inputUrlPreview.SetValue('/' + root_name + path)
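    # Example (an assumption about retools.PATT_CAPTURE_URLSPATH_ARGS, which is defined elsewhere):
    # a path such as 'article/<int:year>/<slug:title>/' is expected to yield
    # self.argsStruct['func_args'] == ['year', 'title'] - the converter prefix before ':' is stripped above.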
def _insert_data_to_template_by_argstruct(self):
"""用模板变量填充模板"""
temp_template = self.views_template
# 路由函数参数填充
if self.argsStruct.get('func_args'):
temp_template = retools.patt_sub_only_capture_obj_add(retools.PATT_FUNC_ARGS, ', '+', '.join(self.argsStruct['func_args']), temp_template)
# 路由方法名/类名
if self.argsStruct.get('view_name'):
temp_template = temp_template.replace('${view_name}', self.argsStruct['view_name'])
# 装饰器
if None != self.argsStruct.get('decorator'):
temp_template = temp_template.replace('${decorator}', self.argsStruct['decorator'])
        # 视图返回对象
        if self.argsStruct.get('return_obj'):
            temp_template = temp_template.replace('${return}', self.argsStruct['return_obj'])
        # 视图返回快捷对象
if self.argsStruct.get('shortcut_obj'):
temp_template = temp_template.replace('${return}', self.argsStruct['shortcut_obj'])
self.inputCodeReview.SetValue(temp_template)
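    # Illustration (an assumption - the template returned by djangotools.get_views_base_func() is not
    # shown here; only its placeholders ${decorator}/${view_name}/${return} are visible above, and the
    # decorator/view names below are hypothetical). A filled-in function view would look roughly like:
    #     @login_required
    #     def article_detail(request, year, title):
    #         return JsonResponse({'status': 200, 'datas': [1, 2, 3]})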
def onChoiceViewType(self, e):
"""选择要新建的视图类型"""
view_type = e.GetString().strip()
self._unshow_allctrls_withouttype() # 全部关闭,按需开启
if not view_type:
self.inputCodeReview.SetValue('')
return
if CON_VIEW_TYPE_FUNC == view_type:
self.views_template = djangotools.get_views_base_func()
self.inputCodeReview.SetValue(self.views_template)
# 显示本视图类型下的特殊参数设置
self._show_allctrls_withouttype()
elif CON_VIEW_TYPE_CLASS == view_type:
self.views_template = djangotools.get_views_base_class()
self.inputCodeReview.SetValue(self.views_template)
# 显示本视图类型下的特殊参数设置
self.inputViewNameStaticBox.Show(True)
self.labelInputViewName.Show(True)
self.inputViewName.Show(True)
self.inputReverseViewNameStaticBox.Show(True)
self.labelInputReverseViewName.Show(True)
self.inputReverseViewName.Show(True)
self.inputUrlPathStaticBox.Show(True)
self.labelInputUrlPath.Show(True)
self.inputUrlPath.Show(True)
self.inputUrlPreviewStaticBox.Show(True)
self.labelInputUrlPreview.Show(True)
self.inputUrlPreview.Show(True)
self.btnSubmit.Show(True)
else:
self.inputCodeReview.SetValue('')
self.leftPanel.Layout()
def _init_right_panel(self):
"""初始化右子面板"""
# 代码预览面板
self.codeReviewPanel = wx.Panel(self.rightPanel)
self.codeReviewPanelSizer = wx.BoxSizer(wx.HORIZONTAL)
self.codeReviewPanel.SetSizer(self.codeReviewPanelSizer)
self.rightPanelSizer.Add(self.codeReviewPanel, 1, wx.EXPAND | wx.ALL, 2)
self.inputCodeReview = wx.TextCtrl(self.codeReviewPanel, -1, style=wx.TE_MULTILINE)
self.codeReviewPanelSizer.Add(self.inputCodeReview, 1, wx.EXPAND | wx.ALL, 2)
self.inputCodeReview.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD, False))
# 标签美化
self.labelStaticTexts.extend([])
def onInputViewName(self, e):
"""视图名称监听实时输入"""
view_name = self.inputViewName.GetValue().strip()
self.inputReverseViewName.SetValue(view_name.lower())
if view_name:
self.argsStruct['view_name'] = view_name
self._insert_data_to_template_by_argstruct() # 更新代码显示
def _init_label_font(self):
"""标签提示信息字体初始化"""
for _ in self.labelStaticTexts:
_.SetFont(wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD))
_.SetForegroundColour(CON_COLOR_BLUE)
```
#### File: frames/mainFrame/events.py
```python
from .listener import *
from ..sqliteFrame import *
"""
作用:实现事件功能
"""
class MainFrameFuncs(MainFrameListener):
def __init__(self, parent=None):
super().__init__(parent=parent)
self.order_container = (self.cmdCodes, self.info_cmdCodes,)
def onHelpsORM(self, e):
"""ORM帮助(一键生成)"""
dlg = ORMDialog(self)
dlg.ShowModal()
dlg.Close(True)
def onMenuVSCode(self, e):
"""外部发起VSCode编辑"""
# 检测是否配置code命令环境
if wx.Shell("code -v"):
dirname = get_configs(CONFIG_PATH)['dirname']
self.cmdVscode = subprocess.Popen(f'code {dirname}', shell=True)
self.cmdCodes.append(self.cmdVscode)
self.info_cmdCodes[self.cmdVscode] = '开启VSCode编辑器'
else:
self.infoBar.ShowMessage(f'未检测到code命令', wx.ICON_ERROR)
def onPortProgressVirtualView(self, e):
"""查看虚拟环境路径"""
RichMsgDialog.showOkMsgDialog(self, env.getPython3Env(), '虚拟环境路径')
@RegisterOriginOrderDecorator(msg = 'collectstatic')
@VirtualEnvMustExistDecorator()
def onPortProgressCollectstatic(self, e):
"""python manage.py collectstatic"""
return (
subprocess.Popen(f'{env.getDjangoOrderArgs()} collectstatic', shell=True)
, *self.order_container
)
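    # Sketch (an assumption - RegisterOriginOrderDecorator is defined elsewhere in this project; this
    # only illustrates how the (Popen, cmdCodes, info_cmdCodes) tuple returned above is presumably consumed):
    #     def RegisterOriginOrderDecorator(msg):
    #         def outer(func):
    #             def wrapper(self, e):
    #                 proc, codes, infos = func(self, e)
    #                 codes.append(proc)    # track the subprocess
    #                 infos[proc] = msg     # label it for later inspection/termination
    #             return wrapper
    #         return outer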
@RegisterOriginOrderDecorator(msg = 'freeze')
@VirtualEnvMustExistDecorator()
def onPortProgressPipFreeze(self, e):
"""导出包pip freeze"""
return (
subprocess.Popen(f'{env.getPipOrderArgs(mode="freeze")}', shell=True)
, *self.order_container
)
@VirtualEnvMustExistDecorator()
def onPortProgressPipInstall(self, e):
"""虚拟环境安装包pip install"""
dlg = wx.TextEntryDialog(self, u"包名:", u"虚拟环境安装三方库", u"")
if dlg.ShowModal() == wx.ID_OK:
module_name = dlg.GetValue()
self.cmdPipInstall = subprocess.Popen(f'{env.getPipOrderArgs()} {module_name}', shell=True)
self.cmdCodes.append(self.cmdPipInstall)
self.info_cmdCodes[self.cmdPipInstall] = 'install'
dlg.Close(True)
@RegisterOriginOrderDecorator(msg = 'shell')
@VirtualEnvMustExistDecorator()
def onPortProgressShell(self, e):
"""python manage.py shell"""
return (
subprocess.Popen(f'{env.getDjangoOrderArgs()} shell', shell=True)
, *self.order_container
)
@RegisterOriginOrderDecorator(msg = 'makemigrations')
@VirtualEnvMustExistDecorator()
def onPortProgressMakemigrations(self, e):
"""python manage.py makemigrations"""
return (
subprocess.Popen(f'{env.getDjangoOrderArgs()} makemigrations', shell=True)
, *self.order_container
)
@RegisterOriginOrderDecorator(msg = 'migrate')
@VirtualEnvMustExistDecorator()
def onPortProgressMigrate(self, e):
"""python manage.py migtrate"""
return (
subprocess.Popen(f'{env.getDjangoOrderArgs()} migrate', shell=True)
, *self.order_container
)
@RegisterOriginOrderDecorator(msg = 'flush')
@VirtualEnvMustExistDecorator()
def onPortProgressFlush(self, e):
"""python manage.py flush"""
return (
subprocess.Popen(f'{env.getDjangoOrderArgs()} flush', shell=True)
, *self.order_container
)
@RegisterOriginOrderDecorator(msg = 'createsuperuser')
@VirtualEnvMustExistDecorator()
def onPortProgressCreatesuperuser(self, e):
"""python manage.py createsuperuser"""
return (
subprocess.Popen(f'{env.getDjangoOrderArgs()} createsuperuser', shell=True)
, *self.order_container
)
def onPortProgressVirtual(self, e):
"""创建虚拟环境"""
# venv.create(env_dir, system_site_packages=False, clear=False, symlinks=False, with_pip=False, prompt=None)
dlg = wx.DirDialog(self, u"选择即将写入的虚拟环境文件夹", style=wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
env_dir = dlg.GetPath()
t = len(os.listdir(env_dir))
if t > 0:
self.infoBar.ShowMessage(f'检测到选择的文件夹下存在其它文件,禁止操作。', wx.ICON_ERROR)
else:
venv.create(env_dir, system_site_packages=False, clear=True, symlinks=False, with_pip=True, prompt=None)
# 分操作系统自动绑定python解释器
this_platform = env.getPlatform().lower()
if 'windows' == this_platform:
temp_path = os.path.join(env_dir, 'Scripts', 'python.exe')
env.setPython3Env(temp_path)
self.infoBar.ShowMessage(f'创建并绑定成功,命令路径:{temp_path}', wx.ICON_INFORMATION)
elif 'darwin' == this_platform:
temp_path = os.path.join(env_dir, 'bin', 'python')
env.setPython3Env(temp_path)
self.infoBar.ShowMessage(f'创建并绑定成功,命令路径:{temp_path}', wx.ICON_INFORMATION)
else:
self.infoBar.ShowMessage(f'创建成功,虚拟目录:{env_dir}', wx.ICON_INFORMATION)
dlg.Destroy()
def onPortProgressKillProgress(self, e):
"""终止进程"""
dlg = wx.TextEntryDialog(self, u"占用端口号:", u"终止进程", u"")
if dlg.ShowModal() == wx.ID_OK:
port = dlg.GetValue()
env.killProgress(port = port)
self.infoBar.ShowMessage(f'已终止。', wx.ICON_INFORMATION)
dlg.Close(True)
def onPortProgressFaster(self, e):
"""一键配置镜像环境"""
rpath = os.path.expanduser('~')
# 根据系统依次安装镜像环境
platform = env.getPlatform().lower()
if 'windows' == platform:
if 'pip' in os.listdir(rpath):
pip_path = os.path.join(rpath, 'pip')
if 'pip.ini' in os.listdir(pip_path):
self.infoBar.ShowMessage(f'当前环境已配置镜像。', wx.ICON_WARNING)
else:
# TEMPLATE_DIR
write_file(os.path.join(pip_path, 'pip.ini'), read_file(os.path.join(TEMPLATE_DIR, 'pip', 'pip.ini')))
self.infoBar.ShowMessage(f'配置镜像环境成功。', wx.ICON_INFORMATION)
else:
pip_path = os.path.join(rpath, 'pip')
os.mkdir(pip_path)
write_file(os.path.join(pip_path, 'pip.ini'), read_file(os.path.join(TEMPLATE_DIR, 'pip', 'pip.ini')))
self.infoBar.ShowMessage(f'配置镜像环境成功。', wx.ICON_INFORMATION)
        elif platform in ('linux', 'darwin'): # 理论上,Mac和Linux配置镜像环境步骤一致
            if '.pip' in os.listdir(rpath):
                pip_path = os.path.join(rpath, '.pip')
                if 'pip.conf' in os.listdir(pip_path):
                    self.infoBar.ShowMessage(f'当前环境已配置镜像。', wx.ICON_WARNING)
                else:
                    write_file(os.path.join(pip_path, 'pip.conf'), read_file(os.path.join(TEMPLATE_DIR, 'pip', 'pip.ini')))
                    self.infoBar.ShowMessage(f'配置镜像环境成功。', wx.ICON_INFORMATION)
            else:
                pip_path = os.path.join(rpath, '.pip')
                os.mkdir(pip_path)
                write_file(os.path.join(pip_path, 'pip.conf'), read_file(os.path.join(TEMPLATE_DIR, 'pip', 'pip.ini')))
                self.infoBar.ShowMessage(f'配置镜像环境成功。', wx.ICON_INFORMATION)
else:
self.infoBar.ShowMessage(f'未知系统', wx.ICON_WARNING)
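    # For reference (an assumption - the bundled TEMPLATE_DIR/pip/pip.ini is not shown here), a typical
    # mirror configuration written by the method above looks like:
    #     [global]
    #     index-url = https://pypi.tuna.tsinghua.edu.cn/simple
    #     [install]
    #     trusted-host = pypi.tuna.tsinghua.edu.cn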
def onModelsProxyGenerate(self, e):
"""创建代理模型"""
def onPortProgressStop(self, e):
"""关闭网站运行状态"""
try:
self.server.terminate()
env.killProgress()
except:
self.infoBar.ShowMessage(f'网站未正常启动或启动异常,导致关闭失败。', wx.ICON_ERROR)
else:
self.infos.AppendText(out_infos(f"网站已关闭。", level=1))
self.portProgressRun.Enable(True)
self.portProgressStop.Enable(False)
self.sys_toolbar.EnableTool(self.shotcut_run.GetId(), True)
self.sys_toolbar.EnableTool(self.shotcut_stop.GetId(), False)
self.infoBar.ShowMessage(f'网站已关闭。', wx.ICON_INFORMATION)
def onPortProgressVirtualChoice(self, e):
"""选择虚拟环境"""
dlg = wx.FileDialog(self, "选择虚拟环境下的python.exe文件", "", "", "*.*", wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
env.setPython3Env(os.path.join(dlg.GetDirectory(), dlg.GetFilename()))
self.infoBar.ShowMessage(f'虚拟环境绑定成功!', wx.ICON_INFORMATION)
dlg.Close(True)
def onHelpSeeOrKill(self, e):
"""查看或终止进程"""
RichMsgDialog.showOkMsgDialog(self, CON_MSG_PROGRESS_USE, CON_TIPS_COMMON)
@VirtualEnvMustExistDecorator()
def onPortProgressRun(self, e):
"""子进程运行Django"""
port = env.getDjangoRunPort()
host = env.getDjangoRunHost()
try:
self.server = subprocess.Popen(f'{env.getDjangoOrderArgs()} runserver {port}', shell=True) # , stderr=subprocess.PIPE, stdout=subprocess.PIPE
except:
self.infos.AppendText(out_infos(f"虚拟环境错误,或项目路径错误,或端口被占用。", level=3))
else:
import webbrowser
webbrowser.open(f"{host}:{port}/admin/")
self.infos.AppendText(out_infos(f"网站正在运行,根路由:{host}:{port}。可复制到浏览器打开", level=1))
self.portProgressRun.Enable(False)
self.portProgressStop.Enable(True)
self.sys_toolbar.EnableTool(self.shotcut_run.GetId(), False)
self.sys_toolbar.EnableTool(self.shotcut_stop.GetId(), True)
def onModelsGenerate(self, e):
"""创建模型"""
# dlg = ModelsCreateDialog(self)
# dlg.ShowModal()
# dlg.Close(True)
self.auiNotebook.AddPage(AutoGenModelsPanel(self.auiNotebook), '新增模型', select=True)
        self.auiNotebook.SetSelection(self.auiNotebook.GetPageCount() - 1)
def onSqliteManageTool(self, e):
"""跨平台的Sqlite工具"""
subFrame = SQLiteManageFrame(None)
subFrame.Show()
# manager = os.path.join(os.path.dirname(BASE_DIR), 'sqlite3Manager.pyw')
# subprocess.Popen(f'{env.getRealPythonOrder()} {manager}', shell=True)
def onMenusSettings(self, e):
"""Settings"""
dlg = SettingsDialog(self)
dlg.ShowModal()
dlg.Close(True)
def onHelpsDocumentation(self, e):
"""帮助文档"""
dlg = DocumentationDialog(self)
dlg.ShowModal()
dlg.Close(True)
def onCreateProject(self, e):
"""新建项目"""
dlg = ProjectCreateDialog(self)
dlg.ShowModal()
dlg.Close(True)
def onUrlsFix(self, e):
"""修复路由"""
for _ in self.unurls:
djangotools.fix_urls(_) # 逐个修复
self.infos.AppendText(out_infos(f"{_}注册完成!", level=1))
else:
self.unurls.clear()
self.infos.AppendText(out_infos(f"路由修复完成!", level=1))
if 'urls' in self.needfix:
self.needfix.remove('urls')
self._open_checked_fix_btn('urls', f_type='close')
def onUrlsCheck(self, e):
"""检查路由"""
# 检查情形有:
# 只针对以本工具生成的app,而不是Django原生命令python manage.py startapp ...
# 路由必须在主路径urls.py中用include()函数注册
# 默认未每个应用程序注册ulrs,取environment.py中的urls别名
self.unurls = set(djangotools.judge_in_main_urls()) # 全局监测
if len(self.unurls) <= 0:
self._open_checked_fix_btn('urls', f_type='close')
self.infos.AppendText(out_infos(f"路由检测完成,无已知错误。", level=1))
else:
msg = ','.join(self.unurls)
self.infos.AppendText(out_infos(f"{msg}未注册。", level=3))
self._open_checked_fix_btn('urls')
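    # Illustration (hypothetical app name 'blog'): an app's routes count as registered once the root
    # urls.py contains an include() entry for them, e.g.
    #     path('blog/', include('blog.urls')),
    # judge_in_main_urls() is assumed to return the app urls modules that are still missing such an entry.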
def onAdminRename(self, e):
"""重命名后台名称"""
dlg = AdminRenameDialog(self)
dlg.ShowModal()
dlg.Close(True)
def onViewsGenerateFunc(self, e):
"""多样式新增视图"""
# dlg = ViewGenerateDialog(self)
# dlg.ShowModal()
# dlg.Close(True)
self.auiNotebook.AddPage(AutoGenViewsPanel(self.auiNotebook), '新增视图', select=True)
        self.auiNotebook.SetSelection(self.auiNotebook.GetPageCount() - 1) # 页签焦点切换
def onFontsMinus(self, e):
"""显示框字体减小"""
env.setFontSize(step = 1, method = 'minus')
self._set_fonts(e)
def onFontsAdd(self, e):
"""显示框字体增大"""
env.setFontSize(step = 1, method = 'add')
self._set_fonts(e)
def OnKeyDown(self, event):
"""键盘监听"""
code = event.GetKeyCode()
if wx.WXK_NUMPAD_ENTER == code or 13 == code:
self.onExecCommand()
def onAbout(self, e):
"""关于"""
aboutInfo = wx.adv.AboutDialogInfo()
aboutInfo.SetName("JDjango")
aboutInfo.SetVersion(MY_APP_VERSION_STRING)
aboutInfo.SetDescription(T_("一种快速编写Django的辅助工具!QQ交流群:781517315"))
aboutInfo.SetCopyright("(C) 2020-2021")
aboutInfo.SetWebSite("https://github.com/JIYANG-PLUS/JDjango")
aboutInfo.AddDeveloper("笔小芯 -- <EMAIL>\n感谢:@coshare")
wx.adv.AboutBox(aboutInfo)
def onExit(self, e):
"""退出"""
self.Close(True)
def onOpen(self, e):
"""查看文件"""
self.dirname = r''
dlg = wx.FileDialog(self, "选择一个文件", self.dirname, "", "*.*", wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
with open(os.path.join(self.dirname, self.filename), 'r', encoding="utf-8") as f:
self.infos.SetValue(f.read())
dlg.Close(True)
def onClear(self, e):
"""清空提示台"""
self.infos.Clear()
def onGenerate(self, e):
"""生成应用程序"""
dlg = wx.TextEntryDialog(None, u"请输入应用程序名:", u"创建应用程序", u"")
if dlg.ShowModal() == wx.ID_OK:
message = dlg.GetValue() # 获取文本框中输入的值
returnStatus = djangotools.startapp(message)
if 0 == returnStatus:
self.unapps.add(message)
url_alias = [os.path.basename(_).split('.')[0] for _ in env.getUrlsAlias()][0]
self.unurls.add(f'{message}.{url_alias}')
self.infos.AppendText(out_infos(f"{message}应用程序创建成功!", level=1))
self.onAppsFix(e) # 自动完成注册
self.onUrlsFix(e) # 自动完成路由注册
self._init_config() # 重新初始化 配置文件【此操作为敏感操作】
self.infoBar.ShowMessage(f"{message}应用程序创建成功!", wx.ICON_INFORMATION)
else:
dlg_tip = wx.MessageDialog(None, f"{message}应用程序名已存在,或不符合纯字母+数字命名的约定!", CON_TIPS_COMMON, wx.OK | wx.ICON_INFORMATION)
if dlg_tip.ShowModal() == wx.ID_OK: pass
dlg_tip.Close(True)
dlg.Close(True)
def onButtonClick(self, e):
"""界面按钮点击事件"""
bId = e.GetId()
if bId == self.btn_select_project.GetId(): # 选择项目根路径
self.onSelectProjectRoot()
elif bId == self.btn_check_project.GetId(): # 检测/校验项目
self.onCheckGlobalProject(e)
self.infoBar.ShowMessage("检测成功,具体内容详见输出窗口。", wx.ICON_INFORMATION)
elif bId == self.btn_fixed_project.GetId(): # 修复项目
self.onFixGlobalProject(e)
self.infoBar.ShowMessage(f"修复成功!", wx.ICON_INFORMATION)
elif bId == self.btn_config_project.GetId(): # 项目配置和修改
dlg = SettingsDialog(self)
dlg.ShowModal()
dlg.Close(True)
elif bId == self.btn_exec.GetId(): # 执行命令
self.onExecCommand()
elif bId == self.btn_clear_text.GetId():
self.onClear(e)
def onBtnOpenDocs(self, e):
"""查看帮助文档"""
dlg = DocumentationDialog(self)
dlg.ShowModal()
dlg.Close(True)
def onExecCommand(self):
"""仿Linux命令"""
command = self.cmdInput.GetValue().strip()
try:
order_split = [_ for _ in command.split() if _]
if order_split:
args = order_split[1:]
if 'ls' == order_split[0].lower():
s = cmd.ls(*args)
elif 'pwd' == command.lower():
s = cmd.pwd()
elif 'cd' == order_split[0].lower():
s = cmd.cd(*args)
elif 'zip' == order_split[0].lower():
s = cmd.zip(*args)
elif 'unzip' == order_split[0].lower():
s = cmd.unzip(*args)
elif 'rm' == order_split[0].lower():
s = cmd.rm(*args)
elif 'mkdir' == order_split[0].lower():
s = cmd.mkdir(*args)
elif 'mkfile' == order_split[0].lower():
s = cmd.mkfile(*args)
elif 'ping' == order_split[0].lower():
s = cmd.ping(*args)
elif 'date' == command.lower():
s = cmd.date()
elif '>' == order_split[0].lower():
s = cmd.print(' '.join(args))
else:
s = cmd.exec(' '.join(order_split))
self.infos.AppendText(out_command_infos(command))
if s:
self.infos.AppendText(f"{s}\n")
self.cmdInput.Clear()
except Exception as e:
self.infos.AppendText(out_infos(f'{e}'))
def onSelectProjectRoot(self):
"""选择项目根路径【项目入口】"""
dlg = wx.FileDialog(self, "选择Django项目的manage.py文件", r'', "", "*.py", wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
self._disable_all_btn() # 初始化按钮状态
filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
if 'manage.py' == filename:
# self.path.SetValue(f'当前项目路径:{self.dirname}') 【为了美观而放弃】
self.SetStatusText(f'{self.dirname}', 2)
try:
self._init_config() # 初始化配置文件
except Exception as e:
self.infos.AppendText(out_infos('配置文件config.json初始化失败!', level=3))
else:
# 开放所有的检测按钮
self._open_all_check_btn()
# 开放部分必要按钮
self._open_part_necessary_btns()
self.infos.Clear()
# self.path.Clear()
self.infos.AppendText(out_infos(f'项目{os.path.basename(self.dirname)}导入成功!', level=1))
self.infoBar.ShowMessage(f'项目{os.path.basename(self.dirname)}导入成功!', wx.ICON_INFORMATION)
else:
self.infos.AppendText(out_infos('项目导入失败,请选择Django项目根路径下的manage.py文件。', level=3))
dlg.Close(True)
def onAppsCheck(self, e):
"""应用程序 检测"""
apps = get_configs(CONFIG_PATH)['app_names'] # 实际的 所有 应用程序
flag = 0
with open(self.path_settings, 'r', encoding='utf-8') as f:
settings_apps = eval(djangotools.get_list_patt_content_contain_code(retools.PATT_INSTALLED_APPS, f.read()))
for app in apps:
if app not in settings_apps:
self.unapps.add(app)
self.infos.AppendText(out_infos(f'{app}应用程序未注册!', 2))
flag = 1
if 1 == flag:
self._open_checked_fix_btn('apps')
else:
self._open_checked_fix_btn('apps', f_type='close')
self.infos.AppendText(out_infos('应用程序检测完成,无已知错误。', level=1))
def onCheckGlobalProject(self, e):
"""检测项目【全局】"""
self.onAppsCheck(e) # 校验 APP
self.onUrlsCheck(e) # 校验 路由
def onAppsFix(self, e):
"""修复未注册应用"""
try:
content = read_file(self.path_settings)
temp = retools.PATT_INSTALLED_APPS.search(content).group(0)
INSTALLED_APPS = temp.split('\n')
for _ in self.unapps:
INSTALLED_APPS.insert(-1, f" '{_}',")
self.infos.AppendText(out_infos(f'{_}注册完成。', level=1))
self.unapps.clear() # 清空未注册应用程序
except:
self.infos.AppendText(
out_infos('项目残缺,无法修复。请检查本项目是否为Django项目。', level=3))
else:
new_content = content.replace(temp, '\n'.join(INSTALLED_APPS))
write_file(self.path_settings, new_content)
self.infos.AppendText(out_infos('应用程序修复完成。', level=1))
if 'apps' in self.needfix:
self.needfix.remove('apps')
self._open_checked_fix_btn('apps', f_type='close') # 必须最后执行(控件的不可用性)
def onFixGlobalProject(self, e):
"""修复项目 【全局】"""
self.onAppsFix(e) # 修复 应用程序
self.onUrlsFix(e) # 修复 路由
def onAdminGenerateBase(self, e):
"""管理中心 简单配置"""
dlg = AdminCreateSimpleDialog(self)
dlg.ShowModal()
dlg.Close(True)
def onCloseWindow(self, e):
"""窗口关闭前操作"""
if self.timer is not None:
self.timer.Stop()
self.timer = None
self.Destroy()
def DoSearch(self, text):
return True
def onAuiNotebookClose(self, e):
"""切换标签关闭前"""
# print(self.auiNotebook.GetPageText(self.auiNotebook.GetCurrentPage()))
if (0 == e.GetSelection()):
# e.Skip()
# e.StopPropagation()
e.Veto() # 否决掉事件的发生
self.infoBar.ShowMessage(f"核心标签不允许关闭!", wx.ICON_WARNING)
def onLanguage(self, e):
"""语言"""
self.auiNotebook.AddPage(wx.Panel(self.auiNotebook), '测试', select=True)
        self.auiNotebook.SetSelection(self.auiNotebook.GetPageCount() - 1)
def OnTest(self, e):
"""开发用,测试函数"""
r = RichMsgDialog.showAskQuestionDialog(self, '测试', '标题')
print(r)
```
#### File: frames/sqliteFrame/sqliteFrame.py
```python
from .common import *
LEN_COL = 26 # 列数
class SQLiteManageFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, id = wx.ID_ANY, title = CON_SQLITE3_TITLE, pos = wx.DefaultPosition, size = wx.Size(1200,720), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL)
self._init_UI()
self._init_menus()
self._init_toolbar()
self._init_statusbar()
self.connectSQLiteObj = None # 连接对象
self._init_data()
def _init_data(self):
"""初始化界面数据"""
self._connect_sqlite3_default()
def _init_UI(self):
"""初始化页面控件"""
self.mainSizer = wx.BoxSizer( wx.VERTICAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
self.mainPanel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.mainPanelSizer = wx.BoxSizer( wx.VERTICAL )
self.mainPanel.SetSizer( self.mainPanelSizer )
self.mainPanel.Layout()
self.mainPanelSizer.Fit( self.mainPanel )
self.mainSizer.Add( self.mainPanel, 1, wx.EXPAND |wx.ALL, 5 )
self.SetSizer( self.mainSizer )
self.path = wx.TextCtrl(self.mainPanel, -1) # sqlite路径
self.path.SetEditable(False)
self.toolPanel = wx.Panel(self.mainPanel) # 工具按钮集
toolSizer = wx.BoxSizer( wx.HORIZONTAL ) # 水平
self.btnOpenSQLite3 = wx.Button( self.toolPanel, wx.ID_ANY, u"打开/切换数据源", wx.DefaultPosition, wx.DefaultSize, 0 )
toolSizer.Add(self.btnOpenSQLite3, 0, wx.EXPAND | wx.ALL, 2)
self.toolPanel.SetSizer(toolSizer)
# 分割面板(左右分割)
self.splitWindow = wx.SplitterWindow(self.mainPanel, -1, style = wx.SP_LIVE_UPDATE)
self.leftPanel = wx.Panel(self.splitWindow, style=wx.SUNKEN_BORDER) # 左子面板
self.rightPanel = wx.Panel(self.splitWindow, style=wx.SUNKEN_BORDER) # 右子面板
self.splitWindow.Initialize(self.leftPanel)
self.splitWindow.Initialize(self.rightPanel)
self.splitWindow.SplitVertically(self.leftPanel, self.rightPanel, 888)
# 左子面板继续分割
leftPanelSizer = wx.BoxSizer(wx.HORIZONTAL) # 水平
self.leftSplitWindow = wx.SplitterWindow(self.leftPanel, -1, style = wx.SP_LIVE_UPDATE)
self.leftLeftPanel = wx.Panel(self.leftSplitWindow, style=wx.SUNKEN_BORDER) # 左-左子面板
self.leftRightPanel = wx.Panel(self.leftSplitWindow, style=wx.SUNKEN_BORDER) # 左-右子面板
self.leftSplitWindow.Initialize(self.leftLeftPanel)
self.leftSplitWindow.Initialize(self.leftRightPanel)
self.leftSplitWindow.SplitVertically(self.leftLeftPanel, self.leftRightPanel, 212)
leftPanelSizer.Add(self.leftSplitWindow, 1, wx.EXPAND | wx.ALL, 0)
self.leftPanel.SetSizer(leftPanelSizer)
# 左面板-左面板 树形控件
leftLeftPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.leftLeftPanel.SetSizer(leftLeftPanelSizer)
self.tree = wx.TreeCtrl(self.leftLeftPanel, -1, wx.DefaultPosition, (-1, -1)) # , wx.TR_HAS_BUTTONS
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnClickTree, self.tree)
self.Bind( wx.EVT_TREE_ITEM_RIGHT_CLICK, self.onRightTreeClick, self.tree )
leftLeftPanelSizer.Add(self.tree, 1, wx.EXPAND | wx.ALL, 2)
self.mainPanelSizer.Add(self.path, 0, wx.EXPAND | wx.ALL, 2)
self.mainPanelSizer.Add(self.toolPanel, 0, wx.EXPAND | wx.ALL, 2)
self.mainPanelSizer.Add(self.splitWindow, 1, wx.EXPAND | wx.ALL, 2)
# 左-右面板 表格
self.leftRightPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.leftRightPanel.SetSizer(self.leftRightPanelSizer)
self._init_table()
# 右面板 SQL查询窗口
self.rightPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.rightPanel.SetSizer(self.rightPanelSizer)
self.labelSelect = wx.StaticText(self.rightPanel, -1, "SQL查询语句:")
self.inputSQL = wx.TextCtrl(self.rightPanel, -1, size=(-1, -1), style=wx.TE_MULTILINE)
self.sql_msg = wx.TextCtrl(self.rightPanel, -1, style=wx.TE_MULTILINE)
self.sql_msg.SetEditable(False)
self.btnExecute = buttons.GenButton(self.rightPanel, -1, label='执行')
self.rightPanelSizer.Add(self.labelSelect, 0, wx.EXPAND | wx.ALL, 2)
self.rightPanelSizer.Add(self.inputSQL, 1, wx.EXPAND | wx.ALL, 2)
self.rightPanelSizer.Add(self.sql_msg, 1, wx.EXPAND | wx.ALL, 2)
self.rightPanelSizer.Add(self.btnExecute, 0, wx.EXPAND | wx.ALL, 2)
# 事件监听
self.Bind(wx.EVT_BUTTON, self.onNewSQLite3, self.btnOpenSQLite3)
self.Bind(wx.EVT_BUTTON, self.onBtnExecute, self.btnExecute)
def _connect_sqlite3_default(self):
"""初始化连接"""
if os.path.exists(CONFIG_PATH):
# 读config.json配置文件
CONFIGS = get_configs(CONFIG_PATH)
if ('DATABASES' in CONFIGS) and ('default' in CONFIGS['DATABASES']) and ('NAME' in CONFIGS['DATABASES']['default']):
sqlite_path = CONFIGS['DATABASES']['default']['NAME']
if os.path.isfile(sqlite_path):
try:
self.connectSQLiteObj = sqlite3.connect(sqlite_path)
except:
self.connectSQLiteObj = None
else:
self.cursorObj = self.connectSQLiteObj.cursor()
# 初始化树
self._init_tree()
# 先提示,后显示
self.path.SetValue(f"SQLite数据库路径:{sqlite_path}")
dlg = wx.MessageDialog(self, f"已自动连接SQLite数据库,读取数据库路径{sqlite_path}", "提示信息", wx.OK)
dlg.ShowModal()
dlg.Destroy()
def onBtnExecute(self, e):
"""点击SQL执行按钮"""
sql = self.inputSQL.GetValue()
try:
self.cursorObj.execute(sql)
except:
self.sql_msg.SetValue("SQL语句错误,请检查后重新执行")
else:
self.connectSQLiteObj.commit() # 提交保存
affect_rows = self.cursorObj.rowcount
if affect_rows < 0:
self.sql_msg.SetValue("查询成功!")
# 显示查询结果
self.setTableData(self.cursorObj.fetchall())
self.setTableHeader(None)
else:
self.sql_msg.SetValue(f"执行成功,受影响行数:{affect_rows}。")
def _init_table(self):
"""初始化表格"""
self.attrbutesGrid = wx.grid.Grid( self.leftRightPanel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
# Grid
self.attrbutesGrid.CreateGrid(1000, LEN_COL)
self.attrbutesGrid.EnableEditing(False)
self.attrbutesGrid.EnableGridLines(True)
self.attrbutesGrid.EnableDragGridSize(False)
self.attrbutesGrid.SetMargins(0, 0)
# Columns
self.attrbutesGrid.EnableDragColMove(False)
self.attrbutesGrid.EnableDragColSize( True )
self.attrbutesGrid.SetColLabelSize( 30 )
self.attrbutesGrid.SetColLabelAlignment( wx.ALIGN_CENTER, wx.ALIGN_CENTER )
# Rows
self.attrbutesGrid.EnableDragRowSize( True )
self.attrbutesGrid.SetRowLabelSize( 70 )
self.attrbutesGrid.SetRowLabelAlignment( wx.ALIGN_CENTER, wx.ALIGN_CENTER )
# Label Appearance
# Cell Defaults
self.attrbutesGrid.SetDefaultCellAlignment( wx.ALIGN_LEFT, wx.ALIGN_TOP )
self.leftRightPanelSizer.Add( self.attrbutesGrid, 1, wx.EXPAND | wx.ALL, 2 )
for i in range(LEN_COL):
self.attrbutesGrid.SetColLabelValue(i, ' ')
def _clear_table(self):
"""清空表格"""
self.attrbutesGrid.ClearGrid()
def setTableHeader(self, headers=None):
"""初始化表头"""
if headers:
for i, _ in enumerate(headers):
self.attrbutesGrid.SetColLabelValue(i, _)
def setTableData(self, datas):
"""初始化表格数据"""
self._clear_table()
for i in range(LEN_COL):
self.attrbutesGrid.SetColLabelValue(i, ' ')
for row, _ in enumerate(datas):
for col, data in enumerate(_):
self.attrbutesGrid.SetCellValue(row, col, f'{data}')
def onRightTreeClick(self, e):
"""树子项右击直接查看属性"""
nodeName = self.tree.GetItemText(e.GetItem())
if nodeName != self.nodeRootName:
dlg = TableAttrbutesDialog(self, node_name = nodeName, datas = self.get_columns_name(nodeName))
dlg.ShowModal()
dlg.Destroy()
def _init_tree(self):
"""构建左-左目录树"""
database_name = [_[1] for _ in self.cursorObj.execute("PRAGMA database_list;")][0] # 数据库列表
tables = self.cursorObj.execute("select name from sqlite_master where type='table'").fetchall() # 所有的表名
self.nodeRootName = f'{database_name}[右击查看字段属性]'
self.root = self.tree.AddRoot(self.nodeRootName) # 根
for _ in sorted(tables, key=lambda x:x[0], reverse=False):
self.tree.AppendItem(self.root, _[0])
self._setStatusRight(f"数据库{database_name}连接成功!")
self.tree.ExpandAll() # 展开所有节点
def _clear_tree(self):
"""清空树"""
self.tree.Delete(self.root)
def _setStatusRight(self, msg):
"""设置底部状态栏右侧信息"""
self.SetStatusText(msg, 1)
def get_columns_name(self, table_name):
"""根据表名获取列名和列属性"""
# 序号、列名、类型、允许为NULL、默认值、主键
self.cursorObj.execute(f'pragma table_info({table_name})')
col_names = self.cursorObj.fetchall()
return col_names
def get_table_datas(self, table_name):
"""获取数据表格"""
self.cursorObj.execute(f'SELECT * FROM {table_name}')
datas = self.cursorObj.fetchall()
return datas
def OnClickTree(self, e):
"""双击树节点事件"""
nodeName = self.tree.GetItemText(e.GetItem())
if nodeName != self.nodeRootName:
self.setTableData(self.get_table_datas(nodeName))
self.setTableHeader([_[1] for _ in self.get_columns_name(nodeName)])
def _init_statusbar(self):
"""初始化底部状态条"""
self.statusBar = self.CreateStatusBar( 2, wx.STB_SIZEGRIP, wx.ID_ANY )
self.SetStatusWidths([-1, -2]) # 比例为1:2
def _init_toolbar(self):
"""初始化工具条"""
# self.toolBar = self.CreateToolBar( wx.TB_HORIZONTAL, wx.ID_ANY )
# self.toolBar.Realize()
def _init_menus(self):
"""初始化菜单项"""
self.menubar = wx.MenuBar( 0 )
self.linkBar = wx.Menu()
self.newSQLite3 = wx.MenuItem( self.linkBar, wx.ID_ANY, u"SQLite3", wx.EmptyString, wx.ITEM_NORMAL )
self.linkBar.Append( self.newSQLite3 )
self.menubar.Append( self.linkBar, u"连接" )
self.operateBar = wx.Menu()
self.DDL = wx.Menu()
self.DDLCreateDatabase = wx.MenuItem( self.DDL, wx.ID_ANY, u"CREATE DATABASE", wx.EmptyString, wx.ITEM_NORMAL )
self.DDL.Append( self.DDLCreateDatabase )
self.DDLCreateTable = wx.MenuItem( self.DDL, wx.ID_ANY, u"CREATE TABLE", wx.EmptyString, wx.ITEM_NORMAL )
self.DDL.Append( self.DDLCreateTable )
self.DDLAlterTable = wx.MenuItem( self.DDL, wx.ID_ANY, u"ALTER TABLE", wx.EmptyString, wx.ITEM_NORMAL )
self.DDL.Append( self.DDLAlterTable )
self.DDLDropTable = wx.MenuItem( self.DDL, wx.ID_ANY, u"DROP TABLE", wx.EmptyString, wx.ITEM_NORMAL )
self.DDL.Append( self.DDLDropTable )
self.DDLCreateView = wx.MenuItem( self.DDL, wx.ID_ANY, u"CREATE VIEW", wx.EmptyString, wx.ITEM_NORMAL )
self.DDL.Append( self.DDLCreateView )
self.DDLAlterView = wx.MenuItem( self.DDL, wx.ID_ANY, u"ALTER VIEW", wx.EmptyString, wx.ITEM_NORMAL )
self.DDL.Append( self.DDLAlterView )
self.DDLDropView = wx.MenuItem( self.DDL, wx.ID_ANY, u"DROP VIEW", wx.EmptyString, wx.ITEM_NORMAL )
self.DDL.Append( self.DDLDropView )
self.DDLTruncateTable = wx.MenuItem( self.DDL, wx.ID_ANY, u"TRUNCATE TABLE", wx.EmptyString, wx.ITEM_NORMAL )
self.DDL.Append( self.DDLTruncateTable )
self.operateBar.AppendSubMenu( self.DDL, u"DDL" )
self.DML = wx.Menu()
self.DMLInsert = wx.MenuItem( self.DML, wx.ID_ANY, u"INSERT", wx.EmptyString, wx.ITEM_NORMAL )
self.DML.Append( self.DMLInsert )
self.DMLUpdate = wx.MenuItem( self.DML, wx.ID_ANY, u"UPDATE", wx.EmptyString, wx.ITEM_NORMAL )
self.DML.Append( self.DMLUpdate )
self.DMLDelete = wx.MenuItem( self.DML, wx.ID_ANY, u"DELETE", wx.EmptyString, wx.ITEM_NORMAL )
self.DML.Append( self.DMLDelete )
self.operateBar.AppendSubMenu( self.DML, u"DML" )
self.menubar.Append( self.operateBar, u"数据库操作" )
self.lltimeMenu = wx.Menu()
self.lltimeInsert = wx.MenuItem( self.lltimeMenu, wx.ID_ANY, u"SQL导入", wx.EmptyString, wx.ITEM_NORMAL )
self.lltimeMenu.Append( self.lltimeInsert )
self.lltimeOutput = wx.MenuItem( self.lltimeMenu, wx.ID_ANY, u"SQL导出", wx.EmptyString, wx.ITEM_NORMAL )
self.lltimeMenu.Append( self.lltimeOutput )
self.menubar.Append( self.lltimeMenu, u"持久化" )
self.directExit = wx.Menu()
self.btnDirectExit = self.directExit.Append(wx.ID_ANY, "&退出", "退出")
self.menubar.Append( self.directExit, u"退出" )
self.SetMenuBar( self.menubar )
# 事件监听
self.Bind( wx.EVT_MENU, self.onNewSQLite3, id = self.newSQLite3.GetId() )
self.Bind(wx.EVT_MENU, self.onExit, self.btnDirectExit)
def _connect_sqlite3(self):
"""连接数据库"""
# 先关闭之前的连接
if self.connectSQLiteObj:
self.connectSQLiteObj.close()
self._clear_tree()
sqlite3_path = os.path.join(self.dirname, self.filename)
try:
self.connectSQLiteObj = sqlite3.connect(sqlite3_path)
except:
self.connectSQLiteObj = None
self._setStatusRight(f"连接失败!")
else:
self.cursorObj = self.connectSQLiteObj.cursor()
self._init_tree() # 初始化树
# 先提示,后显示
self.path.SetValue(f"SQLite数据库路径:{sqlite3_path}")
dlg = wx.MessageDialog(self, f"已连接SQLite数据库,读取数据库路径{sqlite3_path}", "成功", wx.OK)
dlg.ShowModal()
dlg.Destroy()
def onNewSQLite3(self, e):
"""创建新的SQLite3连接"""
dlg = wx.FileDialog(self, "选择SQLite文件", "", "", "*.*", wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
try:
self._connect_sqlite3()
except:
wx.MessageBox(f'打开失败', '错误', wx.OK | wx.ICON_INFORMATION)
else:
pass
dlg.Destroy()
def onExit(self, e):
"""退出"""
self.Close(True)
def __del__( self ):
"""释放资源"""
if self.connectSQLiteObj:
self.connectSQLiteObj.close()
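# Launch sketch (illustrative; assumes a running wx event loop):
# app = wx.App()
# frame = SQLiteManageFrame(None)
# frame.Show()
# app.MainLoop()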
```
#### File: panels/models/ShowUrlsModel.py
```python
import wx.dataview as wxdv
class Url(object):
"""真正的数据行"""
def __init__(self,
app: str, gen_way: str, alias: str,
relate_path: str, full_path: str, split_path: str,
app_file: str, belong_app: str, code_app_name: str,
url_level: str, op: bool=False
):
self.op = op
self.gen_way = gen_way
self.app = app # 非真正的应用程序名称,而是节点名称
self.alias = alias
self.full_path = full_path
self.relate_path = relate_path
self.split_path = split_path
self.app_file = app_file
self.code_app_name = code_app_name
self.belong_app = belong_app # 真实的应用程序名称
self.url_level = url_level
def __repr__(self):
return 'Url: %s-%s' % (self.app, self.alias)
class App(object):
"""节点构造"""
def __init__(self, name):
self.name = name # 应用程序名称
self.urls = [] # 路由集合
def __repr__(self):
return 'App: ' + self.name
class ShowUrlsModel(wxdv.PyDataViewModel):
def __init__(self, data):
wxdv.PyDataViewModel.__init__(self)
self.data = data # 获取传递过来的渲染数据
self.UseWeakRefs(True) # 数据节点是 弱引用 时启用
def GetColumnCount(self):
"""返回数据总的列数"""
return 12
def GetColumnType(self, col):
"""设置列的类型"""
# string 字符串;datetime 日期;bool 布尔类型复选框
types = [
'string', # 节点名称
'bool', # 操作
'string', # 路由级数
'string', # 相对路径
'string', # 代码生成方式
'string', # 别名
'string', # 全路由
'string', # 路由拆解
'string', # 归属应用程序
'string', # 应用程序检索名称
'string', # 归属文件
'string', # 解决 BUG 的空列
]
mapper = {i:_ for i,_ in enumerate(types)}
return mapper[col] # 返回给构造器类型
def GetChildren(self, parent, children):
"""获取所有的孩子节点数据"""
if not parent: # 没有父节点
for app in self.data: # 遍历节点的数据包
children.append(self.ObjectToItem(app))
return len(self.data)
node = self.ItemToObject(parent) # 有父节点的情况(复杂数据结构)
if isinstance(node, App): # 父节点类型检测
for url in node.urls: # 取复杂数据结构的数据区域
children.append(self.ObjectToItem(url))
return len(node.urls)
return 0
def IsContainer(self, item):
"""当前节点有子节点则返回 True """
if not item: # 节点
return True
node = self.ItemToObject(item)
if isinstance(node, App): # 数据包
return True
return False # 数据行
def GetParent(self, item):
"""返回该节点的父节点"""
if not item:
return wxdv.NullDataViewItem
node = self.ItemToObject(item)
if isinstance(node, App):
return wxdv.NullDataViewItem
elif isinstance(node, Url):
for g in self.data:
if g.name == node.app:
return self.ObjectToItem(g)
def HasValue(self, item, col):
"""判断是否是有效的数据行(非展开节点)"""
node = self.ItemToObject(item)
if isinstance(node, App) and col > 0: # 只在第一列渲染节点数据
return False
return True
def GetValue(self, item, col):
"""获取某一具体单元格的值"""
node = self.ItemToObject(item) # 获取当前节点对象
if isinstance(node, App):
            assert col == 0, "expandable nodes must be in the first column"  # check again
return node.name # 节点只需要名称
elif isinstance(node, Url): # 数据包的行
data = [
node.app,
node.op,
node.url_level,
node.relate_path,
node.gen_way,
node.alias,
node.full_path,
node.split_path,
node.belong_app,
node.code_app_name,
node.app_file,
"", # 解决最后一列无法显示的 BUG
]
mapper = {i:_ for i, _ in enumerate(data)}
return mapper[col]
else:
            raise RuntimeError("Unknown node type")
def GetRowListValue(self, item):
"""获取焦点所在行的列表数据集(不包含 首列 和 尾列)"""
node = self.ItemToObject(item)
if isinstance(node, App):
return [] # 节点返回空数据
return [self.GetValue(item, i) for i in range(self.GetColumnCount())[1:-1]]
def GetRowDictValue(self, item):
"""获取焦点所在行的字典数据集"""
node = self.ItemToObject(item)
if isinstance(node, App):
return {}
return {
"app": node.app,
"op": node.op,
"url_level": node.url_level,
"relate_path": node.relate_path,
"gen_way": node.gen_way,
"alias": node.alias,
"full_path": node.full_path,
"split_path": node.split_path,
"belong_app": node.belong_app,
"code_app_name": node.code_app_name,
"app_file": node.app_file,
"end_line": "",
}
def GetAttr(self, item, col, attr):
"""设置节点的属性"""
node = self.ItemToObject(item)
if isinstance(node, App):
attr.SetColour('blue')
attr.SetBold(True)
return True
return False
def SetValue(self, value, item, col):
"""第一列不允许改变,即不存在 0 == col 的情况"""
node = self.ItemToObject(item)
if isinstance(node, Url):
if col == 1:
node.op = value
# elif col == 2:
# node.relate_path = value
pass
return True
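# Usage sketch (illustrative data; in the project the model is populated by the URL panels):
# app_node = App("demo")
# app_node.urls.append(Url(app="demo", gen_way="auto", alias="index", relate_path="index/",
#                          full_path="/demo/index/", split_path="/demo/ + index/", app_file="urls.py",
#                          belong_app="demo", code_app_name="demo", url_level="2"))
# view = wxdv.DataViewCtrl(parent_panel)  # parent_panel is assumed to exist
# view.AssociateModel(ShowUrlsModel([app_node]))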
```
#### File: JDjango/panels/WxPythonCtrlsPanel.py
```python
from .common import *
from .PythonEditor import *
TREE_NODES = [
("按钮", ("普通按钮","渐变按钮","通用按钮")),
]
class WxPythonCtrlsPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, wx.ID_ANY)
self._init_UI()
def _init_UI(self):
sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(sizer)
self.SetAutoLayout(True)
self.SetBackgroundColour(CON_COLOR_PURE_WHITE)
self.panel = wx.Panel(self, wx.ID_ANY)
topsizer = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(topsizer)
topsizer.SetSizeHints(self.panel)
sizer.Add(self.panel, 1, wx.EXPAND)
# 分割面板(左右分割)
self.splitWindow = wx.SplitterWindow(self.panel, -1, style = wx.SP_LIVE_UPDATE)
topsizer.Add(self.splitWindow, 1, wx.EXPAND)
self.leftPanel = wx.Panel(self.splitWindow, style=wx.SUNKEN_BORDER) # 左子面板
self.rightPanel = wx.Panel(self.splitWindow, style=wx.SUNKEN_BORDER) # 右子面板
self.splitWindow.Initialize(self.leftPanel)
self.splitWindow.Initialize(self.rightPanel)
self.splitWindow.SplitVertically(self.leftPanel, self.rightPanel, 180)
self._init_leftWindow()
self._init_rightWindow()
def _init_leftWindow(self):
"""初始化左面板"""
leftPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.leftPanel.SetSizer(leftPanelSizer)
self.tree = wx.TreeCtrl(self.leftPanel, -1, wx.DefaultPosition, (-1, -1)) # , wx.TR_HAS_BUTTONS
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.onClickTree, self.tree)
leftPanelSizer.Add(self.tree, 1, wx.EXPAND | wx.ALL, 2)
self._init_tree_data()
def _init_rightWindow(self):
"""初始化右面板"""
rightPanelSizer = wx.BoxSizer(wx.VERTICAL)
self.rightPanel.SetSizer(rightPanelSizer)
self.py_editor = PythonEditor(self.rightPanel)
rightPanelSizer.Add(self.py_editor, 1, wx.EXPAND | wx.ALL, 2)
self._init_editor_data()
# self.code_editor = editor.Editor(self.rightPanel, -1, style=wx.SUNKEN_BORDER)
# rightPanelSizer.Add(self.code_editor, 1, wx.EXPAND | wx.ALL, 2)
def _init_editor_data(self):
"""初始化编辑器的数据"""
text = """\
def SayHi():
print("Hello World!")
"""
self.py_editor.SetText(text)
self.py_editor.EmptyUndoBuffer()
self.py_editor.Colourise(0, -1)
def _init_tree_data(self):
"""初始化树控件的数据"""
self.nodeRootName = "wxPython常用控件"
self.root = self.tree.AddRoot(self.nodeRootName)
for node in TREE_NODES:
temp_node = self.tree.AppendItem(self.root, node[0])
for _ in node[1]:
self.tree.AppendItem(temp_node, _)
self.tree.ExpandAll()
def onClickTree(self, e):
""""""
``` |
{
"source": "JIYANG-PLUS/ustjson",
"score": 3
} |
#### File: ustjson/CleanTools/_Delete.py
```python
__all__ = [
    'delete_head_tail_space',   # Strip generic whitespace from both ends; usually the first step of processing.
    'common_clean_repeat_word'  # Remove consecutive duplicate characters, e.g. '我我我爱爱爱爱中国' becomes '我爱中国'.
]
import re
import warnings
from typing import List
class PunctuationError(Exception): pass
def delete_head_tail_space(content: List[str], other_punctuation: str='') ->List[str]:
    """Strip generic whitespace characters from both ends of each string.
    content: the list of strings to process.
    other_punctuation: extra, non-standard whitespace-like characters to strip as well.
    return: the processed list of strings (entries that become empty are dropped).
    """
    delete_chars = ' \t\n\r\f\v' + other_punctuation
    return [x.strip(delete_chars) for x in content if bool(x.strip(delete_chars))]
def clean_series_space(t_str: str, other_punctuation: str='', replace_char: str=' ') ->str:
"""将分割符统一化,连续的泛空将删减至单一空格,方便后面数据的分解与整合。
不严谨说明:这里的泛空字符可以不严格地理解为一切想缩减删除替换的特殊字符。
other_punctuation: 其他的泛空字符。
replace_char: 替换字符默认是空格,可自定义替换符。对全文进行替换的时候不建议更改默认值。
但可利用此接口实现部分特殊需求,如:替换连续重复单一值为一个值。
return: 清洗后的紧凑字符串。
"""
warn_info = """\
本函数和combine_series_single配合使用时,建议用clean_and_combine_series_single一个函数替代.\
""".strip(' ')
warnings.warn(warn_info, DeprecationWarning)
if not isinstance(other_punctuation, str):
raise PunctuationError('传入标点符号格式错误。')
flag = ' \t\n\r\f\v'+other_punctuation
t_str = t_str.strip(flag)
t_str_len, result_str = len( t_str ), list( t_str )
i = 0
while i < t_str_len-1:
if result_str[i] in flag and result_str[i+1] in flag:
result_str[i] = replace_char
result_str.pop( i+1 )
t_str_len -= 1
i -= 1
i += 1
return ''.join( result_str )
def _re_clean_repeat_word(t_str: str) ->str:
"""正则清理连续重复的字,效率不高。功能暂定,未使用。
t_str: 要清理的字符串。
return: 清理重复值之后的字符串。
"""
patt = r'(?P<repeat>\w)\1'
result_str, counts = re.subn(patt, repl=lambda x:x['repeat'], string=t_str)
while counts:
result_str, counts = re.subn(patt, repl=lambda x:x['repeat'], string=result_str)
return result_str
def common_clean_repeat_word(t_str: str) ->str:
"""常规清理连续重复的字。功能暂定,未使用。
t_str: 要清理的字符串。
return: 清理重复值之后的字符串。
"""
list_t_str = list(t_str)
for i in range(1,len(list_t_str))[::-1]:
if list_t_str[i] == list_t_str[i-1]:
list_t_str.pop(i)
return ''.join(list_t_str)
def delete_puredigit(lines: List[str]):
"""删除纯数字字符的行。
包括删除一行中只有数字和标点的行。(只有数字和标点的行极有可能是页码)
"""
``` |
{
"source": "JIYANG-PLUS/xml-to-chain-dict",
"score": 3
} |
#### File: xml-to-chain-dict/xmltocd/_collections.py
```python
from collections import OrderedDict
class ChainDict(OrderedDict):
cdata_key = 'text_'
def __init__(self):
super(ChainDict, self).__init__()
def __getattr__(self, name):
if not name.startswith('_'):
if 1 == len(self[name]) and self.cdata_key in self[name]:
return self[name][self.cdata_key]
else:
return self[name]
return super(ChainDict, self).__getattr__(name)
def __setattr__(self, name, value):
if name == self.cdata_key:
...
if not name.startswith('_'):
self[name] = value
else:
super(ChainDict, self).__setattr__(name, value)
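if __name__ == "__main__":
    # Minimal usage sketch (illustrative; in practice ChainDict objects are built by the XML parser):
    node = ChainDict()
    node['title'] = {ChainDict.cdata_key: 'hello'}
    print(node.title)   # 'hello' -- the text content is unwrapped by __getattr__
    node.count = 3      # stored as node['count'] via __setattr__
    print(node['count'])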
``` |
{
"source": "JiyangZhang/seutil",
"score": 2
} |
#### File: seutil/seutil/GitHubUtils.py
```python
import math
import traceback
from datetime import datetime
from time import sleep
from typing import *
from github import Github, RateLimitExceededException
from github.GithubException import GithubException
from github.NamedUser import NamedUser
from github.Repository import Repository
from . import _config
from .LoggingUtils import LoggingUtils
from .BashUtils import BashUtils
class GitHubUtils:
logger = LoggingUtils.get_logger("GitHubUtils", LoggingUtils.DEBUG)
GITHUB_SEARCH_ITEMS_MAX = 1000
try:
DEFAULT_ACCESS_TOKEN = _config.get_config("github_access_token")
DEFAULT_GITHUB_OBJECT = Github(DEFAULT_ACCESS_TOKEN, per_page=100)
except:
DEFAULT_ACCESS_TOKEN = None
DEFAULT_GITHUB_OBJECT = None
logger.info("Fail to get github_access_token from config file. Using GitHubUtils APIs will require compulsory input access_token")
# end try
@classmethod
def get_github(cls, access_token: str = None) -> Github:
if access_token is None:
return cls.DEFAULT_GITHUB_OBJECT
else:
return Github(access_token)
class wait_rate_limit:
"""
Wait for rate limit of the github accessor. For use with "with".
Use the default github accessor if no argument is given.
"""
DEFAULT_GITHUB_OBJECT = None
logger = None
def __init__(self, github: Github = DEFAULT_GITHUB_OBJECT):
self.github = github
return
def __enter__(self):
if self.github is None:
self.github = self.DEFAULT_GITHUB_OBJECT
# end if
# Check rate limit
rate_limit_remain, rate_limit = self.github.rate_limiting
if rate_limit_remain <= 1:
self.logger.debug("Rate limit {} / {}".format(rate_limit_remain, rate_limit))
rate_limit_reset_time = datetime.fromtimestamp(self.github.rate_limiting_resettime)
rate_limit_wait_seconds = math.ceil((rate_limit_reset_time - datetime.now()).total_seconds()) + 1
if rate_limit_wait_seconds > 0:
self.logger.warning("Rate limit will recover at: {}, will wait for {} seconds.".format(rate_limit_reset_time, rate_limit_wait_seconds))
sleep(rate_limit_wait_seconds)
self.logger.warning("Rate limit recovered")
# end if
# end if
return self.github
def __exit__(self, type, value, tb):
return
# end class
wait_rate_limit.DEFAULT_GITHUB_OBJECT = DEFAULT_GITHUB_OBJECT
wait_rate_limit.logger = logger
T = TypeVar("T")
@classmethod
def ensure_github_api_call(cls, call: Callable[[Github], T], github: Github = DEFAULT_GITHUB_OBJECT, max_retry_times: int = float("inf")) -> T:
retry_times = 0
while True:
try:
with cls.wait_rate_limit(github) as g:
return call(g)
# end with
except (GithubException, RateLimitExceededException) as e:
if e.status == 422:
cls.logger.warning("Validation Error. Will not retry.")
raise
else:
cls.logger.warning("Unexpected exception during api call: {}".format(traceback.format_exc()))
retry_times += 1
if retry_times > max_retry_times:
cls.logger.warning("Exceeding max retry times {}".format(max_retry_times))
raise
# end if
retry_wait_time = min(retry_times * 30, 600)
cls.logger.warning("Will wait {} seconds before retry {}".format(retry_wait_time, retry_times))
sleep(retry_wait_time)
# end try
# end while
@classmethod
def search_repos(cls, q: str = "", sort: str = "stars", order: str = "desc",
is_allow_fork: bool = False,
max_num_repos: int = GITHUB_SEARCH_ITEMS_MAX,
github: Github = DEFAULT_GITHUB_OBJECT,
max_retry_times: int = float("inf"),
*_, **qualifiers) -> List[Repository]:
"""
Searches the repos by querying GitHub API v3.
:return: a list of full names of the repos match the query.
"""
cls.logger.debug("Search for repos with query {}, sort {}, order {}".format(q, sort, order))
repos = list()
num_repos = 0
repos_iterator = iter(github.search_repositories(q, sort, order, **qualifiers))
while True:
try:
repo = cls.ensure_github_api_call(lambda g: next(repos_iterator), github, max_retry_times)
# Check fork
if not is_allow_fork:
if repo.fork:
continue
# end if, if
repos.append(repo)
num_repos += 1
# Check number
if num_repos >= max_num_repos:
break
# end if
except StopIteration:
break
except:
cls.logger.warning("Unknown exception: {}".format(traceback.format_exc()))
cls.logger.warning("Returning partial results")
break
# end try except
# end while
        if num_repos < max_num_repos:
            cls.logger.warning("Got {}/{} repos".format(num_repos, max_num_repos))
        else:
            cls.logger.info("Got {}/{} repos".format(num_repos, max_num_repos))
        # end if
return repos
@classmethod
def search_users(cls, q: str = "", sort: str = "repositories", order: str = "desc",
max_num_users: int = GITHUB_SEARCH_ITEMS_MAX,
github: Github = DEFAULT_GITHUB_OBJECT,
max_retry_times: int = float("inf"),
*_, **qualifiers) -> List[NamedUser]:
"""
Searches the users by querying GitHub API v3.
:return: a list of usernames (login) of the users match the query.
"""
cls.logger.debug("Search for users with query {}, sort {}, order {}".format(q, sort, order))
users = list()
num_users = 0
users_iterator = iter(github.search_users(q, sort, order, **qualifiers))
while True:
try:
user = cls.ensure_github_api_call(lambda g: next(users_iterator), github, max_retry_times)
users.append(user)
num_users += 1
# Check number
if num_users >= max_num_users:
break
# end if
except StopIteration:
break
except:
cls.logger.warning("Unknown exception: {}".format(traceback.format_exc()))
cls.logger.warning("Returning partial results.")
break
# end try except
# end while
if num_users < max_num_users:
cls.logger.warning("Got {}/{} users".format(num_users, max_num_users))
else:
cls.logger.info("Got {}/{} users".format(num_users, max_num_users))
# end if
return users
@classmethod
def search_repos_of_language(cls, language: str, max_num_repos: int = float("inf"),
is_allow_fork: bool = False,
max_retry_times: int = float("inf"),
strategies: List[str] = None) -> List[Repository]:
"""
Searches for all the repos of the language.
:return: a list of full names of matching repos.
"""
if strategies is None:
strategies = ["search_repos", "search_users"]
# end if
# Check supported strategies
supported_strategies = ["search_repos", "search_users", "enum_users"]
for strategy in strategies:
assert strategy in supported_strategies, strategy
# end for
names_repos = dict()
try:
# Strategy 1: search repos (limited to 1000)
strategy = "search_repos"
if strategy in strategies:
cls.logger.info("Using strategy {}".format(strategy))
new_repos = cls.search_repos("language:{}".format(language), is_allow_fork=is_allow_fork, max_retry_times=max_retry_times, max_num_repos=max_num_repos)
for repo in new_repos:
names_repos[repo.full_name] = repo
# end for
cls.logger.warning("Progress {}/{} repos.".format(len(names_repos), max_num_repos))
if len(names_repos) >= max_num_repos:
return list(names_repos.values())
# end if
# end if
# Strategy 2: search users (~37000?)
strategy = "search_users"
if strategy in strategies:
cls.logger.info("Using strategy {}".format(strategy))
s_users = set()
# s_users = s_users.union([u.login for u in cls.search_users("language:{}".format(language), sort="repositories", max_retry_times=max_retry_times)])
s_users = s_users.union([u.login for u in cls.search_users("language:{}".format(language), sort="followers", max_retry_times=max_retry_times)])
# s_users = s_users.union([u.login for u in cls.search_users("language:{}".format(language), sort="joined", max_retry_times=max_retry_times)])
users_count = 0
total_users_count = len(s_users)
for user in s_users:
try:
new_repos = cls.search_repos("language:{} user:{}".format(language, user), is_allow_fork=is_allow_fork, max_retry_times=max_retry_times)
except GithubException as e:
cls.logger.warning("Cannot get the repos of user {}".format(user))
continue
# end try
for repo in new_repos:
names_repos[repo.full_name] = repo
# end for
users_count += 1
cls.logger.debug("Progress {}/{} repos, {}/{} users.".format(len(names_repos), max_num_repos, users_count, total_users_count))
if len(names_repos) >= max_num_repos:
return list(names_repos.values())
# end if
# end for
# end if
# Strategy 3: enum users (?)
strategy = "enum_users"
if strategy in strategies:
cls.logger.warning("Strategy {} is not implemented yet.".format(strategy))
cls.logger.warning("Nothing happens.")
# end if
except KeyboardInterrupt as e:
cls.logger.warning("Interrupted. Returning partial results.")
finally:
cls.logger.warning("Got {}/{} repos.".format(len(names_repos), max_num_repos))
return list(names_repos.values())
@classmethod
def is_url_valid_git_repo(cls, url: str) -> bool:
if BashUtils.run(f"git ls-remote {url}").return_code == 0:
return True
else:
return False
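# Usage sketch (illustrative; requires a valid access token and network access):
# g = GitHubUtils.get_github("<access_token>")
# repos = GitHubUtils.search_repos("language:java stars:>100", max_num_repos=10, github=g)
# print([r.full_name for r in repos])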
```
#### File: seutil/seutil/IOUtils.py
```python
from typing import *
from collections import defaultdict
from enum import Enum
import inspect
import json
import ijson
import os
from pathlib import Path
import pickle as pkl
import pydoc
import recordclass
import shutil
import subprocess
import typing_inspect
import yaml
from .BashUtils import BashUtils
class IOUtils:
"""
Utility functions for I/O.
"""
# ----------
# Directory operations
class cd:
"""
Change directory. Usage:
with IOUtils.cd(path):
<statements>
# end with
Using a string path is supported for backward compatibility.
Using pathlib.Path should be preferred.
"""
def __init__(self, path: Union[str, Path]):
if isinstance(path, str):
path = Path(path)
# end if
self.path = path # Path
self.old_path = Path.cwd() # Path
return
def __enter__(self):
os.chdir(self.path)
return
def __exit__(self, type, value, tb):
os.chdir(self.old_path)
return
# Deprecated
# Use pathlib.Path.is_dir() instead
@classmethod
def has_dir(cls, dirname) -> bool:
return os.path.isdir(dirname)
# Deprecated
# Use pathlib.Path.mkdir() instead
@classmethod
def mk_dir(cls, dirname, mode=0o777,
is_remove_if_exists: bool = False,
is_make_parent: bool = True):
"""
Makes the directory.
:param dirname: the name of the directory.
:param mode: mode of the directory.
:param is_remove_if_exists: if the directory with name already exists, whether to remove.
:param is_make_parent: if make parent directory if not exists.
"""
if cls.has_dir(dirname):
if is_remove_if_exists:
rm_cmd = "rm {} -rf".format(dirname)
subprocess.run(["bash", "-c", rm_cmd])
else:
return
# end if
parent_dir = os.path.dirname(dirname)
if not cls.has_dir(parent_dir):
if is_make_parent:
cls.mk_dir(parent_dir, mode, is_remove_if_exists=False, is_make_parent=True)
else:
raise FileNotFoundError("Path not found: {}".format(parent_dir))
# end if
os.mkdir(dirname, mode)
return
@classmethod
def rm_dir(cls, path: Path,
ignore_non_exist: bool = True,
force: bool = True,
):
"""
Removes the directory.
:param path: the name of the directory.
:param ignore_non_exist: ignores error if the directory does not exist.
:param force: force remove the directory even it's non-empty.
"""
if path.is_dir():
if force:
shutil.rmtree(path, ignore_errors=True)
else:
path.rmdir()
# end if
else:
if ignore_non_exist:
return
else:
raise FileNotFoundError("Trying to remove non-exist directory {}".format(path))
# end if
# end if
return
@classmethod
def rm(cls, path: Path,
ignore_non_exist: bool = True,
force: bool = True,
):
"""
Removes the file/dir.
:param path: the path to the file/dir to remove.
:param ignore_non_exist: ignores error if the file/dir does not exist.
:param force: force remove the file even it's protected / dir even it's non-empty.
"""
if path.exists():
if force:
BashUtils.run(f"rm -rf {path}")
else:
BashUtils.run(f"rm -r {path}")
# end if
else:
if ignore_non_exist:
return
else:
raise FileNotFoundError("Trying to remove non-exist file/dir {}".format(path))
# end if
# end if
return
# ----------
# File operations
class Format(Enum):
txt = 0, # Plain text format
pkl = 1, # Pickle format
jsonPretty = 2, # Json format, with pretty-printing
jsonNoSort = 3, # Json format, with pretty-printing, without sorting the keys in dictionary
json = 4, # Json format, without pretty-printing (eveything on one line)
jsonList = 5, # Json format, assuming a list structure and put each item on one line
txtList = 6, # Plain text format, dump/load as a list where each line is an element
@classmethod
def from_str(cls, string: str) -> "IOUtils.Format":
return {
"pkl": IOUtils.Format.pkl,
"json": IOUtils.Format.jsonPretty,
"json-nosort": IOUtils.Format.jsonNoSort,
"json_nosort": IOUtils.Format.jsonNoSort,
"json-min": IOUtils.Format.json,
"json_min": IOUtils.Format.json,
}.get(string, IOUtils.Format.txt)
def get_extension(self) -> str:
return {
IOUtils.Format.txt: "txt",
IOUtils.Format.pkl: "pkl",
IOUtils.Format.jsonPretty: "json",
IOUtils.Format.jsonNoSort: "json",
IOUtils.Format.json: "json",
IOUtils.Format.jsonList: "jsonl",
IOUtils.Format.txtList: "txt",
}.get(self, "unknown")
IO_FORMATS: Dict[Format, Dict] = defaultdict(lambda: {
"mode": "t",
"dumpf": (lambda obj, f: f.write(obj)),
"loadf": (lambda f: f.read())
})
IO_FORMATS[Format.pkl]["mode"] = "b"
IO_FORMATS[Format.pkl]["dumpf"] = lambda obj, f: pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL)
IO_FORMATS[Format.pkl]["loadf"] = lambda f: pkl.load(f)
IO_FORMATS[Format.jsonPretty]["dumpf"] = lambda obj, f: json.dump(obj, f, indent=4, sort_keys=True)
IO_FORMATS[Format.jsonPretty]["loadf"] = lambda f: yaml.load(f, Loader=yaml.FullLoader) # allows some format errors (e.g., trailing commas)
IO_FORMATS[Format.jsonNoSort]["dumpf"] = lambda obj, f: json.dump(obj, f, indent=4)
IO_FORMATS[Format.jsonNoSort]["loadf"] = lambda f: yaml.load(f, Loader=yaml.FullLoader) # allows some format errors (e.g., trailing commas)
IO_FORMATS[Format.json]["dumpf"] = lambda obj, f: json.dump(obj, f, sort_keys=True)
IO_FORMATS[Format.json]["loadf"] = lambda f: json.load(f)
@classmethod
def dumpf_json_list(cls, obj, f):
for item in obj:
f.write(json.dumps(item) + "\n")
@classmethod
def loadf_json_list(cls, f) -> List:
obj = []
for line in f.readlines():
obj.append(json.loads(line))
return obj
IO_FORMATS[Format.jsonList]["dumpf"] = lambda obj, f: IOUtils.dumpf_json_list(obj, f)
IO_FORMATS[Format.jsonList]["loadf"] = lambda f: IOUtils.loadf_json_list(f)
@classmethod
def dumpf_txt_list(cls, obj, f):
for item in obj:
f.write(str(item) + "\n")
@classmethod
def loadf_txt_list(cls, f) -> List:
return f.read().splitlines()
IO_FORMATS[Format.txtList]["dumpf"] = lambda obj, f: IOUtils.dumpf_txt_list(obj, f)
IO_FORMATS[Format.txtList]["loadf"] = lambda f: IOUtils.loadf_txt_list(f)
@classmethod
def dump(cls, file_path: Union[str, Path], obj: object, fmt: Union[Format, str] = Format.jsonPretty):
if isinstance(file_path, str):
file_path = Path(file_path)
# end if
file_path.touch(exist_ok=True)
if isinstance(fmt, str): fmt = cls.Format.from_str(fmt)
conf = cls.IO_FORMATS[fmt]
with open(file_path, "w" + conf["mode"]) as f:
conf["dumpf"](obj, f)
return
@classmethod
def load(cls, file_path: Union[str, Path], fmt: Union[Format, str] = Format.jsonPretty) -> Any:
if isinstance(file_path, str):
file_path = Path(file_path)
# end if
if isinstance(fmt, str): fmt = cls.Format.from_str(fmt)
conf = cls.IO_FORMATS[fmt]
try:
with open(file_path, "r" + conf["mode"]) as f:
obj = conf["loadf"](f)
# end with
except FileNotFoundError as e:
raise FileNotFoundError(str(e) + " at {}".format(Path.cwd()))
# end try
return obj
@classmethod
def load_json_stream(cls, file_path: Union[str, Path], fmt: Union[Format, str] = Format.jsonPretty):
"""
Reads large json file containing a list of data iteratively. Returns a generator function.
"""
if isinstance(file_path, str):
file_path = Path(file_path)
# end if
if isinstance(fmt, str): fmt = cls.Format.from_str(fmt)
conf = cls.IO_FORMATS[fmt]
try:
with open(file_path, "r" + conf["mode"]) as f:
objects = ijson.items(f, "item")
for obj in objects:
yield obj
# end with
except FileNotFoundError as e:
raise FileNotFoundError(str(e) + " at {}".format(Path.cwd()))
# end try
@classmethod
def update_json(cls, file_name, data):
"""
Updates the json data file. The data should be dict like (support update).
"""
try:
orig_data = cls.load(file_name)
except:
orig_data = dict()
# end try
orig_data.update(data)
cls.dump(file_name, orig_data)
return orig_data
@classmethod
def extend_json(cls, file_name, data):
"""
Updates the json data file. The data should be list like (support extend).
"""
try:
orig_data = cls.load(file_name)
except:
orig_data = list()
# end try
orig_data.extend(data)
cls.dump(file_name, orig_data)
return orig_data
JSONFY_FUNC_NAME = "jsonfy"
DEJSONFY_FUNC_NAME = "dejsonfy"
JSONFY_ATTR_FIELD_NAME = "jsonfy_attr"
@classmethod
def jsonfy(cls, obj):
"""
Turns an object to a json-compatible data structure.
A json-compatible data can only have list, dict (with str keys), str, int and float.
Any object of other classes will be casted through (try each option in order, if applicable):
1. JSONFY function, which takes no argument and returns a json-compatible data;
should have the name {@link IOUtils#JSONFY_FUNC_NAME};
2. JSONFY_ATTR field, which is a dict of attribute name-type pairs, that will be extracted from the object to a dict;
should have the name {@link IOUtils#JSONFY_ATTR_FIELD_NAME};
3. cast to a string.
"""
if obj is None:
return None
elif isinstance(obj, (int, float, str, bool)):
# primitive types
return obj
elif isinstance(obj, (list, set, tuple)):
# array
return [cls.jsonfy(item) for item in obj]
elif isinstance(obj, dict):
# dict
return {k: cls.jsonfy(v) for k, v in obj.items()}
elif isinstance(obj, Enum):
# Enum
return obj.value
elif hasattr(obj, cls.JSONFY_FUNC_NAME):
# with jsonfy function
return getattr(obj, cls.JSONFY_FUNC_NAME)()
elif hasattr(obj, cls.JSONFY_ATTR_FIELD_NAME):
# with jsonfy_attr annotations
return {attr: cls.jsonfy(getattr(obj, attr)) for attr in getattr(obj, cls.JSONFY_ATTR_FIELD_NAME).keys()}
elif isinstance(obj, recordclass.mutabletuple):
# RecordClass
return {k: cls.jsonfy(v) for k, v in obj.__dict__.items()}
else:
# Last effort: toString
return repr(obj)
@classmethod
def dejsonfy(cls, data, clz=None):
"""
Turns a json-compatible data structure to an object of class {@code clz}.
If {@code clz} is not assigned, the data will be casted to dict or list if possible.
Otherwise the data will be casted to the object through (try each option in order, if applicable):
1. DEJSONFY function, which takes the data as argument and returns a object;
should have the name {@link IOUtils#DEJSONFY_FUNC_NAME};
2. JSONFY_ATTR field, which is a dict of attribute name-type pairs, that will be extracted from the object to a dict;
should have the name {@link IOUtils#JSONFY_ATTR_FIELD_NAME};
"""
if isinstance(clz, str):
clz = pydoc.locate(clz)
# end if
if data is None:
# None value
return None
elif clz is not None and typing_inspect.get_origin(clz) == list:
# List[XXX]
return [cls.dejsonfy(item, clz.__args__[0]) for item in data]
elif clz is not None and typing_inspect.get_origin(clz) == tuple:
# Tuple[XXX]
return tuple([cls.dejsonfy(item, clz.__args__[0]) for item in data])
elif clz is not None and typing_inspect.get_origin(clz) == set:
# Set[XXX]
return set([cls.dejsonfy(item, clz.__args__[0]) for item in data])
elif isinstance(data, list):
# array
return [cls.dejsonfy(item, clz) for item in data]
elif clz is not None and hasattr(clz, cls.DEJSONFY_FUNC_NAME):
# with dejsonfy function
return clz.dejsonfy(data)
elif clz is not None and hasattr(clz, cls.JSONFY_ATTR_FIELD_NAME):
# with jsonfy_attr annotations
obj = clz()
for attr, attr_clz in getattr(clz, cls.JSONFY_ATTR_FIELD_NAME).items():
if attr in data:
setattr(obj, attr, cls.dejsonfy(data[attr], attr_clz))
# end for, if
return obj
elif clz is not None and inspect.isclass(clz) and issubclass(clz, recordclass.mutabletuple):
# RecordClass
field_values = dict()
for f, t in get_type_hints(clz).items():
if f in data: field_values[f] = cls.dejsonfy(data.get(f), t)
# end for
return clz(**field_values)
elif clz is not None and inspect.isclass(clz) and issubclass(clz, Enum):
# Enum
return clz(data)
elif isinstance(data, dict):
# dict
return {k: cls.dejsonfy(v, clz) for k, v in data.items()}
else:
# primitive types / unresolvable things
if clz is not None:
try: return clz(data)
except: pass
# end if
return data
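if __name__ == "__main__":
    # Minimal usage sketch (illustrative; the file path is arbitrary):
    import tempfile
    demo_path = Path(tempfile.gettempdir()) / "ioutils_demo.json"
    IOUtils.dump(demo_path, {"nums": IOUtils.jsonfy({1, 2, 3})}, IOUtils.Format.jsonPretty)
    print(IOUtils.load(demo_path))  # {'nums': [1, 2, 3]}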
```
#### File: seutil/project/ProjectResults.py
```python
from pathlib import Path
from typing import *
from .. import IOUtils, Stream
class ProjectResults:
def __init__(self):
self.full_name: str = "UNKNOWN"
self.results_dir: Path = None
return
@classmethod
def from_base_results_dir(cls, base_results_dir: Path) -> List["ProjectResults"]:
full_names = Stream.of_dirs(base_results_dir)
return [cls.get_project_results(n, base_results_dir/n) for n in full_names]
@classmethod
def get_project_results(cls, full_name: str, results_dir: Path) -> "ProjectResults":
results = cls()
results.full_name = full_name
results.results_dir = results_dir
return results
@property
def meta_dir(self) -> Path:
meta_dir: Path = self.results_dir / "META"
meta_dir.mkdir(parents=True, exist_ok=True)
return meta_dir
def load_meta_result(self, file_name: str, fmt: str = "json") -> Any:
return IOUtils.load(self.meta_dir / file_name, fmt)
def dump_meta_result(self, file_name: str, data: Any, fmt: str = "json") -> None:
IOUtils.dump(self.meta_dir / file_name, data, fmt)
return
def get_revision_dir(self, revision: str) -> Path:
revision_dir = self.results_dir / revision
revision_dir.mkdir(parents=True, exist_ok=True)
return revision_dir
def load_revision_result(self, revision: str, file_name: str, fmt: str = "json") -> Any:
return IOUtils.load(self.get_revision_dir(revision) / file_name, fmt)
def dump_revision_result(self, revision: str, file_name: str, data: Any, fmt: str = "json") -> None:
IOUtils.dump(self.get_revision_dir(revision) / file_name, data, fmt)
return
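# Usage sketch (illustrative paths; meta/revision directories are created on demand):
# results = ProjectResults.get_project_results("owner/repo", Path("/tmp/results/owner/repo"))
# results.dump_meta_result("stats.json", {"stars": 1})
# print(results.load_meta_result("stats.json"))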
```
#### File: seutil/seutil/Stream.py
```python
from pathlib import Path
import numpy as np
import random
import subprocess
from typing import *
from .IOUtils import IOUtils
class Stream:
"""
Streams help manipulate sequences of objects.
"""
def __init__(self):
self.items = list()
return
@classmethod
def of(cls, one_or_more_items):
"""
Get a new stream from the item / items.
:param one_or_more_items: is converted to list with builtin `list` function.
"""
stream = Stream()
if one_or_more_items is not None:
stream.items = list(one_or_more_items)
# end if, if
return stream
@classmethod
def of_files(cls, dir_path: Union[str, Path]):
"""
Get a stream of the files under the directory.
"""
with IOUtils.cd(dir_path):
cmd_find = "find -mindepth 1 -maxdepth 1 -type f"
files = subprocess.run(["bash","-c",cmd_find], stdout=subprocess.PIPE).stdout.decode("utf-8").split("\n")[:-1]
# end with
files = [file[2:] for file in files]
stream = cls.of(files)
stream.sorted()
return stream
@classmethod
def of_dirs(cls, dir_path: Union[str, Path]):
"""
Get a stream of the sub-directories under the directory.
"""
with IOUtils.cd(dir_path):
cmd_find = "find -mindepth 1 -maxdepth 1 -type d"
dirs = subprocess.run(["bash","-c",cmd_find], stdout=subprocess.PIPE).stdout.decode("utf-8").split("\n")[:-1]
# end with
dirs = [dir[2:] for dir in dirs]
stream = cls.of(dirs)
stream.sorted()
return stream
def filter(self, predicate_func: Callable[[object], bool]):
"""
Returns a stream consisting of the elements of this stream that match the given predicate.
"""
return Stream.of(item for item in self.items if predicate_func(item))
def count(self):
return sum(self.items)
def reduce(self, count_func: Callable[[str], float] = lambda x: 1):
return sum([count_func(f) for f in self.items])
def sorted(self, key: Callable[[str], object] = lambda f: f,
reverse: bool = False):
"""
Sorts the list of files in the dataset.
"""
list.sort(self.items, key=key, reverse=reverse)
return self
def map(self, map_func: Callable[[str], object],
errors: str = "raise", default: object = ""):
def new_items_generator():
for item in self.items:
try:
new_item = map_func(item)
except:
if errors == "ignore":
yield default
else:
raise
else:
yield new_item
# end for
# end def
return Stream.of(new_items_generator())
def peak(self, peak_func: Callable[[str], None],
errors: str = "ignore"):
for item in self.items:
try:
peak_func(item)
except:
if errors == "ignore":
continue
else:
raise
# end for
return self
def split(self, fraction_list: List[float],
count_func: Callable[[str], float] = lambda x: 1):
"""
Splits the dataset as each part specified by the fractions (assumed to sum up to 1).
Splitting is done by finding the cutting points. If randomization is needed, call shuffle first.
:param count_func: customize the number of data counts in each file.
"""
if self.is_empty():
return tuple(Stream() for i in range(len(fraction_list)))
count_list = [count_func(f) for f in self.items]
cum_count_list = np.cumsum(count_list)
cum_expected_count_list = [f * cum_count_list[-1] for f in np.cumsum(fraction_list)]
cut_index_list = []
last_i = 0
for i, cum_count in enumerate(cum_count_list):
if cum_count >= cum_expected_count_list[len(cut_index_list)]:
last_i = i+1
cut_index_list.append(i+1)
if len(cut_index_list) >= len(cum_expected_count_list):
break
# end if
# end for if
if last_i != len(cum_count_list):
cut_index_list.append(len(cum_count_list))
# end if
cut_index_list.insert(0,0)
return tuple(Stream.of(self.items[cut_index_list[i]:cut_index_list[i + 1]]) for i in range(len(cut_index_list) - 1))
def shuffle(self, seed=None):
"""
Shuffles the list of files in the dataset.
"""
random.seed(seed)
random.shuffle(self.items)
return self
def get(self, index: int):
return self.items[index]
def is_empty(self):
return len(self.items) == 0
def __getitem__(self, item):
new_items = self.items.__getitem__(item)
if not isinstance(item, slice):
new_items = [new_items]
return Stream.of(new_items)
def __setitem__(self, key, value):
return self.items.__setitem__(key, value)
def __delitem__(self, key):
return self.items.__delitem__(key)
def __iter__(self):
return self.items.__iter__()
def __len__(self):
return self.items.__len__()
def __str__(self):
return "Stream with {} items".format(len(self.items))
def __repr__(self):
return self.__str__()
def __add__(self, other):
if isinstance(other, Stream):
return Stream.of(self.items+other.items)
else:
raise NotImplementedError
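if __name__ == "__main__":
    # Minimal usage sketch (illustrative values):
    demo = Stream.of([3, 1, 2, 4]).sorted().filter(lambda x: x % 2 == 0).map(lambda x: x * 10)
    print(list(demo))  # [20, 40]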
``` |
{
"source": "Jiyao17/fledge",
"score": 3
} |
#### File: src/baseline/text_classification.py
```python
import torch
from torchtext.datasets import AG_NEWS
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
tokenizer = get_tokenizer('basic_english')
train_iter = AG_NEWS(split='train')
def yield_tokens(data_iter):
for _, text in data_iter:
yield tokenizer(text)
vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=["<unk>"])
vocab.set_default_index(vocab["<unk>"])
text_pipeline = lambda x: vocab(tokenizer(x))
label_pipeline = lambda x: int(x) - 1
from torch.utils.data import DataLoader
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def collate_batch(batch):
label_list, text_list, offsets = [], [], [0]
for (_label, _text) in batch:
label_list.append(label_pipeline(_label))
processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
text_list.append(processed_text)
offsets.append(processed_text.size(0))
label_list = torch.tensor(label_list, dtype=torch.int64)
offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
text_list = torch.cat(text_list)
return label_list.to(device), text_list.to(device), offsets.to(device)
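# Note: nn.EmbeddingBag consumes a single flat tensor of token ids plus `offsets`,
# where offsets[i] marks where sample i starts inside text_list; this is why the
# per-sample lengths are accumulated with cumsum above instead of padding each sample.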
train_iter = AG_NEWS(split='train')
dataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch)
from torch import nn
class TextClassificationModel(nn.Module):
def __init__(self, vocab_size, embed_dim, num_class):
super(TextClassificationModel, self).__init__()
self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)
self.fc = nn.Linear(embed_dim, num_class)
self.init_weights()
def init_weights(self):
initrange = 0.5
self.embedding.weight.data.uniform_(-initrange, initrange)
self.fc.weight.data.uniform_(-initrange, initrange)
self.fc.bias.data.zero_()
def forward(self, text, offsets):
embedded = self.embedding(text, offsets)
return self.fc(embedded)
train_iter = AG_NEWS(split='train')
num_class = len(set([label for (label, text) in train_iter]))
vocab_size = len(vocab)
emsize = 64
model = TextClassificationModel(vocab_size, emsize, num_class).to(device)
# print(vocab_size)
# print(num_class)
# exit()
import time
def train(dataloader):
model.train()
total_acc, total_count = 0, 0
log_interval = 500
start_time = time.time()
for idx, (label, text, offsets) in enumerate(dataloader):
optimizer.zero_grad()
predicted_label = model(text, offsets)
loss = criterion(predicted_label, label)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
optimizer.step()
total_acc += (predicted_label.argmax(1) == label).sum().item()
total_count += label.size(0)
if idx % log_interval == 0 and idx > 0:
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches '
'| accuracy {:8.3f}'.format(epoch, idx, len(dataloader),
total_acc/total_count))
total_acc, total_count = 0, 0
start_time = time.time()
def evaluate(dataloader):
model.eval()
total_acc, total_count = 0, 0
with torch.no_grad():
for idx, (label, text, offsets) in enumerate(dataloader):
predicted_label = model(text, offsets)
loss = criterion(predicted_label, label)
total_acc += (predicted_label.argmax(1) == label).sum().item()
total_count += label.size(0)
return total_acc/total_count
from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset
# Hyperparameters
EPOCHS = 10 # epoch
LR = 5 # learning rate
BATCH_SIZE = 64 # batch size for training
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
total_accu = None
train_iter, test_iter = AG_NEWS()
train_dataset = to_map_style_dataset(train_iter)
test_dataset = to_map_style_dataset(test_iter)
num_train = int(len(train_dataset) * 0.95)
split_train_, split_valid_ = \
random_split(train_dataset, [num_train, len(train_dataset) - num_train])
train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE,
shuffle=True, collate_fn=collate_batch)
valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE,
shuffle=True, collate_fn=collate_batch)
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE,
shuffle=True, collate_fn=collate_batch)
for epoch in range(1, EPOCHS + 1):
epoch_start_time = time.time()
train(train_dataloader)
# accu_val = evaluate(valid_dataloader)
# if total_accu is not None and total_accu > accu_val:
# scheduler.step()
# else:
# total_accu = accu_val
print('-' * 59)
# print('| end of epoch {:3d} | time: {:5.2f}s | '
# 'valid accuracy {:8.3f} '.format(epoch,
# time.time() - epoch_start_time,
# accu_val))
print('| end of epoch {:3d} | time: {:5.2f}s | '.format(epoch,
time.time() - epoch_start_time,
))
print('-' * 59)
print('Checking the results of test dataset.')
accu_test = evaluate(test_dataloader)
print('test accuracy {:8.3f}'.format(accu_test))
```
#### File: fledge/src/FD_AG_NEWS.py
```python
import torch
from torchtext.datasets import AG_NEWS
from torch.utils.data import DataLoader
from torch import nn
import time
from multiprocessing.context import Process
from multiprocessing import Process, Queue, set_start_method
from utils.model import AG_NEWS_TEXT
from utils.funcs import get_argument_parser, check_device, get_partitioned_datasets, get_test_dataset
class TextClassificationModel(nn.Module):
def __init__(self, vocab_size, embed_dim, num_class):
super(TextClassificationModel, self).__init__()
self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)
self.fc = nn.Linear(embed_dim, num_class)
self.init_weights()
def init_weights(self):
initrange = 0.5
self.embedding.weight.data.uniform_(-initrange, initrange)
self.fc.weight.data.uniform_(-initrange, initrange)
self.fc.bias.data.zero_()
def forward(self, text, offsets):
embedded = self.embedding(text, offsets)
return self.fc(embedded)
class ClientAGNEWS:
def __init__(self, dataset, testset, l_epoch, batch_size, lr) -> None:
self.batch_size = batch_size
self.lr = lr
self.l_epoch = l_epoch
self.test_dataloader = DataLoader(testset, batch_size=self.batch_size,
shuffle=True, collate_fn=self.collate_batch)
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
self.tokenizer = get_tokenizer('basic_english')
self.train_iter = AG_NEWS(split='train')
self.vocab = build_vocab_from_iterator(self.yield_tokens(self.train_iter), specials=["<unk>"])
self.vocab.set_default_index(self.vocab["<unk>"])
self.text_pipeline = lambda x: self.vocab(self.tokenizer(x))
self.label_pipeline = lambda x: int(x) - 1
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# self.dataloader = None
# train_iter = AG_NEWS(split='train')
self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, collate_fn=self.collate_batch)
train_iter = AG_NEWS(split='train')
self.num_class = len(set([label for (label, text) in train_iter]))
self.vocab_size = len(self.vocab)
self.emsize = 64
self.model = AG_NEWS_TEXT(self.vocab_size, self.emsize, self.num_class).to(self.device)
self.criterion = torch.nn.CrossEntropyLoss()
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr)
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 1.0, gamma=0.1)
def yield_tokens(self, data_iter):
for _, text in data_iter:
yield self.tokenizer(text)
def collate_batch(self, batch):
label_list, text_list, offsets = [], [], [0]
for (_label, _text) in batch:
label_list.append(self.label_pipeline(_label))
processed_text = torch.tensor(self.text_pipeline(_text), dtype=torch.int64)
text_list.append(processed_text)
offsets.append(processed_text.size(0))
label_list = torch.tensor(label_list, dtype=torch.int64)
offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
text_list = torch.cat(text_list)
return label_list.to(self.device), text_list.to(self.device), offsets.to(self.device)
def train(self):
self.model.train()
total_acc, total_count = 0, 0
for idx, (label, text, offsets) in enumerate(self.dataloader):
self.optimizer.zero_grad()
predicted_label = self.model(text, offsets)
loss = self.criterion(predicted_label, label)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.1)
self.optimizer.step()
total_acc += (predicted_label.argmax(1) == label).sum().item()
total_count += label.size(0)
return total_acc/total_count
# if idx % log_interval == 0 and idx > 0:
# elapsed = time.time() - start_time
# print('| epoch {:3d} | {:5d}/{:5d} batches '
# '| accuracy {:8.3f}'.format(epoch, idx, len(dataloader),
# total_acc/total_count))
# total_acc, total_count = 0, 0
# start_time = time.time()
def evaluate(self):
self.model.eval()
total_acc, total_count = 0, 0
with torch.no_grad():
for idx, (label, text, offsets) in enumerate(self.test_dataloader):
predicted_label = self.model(text, offsets)
loss = self.criterion(predicted_label, label)
total_acc += (predicted_label.argmax(1) == label).sum().item()
total_count += label.size(0)
return total_acc/total_count
def run(self):
# import time
# from torch.utils.data.dataset import random_split
# from torchtext.data.functional import to_map_style_dataset
# train_iter, test_iter = AG_NEWS()
# train_dataset = to_map_style_dataset(train_iter)
# test_dataset = to_map_style_dataset(test_iter)
# train_dataloader = DataLoader(self.dataloader, batch_size=self.batch_size,
# shuffle=True, collate_fn=self.model.collate_batch)
# test_dataloader = DataLoader(self.testset, batch_size=self.batch_size,
# shuffle=True, collate_fn=self.model.collate_batch)
for epoch in range(self.l_epoch):
# epoch_start_time = time.time()
self.train()
# accu_val = evaluate(valid_dataloader)
# if total_accu is not None and total_accu > accu_val:
# scheduler.step()
# else:
# total_accu = accu_val
# print('-' * 59)
# print('| end of epoch {:3d} | time: {:5.2f}s | '
# 'valid accuracy {:8.3f} '.format(epoch,
# time.time() - epoch_start_time,
# accu_val))
# print('| end of epoch {:3d} | time: {:5.2f}s | '.format(epoch,
# time.time() - epoch_start_time,
# ))
# print('-' * 59)
# print('Checking the results of test dataset.')
# accu_test = self.evaluate(test_dataloader)
# print('test accuracy {:8.3f}'.format(accu_test))
def run_sim(que: Queue, progress_file: str, task, g_epoch_num, client_num, l_data_num, l_epoch_num, l_batch_size, l_lr, data_path, device, verbosity):
# partition data
datasets = get_partitioned_datasets(task, client_num, l_data_num, l_batch_size, data_path)
test_dataset = get_test_dataset(task, data_path)
clients = [ClientAGNEWS(datasets[i], test_dataset, l_epoch_num, l_batch_size, l_lr)
for i in range(client_num)]
result: list[float] = []
for i in range(g_epoch_num):
clients[0].run()
g_accuracy = clients[0].evaluate()
if verbosity >= 1:
pf = open(progress_file, "a")
print(f"Global accuracy:{g_accuracy*100:.9f}%")
pf.write(f"Epoch {i}: {g_accuracy*100:.2f}%\n")
# if i % 10 == 9:
pf.flush()
pf.close()
# print(f"Local accuracy after training: {[acc for acc in l_accuracy]}")
if i % 10 == 9:
result.append(g_accuracy)
que.put(result)
if __name__ == "__main__":
ap = get_argument_parser()
args = ap.parse_args()
    TASK: str = args.task  # limited to: FashionMNIST / SpeechCommand / AG_NEWS
# global parameters
G_EPOCH_NUM: int = args.g_epoch_num
# local parameters
CLIENT_NUM: int = args.client_num
L_DATA_NUM: int = args.l_data_num
L_EPOCH_NUM: int = args.l_epoch_num
L_BATCH_SIZE: int = args.l_batch_size
L_LR: float = args.l_lr
# shared settings
DATA_PATH: str = args.datapath
    DEVICE: torch.device = torch.device(args.device)
RESULT_FILE: str = args.result_file
VERBOSITY: int = args.verbosity
RUN_NUM: int = args.run_num
PROGRESS_FILE: str = args.progress_file
if VERBOSITY >= 2:
print("Input args: %s %d %d %d %d %d %f %s %s %s" %
(TASK, G_EPOCH_NUM, CLIENT_NUM, L_DATA_NUM, L_EPOCH_NUM, L_BATCH_SIZE, L_LR, DATA_PATH, DEVICE, RESULT_FILE)
)
# input check
SUPPORTED_TASKS = ["FashionMNIST", "SpeechCommand", "AG_NEWS"]
    if TASK not in SUPPORTED_TASKS:
        raise ValueError("Task not supported!")
    if not check_device(DEVICE):
        raise RuntimeError("CUDA required by input but not equipped!")
# run_sim(Queue(), "progress.txt", TASK, G_EPOCH_NUM, CLIENT_NUM, L_DATA_NUM, L_EPOCH_NUM, L_BATCH_SIZE, L_LR, DATA_PATH, DEVICE, VERBOSITY)
# exit()
set_start_method("spawn")
que = Queue()
procs: 'list[Process]' = []
for i in range(RUN_NUM):
proc = Process(
target=run_sim,
args=(que, PROGRESS_FILE, TASK, G_EPOCH_NUM, CLIENT_NUM, L_DATA_NUM, L_EPOCH_NUM, L_BATCH_SIZE, L_LR, DATA_PATH, DEVICE, VERBOSITY)
)
proc.start()
procs.append(proc)
for proc in procs:
proc.join()
with open(RESULT_FILE, "a") as f:
args = "{:12} {:11} {:10} {:10} {:11} {:12} {:4}".format(
TASK, G_EPOCH_NUM, CLIENT_NUM, L_DATA_NUM, L_EPOCH_NUM, L_BATCH_SIZE, L_LR
)
f.write(
"TASK G_EPOCH_NUM CLIENT_NUM L_DATA_NUM L_EPOCH_NUM L_BATCH_SIZE L_LR\n" +
args + "\n"
)
for i in range(RUN_NUM):
result = que.get(block=True)
print(result)
[f.write(f"{num*100:.2f}% ") for num in result]
f.write("\n")
f.flush()
f.write("\n")
```
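The `collate_batch` method above flattens a batch of variable-length documents into one token tensor plus an offsets tensor, which is exactly what `nn.EmbeddingBag` expects. A minimal standalone sketch with made-up token ids:

```python
import torch
from torch import nn

embedding = nn.EmbeddingBag(num_embeddings=100, embedding_dim=8, sparse=True)

# Two "documents" of token ids, concatenated into one flat tensor.
doc_a = torch.tensor([3, 17, 42], dtype=torch.int64)
doc_b = torch.tensor([5, 99], dtype=torch.int64)
text = torch.cat([doc_a, doc_b])         # shape: (5,)
offsets = torch.tensor([0, len(doc_a)])  # start index of each document in the flat tensor

bags = embedding(text, offsets)          # shape: (2, 8), one mean-pooled vector per document
print(bags.shape)
```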
#### File: src/utils/audio.py
```python
import os
from torchaudio.datasets import SPEECHCOMMANDS
import torch
class SubsetSC(SPEECHCOMMANDS):
def __init__(self, subset, data_path):
super().__init__(root=data_path, download=True)
def load_list(filename):
filepath = os.path.join(self._path, filename)
with open(filepath) as fileobj:
return [os.path.join(self._path, line.strip()) for line in fileobj]
if subset == "validation":
self._walker = load_list("validation_list.txt")
elif subset == "testing":
self._walker = load_list("testing_list.txt")
elif subset == "training":
excludes = load_list("validation_list.txt") + load_list("testing_list.txt")
excludes = set(excludes)
self._walker = [w for w in self._walker if w not in excludes]
def set_LABELS(labels):
global LABELS
LABELS = labels
def label_to_index(word):
# Return the position of the word in labels
return torch.tensor(LABELS.index(word))
def index_to_label(index):
# Return the word corresponding to the index in labels
# This is the inverse of label_to_index
return LABELS[index]
def pad_sequence(batch):
# Make all tensor in a batch the same length by padding with zeros
batch = [item.t() for item in batch]
batch = torch.nn.utils.rnn.pad_sequence(batch, batch_first=True, padding_value=0.)
return batch.permute(0, 2, 1)
def collate_fn(batch):
# A data tuple has the form:
# waveform, sample_rate, label, speaker_id, utterance_number
tensors, targets = [], []
# Gather in lists, and encode labels as indices
for waveform, _, label, *_ in batch:
tensors += [waveform]
targets += [label_to_index(label)]
# Group the list of tensors into a batched tensor
tensors = pad_sequence(tensors)
targets = torch.stack(targets)
return tensors, targets
def number_of_correct(pred, target):
# count number of correct predictions
return pred.squeeze().eq(target).sum().item()
def get_likely_index(tensor):
# find most likely label index for each element in the batch
return tensor.argmax(dim=-1)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
```
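A minimal sketch of wiring these helpers into a `DataLoader` (the data path and batch size are illustrative). Note that `set_LABELS` must run before the first batch is collated, because `label_to_index` looks words up in the module-level `LABELS` list:

```python
from torch.utils.data import DataLoader

train_set = SubsetSC("training", "./data")                     # downloads SPEECHCOMMANDS if needed
labels = sorted(set(datapoint[2] for datapoint in train_set))  # label is the third field of each sample
set_LABELS(labels)

loader = DataLoader(train_set, batch_size=64, shuffle=True, collate_fn=collate_fn)
waveforms, targets = next(iter(loader))
print(waveforms.shape, targets.shape)  # (64, 1, padded_length), (64,)
```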
#### File: src/utils/client.py
```python
import torch
from torch.optim.lr_scheduler import StepLR
from torch import nn, optim
from torch.optim import Optimizer
from torch.nn.modules import loss
from torch.utils.data import Dataset, DataLoader
from torchaudio.transforms import Resample
from torch.nn.modules.loss import CrossEntropyLoss
import torchaudio
import torch.nn.functional as F
import copy
from torchtext.datasets import AG_NEWS
from utils.model import FashionMNIST_CNN, SpeechCommand_M5, AG_NEWS_TEXT
from utils.audio import collate_fn, set_LABELS
from utils.funcs import get_test_dataset
class Client():
def __init__(self,
task: str,
train_dataset: Dataset,
epoch_num: int=5,
batch_size: int=256,
        lr: float=0.01,
device: str="cpu"
):
self.task = task
self.train_dataset = train_dataset
self.epoch_num = epoch_num
self.batch_size=batch_size
self.lr=lr
self.device = device
# set by self.init_task()
self.train_dataloader: DataLoader = None
self.transform: nn.Module = None
self.loss_fn = None
self.optimizer: Optimizer = None
self.scheduler = None
self.model: nn.Module = None
self.init_task()
def init_task(self) -> nn.Module:
if self.task == "FashionMNIST":
self._init_FashionMNIST()
elif self.task == "SpeechCommand":
self._init_SpeechCommand()
elif self.task == "AG_NEWS":
self._init_AG_NEWS()
        else:
            raise ValueError("Unsupported task.")
def train_model(self):
self.model = self.model.to(self.device)
self.model.train()
for i in range(self.epoch_num):
if self.task == "FashionMNIST":
self._train_FashionMNIST()
elif self.task == "SpeechCommand":
self._train_SpeechCommand()
self.scheduler.step()
elif self.task == "AG_NEWS":
acc = self._train_AG_NEWS_1()
# self.scheduler.step()
# return acc
            else:
                raise ValueError("Unsupported task.")
def _init_FashionMNIST(self):
self.train_dataloader = DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
drop_last=True
)
self.loss_fn = CrossEntropyLoss()
self.model = FashionMNIST_CNN()
self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr)
self.scheduler = None
def _init_SpeechCommand(self):
if self.device == "cuda":
num_workers = 1
pin_memory = True
else:
num_workers = 0
pin_memory = False
self.dataloader = DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
collate_fn=collate_fn,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=True
)
waveform, sample_rate, label, speaker_id, utterance_number = self.train_dataset[0]
labels = sorted(list(set(datapoint[2] for datapoint in self.train_dataset)))
set_LABELS(labels)
new_sample_rate = 8000
transform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=new_sample_rate)
        transformed: torch.Tensor = transform(waveform)
self.transform = transform.to(self.device)
self.loss_fn = F.nll_loss
self.model = SpeechCommand_M5(
n_input=transformed.shape[0],
n_output=len(labels)
)
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=0.0001)
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=20, gamma=0.5)  # halve the learning rate every 20 epochs
def _init_AG_NEWS(self):
from test import ClientAGNEWS
self.agent = ClientAGNEWS(self.train_dataset, self.batch_size, self.lr)
self.model = self.agent.model
def _train_FashionMNIST(self):
for batch, (X, y) in enumerate(self.train_dataloader):
# Compute prediction and loss
pred = self.model(X.cuda())
loss = self.loss_fn(pred, y.cuda())
# Backpropagation
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def _train_SpeechCommand(self):
self.transform = self.transform.to(self.device)
for batch_idx, (data, target) in enumerate(self.dataloader):
data = data.to(self.device)
target = target.to(self.device)
# apply transform and model on whole batch directly on device
data = self.transform(data)
output = self.model(data)
# negative log-likelihood for a tensor of size (batch x 1 x n_output)
loss = self.loss_fn(output.squeeze(), target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def _train_AG_NEWS(self):
self.model.train()
for idx, (label, text, offsets) in enumerate(self.train_dataloader, 0):
predicted_label = self.model(text.cuda(), offsets.cuda())
loss = self.loss_fn(predicted_label, label.cuda())
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.1)
self.optimizer.step()
    def _train_AG_NEWS_1(self):
        # Note: this assumes the ClientAGNEWS imported from the local `test` module accepts
        # (epochs, batch_size) in run(); the FD_AG_NEWS.py version of run() takes no arguments.
        self.agent.run(10, 64)
def test_model(self) -> float:
# functionality of testing local model is not guaranteed yet
self.model = self.model.to(self.device)
self.model.eval()
if self.task == "FashionMNIST":
accuracy = self._test_FashionMNIST()
if self.task == "SpeechCommand":
accuracy = self._test_SpeechCommand()
if self.task == "AG_NEWS":
accuracy = self._test_AG_NEWS()
return accuracy
def _test_FashionMNIST(self):
test_dataset = get_test_dataset("FashionMNIST", "~/projects/fledge/data")
self.test_dataloader = DataLoader(test_dataset, batch_size=64, drop_last=True)
size = len(self.test_dataloader.dataset)
test_loss, correct = 0, 0
for X, y in self.test_dataloader:
pred = self.model(X.to(self.device))
# test_loss += loss_fn(pred, y.to(self.device)).item()
correct += (pred.argmax(1) == y.to(self.device)).type(torch.float).sum().item()
correct /= size
return correct
def _test_SpeechCommand(self):
# dataset_size = len(self.test_dataloader.dataset)
correct = 0
# for data, target in self.test_dataloader:
# data = data.to(self.device)
# target = target.to(self.device)
# # apply transform and model on whole batch directly on device
# data = self.transform(data)
# output = self.model(data)
# pred = get_likely_index(output)
# # pred = output.argmax(dim=-1)
# correct += number_of_correct(pred, target)
# return 1.0 * correct / dataset_size
def _test_AG_NEWS(self):
self.model.eval()
total_acc, total_count = 0, 0
with torch.no_grad():
for idx, (label, text, offsets) in enumerate(self.test_dataloader, 0):
predicted_label = self.model(text, offsets)
# loss = self.loss_fn(predicted_label, label)
total_acc += (predicted_label.argmax(1) == label).sum().item()
total_count += label.size(0)
return total_acc/total_count
``` |
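A hedged usage sketch of the `Client` wrapper above for the FashionMNIST task. The dataset construction here is illustrative; in the repo the per-client subsets come from `get_partitioned_datasets` in `utils.funcs`:

```python
from torchvision import datasets, transforms

# Assumptions: a local ./data directory and a CUDA device, since _train_FashionMNIST calls .cuda(),
# and test_model() reads the test split from the repo's hardcoded data path.
train_set = datasets.FashionMNIST(root="./data", train=True, download=True,
                                  transform=transforms.ToTensor())
client = Client(task="FashionMNIST", train_dataset=train_set,
                epoch_num=1, batch_size=64, lr=0.01, device="cuda")
client.train_model()
print("local accuracy:", client.test_model())
```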
{
"source": "Jiyao17/fl-opt",
"score": 3
} |
#### File: fl-opt/utils_backup/models.py
```python
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.flatten import Flatten
# Code for CIFAR ResNet is modified from https://github.com/itchencheng/pytorch-residual-networks
class FashionMNIST(nn.Module):
def __init__(self):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 12, kernel_size=5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(12, 24, 5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(24*4*4, 50),
nn.ReLU(),
nn.Linear(50, 10),
nn.ReLU(),
)
def forward(self, x):
x = self.net(x)
return x
class SpeechCommand_Simplified(nn.Module):
def __init__(self, n_input=1, n_output=35, stride=16, n_channel=32):
super().__init__()
self.net = nn.Sequential(
# 1*8000
nn.Conv1d(n_input, n_channel, kernel_size=80, stride=stride),
# 32*496
nn.BatchNorm1d(n_channel),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=1),
# 32*493
nn.Conv1d(n_channel, n_channel//2, kernel_size=3),
# 16*491
nn.BatchNorm1d(n_channel//2),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=1),
# 16*488
nn.Conv1d(n_channel//2, n_channel//2, kernel_size=3),
# 16*486
nn.BatchNorm1d(n_channel//2),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4, stride=1),
# 16*483
nn.Flatten(),
nn.Linear(16*483, 512),
nn.Linear(512, n_output),
nn.LogSoftmax(dim=1)
)
def forward(self, x):
x = self.net(x)
return x
class SpeechCommand(nn.Module):
def __init__(self, n_input=1, n_output=35, stride=16, n_channel=32):
super().__init__()
self.conv1 = nn.Conv1d(n_input, n_channel, kernel_size=80, stride=stride)
self.bn1 = nn.BatchNorm1d(n_channel)
self.pool1 = nn.MaxPool1d(4)
self.conv2 = nn.Conv1d(n_channel, n_channel, kernel_size=3)
self.bn2 = nn.BatchNorm1d(n_channel)
self.pool2 = nn.MaxPool1d(4)
self.conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernel_size=3)
self.bn3 = nn.BatchNorm1d(2 * n_channel)
self.pool3 = nn.MaxPool1d(4)
self.conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size=3)
self.bn4 = nn.BatchNorm1d(2 * n_channel)
self.pool4 = nn.MaxPool1d(4)
self.fc1 = nn.Linear(2 * n_channel, n_output)
def forward(self, x):
x = self.conv1(x)
x = F.relu(self.bn1(x))
x = self.pool1(x)
x = self.conv2(x)
x = F.relu(self.bn2(x))
x = self.pool2(x)
x = self.conv3(x)
x = F.relu(self.bn3(x))
x = self.pool3(x)
x = self.conv4(x)
x = F.relu(self.bn4(x))
x = self.pool4(x)
x = F.avg_pool1d(x, x.shape[-1])
x = x.permute(0, 2, 1)
x = self.fc1(x)
return F.log_softmax(x, dim=2)
class AGNEWS(nn.Module):
def __init__(self, vocab_size = 95811, embed_dim = 64, num_class = 4):
super().__init__()
self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)
self.fc = nn.Linear(embed_dim, num_class)
self.init_weights()
def init_weights(self):
initrange = 0.5
self.embedding.weight.data.uniform_(-initrange, initrange)
self.fc.weight.data.uniform_(-initrange, initrange)
self.fc.bias.data.zero_()
def forward(self, text, offsets):
embedded = self.embedding(text, offsets)
return self.fc(embedded)
class CIFAR_CNN(nn.Module):
def __init__(self):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(3, 32, 3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(32, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(128, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Dropout(0.1),
nn.Flatten(),
nn.Linear(256*4*4, 256),
nn.ReLU(),
nn.Linear(256, 10),
nn.ReLU(),
)
# self.conv1 = nn.Conv2d(3, 6, 5)
# self.pool = nn.MaxPool2d(2, 2)
# self.conv2 = nn.Conv2d(6, 16, 5)
# self.fc1 = nn.Linear(16 * 5 * 5, 120)
# self.fc2 = nn.Linear(120, 84)
# self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# x = self.pool(F.relu(self.conv1(x)))
# x = self.pool(F.relu(self.conv2(x)))
# x = torch.flatten(x, 1) # flatten all dimensions except batch
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
# x = self.fc3(x)
x = self.net(x)
return x
class ResBlock(nn.Module):
def __init__(self, in_chann, chann, stride):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(in_chann, chann, kernel_size=3, padding=1, stride=stride)
self.bn1 = nn.BatchNorm2d(chann)
self.conv2 = nn.Conv2d(chann, chann, kernel_size=3, padding=1, stride=1)
self.bn2 = nn.BatchNorm2d(chann)
def forward(self, x):
y = self.conv1(x)
y = self.bn1(y)
y = F.relu(y)
y = self.conv2(y)
y = self.bn2(y)
if (x.shape == y.shape):
z = x
else:
z = F.avg_pool2d(x, kernel_size=2, stride=2)
x_channel = x.size(1)
y_channel = y.size(1)
ch_res = (y_channel - x_channel)//2
pad = (0, 0, 0, 0, ch_res, ch_res)
z = F.pad(z, pad=pad, mode="constant", value=0)
z = z + y
z = F.relu(z)
return z
class BaseNet(nn.Module):
def __init__(self, Block, n):
super(BaseNet, self).__init__()
self.Block = Block
self.conv0 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
self.bn0 = nn.BatchNorm2d(16)
self.convs = self._make_layers(n)
self.avgpool = nn.AvgPool2d(kernel_size=8, stride=1)
self.fc = nn.Linear(64, 10)
def forward(self, x):
x = self.conv0(x)
x = self.bn0(x)
x = F.relu(x)
x = self.convs(x)
x = self.avgpool(x)
x = x.view(x.size(0),-1)
x = self.fc(x)
return x
def _make_layers(self, n):
layers = []
in_chann = 16
chann = 16
stride = 1
for i in range(3):
for j in range(n):
if ((i > 0) and (j == 0)):
in_chann = chann
chann = chann * 2
stride = 2
layers += [self.Block(in_chann, chann, stride)]
stride = 1
in_chann = chann
return nn.Sequential(*layers)
class CIFARResNet(BaseNet):
def __init__(self, n=3):
super().__init__(ResBlock, n)
```
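A quick standalone sanity check of the model definitions above: feed dummy inputs of the expected shapes and confirm the output shapes (expected values shown in the comments):

```python
import torch

fm = FashionMNIST()
print(fm(torch.randn(2, 1, 28, 28)).shape)      # torch.Size([2, 10])

sc = SpeechCommand()
print(sc(torch.randn(2, 1, 8000)).shape)        # torch.Size([2, 1, 35]), log-probabilities

news = AGNEWS()
text = torch.randint(0, 95811, (7,))
offsets = torch.tensor([0, 3])                  # two documents of lengths 3 and 4
print(news(text, offsets).shape)                # torch.Size([2, 4])

resnet = CIFARResNet(n=3)
print(resnet(torch.randn(2, 3, 32, 32)).shape)  # torch.Size([2, 10])
```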
#### File: fl-opt/utils_backup/task.py
```python
from copy import deepcopy
from typing import overload
from torch import nn, optim
import torch
import torchvision
import torchvision.transforms as tvtf
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader, dataloader
from utils.models import CIFARResNet
class ExpConfig:
TEST_TYPE = ("grouping", "r", "test")
TASK_NAME = ("CIFAR", )
def __init__(self,
test_type: str="grouping",
task_name: str="CIFAR",
global_epoch_num: int=500,
group_epoch_num: int=5,
local_epoch_num: int=1,
client_num: int=100,
group_size: int=10,
group_num: int=10,
local_data_num: int=500,
batch_size: int=32,
        lr: float=0.001,
        noniid_degree: float=5,
        device: str="cuda",
        datapath: str="./data/",
result_dir: str="./cifar/noniid/",
simulation_num: int=3,
simulation_index: int=0,
log_interval: int=5,
comment: str="",
) -> None:
self.task_type: str = test_type
self.task_name: str = task_name
self.global_epoch_num: int = global_epoch_num
self.group_epoch_num: int = group_epoch_num
self.local_epoch_num: int = local_epoch_num
self.client_num: int = client_num
self.group_size: int = group_size
self.group_num: int = group_num
self.local_data_num: int = local_data_num
self.batch_size: int = batch_size
        self.lr: float = lr
        self.noniid_degree: float = noniid_degree
        self.datapath: str = datapath
        self.device: str = device
self.result_dir: str = result_dir
self.simulation_num: int = simulation_num
self.simulation_index: int = simulation_index
self.log_interval: int = log_interval
self.comment: str = comment
def get_task_class(self):
if self.task_name == ExpConfig.TASK_NAME[0]:
return TaskCIFAR
        else:
            raise ValueError("Unsupported task")
def get_model_class(self):
if self.task_name == ExpConfig.TASK_NAME[0]:
return CIFARResNet
        else:
            raise ValueError("Unsupported task")
class Task:
@overload
@staticmethod
def load_dataset(data_path: str):
pass
@overload
@staticmethod
def test_model(model: nn.Module, testloader: DataLoader, device: str) \
-> 'tuple[float, float]':
pass
# create new model while calling __init__ in subclasses
def __init__(self, model: nn.Module, trainset: Dataset, config: ExpConfig) -> None:
self.model: nn.Module = model
self.trainset = trainset
self.config = deepcopy(config)
self.optimizer: optim.Optimizer = None
self.scheduler: optim.lr_scheduler._LRScheduler = None
def set_model(self, model: nn.Module):
# self.optmizaer
self.model.load_state_dict(deepcopy(model.state_dict()))
self.model.to(self.config.device)
def get_model(self) -> nn.Module:
return self.model
@overload
def train_model(self):
pass
    @overload
    def test_model(self):
        pass
class TaskCIFAR(Task):
loss = nn.CrossEntropyLoss()
classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
@staticmethod
def load_dataset(data_path: str, type: str="both"):
# enhance
# Use the torch.transforms, a package on PIL Image.
transform_enhanc_func = tvtf.Compose([
tvtf.RandomHorizontalFlip(p=0.5),
tvtf.RandomCrop(32, padding=4, padding_mode='edge'),
tvtf.ToTensor(),
tvtf.Lambda(lambda x: x.mul(255)),
tvtf.Normalize([125., 123., 114.], [1., 1., 1.])
])
# transform
transform_func = tvtf.Compose([
tvtf.ToTensor(),
tvtf.Lambda(lambda x: x.mul(255)),
tvtf.Normalize([125., 123., 114.], [1., 1., 1.])
])
trainset, testset = None, None
if type != "test":
trainset = torchvision.datasets.CIFAR10(root=data_path, train=True,
download=True, transform=transform_enhanc_func)
if type != "train":
testset = torchvision.datasets.CIFAR10(root=data_path, train=False,
download=True, transform=transform_func)
return (trainset, testset)
@staticmethod
def test_model(model: nn.Module, testloader: DataLoader, device: str) \
-> 'tuple[float, float]':
model.to(device)
model.eval()
loss = TaskCIFAR.loss
size = 0
correct: float = 0.0
test_loss: float = 0.0
# with torch.no_grad():
for samples, labels in testloader:
pred = model(samples.to(device))
correct += (pred.argmax(1) == labels.to(device)).type(torch.float).sum().item()
if loss is not None:
test_loss += loss(pred, labels.to(device)).item()
size += len(samples)
correct /= 1.0*size
test_loss /= 1.0*size
return correct, test_loss
def __init__(self, trainset: Dataset, config: ExpConfig) -> None:
super().__init__(CIFARResNet(), trainset, config)
self.trainloader: DataLoader = DataLoader(self.trainset, batch_size=self.config.batch_size,
shuffle=True, drop_last=True)
# self.testloader: DataLoader = DataLoader(self.testset, batch_size=512,
# shuffle=False)
self.lr = self.config.lr
self.loss = nn.CrossEntropyLoss()
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9, weight_decay=0.0001)
self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.9)
self.round_counter = 0
self.decay_period = self.config.group_epoch_num * self.config.local_epoch_num // 2.
if self.config.task_type == ExpConfig.TEST_TYPE[0]:
# self.decay_period = self.config.global_epoch_num // 2 * self.config.local_epoch_num
self.decay_period = self.config.global_epoch_num * self.config.group_epoch_num * self.config.local_epoch_num // 2
def train_model(self):
self.model.to(self.config.device)
self.model.train()
# running_loss = 0
for (image, label) in self.trainloader:
y = self.model(image.to(self.config.device))
loss = self.loss(y, label.to(self.config.device))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# running_loss += loss.item()
self.round_counter += 1
if self.round_counter == self.decay_period:
self.lr /= 10
self.optimizer = torch.optim.SGD(self.model.parameters(),
lr=self.lr, momentum=0.9, weight_decay=0.0001)
# print("learning rate now is %f" % (self.lr,))
# self.task.scheduler.step()
# class UniTask:
# def __init__(self, config: ExpConfig) -> None:
# self.config = deepcopy(config)
# self.task_class: type = None
# if self.config.task_name == "CIFAR":
# self.task_class = TaskCIFAR
```
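A hedged sketch of driving `TaskCIFAR` directly with an `ExpConfig`; the data path and hyperparameters are illustrative, and the surrounding grouping/scheduler logic of the repo is omitted:

```python
from torch.utils.data import DataLoader

config = ExpConfig(datapath="./data/", batch_size=32, lr=0.001, device="cuda")
trainset, testset = TaskCIFAR.load_dataset(config.datapath)

task = TaskCIFAR(trainset, config)
task.train_model()  # one pass over the local trainloader

testloader = DataLoader(testset, batch_size=512, shuffle=False)
acc, loss = TaskCIFAR.test_model(task.get_model(), testloader, config.device)
print(f"accuracy={acc:.4f} loss={loss:.6f}")
```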
#### File: fl-opt/utils/sim.py
```python
from copy import deepcopy
from torch import nn
import numpy as np
import math
import torch
from torch.utils.data import dataset
from torch.utils.data.dataset import Subset
from torch.utils.data import DataLoader
# def compare_models(model: nn.Module, clients: 'list[Client]', num: int):
# def print_params(model: nn.Module, num: int):
# counter = 1
# for name, param in model.state_dict().items():
# if counter > num:
# break
# else:
# print(param[0][0], end="")
# counter += 1
# print("")
# print_params(model, num)
# print_params(clients[0].model, num)
# print_params(clients[len(clients)//2].model, num)
# print_params(clients[-1].model, num)
# def regroup(G: np.ndarray, A: np.ndarray, s: int) -> 'tuple[np.ndarry, np.ndarry]':
# "s: each new group contains s old groups"
# group_num: int = G.shape[1]
# new_group_size: int = math.ceil(group_num / s)
# A_T = A.transpose()
# group2server: list[int] = []
# # get new groups as list
# new_groups: 'list[list[int]]' = []
# for i, server in enumerate(A_T):
# new_group: 'list[int]' = []
# for j, group in server:
# if A_T[i][j] == 1:
# if len(new_group) < new_group_size:
# new_group.append(j)
# else:
# new_groups.append(new_group)
# new_group = []
# # construct new A
# new_A = np.zeros((len(new_groups), A.shape[1],))
# for i, new_group in enumerate(new_groups):
# one_group = new_group[0]
# belong_to_server = 0
# for j, to_server in enumerate(A[one_group]):
# if to_server == 1:
# belong_to_server = j
# break
# new_A[i][belong_to_server] = 1
# # construct new G
# new_G = np.zeros((G.shape[0], len(new_groups),))
# for i, new_group in enumerate(new_groups):
# for old_group in new_group:
# G_T = G.transpose()
# for k, contain_client in enumerate(G_T[old_group]):
# if contain_client == 1:
# new_G[k][i] = 1
# return new_G, new_A
``` |
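The `regroup` function above is left entirely commented out. Below is a hedged, runnable reconstruction of what it appears to do: merge every `s` old groups that live on the same server into one new group and rebuild both matrices, under the assumption that `G` is a clients-by-groups membership matrix and `A` a groups-by-servers assignment matrix:

```python
import numpy as np

def regroup(G: np.ndarray, A: np.ndarray, s: int) -> 'tuple[np.ndarray, np.ndarray]':
    """G: clients x groups membership, A: groups x servers assignment,
    s: number of old groups merged into each new group."""
    new_groups: 'list[list[int]]' = []
    for server in range(A.shape[1]):
        groups_on_server = np.nonzero(A[:, server])[0].tolist()
        for i in range(0, len(groups_on_server), s):
            new_groups.append(groups_on_server[i:i + s])

    new_A = np.zeros((len(new_groups), A.shape[1]))
    new_G = np.zeros((G.shape[0], len(new_groups)))
    for i, merged in enumerate(new_groups):
        server = int(np.nonzero(A[merged[0]])[0][0])  # all merged groups share one server
        new_A[i][server] = 1
        for old_group in merged:
            new_G[np.nonzero(G[:, old_group])[0], i] = 1  # carry the clients of each old group over
    return new_G, new_A
```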
{
"source": "Jiyao17/fl",
"score": 2
} |
#### File: fl/utils/tasks.py
```python
from typing import List, Tuple, overload
import copy
import os
from numpy.lib.function_base import select
import torch
from torch import nn, optim, Tensor
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset, Subset
from torchvision import datasets, transforms
from torch import randperm
# SpeechCommand
from torchaudio.datasets import SPEECHCOMMANDS
from torchaudio.transforms import Resample
import torch.nn.functional as F
# AG_NEWS
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torchtext.datasets import AG_NEWS
from torchtext.data.functional import to_map_style_dataset
from utils.models import FashionMNIST, SpeechCommand, AGNEWS
class Config:
TEST_TYPES = ["iid", "iid-range", "noniid-sigma", "noniid-sigma-group", "noniid-r", "noniid-group"]
def __init__(self,
task_name: str,
g_epoch_num: int,
client_num: int,
l_data_num: int,
l_epoch_num: int,
l_batch_size: int,
        l_lr: float,
        datapath: str,
        device: str,
result_dir: str,
verbosity: int,
simulation_num: int,
reside: int=0,
simulation_index: int=0,
l_trainset: Dataset=None,
testset: Dataset=None,
sigma: float=0.1,
test_type: str="iid",
) -> None:
self.task_name: str = task_name
# global parameters
self.g_epoch_num: int = g_epoch_num
# local parameters
self.client_num: int = client_num
self.l_data_num: int = l_data_num
self.l_epoch_num: int = l_epoch_num
self.l_batch_size: int = l_batch_size
self.l_lr: float = l_lr
# shared settings
self.datapath: str = datapath
self.device: torch.device = torch.device(device)
self.result_dir: str = result_dir
self.verbosity:int = verbosity
# run multiple simulations in processes at one time
self.simulation_num: int = simulation_num
# for single simulators to know its index
# so it can write results to its file
self.simulation_index:int = simulation_index
# task reside on server (-1) or client (0, 1, ..., client_num-1)
self.reside:int = reside
# this should be different for every client
self.l_trainset: Dataset = l_trainset
# this should be used by the server
self.testset: Dataset = testset
# non-IID degree
        self.sigma: float = sigma
self.test_type: str = test_type
    def __init__(self):
        # Note: this no-argument initializer shadows the parameterized __init__ above;
        # callers construct Config() and then set the attributes they need directly.
        if len(UniTask.supported_tasks) < 1:
            raise RuntimeError("No supported task, cannot run")
        self.task_name: str = UniTask.supported_tasks[0]
self.g_epoch_num: int = 100
self.client_num: int = 100
self.l_data_num: int = 500
self.l_epoch_num: int = 5
self.l_batch_size: int = 64
self.l_lr: float = 0.01
self.datapath: str = "./data/"
self.device: torch.device = torch.device("cuda")
self.result_dir: str = "./result/"
self.verbosity:int = 2
self.simulation_num: int = 1
self.reside:int = -1
self.simulation_index:int = 0
self.l_trainset: Dataset = None
self.testset: Dataset = None
self.sigma: float = 0.1
self.test_type: str = "iid"
class Task:
# test dataloader for tasks on server
# train dataloader for tasks on clients
# init once for every simulation
testset: Dataset = None
trainset: Dataset = None
trainset_perm: 'list[int]' = None
def __init__(self, configs: Config):
self.configs = configs
self.model: nn.Module = None
self.train_dataloader: DataLoader = None
self.test_dataloader: DataLoader = None
self.optimizer: Optimizer = None
self.scheduler = None
@overload
def get_dataloader(self):
"""
Initialize static members
"""
pass
@overload
def train(self) -> float:
pass
@overload
def test(self) -> float:
pass
def get_model(self) -> nn.Module:
return self.model.to(self.configs.device)
def update_model(self, new_model: nn.Module):
state_dict = new_model.state_dict()
new_state_dict = copy.deepcopy(state_dict)
self.model.load_state_dict(new_state_dict)
self.model.to(self.configs.device)
def load_state_dict(self, new_state_dict: 'dict[str, Tensor]'):
state_dict = copy.deepcopy(new_state_dict)
self.model.load_state_dict(state_dict)
self.model.to(self.configs.device)
class TaskFashionMNIST(Task):
@staticmethod
def get_datasets(config: Config) -> Tuple[Dataset, Dataset]:
testset = datasets.FashionMNIST(
root=config.datapath,
train=False,
# download=True,
transform=transforms.ToTensor(),
)
trainset = datasets.FashionMNIST(
root=config.datapath,
train=True,
# download=True,
transform=transforms.ToTensor(),
)
return (trainset, testset)
def __init__(self, configs: Config):
super().__init__(configs)
self.model = FashionMNIST()
self.loss_fn = nn.modules.loss.CrossEntropyLoss()
self.optimizer = optim.SGD(self.model.parameters(), lr=self.configs.l_lr)
self.get_dataloader()
def get_dataloader(self):
# if dataset not loaded, load first
# if Task.trainset_perm == None:
# Task.trainset_perm = randperm(len(Task.trainset)).tolist()
self.testset = self.configs.testset
self.test_dataloader = DataLoader(
self.testset,
batch_size=self.configs.l_batch_size,
shuffle=False,
drop_last=True
)
        if 0 <= self.configs.reside <= self.configs.client_num:
# data_num = self.configs.l_data_num
# reside = self.configs.reside
# self.trainset = Subset(Task.trainset,
# Task.trainset_perm[data_num*reside: data_num*(reside+1)])
self.trainset = self.configs.l_trainset
# print(len(self.trainset))
self.train_dataloader = DataLoader(
self.trainset,
batch_size=self.configs.l_batch_size,
shuffle=True,
drop_last=True
)
if self.configs.verbosity >= 3:
if self.configs.reside == -1:
print("Test set length in simulation %d: %d" %
(self.configs.simulation_index, len(self.testset)))
else:
print("Dataset length in simulation %d: %d, %d-%d" %
(self.configs.simulation_index, len(self.configs.l_trainset)))
def train(self) -> float:
self.model.to(self.configs.device)
self.model.train()
for X, y in self.train_dataloader:
# Compute prediction and loss
pred = self.model(X.to(self.configs.device))
# print(y.shape)
loss = self.loss_fn(pred, y.to(self.configs.device))
# Backpropagation
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return 0
    def test(self) -> Tuple[float, float]:
self.model.to(self.configs.device)
self.model.eval()
size = len(self.testset)
test_loss, correct = 0, 0
# with torch.no_grad():
for X, y in self.test_dataloader:
pred = self.model(X.to(self.configs.device))
test_loss += self.loss_fn(pred, y.to(self.configs.device)).item()
correct += (pred.argmax(1) == y.to(self.configs.device)).type(torch.float).sum().item()
correct /= 1.0*size
test_loss /= 1.0*size
return correct, test_loss
class TaskSpeechCommand(Task):
labels: list = ['backward', 'bed', 'bird', 'cat', 'dog', 'down', 'eight', 'five', 'follow',
'forward', 'four', 'go', 'happy', 'house', 'learn', 'left', 'marvin', 'nine', 'no', 'off',
'on', 'one', 'right', 'seven', 'sheila', 'six', 'stop', 'three', 'tree', 'two', 'up',
'visual', 'wow', 'yes', 'zero']
class SubsetSC(SPEECHCOMMANDS):
def __init__(self, subset, data_path):
super().__init__(root=data_path, download=True)
def load_list(filename):
filepath = os.path.join(self._path, filename)
with open(filepath) as fileobj:
return [os.path.join(self._path, line.strip()) for line in fileobj]
if subset == "validation":
self._walker = load_list("validation_list.txt")
elif subset == "testing":
self._walker = load_list("testing_list.txt")
elif subset == "training":
excludes = load_list("validation_list.txt") + load_list("testing_list.txt")
excludes = set(excludes)
self._walker = [w for w in self._walker if w not in excludes]
def __init__(self, configs: Config):
super().__init__(configs)
self.model = SpeechCommand()
self.loss_fn = F.nll_loss
self.optimizer = optim.SGD(
self.model.parameters(), lr=self.configs.l_lr)
self.get_dataloader()
waveform, sample_rate, label, speaker_id, utterance_number = self.testset[0]
new_sample_rate = 8000
transform = Resample(orig_freq=sample_rate, new_freq=new_sample_rate)
# transformed: Resample = transform(waveform)
self.transform = transform.to(self.configs.device)
self.optimizer = optim.Adam(self.model.parameters(), lr=self.configs.l_lr, weight_decay=0.0001)
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=20, gamma=0.5)  # halve the learning rate every 20 epochs
@staticmethod
def get_datasets(config: Config) -> Tuple[Dataset, Dataset]:
testset = TaskSpeechCommand.SubsetSC("testing", config.datapath)
trainset = TaskSpeechCommand.SubsetSC("training", config.datapath)
return (trainset, testset)
def get_dataloader(self):
if self.configs.device == torch.device("cuda"):
num_workers = 1
pin_memory = True
else:
num_workers = 0
pin_memory = False
# test dataloader
self.testset = self.configs.testset
self.test_dataloader = DataLoader(
self.testset,
batch_size=self.configs.l_batch_size,
shuffle=False,
drop_last=True,
collate_fn=TaskSpeechCommand.collate_fn,
num_workers=num_workers,
pin_memory=pin_memory,
)
# train dataloader
        if 0 <= self.configs.reside <= self.configs.client_num:
# data_num = self.configs.l_data_num
# reside = self.configs.reside
# self.trainset = Subset(Task.trainset,
# Task.trainset_perm[data_num*reside: data_num*(reside+1)])
self.trainset = self.configs.l_trainset
self.train_dataloader = DataLoader(
self.trainset,
batch_size=self.configs.l_batch_size,
shuffle=True,
collate_fn=TaskSpeechCommand.collate_fn,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=True
)
def train(self):
self.model.to(self.configs.device)
self.model.train()
self.transform = self.transform.to(self.configs.device)
for data, target in self.train_dataloader:
data = data.to(self.configs.device)
target = target.to(self.configs.device)
# apply transform and model on whole batch directly on device
data = self.transform(data)
output = self.model(data)
# negative log-likelihood for a tensor of size (batch x 1 x n_output)
loss = self.loss_fn(output.squeeze(), target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# self.scheduler.step()
def test(self):
self.model.to(self.configs.device)
self.model.eval()
dataset_size = len(self.test_dataloader.dataset)
correct, loss = 0, 0
for data, target in self.test_dataloader:
data = data.to(self.configs.device)
target = target.to(self.configs.device)
# apply transform and model on whole batch directly on device
data = self.transform(data)
output = self.model(data)
pred = TaskSpeechCommand.get_likely_index(output)
loss += self.loss_fn(output.squeeze(), target).item()
# pred = output.argmax(dim=-1)
correct += TaskSpeechCommand.number_of_correct(pred, target)
correct /= 1.0*dataset_size
loss /= 1.0*dataset_size
return correct, loss
@staticmethod
def label_to_index(word):
# Return the position of the word in labels
return torch.tensor(TaskSpeechCommand.labels.index(word))
@staticmethod
def index_to_label(index):
# Return the word corresponding to the index in labels
# This is the inverse of label_to_index
return TaskSpeechCommand.labels[index]
@staticmethod
def pad_sequence(batch):
# Make all tensor in a batch the same length by padding with zeros
batch = [item.t() for item in batch]
batch = torch.nn.utils.rnn.pad_sequence(batch, batch_first=True, padding_value=0.)
return batch.permute(0, 2, 1)
@staticmethod
def collate_fn(batch):
# A data tuple has the form:
# waveform, sample_rate, label, speaker_id, utterance_number
tensors, targets = [], []
# Gather in lists, and encode labels as indices
for waveform, _, label, *_ in batch:
tensors += [waveform]
targets += [TaskSpeechCommand.label_to_index(label)]
# Group the list of tensors into a batched tensor
tensors = TaskSpeechCommand.pad_sequence(tensors)
targets = torch.stack(targets)
return tensors, targets
@staticmethod
def number_of_correct(pred, target):
# count number of correct predictions
return pred.squeeze().eq(target).sum().item()
@staticmethod
def get_likely_index(tensor):
# find most likely label index for each element in the batch
return tensor.argmax(dim=-1)
@staticmethod
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
class TaskAGNEWS(Task):
def __init__(self, configs: Config):
super().__init__(configs)
self.tokenizer = get_tokenizer('basic_english')
self.train_iter = AG_NEWS(root=self.configs.datapath, split='train')
self.vocab = build_vocab_from_iterator(self.yield_tokens(self.train_iter), specials=["<unk>"])
self.vocab.set_default_index(self.vocab["<unk>"])
self.text_pipeline = lambda x: self.vocab(self.tokenizer(x))
self.label_pipeline = lambda x: int(x) - 1
self.model = AGNEWS()
self.loss_fn = torch.nn.CrossEntropyLoss()
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.configs.l_lr)
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, 1.0, gamma=0.1)
self.get_dataloader()
@staticmethod
def get_datasets(config: Config) -> Tuple[Dataset, Dataset]:
test_iter = AG_NEWS(root=config.datapath, split="test")
testset = to_map_style_dataset(test_iter)
train_iter = AG_NEWS(root=config.datapath, split="train")
trainset = to_map_style_dataset(train_iter)
return (trainset, testset)
def get_dataloader(self):
self.testset = self.configs.testset
self.trainset = self.configs.l_trainset
self.test_dataloader = DataLoader(
self.testset,
batch_size=self.configs.l_batch_size,
shuffle=True,
collate_fn=self.collate_batch)
self.train_dataloader = DataLoader(
self.trainset,
batch_size=self.configs.l_batch_size,
shuffle=False,
collate_fn=self.collate_batch)
def train(self) -> float:
self.model.to(self.configs.device)
self.model.train()
total_acc, total_count = 0, 0
for label, text, offsets in self.train_dataloader:
self.optimizer.zero_grad()
predicted_label = self.model(text, offsets)
loss = self.loss_fn(predicted_label, label)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.1)
self.optimizer.step()
total_acc += (predicted_label.argmax(1) == label).sum().item()
total_count += label.size(0)
return total_acc/total_count
    def test(self) -> Tuple[float, float]:
self.model.to(self.configs.device)
self.model.eval()
total_acc, loss = 0, 0
with torch.no_grad():
for label, text, offsets in self.test_dataloader:
predicted_label = self.model(text, offsets)
                loss += self.loss_fn(predicted_label, label).item()
total_acc += (predicted_label.argmax(1) == label).sum().item()
size = len(self.test_dataloader.dataset)
total_acc /= 1.0*size
loss /= 1.0*size
return total_acc, loss
def yield_tokens(self, data_iter):
# return [self.tokenizer(text) for _, text in data_iter]
for _, text in data_iter:
yield self.tokenizer(text)
# def transform_to_token(self, data)
def collate_batch(self, batch):
label_list, text_list, offsets = [], [], [0]
for (_label, _text) in batch:
label_list.append(self.label_pipeline(_label))
processed_text = torch.tensor(self.text_pipeline(_text), dtype=torch.int64)
text_list.append(processed_text)
offsets.append(processed_text.size(0))
label_list = torch.tensor(label_list, dtype=torch.int64)
offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
text_list = torch.cat(text_list)
device = self.configs.device
return label_list.to(device), text_list.to(device), offsets.to(device)
class UniTask:
"""
Use UniTask().get_task() to get correct task type
"""
# "AG_NEWS"
supported_tasks = ["FashionMNIST", "SpeechCommand", "AG_NEWS"]
def __init__(self) -> None:
pass
@staticmethod
def get_task(config: Config) -> Task:
        if config.task_name not in UniTask.supported_tasks:
            raise ValueError("Task not supported yet.")
if config.task_name == "FashionMNIST":
task = TaskFashionMNIST(config)
if config.task_name == "SpeechCommand":
task = TaskSpeechCommand(config)
if config.task_name == "AG_NEWS":
task = TaskAGNEWS(config)
return task
    @staticmethod
    def get_datasets(config: Config) -> Tuple[Dataset, Dataset]:
if config.task_name == "FashionMNIST":
trainset, testset = TaskFashionMNIST.get_datasets(config)
if config.task_name == "SpeechCommand":
trainset, testset = TaskSpeechCommand.get_datasets(config)
if config.task_name == "AG_NEWS":
trainset, testset = TaskAGNEWS.get_datasets(config)
return (trainset, testset)
``` |
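A hedged sketch of one simulation round built only from the classes above: partition the training set across a couple of clients, run one round of local training, and test the server-side task. Model aggregation and the repo's scheduler are omitted, and the plain random split is for illustration only:

```python
import copy
from torch import randperm
from torch.utils.data import Subset

# Assumptions: FashionMNIST files already exist under ./data/ (get_datasets does not download),
# and a CUDA device is available (the default Config device).
config = Config()                  # the no-argument defaults defined above
config.task_name = "FashionMNIST"
config.client_num = 2              # keep the sketch small
trainset, testset = UniTask.get_datasets(config)
config.testset = testset

perm = randperm(len(trainset)).tolist()
clients = []
for reside in range(config.client_num):
    c_conf = copy.deepcopy(config)
    c_conf.reside = reside
    c_conf.l_trainset = Subset(trainset, perm[reside * config.l_data_num:(reside + 1) * config.l_data_num])
    clients.append(UniTask.get_task(c_conf))

server_conf = copy.deepcopy(config)
server_conf.reside = -1
server = UniTask.get_task(server_conf)

for client in clients:
    client.train()
print(server.test())               # (accuracy, loss) of the not-yet-aggregated server model
```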
{
"source": "jiyauppal/face-mask-detector.github.io",
"score": 2
} |
#### File: jiyauppal/face-mask-detector.github.io/person_detection_vedio.py
```python
import cv2
import datetime
import imutils
import numpy as np
protopath = "MobileNetSSD_deploy.prototxt"
modelpath = "MobileNetSSD_deploy.caffemodel"
detector = cv2.dnn.readNetFromCaffe(prototxt=protopath, caffeModel=modelpath)
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
def main():
cap = cv2.VideoCapture('test_video.mp4')
fps_start_time = datetime.datetime.now()
fps = 0
total_frames = 0
while True:
        ret, frame = cap.read()
        if not ret:
            break  # end of video stream
        frame = imutils.resize(frame, width=600)
total_frames = total_frames + 1
(H, W) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
detector.setInput(blob)
person_detections = detector.forward()
for i in np.arange(0, person_detections.shape[2]):
confidence = person_detections[0, 0, i, 2]
if confidence > 0.5:
idx = int(person_detections[0, 0, i, 1])
if CLASSES[idx] != "person":
continue
person_box = person_detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = person_box.astype("int")
cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)
fps_end_time = datetime.datetime.now()
time_diff = fps_end_time - fps_start_time
if time_diff.seconds == 0:
fps = 0.0
else:
fps = (total_frames / time_diff.seconds)
fps_text = "FPS: {:.2f}".format(fps)
cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
cv2.imshow("Application", frame)
key = cv2.waitKey(1)
if key == ord('q'):
break
    cap.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    main()
``` |
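The inner loop above indexes the raw SSD output directly; the same parsing can be factored into a small helper for reuse. A sketch that reuses the `CLASSES` list from the script: `detector.forward()` returns a `(1, 1, N, 7)` array where, for each detection `i`, index 1 is the class id, index 2 the confidence, and 3:7 the normalized box corners:

```python
import numpy as np

def extract_person_boxes(detections: np.ndarray, frame_w: int, frame_h: int,
                         conf_threshold: float = 0.5):
    """Return pixel-space (startX, startY, endX, endY) boxes for detections of class 'person'."""
    boxes = []
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > conf_threshold:
            idx = int(detections[0, 0, i, 1])
            if CLASSES[idx] != "person":
                continue
            box = detections[0, 0, i, 3:7] * np.array([frame_w, frame_h, frame_w, frame_h])
            boxes.append(box.astype("int"))
    return boxes
```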
{
"source": "jiya-zhang/document-ai-samples",
"score": 2
} |
#### File: document-ai-samples/tax-processing-pipeline-python/docai_pipeline.py
```python
from os.path import basename as path_basename
from typing import List, Tuple
from google.api_core.exceptions import GoogleAPICallError
from consts import (
DOCAI_PROCESSOR_LOCATION,
DOCAI_PROJECT_ID,
FIRESTORE_PROJECT_ID,
)
from docai_utils import (
classify_document_bytes,
extract_document_entities,
process_document_bytes,
select_processor_from_classification,
)
from firestore_utils import save_to_firestore
def run_docai_pipeline(
local_files: List[Tuple[str, str]], firestore_collection: str
) -> List[str]:
"""
Classify Document Types,
Select Appropriate Parser Processor,
Extract Entities,
Save Entities to Firestore
"""
status_messages: List[str] = []
def progress_update(message: str):
"""
Print progress update to stdout and add to message queue
"""
print(message)
status_messages.append(message)
for file_path, mime_type in local_files:
file_name = path_basename(file_path)
# Read File into Memory
with open(file_path, "rb") as file:
file_content = file.read()
progress_update(f"Processing {file_name}")
document_classification = classify_document_bytes(file_content, mime_type)
progress_update(f"\tClassification: {document_classification}")
# Optional: If you want to ignore unclassified documents
if document_classification == "other":
progress_update(f"\tSkipping file: {file_name}")
continue
# Get Specialized Processor
(
processor_type,
processor_id,
) = select_processor_from_classification(document_classification)
progress_update(f"\tUsing Processor {processor_type}: {processor_id}")
# Run Parser
try:
document_proto = process_document_bytes(
DOCAI_PROJECT_ID,
DOCAI_PROCESSOR_LOCATION,
processor_id,
file_content,
mime_type,
)
except GoogleAPICallError:
print("Skipping file:", file_path)
continue
# Extract Entities from Document
document_entities = extract_document_entities(document_proto)
# Specific Classification
# e.g. w2_2020, 1099int_2020, 1099div_2020
document_entities["classification"] = document_classification
# Processor Type corresponds to a Broad Category
# e.g. Multiple W2 Years correspond to the same processor type
document_entities["broad_classification"] = processor_type.removesuffix(
"_PROCESSOR"
)
document_entities["source_file"] = file_name
document_id = document_entities["broad_classification"]
# Save Document Entities to Firestore
progress_update(f"\tWriting to Firestore Collection {firestore_collection}")
progress_update(f"\tDocument ID: {document_id}")
save_to_firestore(
project_id=FIRESTORE_PROJECT_ID,
collection=firestore_collection,
document_id=document_id,
data=document_entities,
)
return status_messages
``` |
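A hedged sketch of invoking the pipeline above. The file paths, MIME types, and collection name are illustrative, and the helper modules (`docai_utils`, `firestore_utils`, `consts`) are assumed to be configured for your project:

```python
if __name__ == "__main__":
    # Hypothetical sample documents; any (path, mime_type) pairs supported by Document AI would do.
    local_files = [
        ("samples/w2_example.pdf", "application/pdf"),
        ("samples/1099int_example.pdf", "application/pdf"),
    ]
    for message in run_docai_pipeline(local_files, firestore_collection="tax-documents"):
        print(message)
```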
{
"source": "jiyegui/feapder",
"score": 2
} |
#### File: core/spiders/batch_spider.py
```python
import datetime
import os
import time
import warnings
from collections.abc import Iterable
import feapder.setting as setting
import feapder.utils.tools as tools
from feapder.buffer.item_buffer import MAX_ITEM_COUNT
from feapder.buffer.request_buffer import RequestBuffer
from feapder.core.base_parser import BatchParser
from feapder.core.scheduler import Scheduler
from feapder.db.mysqldb import MysqlDB
from feapder.db.redisdb import RedisDB
from feapder.network.item import Item
from feapder.network.item import UpdateItem
from feapder.network.request import Request
from feapder.utils.log import log
from feapder.utils.perfect_dict import PerfectDict
from feapder.utils.redis_lock import RedisLock
CONSOLE_PIPELINE_PATH = "feapder.pipelines.console_pipeline.ConsolePipeline"
MYSQL_PIPELINE_PATH = "feapder.pipelines.mysql_pipeline.MysqlPipeline"
class BatchSpider(BatchParser, Scheduler):
def __init__(
self,
task_table,
batch_record_table,
batch_name,
batch_interval,
task_keys,
task_state="state",
min_task_count=10000,
check_task_interval=5,
task_limit=10000,
related_redis_key=None,
related_batch_record=None,
task_condition="",
task_order_by="",
redis_key=None,
thread_count=None,
begin_callback=None,
end_callback=None,
delete_keys=(),
auto_stop_when_spider_done=None,
send_run_time=False,
):
"""
@summary: 批次爬虫
必要条件
1、需有任务表
任务表中必须有id 及 任务状态字段 如 state。如指定parser_name字段,则任务会自动下发到对应的parser下, 否则会下发到所有的parser下。其他字段可根据爬虫需要的参数自行扩充
参考建表语句如下:
CREATE TABLE `table_name` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`param` varchar(1000) DEFAULT NULL COMMENT '爬虫需要的抓取数据需要的参数',
`state` int(11) DEFAULT NULL COMMENT '任务状态',
`parser_name` varchar(255) DEFAULT NULL COMMENT '任务解析器的脚本类名',
PRIMARY KEY (`id`),
UNIQUE KEY `nui` (`param`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
2、需有批次记录表 不存在自动创建
---------
@param task_table: mysql中的任务表
@param batch_record_table: mysql 中的批次记录表
@param batch_name: 批次采集程序名称
@param batch_interval: 批次间隔 天为单位。 如想一小时一批次,可写成1/24
@param task_keys: 需要获取的任务字段 列表 [] 如需指定解析的parser,则需将parser_name字段取出来。
@param task_state: mysql中任务表的任务状态字段
@param min_task_count: redis 中最少任务数, 少于这个数量会从mysql的任务表取任务
@param check_task_interval: 检查是否还有任务的时间间隔;
@param task_limit: 从数据库中取任务的数量
@param redis_key: 任务等数据存放在redis中的key前缀
@param thread_count: 线程数,默认为配置文件中的线程数
@param begin_callback: 爬虫开始回调函数
@param end_callback: 爬虫结束回调函数
@param delete_keys: 爬虫启动时删除的key,类型: 元组/bool/string。 支持正则; 常用于清空任务队列,否则重启时会断点续爬
@param auto_stop_when_spider_done: 爬虫抓取完毕后是否自动结束或等待任务,默认自动结束
@param send_run_time: 发送运行时间
@param related_redis_key: 有关联的其他爬虫任务表(redis)注意:要避免环路 如 A -> B & B -> A 。
@param related_batch_record: 有关联的其他爬虫批次表(mysql)注意:要避免环路 如 A -> B & B -> A 。
related_redis_key 与 related_batch_record 选其一配置即可;用于相关联的爬虫没结束时,本爬虫也不结束
若相关连的爬虫为批次爬虫,推荐以related_batch_record配置,
若相关连的爬虫为普通爬虫,无批次表,可以以related_redis_key配置
@param task_condition: 任务条件 用于从一个大任务表中挑选出数据自己爬虫的任务,即where后的条件语句
@param task_order_by: 取任务时的排序条件 如 id desc
---------
@result:
"""
Scheduler.__init__(
self,
redis_key=redis_key,
thread_count=thread_count,
begin_callback=begin_callback,
end_callback=end_callback,
delete_keys=delete_keys,
auto_stop_when_spider_done=auto_stop_when_spider_done,
auto_start_requests=False,
send_run_time=send_run_time,
batch_interval=batch_interval,
task_table=task_table,
)
self._redisdb = RedisDB()
self._mysqldb = MysqlDB()
self._request_buffer = RequestBuffer(self._redis_key)
        self._task_table = task_table  # task table in MySQL
        self._batch_record_table = batch_record_table  # batch record table in MySQL
        self._batch_name = batch_name  # name of the batch crawling program
        self._task_keys = task_keys  # task columns to fetch
        self._task_state = task_state  # name of the state column in the MySQL task table
        self._min_task_count = min_task_count  # minimum number of tasks in redis
        self._check_task_interval = check_task_interval
        self._task_limit = task_limit  # number of tasks fetched from MySQL at a time
        self._related_task_tables = [
            setting.TAB_REQUSETS.format(redis_key=redis_key)
        ]  # this spider's own task table also needs to be checked for pending tasks
if related_redis_key:
self._related_task_tables.append(
setting.TAB_REQUSETS.format(redis_key=related_redis_key)
)
self._related_batch_record = related_batch_record
self._task_condition = task_condition
self._task_condition_prefix_and = task_condition and " and {}".format(
task_condition
)
self._task_condition_prefix_where = task_condition and " where {}".format(
task_condition
)
self._task_order_by = task_order_by and " order by {}".format(task_order_by)
self._batch_date_cache = None
if self._batch_interval >= 1:
self._date_format = "%Y-%m-%d"
elif self._batch_interval < 1 and self._batch_interval >= 1 / 24:
self._date_format = "%Y-%m-%d %H"
else:
self._date_format = "%Y-%m-%d %H:%M"
        # alerting
        self._send_msg_interval = datetime.timedelta(hours=1)  # send an alert at most once per hour
        self._last_send_msg_time = None
        self._spider_last_done_time = None  # time of the most recently recorded done-task count
        self._spider_last_done_count = 0  # most recently recorded done-task count
        self._spider_deal_speed_cached = None
        self._is_more_parsers = True  # multi-template spider
def init_property(self):
"""
每个批次开始时需要重置的属性
@return:
"""
self._last_send_msg_time = None
self._spider_last_done_time = None
self._spider_last_done_count = 0 # 爬虫刚开始启动时已做任务数量
def add_parser(self, parser):
parser = parser(
self._task_table,
self._batch_record_table,
self._task_state,
self._date_format,
self._mysqldb,
        )  # instantiate the parser
self._parsers.append(parser)
def start_monitor_task(self):
"""
@summary: 监控任务状态
---------
---------
@result:
"""
if not self._parsers: # 不是多模版模式, 将自己注入到parsers,自己为模版
self._is_more_parsers = False
self._parsers.append(self)
elif len(self._parsers) <= 1:
self._is_more_parsers = False
self.create_batch_record_table()
# 添加任务
for parser in self._parsers:
parser.add_task()
is_first_check = True
while True:
try:
                if self.check_batch(is_first_check):  # this batch is already finished
                    if not self._auto_stop_when_spider_done:
                        is_first_check = True
                        log.info("All spider tasks are done; not stopping automatically, waiting for new tasks...")
                        time.sleep(self._check_task_interval)
                        continue
                    else:
                        break
                is_first_check = False
                # check whether redis still has tasks; if fewer than _min_task_count, pull more from MySQL
                tab_requests = setting.TAB_REQUSETS.format(redis_key=self._redis_key)
                todo_task_count = self._redisdb.zget_count(tab_requests)
                tasks = []
                if todo_task_count < self._min_task_count:  # fetch tasks from MySQL
                    # update the task-state counts in the batch record table
                    self.update_task_done_count()
                    log.info("Only %s tasks left in redis; fetching more tasks from MySQL" % todo_task_count)
                    tasks = self.get_todo_task_from_mysql()
                    if not tasks:  # tasks with state 0 are done; check whether tasks with state 2 were lost
                        if (
                            todo_task_count == 0
                        ):  # no pending tasks in redis, so MySQL tasks with state 2 are lost and must be redone
                            lose_task_count = self.get_lose_task_count()
                            if not lose_task_count:
                                time.sleep(self._check_task_interval)
                                continue
                            elif (
                                lose_task_count > self._task_limit * 5
                            ):  # too many lost tasks: reset them directly; otherwise waiting for redis to drain before each batch of lost tasks is too slow
                                log.info("Resetting {} lost tasks to pending".format(lose_task_count))
                                # reset in-progress tasks back to pending
                                if self.reset_lose_task_from_mysql():
                                    log.info("Lost tasks reset successfully")
                                else:
                                    log.info("Failed to reset lost tasks")
                                continue
                            else:  # few lost tasks: fetch them directly
                                log.info(
                                    "Fetching lost tasks: {} in total, taking {}".format(
                                        lose_task_count,
                                        self._task_limit
                                        if self._task_limit <= lose_task_count
                                        else lose_task_count,
                                    )
                                )
                                tasks = self.get_doing_task_from_mysql()
                    else:
                        log.info("Fetched %s pending tasks from MySQL" % len(tasks))
                else:
                    log.info("redis still has %s backlogged tasks; not dispatching new tasks for now" % todo_task_count)
                if not tasks:
                    if todo_task_count >= self._min_task_count:
                        # log.info("Tasks in progress; %s tasks remaining in redis" % todo_task_count)
                        pass
                    else:
                        log.info("No pending tasks in MySQL; %s tasks remaining in redis" % todo_task_count)
                else:
                    # make start requests
                    self.distribute_task(tasks)
                    log.info("Tasks added to redis successfully")
except Exception as e:
log.exception(e)
time.sleep(self._check_task_interval)
def create_batch_record_table(self):
sql = (
"select table_name from information_schema.tables where table_name like '%s'"
% self._batch_record_table
)
tables_name = self._mysqldb.find(sql)
if not tables_name:
sql = """
CREATE TABLE `{table_name}` (
`id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT,
                    `batch_date` {batch_date} DEFAULT NULL COMMENT 'batch time',
                    `total_count` int(11) DEFAULT NULL COMMENT 'total number of tasks',
                    `done_count` int(11) DEFAULT NULL COMMENT 'number of finished tasks (state 1 or -1)',
                    `fail_count` int(11) DEFAULT NULL COMMENT 'number of failed tasks (state -1)',
                    `interval` float(11) DEFAULT NULL COMMENT 'batch interval',
                    `interval_unit` varchar(20) DEFAULT NULL COMMENT 'batch interval unit: day, hour',
                    `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'batch start time',
                    `update_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'last update time of this record',
                    `is_done` int(11) DEFAULT '0' COMMENT 'whether the batch is finished: 0 no, 1 yes',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
""".format(
table_name=self._batch_record_table,
batch_date="date" if self._date_format == "%Y-%m-%d" else "datetime",
)
self._mysqldb.execute(sql)
def distribute_task(self, tasks):
"""
        @summary: Distribute tasks
---------
@param tasks:
---------
@result:
"""
        if self._is_more_parsers:  # multi-template spider: dispatch each task to its designated parser
            for task in tasks:
                for parser in self._parsers:  # find the parser matching this task
if parser.name in task:
task = PerfectDict(
_dict=dict(zip(self._task_keys, task)), _values=list(task)
)
requests = parser.start_requests(task)
if requests and not isinstance(requests, Iterable):
raise Exception(
"%s.%s返回值必须可迭代" % (parser.name, "start_requests")
)
result_type = 1
for request in requests or []:
if isinstance(request, Request):
request.parser_name = request.parser_name or parser.name
self._request_buffer.put_request(request)
result_type = 1
elif isinstance(request, Item):
self._item_buffer.put_item(request)
result_type = 2
if (
self._item_buffer.get_items_count()
>= MAX_ITEM_COUNT
):
self._item_buffer.flush()
                            elif callable(request):  # a callable request may be a function performing a database update
if result_type == 1:
self._request_buffer.put_request(request)
else:
self._item_buffer.put_item(request)
if (
self._item_buffer.get_items_count()
>= MAX_ITEM_COUNT
):
self._item_buffer.flush()
else:
raise TypeError(
"start_requests yield result type error, expect Request、Item、callback func, bug get type: {}".format(
type(requests)
)
)
break
        else:  # the task has no designated parser, so dispatch it to every parser
for task in tasks:
for parser in self._parsers:
task = PerfectDict(
_dict=dict(zip(self._task_keys, task)), _values=list(task)
)
requests = parser.start_requests(task)
if requests and not isinstance(requests, Iterable):
raise Exception(
"%s.%s返回值必须可迭代" % (parser.name, "start_requests")
)
result_type = 1
for request in requests or []:
if isinstance(request, Request):
request.parser_name = request.parser_name or parser.name
self._request_buffer.put_request(request)
result_type = 1
elif isinstance(request, Item):
self._item_buffer.put_item(request)
result_type = 2
if self._item_buffer.get_items_count() >= MAX_ITEM_COUNT:
self._item_buffer.flush()
                        elif callable(request):  # callable的request可能是更新数据库操作的函数
if result_type == 1:
self._request_buffer.put_request(request)
else:
self._item_buffer.put_item(request)
if (
self._item_buffer.get_items_count()
>= MAX_ITEM_COUNT
):
self._item_buffer.flush()
self._request_buffer.flush()
self._item_buffer.flush()
def __get_task_state_count(self):
sql = "select {state}, count(1) from {task_table}{task_condition} group by {state}".format(
state=self._task_state,
task_table=self._task_table,
task_condition=self._task_condition_prefix_where,
)
task_state_count = self._mysqldb.find(sql)
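        # Illustrative result shape (numbers are made up): task_state_count =
        # ((0, 10), (1, 5), (-1, 2)) -> total_count = 17, done_count = 7
        # (states 1 and -1), failed_count = 2 (state -1).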
task_state = {
"total_count": sum(count for state, count in task_state_count),
"done_count": sum(
count for state, count in task_state_count if state in (1, -1)
),
"failed_count": sum(
count for state, count in task_state_count if state == -1
),
}
return task_state
def update_task_done_count(self):
"""
@summary: 更新批次表中的任务状态
---------
---------
@result:
"""
task_count = self.__get_task_state_count()
# log.info('《%s》 批次进度 %s/%s' % (self._batch_name, done_task_count, total_task_count))
# 更新批次表
sql = "update {} set done_count = {}, total_count = {}, fail_count = {}, update_time = CURRENT_TIME, is_done=0, `interval` = {}, interval_unit = '{}' where batch_date = '{}'".format(
self._batch_record_table,
task_count.get("done_count"),
task_count.get("total_count"),
task_count.get("failed_count"),
self._batch_interval
if self._batch_interval >= 1
else self._batch_interval * 24,
"day" if self._batch_interval >= 1 else "hour",
self.batch_date,
)
self._mysqldb.update(sql)
def update_is_done(self):
sql = "update {} set is_done = 1, update_time = CURRENT_TIME where batch_date = '{}' and is_done = 0".format(
self._batch_record_table, self.batch_date
)
self._mysqldb.update(sql)
def get_todo_task_from_mysql(self):
"""
@summary: 取待做的任务
---------
---------
@result:
"""
# TODO 分批取数据 每批最大取 1000000个,防止内存占用过大
# 查询任务
sql = "select %s from %s where %s = 0%s%s limit %s" % (
", ".join(self._task_keys),
self._task_table,
self._task_state,
self._task_condition_prefix_and,
self._task_order_by,
self._task_limit,
)
tasks = self._mysqldb.find(sql)
if tasks:
# 更新任务状态
for i in range(0, len(tasks), 10000): # 10000 一批量更新
task_ids = str(
tuple([task[0] for task in tasks[i : i + 10000]])
).replace(",)", ")")
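                # str(tuple(...)) renders a single id as e.g. "(123,)"; the replace
                # drops the trailing comma so the "in (...)" clause stays valid SQL.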
sql = "update %s set %s = 2 where id in %s" % (
self._task_table,
self._task_state,
task_ids,
)
self._mysqldb.update(sql)
return tasks
def get_doing_task_from_mysql(self):
"""
@summary: 取正在做的任务
---------
---------
@result:
"""
# 查询任务
sql = "select %s from %s where %s = 2%s%s limit %s" % (
", ".join(self._task_keys),
self._task_table,
self._task_state,
self._task_condition_prefix_and,
self._task_order_by,
self._task_limit,
)
tasks = self._mysqldb.find(sql)
return tasks
def get_lose_task_count(self):
sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count from {batch_record_table} order by id desc limit 1'.format(
date_format=self._date_format.replace(":%M", ":%i"),
batch_record_table=self._batch_record_table,
)
batch_info = self._mysqldb.find(sql) # (('2018-08-19', 49686, 0),)
batch_date, total_count, done_count = batch_info[0]
return total_count - done_count
def reset_lose_task_from_mysql(self):
"""
@summary: 重置丢失任务为待做
---------
---------
@result:
"""
sql = "update {table} set {state} = 0 where {state} = 2{task_condition}".format(
table=self._task_table,
state=self._task_state,
task_condition=self._task_condition_prefix_and,
)
return self._mysqldb.update(sql)
def get_deal_speed(self, total_count, done_count, last_batch_date):
"""
获取处理速度
@param total_count: 总数量
@param done_count: 做完数量
@param last_batch_date: 批次时间 datetime
@return:
deal_speed (条/小时), need_time (秒), overflow_time(秒) ( overflow_time < 0 时表示提前多少秒完成 )
或
None
"""
if not self._spider_last_done_count:
now_date = datetime.datetime.now()
self._spider_last_done_count = done_count
self._spider_last_done_time = now_date
if done_count > self._spider_last_done_count:
now_date = datetime.datetime.now()
time_interval = (now_date - self._spider_last_done_time).total_seconds()
deal_speed = (
done_count - self._spider_last_done_count
) / time_interval # 条/秒
need_time = (total_count - done_count) / deal_speed # 单位秒
overflow_time = (
(now_date - last_batch_date).total_seconds()
+ need_time
- datetime.timedelta(days=self._batch_interval).total_seconds()
) # 溢出时间 秒
calculate_speed_time = now_date.strftime("%Y-%m-%d %H:%M:%S") # 统计速度时间
deal_speed = int(deal_speed * 3600) # 条/小时
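            # Illustrative numbers (not from the source): 600 tasks finished in
            # 1200 s -> 0.5 tasks/s -> int(0.5 * 3600) = 1800 tasks/hour; with
            # 4000 tasks left, need_time = 4000 / 0.5 = 8000 s.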
# 更新最近已做任务数及时间
self._spider_last_done_count = done_count
self._spider_last_done_time = now_date
self._spider_deal_speed_cached = (
deal_speed,
need_time,
overflow_time,
calculate_speed_time,
)
return self._spider_deal_speed_cached
def init_task(self):
"""
@summary: 初始化任务表中的任务, 新一个批次开始时调用。 可能会重写
---------
---------
@result:
"""
sql = "update {task_table} set {state} = 0 where {state} != -1{task_condition}".format(
task_table=self._task_table,
state=self._task_state,
task_condition=self._task_condition_prefix_and,
)
return self._mysqldb.update(sql)
def check_batch(self, is_first_check=False):
"""
@summary: 检查批次是否完成
---------
@param: is_first_check 是否为首次检查,若首次检查,且检查结果为批次已完成,则不发送批次完成消息。因为之前发送过了
---------
@result: 完成返回True 否则False
"""
sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count from {batch_record_table} order by id desc limit 1'.format(
date_format=self._date_format.replace(":%M", ":%i"),
batch_record_table=self._batch_record_table,
)
batch_info = self._mysqldb.find(sql) # (('2018-08-19', 49686, 0),)
if batch_info:
batch_date, total_count, done_count = batch_info[0]
now_date = datetime.datetime.now()
last_batch_date = datetime.datetime.strptime(batch_date, self._date_format)
time_difference = now_date - last_batch_date
if total_count == done_count and time_difference < datetime.timedelta(
days=self._batch_interval
): # 若在本批次内,再次检查任务表是否有新增任务
# # 改成查询任务表 看是否真的没任务了,因为batch_record表里边的数量可能没来得及更新
task_count = self.__get_task_state_count()
total_count = task_count.get("total_count")
done_count = task_count.get("done_count")
if total_count == done_count:
# 检查相关联的爬虫是否完成
                    related_spider_done = self.related_spider_is_done()
                    if related_spider_done == False:
msg = "《{}》本批次未完成, 正在等待依赖爬虫 {} 结束. 批次时间 {} 批次进度 {}/{}".format(
self._batch_name,
self._related_batch_record or self._related_task_tables,
batch_date,
done_count,
total_count,
)
log.info(msg)
# 检查是否超时 超时发出报警
if time_difference >= datetime.timedelta(
days=self._batch_interval
): # 已经超时
if (
not self._last_send_msg_time
or now_date - self._last_send_msg_time
>= self._send_msg_interval
):
self._last_send_msg_time = now_date
self.send_msg(msg, level="error")
return False
                    elif related_spider_done == True:
# 更新is_done 状态
self.update_is_done()
else:
self.update_is_done()
msg = "《{}》本批次完成 批次时间 {} 共处理 {} 条任务".format(
self._batch_name, batch_date, done_count
)
log.info(msg)
if not is_first_check:
self.send_msg(msg)
# 判断下一批次是否到
if time_difference >= datetime.timedelta(days=self._batch_interval):
msg = "《{}》下一批次开始".format(self._batch_name)
log.info(msg)
self.send_msg(msg)
# 初始化任务表状态
if self.init_task() != False: # 更新失败返回False 其他返回True/None
# 初始化属性
self.init_property()
is_success = (
self.record_batch()
) # 有可能插入不成功,但是任务表已经重置了,不过由于当前时间为下一批次的时间,检查批次是否结束时不会检查任务表,所以下次执行时仍然会重置
if is_success:
log.info("插入新批次记录成功 1分钟后开始下发任务") # 防止work批次时间没来得及更新
tools.delay_time(60)
return False # 下一批次开始
else:
return True # 下一批次不开始。先不派发任务,因为批次表新批次插入失败了,需要插入成功后再派发任务
else:
log.info("《{}》下次批次时间未到".format(self._batch_name))
if not is_first_check:
self.send_msg("《{}》下次批次时间未到".format(self._batch_name))
return True
else:
if time_difference >= datetime.timedelta(
days=self._batch_interval
): # 已经超时
time_out = time_difference - datetime.timedelta(
days=self._batch_interval
)
time_out_pretty = tools.format_seconds(time_out.total_seconds())
msg = "《{}》本批次已超时{} 批次时间 {}, 批次进度 {}/{}".format(
self._batch_name,
time_out_pretty,
batch_date,
done_count,
total_count,
)
if self._batch_interval >= 1:
msg += ", 期望时间{}天".format(self._batch_interval)
else:
msg += ", 期望时间{}小时".format(self._batch_interval * 24)
result = self.get_deal_speed(
total_count=total_count,
done_count=done_count,
last_batch_date=last_batch_date,
)
if result:
deal_speed, need_time, overflow_time, calculate_speed_time = (
result
)
msg += ", 任务处理速度于{}统计, 约 {}条/小时, 预计还需 {}".format(
calculate_speed_time,
deal_speed,
tools.format_seconds(need_time),
)
if overflow_time > 0:
msg += ", 该批次预计总超时 {}, 请及时处理".format(
tools.format_seconds(overflow_time)
)
log.info(msg)
if (
not self._last_send_msg_time
or now_date - self._last_send_msg_time
>= self._send_msg_interval
):
self._last_send_msg_time = now_date
self.send_msg(msg, level="error")
else: # 未超时
remaining_time = (
datetime.timedelta(days=self._batch_interval) - time_difference
)
remaining_time_pretty = tools.format_seconds(
remaining_time.total_seconds()
)
if self._batch_interval >= 1:
msg = "《{}》本批次正在进行, 批次时间 {}, 批次进度 {}/{}, 期望时间{}天, 剩余{}".format(
self._batch_name,
batch_date,
done_count,
total_count,
self._batch_interval,
remaining_time_pretty,
)
else:
msg = "《{}》本批次正在进行, 批次时间 {}, 批次进度 {}/{}, 期望时间{}小时, 剩余{}".format(
self._batch_name,
batch_date,
done_count,
total_count,
self._batch_interval * 24,
remaining_time_pretty,
)
result = self.get_deal_speed(
total_count=total_count,
done_count=done_count,
last_batch_date=last_batch_date,
)
if result:
deal_speed, need_time, overflow_time, calculate_speed_time = (
result
)
msg += ", 任务处理速度于{}统计, 约 {}条/小时, 预计还需 {}".format(
calculate_speed_time,
deal_speed,
tools.format_seconds(need_time),
)
if overflow_time > 0:
msg += ", 该批次可能会超时 {}, 请及时处理".format(
tools.format_seconds(overflow_time)
)
# 发送警报
if (
not self._last_send_msg_time
or now_date - self._last_send_msg_time
>= self._send_msg_interval
):
self._last_send_msg_time = now_date
self.send_msg(msg, level="error")
elif overflow_time < 0:
msg += ", 该批次预计提前 {} 完成".format(
tools.format_seconds(-overflow_time)
)
log.info(msg)
else:
# 插入batch_date
self.record_batch()
# 初始化任务表状态 可能有产生任务的代码
self.init_task()
return False
def related_spider_is_done(self):
"""
相关连的爬虫是否跑完
@return: True / False / None 表示无相关的爬虫 可由自身的total_count 和 done_count 来判断
"""
for related_redis_task_table in self._related_task_tables:
if self._redisdb.exists_key(related_redis_task_table):
return False
if self._related_batch_record:
sql = "select is_done from {} order by id desc limit 1".format(
self._related_batch_record
)
is_done = self._mysqldb.find(sql)
is_done = is_done[0][0] if is_done else None
if is_done is None:
log.warning("相关联的批次表不存在或无批次信息")
return None
if not is_done:
return False
return True
def record_batch(self):
"""
@summary: 记录批次信息(初始化)
---------
---------
@result:
"""
# 查询总任务数
sql = "select count(1) from %s%s" % (
self._task_table,
self._task_condition_prefix_where,
)
total_task_count = self._mysqldb.find(sql)[0][0]
batch_date = tools.get_current_date(self._date_format)
sql = (
"insert into %s (batch_date, done_count, total_count, `interval`, interval_unit, create_time) values ('%s', %s, %s, %s, '%s', CURRENT_TIME)"
% (
self._batch_record_table,
batch_date,
0,
total_task_count,
self._batch_interval
if self._batch_interval >= 1
else self._batch_interval * 24,
"day" if self._batch_interval >= 1 else "hour",
)
)
affect_count = self._mysqldb.add(sql) # None / 0 / 1 (1 为成功)
if affect_count:
# 重置批次日期
self._batch_date_cache = batch_date
# 重新刷下self.batch_date 中的 os.environ.get('batch_date') 否则日期还停留在上一个批次
os.environ["batch_date"] = self._batch_date_cache
# 爬虫开始
self.spider_begin()
self.record_spider_state(
spider_type=2,
state=0,
batch_date=batch_date,
spider_start_time=tools.get_current_date(),
batch_interval=self._batch_interval,
)
else:
log.error("插入新批次失败")
return affect_count
# -------- 批次结束逻辑 ------------
def task_is_done(self):
"""
@summary: 检查任务状态 是否做完 同时更新批次时间 (不能挂 挂了批次时间就不更新了)
---------
---------
@result: True / False (做完 / 未做完)
"""
is_done = False
# 查看批次记录表任务状态
sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count, is_done from {batch_record_table} order by id desc limit 1'.format(
date_format=self._date_format.replace(":%M", ":%i"),
batch_record_table=self._batch_record_table,
)
batch_info = self._mysqldb.find(sql)
if batch_info is None:
raise Exception("查询批次信息失败")
if batch_info:
self._batch_date_cache, total_count, done_count, is_done = batch_info[
0
] # 更新self._batch_date_cache, 防止新批次已经开始了,但self._batch_date_cache还是原来的批次时间
log.info(
"《%s》 批次时间%s 批次进度 %s/%s 完成状态 %d"
% (
self._batch_name,
self._batch_date_cache,
done_count,
total_count,
is_done,
)
)
os.environ["batch_date"] = self._batch_date_cache # 更新BatchParser里边的批次时间
if is_done: # 检查任务表中是否有没做的任务 若有则is_done 为 False
# 比较耗时 加锁防止多进程同时查询
with RedisLock(
key=self._spider_name,
timeout=3600,
wait_timeout=0,
redis_cli=RedisDB().get_redis_obj(),
) as lock:
if lock.locked:
log.info("批次表标记已完成,正在检查任务表是否有未完成的任务")
sql = "select 1 from %s where (%s = 0 or %s=2)%s limit 1" % (
self._task_table,
self._task_state,
self._task_state,
self._task_condition_prefix_and,
)
tasks = self._mysqldb.find(sql) # [(1,)] / []
if tasks:
log.info("检测到任务表中有未完成任务,等待任务下发")
is_done = False
# 更新batch_record 表的is_done 状态,减少查询任务表的次数
sql = 'update {batch_record_table} set is_done = 0 where batch_date = "{batch_date}"'.format(
batch_record_table=self._batch_record_table,
batch_date=self._batch_date_cache,
)
self._mysqldb.update(sql)
else:
log.info("任务表中任务均已完成,爬虫结束")
else:
log.info("批次表标记已完成,其他爬虫进程正在检查任务表是否有未完成的任务,本进程跳过检查,继续等待")
is_done = False
return is_done
def run(self):
"""
@summary: 重写run方法 检查mysql中的任务是否做完, 做完停止
---------
---------
@result:
"""
try:
self.create_batch_record_table()
if not self._parsers: # 不是add_parser 模式
self._parsers.append(self)
self._start()
while True:
if (
self.task_is_done() and self.all_thread_is_done()
): # redis全部的任务已经做完 并且mysql中的任务已经做完(检查各个线程all_thread_is_done,防止任务没做完,就更新任务状态,导致程序结束的情况)
if not self._is_notify_end:
self.spider_end()
self.record_spider_state(
spider_type=2,
state=1,
batch_date=self._batch_date_cache,
spider_end_time=tools.get_current_date(),
batch_interval=self._batch_interval,
)
self._is_notify_end = True
if self._auto_stop_when_spider_done:
self._stop_all_thread()
break
else:
self._is_notify_end = False
self.check_task_status()
tools.delay_time(10) # 10秒钟检查一次爬虫状态
except Exception as e:
msg = "《%s》主线程异常 爬虫结束 exception: %s" % (self._batch_name, e)
log.error(msg)
self.send_msg(msg, level="error")
os._exit(137) # 使退出码为35072 方便爬虫管理器重启
@classmethod
def to_DebugBatchSpider(cls, *args, **kwargs):
# DebugBatchSpider 继承 cls
DebugBatchSpider.__bases__ = (cls,)
DebugBatchSpider.__name__ = cls.__name__
return DebugBatchSpider(*args, **kwargs)
class DebugBatchSpider(BatchSpider):
"""
Debug批次爬虫
"""
__debug_custom_setting__ = dict(
COLLECTOR_SLEEP_TIME=1,
COLLECTOR_TASK_COUNT=1,
# SPIDER
SPIDER_THREAD_COUNT=1,
SPIDER_SLEEP_TIME=0,
SPIDER_TASK_COUNT=1,
SPIDER_MAX_RETRY_TIMES=10,
REQUEST_TIME_OUT=600, # 10分钟
PROXY_ENABLE=False,
RETRY_FAILED_REQUESTS=False,
# 保存失败的request
SAVE_FAILED_REQUEST=False,
# 过滤
ITEM_FILTER_ENABLE=False,
REQUEST_FILTER_ENABLE=False,
OSS_UPLOAD_TABLES=(),
DELETE_KEYS=True,
ITEM_PIPELINES=[CONSOLE_PIPELINE_PATH],
)
def __init__(
self,
task_id=None,
task=None,
save_to_db=False,
update_stask=False,
*args,
**kwargs
):
"""
@param task_id: 任务id
@param task: 任务 task 与 task_id 二者选一即可
@param save_to_db: 数据是否入库 默认否
@param update_stask: 是否更新任务 默认否
@param args:
@param kwargs:
"""
warnings.warn(
"您正处于debug模式下,该模式下不会更新任务状态及数据入库,仅用于调试。正式发布前请更改为正常模式", category=Warning
)
if not task and not task_id:
raise Exception("task_id 与 task 不能同时为null")
kwargs["redis_key"] = kwargs["redis_key"] + "_debug"
if save_to_db and not self.__class__.__custom_setting__.get("ITEM_PIPELINES"):
self.__class__.__debug_custom_setting__.update(
ITEM_PIPELINES=[MYSQL_PIPELINE_PATH]
)
self.__class__.__custom_setting__.update(
self.__class__.__debug_custom_setting__
)
super(DebugBatchSpider, self).__init__(*args, **kwargs)
self._task_id = task_id
self._task = task
self._update_task = update_stask
def start_monitor_task(self):
"""
@summary: 监控任务状态
---------
---------
@result:
"""
if not self._parsers: # 不是多模版模式, 将自己注入到parsers,自己为模版
self._is_more_parsers = False
self._parsers.append(self)
elif len(self._parsers) <= 1:
self._is_more_parsers = False
if self._task:
self.distribute_task([self._task])
else:
tasks = self.get_todo_task_from_mysql()
if not tasks:
raise Exception("未获取到任务 请检查 task_id: {} 是否存在".format(self._task_id))
self.distribute_task(tasks)
os.environ.setdefault("batch_date", "1970-00-00")
log.debug("下发任务完毕")
def get_todo_task_from_mysql(self):
"""
@summary: 取待做的任务
---------
---------
@result:
"""
# 查询任务
sql = "select %s from %s where id=%s" % (
", ".join(self._task_keys),
self._task_table,
self._task_id,
)
tasks = self._mysqldb.find(sql)
return tasks
def save_cached(self, request, response, table):
pass
def update_task_state(self, task_id, state=1, *args, **kwargs):
"""
@summary: 更新任务表中任务状态,做完每个任务时代码逻辑中要主动调用。可能会重写
调用方法为 yield lambda : self.update_task_state(task_id, state)
---------
@param task_id:
@param state:
---------
@result:
"""
if self._update_task:
kwargs["id"] = task_id
kwargs[self._task_state] = state
sql = tools.make_update_sql(
self._task_table,
kwargs,
condition="id = {task_id}".format(task_id=task_id),
)
if self._mysqldb.update(sql):
log.debug("置任务%s状态成功" % task_id)
else:
log.error("置任务%s状态失败 sql=%s" % (task_id, sql))
def update_task_batch(self, task_id, state=1, *args, **kwargs):
"""
批量更新任务 多处调用,更新的字段必须一致
注意:需要 写成 yield update_task_batch(...) 否则不会更新
@param task_id:
@param state:
@param kwargs:
@return:
"""
if self._update_task:
kwargs["id"] = task_id
kwargs[self._task_state] = state
update_item = UpdateItem(**kwargs)
update_item.table_name = self._task_table
update_item.name_underline = self._task_table + "_item"
return update_item
def delete_tables(self, delete_tables_list):
if isinstance(delete_tables_list, bool):
delete_tables_list = [self._redis_key + "*"]
elif not isinstance(delete_tables_list, (list, tuple)):
delete_tables_list = [delete_tables_list]
redis = RedisDB()
for delete_tab in delete_tables_list:
if delete_tab == "*":
delete_tab = self._redis_key + "*"
tables = redis.getkeys(delete_tab)
for table in tables:
log.info("正在删除表 %s" % table)
redis.clear(table)
def run(self):
self.start_monitor_task()
if not self._parsers: # 不是add_parser 模式
self._parsers.append(self)
self._start()
while True:
if self.all_thread_is_done():
self._stop_all_thread()
break
tools.delay_time(1) # 1秒钟检查一次爬虫状态
self.delete_tables([self._redis_key + "*"])
def record_spider_state(
self,
spider_type,
state,
batch_date=None,
spider_start_time=None,
spider_end_time=None,
batch_interval=None,
):
pass
``` |
{
"source": "jiyeking/js2dts",
"score": 2
} |
#### File: jiyeking/js2dts/merge_d_ts_files.py
```python
import os
def handle_file(path):
if os.path.isdir(path):
dir_files = os.listdir(path)
for dir_file in dir_files:
handle_file(os.path.join(path, dir_file))
else:
handle_d_ts(path)
new_lines = []
# only retain declare class content, delete other content
def handle_d_ts(d_ts_file):
if d_ts_file.endswith('.d.ts'):
d_ts_file_obj = open(d_ts_file, mode='r')
for line in d_ts_file_obj.readlines():
            if line.find('declare class') > -1:
new_lines.append(line.replace('declare class', 'export declare class'))
elif line.find('declare const _') > -1:
break
elif line.find('export = ') > -1:
pass
else:
new_lines.append(line)
d_ts_file_obj.close()
write_lines = []
def delete_repeat_content(new_lines):
class_list = []
class_dict = {}
current_class_name = ''
left_brace_num = 0
for line_str in new_lines:
origin_line_str = line_str
line_str = line_str.strip()
if line_str.startswith('/**') | line_str.startswith('*') | line_str.startswith('**/'):
class_list.append(origin_line_str)
elif line_str.endswith('{'):
left_brace_num = left_brace_num + 1
class_list.append(origin_line_str)
elif line_str.endswith('}'):
left_brace_num = left_brace_num - 1
class_list.append(origin_line_str)
if left_brace_num == 0:
# save to class dict - delete repeat content
if class_dict.get(current_class_name):
pass
else:
class_dict[current_class_name] = class_list[:]
class_list = []
elif left_brace_num > 0:
class_list.append(origin_line_str)
if line_str.startswith('export declare class'):
current_class_name = line_str
for class_dict_k in class_dict.keys():
for write_line_str in class_dict[class_dict_k]:
write_lines.append(write_line_str)
# write to index.d.ts file
def write_to_index_d_ts(dist_dir):
delete_repeat_content(new_lines)
fo = open(os.path.join(dist_dir, "index.d.ts"), "w")
fo.writelines(write_lines)
fo.close()
dir_name = input('please input d.ts files dir: ')
dist_dir_name = input('please input dist merged index.d.ts dir: ')
if dist_dir_name:
pass
else:
dist_dir_name = os.getcwd()
handle_file(os.path.abspath(dir_name))
write_to_index_d_ts(dist_dir_name)
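# Example run (hypothetical paths): pointing the first prompt at ./types and the
# second at ./dist scans every *.d.ts under ./types, keeps the first occurrence
# of each "export declare class" block, and writes the merged, deduplicated
# result to ./dist/index.d.ts.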
``` |
{
"source": "jiyeme/proxy_pool",
"score": 2
} |
#### File: proxy_pool/helper/proxy.py
```python
__author__ = 'JHao'
import json
class Proxy(object):
    def __init__(self, proxy, fail_count=0, region="", proxy_type="",
                 source="", check_count=0, last_status="", last_time="", proxy_tag=None):
self._proxy = proxy
self._fail_count = fail_count
self._region = region
self._type = proxy_type
        self._tag = proxy_tag if proxy_tag is not None else []
self._source = source
self._check_count = check_count
self._last_status = last_status
self._last_time = last_time
@classmethod
def createFromJson(cls, proxy_json):
"""
根据proxy属性json创建Proxy实例
:param proxy_json:
:return:
"""
proxy_dict = json.loads(proxy_json)
return cls(proxy=proxy_dict.get("proxy", ""),
fail_count=proxy_dict.get("fail_count", 0),
region=proxy_dict.get("region", ""),
proxy_type=proxy_dict.get("type", ""),
proxy_tag=proxy_dict.get("tag", ""),
source=proxy_dict.get("source", ""),
check_count=proxy_dict.get("check_count", 0),
last_status=proxy_dict.get("last_status", ""),
last_time=proxy_dict.get("last_time", "")
)
@property
def proxy(self):
""" 代理 ip:port """
return self._proxy
@property
def fail_count(self):
""" 检测失败次数 """
return self._fail_count
@property
def region(self):
""" 地理位置(国家/城市) """
return self._region
@property
def type(self):
""" 透明/匿名/高匿 """
return self._type
@property
def tag(self):
""" 标签 """
return self._tag
@property
def source(self):
""" 代理来源 """
return self._source
@property
def check_count(self):
""" 代理检测次数 """
return self._check_count
@property
def last_status(self):
""" 最后一次检测结果 1 -> 可用; 0 -> 不可用"""
return self._last_status
@property
def last_time(self):
""" 最后一次检测时间 """
return self._last_time
@property
def to_dict(self):
""" 属性字典 """
return {"proxy": self._proxy,
"fail_count": self._fail_count,
"region": self._region,
"type": self._type,
"tag": self._tag,
"source": self._source,
"check_count": self.check_count,
"last_status": self.last_status,
"last_time": self.last_time}
@property
def to_json(self):
""" 属性json格式 """
return json.dumps(self.to_dict, ensure_ascii=False)
# --- proxy method ---
@fail_count.setter
def fail_count(self, value):
self._fail_count = value
@region.setter
def region(self, value):
self._region = value
@type.setter
def type(self, value):
self._type = value
@tag.setter
def tag(self, value):
self._tag = value
@source.setter
def source(self, value):
self._source = value
@check_count.setter
def check_count(self, value):
self._check_count = value
@last_status.setter
def last_status(self, value):
self._last_status = value
@last_time.setter
def last_time(self, value):
self._last_time = value
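# --- usage sketch (not part of the original module; the values are made up) ---
if __name__ == '__main__':
    demo = Proxy.createFromJson('{"proxy": "127.0.0.1:8080", "source": "demo"}')
    demo.check_count = demo.check_count + 1
    print(demo.to_json)  # serializes the attribute dict back to JSON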
``` |
{
"source": "jiye-ML/CoCosNet",
"score": 2
} |
#### File: CoCosNet/data/celebahq_dataset.py
```python
import os
import torch
import numpy as np
from PIL import Image
from data.pix2pix_dataset import Pix2pixDataset
from data.base_dataset import get_params, get_transform
class CelebAHQDataset(Pix2pixDataset):
#hair, skin, l_brow, r_blow, l_eye, r_eye, l_ear, r_ear, nose, u_lip, mouth, l_lip, neck,
#cloth, hat, eye_g, ear_r, neck_l
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='resize_and_crop')
parser.set_defaults(no_pairing_check=True)
if is_train:
parser.set_defaults(load_size=286)
else:
parser.set_defaults(load_size=256)
parser.set_defaults(crop_size=256)
parser.set_defaults(display_winsize=256)
parser.set_defaults(label_nc=19)
parser.set_defaults(contain_dontcare_label=False)
parser.set_defaults(cache_filelist_read=False)
parser.set_defaults(cache_filelist_write=False)
return parser
def get_paths(self, opt):
if opt.phase == 'train':
fd = open(os.path.join(opt.dataroot, 'train.txt'))
lines = fd.readlines()
fd.close()
elif opt.phase == 'test':
fd = open(os.path.join(opt.dataroot, 'val.txt'))
lines = fd.readlines()
fd.close()
image_paths = []
label_paths = []
for i in range(len(lines)):
image_paths.append(os.path.join(opt.dataroot, 'CelebA-HQ-img', lines[i].strip() + '.jpg'))
label_paths.append(os.path.join(opt.dataroot, 'CelebAMask-HQ-mask-anno', 'all_parts_except_glasses', lines[i].strip().zfill(5) + '.png'))
return label_paths, image_paths
def get_ref(self, opt):
extra = ''
if opt.phase == 'test':
extra = '_test'
with open('./data/celebahq_ref{}.txt'.format(extra)) as fd:
lines = fd.readlines()
ref_dict = {}
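        # Each line is assumed to look like "<key>,<ref_1>,...,<ref_n>" (the
        # actual file names are not shown here); in the test phase every
        # candidate reference is kept, in training only the first and the last.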
for i in range(len(lines)):
items = lines[i].strip().split(',')
key = items[0]
if opt.phase == 'test':
val = items[1:]
else:
val = [items[1], items[-1]]
ref_dict[key] = val
train_test_folder = ('', '')
return ref_dict, train_test_folder
def get_label_tensor(self, path):
# parts = ['skin', 'hair', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'nose', 'u_lip', 'mouth', 'l_lip', 'neck',
# 'cloth', 'hat', 'eye_g', 'ear_r', 'neck_l']
label_except_glasses = Image.open(path).convert('L')
root, name = path.replace('\\', '/').split('all_parts_except_glasses/')
idx = name.split('.')[0]
subfolder = str(int(idx) // 2000)
if os.path.exists(os.path.join(root, subfolder, idx + '_eye_g.png')):
glasses = Image.open(os.path.join(root, subfolder, idx + '_eye_g.png')).convert('L')
else:
glasses = Image.fromarray(np.zeros(label_except_glasses.size, dtype=np.uint8))
params = get_params(self.opt, label_except_glasses.size)
transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
label_except_glasses_tensor = transform_label(label_except_glasses) * 255.0
glasses_tensor = transform_label(glasses)
label_tensor = torch.cat((label_except_glasses_tensor, glasses_tensor), dim=0)
return label_tensor, params
def imgpath_to_labelpath(self, path):
root, name = path.split('CelebA-HQ-img/')
label_path = os.path.join(root, 'CelebAMask-HQ-mask-anno', 'all_parts_except_glasses', name.split('.')[0].zfill(5) + '.png')
return label_path
```
#### File: CoCosNet/data/celebahqedge_dataset.py
```python
import os
import cv2
import torch
import numpy as np
from PIL import Image
from skimage import feature
from data.pix2pix_dataset import Pix2pixDataset
from data.base_dataset import get_params, get_transform
class CelebAHQEdgeDataset(Pix2pixDataset):
#hair, skin, l_brow, r_blow, l_eye, r_eye, l_ear, r_ear, nose, u_lip, mouth, l_lip, neck,
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='resize_and_crop')
parser.set_defaults(no_pairing_check=True)
if is_train:
parser.set_defaults(load_size=286)
else:
parser.set_defaults(load_size=256)
parser.set_defaults(crop_size=256)
parser.set_defaults(display_winsize=256)
parser.set_defaults(label_nc=15)
parser.set_defaults(contain_dontcare_label=False)
parser.set_defaults(cache_filelist_read=False)
parser.set_defaults(cache_filelist_write=False)
return parser
def get_paths(self, opt):
if opt.phase == 'train':
fd = open(os.path.join(opt.dataroot, 'train.txt'))
lines = fd.readlines()
fd.close()
elif opt.phase == 'test':
fd = open(os.path.join(opt.dataroot, 'val.txt'))
lines = fd.readlines()
fd.close()
image_paths = []
label_paths = []
for i in range(len(lines)):
image_paths.append(os.path.join(opt.dataroot, 'CelebA-HQ-img', lines[i].strip() + '.jpg'))
subfolder = str(int(lines[i].strip()) // 2000)
label_paths.append(os.path.join(opt.dataroot, 'CelebAMask-HQ-mask-anno', subfolder, lines[i].strip().zfill(5) + '_{}.png'))
return label_paths, image_paths
def get_ref(self, opt):
extra = ''
if opt.phase == 'test':
extra = '_test'
with open('./data/celebahq_ref{}.txt'.format(extra)) as fd:
lines = fd.readlines()
ref_dict = {}
for i in range(len(lines)):
items = lines[i].strip().split(',')
key = items[0]
if opt.phase == 'test':
val = items[1:]
else:
val = [items[1], items[-1]]
ref_dict[key] = val
train_test_folder = ('', '')
return ref_dict, train_test_folder
def get_edges(self, edge, t):
edge[:,1:] = edge[:,1:] | (t[:,1:] != t[:,:-1])
edge[:,:-1] = edge[:,:-1] | (t[:,1:] != t[:,:-1])
edge[1:,:] = edge[1:,:] | (t[1:,:] != t[:-1,:])
edge[:-1,:] = edge[:-1,:] | (t[1:,:] != t[:-1,:])
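        # Worked example (illustrative): with t = [[0, 0], [0, 1]] and edge all
        # zeros, the column comparison flags (1, 1) and (1, 0), the row
        # comparison flags (1, 1) and (0, 1), so edge becomes [[0, 1], [1, 1]]:
        # the lone "1" pixel plus its left and upper neighbours.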
return edge
def get_label_tensor(self, path):
inner_parts = ['skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'nose', 'u_lip', 'mouth', 'l_lip', 'eye_g', 'hair']
img_path = self.labelpath_to_imgpath(path)
img = Image.open(img_path).resize((self.opt.load_size, self.opt.load_size), resample=Image.BILINEAR)
params = get_params(self.opt, img.size)
transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
transform_img = get_transform(self.opt, params, method=Image.BILINEAR, normalize=False)
inner_label = np.ones(img.size, dtype=np.uint8)
edges = np.zeros(img.size, dtype=np.uint8)
tensors_dist = 0
e = 1
for part in inner_parts:
            edge = np.zeros(img.size, dtype=np.uint8)  # this is for the distance-transform map of each facial part
if os.path.exists(path.format(part)):
part_label = Image.open(path.format(part)).convert('L').resize((self.opt.load_size, self.opt.load_size), resample=Image.NEAREST)
part_label = np.array(part_label)
if part == 'hair':
inner_label[part_label == 255] = 1
else:
inner_label[part_label == 255] = 0
edges = self.get_edges(edges, part_label)
edge = self.get_edges(edge, part_label)
im_dist = cv2.distanceTransform(255-edge*255, cv2.DIST_L1, 3)
im_dist = np.clip((im_dist / 3), 0, 255).astype(np.uint8)
tensor_dist = transform_img(Image.fromarray(im_dist))
tensors_dist = tensor_dist if e == 1 else torch.cat([tensors_dist, tensor_dist])
e += 1
# canny edge for background
canny_edges = feature.canny(np.array(img.convert('L')))
canny_edges = canny_edges * inner_label
edges_all = edges + canny_edges
edges_all[edges_all > 1] = 1
tensor_edges_all = transform_label(Image.fromarray(edges_all * 255))
edges[edges > 1] = 1
tensor_edges = transform_label(Image.fromarray(edges * 255))
label_tensor = torch.cat((tensor_edges_all, tensors_dist, tensor_edges), dim=0)
return label_tensor, params
def imgpath_to_labelpath(self, path):
root, name = path.split('CelebA-HQ-img/')
subfolder = str(int(name.split('.')[0]) // 2000)
label_path = os.path.join(root, 'CelebAMask-HQ-mask-anno', subfolder, name.split('.')[0].zfill(5) + '_{}.png')
return label_path
def labelpath_to_imgpath(self, path):
root= path.replace('\\', '/').split('CelebAMask-HQ-mask-anno/')[0]
name = os.path.basename(path).split('_')[0]
img_path = os.path.join(root, 'CelebA-HQ-img', str(int(name)) + '.jpg')
return img_path
# In ADE20k, 'unknown' label is of value 0.
# Change the 'unknown' label to the last label to match other datasets.
# def postprocess(self, input_dict):
# label = input_dict['label']
# label = label - 1
# label[label == -1] = self.opt.label_nc
# input_dict['label'] = label
# if input_dict['label_ref'] is not None:
# label_ref = input_dict['label_ref']
# label_ref = label_ref - 1
# label_ref[label_ref == -1] = self.opt.label_nc
# input_dict['label_ref'] = label_ref
```
#### File: CoCosNet/data/flickr_dataset.py
```python
import os
from data.pix2pix_dataset import Pix2pixDataset
class FlickrDataset(Pix2pixDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='resize_and_crop')
if is_train:
parser.set_defaults(load_size=286)
else:
parser.set_defaults(load_size=256)
parser.set_defaults(crop_size=256)
parser.set_defaults(display_winsize=256)
parser.set_defaults(label_nc=150)
parser.set_defaults(contain_dontcare_label=True)
parser.set_defaults(cache_filelist_read=False)
parser.set_defaults(cache_filelist_write=False)
return parser
def get_paths(self, opt):
root = os.path.join(opt.dataroot, 'test/images') if opt.phase == 'test' else os.path.join(opt.dataroot, 'images')
root_mask = root.replace('images', 'mask')
image_paths = sorted(os.listdir(root))
image_paths = [os.path.join(root, it) for it in image_paths]
label_paths = sorted(os.listdir(root_mask))
label_paths = [os.path.join(root_mask, it) for it in label_paths]
return label_paths, image_paths
def get_ref(self, opt):
extra = '_test_from_train' if opt.phase == 'test' else ''
with open('./data/flickr_ref{}.txt'.format(extra)) as fd:
lines = fd.readlines()
ref_dict = {}
for i in range(len(lines)):
items = lines[i].strip().split(',')
key = items[0]
if opt.phase == 'test':
val = items[1:]
else:
val = [items[1], items[-1]]
ref_dict[key] = val
train_test_folder = ('', 'test')
return ref_dict, train_test_folder
def imgpath_to_labelpath(self, path):
path_ref_label = path.replace('images', 'mask')
return path_ref_label
# In ADE20k, 'unknown' label is of value 0.
# Change the 'unknown' label to the last label to match other datasets.
# def postprocess(self, input_dict):
# label = input_dict['label']
# label = label - 1
# label[label == -1] = self.opt.label_nc
# input_dict['label'] = label
# if input_dict['label_ref'] is not None:
# label_ref = input_dict['label_ref']
# label_ref = label_ref - 1
# label_ref[label_ref == -1] = self.opt.label_nc
# input_dict['label_ref'] = label_ref
```
#### File: models/networks/generator.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_nonspade_norm_layer, equal_lr
from models.networks.architecture import ResnetBlock as ResnetBlock
from models.networks.architecture import SPADEResnetBlock as SPADEResnetBlock
from models.networks.architecture import Attention
from models.networks.sync_batchnorm import SynchronizedBatchNorm2d, SynchronizedBatchNorm1d
class SPADEGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.set_defaults(norm_G='spectralspadesyncbatch3x3')
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
nf = opt.ngf
self.sw, self.sh = self.compute_latent_vector_size(opt)
ic = 0 + (3 if 'warp' in self.opt.CBN_intype else 0) + (
self.opt.semantic_nc if 'mask' in self.opt.CBN_intype else 0)
self.fc = nn.Conv2d(ic, 16 * nf, 3, padding=1)
if opt.eqlr_sn:
self.fc = equal_lr(self.fc)
self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
if opt.use_attention:
self.attn = Attention(4 * nf, 'spectral' in opt.norm_G)
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
final_nc = nf
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
num_up_layers = 5
sw = opt.crop_size // (2 ** num_up_layers)
sh = round(sw / opt.aspect_ratio)
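        # e.g. with the datasets' crop_size of 256 and (assuming) the default
        # aspect_ratio of 1: sw = 256 // 2**5 = 8 and sh = 8.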
return sw, sh
def forward(self, input, warp_out=None):
seg = input if warp_out is None else warp_out
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
x = self.head_0(x, seg)
x = self.up(x)
x = self.G_middle_0(x, seg)
x = self.G_middle_1(x, seg)
x = self.up(x)
x = self.up_0(x, seg)
x = self.up(x)
x = self.up_1(x, seg)
x = self.up(x)
if self.opt.use_attention:
x = self.attn(x)
x = self.up_2(x, seg)
x = self.up(x)
x = self.up_3(x, seg)
x = self.conv_img(F.leaky_relu(x, 2e-1))
x = F.tanh(x)
return x
class AdaptiveFeatureGenerator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.set_defaults(norm_G='spectralspadesyncbatch3x3')
parser.add_argument('--num_upsampling_layers',
choices=('normal', 'more', 'most'), default='normal',
help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator")
return parser
def __init__(self, opt):
# TODO: kernel=4, concat noise, or change architecture to vgg feature pyramid
super().__init__()
self.opt = opt
kw = 3
pw = int(np.ceil((kw - 1.0) / 2))
ndf = opt.ngf
norm_layer = get_nonspade_norm_layer(opt, opt.norm_E)
self.layer1 = norm_layer(nn.Conv2d(opt.spade_ic, ndf, kw, stride=1, padding=pw))
self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, opt.adaptor_kernel, stride=2, padding=pw))
self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=1, padding=pw))
if opt.warp_stride == 2:
self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, kw, stride=1, padding=pw))
else:
self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, opt.adaptor_kernel, stride=2, padding=pw))
self.layer5 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=1, padding=pw))
self.actvn = nn.LeakyReLU(0.2, False)
self.opt = opt
nf = opt.ngf
self.head_0 = SPADEResnetBlock(8 * nf, 8 * nf, opt, use_se=opt.adaptor_se)
if opt.adaptor_nonlocal:
self.attn = Attention(8 * nf, False)
self.G_middle_0 = SPADEResnetBlock(8 * nf, 8 * nf, opt, use_se=opt.adaptor_se)
self.G_middle_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt, use_se=opt.adaptor_se)
if opt.adaptor_res_deeper:
self.deeper0 = SPADEResnetBlock(4 * nf, 4 * nf, opt)
if opt.dilation_conv:
self.deeper1 = SPADEResnetBlock(4 * nf, 4 * nf, opt, dilation=2)
self.deeper2 = SPADEResnetBlock(4 * nf, 4 * nf, opt, dilation=4)
self.degridding0 = norm_layer(nn.Conv2d(ndf * 4, ndf * 4, 3, stride=1, padding=2, dilation=2))
self.degridding1 = norm_layer(nn.Conv2d(ndf * 4, ndf * 4, 3, stride=1, padding=1))
else:
self.deeper1 = SPADEResnetBlock(4 * nf, 4 * nf, opt)
self.deeper2 = SPADEResnetBlock(4 * nf, 4 * nf, opt)
def forward(self, input, seg):
x = self.layer1(input)
x = self.layer2(self.actvn(x))
x = self.layer3(self.actvn(x))
x = self.layer4(self.actvn(x))
x = self.layer5(self.actvn(x))
x = self.head_0(x, seg)
if self.opt.adaptor_nonlocal:
x = self.attn(x)
x = self.G_middle_0(x, seg)
x = self.G_middle_1(x, seg)
if self.opt.adaptor_res_deeper:
x = self.deeper0(x, seg)
x = self.deeper1(x, seg)
x = self.deeper2(x, seg)
if self.opt.dilation_conv:
x = self.degridding0(x)
x = self.degridding1(x)
return x
class ReverseGenerator(BaseNetwork):
def __init__(self, opt, ic, oc, size):
super().__init__()
self.opt = opt
self.downsample = True if size == 256 else False
nf = opt.ngf
opt.spade_ic = ic
if opt.warp_reverseG_s:
self.backbone_0 = SPADEResnetBlock(4 * nf, 4 * nf, opt)
else:
self.backbone_0 = SPADEResnetBlock(4 * nf, 8 * nf, opt)
self.backbone_1 = SPADEResnetBlock(8 * nf, 8 * nf, opt)
self.backbone_2 = SPADEResnetBlock(8 * nf, 8 * nf, opt)
self.backbone_3 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
self.backbone_4 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.backbone_5 = SPADEResnetBlock(2 * nf, nf, opt)
del opt.spade_ic
if self.downsample:
kw = 3
pw = int(np.ceil((kw - 1.0) / 2))
ndf = opt.ngf
norm_layer = get_nonspade_norm_layer(opt, opt.norm_E)
self.layer1 = norm_layer(nn.Conv2d(ic, ndf, kw, stride=1, padding=pw))
self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, 4, stride=2, padding=pw))
self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=1, padding=pw))
self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 4, 4, stride=2, padding=pw))
self.up = nn.Upsample(scale_factor=2)
self.actvn = nn.LeakyReLU(0.2, False)
self.conv_img = nn.Conv2d(nf, oc, 3, padding=1)
def forward(self, x):
input = x
if self.downsample:
x = self.layer1(input)
x = self.layer2(self.actvn(x))
x = self.layer3(self.actvn(x))
x = self.layer4(self.actvn(x))
x = self.backbone_0(x, input)
if not self.opt.warp_reverseG_s:
x = self.backbone_1(x, input)
x = self.backbone_2(x, input)
x = self.backbone_3(x, input)
if self.downsample:
x = self.up(x)
x = self.backbone_4(x, input)
if self.downsample:
x = self.up(x)
x = self.backbone_5(x, input)
x = self.conv_img(F.leaky_relu(x, 2e-1))
x = F.tanh(x)
return x
class DomainClassifier(BaseNetwork):
def __init__(self, opt):
super().__init__()
nf = opt.ngf
kw = 4 if opt.domain_rela else 3
pw = int((kw - 1.0) / 2)
self.feature = nn.Sequential(nn.Conv2d(4 * nf, 2 * nf, kw, stride=2, padding=pw),
SynchronizedBatchNorm2d(2 * nf, affine=True),
nn.LeakyReLU(0.2, False),
nn.Conv2d(2 * nf, nf, kw, stride=2, padding=pw),
SynchronizedBatchNorm2d(nf, affine=True),
nn.LeakyReLU(0.2, False),
nn.Conv2d(nf, int(nf // 2), kw, stride=2, padding=pw),
SynchronizedBatchNorm2d(int(nf // 2), affine=True),
nn.LeakyReLU(0.2, False)) # 32*8*8
model = [nn.Linear(int(nf // 2) * 8 * 8, 100),
SynchronizedBatchNorm1d(100, affine=True),
nn.ReLU()]
if opt.domain_rela:
model += [nn.Linear(100, 1)]
else:
model += [nn.Linear(100, 2),
nn.LogSoftmax(dim=1)]
self.classifier = nn.Sequential(*model)
def forward(self, x):
x = self.feature(x)
x = self.classifier(x.view(x.shape[0], -1))
return x
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None
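# Usage sketch (not part of the original file): the layer is invoked through
# Function.apply, e.g. reversed_features = ReverseLayerF.apply(features, alpha),
# so the forward pass is an identity while gradients are scaled by -alpha.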
class EMA():
def __init__(self, mu):
self.mu = mu
self.shadow = {}
self.original = {}
def register(self, name, val):
self.shadow[name] = val.clone()
def __call__(self, model):
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
decay = self.mu
new_average = (1.0 - decay) * param.data + decay * self.shadow[name]
self.shadow[name] = new_average.clone()
def assign(self, model):
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
self.original[name] = param.data.clone()
param.data = self.shadow[name]
def resume(self, model):
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
param.data = self.original[name]
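# Usage sketch (not part of the original file; the mu value is illustrative):
#   ema = EMA(mu=0.999)
#   for name, param in model.named_parameters():
#       if param.requires_grad:
#           ema.register(name, param.data)
#   # after each optimizer step: ema(model)
#   # before evaluation: ema.assign(model); afterwards: ema.resume(model)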
``` |
{
"source": "jiyeonjoo/cdms-cinder",
"score": 2
} |
#### File: drivers/huawei/huawei_dorado.py
```python
import re
from cinder.openstack.common import log as logging
from cinder.volume.drivers.huawei import huawei_t
from cinder.volume.drivers.huawei import ssh_common
LOG = logging.getLogger(__name__)
class HuaweiDoradoISCSIDriver(huawei_t.HuaweiTISCSIDriver):
"""ISCSI driver class for Huawei OceanStor Dorado storage arrays."""
def __init__(self, *args, **kwargs):
super(HuaweiDoradoISCSIDriver, self).__init__(*args, **kwargs)
def do_setup(self, context):
"""Instantiate common class."""
self.common = ssh_common.DoradoCommon(configuration=self.configuration)
self.common.do_setup(context)
self._assert_cli_out = self.common._assert_cli_out
self._assert_cli_operate_out = self.common._assert_cli_operate_out
class HuaweiDoradoFCDriver(huawei_t.HuaweiTFCDriver):
"""FC driver class for Huawei OceanStor Dorado storage arrays."""
def __init__(self, *args, **kwargs):
super(HuaweiDoradoFCDriver, self).__init__(*args, **kwargs)
def do_setup(self, context):
"""Instantiate common class."""
self.common = ssh_common.DoradoCommon(configuration=self.configuration)
self.common.do_setup(context)
self._assert_cli_out = self.common._assert_cli_out
self._assert_cli_operate_out = self.common._assert_cli_operate_out
def _get_host_port_details(self, hostid):
cli_cmd = 'showfcmode'
out = self.common._execute_cli(cli_cmd)
self._assert_cli_out(re.search('FC Port Topology Mode', out),
'_get_tgt_fc_port_wwns',
'Failed to get FC port WWNs.',
cli_cmd, out)
return [line.split()[3] for line in out.split('\r\n')[6:-2]]
def _get_tgt_fc_port_wwns(self, port_details):
return port_details
def initialize_connection(self, volume, connector):
"""Create FC connection between a volume and a host."""
LOG.debug(_('initialize_connection: volume name: %(vol)s '
'host: %(host)s initiator: %(wwn)s')
% {'vol': volume['name'],
'host': connector['host'],
'wwn': connector['wwpns']})
self.common._update_login_info()
# First, add a host if it is not added before.
host_id = self.common.add_host(connector['host'])
# Then, add free FC ports to the host.
ini_wwns = connector['wwpns']
free_wwns = self._get_connected_free_wwns()
for wwn in free_wwns:
if wwn in ini_wwns:
self._add_fc_port_to_host(host_id, wwn)
fc_port_details = self._get_host_port_details(host_id)
tgt_wwns = self._get_tgt_fc_port_wwns(fc_port_details)
LOG.debug(_('initialize_connection: Target FC ports WWNS: %s')
% tgt_wwns)
# Finally, map the volume to the host.
volume_id = volume['provider_location']
hostlun_id = self.common.map_volume(host_id, volume_id)
properties = {}
properties['target_discovered'] = False
properties['target_wwn'] = tgt_wwns
properties['target_lun'] = int(hostlun_id)
properties['volume_id'] = volume['id']
return {'driver_volume_type': 'fibre_channel',
'data': properties}
``` |
{
"source": "jiyeonkim127/im3d",
"score": 2
} |
#### File: ldif/inference/metrics.py
```python
import numpy as np
import scipy.spatial
# ldif is an internal package, and should be imported last.
# pylint: disable=g-bad-import-order
# pylint: enable=g-bad-import-order
OCCNET_FSCORE_EPS = 1e-09
def sample_points_and_face_normals(mesh, sample_count):
points, indices = mesh.sample(sample_count, return_index=True)
points = points.astype(np.float32)
normals = mesh.face_normals[indices]
return points, normals
def pointcloud_neighbor_distances_indices(source_points, target_points):
target_kdtree = scipy.spatial.cKDTree(target_points)
distances, indices = target_kdtree.query(source_points, n_jobs=-1)
return distances, indices
def dot_product(a, b):
if len(a.shape) != 2:
raise ValueError('Dot Product with input shape: %s' % repr(a.shape))
if len(b.shape) != 2:
raise ValueError('Dot Product with input shape: %s' % repr(b.shape))
return np.sum(a * b, axis=1)
def normal_consistency(mesh1, mesh2, sample_count=100000, return_points=False):
"""Computes the normal consistency metric between two meshes."""
points1, normals1 = sample_points_and_face_normals(mesh1, sample_count)
points2, normals2 = sample_points_and_face_normals(mesh2, sample_count)
_, indices12 = pointcloud_neighbor_distances_indices(points1, points2)
_, indices21 = pointcloud_neighbor_distances_indices(points2, points1)
normals12 = normals2[indices12]
normals21 = normals1[indices21]
# We take abs because the OccNet code takes abs...
nc12 = np.abs(dot_product(normals1, normals12))
nc21 = np.abs(dot_product(normals2, normals21))
nc = 0.5 * np.mean(nc12) + 0.5 * np.mean(nc21)
if return_points:
return nc, points1, points2
return nc
def percent_below(dists, thresh):
return np.mean((dists ** 2 <= thresh).astype(np.float32)) * 100.0
def f_score(a_to_b, b_to_a, thresh):
precision = percent_below(a_to_b, thresh)
recall = percent_below(b_to_a, thresh)
return (2 * precision * recall) / (precision + recall + OCCNET_FSCORE_EPS)
def fscore(mesh1,
mesh2,
sample_count=100000,
tau=1e-04,
points1=None,
points2=None):
"""Computes the F-Score at tau between two meshes."""
points1, points2 = get_points(mesh1, mesh2, points1, points2, sample_count)
dist12, _ = pointcloud_neighbor_distances_indices(points1, points2)
dist21, _ = pointcloud_neighbor_distances_indices(points2, points1)
f_score_tau = f_score(dist12, dist21, tau)
return f_score_tau
def mesh_chamfer_via_points(mesh1=None,
mesh2=None,
sample_count=100000,
points1=None,
points2=None):
points1, points2 = get_points(mesh1, mesh2, points1, points2, sample_count)
dist12, _ = pointcloud_neighbor_distances_indices(points1, points2)
dist21, _ = pointcloud_neighbor_distances_indices(points2, points1)
chamfer = 1000.0 * (np.mean(dist12 ** 2) + np.mean(dist21 ** 2))
return chamfer
def get_points(mesh1, mesh2, points1, points2, sample_count):
if points1 is not None or points2 is not None:
assert points1 is not None and points2 is not None
else:
points1, _ = sample_points_and_face_normals(mesh1, sample_count)
points2, _ = sample_points_and_face_normals(mesh2, sample_count)
return points1, points2
def all_mesh_metrics(mesh1, mesh2, sample_count=100000):
nc, points1, points2 = normal_consistency(
mesh1, mesh2, sample_count, return_points=True)
fs_tau = fscore(mesh1, mesh2, sample_count, 1e-04, points1, points2)
fs_2tau = fscore(mesh1, mesh2, sample_count, 2.0 * 1e-04, points1, points2)
chamfer = mesh_chamfer_via_points(mesh1, mesh2, sample_count, points1,
points2)
return {
'fscore_tau': fs_tau,
'fscore_2tau': fs_2tau,
'chamfer': chamfer,
'normal_consistency': nc
}
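# --- usage sketch (not part of the original file) ---
# Assumes the mesh arguments are trimesh.Trimesh objects (they must provide the
# .sample() and .face_normals used above) and a scipy release that still accepts
# the n_jobs argument of cKDTree.query. The two icospheres stand in for a
# ground-truth and a predicted mesh; sample_count is lowered to keep it quick.
if __name__ == '__main__':
    import trimesh
    gt_mesh = trimesh.creation.icosphere(subdivisions=3, radius=1.0)
    pred_mesh = trimesh.creation.icosphere(subdivisions=3, radius=1.05)
    print(all_mesh_metrics(gt_mesh, pred_mesh, sample_count=20000))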
```
#### File: ldif/util/gaps_util.py
```python
import os
import numpy as np
# LDIF is an internal package, should be imported last.
# pylint: disable=g-bad-import-order
from . import file_util
from .file_util import log
# pylint: enable=g-bad-import-order
def read_pts_file(path):
"""Reads a .pts or a .sdf point samples file."""
_, ext = os.path.splitext(path)
assert ext in ['.sdf', '.pts']
l = 4 if ext == '.sdf' else 6
with file_util.open_file(path, 'rb') as f:
points = np.fromfile(f, dtype=np.float32)
points = np.reshape(points, [-1, l])
return points
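# --- round-trip sketch (not part of the original file) ---
# Illustrates the assumed layout: flat float32 records, 4 values per point for
# .sdf files (x, y, z, sdf) and 6 for .pts. Assuming file_util.open_file reads
# plain local paths, something like the following would round-trip:
#   demo = np.random.rand(8, 4).astype(np.float32)
#   demo.tofile('/tmp/demo.sdf')  # hypothetical path
#   assert np.allclose(read_pts_file('/tmp/demo.sdf'), demo)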
```
#### File: models/ldif/training.py
```python
from models.training import BaseTrainer
import torch
class Trainer(BaseTrainer):
'''
Trainer object for total3d.
'''
def eval_step(self, data):
'''
performs a step in evaluation
:param data (dict): data dictionary
:return:
'''
loss = self.compute_loss(data)
loss['total'] = loss['total'].item()
return loss
def visualize_step(self, epoch, phase, iter, data):
''' Performs a visualization step.
'''
pass
def to_device(self, data):
device = self.device
ndata = {}
for k, v in data.items():
if type(v) is torch.Tensor and v.dtype is torch.float32:
ndata[k] = v.to(device)
else:
ndata[k] = v
return ndata
def compute_loss(self, data):
'''
compute the overall loss.
:param data (dict): data dictionary
:return:
'''
'''load input and ground-truth data'''
data = self.to_device(data)
'''network forwarding'''
est_data = self.net(data)
'''computer losses'''
loss = self.net.loss(est_data, data)
return loss
```
#### File: total3d/modules/mesh_reconstruction.py
```python
import torch
import torch.nn as nn
from models.registers import MODULES
from configs.data_config import number_pnts_on_template, pix3d_n_classes
from models.modules import resnet
from models.modules.resnet import model_urls
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
from net_utils.misc import weights_init, sphere_edges, sphere_faces, sphere_edge2face, sphere_adjacency, sphere_points_normals, sample_points_on_edges
from external.ldif.representation.structured_implicit_function import StructuredImplicit
import numpy as np
from external.ldif.util import np_util
from external.ldif.inference import extract_mesh
import trimesh
from external.PIFu.lib import mesh_util
from external.ldif.util import file_util
import os
import struct
import tempfile
import shutil
import subprocess
class PointGenCon(nn.Module):
def __init__(self, bottleneck_size = 2500, output_dim = 3):
super(PointGenCon, self).__init__()
self.conv1 = torch.nn.Conv1d(bottleneck_size, bottleneck_size, 1)
self.conv2 = torch.nn.Conv1d(bottleneck_size, bottleneck_size//2, 1)
self.conv3 = torch.nn.Conv1d(bottleneck_size//2, bottleneck_size//4, 1)
self.conv4 = torch.nn.Conv1d(bottleneck_size//4, output_dim, 1)
self.th = nn.Tanh()
self.bn1 = torch.nn.BatchNorm1d(bottleneck_size)
self.bn2 = torch.nn.BatchNorm1d(bottleneck_size//2)
self.bn3 = torch.nn.BatchNorm1d(bottleneck_size//4)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.th(self.conv4(x))
return x
class EREstimate(nn.Module):
def __init__(self, bottleneck_size=2500, output_dim = 3):
super(EREstimate, self).__init__()
self.conv1 = torch.nn.Conv1d(bottleneck_size, bottleneck_size, 1)
self.conv2 = torch.nn.Conv1d(bottleneck_size, bottleneck_size//2, 1)
self.conv3 = torch.nn.Conv1d(bottleneck_size//2, bottleneck_size//4, 1)
self.conv4 = torch.nn.Conv1d(bottleneck_size//4, output_dim, 1)
self.bn1 = torch.nn.BatchNorm1d(bottleneck_size)
self.bn2 = torch.nn.BatchNorm1d(bottleneck_size//2)
self.bn3 = torch.nn.BatchNorm1d(bottleneck_size//4)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.conv4(x)
return x
@MODULES.register_module
class DensTMNet(nn.Module):
def __init__(self, cfg, optim_spec=None, bottleneck_size=1024, n_classes=pix3d_n_classes, pretrained_encoder=True):
super(DensTMNet, self).__init__()
'''Optimizer parameters used in training'''
self.optim_spec = optim_spec
'''Module parameters'''
self.num_points = number_pnts_on_template
self.subnetworks = cfg.config['data']['tmn_subnetworks']
self.train_e_e = cfg.config['data']['with_edge_classifier']
'''Modules'''
self.encoder = resnet.resnet18_full(pretrained=False, num_classes=1024,
input_channels=4 if cfg.config['data'].get('mask', False) else 3)
self.decoders = nn.ModuleList(
[PointGenCon(bottleneck_size=3 + bottleneck_size + n_classes) for i in range(0, self.subnetworks)])
if self.train_e_e:
self.error_estimators = nn.ModuleList(
[EREstimate(bottleneck_size=3 + bottleneck_size + n_classes, output_dim=1) for i in range(0, max(self.subnetworks-1, 1))])
self.face_samples = cfg.config['data']['face_samples']
# initialize weight
self.apply(weights_init)
# initialize resnet
if pretrained_encoder:
pretrained_dict = model_zoo.load_url(model_urls['resnet18'])
model_dict = self.encoder.state_dict()
if pretrained_dict['conv1.weight'].shape != model_dict['conv1.weight'].shape:
model_dict['conv1.weight'][:,:3,...] = pretrained_dict['conv1.weight']
pretrained_dict.pop('conv1.weight')
pretrained_dict = {k: v for k, v in pretrained_dict.items() if
k in model_dict and not k.startswith('fc.')}
model_dict.update(pretrained_dict)
self.encoder.load_state_dict(model_dict)
def unfreeze_parts(self, loose_parts):
# freeze all
for param in self.parameters():
param.requires_grad = False
print('All layers freezed.')
# unfreeze parts
if 'encoder' in loose_parts:
for param in self.encoder.parameters():
param.requires_grad = True
print('Encoder unfrozen.')
def freeze_encoder(self):
for param in self.encoder.parameters():
param.requires_grad = False
print('Encoder freezed.')
def freeze_by_stage(self, stage, loose_parts):
if stage >= 1:
# freeze all
for param in self.parameters():
param.requires_grad = False
print('All layers freezed.')
if 'decoder' in loose_parts:
# unfreeze the last sub-network of decoders.
for param in self.decoders[-1].parameters():
param.requires_grad = True
print('Decoder unfrozen.')
if 'ee' in loose_parts and hasattr(self, 'error_estimators'):
# unfreeze the last sub-network of error estimators.
for param in self.error_estimators[-1].parameters():
param.requires_grad = True
print('EE unfrozen.')
def forward(self, image, size_cls, threshold = 0.1, factor = 1., mask_status = None, reconstruction = 'mesh'):
mode = 'train' if self.training else 'test'
device = image.device
n_batch = image.size(0)
n_edges = sphere_edges.shape[0]
# image encoding
image = image.contiguous()
afeature = self.encoder(image)
code = torch.cat([afeature, size_cls], 1)
if mask_status is not None:
code4recon = code[mask_status.nonzero()]
n_batch = code4recon.size(0)
if n_batch == 0:
return {'mgn_afeature':afeature}
else:
code4recon = code
if reconstruction is None:
return {'mgn_afeature':afeature}
if mode == 'test':
current_faces = sphere_faces.clone().unsqueeze(0).to(device)
current_faces = current_faces.repeat(n_batch, 1, 1)
else:
current_faces = None
current_edges = sphere_edges.clone().unsqueeze(0).to(device)
current_edges = current_edges.repeat(n_batch, 1, 1)
current_shape_grid = sphere_points_normals[:, :3].t().expand(n_batch, 3, self.num_points).to(device)
# outputs for saving
out_shape_points = []
out_sampled_mesh_points = []
out_indicators = []
# boundary faces for boundary refinement
boundary_point_ids = torch.zeros(size=(n_batch, self.num_points), dtype=torch.uint8).to(device)
remove_edges_list = []
        # AtlasNet deformation + topology modification
for i in range(self.subnetworks):
current_image_grid = code4recon.unsqueeze(2).expand(code4recon.size(0), code4recon.size(1),
current_shape_grid.size(2)).contiguous()
current_image_grid = torch.cat((current_shape_grid, current_image_grid), 1).contiguous()
current_shape_grid = current_shape_grid + self.decoders[i](current_image_grid)
# save deformed point cloud
out_shape_points.append(current_shape_grid)
if i == self.subnetworks - 1 and self.subnetworks > 1:
remove_edges_list = [item for item in remove_edges_list if len(item)]
if remove_edges_list:
remove_edges_list = torch.unique(torch.cat(remove_edges_list), dim=0)
for batch_id in range(n_batch):
rm_edges = remove_edges_list[remove_edges_list[:, 0] == batch_id, 1]
if len(rm_edges) > 0:
rm_candidates, counts = torch.unique(sphere_edges[rm_edges], return_counts=True)
boundary_ids = counts < sphere_adjacency[rm_candidates - 1].sum(1)
boundary_point_ids[batch_id][rm_candidates[boundary_ids] - 1] = 1
return {'mesh_coordinates_results': out_shape_points, 'points_from_edges': out_sampled_mesh_points,
'point_indicators': out_indicators, 'output_edges': current_edges,
'boundary_point_ids': boundary_point_ids, 'faces': current_faces,
'mgn_afeature':afeature}
if self.train_e_e:
# sampling from deformed mesh
sampled_points = sample_points_on_edges(current_shape_grid, current_edges, quantity=self.face_samples, mode=mode)
# save sampled points from deformed mesh
out_sampled_mesh_points.append(sampled_points)
# prepare for face error estimation
current_image_grid = code4recon.unsqueeze(2).expand(code4recon.size(0), code4recon.size(1), sampled_points.size(2)).contiguous()
current_image_grid = torch.cat((sampled_points, current_image_grid), 1).contiguous()
# estimate the distance from deformed points to gt mesh.
indicators = self.error_estimators[i](current_image_grid)
indicators = indicators.view(n_batch, 1, n_edges, self.face_samples)
indicators = indicators.squeeze(1)
indicators = torch.mean(indicators, dim=2)
# save estimated distance values from deformed points to gt mesh.
out_indicators.append(indicators)
# remove faces and modify the topology
remove_edges = torch.nonzero(torch.sigmoid(indicators) < threshold)
remove_edges_list.append(remove_edges)
for batch_id in range(n_batch):
rm_edges = remove_edges[remove_edges[:, 0] == batch_id, 1]
if len(rm_edges)>0:
# cutting edges in training
current_edges[batch_id][rm_edges, :] = 1
if mode == 'test':
current_faces[batch_id][sphere_edge2face[rm_edges].sum(0).type(torch.bool), :] = 1
threshold *= factor
return {'mesh_coordinates_results':out_shape_points, 'points_from_edges':out_sampled_mesh_points,
'point_indicators':out_indicators, 'output_edges':current_edges,
'boundary_point_ids':boundary_point_ids, 'faces':current_faces,
'mgn_afeature':afeature}
class BatchedCBNLayer(nn.Module):
def __init__(self, f_dim=32):
super(BatchedCBNLayer, self).__init__()
self.fc_beta = nn.Linear(f_dim, f_dim)
self.fc_gamma = nn.Linear(f_dim, f_dim)
self.register_buffer('running_mean', torch.zeros(1))
self.register_buffer('running_var', torch.ones(1))
def forward(self, shape_embedding, sample_embeddings):
beta = self.fc_beta(shape_embedding)
gamma = self.fc_gamma(shape_embedding)
if self.training:
batch_mean, batch_variance = sample_embeddings.mean().detach(), sample_embeddings.var().detach()
self.running_mean = 0.995 * self.running_mean + 0.005 * batch_mean
self.running_var = 0.995 * self.running_var + 0.005 * batch_variance
sample_embeddings = (sample_embeddings - self.running_mean) / torch.sqrt(self.running_var + 1e-5)
out = gamma.unsqueeze(1) * sample_embeddings + beta.unsqueeze(1)
return out
class BatchedOccNetResnetLayer(nn.Module):
def __init__(self, f_dim=32):
super(BatchedOccNetResnetLayer, self).__init__()
self.bn1 = BatchedCBNLayer(f_dim=f_dim)
self.fc1 = nn.Linear(f_dim, f_dim)
self.bn2 = BatchedCBNLayer(f_dim=f_dim)
self.fc2 = nn.Linear(f_dim, f_dim)
def forward(self, shape_embedding, sample_embeddings):
sample_embeddings = self.bn1(shape_embedding, sample_embeddings)
init_sample_embeddings = sample_embeddings
sample_embeddings = torch.relu(sample_embeddings)
sample_embeddings = self.fc1(sample_embeddings)
sample_embeddings = self.bn2(shape_embedding, sample_embeddings)
sample_embeddings = torch.relu(sample_embeddings)
sample_embeddings = self.fc2(sample_embeddings)
return init_sample_embeddings + sample_embeddings
class OccNetDecoder(nn.Module):
def __init__(self, f_dim=32):
super(OccNetDecoder, self).__init__()
self.fc1 = nn.Linear(3, f_dim)
self.resnet = BatchedOccNetResnetLayer(f_dim=f_dim)
self.bn = BatchedCBNLayer(f_dim=f_dim)
self.fc2 = nn.Linear(f_dim, 1)
def write_occnet_file(self, path):
"""Serializes an occnet network and writes it to disk."""
f = file_util.open_file(path, 'wb')
def write_fc_layer(layer):
weights = layer.weight.t().cpu().numpy()
biases = layer.bias.cpu().numpy()
f.write(weights.astype('f').tobytes())
f.write(biases.astype('f').tobytes())
def write_cbn_layer(layer):
write_fc_layer(layer.fc_beta)
write_fc_layer(layer.fc_gamma)
running_mean = layer.running_mean.item()
running_var = layer.running_var.item()
f.write(struct.pack('ff', running_mean, running_var))
# write_header
f.write(struct.pack('ii', 1, self.fc1.out_features))
# write_input_layer
write_fc_layer(self.fc1)
# write_resnet
write_cbn_layer(self.resnet.bn1)
write_fc_layer(self.resnet.fc1)
write_cbn_layer(self.resnet.bn2)
write_fc_layer(self.resnet.fc2)
# write_cbn_layer
write_cbn_layer(self.bn)
# write_activation_layer
weights = self.fc2.weight.t().cpu().numpy()
bias = self.fc2.bias.data.item()
f.write(weights.astype('f').tobytes())
f.write(struct.pack('f', bias))
f.close()
def forward(self, embedding, samples):
sample_embeddings = self.fc1(samples)
sample_embeddings = self.resnet(embedding, sample_embeddings)
sample_embeddings = self.bn(embedding, sample_embeddings)
vals = self.fc2(sample_embeddings)
return vals
@MODULES.register_module
class LDIF(nn.Module):
def __init__(self, cfg, optim_spec=None, n_classes=pix3d_n_classes,
pretrained_encoder=True):
super(LDIF, self).__init__()
'''Optimizer parameters used in training'''
self.optim_spec = optim_spec
'''Module parameters'''
self.cfg = cfg
self.bottleneck_size = cfg.config['model']['mesh_reconstruction'].get('bottleneck_size', 2048)
cfg.config['model']['mesh_reconstruction']['bottleneck_size'] = self.bottleneck_size
self.element_count = cfg.config['model']['mesh_reconstruction']['element_count']
self.sym_element_count = cfg.config['model']['mesh_reconstruction']['sym_element_count']
self.effective_element_count = self.element_count + self.sym_element_count
cfg.config['model']['mesh_reconstruction']['effective_element_count'] = self.effective_element_count
self.implicit_parameter_length = cfg.config['model']['mesh_reconstruction']['implicit_parameter_length']
self.element_embedding_length = 10 + self.implicit_parameter_length
cfg.config['model']['mesh_reconstruction']['analytic_code_len'] = 10 * self.element_count
cfg.config['model']['mesh_reconstruction']['structured_implicit_vector_len'] = \
self.element_embedding_length * self.element_count
self._temp_folder = None
'''Modules'''
self.encoder = resnet.resnet18_full(pretrained=False, num_classes=self.bottleneck_size,
input_channels=4 if cfg.config['data'].get('mask', False) else 3)
self.mlp = nn.Sequential(
nn.Linear(self.bottleneck_size + n_classes, self.bottleneck_size), nn.LeakyReLU(0.2, True),
nn.Linear(self.bottleneck_size, self.bottleneck_size), nn.LeakyReLU(0.2, True),
nn.Linear(self.bottleneck_size, self.element_count * self.element_embedding_length)
)
self.decoder = OccNetDecoder(f_dim=self.implicit_parameter_length)
# initialize weight
self.apply(weights_init)
# initialize resnet
if pretrained_encoder:
pretrained_dict = model_zoo.load_url(model_urls['resnet18'])
model_dict = self.encoder.state_dict()
if pretrained_dict['conv1.weight'].shape != model_dict['conv1.weight'].shape:
model_dict['conv1.weight'][:,:3,...] = pretrained_dict['conv1.weight']
pretrained_dict.pop('conv1.weight')
pretrained_dict = {k: v for k, v in pretrained_dict.items() if
k in model_dict and not k.startswith('fc.')}
model_dict.update(pretrained_dict)
self.encoder.load_state_dict(model_dict)
def eval_implicit_parameters(self, implicit_parameters, samples):
batch_size, element_count, element_embedding_length = list(implicit_parameters.shape)
sample_count = samples.shape[-2]
batched_parameters = torch.reshape(implicit_parameters, [batch_size * element_count, element_embedding_length])
batched_samples = torch.reshape(samples, [batch_size * element_count, sample_count, -1])
batched_vals = self.decoder(batched_parameters, batched_samples)
vals = torch.reshape(batched_vals, [batch_size, element_count, sample_count, 1])
return vals
def extract_mesh(self, structured_implicit, resolution=64, extent=0.75, num_samples=10000,
cuda=True, marching_cube=True):
if cuda:
mesh = []
for s in structured_implicit.unbind():
if self._temp_folder is None:
# self._temp_folder = tempfile.mktemp(dir='/dev/shm')
self._temp_folder = tempfile.mktemp(dir='/home/im3d/shm')
os.makedirs(self._temp_folder)
self.decoder.write_occnet_file(os.path.join(self._temp_folder, 'serialized.occnet'))
shutil.copy('./external/ldif/ldif2mesh/ldif2mesh', self._temp_folder)
si_path = os.path.join(self._temp_folder, 'ldif.txt')
grd_path = os.path.join(self._temp_folder, 'grid.grd')
s.savetxt(si_path)
cmd = (f"{os.path.join(self._temp_folder, 'ldif2mesh')} {si_path}"
f" {os.path.join(self._temp_folder, 'serialized.occnet')}"
f' {grd_path} -resolution {resolution} -extent {extent}')
subprocess.check_output(cmd, shell=True)
_, volume = file_util.read_grd(grd_path)
_, m = extract_mesh.marching_cubes(volume, extent)
mesh.append(m)
else:
mesh = mesh_util.reconstruction(structured_implicit=structured_implicit, resolution=resolution,
b_min=np.array([-extent] * 3), b_max=np.array([extent] * 3),
use_octree=True, num_samples=num_samples, marching_cube=marching_cube)
return mesh
def forward(self, image=None, size_cls=None, samples=None, occnet2gaps=None, structured_implicit=None,
resolution=None, cuda=True, reconstruction='mesh', apply_class_transfer=True):
return_dict = {}
# predict structured_implicit
return_structured_implicit = structured_implicit
if isinstance(structured_implicit, dict):
structured_implicit = StructuredImplicit(config=self.cfg.config, **structured_implicit, net=self)
elif structured_implicit is None or isinstance(structured_implicit, bool):
# encoder (ldif.model.model.StructuredImplicitModel.forward)
# image encoding (ldif.nets.cnn.early_fusion_cnn)
embedding = self.encoder(image)
return_dict['ldif_afeature'] = embedding
embedding = torch.cat([embedding, size_cls], 1)
structured_implicit_activations = self.mlp(embedding)
structured_implicit_activations = torch.reshape(
structured_implicit_activations, [-1, self.element_count, self.element_embedding_length])
return_dict['structured_implicit_activations'] = structured_implicit_activations
# SIF decoder
structured_implicit = StructuredImplicit.from_activation(
self.cfg.config, structured_implicit_activations, self)
else:
raise NotImplementedError
return_dict['structured_implicit'] = structured_implicit.dict()
# if only want structured_implicit
if return_structured_implicit is True:
return return_dict
# predict class or mesh
if samples is not None:
global_decisions, local_outputs = structured_implicit.class_at_samples(samples, apply_class_transfer)
return_dict.update({'global_decisions': global_decisions,
'element_centers': structured_implicit.centers})
return return_dict
elif reconstruction is not None:
if resolution is None:
resolution = self.cfg.config['data'].get('marching_cube_resolution', 128)
mesh = self.extract_mesh(structured_implicit, extent=self.cfg.config['data']['bounding_box'],
resolution=resolution, cuda=cuda, marching_cube=reconstruction == 'mesh')
if reconstruction == 'mesh':
if occnet2gaps is not None:
mesh = [m.apply_transform(t.inverse().cpu().numpy()) if not isinstance(m, trimesh.primitives.Sphere) else m
for m, t in zip(mesh, occnet2gaps)]
mesh_coordinates_results = []
faces = []
for m in mesh:
mesh_coordinates_results.append(
torch.from_numpy(m.vertices).type(torch.float32).transpose(-1, -2).to(structured_implicit.device))
faces.append(torch.from_numpy(m.faces).to(structured_implicit.device) + 1)
return_dict.update({'mesh': mesh, 'mesh_coordinates_results': [mesh_coordinates_results, ],
'faces': faces, 'element_centers': structured_implicit.centers})
elif reconstruction == 'sdf':
return_dict.update({'sdf': mesh[0], 'mat': mesh[1], 'element_centers': structured_implicit.centers})
else:
raise NotImplementedError
return return_dict
else:
return return_dict
def __del__(self):
if self._temp_folder is not None:
shutil.rmtree(self._temp_folder)
```
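A minimal sketch of exercising the `OccNetDecoder` defined above with random tensors (the shapes follow the batched `[batch * element_count, ...]` convention used by `LDIF.eval_implicit_parameters`; the inputs are placeholders, not real data):
```python
import torch

# Assumes OccNetDecoder from the module above is in scope.
decoder = OccNetDecoder(f_dim=32)
embedding = torch.randn(4, 32)        # one implicit code per (batch, element) pair
samples = torch.randn(4, 256, 3)      # 256 query points per code
vals = decoder(embedding, samples)    # CBN-conditioned ResNet block + linear head
print(vals.shape)                     # torch.Size([4, 256, 1]) -> occupancy logits
```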
#### File: jiyeonkim127/im3d/test_epoch.py
```python
from net_utils.utils import LossRecorder, ETA
from time import time
import numpy as np
import torch
import wandb
def test_func(cfg, tester, test_loader):
'''
test function.
:param cfg: configuration file
:param tester: specific tester for networks
:param test_loader: dataloader for testing
:return:
'''
batch_size = cfg.config[cfg.config['mode']]['batch_size']
loss_recorder = LossRecorder(batch_size)
cfg.log_string('-'*100)
eta_calc = ETA(smooth=0.99, ignore_first=True)
for iter, data in enumerate(test_loader):
loss = tester.test_step(data)
# visualize intermediate results.
tester.visualize_step(0, cfg.config['mode'], iter, data)
loss_recorder.update_loss(loss, data.get('class_name', None))
eta = eta_calc(len(test_loader) - iter - 1)
if ((iter + 1) % cfg.config['log']['print_step']) == 0:
pretty_loss = []
for k, v in loss.items():
if isinstance(v, list):
pretty_loss.append(str(k) + ': [' + ', '.join([f'{i:.3f}' for i in v]) + ']')
else:
pretty_loss.append(f"{k}: {v:.3f}")
pretty_loss = '{' + ', '.join(pretty_loss) + '}'
cfg.log_string('Process: Phase: %s. Epoch %d: %d/%d. ETA: %s. Current loss: %s.'
% (cfg.config['mode'], 0, iter + 1, len(test_loader), eta, pretty_loss))
wandb.summary['ETA'] = str(eta)
for key, test_loss in loss_recorder.loss_recorder.items():
cfg.log_string('Test loss (%s): %f' % (key, test_loss.avg))
return loss_recorder.loss_recorder
def test(cfg, tester, test_loader):
'''
test the network over the given test dataloader
:param cfg: configuration file
:param tester: specific tester for networks
:param test_loader: dataloader for testing
:return:
'''
cfg.log_string('-' * 100)
# set mode
tester.net.train(cfg.config['mode'] == 'train')
start = time()
with torch.no_grad():
test_loss_recoder = test_func(cfg, tester, test_loader)
cfg.log_string('Test time elapsed: (%f).' % (time()-start))
table = None
for key, test_loss in test_loss_recoder.items():
cfg.log_string('Test loss (%s): %f' % (key, test_loss.avg))
wandb.summary.update({f"{key}_avg": test_loss.avg})
wandb.summary.update({f"{key}_hist": wandb.Histogram(test_loss.val)})
if len(test_loss.cls) > 0:
if table is None:
table = wandb.Table(columns=['metric'] + [k for k in test_loss.cls.keys()] + ['mean'])
cfg.log_string({k: v.avg for k, v in test_loss.cls.items()})
table.add_data(key, *[f"{v.avg:.2f}" for v in test_loss.cls.values()], f"{test_loss.avg:.5f}")
if table is not None:
wandb.summary['metrics_table'] = table
```
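The logging in `test_func` above flattens per-iteration losses into a compact string. A self-contained sketch of that formatting step (the loss dictionary below is invented for illustration):
```python
loss = {'total': 0.4212, 'chamfer': [0.1234, 0.5678]}
pretty_loss = []
for k, v in loss.items():
    if isinstance(v, list):
        pretty_loss.append(str(k) + ': [' + ', '.join([f'{i:.3f}' for i in v]) + ']')
    else:
        pretty_loss.append(f"{k}: {v:.3f}")
print('{' + ', '.join(pretty_loss) + '}')  # {total: 0.421, chamfer: [0.123, 0.568]}
```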
#### File: im3d/utils/preprocess_pix3d4ldif.py
```python
import sys
sys.path.append('.')
from configs.pix3d_config import Config
import os
import glob
import tqdm
from multiprocessing import Pool
from libs.tools import read_obj, write_obj, sample_pnts_from_obj, normalize_to_unit_square
from PIL import Image
import numpy as np
import json
from external.mesh_fusion import scale, fusion, simplify
import sys
import subprocess
from scipy.spatial import cKDTree
# preprocess param
del_intermediate_result = True
skip_done = False
processes = 12
# path settings
config = Config('pix3d')
mesh_folder = os.path.join(config.metadata_path, 'model')
output_root = 'data/pix3d/ldif'
gaps = './external/ldif/gaps/bin/x86_64'
mesh_fusion = 'external/mesh_fusion'
python_bin = sys.executable
skip = ['IKEA_JULES_1.model_-108.706406967_-139.417398691']
# ldif param
scale_norm = 0.25
bbox_half = 0.7
bbox = ' '.join([str(-bbox_half), ] * 3 + [str(bbox_half), ] * 3)
spacing = bbox_half * 2 / 32
print({'bbox_half': bbox_half, 'spacing': spacing})
# mgnet param
neighbors = 30
def normalize(input_path, output_folder):
output_path = os.path.join(output_folder, 'mesh_normalized.obj')
obj_data = read_obj(input_path, ['v', 'f'])
obj_data['v'] = normalize_to_unit_square(obj_data['v'])[0]
write_obj(output_path, obj_data)
return output_path
def make_watertight(input_path, output_folder):
output_path = os.path.join(output_folder, 'mesh_orig.obj')
# convert mesh to off
off_path = os.path.splitext(output_path)[0] + '.off'
subprocess.check_output(f'xvfb-run -a -s "-screen 0 800x600x24" meshlabserver -i {input_path} -o {off_path}',
shell=True)
# scale mesh
# app = scale.Scale(
# f'--in_file {off_path} --out_dir {output_folder} --t_dir {output_folder} --overwrite'.split(' '))
# app.run()
subprocess.check_output(f'{python_bin} {mesh_fusion}/scale.py'
f' --in_file {off_path} --out_dir {output_folder} --t_dir {output_folder} --overwrite',
shell=True)
# create depth maps
# app = fusion.Fusion(
# f'--mode=render --in_file {off_path} --out_dir {output_folder} --overwrite'.split(' '))
# app.run()
subprocess.check_output(f'xvfb-run -a -s "-screen 0 800x600x24" {python_bin} {mesh_fusion}/fusion.py'
f' --mode=render --in_file {off_path} --out_dir {output_folder} --overwrite',
shell=True)
# produce watertight mesh
depth_path = off_path + '.h5'
transform_path = os.path.splitext(output_path)[0] + '.npz'
# app = fusion.Fusion(
# f'--mode=fuse --in_file {depth_path} --out_dir {output_folder} --t_dir {output_folder} --overwrite'.split(' '))
# app.run()
subprocess.check_output(f'{python_bin} {mesh_fusion}/fusion.py --mode=fuse'
f' --in_file {depth_path} --out_dir {output_folder} --t_dir {output_folder} --overwrite',
shell=True)
# # simplify mesh
# obj_path = os.path.splitext(output_path)[0] + '.obj'
# app = simplify.Simplification(
# f'--in_file={obj_path} --out_dir {output_folder}'.split(' '))
# app.run()
# subprocess.check_output(f'xvfb-run -a -s "-screen 0 800x600x24" {python_bin} {mesh_fusion}/simplify.py'
# f' --in_file={obj_path} --out_dir {output_folder}', shell=True)
os.remove(off_path)
os.remove(transform_path)
os.remove(depth_path)
return output_path
def remove_if_exists(f):
if os.path.exists(f):
os.remove(f)
def make_output_folder(mesh_path):
rel_folder = os.path.relpath(mesh_path, mesh_folder).split('/')
model_folder = '.'.join(os.path.splitext(mesh_path)[0].split('/')[-2:])
rel_folder = os.path.join(*rel_folder[:-2], model_folder)
output_folder = os.path.join(output_root, rel_folder)
os.makedirs(output_folder, exist_ok=True)
return output_folder
def process_mgnet(obj_path, output_folder, ext):
obj_data = read_obj(obj_path, ['v', 'f'])
sampled_points = sample_pnts_from_obj(obj_data, 10000, mode='random')
sampled_points.tofile(os.path.join(output_folder, f'gt_3dpoints.{ext}'))
tree = cKDTree(sampled_points)
dists, indices = tree.query(sampled_points, k=neighbors)
densities = np.array([max(dists[point_set, 1]) ** 2 for point_set in indices])
densities.tofile(os.path.join(output_folder, f'densities.{ext}'))
def process_mesh(mesh_path):
output_folder = make_output_folder(mesh_path)
mesh_name = os.path.basename(output_folder)
if mesh_name in skip:
print(f"skipping {mesh_name}")
return
if skip_done and os.path.exists(f'{output_folder}/uniform_points.sdf'):
return
# Step 0) Normalize the mesh and make it watertight before applying all other operations.
normalized_obj = normalize(mesh_path, output_folder)
watertight_obj = make_watertight(normalized_obj, output_folder)
# convert mesh to ply
normalized_ply = os.path.splitext(normalized_obj)[0] + '.ply'
subprocess.check_output(
f'xvfb-run -a -s "-screen 0 800x600x24" meshlabserver -i {normalized_obj} -o {normalized_ply}',
shell=True)
watertight_ply = os.path.splitext(watertight_obj)[0] + '.ply'
subprocess.check_output(
f'xvfb-run -a -s "-screen 0 800x600x24" meshlabserver -i {watertight_obj} -o {watertight_ply}',
shell=True)
scaled_ply = os.path.join(output_folder, 'scaled_watertight.ply')
os.system(f'{gaps}/msh2msh {watertight_ply} {scaled_ply} -scale_by_pca -translate_by_centroid'
f' -scale {scale_norm} -debug_matrix {output_folder}/orig_to_gaps.txt')
# Step 1) Generate the coarse inside/outside grid:
os.system(f'{gaps}/msh2df {scaled_ply} {output_folder}/coarse_grid.grd'
f' -bbox {bbox} -border 0 -spacing {spacing} -estimate_sign')
# Step 2) Generate the near surface points:
os.system(f'{gaps}/msh2pts {scaled_ply} {output_folder}/nss_points.sdf'
f' -near_surface -max_distance {spacing} -num_points 100000 -binary_sdf')
# Step 3) Generate the uniform points:
os.system(f'{gaps}/msh2pts {scaled_ply} {output_folder}/uniform_points.sdf'
f' -uniform_in_bbox -bbox {bbox} -npoints 100000 -binary_sdf')
# Step 4) Generate surface points for MGNet:
process_mgnet(watertight_obj, output_folder, 'mgn')
process_mgnet(normalized_obj, output_folder, 'org')
if del_intermediate_result:
remove_if_exists(normalized_obj)
remove_if_exists(watertight_obj)
remove_if_exists(scaled_ply)
def process_img(sample):
output_folder = make_output_folder(os.path.join(config.metadata_path, sample['model']))
img_name = os.path.splitext(os.path.split(sample['img'])[1])[0]
output_path = os.path.join(output_folder, img_name + '.npy')
if not skip_done or not os.path.exists(output_path):
img = np.array(Image.open(os.path.join(config.metadata_path, sample['img'])).convert('RGB'))
img = img[sample['bbox'][1]:sample['bbox'][3], sample['bbox'][0]:sample['bbox'][2]]
np.save(output_path, img)
img_name = os.path.splitext(os.path.split(sample['mask'])[1])[0]
output_path = os.path.join(output_folder, img_name + '_mask.npy')
if not skip_done or not os.path.exists(output_path):
img = np.array(Image.open(os.path.join(config.metadata_path, sample['mask'])).convert('L'))
img = img[sample['bbox'][1]:sample['bbox'][3], sample['bbox'][0]:sample['bbox'][2]]
np.save(output_path, img)
if __name__ == '__main__':
print('Processing imgs...')
with open(config.metadata_file, 'r') as file:
metadata = json.load(file)
with open(config.train_split, 'r') as file:
splits = json.load(file)
with open(config.test_split, 'r') as file:
splits += json.load(file)
ids = [int(os.path.basename(file).split('.')[0]) for file in splits if 'flipped' not in file]
samples = [metadata[id] for id in ids]
if processes:
with Pool(processes=processes) as p:
r = list(tqdm.tqdm(p.imap(process_img, samples), total=len(samples)))
else:
for sample in tqdm.tqdm(samples):
process_img(sample)
print('Processing meshes...')
mesh_paths = glob.glob(os.path.join(mesh_folder, '*', '*', '*.obj'))
if processes:
with Pool(processes=processes) as p:
r = list(tqdm.tqdm(p.imap(process_mesh, mesh_paths), total=len(mesh_paths)))
else:
for mesh_path in tqdm.tqdm(mesh_paths):
process_mesh(mesh_path)
```
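`process_mgnet` above estimates a per-point density from the k-nearest-neighbour structure of the sampled surface points. A standalone sketch of that step with random points (the point cloud and `k` are placeholders):
```python
import numpy as np
from scipy.spatial import cKDTree

points = np.random.rand(10000, 3).astype(np.float32)   # stand-in for sampled mesh points
k = 30                                                  # same role as `neighbors` above
tree = cKDTree(points)
dists, indices = tree.query(points, k=k)                # dists[:, 0] is the zero self-distance
# For each point: take the nearest-neighbour distance (column 1) of each of its k
# neighbours, keep the maximum, and square it -- mirroring process_mgnet above.
densities = np.array([dists[nbr_idx, 1].max() ** 2 for nbr_idx in indices])
print(densities.shape)                                  # (10000,)
```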
#### File: im3d/utils/sunrgbd_utils.py
```python
import numpy as np
import os
import pickle
from PIL import Image
import json
from scipy.io import loadmat
from libs.tools import get_world_R, normalize_point, yaw_pitch_roll_from_R, R_from_yaw_pitch_roll
from utils.sunrgbd_config import SUNRGBD_CONFIG, SUNRGBD_DATA
import pandas as pd
import jellyfish as jf
from copy import deepcopy
import cv2
sunrgbd_config = SUNRGBD_CONFIG()
class_mapping = pd.read_csv(sunrgbd_config.class_mapping_file).drop(['Unnamed: 0'], axis=1)
def get_cam_KRT(cam_paras, im_size):
'''
Get the camera intrinsic matrix, rotation matrix and origin point.
A point [x, y, z] in world coordinate system can be transformed to the camera system by:
[x, y, z].dot(R)
:param cam_paras: camera parameters with SUNCG form.
:param im_size: [width, height] of an image.
:return: K, R, ori_pnt
'''
ori_pnt = cam_paras[:3]
toward = cam_paras[3:6] # x-axis
toward /= np.linalg.norm(toward)
up = cam_paras[6:9] # y-axis
up /= np.linalg.norm(up)
right = np.cross(toward, up) # z-axis
right /= np.linalg.norm(right)
R = np.vstack([toward, up, right]).T # columns respectively correspond to the toward, up, right vectors.
fov_x = cam_paras[9]
fov_y = cam_paras[10]
width = im_size[0]
height = im_size[1]
f_x = width / (2 * np.tan(fov_x))
f_y = height / (2 * np.tan(fov_y))
K = np.array([[f_x, 0., (width-1)/2.], [0., f_y, (height-1)/2.], [0., 0., 1.]])
return K, R, ori_pnt
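# Illustrative numbers (not from the dataset): with im_size = [640, 480] and
# fov_x = fov_y = 0.55 rad, the formulas above give f_x = 640 / (2 * tan(0.55)) ~= 522,
# f_y = 480 / (2 * tan(0.55)) ~= 391, and a principal point of
# ((640 - 1) / 2, (480 - 1) / 2) = (319.5, 239.5).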
def rotate_towards_cam_front(normal, point, frontal_basis_id):
'''
Rotate the normal in the horizontal plane in steps of pi/2 so that it points in the same direction as `point`.
'''
rot_matrix = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])
rotated_normals = [np.linalg.matrix_power(rot_matrix, i).dot(normal) for i in range(4)]
max_dot_value = 0.
best_normal = None
best_hori_id = None
hori_id = 1 - frontal_basis_id
for vector in rotated_normals:
dot_value = vector.dot(point)
hori_id = 1-hori_id
if dot_value > max_dot_value:
max_dot_value = dot_value
best_normal = vector
best_hori_id = hori_id
return best_normal, best_hori_id
def get_layout_info(layout_3D, cam_front):
'''
get the layout bbox center, sizes and orientation.
We rotate the forward vector of the layout (in steps of pi/2) so that its dot product with the camera forward vector is maximal.
'''
center = layout_3D['centroid']
vectors = layout_3D['vectors']
coeffs = np.linalg.norm(vectors,axis=1)
basis = np.array([vector/np.linalg.norm(vector) for vector in vectors])
# frontal axis
horizontal_dims = [0, 2] # must be two dimensional; the x and z axes are the horizontal axes.
horizontal_id = 0 # we rotate the x-axis (horizontal_dims[horizontal_id]) toward cam front.
frontal_basis = basis[0, : ]
frontal_basis, horizontal_id = rotate_towards_cam_front(frontal_basis, cam_front, horizontal_id)
up_basis = basis[1, : ]
right_basis = np.cross(frontal_basis, up_basis)
frontal_coeff = coeffs[horizontal_dims[horizontal_id]]
up_coeff = coeffs[1]
right_coeff = coeffs[horizontal_dims[1-horizontal_id]]
layout = {}
layout['centroid'] = center
layout['coeffs'] = np.array([frontal_coeff, up_coeff, right_coeff])
layout['basis'] = np.vstack([frontal_basis, up_basis, right_basis])
return layout
def correct_flipped_objects(obj_points, transform_matrix, model_path, voxels=None, sampled_points=None, flipped_objects_in_sunrgbd=[]):
'''
correct wrongly labeled objects to the correct orientation.
:param obj_points: obj points
:param model_path: the path of the obj model.
:param transform_matrix: original transform matrix from the object system to the world system
:return:
'''
# These objects are with an opposite frontal direction.
if model_path.split('/')[-1] in flipped_objects_in_sunrgbd:
R = np.array([[-1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., -1.]])
obj_points = obj_points.dot(R)
transform_matrix[:3,:3] = transform_matrix[:3,:3].dot(R)
if isinstance(voxels, np.ndarray):
voxels = np.rot90(voxels, 2, (0, 2))
if isinstance(sampled_points, np.ndarray):
sampled_points = sampled_points.dot(R)
return obj_points, transform_matrix, voxels, sampled_points
def proj_from_point_to_2d(_points, _K, _R):
'''
To project 3d points from world system to 2D image plane.
Note: the origin of the world system has been moved to the camera center.
:param points: Nx3 vector
:param K: 3x3 intrinsic matrix
:param R: Camera orientation. R:=[v1, v2, v3], the three column vectors respectively denote the toward, up,
right vector relative to the world system.
:return:
'''
points = np.copy(_points)
K = np.copy(_K)
R = np.copy(_R)
D_FLAG = 0
if len(points.shape) == 1:
points = points[None, :]
D_FLAG = 1
p_cam = points.dot(R)
# convert to traditional image coordinate system
T_cam = np.array([[0., 0., 1.], [0., -1., 0.], [1., 0., 0.]])
p_cam = p_cam.dot(T_cam.T)
# flag points with non-positive depth and clamp them to a small positive value to avoid division by zero.
invalid_ids = np.where(p_cam[:,2]<=0)[0]
p_cam[invalid_ids, 2] = 0.0001
p_cam_h = p_cam/p_cam[:,2][:, None]
pixels = (K.dot(p_cam_h.T)).T
if D_FLAG == 1:
pixels = pixels[0][:2]
else:
pixels = pixels[:, :2]
return pixels, invalid_ids
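# Worked example with made-up values: for K = [[500, 0, 320], [0, 500, 240], [0, 0, 1]]
# and R = identity, the point [2, 0, 0] (2 m straight ahead in the toward-up-right frame)
# converts to [0, 0, 2] in the traditional camera frame and projects exactly onto the
# principal point, i.e. pixels = [320, 240] with no invalid ids.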
def get_corners_of_bb3d_no_index(basis, coeffs, centroid):
corners = np.zeros((8, 3))
coeffs = np.abs(coeffs)
corners[0, :] = - basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]
corners[1, :] = - basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[2, :] = + basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[3, :] = + basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]
corners[4, :] = - basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]
corners[5, :] = - basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[6, :] = + basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]
corners[7, :] = + basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]
corners = corners + np.tile(centroid, (8, 1))
return corners
def read_seg2d_data(seg2d_path):
# load seg 2d data.
try:
with open(seg2d_path, encoding='utf-8') as data_file:
seg2d_data = json.load(data_file)
except json.JSONDecodeError as err:
with open(seg2d_path, 'r') as data_file:
content = data_file.readlines()[0]
if "\\" in content:
error_string = "\\"
else:
error_string = content[err.pos - 1:err.pos + 7]
content = content.replace(error_string, "")
seg2d_data = json.loads(content)
number_of_anot = len(seg2d_data["frames"][0]["polygon"])
seg_list = []
for i in range(number_of_anot):
x = seg2d_data["frames"][0]["polygon"][i]["x"]
y = seg2d_data["frames"][0]["polygon"][i]["y"]
idx_obj = seg2d_data["frames"][0]["polygon"][i]["object"]
if idx_obj >= len(seg2d_data['objects']):
continue
label = seg2d_data['objects'][idx_obj]["name"].lower()
label = ''.join(i for i in label if not i.isdigit())
if type(x) != list or type(y) != list:
continue
all_points_x = list(map(round, x))
all_points_y = list(map(round, y))
seg_data = {'polygon':
{'x': all_points_x,
'y': all_points_y},
'name': label}
seg_list.append(seg_data)
return seg_list
# class of SUNRGBD Data
class SUNRGBDData(object):
def __init__(self, K, R_ex, R_tilt, bdb2d, bdb3d, gt3dcorner, imgdepth, imgrgb, seg2d, semantic_seg2d, manhattan_layout,
sequence_name, sequence_id, scene_type):
self._K = K
# R_ex.T is the left-hand camera coordinates -> world coordinates transformation P_world = R_ex*P_camera
self._R_ex = R_ex
# R_tilt is the right-hand camera coordinates -> world coordinates transformation P_world = R_tilt*P_camera(after transformed to x, z, -y)
self._R_tilt = R_tilt
self._bdb2d = bdb2d
self._bdb3d = bdb3d
self._gt3dcorner = gt3dcorner
self._imgdepth = imgdepth
self._imgrgb = imgrgb
self._seg2d = seg2d
self._semantic_seg2d = semantic_seg2d
self._manhattan_layout = manhattan_layout
self._sequence_name = sequence_name
self._sequence_id = sequence_id
self._height, self._width = np.shape(self._imgrgb)[:2]
self._scene_type = scene_type
def __str__(self):
return 'sequence_name: {}, sequence_id: {}'.format(self._sequence_name, self._sequence_id)
def __repr__(self):
return self.__str__()
@property
def width(self):
return self._width
@property
def height(self):
return self._height
@property
def K(self):
return self._K
@property
def R_ex(self):
return self._R_ex
@property
def R_tilt(self):
return self._R_tilt
@property
def bdb2d(self):
return self._bdb2d
@property
def bdb3d(self):
return self._bdb3d
@property
def gt3dcorner(self):
return self._gt3dcorner
@property
def imgdepth(self):
return self._imgdepth
@property
def imgrgb(self):
return self._imgrgb
@property
def seg2d(self):
return self._seg2d
@property
def semantic_seg2d(self):
return self._semantic_seg2d
@property
def manhattan_layout(self):
return self._manhattan_layout
@property
def sequence_name(self):
return self._sequence_name
@property
def sequence_id(self):
return self._sequence_id
@property
def scene_type(self):
return self._scene_type
def readsunrgbdframe(config, image_name=None, image_id=None):
clean_data_path = config.clean_data_root
with open(os.path.join(clean_data_path, 'imagelist.txt'), 'r') as f:
image_list = [line.replace('\n', '') for line in f]
f.close()
if image_name:
image_id = image_list.index(image_name) + 1
with open(os.path.join(clean_data_path, 'data_all', str(image_id) + '.pickle'), 'rb') as f:
img_info = pickle.load(f, encoding='latin1')
# change data root manually
img_info['imgrgb_path'] = img_info['imgrgb_path'].replace('/home/siyuan/Documents/Dataset/SUNRGBD_ALL', config.data_root)
img_info['imgdepth_path'] = img_info['imgdepth_path'].replace('/home/siyuan/Documents/Dataset/SUNRGBD_ALL', config.data_root)
img_info['seg2d_path'] = os.path.join(os.path.dirname(os.path.dirname(img_info['imgdepth_path'])), 'annotation2Dfinal', 'index.json')
img_info['semantic_seg_path'] = os.path.join(config.data_root, 'SUNRGBD/train_test_labels', "img-{0:06d}.png".format(image_id))
# load rgb img
img_info['imgrgb'] = np.array(Image.open(img_info['imgrgb_path']))
# load depth img
imgdepth = np.array(Image.open(img_info['imgdepth_path'])).astype('uint16')
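# SUNRGBD depth convention (as in the official toolbox): the stored 16-bit value is
# bit-rotated by 3, so rotate it back, convert millimetres to metres, and clip at 8 m.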
imgdepth = (imgdepth >> 3) | (imgdepth << 13)
imgdepth = imgdepth.astype('single') / 1000
imgdepth[imgdepth > 8] = 8
img_info['imgdepth'] = imgdepth
if 'gt3dcorner' not in img_info.keys():
img_info['gt3dcorner'] = None
# load segmentation
try:
img_info['seg2d'] = read_seg2d_data(img_info['seg2d_path'])
except:
print(img_info['seg2d_path'])
# img_info['seg2d'] = None
img_info['manhattan_layout'] = loadmat(os.path.join(sunrgbd_config.data_root, '3dlayout', str(image_id) + '.mat'))['manhattan_layout'].T
scene_category_path = os.path.join(config.data_root, img_info['sequence_name'], 'scene.txt')
if not os.path.exists(scene_category_path):
scene_category = None
else:
with open(scene_category_path, 'r') as f:
scene_category = f.readline()
# use updated R_tilt
R_tilt = loadmat(os.path.join(sunrgbd_config.data_root, 'updated_rtilt', str(image_id) + '.mat'))['r_tilt']
R_ex = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]]).dot(R_tilt).dot(
np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]]))
img_info['R_tilt'] = R_tilt
img_info['R_ex'] = R_ex
data_frame = SUNRGBDData(img_info['K'], img_info['R_ex'], img_info['R_tilt'], img_info['bdb2d'], img_info['bdb3d'],
img_info['gt3dcorner'], img_info['imgdepth'], img_info['imgrgb'], img_info['seg2d'], img_info['semantic_seg_path'],
img_info['manhattan_layout'], img_info['sequence_name'], image_id, scene_category)
return data_frame
def cvt_R_ex_to_cam_R(R_ex):
'''
convert SUNRGBD camera R_ex matrix to transform objects from world system to camera system
both under the 'toward-up-right' system.
:return: cam_R matrix
'''
trans_mat = np.array([[0, 0, 1], [0, -1, 0], [1, 0, 0]])
return (trans_mat.T).dot(R_ex).dot(trans_mat)
def get_layout_bdb_from_corners(layout_t):
'''
get coeffs, basis, centroid from corners
:param layout_t: 8x3 numpy array of 3D bounding box corners in
[toward, up, right] coordinates
:return: bounding box parameters
'''
y_max = layout_t[:, 1].max()
y_min = layout_t[:, 1].min()
points_2d = layout_t[layout_t[:, 1] == y_max, :]
points_2d = points_2d[np.argsort(points_2d[:, 0]), :]
vector1 = points_2d[3] - points_2d[1]
vector2 = points_2d[1] - points_2d[0]
coeff1 = np.linalg.norm(vector1)
coeff2 = np.linalg.norm(vector2)
vector1 = normalize_point(vector1)
vector2 = np.cross(vector1, [0, 1, 0])
centroid = np.array(
[points_2d[0, 0] + points_2d[3, 0], float(y_max) + float(y_min), points_2d[0, 2] + points_2d[3, 2]]) * 0.5
basis = np.array([vector1, [0, 1, 0], vector2])
coeffs = np.array([coeff1, y_max-y_min, coeff2]) * 0.5
assert np.linalg.det(basis) > 0.
bdb = {'centroid':centroid, 'basis':basis, 'coeffs':coeffs}
return bdb
def process_layout(layout):
'''
transform sunrgbd layout to toward-up-right form.
:param layout: sunrgbd layout
:return: toward-up-right form.
'''
trans_mat = np.array([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]])
layout_t = (trans_mat.dot(layout.T)).T
bdb = get_layout_bdb_from_corners(layout_t)
return bdb
def check_bdb(bdb2d, m, n):
"""
Check whether a bounding box is valid
Parameters
----------
bdb2d: dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
m: int
width
n: int
height
Returns
-------
valid: bool
"""
if bdb2d['x1'] >= bdb2d['x2'] or bdb2d['y1'] >= bdb2d['y2'] or bdb2d['x1'] > m or bdb2d['y1'] > n:
return False
else:
return True
def check_bdb2d(bdb2ds, img_shape):
result = []
for bdb2d in bdb2ds:
if check_bdb(bdb2d, img_shape[1] - 1, img_shape[0] - 1):
result.append(bdb2d)
else:
print('ground truth not valid')
return result
def find_close_name(name, label_list):
'''
find a close name from label list
:param name: input name
:param label_list: name dictionary
:return: close name.
'''
leve = {}
for label in label_list:
leve[label] = jf.jaro_distance(name, label)
return max(leve, key=leve.get)
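# Illustrative call (made-up strings): find_close_name('offce chair', ['office chair', 'night stand'])
# returns 'office chair', the candidate with the highest Jaro similarity to the query.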
def cvt2nyu37class_map(inst_map, mapping):
class_map = np.zeros_like(inst_map)
for key, value in mapping.items():
class_map[inst_map == key] = value
return class_map
def get_inst_map(seg2d_data, cls_map):
'''
get 2D instance map from segmented polygons.
:param seg2d_data: polygon data for each segmented object.
:param cls_map: semantic cls maps.
:return: 2D instance map with NYU37 labels.
'''
inst_map = np.zeros_like(cls_map, dtype=np.uint8)
inst_cls = {}
for inst_id, inst in enumerate(seg2d_data):
mask = np.zeros_like(cls_map)
cv2.fillConvexPoly(mask, np.vstack([inst['polygon']['x'], inst['polygon']['y']]).T, 1)
labels, counts = np.unique(cls_map[np.nonzero(mask)], return_counts=True)
if len(counts) == 0 :
continue
inst_cls[inst_id + 1] = labels[counts.argmax()]
cv2.fillConvexPoly(inst_map, np.vstack([inst['polygon']['x'], inst['polygon']['y']]).T, inst_id + 1)
return inst_map, inst_cls
def get_campact_layout(layout, depth_map, cam_K, cam_R, bdb3ds):
# get 3d points cloud from depth map
u, v = np.meshgrid(range(depth_map.shape[1]), range(depth_map.shape[0]))
u = u.reshape([1, -1])[0]
v = v.reshape([1, -1])[0]
z_cam = depth_map[v, u]
# remove zeros
non_zero_indices = np.argwhere(z_cam).T[0]
z_cam = z_cam[non_zero_indices]
u = u[non_zero_indices]
v = v[non_zero_indices]
# calculate coordinates
x_cam = (u - cam_K[0][2]) * z_cam / cam_K[0][0]
y_cam = (v - cam_K[1][2]) * z_cam / cam_K[1][1]
# transform to toward-up-right coordinate system
x3 = z_cam
y3 = -y_cam
z3 = x_cam
# transform from camera system to layout system
points_cam = np.vstack([x3, y3, z3]).T
points_cloud = points_cam.dot(cam_R.T).dot(layout['basis'].T)
# layout corners in layout system
layout_corners = get_corners_of_bb3d_no_index(layout['basis'], layout['coeffs'], layout['centroid']).dot(layout['basis'].T)
# instance corners in layout system
instance_corners = []
for bdb3d in bdb3ds:
instance_corners.append(get_corners_of_bb3d_no_index(bdb3d['basis'], bdb3d['coeffs'], bdb3d['centroid']).dot(layout['basis'].T))
if instance_corners:
instance_corners = np.vstack(instance_corners)
# scope
x_min = min(points_cloud[:, 0].min(), instance_corners[:, 0].min())
x_max = max(min(layout_corners[:, 0].max(), points_cloud[:, 0].max()), instance_corners[:, 0].max())
y_min = min(max(points_cloud[:, 1].min(), layout_corners[:, 1].min()), instance_corners[:, 1].min())
y_max = y_min + 3.
z_min = min(max(layout_corners[:, 2].min(), points_cloud[:, 2].min()), instance_corners[:, 2].min())
z_max = max(min(layout_corners[:, 2].max(), points_cloud[:, 2].max()), instance_corners[:, 2].max())
else:
# scope
x_min = points_cloud[:, 0].min()
x_max = min(layout_corners[:, 0].max(), points_cloud[:, 0].max())
y_min = max(points_cloud[:, 1].min(), layout_corners[:, 1].min())
y_max = y_min + 3.
z_min = max(layout_corners[:, 2].min(), points_cloud[:, 2].min())
z_max = min(layout_corners[:, 2].max(), points_cloud[:, 2].max())
new_layout_centroid = np.array([(x_min + x_max)/2., (y_min + y_max)/2., (z_min + z_max)/2.])
new_layout_coeffs = np.array([(x_max - x_min)/2., (y_max - y_min)/2., (z_max - z_min)/2.])
new_layout = deepcopy(layout)
new_layout['centroid'] = new_layout_centroid.dot(layout['basis'])
new_layout['coeffs'] = new_layout_coeffs
return new_layout
def get_NYU37_class_id(names):
'''
get the NYU class id for each class name.
:param names: class names
:return: nyu id.
'''
Name_6585 = class_mapping.Name_6585.values.astype('str')
nyu37class_dict = {}
for inst_id, name in enumerate(names):
# process name
name = name.lower()
name = ''.join(i for i in name if not i.isdigit())
# match name in class_mapping
name = name if name in Name_6585 else find_close_name(name, Name_6585)
nyu37class_dict[inst_id + 1] = class_mapping[class_mapping.Name_6585 == name].Label_37.item()
return nyu37class_dict
def process_bdb2d(bdb2ds, img_shape):
bdb2ds_t_list = []
for bdb2d in bdb2ds:
bdb2ds_t = {}
if 'class_id' in bdb2d.keys():
class_id = bdb2d['class_id']
else:
class_id = get_NYU37_class_id([bdb2d['classname']])[1]
bdb2ds_t['class_id'] = class_id
bdb2ds_t['x1'] = max(bdb2d['x1'], 0)
bdb2ds_t['y1'] = max(bdb2d['y1'], 0)
bdb2ds_t['x2'] = min(bdb2d['x2'], img_shape[1] - 1)
bdb2ds_t['y2'] = min(bdb2d['y2'], img_shape[0] - 1)
bdb2ds_t_list.append(bdb2ds_t)
return bdb2ds_t_list
def process_msk(bdb2ds, cls_masks, seg2d, flip_seg=False):
'''
get instance masks from semantic masks
:param bdb2ds: instance bounding boxes
:param cls_masks: semantic masks
:return: instance masks with each entry as instance id.
'''
# recover the NYU 37 class label for each object
inst_cls = []
inst_masks = []
if not flip_seg:
for inst_id, inst in enumerate(seg2d):
if ('polygon' not in inst) or ('x' not in inst['polygon']) or ('y' not in inst['polygon']) or (
not inst['polygon']['x']) or (not inst['polygon']['y']):
continue
mask = np.zeros_like(cls_masks)
cv2.fillConvexPoly(mask, np.vstack([inst['polygon']['x'], inst['polygon']['y']]).T, 1)
labels, counts = np.unique(cls_masks[np.nonzero(mask)], return_counts=True)
if len(counts) == 0 :
continue
inst_cls.append(labels[counts.argmax()])
inst_masks.append(mask)
else:
for inst_id, inst in enumerate(seg2d):
if ('polygon' not in inst) or ('x' not in inst['polygon']) or ('y' not in inst['polygon']) or len(
inst['polygon']['x']) == 0 or len(inst['polygon']['y']) == 0:
continue
mask = np.zeros_like(cls_masks)
cv2.fillConvexPoly(mask,
np.vstack([mask.shape[1] - 1 - np.array(inst['polygon']['x']), inst['polygon']['y']]).T,
1)
labels, counts = np.unique(cls_masks[np.nonzero(mask)], return_counts=True)
if len(counts) == 0 :
continue
inst_cls.append(labels[counts.argmax()])
inst_masks.append(mask)
inst_masks = np.stack(inst_masks)
target_inst_masks = []
for inst_id, bdb2d in enumerate(bdb2ds):
candidate_inst_ids = [idx for idx, cls in enumerate(inst_cls) if cls == bdb2d['class_id']]
if not candidate_inst_ids:
target_inst_masks.append(None)
continue
candidate_inst_masks = inst_masks[candidate_inst_ids]
n_pixel_for_each_inst = np.sum(candidate_inst_masks.reshape(candidate_inst_masks.shape[0], -1), axis=1)
in_box_inst_masks = candidate_inst_masks[:, bdb2d['y1']:bdb2d['y2'] + 1, bdb2d['x1']:bdb2d['x2'] + 1]
n_in_box_pixel_for_each_inst = np.sum(in_box_inst_masks.reshape(in_box_inst_masks.shape[0], -1), axis=1)
in_box_ratio = n_in_box_pixel_for_each_inst/n_pixel_for_each_inst
if True not in (in_box_ratio >= 0.8):
target_inst_masks.append(None)
continue
target_inst_mask = candidate_inst_masks[in_box_ratio >= 0.8].sum(0).astype(bool)
locs = np.argwhere(target_inst_mask)
y1, x1 = locs.min(0)
y2, x2 = locs.max(0)
target_inst_mask = {'msk_bdb': [x1, y1, x2, y2], 'msk': target_inst_mask[y1:y2 + 1, x1:x2 + 1], 'class_id':bdb2d['class_id']}
target_inst_masks.append(target_inst_mask)
return target_inst_masks
def unprocess_bdb3d(bdb3ds, bdb3d_inv):
trans_mat = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
bdb3ds_t = []
# for bdb3d in bdb3ds:
for bdb3d, inv in zip(bdb3ds, bdb3d_inv):
centroid = trans_mat.dot(bdb3d['centroid'])
basis = bdb3d['basis']
coeffs = bdb3d['coeffs']
vectors = np.diag(coeffs).dot(basis)
vectors = np.matmul(inv, vectors)
# vectors = np.matmul(np.array([[0., 0., -1.], [0., 1., 0.], [1., 0., 0.]]), vectors)
vectors = trans_mat.dot(trans_mat.dot(vectors.T).T)
coeffs = np.linalg.norm(vectors, axis=1)
basis = np.array([normalize_point(vector) for vector in vectors])
bdb3d_t = {'coeffs': coeffs, 'basis': basis, 'centroid': centroid}
if 'class_id' in bdb3d.keys():
bdb3d_t['class_id'] = bdb3d['class_id']
else:
bdb3d_t['classid'] = bdb3d['classid']
bdb3ds_t.append(bdb3d_t)
return bdb3ds_t
def process_bdb3d(bdb3ds, inv=False):
'''
transform sunrgbd 3D object bounding boxes to toward-up-right form in the world system.
:param bdb3ds: sunrgbd 3D object bounding boxes
:return: toward-up-right form.
'''
trans_mat = np.array([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]])
bdb3ds_t = []
bdb3d_inv = []
bdb3ds_o = []
for bdb3d in bdb3ds:
centroid = trans_mat.dot(bdb3d['centroid'][0])
coeffs = bdb3d['coeffs'][0]
basis = bdb3d['basis'].astype('float32')
vectors = trans_mat.dot((trans_mat.dot((np.diag(coeffs).dot(basis)).T)).T)
# let z-axis face forward (consistent with suncg data.)
minv = np.array([[0., 0., -1.], [0., 1., 0.], [1., 0., 0.]])
vectors = np.array([vectors[2], vectors[1], -vectors[0]])
if np.linalg.det(vectors)>0.:
vectors[0] = vectors[0]
else:
vectors[0] = -vectors[0]
minv[2, 0] *= -1
bdb3d_t = {}
bdb3d_t['coeffs'] = np.linalg.norm(vectors, axis=1)
bdb3d_t['basis'] = np.array([normalize_point(vector) for vector in vectors])
if np.linalg.det(vectors)<=0.:
continue
if inv:
bdb3d_inv.append(minv)
bdb3ds_o.append(bdb3d)
bdb3d_t['centroid'] = centroid
if isinstance(bdb3d, dict):
bdb3d_t['class_id'] = bdb3d['class_id']
else:
bdb3d_t['class_id'] = get_NYU37_class_id(bdb3d['classname'])[1]
bdb3ds_t.append(bdb3d_t)
if inv:
return bdb3ds_t, bdb3d_inv, bdb3ds_o
return bdb3ds_t
def transform_to_world(layout, bdb3ds, cam_R, world_R):
'''
transform scene to global world system
:param layout_3D:
:param bdb3ds_ws:
:param cam_R:
:param world_R:
:return:
'''
new_layout = deepcopy(layout)
new_layout['centroid'] = layout['centroid'].dot(world_R) # layout centroid in world system
new_layout['basis'] = layout['basis'].dot(world_R) # layout vectors in world system
new_cam_R = (world_R.T).dot(cam_R)
new_bdb3ds = []
for bdb3d in bdb3ds:
new_bdb3d = deepcopy(bdb3d)
new_bdb3d['centroid'] = bdb3d['centroid'].dot(world_R)
new_bdb3d['basis'] = bdb3d['basis'].dot(world_R)
new_bdb3ds.append(new_bdb3d)
return new_layout, new_bdb3ds, new_cam_R
def flip_layout(layout, cam_R, cam_R_flip):
'''
transform and flip sunrgbd layout to toward-up-right form.
:param layout: sunrgbd layout
:return: toward-up-right form.
'''
# layout is the layout coordinates in world system (toward-up-right form).
centroid_flip = layout['centroid'].dot(cam_R) # layout centroid in camera system
centroid_flip[2] = -1 * centroid_flip[2] # flip right-coordinate values
centroid_flip = centroid_flip.dot(cam_R_flip.T) # transform back to world system
vectors_flip = np.diag(layout['coeffs']).dot(layout['basis']).dot(cam_R) # layout vectors in camera system
vectors_flip[:,2] = -1 * vectors_flip[:,2] # flip right-coordinate values
vectors_flip = vectors_flip.dot(cam_R_flip.T) # transform back to world system
coeffs_flip = np.linalg.norm(vectors_flip, axis=1)
basis_flip = np.array([normalize_point(vector) for vector in vectors_flip])
basis_flip[2, :] = basis_flip[2, :] if np.linalg.det(basis_flip)>0 else -basis_flip[2, :]
bdb_flip = {}
bdb_flip['basis'] = basis_flip
bdb_flip['coeffs'] = coeffs_flip
bdb_flip['centroid'] = centroid_flip
return bdb_flip
def flip_bdb2d(bdb2ds, im_width):
bdb2ds_flip = deepcopy(bdb2ds)
for bdb_idx, bdb2d in enumerate(bdb2ds):
bdb2ds_flip[bdb_idx]['x1'] = im_width - 1 - bdb2d['x2']
bdb2ds_flip[bdb_idx]['x2'] = im_width - 1 - bdb2d['x1']
return bdb2ds_flip
def flip_bdb3d(bdb3ds, cam_R, cam_R_flip):
bdb3ds_flip = deepcopy(bdb3ds)
for bdb_idx, bdb3d in enumerate(bdb3ds):
centroid_flip = bdb3d['centroid'].dot(cam_R) # transform bdb centroid to camera system
centroid_flip[2] = -1 * centroid_flip[2] # flip right-coordinate
centroid_flip = centroid_flip.dot(cam_R_flip.T) # transform back to world system
vectors_flip = np.diag(bdb3d['coeffs']).dot(bdb3d['basis']).dot(cam_R) # transform vectors to camera system
vectors_flip[:, 2] = -1 * vectors_flip[:, 2] # flip right-coordinate
vectors_flip = vectors_flip.dot(cam_R_flip.T) # transform back to world system
coeffs_flip = np.linalg.norm(vectors_flip, axis=1)
basis_flip = np.array([normalize_point(vector) for vector in vectors_flip])
# keep the basis_flip[2,:] vector, because it stands for the forward direction of an object.
basis_flip[0, :] = basis_flip[0, :] if np.linalg.det(basis_flip) > 0 else -basis_flip[0, :]
bdb3ds_flip[bdb_idx]['basis'] = basis_flip
bdb3ds_flip[bdb_idx]['coeffs'] = coeffs_flip
bdb3ds_flip[bdb_idx]['centroid'] = centroid_flip
return bdb3ds_flip
def process_sunrgbd_frame(sample, flip=False):
'''
Read SUNRGBD frame and transform all 3D data to 'toward-up-right' layout system.
:param sample: SUNRGBD frame
:return:
'''
# TODO: define global coordinate system
if not flip:
cam_K = sample.K
cam_R = cvt_R_ex_to_cam_R(sample.R_ex) # camera_rotation matrix in world system
# define a world system
world_R = get_world_R(cam_R)
layout = process_layout(sample.manhattan_layout) # layout bbox in world system
centroid = layout['centroid']
vectors = np.diag(layout['coeffs']).dot(layout['basis'])
# Set all points relative to the layout orientation (i.e. let the layout orientation define the world system).
# The forward direction (x-axis) of layout orientation should point toward camera forward direction.
layout_3D = get_layout_info({'centroid': centroid, 'vectors': vectors}, cam_R[:, 0])
lo_inv = np.matmul(layout['basis'], np.linalg.inv(layout_3D['basis']))
bdb2ds = process_bdb2d(check_bdb2d(sample.bdb2d, sample.imgrgb.shape), sample.imgrgb.shape)
masks = np.array(Image.open(sample.semantic_seg2d))
masks = process_msk(bdb2ds, masks, sample.seg2d, flip_seg=False)
bdb3ds_ws, bdb3d_inv, bdb3ds_o = process_bdb3d(sample.bdb3d, inv=True) # bdb3d in old world system
# transform everything to world system
world_R_inv = np.linalg.inv(world_R)
layout_3D, bdb3ds_ws, cam_R = transform_to_world(layout_3D, bdb3ds_ws, cam_R, world_R)
instance_info_list = {} # bdb2d and bdb3d in layout system
instance_info_list['bdb2d'] = bdb2ds
instance_info_list['bdb3d'] = bdb3ds_ws
instance_info_list['inst_masks'] = masks
# layout_3D = get_campact_layout(layout_3D, sample.imgdepth, cam_K, cam_R, bdb3ds_ws)
# invert the Total3D-format gt back to the original coordinates (consistency check below)
def remove_error(inv):
inv[inv > 0.1] = 1.
inv[inv < -0.1] = -1.
inv[(-0.1 < inv) & (inv < 0.1)] = 0.
return inv
lo_inv = remove_error(lo_inv)
layout_3D_co, bdb3ds_ws_co, cam_R_co = transform_to_world(layout_3D, bdb3ds_ws, cam_R, world_R_inv)
cam_R_co = cvt_R_ex_to_cam_R(cam_R_co)
assert (sample.R_ex - cam_R_co).mean() < 0.0001
layout_3D_co['basis'] = np.matmul(lo_inv, layout_3D_co['basis'])
layout_3D_co['coeffs'] = np.matmul(lo_inv, layout_3D_co['coeffs'])
layout_3D_co = get_corners_of_bb3d_no_index(
layout_3D_co['basis'], layout_3D_co['coeffs'], layout_3D_co['centroid'])
trans_mat = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
layout_3D_co = (trans_mat.dot(layout_3D_co.T)).T
bdb3ds_ws_co = unprocess_bdb3d(bdb3ds_ws_co, bdb3d_inv)
for b1, b2 in zip(bdb3ds_ws_co, bdb3ds_o):
assert (b1['basis'] - b2['basis']).mean() < 0.0001
assert (b1['coeffs'] - b2['coeffs']).mean() < 0.0001
assert (b1['centroid'] - b2['centroid']).mean() < 0.0001
frame = SUNRGBD_DATA(cam_K, cam_R, sample.scene_type, sample.imgrgb, sample.imgdepth, layout_3D, sample.sequence_id,
sample.sequence_name, instance_info_list, world_R_inv, lo_inv, bdb3d_inv)
else:
img_shape = sample.imgrgb.shape[:2]
cam_K_flip = deepcopy(sample.K)
cam_K_flip[0][2] = img_shape[1] - cam_K_flip[0][2] # flip cam_K
# camera vectors in world system.
cam_R = cvt_R_ex_to_cam_R(sample.R_ex) # camera_rotation matrix in world system
_, pitch, roll = yaw_pitch_roll_from_R(cam_R)
# flip camera R
cam_R_flip = R_from_yaw_pitch_roll(0, pitch, -roll)
# get ordinary layout first in world system.
layout = process_layout(sample.manhattan_layout) # layout bbox in world system
centroid = layout['centroid']
vectors = np.diag(layout['coeffs']).dot(layout['basis'])
# The forward direction (x-axis) of layout orientation should point toward camera forward direction.
layout_3D = get_layout_info({'centroid': centroid, 'vectors': vectors}, cam_R[:, 0])
# flip layout (we need to horizontally flip the layout in the camera system first, then transform it back to the world system.)
layout_3D_flip = flip_layout(layout_3D, cam_R, cam_R_flip) # flipped layout bbox in world system
# Set all points relative to the layout orientation (i.e. let the layout orientation define the world system).
bdb2ds = process_bdb2d(check_bdb2d(sample.bdb2d, sample.imgrgb.shape), sample.imgrgb.shape)
bdb2ds_flip = flip_bdb2d(bdb2ds, sample.imgrgb.shape[1])
masks = np.array(Image.open(sample.semantic_seg2d).transpose(Image.FLIP_LEFT_RIGHT))
masks = process_msk(bdb2ds_flip, masks, sample.seg2d, flip_seg=True)
bdb3ds_ws = process_bdb3d(sample.bdb3d) # bdb3d in world system
bdb3ds_ws_flip = flip_bdb3d(bdb3ds_ws, cam_R, cam_R_flip)
instance_info_list = {} # bdb2d and bdb3d in layout system
instance_info_list['bdb2d'] = bdb2ds_flip
instance_info_list['bdb3d'] = bdb3ds_ws_flip
instance_info_list['inst_masks'] = masks
# # get compact layout
# depth_img_flip = np.array(Image.fromarray(sample.imgdepth).transpose(Image.FLIP_LEFT_RIGHT))
# layout_3D_flip = get_campact_layout(layout_3D_flip, depth_img_flip, cam_K_flip, cam_R_flip, bdb3ds_ws_flip)
# flip image in the end.
rgb_img = np.array(Image.fromarray(sample.imgrgb).transpose(Image.FLIP_LEFT_RIGHT))
depth_map = np.array(Image.fromarray(sample.imgdepth).transpose(Image.FLIP_LEFT_RIGHT))
frame = SUNRGBD_DATA(cam_K_flip, cam_R_flip, sample.scene_type, rgb_img, depth_map, layout_3D_flip,
sample.sequence_id, sample.sequence_name, instance_info_list)
return frame
``` |
{
"source": "jiyeonkim127/PSI",
"score": 2
} |
#### File: human_body_prior/tools/training_tools.py
```python
import numpy as np
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=7):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
"""
self.reset(patience)
def __call__(self, val_loss):
score = -val_loss
if self.best_score is None:
self.best_score = score
elif score < self.best_score:
self.counter += 1
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.counter = 0
return self.early_stop
def reset(self, patience=7):
self.patience = patience
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
```
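A minimal usage sketch of the `EarlyStopping` helper above (the validation losses are invented; stopping triggers after `patience` consecutive epochs without improvement):
```python
# Assumes EarlyStopping from human_body_prior.tools.training_tools above.
early_stopping = EarlyStopping(patience=3)
for epoch, val_loss in enumerate([1.00, 0.90, 0.95, 0.96, 0.97, 0.98]):
    if early_stopping(val_loss):
        print(f'early stop at epoch {epoch}')   # triggers at epoch 4
        break
```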
#### File: PSI/qe/train.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys, os, glob
import pdb
import json
import argparse
import numpy as np
import open3d as o3d
import datetime
# proj_path = '/home/ryeon/project/psi'
proj_path = '/home/uwang/psi_jy'
sys.path.append(proj_path)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import torch.optim as optim
from torch.optim import lr_scheduler
import smplx
from human_body_prior.tools.model_loader import load_vposer
import chamfer_pytorch.dist_chamfer as ext
from cvae import BodyParamParser, HumanCVAE, GeometryTransformer
from batch_gen_hdf5 import BatchGeneratorWithAction
import time
class TrainOP:
def __init__(self, trainconfig, lossconfig):
for key, val in trainconfig.items():
setattr(self, key, val)
for key, val in lossconfig.items():
setattr(self, key, val)
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
if len(self.ckp_dir) > 0:
self.resume_training = True
### define model
# if self.use_cont_rot:
# n_dim_body=72+3
# else:
# n_dim_body=72
n_dim_body=51
self.model_h_latentD = 128
self.model_h = HumanCVAE(latentD=self.model_h_latentD,
scene_model_ckpt=self.scene_model_ckpt,
n_dim_body=n_dim_body)
self.optimizer_h = optim.Adam(self.model_h.parameters(),
lr=self.init_lr_h)
### body mesh model
# self.vposer, _ = load_vposer(self.vposer_ckpt_path, vp_model='snapshot')
# self.body_mesh_model = smplx.create(self.human_model_path,
# model_type='smplx',
# gender='neutral', ext='npz',
# num_pca_comps=12,
# create_global_orient=True,
# create_body_pose=True,
# create_betas=True,
# create_left_hand_pose=True,
# create_right_hand_pose=True,
# create_expression=True,
# create_jaw_pose=True,
# create_leye_pose=True,
# create_reye_pose=True,
# create_transl=True,
# batch_size=self.batch_size
# )
#
# self.smplx_face_idx_np = np.load(os.path.join(self.human_model_path,
# 'smplx/SMPLX_NEUTRAL.npz'),
# allow_pickle=True)['f'].reshape(-1,3)
# self.smplx_face_idx = torch.tensor(
# self.smplx_face_idx_np.astype(np.int64),
# device=self.device
# )
print('--[INFO] device: '+str(torch.cuda.get_device_name(self.device)) )
def cal_loss(self, xa, xh):
# normalize global trans
xhn = GeometryTransformer.normalize_global_T(xh, cam_int, max_d)
# convert rotation
xhnr = GeometryTransformer.convert_to_6D_rot(xhn)
# forward pass of model
xhnr_rec, mu, logsigma2 = self.model_h(xhnr, xa)
# recover rotation
xhn_rec = GeometryTransformer.convert_to_3D_rot(xhnr_rec)
# recover global trans
xh_rec = GeometryTransformer.recover_global_T(xhn_rec, cam_int, max_d)
loss_rec_t = self.weight_loss_rec_h*( 0.5*F.l1_loss(xhnr_rec[:,:3], xhnr[:,:3])
+0.5*F.l1_loss(xh_rec[:,:3], xh[:,:3]))
loss_rec_p = self.weight_loss_rec_h*(F.l1_loss(xhnr_rec[:,3:], xhnr[:,3:]))
        ### KL divergence loss, annealing scheme
fca = 1.0
if self.loss_weight_anealing:
fca = min(1.0, max(float(ep) / (self.epoch*0.75),0) )
        # closed-form KL(N(mu, sigma^2) || N(0, I)) for a diagonal Gaussian posterior
        loss_KL = (fca**2 *self.weight_loss_kl
* 0.5*torch.mean(torch.exp(logsigma2) +mu**2 -1.0 -logsigma2))
### Vposer loss
vposer_pose = xh_rec[:,16:48]
loss_vposer = self.weight_loss_vposer * torch.mean(vposer_pose**2)
### contact loss
# (1) get the reconstructed body mesh
body_param_rec = BodyParamParser.body_params_encapsulate_batch(xh_rec)
joint_rot_batch = self.vposer.decode(body_param_rec['body_pose_vp'],
output_type='aa').view(self.batch_size, -1)
body_param_ = {}
for key in body_param_rec.keys():
if key in ['body_pose_vp']:
continue
else:
body_param_[key] = body_param_rec[key]
smplx_output = self.body_mesh_model(return_verts=True,
body_pose=joint_rot_batch,
**body_param_)
body_verts_batch = smplx_output.vertices #[b, 10475,3]
body_verts_batch = GeometryTransformer.verts_transform(body_verts_batch, cam_ext)
        # (2) get vertices according to the PROX contact annotation
vid, fid = GeometryTransformer.get_contact_id(body_segments_folder=self.contact_id_folder,
contact_body_parts=self.contact_part)
body_verts_contact_batch = body_verts_batch[:, vid, :]
## (3) compute chamfer loss between pcd_batch and body_verts_batch
dist_chamfer_contact = ext.chamferDist()
contact_dist, _ = dist_chamfer_contact(
body_verts_contact_batch.contiguous(),
scene_verts.contiguous()
)
fcc = 0.0
if ep > 0.75*self.epoch:
fcc = 1.0
loss_contact = (fcc *self.weight_contact *
torch.mean( torch.sqrt(contact_dist+1e-4)
/(torch.sqrt(contact_dist+1e-4)+1.0) ) )
### SDF scene penetration loss
s_grid_min_batch = s_grid_min_batch.unsqueeze(1)
s_grid_max_batch = s_grid_max_batch.unsqueeze(1)
norm_verts_batch = ((body_verts_batch - s_grid_min_batch)
/ (s_grid_max_batch - s_grid_min_batch) *2 -1)
n_verts = norm_verts_batch.shape[1]
body_sdf_batch = F.grid_sample(s_grid_sdf_batch.unsqueeze(1),
norm_verts_batch[:,:,[2,1,0]].view(-1, n_verts,1,1,3),
padding_mode='border')
# if there are no penetrating vertices then set sdf_penetration_loss = 0
if body_sdf_batch.lt(0).sum().item() < 1:
loss_sdf_pene = torch.tensor(0.0, dtype=torch.float32,
device=self.device)
else:
loss_sdf_pene = body_sdf_batch[body_sdf_batch < 0].abs().mean()
fsp = 0.0
if ep > 0.75*self.epoch:
fsp = 1.0
loss_sdf_pene = fsp*self.weight_collision*loss_sdf_pene
return [loss_rec_t, loss_rec_p, loss_KL, loss_contact,
loss_vposer, loss_sdf_pene]
def train(self, batch_gen):
self.model_h.train()
self.model_h.to(self.device)
self.vposer.to(self.device)
self.body_mesh_model.to(self.device)
starting_ep = 0
if self.resume_training:
ckp_list = sorted(glob.glob(os.path.join(self.ckp_dir,'epoch-*.ckp')),
key=os.path.getmtime)
if len(ckp_list)>0:
checkpoint = torch.load(ckp_list[-1])
self.model_h.load_state_dict(checkpoint['model_h_state_dict'])
self.optimizer_h.load_state_dict(checkpoint['optimizer_h_state_dict'])
starting_ep = checkpoint['epoch']
print('[INFO] --resuming training from {}'.format(ckp_list[-1]))
print('--[INFO] start training')
start_time = time.time()
for ep in range(starting_ep, self.epoch):
epoch_loss_rec_h = 0
epoch_loss_KL = 0
epoch_loss_vposer = 0
epoch_loss_contact = 0
epoch_loss_collision = 0
while batch_gen.has_next_batch():
self.optimizer_h.zero_grad()
######### get training data batch #########
train_data = batch_gen.next_batch(self.batch_size)
if train_data is None:
continue
[action_batch, smpl_batch] = train_data
### calculate loss
                [loss_rec_t,
                 loss_rec_p,
                 loss_KL,
                 loss_contact,
                 loss_vposer,
                 loss_sdf_pene] = self.cal_loss(xa=action_batch, xh=smpl_batch)
# loss_coll = loss_sdf_pene
# loss_h = (loss_rec_t + loss_rec_p + loss_vposer+ loss_KL
# + loss_contact+loss_coll)
loss_h = (loss_rec_t + loss_rec_p + loss_vposer)
### update alternatingly
loss_h.backward(retain_graph=True)
self.optimizer_h.step()
if self.verbose:
print("---in [epoch {:d}]: rec_t={:f}, rec_p={:f}, kl={:f}, vp={:f}"
.format(ep + 1,
loss_rec_t.item(),
loss_rec_p.item(),
loss_KL.item(),
loss_vposer.item()
))
elapsed_time = (time.time() - start_time)/3600.0
if elapsed_time >= 2:
start_time = time.time()
torch.save({
'epoch': ep+1,
'model_h_state_dict': self.model_h.state_dict(),
'optimizer_h_state_dict': self.optimizer_h.state_dict(),
}, self.save_dir + "/epoch-{:06d}".format(ep + 1) + ".ckp")
batch_gen.reset()
## save checkpoints
if (ep+1) % 10 == 0:
torch.save({
'epoch': ep+1,
'model_h_state_dict': self.model_h.state_dict(),
'optimizer_h_state_dict': self.optimizer_h.state_dict(),
}, self.save_dir + "/epoch-{:06d}".format(ep + 1) + ".ckp")
# if self.verbose:
# print("--[epoch {:d}]: rec_s={:f}, rec_h={:f}, kl={:f}, vp={:f}, contact={:f}, collision={:f}"
# .format(ep + 1,
# epoch_loss_rec_s / (len(batch_gen.rec_list)//self.batch_size),
# epoch_loss_rec_h / (len(batch_gen.rec_list)//self.batch_size),
# epoch_loss_KL / (len(batch_gen.rec_list)//self.batch_size),
# epoch_loss_vposer / (len(batch_gen.rec_list)//self.batch_size),
# epoch_loss_contact / (len(batch_gen.rec_list)//self.batch_size),
# epoch_loss_collision / (len(batch_gen.rec_list)//self.batch_size) ))
if self.verbose:
print('[INFO]: Training completes!')
print()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--ckp_dir', type=str, default='', help='dir for checkpoints')
parser.add_argument('--batch_size', type=int, default=128,
help='batch size to train')
    parser.add_argument('--lr_s', type=float, default=0.001,
                        help='initial learning rate')
    parser.add_argument('--lr_h', type=float, default=0.0001,
                        help='initial learning rate')
parser.add_argument('--num_epoch', type=int, default=50,
help='#epochs to train')
parser.add_argument('--weight_loss_vposer', type=float, default=1e-3)
parser.add_argument('--weight_loss_kl', type=float, default=1e-1)
parser.add_argument('--weight_loss_contact', type=float, default=1e-1)
parser.add_argument('--weight_loss_collision', type=float, default=1e-1)
parser.add_argument('--use_all', type=int, default=0, help='use samples in training and testing scenes')
parser.add_argument('--only_vircam', type=int, default=0, help='only samples from virtualcams')
args = parser.parse_args()
    ### setup dataset paths and training configurations
# dataset_path = '/data/proxe'
dataset_path = '/home/uwang/dataset/proxe'
KST = datetime.timezone(datetime.timedelta(hours=9))
save_folder = str(datetime.datetime.now(tz=KST))[5:-16]
save_folder = save_folder.replace(" ", "_")
save_dir= '/home/uwang/psi_jy/checkpoints/s1/{}'.format(save_folder)
resume_training=False
if args.only_vircam == 1:
trainfile = os.path.join(dataset_path, 'virtualcams_TNoise0.5.hdf5')
else:
trainfile = [os.path.join(dataset_path, 'virtualcams_TNoise0.5.hdf5'),
os.path.join(dataset_path, 'realcams.hdf5')]
ckp_dir = args.ckp_dir
resume_training=False
trainconfig={
'train_data_path': trainfile,
'scene_verts_path': os.path.join(dataset_path, 'scenes_downsampled'),
'scene_sdf_path': os.path.join(dataset_path, 'scenes_sdf'),
'scene_model_ckpt': os.path.join(proj_path,'data/resnet18.pth'),
# 'human_model_path': '/is/ps2/yzhang/body_models/VPoser',
# 'vposer_ckpt_path': '/is/ps2/yzhang/body_models/VPoser/vposer_v1_0',
'human_model_path': '/home/uwang/dataset/smpl_models',
'vposer_ckpt_path': '/home/uwang/dataset/smpl_models/vposer_v1_0',
'init_lr_s': args.lr_s,
'init_lr_h': args.lr_h,
'batch_size': args.batch_size, # e.g. 30
'epoch': args.num_epoch,
'loss_weight_anealing': True,
'device': torch.device("cuda" if torch.cuda.is_available() else "cpu"),
'fine_tuning': None,
'save_dir' : save_dir,
'ckp_dir': ckp_dir,
'contact_id_folder': os.path.join(dataset_path, 'body_segments'),
'contact_part': ['back','butt','L_Hand','R_Hand','L_Leg','R_Leg','thighs'],
'saving_per_X_ep': 2,
'verbose': True,
'use_cont_rot': True,
'resume_training': resume_training
}
lossconfig={
'weight_loss_rec_s': 1.0,
'weight_loss_rec_h': 1.0,
'weight_loss_vposer':args.weight_loss_vposer,
'weight_loss_kl': args.weight_loss_kl,
'weight_contact': args.weight_loss_contact,
'weight_collision' : args.weight_loss_collision
}
if args.use_all == 1:
mode='all'
else:
mode='train'
# batch_gen = BatchGeneratorWithSceneMesh(dataset_path=trainconfig['train_data_path'],
# scene_verts_path = trainconfig['scene_verts_path'],
# scene_sdf_path = trainconfig['scene_sdf_path'],
# mode=mode,
# device=trainconfig['device'],
# read_all_to_ram=False)
action_list = ['6', '7', '8', '11', '12', '14']
batch_gen = BatchGeneratorWithAction(dataset_path='/home/uwang/psi_jy/data/ava_action_smpl.json',
action_list=action_list,
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
mode='train')
train_op = TrainOP(trainconfig, lossconfig)
train_op.train(batch_gen)
```
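The KL term in `cal_loss` above is scaled by an annealing factor that ramps linearly over the first 75% of training and is then squared before multiplying the KL weight. A small standalone sketch of that schedule; the 50 epochs match the default `--num_epoch`, but the value is only illustrative.
```python
# Reproduces the annealing factor used for the KL term in cal_loss.
num_epoch = 50
for ep in (0, 10, 25, 37, 49):
    fca = min(1.0, max(float(ep) / (num_epoch * 0.75), 0))
    # The effective KL weight ramps quadratically and saturates once ep >= 0.75 * num_epoch.
    print(ep, round(fca ** 2, 3))
```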
#### File: PSI/utils/utils_eval_diversity.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import pickle
import sys, os, glob
import pdb
import json
import argparse
import numpy as np
import open3d as o3d
sys.path.append('/home/yzhang/workspaces/smpl-env-gen-3d-internal')
sys.path.append('/home/yzhang/workspaces/smpl-env-gen-3d-internal/source')
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import smplx
from human_body_prior.tools.model_loader import load_vposer
import chamfer_pytorch.dist_chamfer as ext
from cvae import BodyParamParser, GeometryTransformer
from sklearn.cluster import KMeans
def fitting(fittingconfig):
input_data_file = fittingconfig['input_data_file']
with open(input_data_file, 'rb') as f:
body_param_input = pickle.load(f)
xh, _, _= BodyParamParser.body_params_parse_fitting(body_param_input)
return xh.detach().cpu().numpy()
if __name__=='__main__':
gen_path = sys.argv[1]
if 'proxe' in gen_path:
scene_test_list = ['MPH16', 'MPH1Library','N0SittingBooth', 'N3OpenArea']
elif 'habitat' in gen_path:
scene_test_list = ['17DRP5sb8fy-bedroom', '17DRP5sb8fy-familyroomlounge',
'17DRP5sb8fy-livingroom', 'sKLMLpTHeUy-familyname_0_1',
'X7HyMhZNoso-livingroom_0_16', 'zsNo4HB9uLZ-bedroom0_0',
'zsNo4HB9uLZ-livingroom0_13']
xh_list = []
for scenename in scene_test_list:
for ii in range(5000):
input_data_file = os.path.join(gen_path,scenename+'/body_gen_{:06d}.pkl'.format(ii))
if not os.path.exists(input_data_file):
continue
fittingconfig={
'input_data_file': input_data_file,
'scene_verts_path': '/home/yzhang/Videos/PROXE/scenes_downsampled/'+scenename+'.ply',
'scene_sdf_path': '/home/yzhang/Videos/PROXE/scenes_sdf/'+scenename,
'human_model_path': '/home/yzhang/body_models/VPoser',
'vposer_ckpt_path': '/home/yzhang/body_models/VPoser/vposer_v1_0',
'init_lr_h': 0.1,
'num_iter': 50,
'batch_size': 1,
'device': torch.device("cuda" if torch.cuda.is_available() else "cpu"),
'contact_id_folder': '/is/cluster/work/yzhang/PROX/body_segments',
'contact_part': ['back','butt','L_Hand','R_Hand','L_Leg','R_Leg','thighs'],
'verbose': False
}
xh = fitting(fittingconfig)
xh_list.append(xh)
ar = np.concatenate(xh_list, axis=0)
## k-means
import scipy.cluster
codes, dist = scipy.cluster.vq.kmeans(ar, 20)
vecs, dist = scipy.cluster.vq.vq(ar, codes) # assign codes
counts, bins = scipy.histogram(vecs, len(codes)) # count occurrences
from scipy.stats import entropy
ee = entropy(counts)
print('entropy:' + str(ee))
print('mean distance:' + str(np.mean(dist)) )
```
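The diversity metric above clusters all generated body parameters and reports the entropy of the cluster occupancy together with the mean quantization distance. A self-contained sketch of the same computation on random stand-in data; the array shape and the 20 clusters are illustrative, mirroring the script rather than reproducing its inputs.
```python
import numpy as np
import scipy.cluster.vq
from scipy.stats import entropy

# Random stand-in for the concatenated body-parameter array (shape is illustrative).
ar = np.random.randn(500, 51)
codes, _ = scipy.cluster.vq.kmeans(ar, 20)          # k-means codebook
vecs, dist = scipy.cluster.vq.vq(ar, codes)         # assign each sample to its nearest code
counts, _ = np.histogram(vecs, bins=len(codes))     # cluster occupancy
print('entropy:', entropy(counts))                  # higher entropy = samples spread over clusters
print('mean distance:', np.mean(dist))              # mean quantization distance
```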
#### File: PSI/utils/utils_prox_snapshots_realcam.py
```python
import os, sys
import os.path as osp
import cv2
import numpy as np
import json
import yaml
import open3d as o3d
import trimesh
import argparse
import matplotlib.pyplot as plt
sys.path.append('/home/yzhang/workspaces/smpl-env-gen-3d-internal')
import torch
import pickle
import smplx
from human_body_prior.tools.model_loader import load_vposer
import pandas as pd
from scipy.spatial.transform import Rotation as R
import scipy.io as sio
import glob
def hex2rgb(hex_color_list):
rgb_list = []
for hex_color in hex_color_list:
h = hex_color.lstrip('#')
rgb = list(int(h[i:i+2], 16) for i in (0, 2, 4))
rgb_list.append(rgb)
return np.array(rgb_list)
def color_encoding(mesh):
'''
we use the color coding of Matterport3D
'''
## get the color coding from Matterport3D
matter_port_label_filename = '/is/ps2/yzhang/Pictures/Matterport/metadata/mpcat40.tsv'
df = pd.DataFrame()
df = pd.read_csv(matter_port_label_filename,sep='\t')
color_coding_hex = list(df['hex']) # list of str
color_coding_rgb = hex2rgb(color_coding_hex)
## update the mesh vertex color accordingly
verid = np.mean(np.asarray(mesh.vertex_colors)*255/5.0,axis=1).astype(int)
verid[verid>=41]=41
vercolor = np.take(color_coding_rgb, list(verid), axis=0)
mesh.vertex_colors = o3d.utility.Vector3dVector(vercolor/255.0)
return mesh
def update_cam(cam_param, trans):
cam_R = np.transpose(trans[:-1, :-1])
cam_T = -trans[:-1, -1:]
cam_T = np.matmul(cam_R, cam_T) #!!!!!! T is applied in the rotated coord
cam_aux = np.array([[0,0,0,1]])
mat = np.concatenate([cam_R, cam_T],axis=-1)
mat = np.concatenate([mat, cam_aux],axis=0)
cam_param.extrinsic = mat
return cam_param
def get_trans_mat(R, T):
mat_aux = np.array([[0,0,0,1]])
mat = np.concatenate([R, T.reshape([3,1])],axis=-1)
mat = np.concatenate([mat, mat_aux],axis=0)
return mat
def main(args):
fitting_dir = args.fitting_dir
recording_name = os.path.abspath(fitting_dir).split("/")[-1]
fitting_dir = osp.join(fitting_dir, 'results')
data_dir = args.data_dir
cam2world_dir = osp.join(data_dir, 'cam2world')
scene_dir = osp.join(data_dir, 'scenes_semantics')
recording_dir = osp.join(data_dir, 'recordings', recording_name)
color_dir = os.path.join(recording_dir, 'Color')
scene_name = os.path.abspath(recording_dir).split("/")[-1].split("_")[0]
output_folder = os.path.join('/mnt/hdd','PROX','realcams_v3')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
### setup visualization window
vis = o3d.visualization.Visualizer()
vis.create_window(width=480, height=270,visible=True)
    render_opt = vis.get_render_option()
    render_opt.mesh_show_back_face = True
### put the scene into the environment
if scene_name in ['MPH112', 'MPH16']:
scene = o3d.io.read_triangle_mesh(osp.join(scene_dir, scene_name + '_withlabels_OpenAWall.ply'))
else:
scene = o3d.io.read_triangle_mesh(osp.join(scene_dir, scene_name + '_withlabels.ply'))
trans = np.eye(4)
with open(os.path.join(cam2world_dir, scene_name + '.json'), 'r') as f:
trans = np.array(json.load(f))
vis.add_geometry(scene)
### setup rendering cam, depth capture, segmentation capture
ctr = vis.get_view_control()
cam_param = ctr.convert_to_pinhole_camera_parameters()
cam_param = update_cam(cam_param, trans)
ctr.convert_from_pinhole_camera_parameters(cam_param)
## capture depth image
depth = np.asarray(vis.capture_depth_float_buffer(do_render=True))
_h = depth.shape[0]
_w = depth.shape[1]
factor = 4
depth = cv2.resize(depth, (_w//factor, _h//factor))
## capture semantics
seg = np.asarray(vis.capture_screen_float_buffer(do_render=True))
verid = np.mean(seg*255/5.0,axis=-1) #.astype(int)
verid = cv2.resize(verid, (_w//factor, _h//factor))
## get render cam parameters
cam_dict = {}
cam_dict['extrinsic'] = cam_param.extrinsic
cam_dict['intrinsic'] = cam_param.intrinsic.intrinsic_matrix
count = 0
for img_name in sorted(os.listdir(fitting_dir))[::15]:
print('viz frame {}'.format(img_name))
## get humam body params
filename =osp.join(fitting_dir, img_name, '000.pkl')
if not os.path.exists(filename):
continue
with open(osp.join(fitting_dir, img_name, '000.pkl'), 'rb') as f:
param = pickle.load(f)
body_dict={}
for key, val in param.items():
if key in ['camera_rotation', 'camera_translation',
'jaw_pose', 'leye_pose','reye_pose','expression']:
continue
else:
body_dict[key]=param[key]
## save depth, semantics and render cam
outname1 = os.path.join(output_folder,recording_name)
if not os.path.exists(outname1):
os.mkdir(outname1)
outname = os.path.join(outname1, 'rec_{:06d}.mat'.format(count))
ot_dict={}
ot_dict['scaling_factor']=factor
ot_dict['depth']=depth
ot_dict['seg'] = verid
ot_dict['cam'] = cam_dict
ot_dict['body'] = body_dict
sio.savemat(outname, ot_dict)
count += 1
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
if __name__ == '__main__':
fitting_dir_list = glob.glob('/mnt/hdd/PROX/PROXD/*')
data_dir = '/mnt/hdd/PROX'
model_folder = '/home/yzhang/body_models/VPoser/'
args= {}
for fitting_dir in fitting_dir_list:
print('-- process {}'.format(fitting_dir))
args['fitting_dir'] = fitting_dir
args['data_dir'] = data_dir
args['model_folder'] = model_folder
main(Struct(**args))
``` |
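`update_cam` above converts a cam2world transform into an Open3D extrinsic by transposing the rotation and rotating the negated translation, which is exactly the inverse of the original rigid transform. A minimal numpy check (not part of the original script); the rotation angle and translation below are made-up values.
```python
import numpy as np

# Arbitrary rigid cam2world transform: a rotation about z plus a translation.
theta = np.deg2rad(30.0)
trans = np.eye(4)
trans[:3, :3] = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                          [np.sin(theta),  np.cos(theta), 0.0],
                          [0.0,            0.0,           1.0]])
trans[:3, 3] = [0.5, -1.0, 2.0]

# Same construction as update_cam(): R^T and -R^T t give the world2cam extrinsic.
cam_R = np.transpose(trans[:-1, :-1])
cam_T = np.matmul(cam_R, -trans[:-1, -1:])
extrinsic = np.concatenate([np.concatenate([cam_R, cam_T], axis=-1),
                            np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0)
assert np.allclose(extrinsic, np.linalg.inv(trans))
```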
{
"source": "jiyeon-ops/TizenRT",
"score": 2
} |
#### File: tools/ramdump/dumpParser.py
```python
from __future__ import print_function
import re
import os
import string
import sys, time
import struct
import subprocess
from getopt import GetoptError, getopt as GetOpt
# Global variables
FP = 11
SP = 13
LR = 14
PC = 15
g_stext=0
g_etext=0
config_path = '../../os/.config'
elf_path = '../../build/output/bin/tinyara'
debug_cmd = 'addr2line'
file_data = 'HeapInfo'
# Top level class to parse the dump and assert logs parsing feature
class dumpParser:
# Sub class to hold the essential stack frame registers
class Stackframe ():
def __init__(self, fp, sp, lr,pc):
self.fp = fp
self.sp = sp
self.lr = lr
self.pc = pc
# Init function for dumpParser Class
def __init__(self, dump_file=None, elf=None, gdb_path=None, nm_path=None, readelf_path=None, log_file=None, debug=False):
self.dump_file=dump_file # dump file
self.elf=elf # Elf file
self.gdb_path=gdb_path # Path of GDB tool
self.nm_path=nm_path # Path of NM tool
self.readelf_path=readelf_path # Path of readelf tool
self.log_file=log_file # Path of Log file passed as argument
self.symbol_lookup_table = [] # Lookup table to store key(addr) value(symbol) from elf
self.stack_table = [] # Lookup table created from stack contents of assert log file
# These 2 addresses will be overwritten with ramdump file name
self.ram_base_addr=0x4A0000 # Default RAM start address for ARTIK board
self.ram_end_addr=0x6A0000 # Default RAM end address for ARTIK board
self.stacksize=0 # Stack size set to 0 and it gets updated dynamically while using
# Check for ELF file existence, if exists open the ELF (tinyara) file in binary format
# As well as open using NM utility so that, we can read the symbols easily
if self.elf is not None:
self.elf_file_fd = open(elf, 'rb')
if not self.elf_file_fd:
print(('Failed to open {0}'.format(elf)))
return None
# Check for ramdump file existence, if exists open the dump file in binary format
if self.dump_file is not None:
self.dump_file_fd = open(dump_file, 'rb')
if not self.dump_file_fd:
print(('Failed to open {0}'.format(dump_file)))
return None
#Split the dump_file with delimiters _ and . to read the start and end addr
# This is required to support any RAM dump size of any board
temp = re.split(r'[_.]',dump_file)
# Get the Start address and end address from dump file name
self.ram_base_addr = int(temp[-3],16)
self.ram_end_addr = int(temp[-2],16)
print(('self.ram_base_addr {0:x}'.format(self.ram_base_addr)))
print(('self.ram_end_addr {0:x}'.format(self.ram_end_addr)))
# Read the elf header to get the offset of text section and ARM exidx section
# These offsets will be used while creating ARM exidx table as well as while reading
# the address from ELF file at a particular text address
with os.popen(self.readelf_path + ' -S ' + elf) as readelf_fd:
elfdata = readelf_fd.readlines()
for line in elfdata:
if '.text' in line:
word = line.split()
#word 5 contains the offset value
self.text_offset = word[5]
if debug:
print(line)
print(self.text_offset)
def __del__(self):
self.elf_file_fd.close() # Close the elf file instance
# Function to search the instructions present in stack address.
def stack_lookup(self, addr):
if(addr is None):
return ('(Invalid address)', 0x0)
low = 0
high = len(self.stack_table)
mid = (low + high) >> 1
premid = 0
while(low <= high):
if(addr == self.stack_table[mid][0]):
return self.stack_table[mid][1]
if(addr < self.stack_table[mid][0]):
high = mid - 1
else:
low = mid + 1
mid = (high + low) >> 1
print("Sorry. Given address does not present in stack lookup")
return None
# Function to Parse the i/p log file (which contains stackdump during assert) passed as part of -t argument ( No dump is supplied )
def parse_file(self,log_file,debug=False):
k = 1
i = 0
got = 0
offset = 0
extra_str = ''
# Parse the contents based on tokens in log file.
with open(log_file) as searchfile:
for line in searchfile:
# It's imp to get the stack size dynamically as it's diff for diff tasks
if 'up_dumpstate:' in line:
word = re.split(r'[:]',line)
if word[1] == ' User stack':
continue
if word[1] == ' size':
self.stacksize = int(word[2],16)
if debug:
print("stackSize :", self.stacksize)
continue
# Read the stack contents of aborted stack and Populate stack_table
if 'up_stackdump:' in line:
word = line.split(':')
#print word[2]
t = word[2].split( )
if k == 1:
self.stack_start_addr = int(word[1], 16)
#print 'stack_start_addr: ', hex(self.stack_start_addr)
k = 0
for sub_word in t:
self.stack_table.append((self.stack_start_addr,int(sub_word,16)))
self.stack_start_addr = self.stack_start_addr + 4
# Print the generated Stack table
if debug:
print("{0:x} {1:x}".format(self.stack_table[i][0],self.stack_table[i][1]))
i = i + 1
continue
# Read only critical ARM registers ( PC, LR , SP and FP )
if 'up_registerdump:' in line:
word = line.split(':')
if word[1] == ' R0': # Observe the space before R0
continue
if word[1] == ' R8': # Observe the space before R8
t = word[2].split()
pc = int(t[-1],16) # converting string to base 16 (hex)
lr = int(t[-2],16)
sp = int(t[-3],16)
fp = int(t[-5],16)
if debug:
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
print(' ')
print('stack size : ', self.stacksize)
print('PC : ', hex(pc))
print('LR : ', hex(lr))
print('SP : ', hex(sp))
print('FP : ', hex(fp))
print(' ')
print(' ')
# If the PC is not withing RAM range, it's a Prefetch issue
# So, fill the PC with LR and help to get the call stack
if ( pc < g_stext or pc > g_etext):
print("It'S A PRE-FETCH ABORT @ PC", hex(pc))
# Assign LR to PC to help constructing the stack
pc = lr
continue
# Incase if log file already has this data, address to symbol mapping is
# enough to get the call stack.
if 'unwind_backtrace_with_fp:' in line:
word = line.split(':')
if word[1] == ' CallTrace_Start':
got = 1
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
print('Call Trace:')
print('')
continue
if word[1] == ' CallTrace_End':
got = 0
print('')
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
if got == 1:
# word.line.split(':')
word = re.split(r'[:><\[\]]',line)
r = self.symbol_table_lookup(int(word[3],16))
if r is None:
symname = 'UNKNOWN'
offset = 0x0
else:
symname, offset = r
pstring = (extra_str + '[<{0:x}>] {1}+0x{2:x}'.format(int(word[3],16), symname, offset))
print(pstring)
continue
print('CALL STACK of Aborted task: ')
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
print('')
# Since Fp is enabled, use simple approach to get backtrace
self.unwind_backtrace_with_framepointer(fp,sp,lr,pc,self.stacksize)
print('')
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
# Function to setup the Address to Symbol mapping table from the ELF file ( tinyara in our case)
def setup_symbol_table(self,tinyara_elf_file, debug=False):
# Reading the tinyara elf and preparing the symbol map table
with os.popen(self.nm_path + ' -n ' + tinyara_elf_file) as elf_file_fd_nm:
symbols = elf_file_fd_nm.readlines()
for line in symbols:
s = line.split(' ')
if len(s) == 3:
if not '$' in s[2]:
# s[0] contains address and s[2] contains Symbol name
self.symbol_lookup_table.append((int(s[0], 16), s[2].rstrip()))
# Printing the Created Symbol table
if debug:
print('~~~~~~~~~~~~~~~~~~~~~~~~ SYMBOL TABLE START ~~~~~~~~~~~~~~~~~~~~~')
for line in self.symbol_lookup_table:
print("{0:x} {1}".format(line[0], line[1]))
print('~~~~~~~~~~~~~~~~~~~~~~~~ SYMBOL TABLE END ~~~~~~~~~~~~~~~~~~~~~')
# Function to read the contents of given length from specific RAM/ELF address
def read_address(self, addr, length, debug=False):
# First check whether address falls within the code section, if so read from elf
if (addr >= g_stext and addr <= g_etext):
if debug:
print(('address {0:x} is in text range'.format(addr)))
# Checking in ELF file once for the offset at which we need to read the address
offset = (addr - g_stext ) + int(self.text_offset, 16)
if debug:
print(('Offset = {0:x}'.format(offset)))
print(('Length = {0:x}'.format(length)))
self.elf_file_fd.seek(offset)
a = self.elf_file_fd.read(length)
return a
# Since the given address does not belong to ELF section, read from DUMP
else:
# Calculate the OFFSET in the FILE by subtracting RAM start address
offset = addr - self.ram_base_addr
if debug:
print('offset = {0:x}'.format(offset))
print('length = {0:x}'.format(length))
# If dump file is Passed, Read contents of address from DUMP file descriptor
if self.log_file is None:
self.dump_file_fd.seek(offset)
a = self.dump_file_fd.read(length)
else:
print('AM HERE, SOMETHING WRONG')
# If only Log file is passed as i/p, Read the contents of address from ELF file
                self.elf_file_fd.seek(offset + int(self.text_offset, 16)) # offset got from elf header
a = self.elf_file_fd.read(length)
return a
# returns a tuple of the result by reading address from the "specified format string"
# return None on failure
def read_string(self, address, format_string, debug=False):
addr = address
s = self.read_address(addr, struct.calcsize(format_string), debug)
if (s is None) or (s == ''):
if debug and addr is not None:
print(('Failed to read address {0:x}'.format(addr)))
return None
        # Unpack the string with the proper format and return it to the caller.
return struct.unpack(format_string, s)
# returns a word size (4 bytes = 32 bits) read from dump "<" means little endian format
# I indicates word
def read_word(self, address, debug=False):
if debug:
print('reading {0:x}'.format(address))
s = self.read_string(address, '<I', debug)
if s is None:
if debug:
print('read_word s is None')
return None
else:
return s[0]
    # returns a half word (2 bytes = 16 bits) read from given dump address; "<" means little endian format
# H indicates Half word
def read_halfword(self, address, debug=False):
if debug:
print('reading {0:x}'.format(address))
s = self.read_string(address, '<h', debug)
if s is None:
if debug:
print('read_halfword s is None')
return None
else:
return s[0]
# returns a single Byte read from given dump address "<" means little endian format
# B indicates Byte, It's useful while reading a String from dump
def read_byte(self, address, debug=False):
if debug:
print('reading {0:x}'.format(address))
s = self.read_string(address, '<B', debug)
if s is None:
if debug:
print('read_byte s is None')
return None
else:
return s[0]
# Function to return the address of a Symbol from mapping table
def get_address_of_symbol(self, symbol, debug=False):
i = 0
        while (i < len(self.symbol_lookup_table) and self.symbol_lookup_table[i][1] != symbol):
            i = i + 1
        if (i >= len(self.symbol_lookup_table)):
return None
else:
if debug:
print("Address of symbol {0:x} {1}".format(self.symbol_lookup_table[i][0], symbol))
return self.symbol_lookup_table[i][0]
# Function to search for a address from "Address to Symbol" mapping table
# Apply Generic binary search
def symbol_table_lookup(self, addr, symbol_size=0):
if (addr is None):
return ('(Invalid address)', 0x0)
low = 0
high = len(self.symbol_lookup_table)
mid = (low + high) >> 1
premid = 0
# Check whether given address is within the 2 neighbour address range or not ?
while(not(addr >= self.symbol_lookup_table[mid][0] and addr < self.symbol_lookup_table[mid + 1][0])):
if(addr < self.symbol_lookup_table[mid][0]):
high = mid - 1
if(addr > self.symbol_lookup_table[mid][0]):
low = mid + 1
mid = (high + low) >> 1
if(mid == premid):
return None
if(mid + 1) >= len(self.symbol_lookup_table) or mid < 0:
return None
premid = mid
if symbol_size == 0:
return (self.symbol_lookup_table[mid][1], addr - self.symbol_lookup_table[mid][0])
else:
return (self.symbol_lookup_table[mid][1], self.symbol_lookup_table[mid + 1][0] - self.symbol_lookup_table[mid][0])
# Function to unwind the FRAME with generic method of using Frame pointer
def unwind_frame_using_framepointer(self, frame, debug=False):
high = 0
fp = frame.fp
low = (frame.sp) # Get the Stack pointer
mask = (self.stacksize) - 1 # Prepare Mask for stacksize for ex.stack:0x8000, mask=0x7FFF
high = (low + mask) & (~mask) # Get the end of stack with alignment.ALIGN(low, THREAD_SIZE)
# Verify the Addresses passed
if debug:
print("fp:{0:x} sp low:{1:x} high:{2:x}".format(frame.fp, frame.sp, high))
# Check current frame pointer is within bounds of Stack*/
if (fp < (low + 12) or fp + 4 >= high):
print("End of stack frame")
return -1
# If the dump is given, Read contents of address from the DUMP file
# by using read_word API
if self.log_file is None:
fp_is_at = self.read_word(frame.fp - 12)
sp_is_at = self.read_word(frame.fp - 8)
pc_is_at = self.read_word(frame.fp - 4)
else:
# If only log file is given, Read contents of addr from the populated stack_table
fp_is_at = self.stack_lookup(frame.fp - 12)
sp_is_at = self.stack_lookup(frame.fp - 8)
pc_is_at = self.stack_lookup(frame.fp - 4)
frame.fp = fp_is_at
frame.sp = sp_is_at
frame.pc = pc_is_at
return 0
# Function to walk the stack frame when framepointer is enabled
def find_stackframe_using_framepointer(self, frame, extra_str='', output_file=None):
while True:
offset = 0
# If PC is Null, prefetch abort or Something wrong, Break
if frame.pc is None:
break
if(frame.pc == 0):
break
# Read the Symbol name and offset for a specific PC address from symbol table
r = self.symbol_table_lookup(frame.pc)
if r is None:
symname = 'UNKNOWN'
offset = 0x0
else:
symname, offset = r # Update both symbol name and offset
address = '{0:x}'.format(frame.pc)
cmd = ['addr2line', '-e', self.elf, address]
fd_popen = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
            data = fd_popen.read().decode()
temp = data.split('/')[-1]
line = temp.split(':')[-1].rstrip()
name = temp[:-len(line)-2]
pstring = (extra_str + '[<{0:x}>] {1}+0x{2:x} [Line {3} of {4}]'.format(frame.pc, symname, offset, line, name))
if output_file:
                output_file.write(pstring + '\n')
else:
print(pstring)
# Unwind the complete Stack with the help of Frame pointer register
ret = self.unwind_frame_using_framepointer(frame)
if ret < 0:
break
# Function to backtrace the stack in Generic method, which uses frame pointer register
# First function to get called to unwind
def unwind_backtrace_with_framepointer(self, fp, sp, lr, pc, ss):
frame = self.Stackframe(fp,sp,lr,pc)
frame.fp = fp
frame.sp = sp
frame.lr = lr
frame.pc = pc
self.stacksize = ss
self.find_stackframe_using_framepointer(frame)
def usage():
print('*************************************************************')
print('\nUsage: %s -e ELF_FILE -r DUMP_FILE [OPTIONS]' % sys.argv[0])
print(' OR')
print('Usage: %s -e ELF_FILE -t LOG_FILE [OPTIONS]\n' % sys.argv[0])
print('Following options are available')
print('\t-e, --elf tinyara elf file along with path')
print('\t-r, --dump_file RAM/FLASH dump_file along with path')
print('\t-t, --log_file Enter Logfile which contains stackdump during assert')
print('\t-G, --gdb_path Enter gdb tool path')
print('\t-N, --nm_path Enter nm tool path')
print('\t-E, --readelf_path Enter readelf tool path')
print('\t-h, --help Show help')
print('')
print('syntax :')
print('--------')
print('python %s -e <Tinyara elf path> -r Filename_ramBaseAddr_ramEndAddr.bin -G <Gdb path> -N < NM path> ' % sys.argv[0])
print('')
print('I assume, gdb and nm tool exist in your linux machine like /usr/bin/gdb and /usr/bin/nm, so hard coded this path inside script')
print('')
print('Below example if you give dump file as path: ')
print('--------------------------------------------')
print('python dumpParser.py -e build/output/bin/tinyara -r build/output/bin/ramdump_0x4a0000_0x6a0000.bin')
print('')
print('Below example if you give simple assert log file as path: ')
print('---------------------------------------------------------')
    print('python dumpParser.py -e build/output/bin/tinyara -t log.txt')
print('')
print('')
print('Note:')
print('--------')
print('1) For getting call stack CONFIG_FRAME_POINTER flag should be enabled in your build')
print('')
print('If you do not have gdb and nm path set, please pass the path as below')
print('')
print('python dumpParser.py -r /build/bin/ramdump_0x4a0000_0x6a0000.bin -e build/bin/tinyara -G <your_gdb_path> -N <your_nm_path>')
print('')
print('')
print('*************************************************************')
sys.exit(1)
def main():
dump_file = None
log_file = None
elf = None
framePointer = 0
stackPointer = 0
programCounter = 0
stackSize = 0
gdb_path='/usr/bin/gdb'
nm_path='/usr/bin/nm'
readelf_path='/usr/bin/readelf'
try:
        opts, args = GetOpt(sys.argv[1:],'r:e:G:N:E:t:h', ['dump_file=', 'elf=','gdb_path=','nm_path=','readelf_path=','log_file=','help'])
except GetoptError as e:
print(' ')
print(' ')
print('*************************************************************')
print('Usage error:', e)
usage()
for opt, arg in opts:
if opt in ('-r', '--dump_file'):
dump_file = arg
elif opt in ('-e', '--elf'):
elf = arg
elif opt in ('-G', '--gdb_path'):
gdb_path = arg
elif opt in ('-N', '--nm_path'):
nm_path = arg
elif opt in ('-E', '--readelf_path'):
readelf_path = arg
elif opt in ('-t', '--log_file'):
log_file = arg
elif opt in ('-h', '--help'):
usage()
print('')
print('')
print('*************************************************************')
print('dump_file :', dump_file)
print('log_file :', log_file)
print('Tinyara_elf_file :', elf)
print('*************************************************************')
print('')
if not elf :
print('Usage error: Must specify -e option, please find below for proper usage')
usage()
if log_file is not None:
if not os.path.exists(log_file):
print('{0} does not exist. Please provide the proper path for log_file...'.format(log_file))
sys.exit(1)
if dump_file is not None:
if not os.path.exists(dump_file):
            print('{0} does not exist. Please provide the proper path for dump_file...'.format(dump_file))
sys.exit(1)
if not log_file and not dump_file:
        print('Usage error: Must specify one of the -t or -r options. Please find below for proper usage')
usage()
if not os.path.exists(elf):
        print('{0} does not exist. Cannot proceed without the ELF file. Exiting...'.format(elf))
sys.exit(1)
if not os.path.exists(gdb_path):
print('{0} does not exist. Cannot proceed without GDB Tool Exiting...'.format(gdb_path))
sys.exit(1)
if not os.access(gdb_path, os.X_OK):
print("!!! No execute permissions on gdb path {0}".format(gdb_path))
print("!!! Please check the path settings")
print("!!! If this tool is being run from a shared location, contact the maintainer")
sys.exit(1)
if not os.path.exists(nm_path):
print('{0} does not exist. Cannot proceed without NM Tool Exiting...'.format(nm_path))
sys.exit(1)
if not os.access(nm_path, os.X_OK):
print("!!! No execute permissions on gdb path {0}".format(nm_path))
print("!!! Please check the path settings")
print("!!! If this tool is being run from a shared location, contact the maintainer")
sys.exit(1)
if not os.path.exists(readelf_path):
print('{0} does not exist. Cannot proceed without readelf Tool Exiting...'.format(readelf_path))
sys.exit(1)
if not os.access(readelf_path, os.X_OK):
print("!!! No execute permissions on readelf path {0}".format(readelf_path))
print("!!! Please check the path settings")
print("!!! If this tool is being run from a shared location, contact the maintainer")
sys.exit(1)
try:
# Calling the Constructor with the initial set of arguments
rParser = dumpParser(dump_file=dump_file,elf=elf,gdb_path=gdb_path,nm_path=nm_path,readelf_path=readelf_path,log_file=log_file, debug=False)
# Setup the Symbol table lookup with given System.map file
rParser.setup_symbol_table(elf,debug=False)
# Find offset
global g_stext
g_stext = rParser.get_address_of_symbol("_stext")
global g_etext
g_etext = rParser.get_address_of_symbol("_etext")
# If the log file is given, then parse that log file only and exit
if log_file is not None:
rParser.parse_file(log_file,debug=False)
return None
# If the log file not given, Consider full dump
# Unwind the Callstack by obtaining FP, SP and PC from the proper global variable
# task_ctxt_regs is used if SEC_DEBUG is enabled , else current_regs is used
current_regs_pointer = rParser.get_address_of_symbol("current_regs")
if current_regs_pointer is not None:
ctxt_regs = rParser.read_word(current_regs_pointer)
if ctxt_regs != 0x0:
print('current_regs is not NULL')
else:
cpu_ctxt_regs_pointer = rParser.get_address_of_symbol("cpu_ctxt_regs")
if cpu_ctxt_regs_pointer is not None:
ctxt_regs = cpu_ctxt_regs_pointer
if ctxt_regs is None:
print('System is not crashed')
return None
framePointer = rParser.read_word(ctxt_regs+(4* FP))
stackPointer = rParser.read_word(ctxt_regs+(4* SP))
linkRegister = rParser.read_word(ctxt_regs+(4*LR))
programCounter = rParser.read_word(ctxt_regs+(4*PC))
# There are spl case where PC can be invalid, So assigning LR to PC
if ( programCounter < g_stext or programCounter > g_etext):
# This is possible for a prefetch abort. so am taking care by assigning LR to PC
print("It's a Prefetch abort at Addr : ", hex(programCounter))
programCounter = linkRegister
# Explicitly getting the PC and LR symbol names for display purpose
temp1 = rParser.symbol_table_lookup(programCounter)
if temp1 is None:
temp1_symname = 'UNKNOWN'
temp1_offset = 0x0
else:
temp1_symname, temp1_offset = temp1
temp2 = rParser.symbol_table_lookup(linkRegister)
if temp2 is None:
temp2_symname = 'UNKNOWN'
temp2_offset = 0x0
else:
temp2_symname, temp2_offset = temp2
        # Reading the current task from the g_readytorun list; usually the head is the current task.
        # Since it is a pointer, first get the pointer address and then read the
        # contents of that pointer to get the proper task address.
current_task_pointer = rParser.get_address_of_symbol('g_readytorun')
current_task_addr = rParser.read_word(current_task_pointer)
        # Reading the stack size value from the tcb_s struct
stackSize = rParser.read_word(current_task_addr+0x2c)
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
print('')
print("Board Crashed at :")
print("PC: [0x{0:x}] {1}+0x{2:x}]\"".format(programCounter,temp1_symname,temp1_offset))
print("LR: [0x{0:x}] {1}+0x{2:x}]\"".format(linkRegister,temp2_symname,temp2_offset))
print(' ')
print("FP: 0x{0:x} and SP: 0x{1:x}".format(framePointer, stackPointer))
print(' ')
print('CALL STACK of Aborted task: ')
print('*******************************************************************')
rParser.unwind_backtrace_with_framepointer(framePointer,stackPointer,linkRegister,programCounter,stackSize)
print('')
print('')
print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
g_mmheap = rParser.get_address_of_symbol("g_mmheap")
# Read config information
fd = open(config_path, 'r')
data = fd.read()
fd.close()
if not 'CONFIG_DEBUG_MM_HEAPINFO=y' in data:
print('DEBUG_MM_HEAPINFO is not enable. Enable DEBUG_MM_HEAPINFO to see heap usage')
return
# This information depends on the mm_heap_s structure
SIZE_OF_MM_SEM = 4
SIZE_OF_MM_HOLDER = 4
SIZE_OF_MM_COUNTS_HELD = 4
HEAP_SIZE_POINT = SIZE_OF_MM_SEM + SIZE_OF_MM_HOLDER + SIZE_OF_MM_COUNTS_HELD
SIZE_OF_MM_HEAPSIZE = 4
PEAK_ALLOC_SIZE_POINT = HEAP_SIZE_POINT + SIZE_OF_MM_HEAPSIZE
SIZE_OF_PEAK_ALLOC_SIZE = 4
TOTAL_ALLOC_SIZE_POINT = PEAK_ALLOC_SIZE_POINT + SIZE_OF_PEAK_ALLOC_SIZE
SIZE_OF_TOTAL_ALLOC_SIZE = 4
SIZE_OF_HEAPINFO_TCB_INFO = 16
SIZE_OF_ALLOC_NODE = 16
ALLOC_LIST_POINT = TOTAL_ALLOC_SIZE_POINT + SIZE_OF_TOTAL_ALLOC_SIZE
max_tasks = 0
# get MAX_TASKS num
if 'CONFIG_MAX_TASKS=' in data:
index = data.find('CONFIG_MAX_TASKS=')
index += len('CONFIG_MAX_TASKS=')
while data[index] != '\n' :
max_tasks *= 10
max_tasks += int(data[index])
index += 1
HEAP_START_POINT = (ALLOC_LIST_POINT + (max_tasks * SIZE_OF_HEAPINFO_TCB_INFO))
if 'CONFIG_MM_SMALL=y' in data:
MM_ALLOC_BIT = 0x8000
else:
MM_ALLOC_BIT = 0x80000000
start_heap = rParser.read_word(g_mmheap + HEAP_START_POINT)
end_heap = rParser.read_word(g_mmheap + HEAP_START_POINT + 4)
print('')
print('')
print('Details of Heap Usages (Size in Bytes)')
print('')
print('start heap address : ', hex(start_heap))
print('end heap address : ', hex(end_heap))
print('')
point = start_heap + SIZE_OF_ALLOC_NODE
stack_size = [0 for i in range(max_tasks)]
idle_stack_size = 0
if 'CONFIG_IDLETHREAD_STACKSIZE=' in data:
index = data.find('CONFIG_IDLETHREAD_STACKSIZE=')
index += len('CONFIG_IDLETHREAD_STACKSIZE=')
while data[index] != '\n' :
idle_stack_size *= 10
idle_stack_size += int(data[index])
index += 1
stack_size[0] = idle_stack_size
print('******************************************************************')
print(' MemAddr | Size | Status | Pid | Owner ')
print('----------|----------|--------|-------|---------------------------')
f = open(file_data, 'w')
while point < end_heap:
size = rParser.read_word(point)
preceding = rParser.read_word(point + 4)
owner = rParser.read_word(point + 8)
pid = rParser.read_halfword(point + 12)
if preceding & MM_ALLOC_BIT :
fd_popen = subprocess.Popen([debug_cmd, '-e', elf_path, hex(owner)], stdout=subprocess.PIPE).stdout
                data = fd_popen.read().decode()
fd_popen.close()
if pid >= 0:
print('{:^10}|'.format(hex(point)), '{:>6}'.format(size), ' |', '{:^7}|'.format('Alloc'), '{:^6}|'.format(pid), data[14:], end=' ')
f.write(str(size) + ' 0 ' + str(hex(point)) + ' ' + str(pid) + ' ' + data[14:])
else: # If pid is less than 0, it is the stack size of (-pid)
stack_size[(-pid) & (max_tasks - 1)] = size
print('{:^10}|'.format(hex(point)), '{:>6}'.format(size), ' |', '{:^7}|'.format('Alloc'), '{:^6}|'.format(-pid), data[14:], end=' ')
f.write(str(size) + ' 1 ' + str(hex(point)) + ' ' + str(-pid) + ' ' + data[14:])
else:
print('{:^10}|'.format(hex(point)), '{:>6}'.format(size), ' |', '{:^7}|'.format('Free'), '{:6}|'.format(""))
f.write(str(size) +' 2 ' + str(hex(point)))
# next node
point = point + size
f.close()
print('')
print('***********************************************************')
print(' Summary of Heap Usages (Size in Bytes)')
print('***********************************************************')
heap_size = rParser.read_word(g_mmheap + HEAP_SIZE_POINT)
print('HEAP SIZE : ', heap_size)
        peak_alloc_size = rParser.read_word(g_mmheap + PEAK_ALLOC_SIZE_POINT)
        print('PEAK ALLOC SIZE : ', peak_alloc_size)
total_alloc_size = rParser.read_word(g_mmheap + TOTAL_ALLOC_SIZE_POINT)
print('TOTAL ALLOC SIZE : ', total_alloc_size)
print('FREE SIZE : ', heap_size - total_alloc_size)
print('')
print('***********************************************************')
print(' PID | STACK SIZE | CUR ALLOC SIZE | PEAK ALLOC SIZE |')
print('-------|------------|-------------------|-----------------|')
INVALID_PROCESS_ID = 0xFFFFFFFF
alloc_list = ALLOC_LIST_POINT + g_mmheap
for i in range(0, max_tasks):
pid = rParser.read_word(alloc_list)
if pid != INVALID_PROCESS_ID :
# This information depends on the heapinfo_tcb_info_t
cur_alloc = rParser.read_word(alloc_list + 4)
peak_alloc = rParser.read_word(alloc_list + 8)
print('{:^7}|'.format(pid), '{:>7}'.format(stack_size[i]), ' |', '{:>13}'.format(cur_alloc), ' |', '{:>13}'.format(peak_alloc), ' |')
# next alloc list
alloc_list += SIZE_OF_HEAPINFO_TCB_INFO
except Exception as e:
print("ERROR:", e)
if __name__ == '__main__':
main()
``` |
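The reader helpers above (`read_word`, `read_halfword`, `read_byte`) all funnel through `struct.unpack` with a little-endian format string applied to raw bytes from the ELF or RAM dump. A tiny standalone sketch of the same decoding on a made-up byte string:
```python
import struct

# Illustrative decoding of one little-endian word and one half word.
raw = b'\x78\x56\x34\x12'
(word,) = struct.unpack('<I', raw)       # 4-byte unsigned word, as in read_word()
print(hex(word))                         # 0x12345678
(half,) = struct.unpack('<h', raw[:2])   # 2-byte half word, as in read_halfword()
print(hex(half))                         # 0x5678
```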
{
"source": "jiyeoun/PS",
"score": 4
} |
#### File: PS/baekjoon/1934.py
```python
def gcd(a, b):
while b != 0:
x = a % b
a = b
b = x
return a
def lcm(a, b):
gcd2 = gcd(a, b)
return (a * b) // gcd2
n=int(input())
for i in range(n):
    a, b = map(int, input().split())
print(lcm(a, b))
``` |
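A quick way to sanity-check the two helpers above is the identity gcd(a, b) * lcm(a, b) == a * b, which holds for positive integers. A small sketch with made-up pairs; it reuses the functions defined above rather than reading from stdin.
```python
# Illustrative check of the gcd/lcm identity on arbitrary sample values.
for a, b in [(6, 15), (21, 6), (13, 17)]:
    assert gcd(a, b) * lcm(a, b) == a * b
print(lcm(6, 15))  # 30
```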
{
"source": "jiyfeng/RSTParser",
"score": 3
} |
#### File: jiyfeng/RSTParser/tree.py
```python
from datastructure import *
from buildtree import *
from feature import FeatureGenerator
from parser import SRParser
from util import extractrelation
class RSTTree(object):
def __init__(self, fname=None, tree=None):
""" Initialization
:type text: string
:param text: dis file content
"""
self.fname = fname
self.binary = True
self.tree = tree
def build(self):
""" Build BINARY RST tree
"""
text = open(self.fname).read()
self.tree = buildtree(text)
self.tree = binarizetree(self.tree)
self.tree = backprop(self.tree)
def write(self, fname):
""" Write tree into file
:type fname: string
:param fname: tree file name
"""
pass
def bracketing(self):
""" Generate brackets according an Binary RST tree
"""
nodelist = postorder_DFT(self.tree, [])
nodelist.pop() # Remove the root node
brackets = []
for node in nodelist:
relation = extractrelation(node.relation)
b = (node.eduspan, node.prop, relation)
brackets.append(b)
return brackets
def generate_samples(self):
""" Generate samples from an binary RST tree
"""
# Sample list
samplelist = []
# Parsing action
actionlist = decodeSRaction(self.tree)
# Initialize queue and stack
queue = getedunode(self.tree)
stack = []
# Start simulating the shift-reduce parsing
for action in actionlist:
# Generate features
fg = FeatureGenerator(stack, queue)
features = fg.features()
samplelist.append(features)
# Change status of stack/queue
sr = SRParser(stack, queue)
sr.operate(action)
# stack, queue = sr.getstatus()
return (actionlist, samplelist)
def getedutext(self):
""" Get all EDU text here
"""
edunodelist = getedunode(self.tree)
texts = []
for node in edunodelist:
texts.append(node.text)
return texts
def gettree(self):
""" Get the RST tree
"""
return self.tree
def test():
fname = "examples/wsj_0603.out.dis"
rst = RSTTree(fname)
rst.build()
# actionlist, samplelist = rst.generate_samples()
# print actionlist
# print samplelist
# for (action, sample) in zip(actionlist, samplelist):
# print action
    print(rst.bracketing())
    print('----------------------------')
    rst = RSTTree("examples/wsj_0600.out.dis")
    rst.build()
    print(rst.bracketing())
if __name__ == '__main__':
test()
``` |
{
"source": "jiyfeng/transformer-text-tasks",
"score": 3
} |
#### File: transformer-text-tasks/transformer/transformer_block.py
```python
from torch import nn
from transformer.self_attention import SelfAttention
class TransformerBlock(nn.Module):
def __init__(self, emb, heads):
super().__init__()
self.attention = SelfAttention(emb, heads=heads)
# The layer normalization is applied over the embedding dimension only.
self.norm1 = nn.LayerNorm(emb)
self.norm2 = nn.LayerNorm(emb)
self.ff = nn.Sequential(
nn.Linear(emb, 4 * emb),
nn.ReLU(),
nn.Linear(4 * emb, emb)
)
def forward(self, x):
attended = self.attention(x)
x = self.norm1(attended + x)
ff = self.ff(x)
res = self.norm2(ff + x)
return res
``` |
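Because both sub-layers are residual and the feed-forward network maps back to `emb`, the block preserves its input shape. A minimal smoke-test sketch; the sizes are illustrative, and it assumes the accompanying `SelfAttention` module follows the usual (batch, sequence, embedding) layout.
```python
import torch

# Hypothetical shape check for TransformerBlock; emb=16, heads=4 are made-up values.
block = TransformerBlock(emb=16, heads=4)
x = torch.randn(2, 5, 16)        # (batch, sequence length, embedding dim)
out = block(x)
assert out.shape == x.shape      # residual connections and LayerNorm keep the embedding size
```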
{
"source": "jiyilanzhou/eth2.0-specs",
"score": 2
} |
#### File: unittests/fork_choice/test_on_attestation.py
```python
from eth2spec.test.context import PHASE0, with_all_phases, spec_state_test
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch, next_slot
def run_on_attestation(spec, state, store, attestation, valid=True):
if not valid:
try:
spec.on_attestation(store, attestation)
except AssertionError:
return
else:
assert False
indexed_attestation = spec.get_indexed_attestation(state, attestation)
spec.on_attestation(store, attestation)
sample_index = indexed_attestation.attesting_indices[0]
if spec.fork == PHASE0:
latest_message = spec.LatestMessage(
epoch=attestation.data.target.epoch,
root=attestation.data.beacon_block_root,
)
else:
latest_message = spec.LatestMessage(
epoch=attestation.data.target.epoch,
root=attestation.data.beacon_block_root,
)
shard_latest_message = spec.ShardLatestMessage(
epoch=attestation.data.target.epoch,
root=attestation.data.shard_head_root,
)
assert store.shard_stores[attestation.data.shard].latest_messages[sample_index] == shard_latest_message
assert (
store.latest_messages[sample_index] == latest_message
)
@with_all_phases
@spec_state_test
def test_on_attestation_current_epoch(spec, state):
store = spec.get_forkchoice_store(state)
spec.on_tick(store, store.time + spec.SECONDS_PER_SLOT * 2)
block = build_empty_block_for_next_slot(spec, state)
signed_block = state_transition_and_sign_block(spec, state, block)
# store block in store
spec.on_block(store, signed_block)
attestation = get_valid_attestation(spec, state, slot=block.slot, signed=True)
assert attestation.data.target.epoch == spec.GENESIS_EPOCH
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == spec.GENESIS_EPOCH
run_on_attestation(spec, state, store, attestation)
@with_all_phases
@spec_state_test
def test_on_attestation_previous_epoch(spec, state):
store = spec.get_forkchoice_store(state)
spec.on_tick(store, store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH)
block = build_empty_block_for_next_slot(spec, state)
signed_block = state_transition_and_sign_block(spec, state, block)
# store block in store
spec.on_block(store, signed_block)
attestation = get_valid_attestation(spec, state, slot=block.slot, signed=True)
assert attestation.data.target.epoch == spec.GENESIS_EPOCH
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == spec.GENESIS_EPOCH + 1
run_on_attestation(spec, state, store, attestation)
@with_all_phases
@spec_state_test
def test_on_attestation_past_epoch(spec, state):
store = spec.get_forkchoice_store(state)
# move time forward 2 epochs
time = store.time + 2 * spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
spec.on_tick(store, time)
# create and store block from 3 epochs ago
block = build_empty_block_for_next_slot(spec, state)
signed_block = state_transition_and_sign_block(spec, state, block)
spec.on_block(store, signed_block)
# create attestation for past block
attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True)
assert attestation.data.target.epoch == spec.GENESIS_EPOCH
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == spec.GENESIS_EPOCH + 2
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_mismatched_target_and_slot(spec, state):
store = spec.get_forkchoice_store(state)
spec.on_tick(store, store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH)
block = build_empty_block_for_next_slot(spec, state)
signed_block = state_transition_and_sign_block(spec, state, block)
# store block in store
spec.on_block(store, signed_block)
attestation = get_valid_attestation(spec, state, slot=block.slot)
attestation.data.target.epoch += 1
sign_attestation(spec, state, attestation)
assert attestation.data.target.epoch == spec.GENESIS_EPOCH + 1
assert spec.compute_epoch_at_slot(attestation.data.slot) == spec.GENESIS_EPOCH
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == spec.GENESIS_EPOCH + 1
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_inconsistent_target_and_head(spec, state):
store = spec.get_forkchoice_store(state)
spec.on_tick(store, store.time + 2 * spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH)
# Create chain 1 as empty chain between genesis and start of 1st epoch
target_state_1 = state.copy()
next_epoch(spec, target_state_1)
    # Create chain 2 with a different block in the chain from chain 1
target_state_2 = state.copy()
diff_block = build_empty_block_for_next_slot(spec, target_state_2)
signed_diff_block = state_transition_and_sign_block(spec, target_state_2, diff_block)
spec.on_block(store, signed_diff_block)
next_epoch(spec, target_state_2)
next_slot(spec, target_state_2)
# Create and store block new head block on target state 1
head_block = build_empty_block_for_next_slot(spec, target_state_1)
signed_head_block = state_transition_and_sign_block(spec, target_state_1, head_block)
spec.on_block(store, signed_head_block)
# Attest to head of chain 1
attestation = get_valid_attestation(spec, target_state_1, slot=head_block.slot, signed=False)
epoch = spec.compute_epoch_at_slot(attestation.data.slot)
# Set attestation target to be from chain 2
attestation.data.target = spec.Checkpoint(epoch=epoch, root=spec.get_block_root(target_state_2, epoch))
sign_attestation(spec, state, attestation)
assert attestation.data.target.epoch == spec.GENESIS_EPOCH + 1
assert spec.compute_epoch_at_slot(attestation.data.slot) == spec.GENESIS_EPOCH + 1
assert spec.get_block_root(target_state_1, epoch) != attestation.data.target.root
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_target_block_not_in_store(spec, state):
store = spec.get_forkchoice_store(state)
time = store.time + spec.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1)
spec.on_tick(store, time)
# move to immediately before next epoch to make block new target
next_epoch = spec.get_current_epoch(state) + 1
transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 1)
target_block = build_empty_block_for_next_slot(spec, state)
state_transition_and_sign_block(spec, state, target_block)
# do not add target block to store
attestation = get_valid_attestation(spec, state, slot=target_block.slot, signed=True)
assert attestation.data.target.root == target_block.hash_tree_root()
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_target_checkpoint_not_in_store(spec, state):
store = spec.get_forkchoice_store(state)
time = store.time + spec.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1)
spec.on_tick(store, time)
# move to immediately before next epoch to make block new target
next_epoch = spec.get_current_epoch(state) + 1
transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 1)
target_block = build_empty_block_for_next_slot(spec, state)
signed_target_block = state_transition_and_sign_block(spec, state, target_block)
# add target block to store
spec.on_block(store, signed_target_block)
# target checkpoint state is not yet in store
attestation = get_valid_attestation(spec, state, slot=target_block.slot, signed=True)
assert attestation.data.target.root == target_block.hash_tree_root()
run_on_attestation(spec, state, store, attestation)
@with_all_phases
@spec_state_test
def test_on_attestation_target_checkpoint_not_in_store_diff_slot(spec, state):
store = spec.get_forkchoice_store(state)
time = store.time + spec.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1)
spec.on_tick(store, time)
# move to two slots before next epoch to make target block one before an empty slot
next_epoch = spec.get_current_epoch(state) + 1
transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 2)
target_block = build_empty_block_for_next_slot(spec, state)
signed_target_block = state_transition_and_sign_block(spec, state, target_block)
# add target block to store
spec.on_block(store, signed_target_block)
# target checkpoint state is not yet in store
attestation_slot = target_block.slot + 1
transition_to(spec, state, attestation_slot)
attestation = get_valid_attestation(spec, state, slot=attestation_slot, signed=True)
assert attestation.data.target.root == target_block.hash_tree_root()
run_on_attestation(spec, state, store, attestation)
@with_all_phases
@spec_state_test
def test_on_attestation_beacon_block_not_in_store(spec, state):
store = spec.get_forkchoice_store(state)
time = store.time + spec.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1)
spec.on_tick(store, time)
# move to immediately before next epoch to make block new target
next_epoch = spec.get_current_epoch(state) + 1
transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 1)
target_block = build_empty_block_for_next_slot(spec, state)
signed_target_block = state_transition_and_sign_block(spec, state, target_block)
# store target in store
spec.on_block(store, signed_target_block)
head_block = build_empty_block_for_next_slot(spec, state)
state_transition_and_sign_block(spec, state, head_block)
# do not add head block to store
attestation = get_valid_attestation(spec, state, slot=head_block.slot, signed=True)
assert attestation.data.target.root == target_block.hash_tree_root()
assert attestation.data.beacon_block_root == head_block.hash_tree_root()
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_future_epoch(spec, state):
store = spec.get_forkchoice_store(state)
time = store.time + 3 * spec.SECONDS_PER_SLOT
spec.on_tick(store, time)
block = build_empty_block_for_next_slot(spec, state)
signed_block = state_transition_and_sign_block(spec, state, block)
# store block in store
spec.on_block(store, signed_block)
# move state forward but not store
next_epoch(spec, state)
attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True)
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_future_block(spec, state):
store = spec.get_forkchoice_store(state)
time = store.time + spec.SECONDS_PER_SLOT * 5
spec.on_tick(store, time)
block = build_empty_block_for_next_slot(spec, state)
signed_block = state_transition_and_sign_block(spec, state, block)
spec.on_block(store, signed_block)
# attestation for slot immediately prior to the block being attested to
attestation = get_valid_attestation(spec, state, slot=block.slot - 1, signed=False)
attestation.data.beacon_block_root = block.hash_tree_root()
sign_attestation(spec, state, attestation)
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_same_slot(spec, state):
store = spec.get_forkchoice_store(state)
time = store.time + spec.SECONDS_PER_SLOT
spec.on_tick(store, time)
block = build_empty_block_for_next_slot(spec, state)
signed_block = state_transition_and_sign_block(spec, state, block)
spec.on_block(store, signed_block)
attestation = get_valid_attestation(spec, state, slot=block.slot, signed=True)
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_invalid_attestation(spec, state):
store = spec.get_forkchoice_store(state)
time = store.time + 3 * spec.SECONDS_PER_SLOT
spec.on_tick(store, time)
block = build_empty_block_for_next_slot(spec, state)
signed_block = state_transition_and_sign_block(spec, state, block)
spec.on_block(store, signed_block)
attestation = get_valid_attestation(spec, state, slot=block.slot, signed=True)
# make invalid by using an invalid committee index
attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT * spec.SLOTS_PER_EPOCH
run_on_attestation(spec, state, store, attestation, False)
```
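The tests above keep transitioning to `spec.compute_start_slot_at_epoch(next_epoch) - 1` so that the very next block lands on an epoch boundary and becomes the new attestation target. A minimal stand-alone sketch of the slot/epoch arithmetic behind that pattern, assuming the mainnet value of 32 slots per epoch:
```python
# Illustration only: the slot/epoch arithmetic assumed by the tests above.
SLOTS_PER_EPOCH = 32  # assumption: mainnet value


def compute_epoch_at_slot(slot: int) -> int:
    return slot // SLOTS_PER_EPOCH


def compute_start_slot_at_epoch(epoch: int) -> int:
    return epoch * SLOTS_PER_EPOCH


# Moving the state to the last slot of the current epoch means the next
# block lands exactly on an epoch boundary and becomes the new target.
current_epoch = 3
last_slot = compute_start_slot_at_epoch(current_epoch + 1) - 1
assert compute_epoch_at_slot(last_slot) == current_epoch
assert compute_epoch_at_slot(last_slot + 1) == current_epoch + 1
```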
#### File: phase1/unittests/test_get_start_shard.py
```python
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
)
from eth2spec.test.helpers.state import next_epoch
@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_committee_count_delta(spec, state):
assert spec.get_committee_count_delta(state, 0, 0) == 0
assert spec.get_committee_count_per_slot(state, 0) != 0
assert spec.get_committee_count_delta(state, 0, 1) == spec.get_committee_count_per_slot(state, 0)
assert spec.get_committee_count_delta(state, 1, 2) == spec.get_committee_count_per_slot(state, 0)
assert spec.get_committee_count_delta(state, 0, 2) == spec.get_committee_count_per_slot(state, 0) * 2
assert spec.get_committee_count_delta(state, 0, spec.SLOTS_PER_EPOCH) == (
spec.get_committee_count_per_slot(state, 0) * spec.SLOTS_PER_EPOCH
)
assert spec.get_committee_count_delta(state, 0, 2 * spec.SLOTS_PER_EPOCH) == (
spec.get_committee_count_per_slot(state, 0) * spec.SLOTS_PER_EPOCH
+ spec.get_committee_count_per_slot(state, 1) * spec.SLOTS_PER_EPOCH
)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_start_shard_current_epoch_start(spec, state):
assert state.current_epoch_start_shard == 0
next_epoch(spec, state)
active_shard_count = spec.get_active_shard_count(state)
assert state.current_epoch_start_shard == (
spec.get_committee_count_delta(state, 0, spec.SLOTS_PER_EPOCH) % active_shard_count
)
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
slot = current_epoch_start_slot
start_shard = spec.get_start_shard(state, slot)
assert start_shard == state.current_epoch_start_shard
@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_start_shard_next_slot(spec, state):
next_epoch(spec, state)
active_shard_count = spec.get_active_shard_count(state)
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
slot = current_epoch_start_slot + 1
start_shard = spec.get_start_shard(state, slot)
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
expected_start_shard = (
state.current_epoch_start_shard
+ spec.get_committee_count_delta(state, start_slot=current_epoch_start_slot, stop_slot=slot)
) % active_shard_count
assert start_shard == expected_start_shard
@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_start_shard_previous_slot(spec, state):
next_epoch(spec, state)
active_shard_count = spec.get_active_shard_count(state)
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
slot = current_epoch_start_slot - 1
start_shard = spec.get_start_shard(state, slot)
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
expected_start_shard = (
state.current_epoch_start_shard
+ spec.MAX_COMMITTEES_PER_SLOT * spec.SLOTS_PER_EPOCH * active_shard_count
- spec.get_committee_count_delta(state, start_slot=slot, stop_slot=current_epoch_start_slot)
) % active_shard_count
assert start_shard == expected_start_shard
```
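In `test_get_start_shard_previous_slot` above, a multiple of the modulus (`spec.MAX_COMMITTEES_PER_SLOT * spec.SLOTS_PER_EPOCH * active_shard_count`) is added before subtracting the committee delta so the expression stays non-negative under unsigned arithmetic. A small illustration of that wrap-around trick, with made-up numbers:
```python
# Illustration only, with made-up numbers.
active_shard_count = 64   # the modulus
start_shard = 5           # hypothetical current_epoch_start_shard
delta = 23                # hypothetical committee count between the two slots

# Unsigned arithmetic cannot represent 5 - 23 directly, so a multiple of the
# modulus known to exceed the delta is added first; taking the result modulo
# active_shard_count leaves the answer unchanged.
padding = 4 * active_shard_count
wrapped = (start_shard + padding - delta) % active_shard_count
assert wrapped == (start_shard - delta) % active_shard_count  # Python's % already wraps
```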
#### File: generators/bls/main.py
```python
from hashlib import sha256
from typing import Tuple, Iterable, Any, Callable, Dict
from eth_utils import (
encode_hex,
int_to_big_endian,
)
import milagro_bls_binding as milagro_bls
from eth2spec.utils import bls
from eth2spec.test.context import PHASE0
from gen_base import gen_runner, gen_typing
def to_bytes(i):
return i.to_bytes(32, "big")
def hash(x):
return sha256(x).digest()
def int_to_hex(n: int, byte_length: int = None) -> str:
byte_value = int_to_big_endian(n)
if byte_length:
byte_value = byte_value.rjust(byte_length, b'\x00')
return encode_hex(byte_value)
def hex_to_int(x: str) -> int:
return int(x, 16)
MESSAGES = [
bytes(b'\x00' * 32),
bytes(b'\x56' * 32),
bytes(b'\xab' * 32),
]
PRIVKEYS = [
    # The curve (subgroup) order is a 255-bit prime, so private keys fit in 32 bytes.
    # Also, not every integer is a valid private key, so use pre-generated keys.
hex_to_int('0x00000000000000000000000000000000263dbd792f5b1be47ed85f8938c0f29586af0d3ac7b977f21c278fe1462040e3'),
hex_to_int('0x0000000000000000000000000000000047b8192d77bf871b62e87859d653922725724a5c031afeabc60bcef5ff665138'),
hex_to_int('0x00000000000000000000000000000000328388aff0d4a5b7dc9205abd374e7e98f3cd9f3418edb4eafda5fb16473d216'),
]
Z1_PUBKEY = b'\xc0' + b'\x00' * 47
NO_SIGNATURE = b'\x00' * 96
Z2_SIGNATURE = b'\xc0' + b'\x00' * 95
def case01_sign():
for privkey in PRIVKEYS:
for message in MESSAGES:
sig = bls.Sign(privkey, message)
identifier = f'{int_to_hex(privkey)}_{encode_hex(message)}'
yield f'sign_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'privkey': int_to_hex(privkey),
'message': encode_hex(message),
},
'output': encode_hex(sig)
}
def case02_verify():
for i, privkey in enumerate(PRIVKEYS):
for message in MESSAGES:
# Valid signature
signature = bls.Sign(privkey, message)
pubkey = bls.SkToPk(privkey)
assert milagro_bls.SkToPk(to_bytes(privkey)) == pubkey
assert milagro_bls.Sign(to_bytes(privkey), message) == signature
identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}'
assert bls.Verify(pubkey, message, signature)
assert milagro_bls.Verify(pubkey, message, signature)
yield f'verify_valid_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'pubkey': encode_hex(pubkey),
'message': encode_hex(message),
'signature': encode_hex(signature),
},
'output': True,
}
# Invalid signatures -- wrong pubkey
wrong_pubkey = bls.SkToPk(PRIVKEYS[(i + 1) % len(PRIVKEYS)])
identifier = f'{encode_hex(wrong_pubkey)}_{encode_hex(message)}'
assert not bls.Verify(wrong_pubkey, message, signature)
assert not milagro_bls.Verify(wrong_pubkey, message, signature)
yield f'verify_wrong_pubkey_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'pubkey': encode_hex(wrong_pubkey),
'message': encode_hex(message),
'signature': encode_hex(signature),
},
'output': False,
}
# Invalid signature -- tampered with signature
tampered_signature = signature[:-4] + b'\xFF\xFF\xFF\xFF'
identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}'
assert not bls.Verify(pubkey, message, tampered_signature)
assert not milagro_bls.Verify(pubkey, message, tampered_signature)
yield f'verify_tampered_signature_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'pubkey': encode_hex(pubkey),
'message': encode_hex(message),
'signature': encode_hex(tampered_signature),
},
'output': False,
}
# Valid pubkey and signature with the point at infinity
assert bls.Verify(Z1_PUBKEY, message, Z2_SIGNATURE)
assert milagro_bls.Verify(Z1_PUBKEY, message, Z2_SIGNATURE)
yield f'verify_infinity_pubkey_and_infinity_signature', {
'input': {
'pubkey': encode_hex(Z1_PUBKEY),
'message': encode_hex(message),
'signature': encode_hex(Z2_SIGNATURE),
},
'output': True,
}
def case03_aggregate():
for message in MESSAGES:
sigs = [bls.Sign(privkey, message) for privkey in PRIVKEYS]
yield f'aggregate_{encode_hex(message)}', {
'input': [encode_hex(sig) for sig in sigs],
'output': encode_hex(bls.Aggregate(sigs)),
}
# Invalid pubkeys -- len(pubkeys) == 0
try:
bls.Aggregate([])
except Exception:
pass
else:
raise Exception("Should have been INVALID")
# No signatures to aggregate. Follow IETF BLS spec, return `None` to represent INVALID.
# https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-02#section-2.8
yield f'aggregate_na_signatures', {
'input': [],
'output': None,
}
def case04_fast_aggregate_verify():
for i, message in enumerate(MESSAGES):
privkeys = PRIVKEYS[:i + 1]
sigs = [bls.Sign(privkey, message) for privkey in privkeys]
aggregate_signature = bls.Aggregate(sigs)
pubkeys = [bls.SkToPk(privkey) for privkey in privkeys]
pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys]
# Valid signature
identifier = f'{pubkeys_serial}_{encode_hex(message)}'
assert bls.FastAggregateVerify(pubkeys, message, aggregate_signature)
assert milagro_bls.FastAggregateVerify(pubkeys, message, aggregate_signature)
yield f'fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'pubkeys': pubkeys_serial,
'message': encode_hex(message),
'signature': encode_hex(aggregate_signature),
},
'output': True,
}
# Invalid signature -- extra pubkey
pubkeys_extra = pubkeys + [bls.SkToPk(PRIVKEYS[-1])]
pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra]
identifier = f'{pubkeys_extra_serial}_{encode_hex(message)}'
assert not bls.FastAggregateVerify(pubkeys_extra, message, aggregate_signature)
assert not milagro_bls.FastAggregateVerify(pubkeys_extra, message, aggregate_signature)
yield f'fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'pubkeys': pubkeys_extra_serial,
'message': encode_hex(message),
'signature': encode_hex(aggregate_signature),
},
'output': False,
}
# Invalid signature -- tampered with signature
tampered_signature = aggregate_signature[:-4] + b'\xff\xff\xff\xff'
identifier = f'{pubkeys_serial}_{encode_hex(message)}'
assert not bls.FastAggregateVerify(pubkeys, message, tampered_signature)
assert not milagro_bls.FastAggregateVerify(pubkeys, message, tampered_signature)
yield f'fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
'input': {
'pubkeys': pubkeys_serial,
'message': encode_hex(message),
'signature': encode_hex(tampered_signature),
},
'output': False,
}
# Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE
assert not bls.FastAggregateVerify([], message, Z2_SIGNATURE)
assert not milagro_bls.FastAggregateVerify([], message, Z2_SIGNATURE)
yield f'fast_aggregate_verify_na_pubkeys_and_infinity_signature', {
'input': {
'pubkeys': [],
'message': encode_hex(message),
'signature': encode_hex(Z2_SIGNATURE),
},
'output': False,
}
# Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00...
assert not bls.FastAggregateVerify([], message, NO_SIGNATURE)
assert not milagro_bls.FastAggregateVerify([], message, NO_SIGNATURE)
yield f'fast_aggregate_verify_na_pubkeys_and_na_signature', {
'input': {
'pubkeys': [],
'message': encode_hex(message),
'signature': encode_hex(NO_SIGNATURE),
},
'output': False,
}
def case05_aggregate_verify():
pubkeys = []
pubkeys_serial = []
messages = []
messages_serial = []
sigs = []
for privkey, message in zip(PRIVKEYS, MESSAGES):
sig = bls.Sign(privkey, message)
pubkey = bls.SkToPk(privkey)
pubkeys.append(pubkey)
pubkeys_serial.append(encode_hex(pubkey))
messages.append(message)
messages_serial.append(encode_hex(message))
sigs.append(sig)
aggregate_signature = bls.Aggregate(sigs)
assert bls.AggregateVerify(pubkeys, messages, aggregate_signature)
assert milagro_bls.AggregateVerify(pubkeys, messages, aggregate_signature)
yield f'aggregate_verify_valid', {
'input': {
'pubkeys': pubkeys_serial,
'messages': messages_serial,
'signature': encode_hex(aggregate_signature),
},
'output': True,
}
tampered_signature = aggregate_signature[:4] + b'\xff\xff\xff\xff'
assert not bls.AggregateVerify(pubkey, messages, tampered_signature)
assert not milagro_bls.AggregateVerify(pubkeys, messages, tampered_signature)
yield f'aggregate_verify_tampered_signature', {
'input': {
'pubkeys': pubkeys_serial,
'messages': messages_serial,
'signature': encode_hex(tampered_signature),
},
'output': False,
}
# Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE
assert not bls.AggregateVerify([], [], Z2_SIGNATURE)
assert not milagro_bls.AggregateVerify([], [], Z2_SIGNATURE)
yield f'aggregate_verify_na_pubkeys_and_infinity_signature', {
'input': {
'pubkeys': [],
'messages': [],
'signature': encode_hex(Z2_SIGNATURE),
},
'output': False,
}
# Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00...
assert not bls.AggregateVerify([], [], NO_SIGNATURE)
assert not milagro_bls.AggregateVerify([], [], NO_SIGNATURE)
yield f'aggregate_verify_na_pubkeys_and_na_signature', {
'input': {
'pubkeys': [],
'messages': [],
'signature': encode_hex(NO_SIGNATURE),
},
'output': False,
}
def create_provider(handler_name: str,
test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider:
def prepare_fn(configs_path: str) -> str:
# Nothing to load / change in spec. Maybe in future forks.
# Put the tests into the general config category, to not require any particular configuration.
return 'general'
def cases_fn() -> Iterable[gen_typing.TestCase]:
for data in test_case_fn():
print(data)
(case_name, case_content) = data
yield gen_typing.TestCase(
fork_name=PHASE0,
runner_name='bls',
handler_name=handler_name,
suite_name='small',
case_name=case_name,
case_fn=lambda: [('data', 'data', case_content)]
)
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
if __name__ == "__main__":
bls.use_py_ecc() # Py-ecc is chosen instead of Milagro, since the code is better understood to be correct.
gen_runner.run_generator("bls", [
create_provider('sign', case01_sign),
create_provider('verify', case02_verify),
create_provider('aggregate', case03_aggregate),
create_provider('fast_aggregate_verify', case04_fast_aggregate_verify),
create_provider('aggregate_verify', case05_aggregate_verify),
])
``` |
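The `Z1_PUBKEY` and `Z2_SIGNATURE` constants defined above encode the point at infinity: the leading `0xc0` byte sets both the compression flag and the infinity flag of the BLS12-381 serialization, and the remaining bytes are zero. A small sanity check of that encoding (the byte lengths 48 and 96 are the standard compressed G1/G2 sizes):
```python
# Illustration only: the flag bits behind the point-at-infinity constants.
Z1_PUBKEY = b'\xc0' + b'\x00' * 47      # compressed G1 point at infinity
Z2_SIGNATURE = b'\xc0' + b'\x00' * 95   # compressed G2 point at infinity

first_byte = Z1_PUBKEY[0]
assert first_byte & 0x80                # compression bit set
assert first_byte & 0x40                # infinity bit set
assert len(Z1_PUBKEY) == 48 and len(Z2_SIGNATURE) == 96
```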
{
"source": "jiyingtk/YCSB-LSM",
"score": 3
} |
#### File: YCSB-LSM/util/parse_fp.py
```python
import sys
import re
basex = 10 #10000
def parse(fin, fout, dtype = "diff"):
fi = open(fin, "r")
fo = open(fout, "w")
fo.write("#from file:" + fin + "\n")
fo.write("#data type:" + dtype + "\n")
# for line in fi:
line = fi.readline()
datas = line.split(",")
y0 = 0
for i in range(len(datas)):
if datas[i] == '':
continue
y = float(datas[i])
if dtype == "diff":
y = y * (i + 1) - y0 * i
y0 = float(datas[i])
fo.write(str(basex * (i + 1)) + " " + str(y) + "\n")
fi.close()
fo.close()
if __name__ == '__main__':
parse(sys.argv[1], sys.argv[2], sys.argv[3])
``` |
{
"source": "jiyolla/StudyForCodingTestWithDongbinNa",
"score": 3
} |
#### File: BOJwithDongbinNa/10217/10217.py
```python
import sys
import math
import heapq
def solve():
read = sys.stdin.readline
# sys.setrecursionlimit(10**7)
for _ in range(int(read())):
n, m, k = map(int, read().split())
graph = [[] for _ in range(n + 1)]
for _ in range(k):
u, v, c, d = map(int, read().split())
            # for the dp version
            # graph[v].append((u, c, d))
            # for the dijkstra version
graph[u].append((c, d, v))
for i in range(1, n + 1):
graph[i].sort()
        # Basically I want to run Dijkstra on d.
        # But there is an extra constraint on c.
        # Done right, this should fall out cleanly as a DP.
        # Remember the 2D dp table?
        # Let dp[c][v] be the optimal value from s to v under a cost limit of c.
        # Then the recurrence, with U being the set of nodes with an edge into v, is
        # dp[c][v] = min(dp[c - c(u_i, v)][u_i] + cost(u_i, v)) over u_i in U.
        # That makes this closer to Bellman-Ford than to Dijkstra... actually it just is Bellman-Ford.
        # Okay, let's implement it.
        # It needs some fleshing out first:
        # in what order do we traverse to fill the dp table?
        # It has been a while since I did DP... does it make sense to just grow c from 0 up to m? Isn't that too slow?
        # For a single value of c... hmm, that feels odd.
        # No, it is right: every dp[c][v] only depends on smaller c, so filling the table in increasing c is correct.
        # Also, it is convenient to store the graph reversed: graph[v] should hold edges ending at v, not starting at v.
"""
dp = [[math.inf] * (n + 1) for _ in range(m + 1)]
for c in range(0, m + 1):
dp[c][1] = 0
for v2 in range(2, n + 1):
dp[c][v2] = dp[c - 1][v2]
for v1, c1, d1 in graph[v2]:
if c - c1 >= 0 and dp[c][v2] > dp[c - c1][v1] + d1:
dp[c][v2] = dp[c - c1][v1] + d1
print('Poor KCM' if math.isinf(dp[m][n]) else dp[m][n])
"""
        # Ah, that gets TLE... the worst case is about 100*100*10000*t operations... hmm.
        # It barely passes even on pypy3...
        # How could this be optimized?
        # Maybe go top-down...?
"""
mem = {}
def topdown_dp(c, v2):
if (c, v2) in mem:
return mem[(c, v2)]
if v2 == 1:
mem[(c, v2)] = 0
return 0
res = math.inf
for v1, c1, d1 in graph[v2]:
if c - c1 >= 0:
candidate = topdown_dp(c - c1, v1) + d1
res = candidate if candidate < res else res
mem[(c, v2)] = res
return res
print('Poor KCM' if math.isinf(topdown_dp(m, n)) else topdown_dp(m, n))
        # Now it exceeds the memory limit even on pypy3...
"""
        # Looking at the top-ranked answer, it seems Dijkstra itself can play the role of the DP...?
        # Dijkstra keeps picking the shortest-distance edges, so the question is what happens when the cost limit is exceeded.
        # Hmm. Checking only at the moment the limit is exceeded is useless; the cost has to be tracked the whole way.
        # Right. In plain Dijkstra a later visit to the same node is simply ignored, but here an earlier arrival may have
        # blown the cost budget while a later, slower arrival has a low enough cost to still be usable. So we still prefer
        # the early arrivals, but only keep the ones whose cost fits the budget. Nice!
h = []
dist = [[math.inf] * (m + 1) for _ in range(n + 1)]
heapq.heappush(h, (0, 0, 1))
dist[1] = [0] * (m + 1)
while h:
d, c, v1 = heapq.heappop(h)
if dist[v1][c] < d:
continue
if v1 == n:
dist[n][m] = d
break
for c2, d2, v2 in graph[v1]:
nc = c + c2
nd = d + d2
if nc > m:
break
if dist[v2][nc] > nd:
for i in range(nc, m + 1):
if nd >= dist[v2][i]:
break
dist[v2][i] = nd
heapq.heappush(h, (nd, nc, v2))
print('Poor KCM' if math.isinf(dist[n][m]) else dist[n][m])
solve()
```
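The comments above converge on a Dijkstra over (time, cost) states: a popped node is only re-expanded when it is reached within some cost budget more cheaply than before. The same idea as a stand-alone sketch, independent of this problem's I/O (the adjacency format `graph[v] = [(cost, time, to), ...]` is an assumption of the sketch):
```python
import heapq
import math


def cheapest_time(graph, n, budget, src, dst):
    """Minimum travel time from src to dst spending at most `budget` cost.

    graph[v] is assumed to be a list of (cost, time, to) tuples;
    returns math.inf if dst cannot be reached within the budget.
    """
    best = [[math.inf] * (budget + 1) for _ in range(n + 1)]
    best[src][0] = 0
    heap = [(0, 0, src)]  # (time so far, cost so far, node)
    while heap:
        time, cost, node = heapq.heappop(heap)
        if node == dst:
            return time
        if best[node][cost] < time:
            continue  # stale heap entry
        for edge_cost, edge_time, nxt in graph[node]:
            new_cost = cost + edge_cost
            if new_cost > budget:
                continue
            new_time = time + edge_time
            if new_time < best[nxt][new_cost]:
                best[nxt][new_cost] = new_time
                heapq.heappush(heap, (new_time, new_cost, nxt))
    return math.inf
```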
#### File: BOJwithDongbinNa/13913/13913.py
```python
import math
from collections import deque
def solve():
n, k = map(int, input().split())
q = deque()
q.append((n, 0))
mem = {n: (0, 0)}
found = False
ans = 0
while q and not found:
for _ in range(len(q)):
cur, step = q.popleft()
if cur == k:
found = True
ans = step
if cur - 1 >= 0 and not cur - 1 in mem:
mem[cur - 1] = (step + 1, 1)
q.append((cur - 1, step + 1))
if cur + 1 <= k and not cur + 1 in mem:
mem[cur + 1] = (step + 1, 2)
q.append((cur + 1, step + 1))
if cur * 2 <= k + 1 and not cur * 2 in mem:
mem[cur * 2] = (step + 1, 3)
q.append((cur * 2, step + 1))
print(ans)
cur = k
route = []
for i in range(ans + 1):
route.append(str(cur))
if mem[cur][1] == 1:
cur += 1
elif mem[cur][1] == 2:
cur -= 1
elif mem[cur][1] == 3:
cur //= 2
print(' '.join(route[::-1]))
solve()
```
#### File: BOJwithDongbinNa/1717/1717.py
```python
import sys
def solve():
read = sys.stdin.readline
n, m = map(int, read().split())
parent = [i for i in range(n + 1)]
res = []
def root(v):
while v != parent[v]:
v = parent[v]
return v
for _ in range(m):
c, a, b = read().split()
a, b = int(a), int(b)
if c == '0':
# union
root_a = root(a)
root_b = root(b)
if root_a > root_b:
parent[root_a] = root_b
else:
parent[root_b] = root_a
else:
# find
parent[a] = root(a)
parent[b] = root(b)
res.append('YES' if parent[a] == parent[b] else 'NO')
print('\n'.join(res))
solve()
```
#### File: BOJwithDongbinNa/17298/17298.py
```python
def solve():
n = int(input())
seq = list(map(int, input().split()))
stack = [seq[-1]]
res = [-1]
for i in seq[-2::-1]:
for _ in range(len(stack)):
t = stack.pop(-1)
if i < t:
stack.append(t)
res.append(t)
stack.append(i)
break
if stack == []:
res.append(-1)
stack = [i]
"""
temp = stack[::-1]
found = False
for j, t in enumerate(temp):
if i < t:
res.append(t)
stack = stack[:len(stack) - j]
stack.append(i)
found = True
break
if not found:
res.append(-1)
stack = [i]
"""
print(' '.join([str(i) for i in res[::-1]]))
solve()
```
#### File: BOJwithDongbinNa/17435/17435.py
```python
import sys
import math
def solve():
read = sys.stdin.readline
m = int(read())
f = tuple([None]) + tuple(map(int, read().split()))
q = int(read())
queries = [tuple(map(int, read().split())) for _ in range(q)]
# f_2tothe[i][j] = f_{2^i}(j)
max_power = math.ceil(math.log2(500000))
f_2tothe = [[None] * (m + 1) for _ in range(max_power)]
for x in range(1, m + 1):
f_2tothe[0][x] = f[x]
for power in range(1, max_power):
for x in range(1, m + 1):
f_2tothe[power][x] = f_2tothe[power - 1][f_2tothe[power - 1][x]]
def f_(n, x):
if n == 0:
return x
biggest_power_under = math.floor(math.log2(n))
next_n = n - pow(2, biggest_power_under)
return f_(next_n, f_2tothe[biggest_power_under][x])
print_buf = []
for n, x in queries:
print_buf.append(f'{f_(n, x)}')
print('\n'.join(print_buf))
solve()
```
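The table `f_2tothe[i][j] = f_{2^i}(j)` above is standard binary lifting; the same query can also be answered by walking over the set bits of n instead of recursing. A small sketch of that variant (the 1-indexed list format of `f` is an assumption):
```python
def iterate_function(f, n, x, max_power=19):
    """Return f applied n times to x.

    f is assumed to be 1-indexed: f[j] is defined for 1 <= j <= m and maps
    back into 1..m; 2**max_power must be larger than any queried n.
    """
    m = len(f) - 1
    up = [[0] + list(f[1:])]                     # up[0][j] = f(j)
    for i in range(1, max_power):
        prev = up[-1]
        up.append([0] + [prev[prev[j]] for j in range(1, m + 1)])
    for i in range(max_power):                   # walk over the set bits of n
        if (n >> i) & 1:
            x = up[i][x]
    return x


# e.g. with f = [0, 3, 2, 3] (so f(1)=3, f(2)=2, f(3)=3):
# iterate_function([0, 3, 2, 3], 2, 1) == 3     # f(f(1)) = f(3) = 3
```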
#### File: BOJwithDongbinNa/1753/1753.py
```python
import sys
import heapq
import math
def solve():
read = sys.stdin.readline
v, e = map(int, read().split())
s = int(read())
graph = [[] for _ in range(v + 1)]
for _ in range(e):
v1, v2, w = map(int, read().split())
graph[v1].append((v2, w))
dist = [math.inf for _ in range(v + 1)]
hq = []
dist[s] = 0
heapq.heappush(hq, (0, s))
while hq:
cost, v1 = heapq.heappop(hq)
for v2, w in graph[v1]:
if dist[v2] > cost + w:
dist[v2] = cost + w
heapq.heappush(hq, (dist[v2], v2))
res = [str(i).upper() for i in dist[1:]]
print('\n'.join(res))
solve()
```
#### File: BOJwithDongbinNa/1774/1774.py
```python
import sys
def solve():
read = sys.stdin.readline
n, m = map(int, read().split())
points = [tuple(map(int, read().split())) for _ in range(n)]
parent = [i for i in range(n + 1)]
size = [1 for _ in range(n + 1)]
edges = []
def find(v):
while v != parent[v]:
parent[v] = parent[parent[v]]
v = parent[v]
return v
def union(v1, v2):
root_v1 = find(v1)
root_v2 = find(v2)
if root_v1 == root_v2:
return False
else:
parent[root_v1] = root_v2
size[root_v2] += size[root_v1]
return True
def dist(p1, p2):
return pow((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2, 0.5)
for i, p1 in enumerate(points):
for j, p2 in enumerate(points):
if i != j:
edges.append((dist(p1, p2), i + 1, j + 1))
for _ in range(m):
v1, v2 = map(int, read().split())
union(v1, v2)
ans = 0
for d, v1, v2 in sorted(edges):
if union(v1, v2):
ans += d
if size[v2] == n:
break
print(f'{ans:.2f}')
solve()
```
#### File: BOJwithDongbinNa/1904/1904.py
```python
def solve():
n = int(input())
f = [1, 2] + [0] * (n - 2)
for i in range(2, n):
f[i] = (f[i - 2] + f[i - 1]) % 15746
print(f[n - 1])
solve()
```
#### File: BOJwithDongbinNa/1920/1920_bisect.py
```python
import bisect
def solve():
input()
table = list(map(int, input().split()))
table.sort()
input()
print('\n'.join(['1' if bisect.bisect_right(table, int(i)) - bisect.bisect_left(table, int(i)) > 0 else '0' for i in input().split()]))
solve()
```
#### File: BOJwithDongbinNa/1967/1967.py
```python
import sys
def solve():
sys.setrecursionlimit(10**6)
read = sys.stdin.readline
n = int(read())
child = [[] for _ in range(n + 1)]
for _ in range(n - 1):
p, c, w = map(int, read().split())
child[p].append((c, w))
    # Almost the same as problem 1167; this time it can probably be written a bit more elegantly.
max_diameter = 0
def dfs(node):
nonlocal max_diameter
candidates = []
for c, w in child[node]:
if child[c]:
w += dfs(c)
candidates.append(w)
candidates.sort()
local_max = sum(candidates[-2:])
if max_diameter < local_max:
max_diameter = local_max
return candidates[-1]
if child[1]:
dfs(1)
    # Beautiful. Every edge case is handled by this as-is.
print(max_diameter)
solve()
```
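The comments above point out how neatly the diameter falls out of one DFS: every node reports its longest downward path, and the diameter candidate at a node is the sum of its two best child paths. A stand-alone sketch of the same idea on a generic weighted adjacency list (the `adj[u] = [(v, w), ...]` format is an assumption):
```python
import sys


def tree_diameter(adj, root=1):
    """Diameter of a weighted tree, adj[u] = [(v, w), ...] (undirected)."""
    sys.setrecursionlimit(10**6)
    best = 0

    def dfs(node, parent):
        nonlocal best
        top_two = [0, 0]                 # two longest downward paths from node
        for child, weight in adj[node]:
            if child == parent:
                continue
            depth = dfs(child, node) + weight
            if depth > top_two[0]:
                top_two = [depth, top_two[0]]
            elif depth > top_two[1]:
                top_two[1] = depth
        # the best path through this node joins its two deepest subtrees
        best = max(best, top_two[0] + top_two[1])
        return top_two[0]

    dfs(root, 0)
    return best
```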
#### File: BOJwithDongbinNa/20040/20040.py
```python
import sys
def solve():
read = sys.stdin.readline
n, m = map(int, read().split())
parent = [i for i in range(n)]
def find(v):
while v != parent[v]:
parent[v] = parent[parent[v]]
v = parent[v]
return v
def union(v1, v2):
root_v1 = find(v1)
root_v2 = find(v2)
if root_v1 != root_v2:
parent[root_v1] = root_v2
return True
else:
return False
cycle_found = False
for i in range(m):
v1, v2 = map(int, read().split())
if not union(v1, v2):
cycle_found = True
break
if cycle_found:
print(i + 1)
else:
print(0)
solve()
```
#### File: BOJwithDongbinNa/2098/2098.py
```python
import math
def solve():
n = int(input())
w = []
for _ in range(n):
w.append(list(map(int, input().split())))
for i in range(n):
for j in range(n):
if w[i][j] == 0 and i != j:
w[i][j] = math.inf
# mem[state][last_city] = best_cost
    # state = 0 ~ (n - 1) bits of 1. The state excludes the 0th city, as it is visited at the start.
    # city number = 0 ~ n - 1, including the 0th city (the starting city).
mem = [[math.inf] * n for _ in range(1 << (n - 1))]
    # mem[0] should only be used through its first element,
    # as mem[0][i] would mean starting the tour from city i,
    # and we only consider tours starting from city 0.
    # Still, simply setting the others to inf keeps the code simple.
mem[0] = [0] + [math.inf] * (n - 1)
    # Compute mem for states from 1 up to those with (n - 2) bits set and a single 0.
    # States with (n - 2) bits set and a single 0 have only one decision left to make,
    # but that decision needs special care, namely adding
    # the cost of returning to the starting city.
    # So they are not processed here.
for state in range(1, (1 << (n - 1)) - 1):
target_city_bit = 1
target_city = 1
while target_city_bit <= state:
if state & target_city_bit:
for last_city, cost in enumerate(mem[state & (~target_city_bit)]):
new_cost = cost + w[last_city][target_city]
if mem[state][target_city] > new_cost:
mem[state][target_city] = new_cost
target_city_bit <<= 1
target_city += 1
# For states with last decision to make
ans = math.inf
for i in range(1, n):
for last_city, cost in enumerate(mem[((1 << (n - 1)) - 1) & (~(1 << (i - 1)))]):
new_cost = cost + w[last_city][i] + w[i][0]
if ans > new_cost:
ans = new_cost
print(ans)
solve()
```
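The memo layout described in the comments above, `mem[state][last_city]`, is the classic Held-Karp bitmask DP. A compact top-down sketch of the same recurrence, written independently of this problem's input handling (the `w` cost matrix with `math.inf` for missing edges is an assumption):
```python
import math
from functools import lru_cache


def held_karp(w):
    """Minimum Hamiltonian cycle cost, starting and ending at city 0.

    w is assumed to be an n x n matrix with math.inf marking missing edges.
    """
    n = len(w)
    full = (1 << n) - 1

    @lru_cache(maxsize=None)
    def best(mask, last):
        # mask is the set of visited cities; it always contains 0 and `last`
        if mask == full:
            return w[last][0]            # every city visited: close the tour
        result = math.inf
        for nxt in range(1, n):
            if not mask & (1 << nxt):
                result = min(result, w[last][nxt] + best(mask | (1 << nxt), nxt))
        return result

    return best(1, 0)
```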
#### File: BOJwithDongbinNa/2533/2533.py
```python
import sys
def solve():
sys.setrecursionlimit(10**6)
read = sys.stdin.readline
n = int(read())
adj = [[] for _ in range(n + 1)]
for _ in range(n - 1):
u, v = map(int, read().split())
adj[u].append(v)
adj[v].append(u)
# dp[for_tree_rooted_at][w/_or_wo/_root_being_early_adopter] = min_num_early_adopters
dp = [[0, 1] for _ in range(n + 1)]
    # A bottom-up approach would be awkward,
    # because we would have to traverse the tree in order of decreasing depth,
    # which requires a depth-labeling pass before the traversal.
    # Though not that hard, we will do it the top-down way anyway.
visited = [False] * (n + 1)
def min_ea(node):
visited[node] = True
for child in adj[node]:
if not visited[child]:
min_ea(child)
dp[node][0] += dp[child][1]
dp[node][1] += dp[child][0] if dp[child][0] < dp[child][1] else dp[child][1]
min_ea(1)
print(min(dp[1]))
# Amazingly Simple!
solve()
```
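The two-entry table sketched in the comments above (the subtree cost with and without the root being an early adopter) generalizes to any rooted tree. A small self-contained version of the same recurrence (the 1-indexed undirected adjacency list is an assumption):
```python
import sys


def min_early_adopters(adj, n, root=1):
    """Minimum early adopters in a tree; adj[u] lists the neighbours of u."""
    sys.setrecursionlimit(10**6)
    visited = [False] * (n + 1)

    def dfs(node):
        visited[node] = True
        not_adopter, adopter = 0, 1
        for child in adj[node]:
            if not visited[child]:
                c_not, c_yes = dfs(child)
                not_adopter += c_yes              # children of a non-adopter must adopt
                adopter += min(c_not, c_yes)      # children of an adopter are free to choose
        return not_adopter, adopter

    return min(dfs(root))
```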
#### File: BOJwithDongbinNa/2606/2606.py
```python
from sys import stdin
read = stdin.readline()
num_node = int(read())
connected = [[] for i in range(num_node + 1)]
visited = [False] * (num_node + 1)
num_edge = int(read())
for e in stdin:
n1, n2 = list(map(int, e.split()))
connected[n1].append(n2)
connected[n2].append(n1)
def dfs(node):
if not visited[node]:
visited[node] = True
for n in connected[node]:
dfs(n)
dfs(1)
print(sum(visited[2:]))
```
#### File: BOJwithDongbinNa/5639/5639.py
```python
import sys
def solve():
preorder = list(map(int, sys.stdin.readlines()))
n = len(preorder)
# Procedure
# 0. Given a preorder of a binary search tree
# 1. The first element is the root, and is printed after all sub trees are printed
# 2. Remaining elements is divided into two by the value of first element
# 3. Do the procedure again on both sub tree
print_buf = []
stack = [(0, n - 1)]
while stack:
start, end = stack.pop()
print_buf.append(f'{preorder[start]}')
for i in range(start + 1, end + 1):
if preorder[start] < preorder[i]:
if start + 1 <= i - 1:
stack.append((start + 1, i - 1))
if i <= end:
stack.append((i, end))
break
else:
if start + 1 <= end:
stack.append((start + 1, end))
print('\n'.join(print_buf[::-1]))
solve()
```
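The procedure spelled out in the comments above (the first element is the root, the rest splits into left and right by value, recurse, print the root last) can also be written recursively when the recursion limit allows; the explicit stack above avoids deep recursion. A minimal recursive sketch:
```python
import sys


def preorder_to_postorder(preorder):
    """Postorder of a binary search tree, given its preorder as a list."""
    sys.setrecursionlimit(10**6)
    out = []

    def build(lo, hi):                     # half-open slice [lo, hi)
        if lo >= hi:
            return
        root = preorder[lo]
        split = hi
        for i in range(lo + 1, hi):        # first value larger than the root
            if preorder[i] > root:
                split = i
                break
        build(lo + 1, split)               # left subtree: values < root
        build(split, hi)                   # right subtree: values > root
        out.append(root)

    build(0, len(preorder))
    return out
```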
#### File: BOJwithDongbinNa/7562/7562.py
```python
from collections import deque
def solve():
t = int(input())
for _ in range(t):
n = int(input())
visited = [[False] * n for _ in range(n)]
x0, y0 = map(int, input().split())
x1, y1 = map(int, input().split())
queue = deque()
queue.append((x0, y0))
visited[x0][y0] = True
ans = 0
while not visited[x1][y1]:
ans += 1
for i in range(len(queue)):
x, y = queue.popleft()
if x - 1 >= 0 and y - 2 >= 0 and not visited[x - 1][y - 2]:
visited[x - 1][y - 2] = True
queue.append((x - 1, y - 2))
if x - 2 >= 0 and y - 1 >= 0 and not visited[x - 2][y - 1]:
visited[x - 2][y - 1] = True
queue.append((x - 2, y - 1))
if x + 1 < n and y + 2 < n and not visited[x + 1][y + 2]:
visited[x + 1][y + 2] = True
queue.append((x + 1, y + 2))
if x + 2 < n and y + 1 < n and not visited[x + 2][y + 1]:
visited[x + 2][y + 1] = True
queue.append((x + 2, y + 1))
if x + 1 < n and y - 2 >= 0 and not visited[x + 1][y - 2]:
visited[x + 1][y - 2] = True
queue.append((x + 1, y - 2))
if x + 2 < n and y - 1 >= 0 and not visited[x + 2][y - 1]:
visited[x + 2][y - 1] = True
queue.append((x + 2, y - 1))
if x - 1 >= 0 and y + 2 < n and not visited[x - 1][y + 2]:
visited[x - 1][y + 2] = True
queue.append((x - 1, y + 2))
if x - 2 >= 0 and y + 1 < n and not visited[x - 2][y + 1]:
visited[x - 2][y + 1] = True
queue.append((x - 2, y + 1))
print(ans)
solve()
```
#### File: BOJwithDongbinNa/7569/7569.py
```python
from sys import stdin
from collections import deque
@profile
def solve():
read = stdin.readline
m, n, h = list(map(int, read().split()))
new = deque()
mature = [[[0] * m for j in range(n)] for k in range(h)]
for k in range(h):
for j in range(n):
mature[k][j] = list(map(int, read().split()))
for i in range(m):
if mature[k][j][i] == 1:
new.append((i, j, k))
d = -1
while new:
for _ in range(len(new)):
t = new.popleft()
x, y, z = t
if x + 1 < m and mature[z][y][x + 1] == 0:
mature[z][y][x + 1] = 1
new.append((x + 1, y, z))
if x > 0 and mature[z][y][x - 1] == 0:
mature[z][y][x - 1] = 1
new.append((x - 1, y, z))
if y + 1 < n and mature[z][y + 1][x] == 0:
mature[z][y + 1][x] = 1
new.append((x, y + 1, z))
if y > 0 and mature[z][y - 1][x] == 0:
mature[z][y - 1][x] = 1
new.append((x, y - 1, z))
if z + 1 < h and mature[z + 1][y][x] == 0:
mature[z + 1][y][x] = 1
new.append((x, y, z + 1))
if z > 0 and mature[z - 1][y][x] == 0:
mature[z - 1][y][x] = 1
new.append((x, y, z - 1))
d += 1
for z in range(h):
for y in range(n):
if 0 in mature[z][y]:
d = -1
break
print(d)
if __name__ == '__main__':
solve()
```
#### File: BOJwithDongbinNa/9184/9184.py
```python
def solve():
mem = {}
def w(a, b, c):
if a <= 0 or b <= 0 or c <= 0:
return 1
if a > 20 or b > 20 or c > 20:
mem[(a, b, c)] = w(20, 20, 20)
return mem[(a, b, c)]
if (a, b, c) in mem:
return mem[(a, b, c)]
if a < b and b < c:
mem[(a, b, c)] = w(a, b, c - 1) + w(a, b - 1, c - 1) - w(a, b - 1, c)
return mem[(a, b, c)]
mem[(a, b, c)] = w(a - 1, b, c) + w(a - 1, b - 1, c) + w(a - 1, b, c - 1) - w(a - 1, b - 1, c - 1)
return mem[(a, b, c)]
while True:
a, b, c = map(int, input().split())
if a == -1 and b == -1 and c == -1:
break
else:
print(f'w({a}, {b}, {c}) = {w(a, b, c)}')
solve()
```
#### File: BOJwithDongbinNa/9370/9370.py
```python
import sys
import heapq
import math
def solve():
read = sys.stdin.readline
for _ in range(int(read())):
n, m, t = map(int, read().split())
s, g, h = map(int, read().split())
graph = [{} for _ in range(n + 1)]
for _ in range(m):
a, b, d = map(int, read().split())
graph[a][b] = d * 2
graph[b][a] = d * 2
dests = [int(read()) for _ in range(t)]
        # Is this only working because we got lucky with the weights?
        # Let's think it through carefully.
        # In terms of the original weights: is there a case where shrinking g-h by 0.5 makes an edge chosen that
        # would not be chosen otherwise? If a change of only 0.5 flips the decision, the gap to the edge originally
        # chosen was at most 0.5, which with integer weights means they were effectively tied - i.e. both could have
        # been chosen in the first place, so the tweak does not affect the optimal answer. Okay, the trick is sound.
graph[g][h] -= 1
graph[h][g] -= 1
def dijkstra(start, *end):
h = []
heapq.heappush(h, (0, start))
dist = [math.inf for _ in range(n + 1)]
dist[start] = 0
while h:
d1, v1 = heapq.heappop(h)
if dist[v1] < d1:
continue
for v2, d2 in graph[v1].items():
if dist[v2] > d1 + d2:
dist[v2] = d1 + d2
heapq.heappush(h, (dist[v2], v2))
return tuple([dist[i] for i in end])
"""
d_s_g, d_s_h, *d_s_dests = dijkstra(s, *([g, h] + dests))
d_g_dests = dijkstra(g, *dests)
d_h_dests = dijkstra(h, *dests)
res = []
for i in range(t):
            # It took embarrassingly long to find that the case where d_s_dests[i] is inf was not handled...
if d_s_dests[i] == min(d_s_g + d_h_dests[i], d_s_h + d_g_dests[i]) + graph[g][h] and not math.isinf(d_s_dests[i]):
res.append(dests[i])
print(*sorted(res))
"""
d_s_dests = dijkstra(s, *dests)
res = []
for i in range(t):
if d_s_dests[i] % 2 == 1:
res.append(dests[i])
print(*sorted(res))
solve()
```
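The argument worked out in the comments above (double every edge weight, shave 1 off the g-h edge, then read the parity of the shortest distance) can be checked in isolation. A tiny numeric illustration, assuming integer edge weights as in the original problem:
```python
# Illustration only, with made-up integer weights.
# Two routes from s to x of equal length 9: a direct edge, and s-g-h-x
# with weights 3, 2 and 4.
direct = 9
via_gh = 3 + 2 + 4
assert direct == via_gh

# After doubling every weight and subtracting 1 from the g-h edge, only the
# route that actually uses g-h has odd length, and ties now break toward it,
# so "doubled shortest distance is odd" <=> "a shortest path can use g-h".
doubled_direct = 2 * direct                     # 18, even
doubled_via_gh = 2 * 3 + (2 * 2 - 1) + 2 * 4    # 17, odd and strictly smaller
assert doubled_via_gh % 2 == 1 and doubled_via_gh < doubled_direct
```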
#### File: BOJwithDongbinNa/9372/9372.py
```python
import sys
def solve():
read = sys.stdin.readline
"""
def find(v):
while v != parent[v]:
parent[v] = parent[parent[v]]
v = parent[v]
return v
def union(v1, v2):
root_v1 = find(v1)
root_v2 = find(v2)
if root_v1 == root_v2:
return False
else:
parent[root_v1] = root_v2
size[root_v2] += size[root_v1]
return True
res = []
for _ in range(int(read())):
n, m = map(int, read().split())
parent = [i for i in range(n)]
size = [1 for _ in range(n)]
ans = 0
found = False
for _ in range(m):
a, b = map(int, read().split())
if not found and union(a - 1, b - 1):
ans += 1
if size[b - 1] == n:
found = True
res.append(str(ans))
print('\n'.join(res))
"""
    # Wow, that was a lot of wasted effort... I was stunned when I saw the top-ranked answer... fair enough.
# res = []
for _ in range(int(read())):
n, m = map(int, read().split())
for _ in range(m):
read()
# res.append(str(n - 1))
print(n - 1)
# print('\n'.join(res))
solve()
```
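The realization in the comments above is that a connected graph on n vertices always needs exactly n - 1 edges to stay connected, so the edge list never has to be read. A small illustration of that fact with a made-up graph and a throwaway union-find:
```python
# Illustration only: for any connected graph on n vertices, keeping every
# vertex reachable needs exactly n - 1 edges (a spanning tree), which is why
# the answer can be printed without looking at the edge list.
edges = [(1, 2), (2, 3), (3, 1), (3, 4), (4, 5), (5, 2)]   # made-up graph, n = 5
parent = list(range(6))


def find(v):
    while parent[v] != v:
        parent[v] = parent[parent[v]]
        v = parent[v]
    return v


kept = 0
for a, b in edges:
    ra, rb = find(a), find(b)
    if ra != rb:
        parent[ra] = rb
        kept += 1
assert kept == 5 - 1
```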
#### File: StudyForCodingTestWithDongbinNa/programmers/1_72410.py
```python
import re
def solution(new_id: str):
# Step 1
new_id = new_id.lower()
# Step 2
new_id = re.sub('[^a-z0-9_.-]', '', new_id)
# Step 3
new_id = re.sub('\.\.+', '.', new_id)
# Step 4
new_id = new_id.strip('.')
# Step 5
new_id = 'a' if not new_id else new_id
# Step 6
new_id = new_id[:15].strip('.')
# Step 7
new_id = new_id + new_id[-1]*(3 - len(new_id))
return new_id
print(solution("...!@BaT#*..y.abcdefghijklm"))
```
#### File: StudyForCodingTestWithDongbinNa/programmers/2_12899.py
```python
def solution(n):
def recursive(n, i):
if i == 0:
return str(n - 1)
a = (n - 1) // 3**i
return str(a) + recursive(n - a*3**i, i - 1)
for i in range(100):
if 3**(i + 1) > n:
break
else:
n -= 3**(i + 1)
if n == 0:
return '4'*i
ret = []
for r in recursive(n, i):
if r == '0':
ret.append('1')
elif r == '1':
ret.append('2')
elif r == '2':
ret.append('4')
return ''.join(ret)
print(solution(38))
```
#### File: StudyForCodingTestWithDongbinNa/programmers/2_42888.py
```python
def solution(record):
uid_name_dict = {}
for cmd, uid, *name in map(str.split, reversed(record)):
if cmd in {'Enter', 'Change'} and uid not in uid_name_dict:
uid_name_dict[uid] = name[0]
ret = []
for cmd, uid, *name in map(str.split, record):
if cmd == 'Enter':
ret.append(f'{uid_name_dict[uid]}님이 들어왔습니다.')
elif cmd == 'Leave':
ret.append(f'{uid_name_dict[uid]}님이 나갔습니다.')
return ret
print(solution(["Enter uid1234 Muzi", "Enter uid4567 Prodo","Leave uid1234","Enter uid1234 Prodo","Change uid4567 Ryan"]))
```
#### File: StudyForCodingTestWithDongbinNa/programmers/2_60058.py
```python
def solution(p):
def inverse(p):
return ''.join([')' if item == '(' else '(' for item in p])
def is_correct(p):
stack = []
try:
for item in p:
if item == '(':
stack.append(0)
else:
stack.pop()
except IndexError:
return False
return True
def transform(p):
if not p:
return p
l_count = 0
r_count = 0
for i in range(len(p)):
if p[i] == '(':
l_count += 1
elif p[i] == ')':
r_count += 1
if l_count != 0 and l_count == r_count:
break
u = p[:i + 1]
v = p[i + 1:]
if is_correct(u):
return u + transform(v)
ret = '(' + transform(v) + ')' + inverse(u[1:-1])
return ret
return transform(p)
print(solution("(()())()"))
```
#### File: StudyForCodingTestWithDongbinNa/programmers/3_42895.py
```python
MAX_n = 8
# A list of set
dp = [set() for _ in range(MAX_n + 1)]
def construct(n, N):
global dp
dp[n].add(int(str(N)*n))
for i in range(1, n):
opds_1 = dp[i]
opds_2 = dp[n - i]
for opd_1 in opds_1:
for opd_2 in opds_2:
dp[n].add(opd_1 + opd_2)
dp[n].add(opd_1 - opd_2)
dp[n].add(opd_1 * opd_2)
if opd_2 != 0:# and opd_1 % opd_2 == 0:
dp[n].add(opd_1 // opd_2)
# dp[n].add(int(opd_1 / opd_2))
def solution(N, number):
for i in range(1, MAX_n + 1):
construct(i, N)
if number in dp[i]:
print(dp)
return i
return -1
print(solution(2, 2))
```
#### File: StudyForCodingTestWithDongbinNa/programmers/3_77486.py
```python
def solution(enrolls, referrals, sellers, amounts):
referrals_of = {}
for enroll, referral in zip(enrolls, referrals):
referrals_of[enroll] = [enroll]
if referral != '-':
referrals_of[enroll].extend(referrals_of[referral])
account = {enroll: 0 for enroll in enrolls}
for seller, amount in zip(sellers, amounts):
amount *= 100
for referral in referrals_of[seller]:
if amount == 0:
break
account[referral] += amount - int(0.1*amount)
amount = int(0.1*amount)
return list(account.values())
print(solution(
["john", "mary", "edward", "sam", "emily", "jaimie", "tod", "young"],
["-", "-", "mary", "edward", "mary", "mary", "jaimie", "edward"],
["young", "john", "tod", "emily", "mary"],
[12, 4, 2, 5, 10]
))
```
#### File: StudyForCodingTestWithDongbinNa/programmers/4_43236.py
```python
from collections import namedtuple
import math
import bisect
class Node:
def __init__(self, val):
self.l = None
self.r = None
self.v = val
class Tree:
def __init__(self):
self.root = None
def getRoot(self):
return self.root
def add(self, val):
if self.root is None:
self.root = Node(val)
else:
self._add(val, self.root)
def _add(self, val, node):
if val < node.v:
if node.l is not None:
self._add(val, node.l)
else:
node.l = Node(val)
else:
if node.r is not None:
self._add(val, node.r)
else:
node.r = Node(val)
def find(self, val):
if self.root is not None:
return self._find(val, self.root)
else:
return None
def _find(self, val, node):
if val == node.v:
return node
elif (val < node.v and node.l is not None):
return self._find(val, node.l)
elif (val > node.v and node.r is not None):
return self._find(val, node.r)
def deleteTree(self):
# garbage collector will do this for us.
self.root = None
def printTree(self):
if self.root is not None:
self._printTree(self.root)
def _printTree(self, node):
if node is not None:
self._printTree(node.l)
print(str(node.v) + ' ')
self._printTree(node.r)
tree = Tree()
tree.add(3)
tree.add(4)
tree.add(0)
tree.add(8)
tree.add(2)
tree.printTree()
print(tree.find(3).v)
print(tree.find(10))
tree.deleteTree()
tree.printTree()
def solution(distance, rocks, n):
rocks.extend([0, distance])
rocks.sort()
Rock = namedtuple('Rock', ['left', 'right'])
new_rocks = [Rock(math.inf, rocks[1])]
for i in range(1, len(rocks) - 1):
new_rocks.append(Rock(rocks[i] - rocks[i - 1], rocks[i + 1] - rocks[i]))
new_rocks.append(Rock(rocks[-1], math.inf))
rocks = new_rocks
Distance = namedtuple('Distance', ['length', 'left_rock', 'right_rock'])
distances = []
for i in range(1, len(rocks)):
distances.append(Distance(rocks[i].left, rocks[i - 1], rocks[i]))
distances.sort(key=lambda distance: (distance.length, min(distance.left_rock.right, distance.right_rock.left)))
for _ in range(n):
# Pop the shortest distance
distance = distances.pop(0)
# Modify the affected distance
if distance.left_rock.right < distance.right_rock.left:
dist = distance.left_rock.right
for i in range(bisect.bisect_left(distances, dist), bisect.bisect(distances, dist)):
if distances[i].right_rock == distance.left_rock:
distances[i].right_rock = distance.right_rock
modified_distance = distances.pop(i)
else:
dist = distance.right_rock.left
for i in range(bisect.bisect_left(distances, dist), bisect.bisect(distances, dist)):
if distances[i].left_rock == distance.right_rock:
distances[i].left_rock = distance.left_rock
modified_distance = distances.pop(i)
answer = 0
return answer
print(solution(25, [2, 14, 11, 21, 17], 2))
```
#### File: kakao2022/kakao2022/grader.py
```python
import random
from .api import put_change_grade
# grades[id] = grade for user #{id}.
# grades[0] is not used, since user ids start from 1.
def change_grade_randomshuffle(grades):
changed_users_id = set(range(len(grades)))
changed_users_id.remove(0)
grades = list(range(len(grades)))
random.shuffle(grades)
commands = []
for changed_user_id in changed_users_id:
commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
put_change_grade(commands)
def change_grade_simplelinear(grades, game_results):
MAX_TAKEN = 40
changed_users_id = set()
for game_result in game_results:
changed_users_id.add(game_result['win'])
changed_users_id.add(game_result['lose'])
grades[game_result['win']] += MAX_TAKEN - game_result['taken']
grades[game_result['lose']] -= MAX_TAKEN - game_result['taken']
commands = []
for changed_user_id in changed_users_id:
commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
put_change_grade(commands)
def change_grade_discountedlinear(grades, game_results):
BASE_SCORE = 100
MIN_TAKEN = 3
MAX_TAKEN = 40
changed_users_id = set()
for game_result in game_results:
changed_users_id.add(game_result['win'])
changed_users_id.add(game_result['lose'])
grades[game_result['win']] += BASE_SCORE * (2 - 1.6*(game_result['taken'] - MIN_TAKEN)/(MAX_TAKEN - MIN_TAKEN))
grades[game_result['lose']] -= BASE_SCORE * (2 - 1.6*(game_result['taken'] - MIN_TAKEN)/(MAX_TAKEN - MIN_TAKEN))
commands = []
for changed_user_id in changed_users_id:
commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
put_change_grade(commands)
def change_grade_simplequadratic(grades, game_results):
MAX_TAKEN = 40
changed_users_id = set()
for game_result in game_results:
changed_users_id.add(game_result['win'])
changed_users_id.add(game_result['lose'])
grades[game_result['win']] += (MAX_TAKEN - game_result['taken'])**2
grades[game_result['lose']] -= (MAX_TAKEN - game_result['taken'])**2
commands = []
for changed_user_id in changed_users_id:
commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
put_change_grade(commands)
def change_grade_preventabusediscountedlinear(grades, game_results, suspicion_marks):
BASE_SCORE = 4000
MIN_TAKEN = 3
MAX_TAKEN = 40
changed_users_id = set()
for game_result in game_results:
winner = game_result['win']
loser = game_result['lose']
game_time = game_result['taken']
changed_users_id.add(winner)
changed_users_id.add(loser)
if game_time < 11:
expected_game_time = 40 - abs(grades[winner] - grades[loser])/99000*35
tolerance = 5 + 5
if game_time < expected_game_time - tolerance:
suspicion_marks[loser] += 1
if suspicion_marks[loser] > 2:
continue
expected_win_rate = grades[winner]/(grades[winner] + grades[loser])
win_rate_modifier = expected_win_rate # (expected_win_rate - 0.3)*2 + 0.2
grades[winner] += win_rate_modifier*BASE_SCORE*(3 - 2.5*(game_time - MIN_TAKEN)/(MAX_TAKEN - MIN_TAKEN))
grades[loser] -= win_rate_modifier*BASE_SCORE*(3 - 2.5*(game_time - MIN_TAKEN)/(MAX_TAKEN - MIN_TAKEN))
commands = []
for changed_user_id in changed_users_id:
commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
put_change_grade(commands)
``` |
{
"source": "jiyongfeng/imap",
"score": 3
} |
#### File: jiyongfeng/imap/editor.py
```python
import global_var
import modules.map.proto as proto
import matplotlib.pyplot as plt
# Elements with changed attributes
changed_artist = []
def clear_properties():
for artist in changed_artist:
artist.set_color('g')
artist.set_label('')
def set_properties(obj, color, label):
obj.set_color(color)
obj.set_label(label)
def show_lane_detail(line, lane):
set_properties(line, 'red', "cur_lane: " + lane.id.id)
changed_artist.append(line)
    # predecessor lanes
for predecessor_id in lane.predecessor_id:
line = global_var.get_element_value(predecessor_id.id)
if line:
set_properties(line, 'cyan', "pre_lane: " + predecessor_id.id)
changed_artist.append(line)
# successor lanes
for successor_id in lane.successor_id:
line = global_var.get_element_value(successor_id.id)
if line:
set_properties(line, 'purple', "suc_lane: " + successor_id.id)
changed_artist.append(line)
# lane.left_neighbor_forward_lane_id
# lane.right_neighbor_forward_lane_id
# lane.left_neighbor_reverse_lane_id
# lane.right_neighbor_reverse_lane_id
def on_click(event):
pass
def on_pick(event):
# 1. clear preview label first
clear_properties()
# 2. find event.artist
obj = global_var.get_artist_value(event.artist)
# print(event.artist)
if isinstance(obj, proto.map_lane_pb2.Lane):
show_lane_detail(event.artist, obj)
# 3. redraw
plt.legend()
plt.draw()
def on_press(event):
pass
def on_release(event):
pass
def on_motion(event):
pass
```
#### File: jiyongfeng/imap/main.py
```python
import argparse
import matplotlib.pyplot as plt
import editor
import global_var
from map import Map
def draw(hdmap):
lane_ids = []
junction_ids = []
hdmap.draw_lanes(ax, lane_ids)
hdmap.draw_junctions(ax, junction_ids)
hdmap.draw_crosswalks(ax)
hdmap.draw_stop_signs(ax)
hdmap.draw_yields(ax)
def show_map():
hdmap=Map()
hdmap.load(args.map)
draw(hdmap)
    # maximize the window
manager=plt.get_current_fig_manager()
manager.window.showMaximized()
# tight layout
    # todo(zero): why does tight layout not work?
plt.tight_layout()
plt.axis('equal')
plt.show()
def add_editor():
fig.canvas.mpl_connect('button_press_event', editor.on_click)
fig.canvas.mpl_connect('button_press_event', editor.on_press)
fig.canvas.mpl_connect('button_release_event', editor.on_release)
fig.canvas.mpl_connect('pick_event', editor.on_pick)
fig.canvas.mpl_connect('motion_notify_event', editor.on_motion)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Mapshow is a tool to display hdmap info on a map.",
prog="mapshow.py")
parser.add_argument(
"-m", "--map", action="store", type=str, required=True,
help="Specify the map file in txt or binary format")
args = parser.parse_args()
# Init global var
global_var._init()
fig, ax = plt.subplots()
# 1. add select
add_editor()
# 2. show map
show_map()
```
#### File: jiyongfeng/imap/map.py
```python
import global_var
import lib.proto_utils as proto_utils
from modules.map.proto import map_pb2
from matplotlib.patches import Polygon
class Map:
def __init__(self):
self.map_pb = map_pb2.Map()
def load(self, map_file_name):
res = proto_utils.get_pb_from_file(map_file_name, self.map_pb)
return res is not None
def save(self, map_output_file):
proto_utils.write_pb_to_text_file(self.map_pb, map_output_file)
def draw_roads(self, ax, road_ids):
pass
def draw_lanes(self, ax, lane_ids):
for lane in self.map_pb.lane:
if len(lane_ids) == 0 or lane.id.id in lane_ids:
# todo(zero): add option
# self._draw_lane_boundary(lane, ax, "yellow")
self._draw_lane_central(lane, ax, 'g')
def draw_junctions(self, ax, junction_ids):
for junction in self.map_pb.junction:
if len(junction_ids) == 0 or junction.id.id in junction_ids:
self._draw_polygon_boundary(junction.polygon, ax, 'c')
# self._draw_polygon(junction.polygon, ax, 'c')
def draw_signals(self, ax):
pass
def draw_crosswalks(self, ax):
for crosswalk in self.map_pb.crosswalk:
# self._draw_polygon_boundary(crosswalk.polygon, ax, "red")
self._draw_polygon(crosswalk.polygon, ax, 'c')
def draw_stop_signs(self, ax):
for stop_sign in self.map_pb.stop_sign:
for stop_line in stop_sign.stop_line:
for curve in stop_line.segment:
self._draw_stop_line(curve.line_segment, ax, "tomato")
def draw_yields(self, ax):
pass
# for yield_sign in self.map_pb.yield:
# for stop_line in yield_sign.stop_line:
# for curve in stop_line.segment:
# self._draw_stop_line(curve.line_segment, ax, "yellow")
def draw_clear_areas(self, ax):
pass
def draw_overlaps(self, ax):
pass
def draw_speed_bumps(self, ax):
pass
def draw_parking_spaces(self, ax):
pass
def draw_pnc_junctions(self, ax):
pass
@staticmethod
def _draw_lane_boundary(lane, ax, color_val):
"""draw boundary"""
for curve in lane.left_boundary.curve.segment:
if curve.HasField('line_segment'):
px = []
py = []
for p in curve.line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, ls='-', c=color_val, alpha=0.5, picker=True)
for curve in lane.right_boundary.curve.segment:
if curve.HasField('line_segment'):
px = []
py = []
for p in curve.line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, ls='-', c=color_val, alpha=0.5, picker=True)
@staticmethod
def _draw_lane_central(lane, ax, color_val):
for curve in lane.central_curve.segment:
if curve.HasField('line_segment'):
px = []
py = []
for p in curve.line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
line2d, = ax.plot(px, py, ls='-', linewidth=5, c=color_val, alpha=0.5, picker=True)
global_var.set_artist_value(line2d, lane)
global_var.set_element_vaule(lane.id.id, line2d)
# ax.plot(px, py, 'o-', linewidth=5, c=color_val, alpha=0.5)
@staticmethod
def _draw_polygon_boundary(polygon, ax, color_val):
px = []
py = []
for point in polygon.point:
px.append(point.x)
py.append(point.y)
if px:
px.append(px[0])
py.append(py[0])
ax.plot(px, py, ls='-', linewidth=2, c=color_val, alpha=0.5, picker=True)
@staticmethod
def _draw_polygon(polygon, ax, color_val):
# todo(zero): need to fix
pxy = []
for point in polygon.point:
pxy.append([point.x, point.y])
polygon = Polygon(pxy, True)
ax.add_patch(polygon)
@staticmethod
def _draw_stop_line(line_segment, ax, color_val):
px = []
py = []
for p in line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, 'o-', linewidth=1, c=color_val, picker=True)
``` |
{
"source": "JiYonG-Lee/grr",
"score": 2
} |
#### File: grr_response_client_builder/repackers/osx.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import shutil
import zipfile
from grr_response_client_builder import build
from grr_response_client_builder import build_helpers
from grr_response_core.lib import utils
class DarwinClientRepacker(build.ClientRepacker):
"""Repackage OSX clients."""
def MakeDeployableBinary(self, template_path, output_path):
"""This will add the config to the client template."""
context = self.context + ["Client Context"]
utils.EnsureDirExists(os.path.dirname(output_path))
client_config_data = build_helpers.GetClientConfig(context)
shutil.copyfile(template_path, output_path)
zip_file = zipfile.ZipFile(output_path, mode="a")
zip_info = zipfile.ZipInfo(filename="config.yaml")
zip_file.writestr(zip_info, client_config_data)
zip_file.close()
return output_path
```
#### File: lib/rdfvalues/timeline_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os
import random
import stat as stat_mode
import time
from absl.testing import absltest
from grr_response_core.lib.rdfvalues import timeline as rdf_timeline
from grr_response_core.lib.util import temp
class TimelineEntryTest(absltest.TestCase):
def testFromStat(self):
with temp.AutoTempFilePath() as filepath:
time_before = round(time.time())
with io.open(filepath, mode="wb") as filedesc:
filedesc.write(b"1234567")
time_after = round(time.time())
# TODO(hanuszczak): `AutoTempFilePath` should return a `Path` object.
filepath_bytes = filepath.encode("utf-8")
filepath_stat = os.lstat(filepath)
entry = rdf_timeline.TimelineEntry.FromStat(filepath_bytes, filepath_stat)
self.assertEqual(entry.size, 7)
self.assertTrue(stat_mode.S_ISREG(entry.mode))
# TODO(hanuszczak): Switch this test to use nanosecond precision once we
# are Python 3.7-only.
self.assertBetween(round(entry.atime_ns / 1e9), time_before, time_after)
self.assertBetween(round(entry.mtime_ns / 1e9), time_before, time_after)
self.assertBetween(round(entry.ctime_ns / 1e9), time_before, time_after)
self.assertEqual(entry.dev, filepath_stat.st_dev)
self.assertEqual(entry.ino, filepath_stat.st_ino)
self.assertEqual(entry.uid, filepath_stat.st_uid)
self.assertEqual(entry.gid, filepath_stat.st_gid)
def testSerializeAndDeserializeStream(self):
serialize = rdf_timeline.TimelineEntry.SerializeStream
deserialize = rdf_timeline.TimelineEntry.DeserializeStream
def RandomEntry() -> rdf_timeline.TimelineEntry:
entry = rdf_timeline.TimelineEntry()
entry.path = os.urandom(4096)
entry.mode = random.randint(0x0000, 0xFFFF - 1)
entry.size = random.randint(0, 1e9)
return entry
entries = [RandomEntry() for _ in range(3000)]
self.assertEqual(list(deserialize(serialize(iter(entries)))), entries)
if __name__ == "__main__":
absltest.main()
```
#### File: grr_response_server/databases/db_flows_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import queue
import random
import time
import mock
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.util import compatibility
from grr_response_server import flow
from grr_response_server.databases import db
from grr_response_server.flows import file
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import hunt_objects as rdf_hunt_objects
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import test_lib
class DatabaseTestFlowMixin(object):
"""An abstract class for testing db.Database implementations.
This mixin adds methods to test the handling of flows.
"""
def _SetupClient(self, client_id=None):
client_id = client_id or u"C.1234567890123456"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
return client_id
def _SetupClientAndFlow(self, client_id=None, **additional_flow_args):
client_id = self._SetupClient(client_id)
flow_id = flow.RandomFlowId()
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow_id,
create_time=rdfvalue.RDFDatetime.Now(),
**additional_flow_args)
self.db.WriteFlowObject(rdf_flow)
return client_id, flow_id
def _SetupUser(self, username="foo"):
self.db.WriteGRRUser(username)
return username
def testClientActionRequestStorage(self):
client_id, flow_id = self._SetupClientAndFlow()
self.db.WriteFlowRequests([
rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=1)
])
req = rdf_flows.ClientActionRequest(
client_id=client_id, flow_id=flow_id, request_id=1)
self.db.WriteClientActionRequests([req])
read_reqs = self.db.ReadAllClientActionRequests(client_id)
self.assertLen(read_reqs, 1)
self.assertEqual(req, read_reqs[0])
self.db.DeleteClientActionRequests([req])
read_reqs = self.db.ReadAllClientActionRequests(client_id)
self.assertEmpty(read_reqs)
# Extra delete should not raise.
self.db.DeleteClientActionRequests([req])
# Deleting the same message multiple times is an error.
with self.assertRaises(ValueError):
self.db.DeleteClientActionRequests([req, req])
def testWriteClientActionRequestsRaisesOnUnknownRequest(self):
req = rdf_flows.ClientActionRequest(
client_id=u"C.1234567890000000", flow_id="ABCD1234", request_id=5)
with self.assertRaises(db.AtLeastOneUnknownRequestError):
self.db.WriteClientActionRequests([req])
def testClientActionRequestUpdate(self):
client_id, flow_id = self._SetupClientAndFlow()
req = rdf_flows.ClientActionRequest(
client_id=client_id, flow_id=flow_id, request_id=1)
self.db.WriteFlowRequests([
rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=1)
])
cpu_limit = req.cpu_limit_ms
self.assertGreater(cpu_limit, 1000000)
for _ in range(5):
req.cpu_limit_ms -= 100000
self.db.WriteClientActionRequests([req])
read_reqs = self.db.ReadAllClientActionRequests(client_id)
self.assertLen(read_reqs, 1)
self.assertEqual(req, read_reqs[0])
def testClientActionRequestLeasing(self):
client_id, flow_id = self._SetupClientAndFlow()
flow_requests = []
client_requests = []
for i in range(10):
flow_requests.append(
rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=i))
client_requests.append(
rdf_flows.ClientActionRequest(
client_id=client_id, flow_id=flow_id, request_id=i))
lease_time = rdfvalue.Duration.From(5, rdfvalue.MINUTES)
self.db.WriteFlowRequests(flow_requests)
self.db.WriteClientActionRequests(client_requests)
t0 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000)
with test_lib.FakeTime(t0):
t0_expiry = t0 + lease_time
leased = self.db.LeaseClientActionRequests(
client_id, lease_time=lease_time, limit=5)
self.assertLen(leased, 5)
for request in leased:
self.assertEqual(request.leased_until, t0_expiry)
self.assertEqual(request.leased_by, utils.ProcessIdString())
t1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 100)
with test_lib.FakeTime(t1):
t1_expiry = t1 + lease_time
leased = self.db.LeaseClientActionRequests(
client_id, lease_time=lease_time, limit=5)
self.assertLen(leased, 5)
for request in leased:
self.assertEqual(request.leased_until, t1_expiry)
self.assertEqual(request.leased_by, utils.ProcessIdString())
# Nothing left to lease.
leased = self.db.LeaseClientActionRequests(
client_id, lease_time=lease_time, limit=2)
self.assertEmpty(leased)
read = self.db.ReadAllClientActionRequests(client_id)
self.assertLen(read, 10)
for r in read:
self.assertEqual(r.leased_by, utils.ProcessIdString())
self.assertLen([r for r in read if r.leased_until == t0_expiry], 5)
self.assertLen([r for r in read if r.leased_until == t1_expiry], 5)
# Half the leases expired.
t2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 350)
with test_lib.FakeTime(t2):
leased = self.db.LeaseClientActionRequests(
client_id, lease_time=lease_time)
self.assertLen(leased, 5)
# All of them expired.
t3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 10350)
with test_lib.FakeTime(t3):
leased = self.db.LeaseClientActionRequests(
client_id, lease_time=lease_time)
self.assertLen(leased, 10)
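# TTL behaviour asserted below: every successful lease decrements the
# request's ttl, starting from db.Database.CLIENT_MESSAGES_TTL; once the
# ttl is exhausted the request is dropped from the database entirely.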
def testClientActionRequestsTTL(self):
client_id, flow_id = self._SetupClientAndFlow()
flow_requests = []
client_requests = []
for i in range(10):
flow_requests.append(
rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=i))
client_requests.append(
rdf_flows.ClientActionRequest(
client_id=client_id, flow_id=flow_id, request_id=i))
self.db.WriteFlowRequests(flow_requests)
self.db.WriteClientActionRequests(client_requests)
reqs = self.db.ReadAllClientActionRequests(client_id)
self.assertLen(reqs, 10)
for request in reqs:
self.assertEqual(request.ttl, db.Database.CLIENT_MESSAGES_TTL)
now = rdfvalue.RDFDatetime.Now()
lease_time = rdfvalue.Duration.From(60, rdfvalue.SECONDS)
for i in range(db.Database.CLIENT_MESSAGES_TTL):
now += rdfvalue.Duration.From(120, rdfvalue.SECONDS)
with test_lib.FakeTime(now):
leased = self.db.LeaseClientActionRequests(
client_id, lease_time=lease_time, limit=10)
self.assertLen(leased, 10)
# Check that the ttl is read.
for request in leased:
self.assertEqual(request.ttl, db.Database.CLIENT_MESSAGES_TTL - i - 1)
reqs = self.db.ReadAllClientActionRequests(client_id)
self.assertLen(reqs, 10)
for request in reqs:
self.assertEqual(request.ttl, db.Database.CLIENT_MESSAGES_TTL - i - 1)
now += rdfvalue.Duration.From(120, rdfvalue.SECONDS)
with test_lib.FakeTime(now):
leased = self.db.LeaseClientActionRequests(
client_id, lease_time=lease_time, limit=10)
self.assertEmpty(leased)
# ReadAllClientActionRequests also includes requests whose TTL has
# expired, so an empty result means the requests have actually been
# deleted from the db.
self.assertEqual(self.db.ReadAllClientActionRequests(client_id), [])
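# Flow object storage tests. Flows can only be written for clients that
# already have metadata in the db; otherwise db.UnknownClientError is
# raised, as the next test demonstrates.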
def testFlowWritingUnknownClient(self):
flow_id = u"1234ABCD"
client_id = u"C.1234567890123456"
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow_id,
create_time=rdfvalue.RDFDatetime.Now())
with self.assertRaises(db.UnknownClientError):
self.db.WriteFlowObject(rdf_flow)
def testFlowWriting(self):
flow_id = u"1234ABCD"
client_id = u"C.1234567890123456"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow_id,
long_flow_id=f"{client_id}/{flow_id}",
next_request_to_process=4,
create_time=rdfvalue.RDFDatetime.Now())
self.db.WriteFlowObject(rdf_flow)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
# Last update time has changed, everything else should be equal.
read_flow.last_update_time = None
self.assertEqual(read_flow, rdf_flow)
# Invalid flow id or client id raises.
with self.assertRaises(db.UnknownFlowError):
self.db.ReadFlowObject(client_id, u"1234AAAA")
with self.assertRaises(db.UnknownFlowError):
self.db.ReadFlowObject(u"C.1234567890000000", flow_id)
def testFlowOverwrite(self):
flow_id = u"1234ABCD"
client_id = u"C.1234567890123456"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow_id,
next_request_to_process=4,
create_time=rdfvalue.RDFDatetime.Now())
self.db.WriteFlowObject(rdf_flow)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
# Last update time has changed, everything else should be equal.
read_flow.last_update_time = None
self.assertEqual(read_flow, rdf_flow)
# Now change the flow object.
rdf_flow.next_request_to_process = 5
self.db.WriteFlowObject(rdf_flow)
read_flow_after_update = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(read_flow_after_update.next_request_to_process, 5)
def testFlowOverwriteFailsWithAllowUpdateFalse(self):
flow_id = u"1234ABCD"
client_id = u"C.1234567890123456"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow_id,
next_request_to_process=4,
create_time=rdfvalue.RDFDatetime.Now())
self.db.WriteFlowObject(rdf_flow, allow_update=False)
# Now change the flow object.
rdf_flow.next_request_to_process = 5
with self.assertRaises(db.FlowExistsError) as context:
self.db.WriteFlowObject(rdf_flow, allow_update=False)
self.assertEqual(context.exception.client_id, client_id)
self.assertEqual(context.exception.flow_id, flow_id)
read_flow_after_update = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(read_flow_after_update.next_request_to_process, 4)
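# The two timestamp tests below assert that the db records a create_time
# close to the write time, and that a missing create_time is filled in by
# the db rather than stored as None.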
def testFlowTimestamp(self):
client_id = "C.0123456789012345"
flow_id = "0F00B430"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
before_timestamp = rdfvalue.RDFDatetime.Now()
flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
self.db.WriteFlowObject(flow_obj)
after_timestamp = rdfvalue.RDFDatetime.Now()
flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
self.assertBetween(flow_obj.create_time, before_timestamp, after_timestamp)
def testFlowTimestampWithMissingCreationTime(self):
client_id = "C.0123456789012345"
flow_id = "0F00B430"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
before_timestamp = rdfvalue.RDFDatetime.Now()
flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
flow_obj.create_time = None
self.db.WriteFlowObject(flow_obj)
after_timestamp = rdfvalue.RDFDatetime.Now()
flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
self.assertBetween(flow_obj.create_time, before_timestamp, after_timestamp)
def testFlowNameWithMissingNameInProtobuf(self):
client_id = "C.0123456789012345"
flow_id = "0F00B430"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
flow_obj.flow_class_name = "Quux"
self.db.WriteFlowObject(flow_obj)
flow_obj.flow_class_name = None
self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
self.assertEqual(flow_obj.flow_class_name, "Quux")
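# The "Unchangable" tests below pin down that UpdateFlow must not modify
# key metadata (client id, flow id, long flow id, parent ids, flow class
# name, creator) even when the caller passes a flow object with those
# fields changed or cleared.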
def testFlowKeyMetadataUnchangable(self):
client_id = "C.0123456789012345"
flow_id = "0F00B430"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
flow_obj.long_flow_id = f"{client_id}/{flow_id}"
self.db.WriteFlowObject(flow_obj)
flow_obj.client_id = "C.0123456789ABCDEF"
flow_obj.flow_id = "0B43F0000"
flow_obj.long_flow_id = f"{flow_obj.client_id}/{flow_obj.flow_id}"
self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
self.assertEqual(flow_obj.client_id, client_id)
self.assertEqual(flow_obj.flow_id, flow_id)
self.assertEqual(flow_obj.long_flow_id, f"{client_id}/{flow_id}")
def testFlowParentMetadataUnchangable(self):
client_id = "C.0123456789012345"
flow_id = "0F00B430"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
flow_obj.parent_flow_id = "0B43F000"
flow_obj.parent_hunt_id = "48151623"
self.db.WriteFlowObject(flow_obj)
flow_obj.parent_flow_id = "08133780"
flow_obj.parent_hunt_id = "01081080"
self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
self.assertEqual(flow_obj.parent_flow_id, "0B43F000")
self.assertEqual(flow_obj.parent_hunt_id, "48151623")
def testFlowNameUnchangable(self):
client_id = "C.0123456789012345"
flow_id = "0F00B430"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
flow_obj.flow_class_name = "Quux"
self.db.WriteFlowObject(flow_obj)
flow_obj.flow_class_name = "Norf"
self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
self.assertEqual(flow_obj.flow_class_name, "Quux")
def testFlowCreatorUnchangable(self):
client_id = "C.0123456789012345"
flow_id = "0F00B430"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
flow_obj.creator = "norf"
self.db.WriteFlowObject(flow_obj)
flow_obj.creator = "thud"
self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
self.assertEqual(flow_obj.creator, "norf")
def testFlowCreatorUnsetInProtobuf(self):
client_id = "C.0123456789012345"
flow_id = "0F00B430"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
flow_obj = rdf_flow_objects.Flow(client_id=client_id, flow_id=flow_id)
flow_obj.creator = "norf"
self.db.WriteFlowObject(flow_obj)
flow_obj.creator = None
self.db.UpdateFlow(client_id=client_id, flow_id=flow_id, flow_obj=flow_obj)
flow_obj = self.db.ReadFlowObject(client_id=client_id, flow_id=flow_id)
self.assertEqual(flow_obj.creator, "norf")
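# ReadAllFlowObjects filter tests: client_id, min_create_time,
# max_create_time and include_child_flows are each covered in isolation
# below, and then all together in testReadAllFlowObjectsWithAllConditions.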
def testReadAllFlowObjects(self):
client_id_1 = "C.1111111111111111"
client_id_2 = "C.2222222222222222"
self.db.WriteClientMetadata(client_id_1, fleetspeak_enabled=False)
self.db.WriteClientMetadata(client_id_2, fleetspeak_enabled=False)
# Write a flow and a child flow for client 1.
flow1 = rdf_flow_objects.Flow(
client_id=client_id_1,
flow_id="000A0001",
create_time=rdfvalue.RDFDatetime.Now())
self.db.WriteFlowObject(flow1)
flow2 = rdf_flow_objects.Flow(
client_id=client_id_1,
flow_id="000A0002",
parent_flow_id="000A0001",
create_time=rdfvalue.RDFDatetime.Now())
self.db.WriteFlowObject(flow2)
# Same flow id for client 2.
flow3 = rdf_flow_objects.Flow(
client_id=client_id_2,
flow_id="000A0001",
create_time=rdfvalue.RDFDatetime.Now())
self.db.WriteFlowObject(flow3)
flows = self.db.ReadAllFlowObjects()
self.assertCountEqual([f.flow_id for f in flows],
["000A0001", "000A0002", "000A0001"])
def testReadAllFlowObjectsWithMinCreateTime(self):
now = rdfvalue.RDFDatetime.Now()
client_id_1 = "C.1111111111111111"
self.db.WriteClientMetadata(client_id_1, fleetspeak_enabled=False)
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_1,
flow_id="0000001A",
create_time=now - rdfvalue.Duration.From(2, rdfvalue.HOURS)))
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_1,
flow_id="0000001B",
create_time=now - rdfvalue.Duration.From(1, rdfvalue.HOURS)))
flows = self.db.ReadAllFlowObjects(
min_create_time=now - rdfvalue.Duration.From(1, rdfvalue.HOURS))
self.assertEqual([f.flow_id for f in flows], ["0000001B"])
def testReadAllFlowObjectsWithMaxCreateTime(self):
now = rdfvalue.RDFDatetime.Now()
client_id_1 = "C.1111111111111111"
self.db.WriteClientMetadata(client_id_1, fleetspeak_enabled=False)
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_1,
flow_id="0000001A",
create_time=now - rdfvalue.Duration.From(2, rdfvalue.HOURS)))
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_1,
flow_id="0000001B",
create_time=now - rdfvalue.Duration.From(1, rdfvalue.HOURS)))
flows = self.db.ReadAllFlowObjects(
max_create_time=now - rdfvalue.Duration.From(2, rdfvalue.HOURS))
self.assertEqual([f.flow_id for f in flows], ["0000001A"])
def testReadAllFlowObjectsWithClientID(self):
now = rdfvalue.RDFDatetime.Now()
client_id_1 = "C.1111111111111111"
client_id_2 = "C.2222222222222222"
self.db.WriteClientMetadata(client_id_1, fleetspeak_enabled=False)
self.db.WriteClientMetadata(client_id_2, fleetspeak_enabled=False)
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_1, flow_id="0000001A", create_time=now))
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_2, flow_id="0000001B", create_time=now))
flows = self.db.ReadAllFlowObjects(client_id=client_id_1)
self.assertEqual([f.flow_id for f in flows], ["0000001A"])
def testReadAllFlowObjectsWithoutChildren(self):
now = rdfvalue.RDFDatetime.Now()
client_id_1 = "C.1111111111111111"
self.db.WriteClientMetadata(client_id_1, fleetspeak_enabled=False)
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_1, flow_id="0000001A", create_time=now))
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_1,
flow_id="0000001B",
parent_flow_id="0000001A",
create_time=now))
flows = self.db.ReadAllFlowObjects(include_child_flows=False)
self.assertEqual([f.flow_id for f in flows], ["0000001A"])
def testReadAllFlowObjectsWithAllConditions(self):
now = rdfvalue.RDFDatetime.Now()
client_id_1 = "C.1111111111111111"
client_id_2 = "C.2222222222222222"
self.db.WriteClientMetadata(client_id_1, fleetspeak_enabled=False)
self.db.WriteClientMetadata(client_id_2, fleetspeak_enabled=False)
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_1, flow_id="0000000A", create_time=now))
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_1,
flow_id="0000000B",
parent_flow_id="0000000A",
create_time=now))
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_1,
flow_id="0000000C",
create_time=now - rdfvalue.Duration.From(1, rdfvalue.SECONDS)))
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_1,
flow_id="0000000D",
create_time=now + rdfvalue.Duration.From(1, rdfvalue.SECONDS)))
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
client_id=client_id_2, flow_id="0000000E", create_time=now))
flows = self.db.ReadAllFlowObjects(
client_id=client_id_1,
min_create_time=now,
max_create_time=now,
include_child_flows=False)
self.assertEqual([f.flow_id for f in flows], ["0000000A"])
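# UpdateFlow tests: updating via a full flow_obj, via individual fields
# (flow_state, client_crash_info, pending_termination, processing_*), plus
# the error cases for unknown flows and for passing flow_obj and
# flow_state in the same call.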
def testUpdateUnknownFlow(self):
_, flow_id = self._SetupClientAndFlow()
crash_info = rdf_client.ClientCrash(crash_message="oh no")
with self.assertRaises(db.UnknownFlowError):
self.db.UpdateFlow(
u"C.1234567890AAAAAA", flow_id, client_crash_info=crash_info)
def testFlowUpdateChangesAllFields(self):
client_id, flow_id = self._SetupClientAndFlow()
flow_obj = self.db.ReadFlowObject(client_id, flow_id)
flow_obj.cpu_time_used.user_cpu_time = 0.5
flow_obj.cpu_time_used.system_cpu_time = 1.5
flow_obj.num_replies_sent = 10
flow_obj.network_bytes_sent = 100
self.db.UpdateFlow(client_id, flow_id, flow_obj=flow_obj)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
# Last update times will differ.
read_flow.last_update_time = None
flow_obj.last_update_time = None
self.assertEqual(read_flow, flow_obj)
def testFlowStateUpdate(self):
client_id, flow_id = self._SetupClientAndFlow()
# Check that just updating flow_state works fine.
self.db.UpdateFlow(
client_id, flow_id, flow_state=rdf_flow_objects.Flow.FlowState.CRASHED)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(read_flow.flow_state,
rdf_flow_objects.Flow.FlowState.CRASHED)
# TODO(user): remove the option to update the flow by passing flow_obj.
# It makes the DB API unnecessarily complicated.
# Check that changing flow_state through flow_obj works too.
read_flow.flow_state = rdf_flow_objects.Flow.FlowState.RUNNING
self.db.UpdateFlow(client_id, flow_id, flow_obj=read_flow)
read_flow_2 = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(read_flow_2.flow_state,
rdf_flow_objects.Flow.FlowState.RUNNING)
def testUpdatingFlowObjAndFlowStateInSingleUpdateRaises(self):
client_id, flow_id = self._SetupClientAndFlow()
read_flow = self.db.ReadFlowObject(client_id, flow_id)
with self.assertRaises(db.ConflictingUpdateFlowArgumentsError):
self.db.UpdateFlow(
client_id,
flow_id,
flow_obj=read_flow,
flow_state=rdf_flow_objects.Flow.FlowState.CRASHED)
def testCrashInfoUpdate(self):
client_id, flow_id = self._SetupClientAndFlow()
crash_info = rdf_client.ClientCrash(crash_message="oh no")
self.db.UpdateFlow(client_id, flow_id, client_crash_info=crash_info)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(read_flow.client_crash_info, crash_info)
def testPendingTerminationUpdate(self):
client_id, flow_id = self._SetupClientAndFlow()
pending_termination = rdf_flow_objects.PendingFlowTermination(reason="test")
self.db.UpdateFlow(
client_id, flow_id, pending_termination=pending_termination)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(read_flow.pending_termination, pending_termination)
def testProcessingInformationUpdate(self):
client_id, flow_id = self._SetupClientAndFlow()
now = rdfvalue.RDFDatetime.Now()
deadline = now + rdfvalue.Duration.From(6, rdfvalue.HOURS)
self.db.UpdateFlow(
client_id,
flow_id,
processing_on="Worker1",
processing_since=now,
processing_deadline=deadline)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(read_flow.processing_on, "Worker1")
self.assertEqual(read_flow.processing_since, now)
self.assertEqual(read_flow.processing_deadline, deadline)
# None can be used to clear some fields.
self.db.UpdateFlow(client_id, flow_id, processing_on=None)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(read_flow.processing_on, "")
self.db.UpdateFlow(client_id, flow_id, processing_since=None)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(read_flow.processing_since, None)
self.db.UpdateFlow(client_id, flow_id, processing_deadline=None)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(read_flow.processing_deadline, None)
def testUpdateFlowsIgnoresMissingFlows(self):
_, flow_id = self._SetupClientAndFlow()
pending_termination = rdf_flow_objects.PendingFlowTermination(reason="test")
self.db.UpdateFlows([("C.1234567890AAAAAA", flow_id),
("C.1234567890BBBBBB", flow_id)],
pending_termination=pending_termination)
def testUpdateFlowsUpdatesMultipleFlowsCorrectly(self):
client_id_1, flow_id_1 = self._SetupClientAndFlow(
client_id="C.1234567890AAAAAA")
client_id_2, flow_id_2 = self._SetupClientAndFlow(
client_id="C.1234567890BBBBBB")
pending_termination = rdf_flow_objects.PendingFlowTermination(reason="test")
self.db.UpdateFlows([(client_id_1, flow_id_1), (client_id_2, flow_id_2)],
pending_termination=pending_termination)
read_flow_1 = self.db.ReadFlowObject(client_id_1, flow_id_1)
self.assertEqual(read_flow_1.pending_termination, pending_termination)
read_flow_2 = self.db.ReadFlowObject(client_id_2, flow_id_2)
self.assertEqual(read_flow_2.pending_termination, pending_termination)
def testRequestWriting(self):
client_id_1 = u"C.1234567890123456"
client_id_2 = u"C.1234567890123457"
flow_id_1 = u"1234ABCD"
flow_id_2 = u"ABCD1234"
with self.assertRaises(db.AtLeastOneUnknownFlowError):
self.db.WriteFlowRequests([
rdf_flow_objects.FlowRequest(
client_id=client_id_1, flow_id=flow_id_1)
])
for client_id in [client_id_1, client_id_2]:
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
requests = []
for flow_id in [flow_id_1, flow_id_2]:
for client_id in [client_id_1, client_id_2]:
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow_id,
create_time=rdfvalue.RDFDatetime.Now())
self.db.WriteFlowObject(rdf_flow)
for i in range(1, 4):
requests.append(
rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=i))
self.db.WriteFlowRequests(requests)
for flow_id in [flow_id_1, flow_id_2]:
for client_id in [client_id_1, client_id_2]:
read = self.db.ReadAllFlowRequestsAndResponses(
client_id=client_id, flow_id=flow_id)
self.assertLen(read, 3)
self.assertEqual([req.request_id for (req, _) in read],
list(range(1, 4)))
for _, responses in read:
self.assertEqual(responses, {})
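# Helper for the three tests that follow. It writes the caller's request
# together with a marked "checkpoint" request on a second flow and returns
# the number of caller-supplied requests that reached the registered flow
# processing handler (0 or 1 in the tests below).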
def _WriteRequestForProcessing(self, client_id, flow_id, request_id):
req_func = mock.Mock()
self.db.RegisterFlowProcessingHandler(req_func)
self.addCleanup(self.db.UnregisterFlowProcessingHandler)
_, marked_flow_id = self._SetupClientAndFlow(
client_id=client_id, next_request_to_process=3)
# We write 2 requests, one after another:
# the first is the request provided by the caller, the second is a
# special (i.e. marked) one.
#
# The marked request is guaranteed to trigger processing. This way tests
# that rely on the flow processing callback being called (or not being
# called) can avoid race conditions: the callback is guaranteed to fire
# for the marked request only after it has either fired or not fired for
# the caller-supplied request. Effectively, the callback's invocation for
# the marked request acts as a checkpoint: once it happens we can safely
# make assertions.
request = rdf_flow_objects.FlowRequest(
flow_id=flow_id,
client_id=client_id,
request_id=request_id,
needs_processing=True)
marked_request = rdf_flow_objects.FlowRequest(
flow_id=marked_flow_id,
client_id=client_id,
request_id=3,
needs_processing=True)
self.db.WriteFlowRequests([request, marked_request])
marked_found = False
cur_time = rdfvalue.RDFDatetime.Now()
while True:
requests = []
for call in req_func.call_args_list:
requests.extend(call[0])
if any(r.flow_id == marked_flow_id for r in requests):
# Poll-based implementations (e.g. MySQL) give no guarantees
# about the order in which requests are going to be processed.
# In such implementations, when 2 requests are retrieved from the DB,
# they are processed concurrently in parallel threads. We therefore
# allow an additional 0.1 seconds to pass after the marked flow
# processing request is handled, so that any parallel processing can
# finish.
if marked_found:
return len([r for r in requests if r.flow_id != marked_flow_id])
else:
marked_found = True
time.sleep(0.1)
if rdfvalue.RDFDatetime.Now() - cur_time > rdfvalue.Duration.From(
10, rdfvalue.SECONDS):
self.fail("Flow request was not processed in time.")
def testRequestWritingHighIDDoesntTriggerFlowProcessing(self):
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=3)
requests_triggered = self._WriteRequestForProcessing(client_id, flow_id, 4)
# Not the expected request.
self.assertEqual(requests_triggered, 0)
def testRequestWritingLowIDDoesntTriggerFlowProcessing(self):
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=3)
requests_triggered = self._WriteRequestForProcessing(client_id, flow_id, 2)
# Not the expected request.
self.assertEqual(requests_triggered, 0)
def testRequestWritingExpectedIDTriggersFlowProcessing(self):
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=3)
requests_triggered = self._WriteRequestForProcessing(client_id, flow_id, 3)
# This one is.
self.assertEqual(requests_triggered, 1)
def testFlowRequestsWithStartTimeAreCorrectlyDelayed(self):
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=3)
req_func = mock.Mock()
self.db.RegisterFlowProcessingHandler(req_func)
self.addCleanup(self.db.UnregisterFlowProcessingHandler)
cur_time = rdfvalue.RDFDatetime.Now()
request = rdf_flow_objects.FlowRequest(
flow_id=flow_id,
client_id=client_id,
request_id=3,
start_time=cur_time + rdfvalue.Duration.From(2, rdfvalue.SECONDS),
needs_processing=True)
self.db.WriteFlowRequests([request])
self.assertEqual(req_func.call_count, 0)
while req_func.call_count == 0:
time.sleep(0.1)
if rdfvalue.RDFDatetime.Now() - cur_time > rdfvalue.Duration.From(
10, rdfvalue.SECONDS):
self.fail("Flow request was not processed in time.")
self.assertGreaterEqual(rdfvalue.RDFDatetime.Now() - cur_time,
rdfvalue.Duration.From(2, rdfvalue.SECONDS))
def testDeleteFlowRequests(self):
client_id, flow_id = self._SetupClientAndFlow()
requests = []
responses = []
client_requests = []
for request_id in range(1, 4):
requests.append(
rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=request_id))
responses.append(
rdf_flow_objects.FlowResponse(
client_id=client_id,
flow_id=flow_id,
request_id=request_id,
response_id=1))
client_requests.append(
rdf_flows.ClientActionRequest(
client_id=client_id, flow_id=flow_id, request_id=request_id))
self.db.WriteFlowRequests(requests)
self.db.WriteFlowResponses(responses)
self.db.WriteClientActionRequests(client_requests)
request_list = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)
self.assertCountEqual([req.request_id for req, _ in request_list],
[req.request_id for req in requests])
random.shuffle(requests)
while requests:
request = requests.pop()
self.db.DeleteFlowRequests([request])
request_list = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)
self.assertCountEqual([req.request_id for req, _ in request_list],
[req.request_id for req in requests])
def testResponsesForUnknownFlow(self):
client_id = u"C.1234567890123456"
flow_id = u"1234ABCD"
# This will not raise but also not write anything.
with test_lib.SuppressLogs():
self.db.WriteFlowResponses([
rdf_flow_objects.FlowResponse(
client_id=client_id, flow_id=flow_id, request_id=1, response_id=1)
])
read = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)
self.assertEqual(read, [])
def testResponsesForUnknownRequest(self):
client_id, flow_id = self._SetupClientAndFlow()
request = rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=1)
self.db.WriteFlowRequests([request])
# Write two responses at once: one for a request that exists, one for a
# request that doesn't.
with test_lib.SuppressLogs():
self.db.WriteFlowResponses([
rdf_flow_objects.FlowResponse(
client_id=client_id, flow_id=flow_id, request_id=1,
response_id=1),
rdf_flow_objects.FlowResponse(
client_id=client_id, flow_id=flow_id, request_id=2, response_id=1)
])
# We should have one response in the db.
read = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)
self.assertLen(read, 1)
request, responses = read[0]
self.assertLen(responses, 1)
def testStatusForUnknownRequest(self):
client_id, flow_id = self._SetupClientAndFlow()
request = rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=1)
self.db.WriteFlowRequests([request])
# Write two status responses at once, one for the request that exists, one
# for a request that doesn't.
with test_lib.SuppressLogs():
self.db.WriteFlowResponses([
rdf_flow_objects.FlowStatus(
client_id=client_id, flow_id=flow_id, request_id=1,
response_id=1),
rdf_flow_objects.FlowStatus(
client_id=client_id, flow_id=flow_id, request_id=2, response_id=1)
])
# We should have one response in the db.
read = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)
self.assertLen(read, 1)
request, responses = read[0]
self.assertLen(responses, 1)
self.assertEqual(request.nr_responses_expected, 1)
def testResponseWriting(self):
client_id, flow_id = self._SetupClientAndFlow()
request = rdf_flow_objects.FlowRequest(
client_id=client_id,
flow_id=flow_id,
request_id=1,
needs_processing=False)
self.db.WriteFlowRequests([request])
responses = [
rdf_flow_objects.FlowResponse(
client_id=client_id, flow_id=flow_id, request_id=1, response_id=i)
for i in range(3)
]
self.db.WriteFlowResponses(responses)
all_requests = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)
self.assertLen(all_requests, 1)
read_request, read_responses = all_requests[0]
self.assertEqual(read_request, request)
self.assertEqual(list(read_responses), [0, 1, 2])
for response_id, response in read_responses.items():
self.assertEqual(response.response_id, response_id)
def testResponseWritingForDuplicateResponses(self):
client_id, flow_id = self._SetupClientAndFlow()
request = rdf_flow_objects.FlowRequest(
client_id=client_id,
flow_id=flow_id,
request_id=1,
needs_processing=False)
self.db.WriteFlowRequests([request])
responses = [
rdf_flow_objects.FlowResponse(
client_id=client_id, flow_id=flow_id, request_id=1, response_id=0),
rdf_flow_objects.FlowResponse(
client_id=client_id, flow_id=flow_id, request_id=1, response_id=0)
]
self.db.WriteFlowResponses(responses)
all_requests = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)
self.assertLen(all_requests, 1)
read_request, read_responses = all_requests[0]
self.assertEqual(read_request, request)
self.assertEqual(list(read_responses), [0])
for response_id, response in read_responses.items():
self.assertEqual(response.response_id, response_id)
def testCompletingMultipleRequests(self):
client_id, flow_id = self._SetupClientAndFlow()
requests = []
responses = []
for i in range(5):
requests.append(
rdf_flow_objects.FlowRequest(
client_id=client_id,
flow_id=flow_id,
request_id=i,
needs_processing=False))
responses.append(
rdf_flow_objects.FlowResponse(
client_id=client_id, flow_id=flow_id, request_id=i,
response_id=1))
responses.append(
rdf_flow_objects.FlowStatus(
client_id=client_id, flow_id=flow_id, request_id=i,
response_id=2))
self.db.WriteFlowRequests(requests)
# Complete all requests at once.
self.db.WriteFlowResponses(responses)
read = self.db.ReadAllFlowRequestsAndResponses(
client_id=client_id, flow_id=flow_id)
self.assertEqual(len(read), 5)
for req, _ in read:
self.assertTrue(req.needs_processing)
def testStatusMessagesCanBeWrittenAndRead(self):
client_id, flow_id = self._SetupClientAndFlow()
request = rdf_flow_objects.FlowRequest(
client_id=client_id,
flow_id=flow_id,
request_id=1,
needs_processing=False)
self.db.WriteFlowRequests([request])
responses = [
rdf_flow_objects.FlowResponse(
client_id=client_id, flow_id=flow_id, request_id=1, response_id=i)
for i in range(3)
]
# Also store an Iterator, why not.
responses.append(
rdf_flow_objects.FlowIterator(
client_id=client_id, flow_id=flow_id, request_id=1, response_id=3))
responses.append(
rdf_flow_objects.FlowStatus(
client_id=client_id, flow_id=flow_id, request_id=1, response_id=4))
self.db.WriteFlowResponses(responses)
all_requests = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)
self.assertLen(all_requests, 1)
_, read_responses = all_requests[0]
self.assertEqual(list(read_responses), [0, 1, 2, 3, 4])
for i in range(3):
self.assertIsInstance(read_responses[i], rdf_flow_objects.FlowResponse)
self.assertIsInstance(read_responses[3], rdf_flow_objects.FlowIterator)
self.assertIsInstance(read_responses[4], rdf_flow_objects.FlowStatus)
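# Helpers for the request-completion tests below. _ResponsesAndStatus
# builds num_responses FlowResponse messages followed by a FlowStatus;
# _WriteCompleteResponses writes them in random order and asserts that
# needs_processing only flips to True once the final (status) message has
# been written.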
def _ReadRequest(self, client_id, flow_id, request_id):
all_requests = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)
for request, unused_responses in all_requests:
if request.request_id == request_id:
return request
def _ResponsesAndStatus(self, client_id, flow_id, request_id, num_responses):
return [
rdf_flow_objects.FlowResponse(
client_id=client_id,
flow_id=flow_id,
request_id=request_id,
response_id=i) for i in range(1, num_responses + 1)
] + [
rdf_flow_objects.FlowStatus(
client_id=client_id,
flow_id=flow_id,
request_id=request_id,
response_id=num_responses + 1)
]
def _WriteRequestAndCompleteResponses(self, client_id, flow_id, request_id,
num_responses):
request = rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=request_id)
self.db.WriteFlowRequests([request])
return self._WriteCompleteResponses(
client_id=client_id,
flow_id=flow_id,
request_id=request_id,
num_responses=num_responses)
def _WriteCompleteResponses(self, client_id, flow_id, request_id,
num_responses):
# Write <num_responses> responses and a status in random order.
responses = self._ResponsesAndStatus(client_id, flow_id, request_id,
num_responses)
random.shuffle(responses)
for response in responses:
request = self._ReadRequest(client_id, flow_id, request_id)
self.assertIsNotNone(request)
# This is false up to the moment when we write the last response.
self.assertFalse(request.needs_processing)
self.db.WriteFlowResponses([response])
# Now that we sent all responses, the request needs processing.
request = self._ReadRequest(client_id, flow_id, request_id)
self.assertTrue(request.needs_processing)
self.assertEqual(request.nr_responses_expected, len(responses))
# Flow processing request might have been generated.
return len(self.db.ReadFlowProcessingRequests())
def testResponsesForEarlierRequestDontTriggerFlowProcessing(self):
# Write a flow that is waiting for request #2.
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=2)
requests_triggered = self._WriteRequestAndCompleteResponses(
client_id, flow_id, request_id=1, num_responses=3)
# No flow processing request generated for request 1 (we are waiting
# for #2).
self.assertEqual(requests_triggered, 0)
def testResponsesForLaterRequestDontTriggerFlowProcessing(self):
# Write a flow that is waiting for request #2.
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=2)
requests_triggered = self._WriteRequestAndCompleteResponses(
client_id, flow_id, request_id=3, num_responses=7)
# No flow processing request generated for request 3 (we are waiting
# for #2).
self.assertEqual(requests_triggered, 0)
def testResponsesForExpectedRequestTriggerFlowProcessing(self):
# Write a flow that is waiting for request #2.
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=2)
requests_triggered = self._WriteRequestAndCompleteResponses(
client_id, flow_id, request_id=2, num_responses=5)
# This one generates a request.
self.assertEqual(requests_triggered, 1)
def testRewritingResponsesForRequestDoesNotTriggerAdditionalProcessing(self):
# Write a flow that is waiting for request #2.
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=2)
marked_client_id, marked_flow_id = self._SetupClientAndFlow(
next_request_to_process=2)
request = rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=2)
self.db.WriteFlowRequests([request])
marked_request = rdf_flow_objects.FlowRequest(
client_id=marked_client_id, flow_id=marked_flow_id, request_id=2)
self.db.WriteFlowRequests([marked_request])
# Generate responses together with a status message.
responses = self._ResponsesAndStatus(client_id, flow_id, 2, 4)
marked_responses = self._ResponsesAndStatus(marked_client_id,
marked_flow_id, 2, 4)
req_func = mock.Mock()
self.db.RegisterFlowProcessingHandler(req_func)
self.addCleanup(self.db.UnregisterFlowProcessingHandler)
# Write responses. This should trigger flow request processing.
self.db.WriteFlowResponses(responses)
cur_time = rdfvalue.RDFDatetime.Now()
while True:
if req_func.call_count == 1:
break
time.sleep(0.1)
if rdfvalue.RDFDatetime.Now() - cur_time > rdfvalue.Duration.From(
10, rdfvalue.SECONDS):
self.fail("Flow request was not processed in time.")
req_func.reset_mock()
# Write responses again. No further processing of these should be triggered.
self.db.WriteFlowResponses(responses)
# Testing that a callback is *not* called is not entirely trivial. Waiting
# for 1 (or 5, or 10) seconds is not acceptable (too slow). The approach
# used here is to trigger a known event, a kind of checkpoint, that is
# guaranteed to happen after the callback has either been called or not
# been called.
#
# Explicitly write marked_responses to trigger the flow processing handler.
# Once the handler has been triggered for marked_responses, we can check
# that it wasn't triggered for responses.
self.db.WriteFlowResponses(marked_responses)
cur_time = rdfvalue.RDFDatetime.Now()
marked_found = False
while True:
requests = []
for call in req_func.call_args_list:
requests.extend(call[0])
if any(r.flow_id == marked_flow_id for r in requests):
# Poll-based implementations (e.g. MySQL) give no guarantees
# about the order in which requests are going to be processed.
# In such implementations, when 2 requests are retrieved from the DB,
# they are processed concurrently in parallel threads. We therefore
# allow an additional 0.1 seconds to pass after the marked flow
# processing request is handled, so that any parallel processing can
# finish.
if marked_found:
self.assertEmpty([r for r in requests if r.flow_id != marked_flow_id])
break
else:
marked_found = True
time.sleep(0.1)
if rdfvalue.RDFDatetime.Now() - cur_time > rdfvalue.Duration.From(
10, rdfvalue.SECONDS):
self.fail("Flow request was not processed in time.")
def testResponsesAnyRequestTriggerClientActionRequestDeletion(self):
# Write a flow that is waiting for request #2.
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=2)
for i in range(5):
self.db.WriteFlowRequests([
rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=i)
])
req = rdf_flows.ClientActionRequest(
client_id=client_id, flow_id=flow_id, request_id=3)
self.db.WriteClientActionRequests([req])
self.assertTrue(self.db.ReadAllClientActionRequests(client_id))
self._WriteCompleteResponses(
client_id, flow_id, request_id=3, num_responses=3)
self.assertFalse(self.db.ReadAllClientActionRequests(client_id))
def _WriteResponses(self, num):
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=2)
request = rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=2)
self.db.WriteFlowRequests([request])
# Generate responses together with a status message.
responses = self._ResponsesAndStatus(client_id, flow_id, 2, num)
# Write responses. This should trigger flow request processing.
self.db.WriteFlowResponses(responses)
return request, responses
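# Large-batch tests. The 40001 and 11000 counts used below presumably
# exceed internal batching limits of the db implementations (this is an
# assumption about why these numbers were chosen); the tests themselves
# only assert that such volumes can be written, read and deleted.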
def test40001RequestsCanBeWrittenAndRead(self):
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=2)
requests = [
rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=i)
for i in range(40001)
]
self.db.WriteFlowRequests(requests)
self.assertLen(
self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id), 40001)
def test40001ResponsesCanBeWrittenAndRead(self):
request, responses = self._WriteResponses(40001)
expected_request = rdf_flow_objects.FlowRequest(
client_id=request.client_id,
flow_id=request.flow_id,
request_id=request.request_id,
needs_processing=True,
nr_responses_expected=40002)
rrp = self.db.ReadFlowRequestsReadyForProcessing(
request.client_id,
request.flow_id,
next_needed_request=request.request_id)
self.assertLen(rrp, 1)
fetched_request, fetched_responses = rrp[request.request_id]
self.assertEqual(fetched_request, expected_request)
self.assertEqual(fetched_responses, responses)
arrp = self.db.ReadAllFlowRequestsAndResponses(request.client_id,
request.flow_id)
self.assertLen(arrp, 1)
fetched_request, fetched_responses = arrp[0]
self.assertEqual(fetched_request, expected_request)
self.assertEqual([r for _, r in sorted(fetched_responses.items())],
responses)
def testDeleteAllFlowRequestsAndResponsesHandles11000Responses(self):
request, _ = self._WriteResponses(11000)
self.db.DeleteAllFlowRequestsAndResponses(request.client_id,
request.flow_id)
arrp = self.db.ReadAllFlowRequestsAndResponses(request.client_id,
request.flow_id)
self.assertEmpty(arrp)
def testDeleteFlowRequestsHandles11000Responses(self):
request, _ = self._WriteResponses(11000)
self.db.DeleteFlowRequests([request])
arrp = self.db.ReadAllFlowRequestsAndResponses(request.client_id,
request.flow_id)
self.assertEmpty(arrp)
def testDeleteFlowRequestsHandles11000Requests(self):
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=2)
requests = [
rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=i)
for i in range(2, 11002)
]
self.db.WriteFlowRequests(requests)
self.assertLen(
self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id), 11000)
self.db.DeleteFlowRequests(requests)
self.assertEmpty(
self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id))
def testLeaseFlowForProcessingRaisesIfParentHuntIsStoppedOrCompleted(self):
hunt_obj = rdf_hunt_objects.Hunt()
self.db.WriteHuntObject(hunt_obj)
self.db.UpdateHuntObject(
hunt_obj.hunt_id, hunt_state=rdf_hunt_objects.Hunt.HuntState.STOPPED)
client_id, flow_id = self._SetupClientAndFlow(
parent_hunt_id=hunt_obj.hunt_id)
processing_time = rdfvalue.Duration.From(60, rdfvalue.SECONDS)
with self.assertRaises(db.ParentHuntIsNotRunningError):
self.db.LeaseFlowForProcessing(client_id, flow_id, processing_time)
self.db.UpdateHuntObject(
hunt_obj.hunt_id, hunt_state=rdf_hunt_objects.Hunt.HuntState.COMPLETED)
with self.assertRaises(db.ParentHuntIsNotRunningError):
self.db.LeaseFlowForProcessing(client_id, flow_id, processing_time)
self.db.UpdateHuntObject(
hunt_obj.hunt_id, hunt_state=rdf_hunt_objects.Hunt.HuntState.STARTED)
# Should work again.
self.db.LeaseFlowForProcessing(client_id, flow_id, processing_time)
def testLeaseFlowForProcessingThatIsAlreadyBeingProcessed(self):
client_id, flow_id = self._SetupClientAndFlow()
processing_time = rdfvalue.Duration.From(60, rdfvalue.SECONDS)
flow_for_processing = self.db.LeaseFlowForProcessing(
client_id, flow_id, processing_time)
# Already marked as being processed.
with self.assertRaises(ValueError):
self.db.LeaseFlowForProcessing(client_id, flow_id, processing_time)
self.db.ReleaseProcessedFlow(flow_for_processing)
# Should work again.
self.db.LeaseFlowForProcessing(client_id, flow_id, processing_time)
def testLeaseFlowForProcessingAfterProcessingTimeExpiration(self):
client_id, flow_id = self._SetupClientAndFlow()
processing_time = rdfvalue.Duration.From(60, rdfvalue.SECONDS)
now = rdfvalue.RDFDatetime.Now()
with test_lib.FakeTime(now):
self.db.LeaseFlowForProcessing(client_id, flow_id, processing_time)
# Already marked as being processed.
with self.assertRaises(ValueError):
self.db.LeaseFlowForProcessing(client_id, flow_id, processing_time)
after_deadline = now + processing_time + rdfvalue.Duration.From(
1, rdfvalue.SECONDS)
with test_lib.FakeTime(after_deadline):
# Should work again.
self.db.LeaseFlowForProcessing(client_id, flow_id, processing_time)
def testLeaseFlowForProcessingUpdatesHuntCounters(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
hunt_id = hunt_obj.hunt_id
processing_time = rdfvalue.Duration.From(60, rdfvalue.SECONDS)
client_id, flow_id = self._SetupClientAndFlow(parent_hunt_id=hunt_id)
flow_for_processing = self.db.LeaseFlowForProcessing(
client_id, flow_id, processing_time)
flow_for_processing.num_replies_sent = 10
sample_results = [
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_id,
payload=rdf_client.ClientSummary(client_id=client_id))
] * 10
self._WriteFlowResults(sample_results)
self.assertTrue(self.db.ReleaseProcessedFlow(flow_for_processing))
counters = self.db.ReadHuntCounters(hunt_id)
self.assertEqual(counters.num_clients_with_results, 1)
self.assertEqual(counters.num_results, 10)
def testLeaseFlowForProcessingUpdatesFlowObjects(self):
client_id, flow_id = self._SetupClientAndFlow()
now = rdfvalue.RDFDatetime.Now()
processing_time = rdfvalue.Duration.From(60, rdfvalue.SECONDS)
processing_deadline = now + processing_time
with test_lib.FakeTime(now):
flow_for_processing = self.db.LeaseFlowForProcessing(
client_id, flow_id, processing_time)
self.assertEqual(flow_for_processing.processing_on, utils.ProcessIdString())
self.assertEqual(flow_for_processing.processing_since, now)
self.assertEqual(flow_for_processing.processing_deadline,
processing_deadline)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(read_flow.processing_on, utils.ProcessIdString())
self.assertEqual(read_flow.processing_since, now)
self.assertEqual(read_flow.processing_deadline, processing_deadline)
self.assertEqual(read_flow.num_replies_sent, 0)
flow_for_processing.next_request_to_process = 5
flow_for_processing.num_replies_sent = 10
self.assertTrue(self.db.ReleaseProcessedFlow(flow_for_processing))
# Check that returning the flow doesn't change the flow object.
self.assertEqual(read_flow.processing_on, utils.ProcessIdString())
self.assertEqual(read_flow.processing_since, now)
self.assertEqual(read_flow.processing_deadline, processing_deadline)
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertFalse(read_flow.processing_on)
self.assertIsNone(read_flow.processing_since)
self.assertIsNone(read_flow.processing_deadline)
self.assertEqual(read_flow.next_request_to_process, 5)
self.assertEqual(read_flow.num_replies_sent, 10)
def testFlowLastUpdateTime(self):
processing_time = rdfvalue.Duration.From(60, rdfvalue.SECONDS)
t0 = rdfvalue.RDFDatetime.Now()
client_id, flow_id = self._SetupClientAndFlow()
t1 = rdfvalue.RDFDatetime.Now()
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertBetween(read_flow.last_update_time, t0, t1)
flow_for_processing = self.db.LeaseFlowForProcessing(
client_id, flow_id, processing_time)
self.assertBetween(flow_for_processing.last_update_time, t0, t1)
t2 = rdfvalue.RDFDatetime.Now()
self.db.ReleaseProcessedFlow(flow_for_processing)
t3 = rdfvalue.RDFDatetime.Now()
read_flow = self.db.ReadFlowObject(client_id, flow_id)
self.assertBetween(read_flow.last_update_time, t2, t3)
def testReleaseProcessedFlow(self):
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=1)
processing_time = rdfvalue.Duration.From(60, rdfvalue.SECONDS)
processed_flow = self.db.LeaseFlowForProcessing(client_id, flow_id,
processing_time)
# Let's say we processed one request on this flow.
processed_flow.next_request_to_process = 2
# There are some requests ready for processing but not #2.
self.db.WriteFlowRequests([
rdf_flow_objects.FlowRequest(
client_id=client_id,
flow_id=flow_id,
request_id=1,
needs_processing=True),
rdf_flow_objects.FlowRequest(
client_id=client_id,
flow_id=flow_id,
request_id=4,
needs_processing=True)
])
self.assertTrue(self.db.ReleaseProcessedFlow(processed_flow))
processed_flow = self.db.LeaseFlowForProcessing(client_id, flow_id,
processing_time)
# And another one.
processed_flow.next_request_to_process = 3
# But in the meantime, request 3 is ready for processing.
self.db.WriteFlowRequests([
rdf_flow_objects.FlowRequest(
client_id=client_id,
flow_id=flow_id,
request_id=3,
needs_processing=True)
])
self.assertFalse(self.db.ReleaseProcessedFlow(processed_flow))
def testReadChildFlows(self):
client_id = u"C.1234567890123456"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
flow_id=u"00000001",
client_id=client_id,
create_time=rdfvalue.RDFDatetime.Now()))
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
flow_id=u"00000002",
client_id=client_id,
parent_flow_id=u"00000001",
create_time=rdfvalue.RDFDatetime.Now()))
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
flow_id=u"00000003",
client_id=client_id,
parent_flow_id=u"00000002",
create_time=rdfvalue.RDFDatetime.Now()))
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
flow_id=u"00000004",
client_id=client_id,
parent_flow_id=u"00000001",
create_time=rdfvalue.RDFDatetime.Now()))
# This one is completely unrelated (different client id).
self.db.WriteClientMetadata(u"C.1234567890123457", fleetspeak_enabled=False)
self.db.WriteFlowObject(
rdf_flow_objects.Flow(
flow_id=u"00000002",
client_id=u"C.1234567890123457",
parent_flow_id=u"00000001",
create_time=rdfvalue.RDFDatetime.Now()))
children = self.db.ReadChildFlowObjects(client_id, u"00000001")
self.assertLen(children, 2)
for c in children:
self.assertEqual(c.parent_flow_id, u"00000001")
children = self.db.ReadChildFlowObjects(client_id, u"00000002")
self.assertLen(children, 1)
self.assertEqual(children[0].parent_flow_id, u"00000002")
self.assertEqual(children[0].flow_id, u"00000003")
children = self.db.ReadChildFlowObjects(client_id, u"00000003")
self.assertEmpty(children)
def _WriteRequestAndResponses(self, client_id, flow_id):
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow_id,
create_time=rdfvalue.RDFDatetime.Now())
self.db.WriteFlowObject(rdf_flow)
for request_id in range(1, 4):
request = rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=request_id)
self.db.WriteFlowRequests([request])
for response_id in range(1, 3):
response = rdf_flow_objects.FlowResponse(
client_id=client_id,
flow_id=flow_id,
request_id=request_id,
response_id=response_id)
self.db.WriteFlowResponses([response])
def _CheckRequestsAndResponsesAreThere(self, client_id, flow_id):
all_requests = self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id)
self.assertLen(all_requests, 3)
for _, responses in all_requests:
self.assertLen(responses, 2)
def testDeleteAllFlowRequestsAndResponses(self):
client_id1 = u"C.1234567890123456"
client_id2 = u"C.1234567890123457"
flow_id1 = u"1234ABCD"
flow_id2 = u"1234ABCE"
self.db.WriteClientMetadata(client_id1, fleetspeak_enabled=True)
self.db.WriteClientMetadata(client_id2, fleetspeak_enabled=True)
self._WriteRequestAndResponses(client_id1, flow_id1)
self._WriteRequestAndResponses(client_id1, flow_id2)
self._WriteRequestAndResponses(client_id2, flow_id1)
self._WriteRequestAndResponses(client_id2, flow_id2)
self._CheckRequestsAndResponsesAreThere(client_id1, flow_id1)
self._CheckRequestsAndResponsesAreThere(client_id1, flow_id2)
self._CheckRequestsAndResponsesAreThere(client_id2, flow_id1)
self._CheckRequestsAndResponsesAreThere(client_id2, flow_id2)
self.db.DeleteAllFlowRequestsAndResponses(client_id1, flow_id2)
self._CheckRequestsAndResponsesAreThere(client_id1, flow_id1)
self._CheckRequestsAndResponsesAreThere(client_id2, flow_id1)
self._CheckRequestsAndResponsesAreThere(client_id2, flow_id2)
all_requests = self.db.ReadAllFlowRequestsAndResponses(client_id1, flow_id2)
self.assertEqual(all_requests, [])
def testDeleteAllFlowRequestsAndResponsesWithClientRequests(self):
client_id = u"C.1234567890123456"
flow_id = u"1234ABCD"
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=True)
self._WriteRequestAndResponses(client_id, flow_id)
req = rdf_flows.ClientActionRequest(
client_id=client_id, flow_id=flow_id, request_id=1)
self.db.WriteClientActionRequests([req])
self._CheckRequestsAndResponsesAreThere(client_id, flow_id)
self.db.DeleteAllFlowRequestsAndResponses(client_id, flow_id)
self.assertEmpty(
self.db.ReadAllFlowRequestsAndResponses(client_id, flow_id))
def testReadFlowRequestsReadyForProcessing(self):
client_id = u"C.1234567890000000"
flow_id = u"12344321"
requests_for_processing = self.db.ReadFlowRequestsReadyForProcessing(
client_id, flow_id, next_needed_request=1)
self.assertEqual(requests_for_processing, {})
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=3)
for request_id in [1, 3, 4, 5, 7]:
request = rdf_flow_objects.FlowRequest(
client_id=client_id,
flow_id=flow_id,
request_id=request_id,
needs_processing=True)
self.db.WriteFlowRequests([request])
# Request 4 has some responses.
responses = [
rdf_flow_objects.FlowResponse(
client_id=client_id, flow_id=flow_id, request_id=4, response_id=i)
for i in range(3)
]
self.db.WriteFlowResponses(responses)
requests_for_processing = self.db.ReadFlowRequestsReadyForProcessing(
client_id, flow_id, next_needed_request=3)
# We expect three requests here. Req #1 is old and should not be there, req
# #7 can't be processed since we are missing #6 in between. That leaves
# requests #3, #4 and #5.
self.assertLen(requests_for_processing, 3)
self.assertEqual(list(requests_for_processing), [3, 4, 5])
for request_id in requests_for_processing:
request, _ = requests_for_processing[request_id]
self.assertEqual(request_id, request.request_id)
self.assertEqual(requests_for_processing[4][1], responses)
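# Flow processing queue tests. Each test registers a handler that acks the
# incoming FlowProcessingRequest and pushes it onto a local queue.Queue,
# then drains that queue with a timeout so a missing request fails the
# test instead of hanging it.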
def testFlowProcessingRequestsQueue(self):
flow_ids = []
for _ in range(5):
client_id, flow_id = self._SetupClientAndFlow()
flow_ids.append(flow_id)
request_queue = queue.Queue()
def Callback(request):
self.db.AckFlowProcessingRequests([request])
request_queue.put(request)
self.db.RegisterFlowProcessingHandler(Callback)
self.addCleanup(self.db.UnregisterFlowProcessingHandler)
requests = []
for flow_id in flow_ids:
requests.append(
rdf_flows.FlowProcessingRequest(client_id=client_id, flow_id=flow_id))
self.db.WriteFlowProcessingRequests(requests)
got = []
while len(got) < 5:
try:
l = request_queue.get(True, timeout=6)
except queue.Empty:
self.fail("Timed out waiting for messages, expected 5, got %d" %
len(got))
got.append(l)
self.assertCountEqual(requests, got)
def testFlowProcessingRequestsQueueWithDelay(self):
flow_ids = []
for _ in range(5):
client_id, flow_id = self._SetupClientAndFlow()
flow_ids.append(flow_id)
request_queue = queue.Queue()
def Callback(request):
self.db.AckFlowProcessingRequests([request])
request_queue.put(request)
self.db.RegisterFlowProcessingHandler(Callback)
self.addCleanup(self.db.UnregisterFlowProcessingHandler)
now = rdfvalue.RDFDatetime.Now()
delivery_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
now.AsSecondsSinceEpoch() + 0.5)
requests = []
for flow_id in flow_ids:
requests.append(
rdf_flows.FlowProcessingRequest(
client_id=client_id, flow_id=flow_id,
delivery_time=delivery_time))
self.db.WriteFlowProcessingRequests(requests)
got = []
while len(got) < 5:
try:
l = request_queue.get(True, timeout=6)
except queue.Empty:
self.fail("Timed out waiting for messages, expected 5, got %d" %
len(got))
got.append(l)
self.assertGreater(rdfvalue.RDFDatetime.Now(), l.delivery_time)
self.assertCountEqual(requests, got)
leftover = self.db.ReadFlowProcessingRequests()
self.assertEqual(leftover, [])
def testAcknowledgingFlowProcessingRequestsWorks(self):
flow_ids = []
for _ in range(5):
client_id, flow_id = self._SetupClientAndFlow()
flow_ids.append(flow_id)
flow_ids.sort()
now = rdfvalue.RDFDatetime.Now()
delivery_time = now + rdfvalue.Duration.From(10, rdfvalue.MINUTES)
requests = []
for flow_id in flow_ids:
requests.append(
rdf_flows.FlowProcessingRequest(
client_id=client_id, flow_id=flow_id,
delivery_time=delivery_time))
self.db.WriteFlowProcessingRequests(requests)
# We stored 5 FlowProcessingRequests, read them back and check they are all
# there.
stored_requests = self.db.ReadFlowProcessingRequests()
stored_requests.sort(key=lambda r: r.flow_id)
self.assertLen(stored_requests, 5)
self.assertCountEqual([r.flow_id for r in stored_requests], flow_ids)
# Now we ack requests 1 and 2. There should be three remaining in the db.
self.db.AckFlowProcessingRequests(stored_requests[1:3])
stored_requests = self.db.ReadFlowProcessingRequests()
self.assertLen(stored_requests, 3)
self.assertCountEqual([r.flow_id for r in stored_requests],
[flow_ids[0], flow_ids[3], flow_ids[4]])
# Make sure DeleteAllFlowProcessingRequests removes all requests.
self.db.DeleteAllFlowProcessingRequests()
self.assertEqual(self.db.ReadFlowProcessingRequests(), [])
self.db.UnregisterFlowProcessingHandler()
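# Flow result helpers. _SampleResults builds 10 tagged ClientSummary
# results; _WriteFlowResults writes them either one by one (distinct
# timestamps) or in a single shuffled batch (one shared timestamp), which
# is what the ordering assertions in the read tests below rely on.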
def _SampleResults(self, client_id, flow_id, hunt_id=None):
sample_results = []
for i in range(10):
sample_results.append(
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_id,
tag="tag_%d" % i,
payload=rdf_client.ClientSummary(
client_id=client_id,
system_manufacturer="manufacturer_%d" % i,
install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 +
i))))
return sample_results
def _WriteFlowResults(self, sample_results=None, multiple_timestamps=False):
if multiple_timestamps:
for r in sample_results:
self.db.WriteFlowResults([r])
else:
# Shuffle so that the tests don't depend on the insertion order of the
# results; they all get the same timestamp anyway.
random.shuffle(sample_results)
self.db.WriteFlowResults(sample_results)
return sample_results
def testWritesAndCounts40001FlowResults(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = [
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(client_id=client_id))
] * 40001
self.db.WriteFlowResults(sample_results)
result_count = self.db.CountFlowResults(client_id, flow_id)
self.assertEqual(result_count, 40001)
def testWritesAndReadsSingleFlowResultOfSingleType(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_result = rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(client_id=client_id))
with test_lib.FakeTime(42):
self.db.WriteFlowResults([sample_result])
results = self.db.ReadFlowResults(client_id, flow_id, 0, 100)
self.assertLen(results, 1)
self.assertEqual(results[0].payload, sample_result.payload)
self.assertEqual(results[0].timestamp.AsSecondsSinceEpoch(), 42)
def testWritesAndReadsMultipleFlowResultsOfSingleType(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = self._WriteFlowResults(
self._SampleResults(client_id, flow_id))
results = self.db.ReadFlowResults(client_id, flow_id, 0, 100)
self.assertLen(results, len(sample_results))
# All results were written with the same timestamp (as they were written
# via a single WriteFlowResults call), so no assumptions about
# the order are made.
self.assertCountEqual(
[i.payload for i in results],
[i.payload for i in sample_results],
)
def testWritesAndReadsMultipleFlowResultsWithDifferentTimestamps(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = self._WriteFlowResults(
self._SampleResults(client_id, flow_id), multiple_timestamps=True)
results = self.db.ReadFlowResults(client_id, flow_id, 0, 100)
self.assertLen(results, len(sample_results))
# Returned results have to be sorted by the timestamp in the ascending
# order.
self.assertEqual(
[i.payload for i in results],
[i.payload for i in sample_results],
)
def testWritesAndReadsMultipleFlowResultsOfMultipleTypes(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = self._WriteFlowResults(sample_results=[
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(
client_id=client_id,
install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 +
i)))
for i in range(10)
])
sample_results.extend(
self._WriteFlowResults(sample_results=[
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientCrash(
client_id=client_id,
timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 +
i)))
for i in range(10)
]))
sample_results.extend(
self._WriteFlowResults(sample_results=[
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientInformation(client_version=i))
for i in range(10)
]))
results = self.db.ReadFlowResults(client_id, flow_id, 0, 100)
self.assertLen(results, len(sample_results))
self.assertCountEqual(
[i.payload for i in results],
[i.payload for i in sample_results],
)
def testReadFlowResultsCorrectlyAppliesOffsetAndCountFilters(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = self._WriteFlowResults(
self._SampleResults(client_id, flow_id), multiple_timestamps=True)
for l in range(1, 11):
for i in range(10):
results = self.db.ReadFlowResults(client_id, flow_id, i, l)
expected = sample_results[i:i + l]
result_payloads = [x.payload for x in results]
expected_payloads = [x.payload for x in expected]
self.assertEqual(
result_payloads, expected_payloads,
"Results differ from expected (from %d, size %d): %s vs %s" %
(i, l, result_payloads, expected_payloads))
def testReadFlowResultsCorrectlyAppliesWithTagFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = self._WriteFlowResults(
self._SampleResults(client_id, flow_id), multiple_timestamps=True)
results = self.db.ReadFlowResults(
client_id, flow_id, 0, 100, with_tag="blah")
self.assertFalse(results)
results = self.db.ReadFlowResults(
client_id, flow_id, 0, 100, with_tag="tag")
self.assertFalse(results)
results = self.db.ReadFlowResults(
client_id, flow_id, 0, 100, with_tag="tag_1")
self.assertEqual([i.payload for i in results], [sample_results[1].payload])
def testReadFlowResultsCorrectlyAppliesWithTypeFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = self._WriteFlowResults(sample_results=[
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(
client_id=client_id,
install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 +
i)))
for i in range(10)
])
sample_results.extend(
self._WriteFlowResults(sample_results=[
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientCrash(
client_id=client_id,
timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 +
i)))
for i in range(10)
]))
results = self.db.ReadFlowResults(
client_id,
flow_id,
0,
100,
with_type=compatibility.GetName(rdf_client.ClientInformation))
self.assertFalse(results)
results = self.db.ReadFlowResults(
client_id,
flow_id,
0,
100,
with_type=compatibility.GetName(rdf_client.ClientSummary))
self.assertCountEqual(
[i.payload for i in results],
[i.payload for i in sample_results[:10]],
)
def testReadFlowResultsCorrectlyAppliesWithSubstringFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = self._WriteFlowResults(
self._SampleResults(client_id, flow_id), multiple_timestamps=True)
results = self.db.ReadFlowResults(
client_id, flow_id, 0, 100, with_substring="blah")
self.assertFalse(results)
results = self.db.ReadFlowResults(
client_id, flow_id, 0, 100, with_substring="manufacturer")
self.assertEqual(
[i.payload for i in results],
[i.payload for i in sample_results],
)
results = self.db.ReadFlowResults(
client_id, flow_id, 0, 100, with_substring="manufacturer_1")
self.assertEqual([i.payload for i in results], [sample_results[1].payload])
def testReadFlowResultsCorrectlyAppliesVariousCombinationsOfFilters(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = self._WriteFlowResults(
self._SampleResults(client_id, flow_id), multiple_timestamps=True)
tags = {None: list(sample_results), "tag_1": [sample_results[1]]}
substrings = {
None: list(sample_results),
"manufacturer": list(sample_results),
"manufacturer_1": [sample_results[1]]
}
types = {
None: list(sample_results),
compatibility.GetName(rdf_client.ClientSummary): list(sample_results),
}
for tag_value, tag_expected in tags.items():
for substring_value, substring_expected in substrings.items():
for type_value, type_expected in types.items():
expected = [
r for r in tag_expected
if r in substring_expected and r in type_expected
]
results = self.db.ReadFlowResults(
client_id,
flow_id,
0,
100,
with_tag=tag_value,
with_type=type_value,
with_substring=substring_value)
self.assertCountEqual(
[i.payload for i in expected], [i.payload for i in results],
"Result items do not match for "
"(tag=%s, type=%s, substring=%s): %s vs %s" %
(tag_value, type_value, substring_value, expected, results))
def testReadFlowResultsReturnsPayloadWithMissingTypeAsSpecialValue(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = self._WriteFlowResults(
self._SampleResults(client_id, flow_id), multiple_timestamps=True)
type_name = compatibility.GetName(rdf_client.ClientSummary)
try:
cls = rdfvalue.RDFValue.classes.pop(type_name)
results = self.db.ReadFlowResults(client_id, flow_id, 0, 100)
finally:
rdfvalue.RDFValue.classes[type_name] = cls
self.assertLen(sample_results, len(results))
for r in results:
self.assertIsInstance(r.payload,
rdf_objects.SerializedValueOfUnrecognizedType)
self.assertEqual(r.payload.type_name, type_name)
def testCountFlowResultsReturnsCorrectResultsCount(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = self._WriteFlowResults(
self._SampleResults(client_id, flow_id), multiple_timestamps=True)
num_results = self.db.CountFlowResults(client_id, flow_id)
self.assertEqual(num_results, len(sample_results))
def testCountFlowResultsCorrectlyAppliesWithTagFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
self._WriteFlowResults(
self._SampleResults(client_id, flow_id), multiple_timestamps=True)
num_results = self.db.CountFlowResults(client_id, flow_id, with_tag="blah")
self.assertEqual(num_results, 0)
num_results = self.db.CountFlowResults(client_id, flow_id, with_tag="tag_1")
self.assertEqual(num_results, 1)
def testCountFlowResultsCorrectlyAppliesWithTypeFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
self._WriteFlowResults(sample_results=[
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(client_id=client_id))
for _ in range(10)
])
self._WriteFlowResults(sample_results=[
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientCrash(client_id=client_id))
for _ in range(10)
])
num_results = self.db.CountFlowResults(
client_id,
flow_id,
with_type=compatibility.GetName(rdf_client.ClientInformation))
self.assertEqual(num_results, 0)
num_results = self.db.CountFlowResults(
client_id,
flow_id,
with_type=compatibility.GetName(rdf_client.ClientSummary))
self.assertEqual(num_results, 10)
num_results = self.db.CountFlowResults(
client_id,
flow_id,
with_type=compatibility.GetName(rdf_client.ClientCrash))
self.assertEqual(num_results, 10)
def testCountFlowResultsCorrectlyAppliesWithTagAndWithTypeFilters(self):
client_id, flow_id = self._SetupClientAndFlow()
self._WriteFlowResults(
self._SampleResults(client_id, flow_id), multiple_timestamps=True)
num_results = self.db.CountFlowResults(
client_id,
flow_id,
with_tag="tag_1",
with_type=compatibility.GetName(rdf_client.ClientSummary))
self.assertEqual(num_results, 1)
def testCountFlowResultsByTypeReturnsCorrectNumbers(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_results = self._WriteFlowResults(sample_results=[
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(client_id=client_id))
] * 3)
sample_results.extend(
self._WriteFlowResults(sample_results=[
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientCrash(client_id=client_id))
] * 5))
counts_by_type = self.db.CountFlowResultsByType(client_id, flow_id)
self.assertEqual(counts_by_type, {
"ClientSummary": 3,
"ClientCrash": 5,
})
def _CreateErrors(self, client_id, flow_id, hunt_id=None):
sample_errors = []
for i in range(10):
sample_errors.append(
rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_id,
tag="tag_%d" % i,
payload=rdf_client.ClientSummary(
client_id=client_id,
system_manufacturer="manufacturer_%d" % i,
install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 +
i))))
return sample_errors
def _WriteFlowErrors(self, sample_errors=None, multiple_timestamps=False):
if multiple_timestamps:
for r in sample_errors:
self.db.WriteFlowErrors([r])
else:
      # Shuffle the errors so the tests don't depend on write order; all of
      # these errors share the same timestamp.
random.shuffle(sample_errors)
self.db.WriteFlowErrors(sample_errors)
return sample_errors
def testWritesAndCounts40001FlowErrors(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_errors = [
rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(client_id=client_id))
] * 40001
self.db.WriteFlowErrors(sample_errors)
error_count = self.db.CountFlowErrors(client_id, flow_id)
self.assertEqual(error_count, 40001)
def testWritesAndReadsSingleFlowErrorOfSingleType(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_error = rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(client_id=client_id))
with test_lib.FakeTime(42):
self.db.WriteFlowErrors([sample_error])
errors = self.db.ReadFlowErrors(client_id, flow_id, 0, 100)
self.assertLen(errors, 1)
self.assertEqual(errors[0].payload, sample_error.payload)
self.assertEqual(errors[0].timestamp.AsSecondsSinceEpoch(), 42)
def testWritesAndReadsMultipleFlowErrorsOfSingleType(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_errors = self._WriteFlowErrors(
self._CreateErrors(client_id, flow_id))
errors = self.db.ReadFlowErrors(client_id, flow_id, 0, 100)
self.assertLen(errors, len(sample_errors))
# All errors were written with the same timestamp (as they were written
# via a single WriteFlowErrors call), so no assumptions about
# the order are made.
self.assertCountEqual(
[i.payload for i in errors],
[i.payload for i in sample_errors],
)
def testWritesAndReadsMultipleFlowErrorsWithDifferentTimestamps(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_errors = self._WriteFlowErrors(
self._CreateErrors(client_id, flow_id), multiple_timestamps=True)
errors = self.db.ReadFlowErrors(client_id, flow_id, 0, 100)
self.assertLen(errors, len(sample_errors))
# Returned errors have to be sorted by the timestamp in the ascending
# order.
self.assertEqual(
[i.payload for i in errors],
[i.payload for i in sample_errors],
)
def testWritesAndReadsMultipleFlowErrorsOfMultipleTypes(self):
client_id, flow_id = self._SetupClientAndFlow()
def SampleClientSummaryError(i):
return rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(
client_id=client_id,
install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 + i)))
sample_errors = self._WriteFlowErrors(
sample_errors=[SampleClientSummaryError(i) for i in range(10)])
def SampleClientCrashError(i):
return rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientCrash(
client_id=client_id,
timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 + i)))
sample_errors.extend(
self._WriteFlowErrors(
sample_errors=[SampleClientCrashError(i) for i in range(10)]))
def SampleClientInformationError(i):
return rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientInformation(client_version=i))
sample_errors.extend(
self._WriteFlowErrors(
sample_errors=[SampleClientInformationError(i) for i in range(10)]))
errors = self.db.ReadFlowErrors(client_id, flow_id, 0, 100)
self.assertLen(errors, len(sample_errors))
self.assertCountEqual(
[i.payload for i in errors],
[i.payload for i in sample_errors],
)
def testReadFlowErrorsCorrectlyAppliesOffsetAndCountFilters(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_errors = self._WriteFlowErrors(
self._CreateErrors(client_id, flow_id), multiple_timestamps=True)
for l in range(1, 11):
for i in range(10):
errors = self.db.ReadFlowErrors(client_id, flow_id, i, l)
expected = sample_errors[i:i + l]
error_payloads = [x.payload for x in errors]
expected_payloads = [x.payload for x in expected]
self.assertEqual(
error_payloads, expected_payloads,
"Errors differ from expected (from %d, size %d): %s vs %s" %
(i, l, error_payloads, expected_payloads))
def testReadFlowErrorsCorrectlyAppliesWithTagFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_errors = self._WriteFlowErrors(
self._CreateErrors(client_id, flow_id), multiple_timestamps=True)
errors = self.db.ReadFlowErrors(client_id, flow_id, 0, 100, with_tag="blah")
self.assertFalse(errors)
errors = self.db.ReadFlowErrors(client_id, flow_id, 0, 100, with_tag="tag")
self.assertFalse(errors)
errors = self.db.ReadFlowErrors(
client_id, flow_id, 0, 100, with_tag="tag_1")
self.assertEqual([i.payload for i in errors], [sample_errors[1].payload])
def testReadFlowErrorsCorrectlyAppliesWithTypeFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
def SampleClientSummaryError(i):
return rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(
client_id=client_id,
install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 + i)))
sample_errors = self._WriteFlowErrors(
sample_errors=[SampleClientSummaryError(i) for i in range(10)])
def SampleClientCrashError(i):
return rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientCrash(
client_id=client_id,
timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 + i)))
sample_errors.extend(
self._WriteFlowErrors(
sample_errors=[SampleClientCrashError(i) for i in range(10)]))
errors = self.db.ReadFlowErrors(
client_id,
flow_id,
0,
100,
with_type=compatibility.GetName(rdf_client.ClientInformation))
self.assertFalse(errors)
errors = self.db.ReadFlowErrors(
client_id,
flow_id,
0,
100,
with_type=compatibility.GetName(rdf_client.ClientSummary))
self.assertCountEqual(
[i.payload for i in errors],
[i.payload for i in sample_errors[:10]],
)
def testReadFlowErrorsCorrectlyAppliesVariousCombinationsOfFilters(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_errors = self._WriteFlowErrors(
self._CreateErrors(client_id, flow_id), multiple_timestamps=True)
tags = {None: list(sample_errors), "tag_1": [sample_errors[1]]}
types = {
None: list(sample_errors),
compatibility.GetName(rdf_client.ClientSummary): list(sample_errors),
}
for tag_value, tag_expected in tags.items():
for type_value, type_expected in types.items():
expected = [r for r in tag_expected if r in type_expected]
errors = self.db.ReadFlowErrors(
client_id,
flow_id,
0,
100,
with_tag=tag_value,
with_type=type_value)
self.assertCountEqual([i.payload for i in expected],
[i.payload for i in errors],
"Error items do not match for "
"(tag=%s, type=%s): %s vs %s" %
(tag_value, type_value, expected, errors))
def testReadFlowErrorsReturnsPayloadWithMissingTypeAsSpecialValue(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_errors = self._WriteFlowErrors(
self._CreateErrors(client_id, flow_id), multiple_timestamps=True)
type_name = compatibility.GetName(rdf_client.ClientSummary)
try:
cls = rdfvalue.RDFValue.classes.pop(type_name)
errors = self.db.ReadFlowErrors(client_id, flow_id, 0, 100)
finally:
rdfvalue.RDFValue.classes[type_name] = cls
self.assertLen(sample_errors, len(errors))
for r in errors:
self.assertIsInstance(r.payload,
rdf_objects.SerializedValueOfUnrecognizedType)
self.assertEqual(r.payload.type_name, type_name)
def testCountFlowErrorsReturnsCorrectErrorsCount(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_errors = self._WriteFlowErrors(
self._CreateErrors(client_id, flow_id), multiple_timestamps=True)
num_errors = self.db.CountFlowErrors(client_id, flow_id)
self.assertEqual(num_errors, len(sample_errors))
def testCountFlowErrorsCorrectlyAppliesWithTagFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
self._WriteFlowErrors(
self._CreateErrors(client_id, flow_id), multiple_timestamps=True)
num_errors = self.db.CountFlowErrors(client_id, flow_id, with_tag="blah")
self.assertEqual(num_errors, 0)
num_errors = self.db.CountFlowErrors(client_id, flow_id, with_tag="tag_1")
self.assertEqual(num_errors, 1)
def testCountFlowErrorsCorrectlyAppliesWithTypeFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
self._WriteFlowErrors(sample_errors=[
rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(client_id=client_id))
] * 10)
self._WriteFlowErrors(sample_errors=[
rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientCrash(client_id=client_id))
] * 10)
num_errors = self.db.CountFlowErrors(
client_id,
flow_id,
with_type=compatibility.GetName(rdf_client.ClientInformation))
self.assertEqual(num_errors, 0)
num_errors = self.db.CountFlowErrors(
client_id,
flow_id,
with_type=compatibility.GetName(rdf_client.ClientSummary))
self.assertEqual(num_errors, 10)
num_errors = self.db.CountFlowErrors(
client_id,
flow_id,
with_type=compatibility.GetName(rdf_client.ClientCrash))
self.assertEqual(num_errors, 10)
def testCountFlowErrorsCorrectlyAppliesWithTagAndWithTypeFilters(self):
client_id, flow_id = self._SetupClientAndFlow()
self._WriteFlowErrors(
self._CreateErrors(client_id, flow_id), multiple_timestamps=True)
num_errors = self.db.CountFlowErrors(
client_id,
flow_id,
with_tag="tag_1",
with_type=compatibility.GetName(rdf_client.ClientSummary))
self.assertEqual(num_errors, 1)
def testCountFlowErrorsByTypeReturnsCorrectNumbers(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_errors = self._WriteFlowErrors(sample_errors=[
rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(client_id=client_id))
] * 3)
sample_errors.extend(
self._WriteFlowErrors(sample_errors=[
rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientCrash(client_id=client_id))
] * 5))
counts_by_type = self.db.CountFlowErrorsByType(client_id, flow_id)
self.assertEqual(counts_by_type, {
"ClientSummary": 3,
"ClientCrash": 5,
})
def testWritesAndReadsSingleFlowLogEntry(self):
client_id, flow_id = self._SetupClientAndFlow()
message = "blah: ٩(͡๏̯͡๏)۶"
self.db.WriteFlowLogEntries([
rdf_flow_objects.FlowLogEntry(
client_id=client_id, flow_id=flow_id, message=message)
])
entries = self.db.ReadFlowLogEntries(client_id, flow_id, 0, 100)
self.assertLen(entries, 1)
self.assertEqual(entries[0].message, message)
def _WriteFlowLogEntries(self, client_id, flow_id):
messages = ["blah_%d" % i for i in range(10)]
for message in messages:
self.db.WriteFlowLogEntries([
rdf_flow_objects.FlowLogEntry(
client_id=client_id, flow_id=flow_id, message=message)
])
return messages
def testWritesAndReadsMultipleFlowLogEntries(self):
client_id, flow_id = self._SetupClientAndFlow()
messages = self._WriteFlowLogEntries(client_id, flow_id)
entries = self.db.ReadFlowLogEntries(client_id, flow_id, 0, 100)
self.assertEqual([e.message for e in entries], messages)
def testReadFlowLogEntriesCorrectlyAppliesOffsetAndCountFilters(self):
client_id, flow_id = self._SetupClientAndFlow()
messages = self._WriteFlowLogEntries(client_id, flow_id)
for i in range(10):
for size in range(1, 10):
entries = self.db.ReadFlowLogEntries(client_id, flow_id, i, size)
self.assertEqual([e.message for e in entries], messages[i:i + size])
def testReadFlowLogEntriesCorrectlyAppliesWithSubstringFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
messages = self._WriteFlowLogEntries(client_id, flow_id)
entries = self.db.ReadFlowLogEntries(
client_id, flow_id, 0, 100, with_substring="foobar")
self.assertFalse(entries)
entries = self.db.ReadFlowLogEntries(
client_id, flow_id, 0, 100, with_substring="blah")
self.assertEqual([e.message for e in entries], messages)
entries = self.db.ReadFlowLogEntries(
client_id, flow_id, 0, 100, with_substring="blah_1")
self.assertEqual([e.message for e in entries], [messages[1]])
def testReadFlowLogEntriesCorrectlyAppliesVariousCombinationsOfFilters(self):
client_id, flow_id = self._SetupClientAndFlow()
messages = self._WriteFlowLogEntries(client_id, flow_id)
entries = self.db.ReadFlowLogEntries(
client_id, flow_id, 0, 100, with_substring="foobar")
self.assertFalse(entries)
entries = self.db.ReadFlowLogEntries(
client_id, flow_id, 1, 2, with_substring="blah")
self.assertEqual([e.message for e in entries], [messages[1], messages[2]])
entries = self.db.ReadFlowLogEntries(
client_id, flow_id, 0, 1, with_substring="blah_1")
self.assertEqual([e.message for e in entries], [messages[1]])
def testCountFlowLogEntriesReturnsCorrectFlowLogEntriesCount(self):
client_id, flow_id = self._SetupClientAndFlow()
messages = self._WriteFlowLogEntries(client_id, flow_id)
num_entries = self.db.CountFlowLogEntries(client_id, flow_id)
self.assertEqual(num_entries, len(messages))
def testFlowLogsAndErrorsForUnknownFlowsRaise(self):
client_id = u"C.1234567890123456"
flow_id = flow.RandomFlowId()
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
with self.assertRaises(db.AtLeastOneUnknownFlowError):
self.db.WriteFlowLogEntries([
rdf_flow_objects.FlowLogEntry(
client_id=client_id, flow_id=flow_id, message="test")
])
def _WriteFlowOutputPluginLogEntries(self, client_id, flow_id,
output_plugin_id):
entries = []
for i in range(10):
message = "blah_🚀_%d" % i
enum = rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType
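      # Entries with i = 0, 3, 6 and 9 are written as ERROR entries, so the 10
      # entries split into 4 ERROR and 6 LOG entries; the type-filter and count
      # tests below rely on this split.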
if i % 3 == 0:
log_entry_type = enum.ERROR
else:
log_entry_type = enum.LOG
entry = rdf_flow_objects.FlowOutputPluginLogEntry(
client_id=client_id,
flow_id=flow_id,
output_plugin_id=output_plugin_id,
message=message,
log_entry_type=log_entry_type)
entries.append(entry)
self.db.WriteFlowOutputPluginLogEntries([entry])
return entries
def testFlowOutputPluginLogEntriesCanBeWrittenAndThenRead(self):
client_id, flow_id = self._SetupClientAndFlow()
output_plugin_id = "1"
written_entries = self._WriteFlowOutputPluginLogEntries(
client_id, flow_id, output_plugin_id)
read_entries = self.db.ReadFlowOutputPluginLogEntries(
client_id, flow_id, output_plugin_id, 0, 100)
self.assertLen(written_entries, len(read_entries))
self.assertEqual([e.message for e in written_entries],
[e.message for e in read_entries])
def testFlowOutputPluginLogEntryWith1MbMessageCanBeWrittenAndThenRead(self):
client_id, flow_id = self._SetupClientAndFlow()
output_plugin_id = "1"
entry = rdf_flow_objects.FlowOutputPluginLogEntry(
client_id=client_id,
flow_id=flow_id,
output_plugin_id=output_plugin_id,
message="x" * 1024 * 1024,
log_entry_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType
.LOG)
self.db.WriteFlowOutputPluginLogEntries([entry])
read_entries = self.db.ReadFlowOutputPluginLogEntries(
client_id, flow_id, output_plugin_id, 0, 100)
self.assertLen(read_entries, 1)
self.assertEqual(read_entries[0].message, entry.message)
def testFlowOutputPluginLogEntriesCanBeReadWithTypeFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
output_plugin_id = "1"
self._WriteFlowOutputPluginLogEntries(client_id, flow_id, output_plugin_id)
read_entries = self.db.ReadFlowOutputPluginLogEntries(
client_id,
flow_id,
output_plugin_id,
0,
100,
with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.ERROR)
self.assertLen(read_entries, 4)
read_entries = self.db.ReadFlowOutputPluginLogEntries(
client_id,
flow_id,
output_plugin_id,
0,
100,
with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.LOG)
self.assertLen(read_entries, 6)
def testReadFlowOutputPluginLogEntriesCorrectlyAppliesOffsetCounter(self):
client_id, flow_id = self._SetupClientAndFlow()
output_plugin_id = "1"
entries = self._WriteFlowOutputPluginLogEntries(client_id, flow_id,
output_plugin_id)
for l in range(1, 11):
for i in range(10):
results = self.db.ReadFlowOutputPluginLogEntries(
client_id, flow_id, output_plugin_id, i, l)
expected = entries[i:i + l]
result_messages = [x.message for x in results]
expected_messages = [x.message for x in expected]
self.assertEqual(
result_messages, expected_messages,
"Results differ from expected (from %d, size %d): %s vs %s" %
(i, l, result_messages, expected_messages))
def testReadFlowOutputPluginLogEntriesAppliesOffsetCounterWithType(self):
client_id, flow_id = self._SetupClientAndFlow()
output_plugin_id = "1"
entries = self._WriteFlowOutputPluginLogEntries(client_id, flow_id,
output_plugin_id)
for l in range(1, 11):
for i in range(10):
for with_type in [
rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.LOG,
rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.ERROR
]:
results = self.db.ReadFlowOutputPluginLogEntries(
client_id, flow_id, output_plugin_id, i, l, with_type=with_type)
expected = [e for e in entries if e.log_entry_type == with_type
][i:i + l]
result_messages = [x.message for x in results]
expected_messages = [x.message for x in expected]
self.assertEqual(
result_messages, expected_messages,
"Results differ from expected (from %d, size %d): %s vs %s" %
(i, l, result_messages, expected_messages))
def testFlowOutputPluginLogEntriesCanBeCountedPerPlugin(self):
client_id, flow_id = self._SetupClientAndFlow()
output_plugin_id_1 = "1"
self._WriteFlowOutputPluginLogEntries(client_id, flow_id,
output_plugin_id_1)
output_plugin_id_2 = "2"
self._WriteFlowOutputPluginLogEntries(client_id, flow_id,
output_plugin_id_2)
self.assertEqual(
self.db.CountFlowOutputPluginLogEntries(client_id, flow_id,
output_plugin_id_1), 10)
self.assertEqual(
self.db.CountFlowOutputPluginLogEntries(client_id, flow_id,
output_plugin_id_2), 10)
def testCountFlowOutputPluginLogEntriesRespectsWithTypeFilter(self):
client_id, flow_id = self._SetupClientAndFlow()
output_plugin_id = "1"
self._WriteFlowOutputPluginLogEntries(client_id, flow_id, output_plugin_id)
self.assertEqual(
self.db.CountFlowOutputPluginLogEntries(
client_id,
flow_id,
output_plugin_id,
with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.LOG
),
6,
)
self.assertEqual(
self.db.CountFlowOutputPluginLogEntries(
client_id,
flow_id,
output_plugin_id,
with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType
.ERROR),
4,
)
def _SetupScheduledFlow(self, **kwargs):
merged_kwargs = {
"scheduled_flow_id":
flow.RandomFlowId(),
"flow_name":
file.CollectSingleFile.__name__,
"flow_args":
rdf_file_finder.CollectSingleFileArgs(
max_size_bytes=random.randint(0, 10)),
"runner_args":
rdf_flow_runner.FlowRunnerArgs(
network_bytes_limit=random.randint(0, 10)),
"create_time":
rdfvalue.RDFDatetime.Now(),
**kwargs
}
sf = rdf_flow_objects.ScheduledFlow(**merged_kwargs)
self.db.WriteScheduledFlow(sf)
return sf
def testListScheduledFlowsInitiallyEmpty(self):
client_id = self._SetupClient()
username = self._SetupUser()
self.assertEmpty(self.db.ListScheduledFlows(client_id, username))
def testWriteScheduledFlowPersistsAllFields(self):
client_id = self._SetupClient()
username = self._SetupUser()
sf = self._SetupScheduledFlow(
client_id=client_id,
creator=username,
scheduled_flow_id="1234123421342134",
flow_name=file.CollectSingleFile.__name__,
flow_args=rdf_file_finder.CollectSingleFileArgs(path="/baz"),
runner_args=rdf_flow_runner.FlowRunnerArgs(network_bytes_limit=1024),
create_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(42),
error="foobazzle disintegrated")
results = self.db.ListScheduledFlows(client_id, username)
self.assertEqual([sf], results)
result = results[0]
self.assertEqual(result.client_id, client_id)
self.assertEqual(result.creator, username)
self.assertEqual(result.scheduled_flow_id, "1234123421342134")
self.assertEqual(result.flow_name, file.CollectSingleFile.__name__)
self.assertEqual(result.flow_args.path, "/baz")
self.assertEqual(result.runner_args.network_bytes_limit, 1024)
self.assertEqual(result.create_time,
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(42))
self.assertEqual(result.error, "foobazzle disintegrated")
def testWriteMultipleScheduledFlows(self):
client_id = self._SetupClient()
username = self._SetupUser()
sf1 = self._SetupScheduledFlow(client_id=client_id, creator=username)
sf2 = self._SetupScheduledFlow(client_id=client_id, creator=username)
self.assertCountEqual([sf1, sf2],
self.db.ListScheduledFlows(client_id, username))
def testWriteScheduledFlowUpdatesExistingEntry(self):
client_id = self._SetupClient()
username = self._SetupUser()
sf = self._SetupScheduledFlow(client_id=client_id, creator=username)
sf = self._SetupScheduledFlow(
client_id=client_id,
creator=username,
scheduled_flow_id=sf.scheduled_flow_id,
error="foobar")
self.assertEqual([sf], self.db.ListScheduledFlows(client_id, username))
def testListScheduledFlowsFiltersCorrectly(self):
client_id1 = self._SetupClient("C.0000000000000001")
client_id2 = self._SetupClient("C.0000000000000002")
client_id3 = self._SetupClient("C.0000000000000003")
username1 = self._SetupUser("u1")
username2 = self._SetupUser("u2")
username3 = self._SetupUser("u3")
sf11a = self._SetupScheduledFlow(client_id=client_id1, creator=username1)
sf11b = self._SetupScheduledFlow(client_id=client_id1, creator=username1)
sf12 = self._SetupScheduledFlow(client_id=client_id1, creator=username2)
sf21 = self._SetupScheduledFlow(client_id=client_id2, creator=username1)
sf22 = self._SetupScheduledFlow(client_id=client_id2, creator=username2)
self.assertCountEqual([sf11a, sf11b],
self.db.ListScheduledFlows(client_id1, username1))
self.assertEqual([sf12], self.db.ListScheduledFlows(client_id1, username2))
self.assertEqual([sf21], self.db.ListScheduledFlows(client_id2, username1))
self.assertEqual([sf22], self.db.ListScheduledFlows(client_id2, username2))
self.assertEmpty(
self.db.ListScheduledFlows("C.1234123412341234", username1))
self.assertEmpty(self.db.ListScheduledFlows(client_id1, "nonexistent"))
self.assertEmpty(
self.db.ListScheduledFlows("C.1234123412341234", "nonexistent"))
self.assertEmpty(self.db.ListScheduledFlows(client_id3, username1))
self.assertEmpty(self.db.ListScheduledFlows(client_id1, username3))
def testWriteScheduledFlowRaisesForUnknownClient(self):
self._SetupClient()
username = self._SetupUser()
with self.assertRaises(db.UnknownClientError):
self._SetupScheduledFlow(client_id="C.1234123412341234", creator=username)
self.assertEmpty(self.db.ListScheduledFlows("C.1234123412341234", username))
def testWriteScheduledFlowRaisesForUnknownUser(self):
client_id = self._SetupClient()
self._SetupUser()
with self.assertRaises(db.UnknownGRRUserError):
self._SetupScheduledFlow(client_id=client_id, creator="nonexistent")
self.assertEmpty(self.db.ListScheduledFlows(client_id, "nonexistent"))
def testDeleteScheduledFlowRemovesScheduledFlow(self):
client_id = self._SetupClient()
username = self._SetupUser()
sf = self._SetupScheduledFlow(client_id=client_id, creator=username)
self.db.DeleteScheduledFlow(client_id, username, sf.scheduled_flow_id)
self.assertEmpty(self.db.ListScheduledFlows(client_id, username))
def testDeleteScheduledFlowDoesNotRemoveUnrelatedEntries(self):
client_id1 = self._SetupClient("C.0000000000000001")
client_id2 = self._SetupClient("C.0000000000000002")
username1 = self._SetupUser("u1")
username2 = self._SetupUser("u2")
sf111 = self._SetupScheduledFlow(client_id=client_id1, creator=username1)
sf112 = self._SetupScheduledFlow(client_id=client_id1, creator=username1)
sf211 = self._SetupScheduledFlow(
client_id=client_id2,
creator=username1,
scheduled_flow_id=sf111.scheduled_flow_id)
sf121 = self._SetupScheduledFlow(
client_id=client_id1,
creator=username2,
scheduled_flow_id=sf111.scheduled_flow_id)
self.db.DeleteScheduledFlow(client_id1, username1, sf111.scheduled_flow_id)
self.assertEqual([sf112], self.db.ListScheduledFlows(client_id1, username1))
self.assertEqual([sf211], self.db.ListScheduledFlows(client_id2, username1))
self.assertEqual([sf121], self.db.ListScheduledFlows(client_id1, username2))
def testDeleteScheduledFlowRaisesForUnknownScheduledFlow(self):
client_id = self._SetupClient()
username = self._SetupUser()
self._SetupScheduledFlow(
scheduled_flow_id="1", client_id=client_id, creator=username)
with self.assertRaises(db.UnknownScheduledFlowError) as e:
self.db.DeleteScheduledFlow(client_id, username, "2")
self.assertEqual(e.exception.client_id, client_id)
self.assertEqual(e.exception.creator, username)
self.assertEqual(e.exception.scheduled_flow_id, "2")
with self.assertRaises(db.UnknownScheduledFlowError):
self.db.DeleteScheduledFlow(client_id, "nonexistent", "1")
with self.assertRaises(db.UnknownScheduledFlowError):
self.db.DeleteScheduledFlow("C.1234123412341234", username, "1")
def testDeleteUserDeletesScheduledFlows(self):
client_id = self._SetupClient()
client_id2 = self._SetupClient()
username1 = self._SetupUser("u1")
username2 = self._SetupUser("u2")
self._SetupScheduledFlow(client_id=client_id, creator=username1)
self._SetupScheduledFlow(client_id=client_id, creator=username1)
self._SetupScheduledFlow(client_id=client_id2, creator=username1)
sf2 = self._SetupScheduledFlow(client_id=client_id, creator=username2)
self.db.DeleteGRRUser(username1)
self.assertEmpty(self.db.ListScheduledFlows(client_id, username1))
self.assertEmpty(self.db.ListScheduledFlows(client_id2, username1))
self.assertEqual([sf2], self.db.ListScheduledFlows(client_id, username2))
# This file is a test library and thus does not require a __main__ block.
```
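The offset/count tests above pin down the pagination contract of `ReadFlowResults`. As a minimal illustrative sketch (not part of the test library, and assuming only the `ReadFlowResults(client_id, flow_id, offset, count)` signature exercised by those tests), a caller could page through all results of a flow like this:

```python
def IterateFlowResults(db, client_id, flow_id, page_size=100):
  """Yields every result of a flow, reading it page by page.

  Illustrative sketch only: `db` is assumed to expose the same
  ReadFlowResults(client_id, flow_id, offset, count) method tested above.
  """
  offset = 0
  while True:
    page = db.ReadFlowResults(client_id, flow_id, offset, page_size)
    if not page:
      return
    for result in page:
      yield result
    offset += len(page)
```

As the comments in the tests above note, results written in separate calls come back sorted by timestamp, while results written in a single batch share a timestamp and carry no ordering guarantee.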
#### File: flows/general/timeline.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from typing import Iterator
from typing import Text
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import timeline as rdf_timeline
from grr_response_core.lib.util import timeline
from grr_response_proto import timeline_pb2
from grr_response_server import data_store
from grr_response_server import flow_base
from grr_response_server import flow_responses
from grr_response_server import server_stubs
from grr_response_server.rdfvalues import objects as rdf_objects
class TimelineFlow(flow_base.FlowBase):
"""A flow mixin wrapping the timeline client action."""
friendly_name = "Timeline"
category = "/Collectors/"
behaviours = flow_base.BEHAVIOUR_BASIC
args_type = rdf_timeline.TimelineArgs
def Start(self) -> None:
super(TimelineFlow, self).Start()
if not self.args.root:
      raise ValueError("The timeline root directory is not specified")
self.CallClient(
action_cls=server_stubs.Timeline,
request=self.args,
next_state=self.Process.__name__)
def Process(
self,
responses: flow_responses.Responses[rdf_timeline.TimelineResult],
) -> None:
if not responses.success:
raise flow_base.FlowError(responses.status)
blob_ids = []
for response in responses:
for blob_id in response.entry_batch_blob_ids:
blob_ids.append(rdf_objects.BlobID(blob_id))
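    # Blobs uploaded by the client are not guaranteed to be processed by the
    # blob store before the flow receives its results, so wait (up to
    # _BLOB_STORE_TIMEOUT) for all referenced blobs to become available before
    # replying.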
data_store.BLOBS.WaitForBlobs(blob_ids, timeout=_BLOB_STORE_TIMEOUT)
for response in responses:
self.SendReply(response)
def ProtoEntries(
client_id: Text,
flow_id: Text,
) -> Iterator[timeline_pb2.TimelineEntry]:
"""Retrieves timeline entries for the specified flow.
Args:
    client_id: An identifier of the client of the flow to retrieve the entries
      for.
    flow_id: An identifier of the flow to retrieve the entries for.

  Returns:
    An iterator over timeline entry protos for the specified flow.
"""
blobs = Blobs(client_id, flow_id)
return timeline.DeserializeTimelineEntryProtoStream(blobs)
def Blobs(
client_id: Text,
flow_id: Text,
) -> Iterator[bytes]:
"""Retrieves timeline blobs for the specified flow.
Args:
client_id: An identifier of a client of the flow to retrieve the blobs for.
flow_id: An identifier of the flow to retrieve the blobs for.
Yields:
Blobs of the timeline data in the gzchunked format for the specified flow.
"""
results = data_store.REL_DB.ReadFlowResults(
client_id=client_id,
flow_id=flow_id,
offset=0,
count=_READ_FLOW_RESULTS_COUNT)
for result in results:
payload = result.payload
if not isinstance(payload, rdf_timeline.TimelineResult):
message = "Unexpected timeline result of type '{}'".format(type(payload))
raise TypeError(message)
for entry_batch_blob_id in payload.entry_batch_blob_ids:
blob_id = rdf_objects.BlobID(entry_batch_blob_id)
blob = data_store.BLOBS.ReadBlob(blob_id)
if blob is None:
message = "Reference to non-existing blob: '{}'".format(blob_id)
raise AssertionError(message)
yield blob
# The number of results should never be large: usually there are no more than
# 2 or 3 results per flow, because each result is just a block of references
# to much bigger blobs. Just to be on the safe side, we use a limit two orders
# of magnitude bigger.
_READ_FLOW_RESULTS_COUNT = 1024
# The amount of time to wait for the blobs with timeline entries to appear in
# the blob store. This is needed because blobs are not guaranteed to be
# processed before the flow receives results from the client. The delay is
# usually very short, so the timeout used here should be more than enough.
_BLOB_STORE_TIMEOUT = rdfvalue.Duration.From(30, rdfvalue.SECONDS)
```
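As a hypothetical usage sketch (not part of the module above), the `ProtoEntries` helper can be used to walk the entries of a completed timeline flow. The import path mirrors the file location shown above; the `path` field on the entry proto is an assumption, not taken from the code above:

```python
from grr_response_server.flows.general import timeline as timeline_flow


def PrintTimelinePaths(client_id: str, flow_id: str) -> None:
  """Prints the path of every entry collected by a finished TimelineFlow."""
  for entry in timeline_flow.ProtoEntries(client_id=client_id, flow_id=flow_id):
    # `entry` is assumed to be a timeline_pb2.TimelineEntry whose `path` field
    # holds the raw path bytes.
    print(entry.path.decode("utf-8", errors="replace"))
```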
#### File: gui/selenium_tests/hunt_create_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from selenium.webdriver.common import keys
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import data_store
from grr_response_server import foreman
from grr_response_server import foreman_rules
from grr_response_server import hunt as lib_hunt
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import transfer
from grr_response_server.gui import gui_test_lib
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin
from grr.test_lib import test_lib
class TestNewHuntWizard(gui_test_lib.GRRSeleniumHuntTest):
"""Test the "new hunt wizard" GUI."""
@staticmethod
def FindForemanRules(hunt_urn, token):
rules = data_store.REL_DB.ReadAllForemanRules()
return [rule for rule in rules if rule.hunt_id == hunt_urn.Basename()]
def testNewHuntWizard(self):
# Open up and click on View Hunts.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.WaitUntil(self.IsElementPresent, "css=a[grrtarget=hunts]")
self.Click("css=a[grrtarget=hunts]")
self.WaitUntil(self.IsElementPresent, "css=button[name=NewHunt]")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
# Wait for flow configuration form to be rendered (just wait for first
# input field).
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
# Change "path" and "pathtype" values
self.Type(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) "
"input", "/tmp")
self.Select(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) "
"select", "TSK")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Back" button and check that all the values in the form
# remain intact.
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
self.assertEqual(
"/tmp",
self.GetValue(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) input"))
self.assertEqual(
"TSK",
self.GetSelectedLabel(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) select"
))
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Configure the hunt to use dummy output plugin.
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
self.Select("css=grr-new-hunt-wizard-form select", "DummyOutputPlugin")
self.Type(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Filepath Regex')) "
"input", "some regex")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Empty set of rules should be valid.
self.WaitUntil(self.IsElementPresent, "css=button.Next:not([disabled])")
    # A note explains what an empty set of rules means.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('No rules specified!')")
    # An alternative match mode, which matches a client if any of the rules
    # evaluates to true, can be selected.
self.Select(
"css=grr-configure-rules-page "
"label:contains('Match mode') ~ * select", "Match any")
# The note depends on the match mode.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('No rules specified!')")
    # Create 3 foreman rules. Note that the "Add" button adds rules to the
    # beginning of the list, so we always use the :nth(0) selector.
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-configure-rules-page div.well:nth(0) select", "Regex")
rule = foreman_rules.ForemanRegexClientRule
label = rule.ForemanStringField.SYSTEM.description
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Field') ~ * select", label)
self.Type(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Attribute regex') ~ * input", "Linux")
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-configure-rules-page div.well:nth(0) select",
"Integer")
rule = foreman_rules.ForemanIntegerClientRule
label = rule.ForemanIntegerField.CLIENT_CLOCK.description
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Field') ~ * select", label)
self.Select(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Operator') ~ * select", "GREATER_THAN")
self.Type(
"css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Value') ~ * input", "1336650631137737")
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Click("css=grr-configure-rules-page div.well:nth(0) "
"label:contains('Os darwin') ~ * input[type=checkbox]")
# Click on "Back" button
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Next" button again and check that all the values that
# we've just entered remain intact.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Review')")
# Check that the arguments summary is present.
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Paths')")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('/tmp')")
# Check that output plugins are shown.
self.assertTrue(
self.IsElementPresent(
"css=grr-wizard-form:contains('DummyOutputPlugin')"))
self.assertTrue(
self.IsElementPresent("css=grr-wizard-form:contains('some regex')"))
# Check that there's no deprecated rules summary.
self.assertFalse(
self.IsElementPresent("css=grr-wizard-form:contains('Regex rules')"))
self.assertFalse(
self.IsElementPresent("css=grr-wizard-form:contains('Integer rules')"))
# Check that rules summary is present.
self.assertTrue(
self.IsElementPresent(
"css=grr-wizard-form:contains('Client rule set')"))
# Click on "Run" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
# Close the window and check that the hunt was created.
self.Click("css=button.Next")
# Select newly created hunt.
self.Click("css=grr-hunts-list td:contains('gui_user')")
# Check that correct details are displayed in hunt details tab.
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-inspector:contains('GenericHunt')")
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-inspector:contains('Flow Arguments')")
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('Paths')"))
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('/tmp')"))
self.assertTrue(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('DummyOutputPlugin')"))
self.assertTrue(
self.IsElementPresent("css=grr-hunt-inspector:contains('some regex')"))
# Check that there's no deprecated rules summary.
self.assertFalse(
self.IsElementPresent("css=grr-hunt-inspector:contains('Regex rules')"))
self.assertFalse(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('Integer rules')"))
# Check that rules summary is present.
self.assertTrue(
self.IsElementPresent(
"css=grr-hunt-inspector:contains('Client Rule Set')"))
# Check that the hunt object was actually created
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
self.assertLen(hunts_list, 1)
# Check that the hunt was created with a correct flow
hunt = hunts_list[0]
self.assertEqual(hunt.args.standard.flow_name,
file_finder.FileFinder.__name__)
self.assertEqual(hunt.args.standard.flow_args.paths[0], "/tmp")
self.assertEqual(hunt.args.standard.flow_args.pathtype,
rdf_paths.PathSpec.PathType.TSK)
# self.assertEqual(hunt.args.flow_args.ignore_errors, True)
self.assertEqual(hunt.output_plugins[0].plugin_name, "DummyOutputPlugin")
# Check that hunt was not started
self.assertEqual(hunt.hunt_state, hunt.HuntState.PAUSED)
lib_hunt.StartHunt(hunt.hunt_id)
hunt_rules = self.FindForemanRules(
rdfvalue.RDFURN("hunts").Add(hunt.hunt_id), token=self.token)
# Check that the hunt was created with correct rules
self.assertLen(hunt_rules, 1)
lifetime = hunt_rules[0].GetLifetime()
lifetime -= rdfvalue.Duration.From(2, rdfvalue.WEEKS)
self.assertLessEqual(lifetime, rdfvalue.Duration.From(1, rdfvalue.SECONDS))
r = hunt_rules[0].client_rule_set
self.assertEqual(r.match_mode,
foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY)
self.assertLen(r.rules, 3)
self.assertEqual(r.rules[0].rule_type,
foreman_rules.ForemanClientRule.Type.OS)
self.assertEqual(r.rules[0].os.os_windows, False)
self.assertEqual(r.rules[0].os.os_linux, False)
self.assertEqual(r.rules[0].os.os_darwin, True)
self.assertEqual(r.rules[1].rule_type,
foreman_rules.ForemanClientRule.Type.INTEGER)
self.assertEqual(r.rules[1].integer.field, "CLIENT_CLOCK")
self.assertEqual(
r.rules[1].integer.operator,
foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN)
self.assertEqual(r.rules[1].integer.value, 1336650631137737)
self.assertEqual(r.rules[2].rule_type,
foreman_rules.ForemanClientRule.Type.REGEX)
self.assertEqual(r.rules[2].regex.field, "SYSTEM")
self.assertEqual(r.rules[2].regex.attribute_regex, "Linux")
def testWizardStepCounterIsShownCorrectly(self):
# Open up and click on View Hunts.
self.Open("/#/hunts")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on the FileFinder item in Filesystem flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
self.Click("link=File Finder")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Step 1 out of 6')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Step 2 out of 6')")
def testLiteralExpressionIsProcessedCorrectly(self):
"""Literals are raw bytes. Testing that raw bytes are processed right."""
# Open up and click on View Hunts.
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
self.Click("css=label:contains('Conditions') ~ * button")
self.Select("css=label:contains('Condition type') ~ * select",
"Contents literal match")
self.Type("css=label:contains('Literal') ~ * input", "foo\\x0d\\xc8bar")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Review')")
# Check that the arguments summary is present.
self.WaitUntil(
self.IsElementPresent,
"css=grr-wizard-form:contains('%s')" % file_finder.FileFinder.__name__)
self.WaitUntil(self.IsTextPresent, b"foo\\x0d\\xc8bar")
# Click on "Run" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
# Close the window and check that the hunt was created.
self.Click("css=button.Next")
# Check that the hunt object was actually created
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
self.assertLen(hunts_list, 1)
# Check that the hunt was created with a correct literal value.
hunt = hunts_list[0]
self.assertEqual(hunt.args.standard.flow_name,
file_finder.FileFinder.__name__)
self.assertEqual(
hunt.args.standard.flow_args.conditions[0].contents_literal_match
.literal, b"foo\x0d\xc8bar")
def testOutputPluginsListEmptyWhenNoDefaultOutputPluginSet(self):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# There should be no dummy output plugin visible.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.WaitUntilNot(self.IsElementPresent,
"css=grr-wizard-form:contains('Dummy do do')")
def testDefaultOutputPluginIsCorrectlyAddedToThePluginsList(self):
with test_lib.ConfigOverrider(
{"AdminUI.new_hunt_wizard.default_output_plugin": "DummyOutputPlugin"}):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
# Dummy output plugin should be added by default.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('DummyOutputPlugin')")
def testLabelsHuntRuleDisplaysAvailableLabels(self):
client_id = self.SetupClient(0)
self.AddClientLabel(client_id, u"owner1", u"foo")
self.AddClientLabel(client_id, u"owner2", u"bar")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to hunt parameters page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Click 'Next' to go to output plugins page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Click 'Next' to go to hunt rules page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
# Select 'Clients With Label' rule.
self.Select("css=grr-new-hunt-wizard-form div.well select", "Label")
    # Check that there's an option present for label 'bar' (this option
    # should be selected) and for label 'foo'.
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form div.well "
".form-group:has(label:contains('Label')) "
"select option:selected[label=bar]")
self.WaitUntil(
self.IsElementPresent, "css=grr-new-hunt-wizard-form div.well "
".form-group:has(label:contains('Label')) "
"select option:not(:selected)[label=foo]")
def testLabelsHuntRuleMatchesCorrectClients(self):
client_ids = self.SetupClients(10)
self.AddClientLabel(client_ids[1], u"owner1", u"foo")
self.AddClientLabel(client_ids[1], u"owner2", u"bar")
self.AddClientLabel(client_ids[7], u"GRR", u"bar")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > i.jstree-icon")
self.Click("link=ListProcesses")
    # Click 'Next' to go to the hunt parameters page, then the output plugins
    # page, and then the hunt rules page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Hunt parameters')")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('How to process results')")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Where to run?')")
# Select 'Clients With Label' rule.
self.Click("css=grr-configure-rules-page button[name=Add]")
self.Select("css=grr-new-hunt-wizard-form div.well select", "Label")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Label')):nth-last-of-type(1) "
"select", "foo")
self.Click("css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Add label')) button")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Label')):nth-last-of-type(1) "
"select", "bar")
self.Select(
"css=grr-new-hunt-wizard-form div.well .form-group "
".form-group:has(label:contains('Match mode')) select", "Match any")
# Click 'Next' to go to the hunt overview page. Then click 'Next' again to
# submit the hunt and wait until it's created.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('Created Hunt')")
hunts_list = sorted(
data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
key=lambda x: x.create_time)
hunt = hunts_list[0]
lib_hunt.StartHunt(hunt.hunt_id)
foreman_obj = foreman.Foreman()
for client_id in client_ids:
tasks_assigned = foreman_obj.AssignTasksToClient(client_id)
if client_id in [client_ids[1], client_ids[7]]:
self.assertTrue(tasks_assigned)
else:
self.assertFalse(tasks_assigned)
def CreateSampleHunt(self, description, token=None):
self.StartHunt(
description=description,
flow_runner_args=rdf_flow_runner.FlowRunnerArgs(
flow_name=transfer.GetFile.__name__),
flow_args=transfer.GetFileArgs(
pathspec=rdf_paths.PathSpec(
path="/tmp/evil.txt",
pathtype=rdf_paths.PathSpec.PathType.TSK,
)),
client_rule_set=self._CreateForemanClientRuleSet(),
output_plugins=[
rdf_output_plugin.OutputPluginDescriptor(
plugin_name="DummyOutputPlugin",
plugin_args=gui_test_lib.DummyOutputPlugin.args_type(
filename_regex="blah!", fetch_binaries=True))
],
client_rate=60,
paused=True,
token=token)
def testPathAutocomplete(self):
# Open Hunts
self.Open("/#/hunts")
# Open "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsElementPresent,
"css=grr-wizard-form:contains('What to run?')")
# Click on Filesystem item in flows list
self.Click("css=#_Filesystem > i.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
input_selector = "css=grr-form-glob-expression input[uib-typeahead]"
# Change "path"
self.Type(input_selector, "/foo/%%path")
self.WaitUntil(self.IsElementPresent,
"css=[uib-typeahead-popup]:contains('%%environ_path%%')")
self.GetElement(input_selector).send_keys(keys.Keys.ENTER)
self.WaitUntilEqual("/foo/%%environ_path%%", self.GetValue,
input_selector + ":text")
if __name__ == "__main__":
app.run(test_lib.main)
```
#### File: selenium_tests/v2/browser_history_test.py
```python
from absl import app
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server.flows.general import webhistory
from grr_response_server.gui import gui_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
def _GenResults(browser, i):
return webhistory.CollectBrowserHistoryResult(
browser=browser,
stat_entry=rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec.OS(
path=f"/home/foo/{browser.name.lower()}-{i}"),
st_mode=0o644,
st_dev=16777220 + i,
st_nlink=1 + i,
st_uid=237586 + i,
st_gid=89939 + i,
st_size=42 + i,
st_atime=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(400000 + i),
st_mtime=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(40000 + i),
st_ctime=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4000 + i),
))
class CollectBrowserHistoryTest(gui_test_lib.GRRSeleniumTest):
"""Tests the search UI."""
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
self.RequestAndGrantClientApproval(self.client_id)
def testCorrectlyDisplaysInProgressStateForMultipleBrowsers(self):
# Start the flow with 2 browsers scheduled for collection.
flow_args = webhistory.CollectBrowserHistoryArgs(browsers=[
webhistory.CollectBrowserHistoryArgs.Browser.CHROME,
webhistory.CollectBrowserHistoryArgs.Browser.OPERA,
])
flow_test_lib.StartFlow(
webhistory.CollectBrowserHistory,
creator=self.token.username,
client_id=self.client_id,
flow_args=flow_args)
with flow_test_lib.FlowProgressOverride(
webhistory.CollectBrowserHistory,
webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.IN_PROGRESS,
num_collected_files=0,
),
webhistory.BrowserProgress(
browser=webhistory.Browser.OPERA,
status=webhistory.BrowserProgress.Status.IN_PROGRESS,
num_collected_files=0,
),
])):
self.Open(f"/v2/clients/{self.client_id}")
# Expand the flow.
self.Click("css=.flow-title:contains('Browser History')")
self.WaitUntil(self.IsElementPresent,
"css=.row:contains('Chrome') .in-progress")
self.WaitUntil(self.IsElementPresent,
"css=.row:contains('Opera') .in-progress")
# Check that other browsers are not shown.
self.WaitUntilNot(self.IsElementPresent, "css=.row:contains('Safari')")
self.WaitUntilNot(self.IsElementPresent,
"css=.row:contains('Internet Explorer')")
self.WaitUntilNot(self.IsElementPresent, "css=.row:contains('Firefox')")
def testCorrectlyDisplaysSuccessStateForSingleBrowser(self):
flow_args = webhistory.CollectBrowserHistoryArgs(
browsers=[webhistory.CollectBrowserHistoryArgs.Browser.CHROME])
flow_test_lib.StartFlow(
webhistory.CollectBrowserHistory,
creator=self.token.username,
client_id=self.client_id,
flow_args=flow_args)
with flow_test_lib.FlowProgressOverride(
webhistory.CollectBrowserHistory,
webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.SUCCESS,
num_collected_files=1,
),
])):
self.Open(f"/v2/clients/{self.client_id}")
# Expand the flow.
self.Click("css=.flow-title:contains('Browser History')")
self.WaitUntil(self.IsElementPresent,
"css=.row:contains('Chrome') .success:contains('1 file')")
# Check that other browsers are not shown.
self.WaitUntilNot(self.IsElementPresent, "css=.row:contains('Opera')")
self.WaitUntilNot(self.IsElementPresent, "css=.row:contains('Safari')")
self.WaitUntilNot(self.IsElementPresent,
"css=.row:contains('Internet Explorer')")
self.WaitUntilNot(self.IsElementPresent, "css=.row:contains('Firefox')")
def testCorrectlyDisplaysWarningStateForSingleBrowser(self):
flow_args = webhistory.CollectBrowserHistoryArgs(
browsers=[webhistory.CollectBrowserHistoryArgs.Browser.CHROME])
flow_test_lib.StartFlow(
webhistory.CollectBrowserHistory,
creator=self.token.username,
client_id=self.client_id,
flow_args=flow_args)
with flow_test_lib.FlowProgressOverride(
webhistory.CollectBrowserHistory,
webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.SUCCESS,
num_collected_files=0,
),
])):
self.Open(f"/v2/clients/{self.client_id}")
# Expand the flow.
self.Click("css=.flow-title:contains('Browser History')")
self.WaitUntil(
self.IsElementPresent,
"css=.row:contains('Chrome') .warning:contains('No files collected')")
# Check that other browsers are not shown.
self.WaitUntilNot(self.IsElementPresent, "css=.row:contains('Opera')")
self.WaitUntilNot(self.IsElementPresent, "css=.row:contains('Safari')")
self.WaitUntilNot(self.IsElementPresent,
"css=.row:contains('Internet Explorer')")
self.WaitUntilNot(self.IsElementPresent, "css=.row:contains('Firefox')")
def testCorrectlyDisplaysErrorForSingleBrowser(self):
flow_args = webhistory.CollectBrowserHistoryArgs(
browsers=[webhistory.CollectBrowserHistoryArgs.Browser.CHROME])
flow_test_lib.StartFlow(
webhistory.CollectBrowserHistory,
creator=self.token.username,
client_id=self.client_id,
flow_args=flow_args)
with flow_test_lib.FlowProgressOverride(
webhistory.CollectBrowserHistory,
webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.ERROR,
description="Something went wrong",
num_collected_files=0,
),
])):
self.Open(f"/v2/clients/{self.client_id}")
# Expand the flow.
self.Click("css=.flow-title:contains('Browser History')")
self.WaitUntil(
self.IsElementPresent,
"css=.row:contains('Chrome') .error:contains('Something went wrong')")
# Check that other browsers are not shown.
self.WaitUntilNot(self.IsElementPresent, "css=.row:contains('Opera')")
self.WaitUntilNot(self.IsElementPresent, "css=.row:contains('Safari')")
self.WaitUntilNot(self.IsElementPresent,
"css=.row:contains('Internet Explorer')")
self.WaitUntilNot(self.IsElementPresent, "css=.row:contains('Firefox')")
def testShowsDownloadButtonOnFlowCompletion(self):
flow_args = webhistory.CollectBrowserHistoryArgs(
browsers=[webhistory.CollectBrowserHistoryArgs.Browser.CHROME])
flow_id = flow_test_lib.StartFlow(
webhistory.CollectBrowserHistory,
creator=self.token.username,
client_id=self.client_id,
flow_args=flow_args)
with flow_test_lib.FlowProgressOverride(
webhistory.CollectBrowserHistory,
webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.IN_PROGRESS,
num_collected_files=0,
),
])):
self.Open(f"/v2/clients/{self.client_id}")
# Make sure that the flow panel is already displayed...
self.WaitUntil(self.IsElementPresent,
"css=.flow-title:contains('Browser History')")
# ...and then check that the 'Download all' button is not shown yet.
self.WaitUntilNot(self.IsElementPresent,
"css=button:contains('Download all')")
flow_test_lib.MarkFlowAsFinished(self.client_id, flow_id)
with flow_test_lib.FlowProgressOverride(
webhistory.CollectBrowserHistory,
webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.IN_PROGRESS,
num_collected_files=1,
),
])):
# The flow details view should get updated automatically.
self.WaitUntil(self.IsElementPresent,
"css=button:contains('Download all')")
def testDisplaysMultipleResultsForSingleBrowser(self):
flow_args = webhistory.CollectBrowserHistoryArgs(
browsers=[webhistory.CollectBrowserHistoryArgs.Browser.CHROME])
flow_id = flow_test_lib.StartFlow(
webhistory.CollectBrowserHistory,
creator=self.token.username,
client_id=self.client_id,
flow_args=flow_args)
flow_test_lib.AddResultsToFlow(
self.client_id,
flow_id,
[_GenResults(webhistory.Browser.CHROME, i) for i in range(200)],
tag="CHROME")
with flow_test_lib.FlowProgressOverride(
webhistory.CollectBrowserHistory,
webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.SUCCESS,
num_collected_files=200,
),
])):
self.Open(f"/v2/clients/{self.client_id}")
# Expand the flow.
self.Click("css=.flow-title:contains('Browser History')")
# Expand the browser.
self.Click("css=div.title:contains('Chrome')")
# Check that only first 100 results are visible. First row is the table
# header, so we start with 1.
self.WaitUntil(self.IsElementPresent, "css=.results tr:nth(1)")
self.WaitUntilNot(
self.IsElementPresent,
"css=.results tr:nth(101):contains('/home/foo/chrome-100')")
# Check that clicking Load More loads the rest.
self.Click("css=button:contains('Load More')")
self.WaitUntil(
self.IsElementPresent,
"css=.results tr:nth(200):contains('/home/foo/chrome-199')")
self.WaitUntilNot(self.IsElementPresent, "css=.results tr:nth(201)")
# Check that the "load more" button disappears when everything is loaded.
self.WaitUntilNot(self.IsElementPresent,
"css=button:contains('Load more')")
def testAllowsCopyingResultPathToClipboard(self):
flow_args = webhistory.CollectBrowserHistoryArgs(
browsers=[webhistory.CollectBrowserHistoryArgs.Browser.CHROME])
flow_id = flow_test_lib.StartFlow(
webhistory.CollectBrowserHistory,
creator=self.token.username,
client_id=self.client_id,
flow_args=flow_args)
flow_test_lib.AddResultsToFlow(
self.client_id,
flow_id, [_GenResults(webhistory.Browser.CHROME, 0)],
tag="CHROME")
with flow_test_lib.FlowProgressOverride(
webhistory.CollectBrowserHistory,
webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.SUCCESS,
num_collected_files=1,
),
])):
self.Open(f"/v2/clients/{self.client_id}")
# Expand the flow.
self.Click("css=.flow-title:contains('Browser History')")
# Expand the browser.
self.Click("css=div.title:contains('Chrome')")
# Hover and click on the copy button.
self.MoveMouseTo(
"css=.results tr:nth(1):contains('/home/foo/chrome-0') td.path")
# Click on the now visible button.
self.Click(
"css=.results tr:nth(1):contains('/home/foo/chrome-0') td.path "
"button.copy-button")
clip_value = self.GetClipboard()
self.assertEqual(clip_value, "/home/foo/chrome-0")
def testDisplaysAndHidesResultsForSingleBrowser(self):
flow_args = webhistory.CollectBrowserHistoryArgs(browsers=[
webhistory.CollectBrowserHistoryArgs.Browser.CHROME,
webhistory.CollectBrowserHistoryArgs.Browser.OPERA,
])
flow_id = flow_test_lib.StartFlow(
webhistory.CollectBrowserHistory,
creator=self.token.username,
client_id=self.client_id,
flow_args=flow_args)
flow_test_lib.AddResultsToFlow(
self.client_id,
flow_id, [_GenResults(webhistory.Browser.CHROME, 0)],
tag="CHROME")
flow_test_lib.AddResultsToFlow(
self.client_id,
flow_id, [_GenResults(webhistory.Browser.OPERA, 0)],
tag="OPERA")
with flow_test_lib.FlowProgressOverride(
webhistory.CollectBrowserHistory,
webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.SUCCESS,
num_collected_files=1,
),
webhistory.BrowserProgress(
browser=webhistory.Browser.OPERA,
status=webhistory.BrowserProgress.Status.SUCCESS,
num_collected_files=1,
)
])):
self.Open(f"/v2/clients/{self.client_id}")
self.Click("css=.flow-title:contains('Browser History')")
self.Click("css=div.title:contains('Chrome')")
self.WaitUntil(self.IsElementPresent,
"css=.results tr:contains('/home/foo/chrome-0')")
# Only Chrome's results should be shown.
self.WaitUntilNot(self.IsElementPresent,
"css=.results tr:contains('/home/foo/opera-0')")
# Second click should toggle the browser results view.
self.Click("css=div.title:contains('Chrome')")
self.WaitUntilNot(self.IsElementPresent,
"css=.results tr:contains('/home/foo/chrome-0')")
self.Click("css=div.title:contains('Opera')")
self.WaitUntil(self.IsElementPresent,
"css=.results tr:contains('/home/foo/opera-0')")
# Only Opera's results should be shown.
self.WaitUntilNot(self.IsElementPresent,
"css=.results tr:contains('/home/foo/chrome-0')")
# Second click should toggle the browser results view.
self.Click("css=div.title:contains('Opera')")
self.WaitUntilNot(self.IsElementPresent,
"css=.results tr:contains('/home/foo/opera-9')")
def testUpdatesResultsOfRunningFlowDynamically(self):
flow_args = webhistory.CollectBrowserHistoryArgs(
browsers=[webhistory.CollectBrowserHistoryArgs.Browser.CHROME])
flow_id = flow_test_lib.StartFlow(
webhistory.CollectBrowserHistory,
creator=self.token.username,
client_id=self.client_id,
flow_args=flow_args)
self.Open(f"/v2/clients/{self.client_id}")
self.Click("css=.flow-title:contains('Browser History')")
progress_0_results = webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.IN_PROGRESS,
num_collected_files=0,
)
])
with flow_test_lib.FlowProgressOverride(webhistory.CollectBrowserHistory,
progress_0_results):
self.WaitUntil(self.IsElementPresent, "css=div.title:contains('Chrome')")
self.WaitUntilNot(self.IsElementPresent,
"css=.header:contains('arrow_right')")
flow_test_lib.AddResultsToFlow(
self.client_id,
flow_id, [_GenResults(webhistory.Browser.CHROME, i) for i in range(10)],
tag="CHROME")
progress_10_results = webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.IN_PROGRESS,
num_collected_files=10,
)
])
with flow_test_lib.FlowProgressOverride(webhistory.CollectBrowserHistory,
progress_10_results):
self.WaitUntil(self.IsElementPresent,
"css=.header:contains('arrow_right')")
self.Click("css=div.title:contains('Chrome')")
self.WaitUntilEqual(10, self.GetCssCount, "css=tr:contains('/home/foo')")
flow_test_lib.AddResultsToFlow(
self.client_id,
flow_id, [_GenResults(webhistory.Browser.CHROME, 11)],
tag="CHROME")
progress_11_results = webhistory.CollectBrowserHistoryProgress(browsers=[
webhistory.BrowserProgress(
browser=webhistory.Browser.CHROME,
status=webhistory.BrowserProgress.Status.IN_PROGRESS,
num_collected_files=11,
)
])
with flow_test_lib.FlowProgressOverride(webhistory.CollectBrowserHistory,
progress_11_results):
self.WaitUntilEqual(11, self.GetCssCount, "css=tr:contains('/home/foo')")
if __name__ == "__main__":
app.run(test_lib.main)
``` |
{
"source": "jiyongseong/AzureCommon",
"score": 3
} |
#### File: python/resourceGroup/manage_resource_group.py
```python
import sys, os
#sudo pip install prettytable
from prettytable import PrettyTable
sys.path.append(os.path.abspath("../login/"))
from azureADUserLogin import *
resource_client = azure_ad_user_login()
def list_resource_group(resource_group_name=""):
rgTable = PrettyTable(['Name','Location'])
if resource_group_name == "":
for item in resource_client.resource_groups.list():
rgTable.add_row([item.name,item.location])
print(rgTable);
else:
if resource_client.resource_groups.check_existence(resource_group_name):
rg = resource_client.resource_groups.get(resource_group_name)
rgTable.add_row([rg.name,rg.location])
print(rgTable);
else:
print ("There is no resource group named " + resource_group_name);
def create_resource_group(resource_group_name, location):
if resource_client.resource_groups.check_existence(resource_group_name):
print (resource_group_name + " is already existing.\ntry other name.")
return;
else:
resource_client.resource_groups.create_or_update(resource_group_name, {'location':location})
print ("Successfully created {0} Resoure Group\nregion :{1}".format(resource_group_name, location));
def delete_resource_group(resource_group_name):
if resource_client.resource_groups.check_existence(resource_group_name):
resource_client.resource_groups.delete(resource_group_name)
print ("Successfully deleted {0} Resoure Group".format(resource_group_name));
else:
print ("Resource group '" + resource_group_name + "' does not exist.\ncheck the name.")
return;
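# Usage sketch (the group name and region below are illustrative, not from the
# repo; it assumes azure_ad_user_login() above returned a working client):
#   list_resource_group()                            # list all resource groups
#   create_resource_group("demo-rg", "koreacentral") # create an example group
#   list_resource_group("demo-rg")                   # show only that group
#   delete_resource_group("demo-rg")                 # clean it up again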
``` |
{
"source": "JiyongYang/LibPatrolSimLearn",
"score": 3
} |
#### File: LibPatrolSimLearn/libPatrolSimLearn/Calculator.py
```python
import random
import os
from math import *
from Parser import *
from Drawer import *
class Calculator:
def __init__(self, mapSizeX, mapSizeY, sizeOfGrid):
self.recentPtLst = list()
self.cumulatedPolicyHeatmap = np.zeros((101, 101, 8), dtype='f')
self.checkNumber = 5
self.mapSizeX = mapSizeX
self.mapSizeY = mapSizeY
self.sizeOfGrid = sizeOfGrid
self.ratio_w = mapSizeX / sizeOfGrid
self.ratio_h = mapSizeY / sizeOfGrid
def setCheckNumber(self, n):
self.checkNumber = n
def calCumulatePolicyHeatmap(self, policyHeatmap):
for i in range(len(policyHeatmap)):
for j in range(len(policyHeatmap[i])):
for k in range(len(policyHeatmap[i][j])):
self.cumulatedPolicyHeatmap[i][j][k] += policyHeatmap[i][j][k]
def getPolicyHeatmap(self):
return self.cumulatedPolicyHeatmap
def initial_learn(self, fileName):
lst = ReadXML(fileName, self.mapSizeX, self.mapSizeY, self.sizeOfGrid)
for agent in lst:
agent.calHoG()
agent.calHeatmap()
agent.calPolicyHeatmap()
self.calCumulatePolicyHeatmap(agent.getPolicyHeatmap())
def learn(self, fileName):
lst = ReadXML(fileName, self.mapSizeX, self.mapSizeY, self.sizeOfGrid)
self.loadData()
for agent in lst:
agent.calHoG()
agent.calHeatmap()
agent.calPolicyHeatmap()
self.calCumulatePolicyHeatmap(agent.getPolicyHeatmap())
def saveData(self):
if not os.path.exists('learnedData'):
os.makedirs('learnedData')
np.savetxt('learnedData/ld_right.txt', self.getPolicyHeatmap()[:, :, 0], fmt = '%0.1f')
np.savetxt('learnedData/ld_rightup.txt', self.getPolicyHeatmap()[:, :, 1], fmt = '%0.1f')
np.savetxt('learnedData/ld_up.txt', self.getPolicyHeatmap()[:, :, 2], fmt = '%0.1f')
np.savetxt('learnedData/ld_leftup.txt', self.getPolicyHeatmap()[:, :, 3], fmt = '%0.1f')
np.savetxt('learnedData/ld_left.txt', self.getPolicyHeatmap()[:, :, 4], fmt = '%0.1f')
np.savetxt('learnedData/ld_leftdown.txt', self.getPolicyHeatmap()[:, :, 5], fmt = '%0.1f')
np.savetxt('learnedData/ld_down.txt', self.getPolicyHeatmap()[:, :, 6], fmt = '%0.1f')
np.savetxt('learnedData/ld_rightdown.txt', self.getPolicyHeatmap()[:, :, 7], fmt = '%0.1f')
def loadData(self):
filelist = ['ld_right.txt', 'ld_rightup.txt', 'ld_up.txt', 'ld_leftup.txt', 'ld_left.txt', 'ld_leftdown.txt', 'ld_down.txt', 'ld_rightdown.txt']
cumulatedPolicyHeatmap = np.zeros((101, 101, 8), dtype='f')
# folding array layer
for i in range(8):
cumulatedPolicyHeatmap[:,:,i] = np.loadtxt("learnedData/"+filelist[i])
self.cumulatedPolicyHeatmap = cumulatedPolicyHeatmap
def drawData(self):
drawer = Drawer()
drawer.render()
drawer.print_value_all(self.cumulatedPolicyHeatmap)
def calDir(self, angle):
dir = None
# right
if (angle >= 337.5 or angle < 22.5):
dir = 0
# rightup
elif (angle >= 22.5 and angle < 67.5):
dir = 1
# up
elif (angle >= 67.5 and angle < 112.5):
dir = 2
# leftup
elif (angle >= 112.5 and angle < 157.5):
dir = 3
# left
elif (angle >= 157.5 and angle < 202.5):
dir = 4
# leftdown
elif (angle >= 202.5 and angle < 247.5):
dir = 5
# down
elif (angle >= 247.5 and angle < 292.5):
dir = 6
# rightdown
elif (angle >= 292.5 and angle < 337.5):
dir = 7
return dir
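# A quick illustration of the 45-degree binning above (values follow directly
# from the ranges): calDir(0) -> 0 (right), calDir(90) -> 2 (up),
# calDir(225) -> 5 (leftdown), calDir(350) -> 0 (right again).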
def checkWithFilter(self, cx, cy, _dir):
# 3 x 3 neighborhood filter (the cell itself plus its 8 neighbors)
filter = [(-1,-1), (0, -1), (1, -1), (-1, 0), (0, 0), (1, 0), (-1, 1), (0, 1), (1, 1)]
tendency = 0
for fPt in filter:
x = cx + fPt[0]
y = cy + fPt[1]
if x > 0 and x < (self.mapSizeX-1) and y > 0 and y < (self.mapSizeY-1):
temp = 0
tdir = 0
for i in range(8):
val = self.cumulatedPolicyHeatmap[x][y][i]
if val > temp:
temp = val
tdir = i
if temp != 0:
if _dir == tdir:
tendency += 1
if tendency >= 1:
return "Normal"
else:
return "Abnormal"
def check(self, point):
# check number of points
if len(self.recentPtLst) >= self.checkNumber:
lst = self.recentPtLst
# processing
post_pt = None
for pt in lst:
if post_pt != None:
px, py, pz = post_pt
cx, cy, cz = pt
dx = (cx - px)
dy = (cy - py)
angle = atan2(dy, dx) * 180 / pi
if angle < 0:
angle += 360
dir = self.calDir(angle)
cellpos_x = int(px / self.ratio_w)
cellpos_y = int(py / self.ratio_h)
result = self.checkWithFilter(cellpos_x, cellpos_y, dir)
print(result)
post_pt = pt
# delete oldest data
tLst = lst[1:]
self.recentPtLst = tLst
print(self.recentPtLst)
else:
self.recentPtLst.append(point)
def checkWithPoints(self, ptLst):
self.loadData()
post_pt = None
for pt in ptLst:
if post_pt != None:
px, py, pz = post_pt
cx, cy, cz = pt
dx = (cx - px)
dy = (cy - py)
angle = atan2(dy, dx) * 180 / pi
if angle < 0:
angle += 360
dir = self.calDir(angle)
cellpos_x = int(px / self.ratio_w)
cellpos_y = int(py / self.ratio_h)
result = self.checkWithFilter(cellpos_x, cellpos_y, dir)
print(result)
post_pt = pt
if __name__ == "__main__":
#initial_learn("training2/training.xml")
#load_learnedData()
mapSizeX = 101
mapSizeY = 101
sizeOfGrid = 101
cal = Calculator(mapSizeX, mapSizeY, sizeOfGrid)
#cal.initial_learn("training2/training.xml")
#cal.learn("training2/training.xml")
#cal.saveData()
#cal.drawData()
cal.checkWithPoints([(86,74.125,0), (85,75.125,0), (84,76.125,0), (73,75.125,0), (82,36.125,0)])
"""
def initial_learn(fileName):
lst = ReadXML(fileName)
cal = Calculator()
for agent in lst:
agent.calHoG()
agent.calHeatmap()
agent.calPolicyHeatmap()
cal.cumulatePolicyHeatmap(agent.getPolicyHeatmap())
#plt.imshow(agent.getHog(), cmap='hot', interpolation='nearest')
#plt.show()
#drawer = Drawer()
#drawer.render()
#drawer.print_value_all(cal.getPolicyHeatmap())
np.savetxt('learnedData/ld_right.txt', cal.getPolicyHeatmap()[:, :, 0], fmt = '%0.1f')
np.savetxt('learnedData/ld_rightup.txt', cal.getPolicyHeatmap()[:, :, 1], fmt = '%0.1f')
np.savetxt('learnedData/ld_up.txt', cal.getPolicyHeatmap()[:, :, 2], fmt = '%0.1f')
np.savetxt('learnedData/ld_leftup.txt', cal.getPolicyHeatmap()[:, :, 3], fmt = '%0.1f')
np.savetxt('learnedData/ld_left.txt', cal.getPolicyHeatmap()[:, :, 4], fmt = '%0.1f')
np.savetxt('learnedData/ld_leftdown.txt', cal.getPolicyHeatmap()[:, :, 5], fmt = '%0.1f')
np.savetxt('learnedData/ld_down.txt', cal.getPolicyHeatmap()[:, :, 6], fmt = '%0.1f')
np.savetxt('learnedData/ld_rightdown.txt', cal.getPolicyHeatmap()[:, :, 7], fmt = '%0.1f')
def load_learnedData():
# load data
filelist = ['ld_right.txt', 'ld_rightup.txt', 'ld_up.txt', 'ld_leftup.txt', 'ld_left.txt', 'ld_leftdown.txt', 'ld_down.txt', 'ld_rightdown.txt']
cumulatedPolicyHeatmap = np.zeros((101, 101, 8), dtype='f')
# folding array layer
for i in range(8):
cumulatedPolicyHeatmap[:,:,i] = np.loadtxt("learnedData/"+filelist[i])
# draw
drawer = Drawer()
drawer.render()
drawer.print_value_all(cumulatedPolicyHeatmap)
"""
```
#### File: LibPatrolSimLearn/libPatrolSimLearn/Drawer.py
```python
import tkinter as tk
from PIL import ImageTk, Image
import time
PhotoImage = ImageTk.PhotoImage
UNIT = 25
HEIGHT = 100
WIDTH = 100
REVERSE = 100
SIZEOFCANVAS_HEIGHT = 1200
SIZEOFCANVAS_WIDTH = 1200
class Drawer(tk.Tk):
def __init__(self):
super(Drawer, self).__init__()
self.geometry('{0}x{1}'.format(SIZEOFCANVAS_WIDTH, SIZEOFCANVAS_HEIGHT))#HEIGHT * UNIT, HEIGHT * UNIT))
self.shapes = self.load_images()
self.canvas = self._build_canvas()
self.texts = []
self.policyHeatmap = None
def setPolicyHeatmap(self, m):
self.policyHeatmap = m
def _build_canvas(self):
canvas = tk.Canvas(self, bg='white',
height=HEIGHT * UNIT,
width=WIDTH * UNIT,
scrollregion=(0, 0, WIDTH * UNIT, HEIGHT * UNIT))
hbar=tk.Scrollbar(self,orient=tk.HORIZONTAL)
hbar.pack(side=tk.BOTTOM,fill=tk.X)
hbar.config(command=canvas.xview)
vbar=tk.Scrollbar(self,orient=tk.VERTICAL)
vbar.pack(side=tk.RIGHT,fill=tk.Y)
vbar.config(command=canvas.yview)
# create grid line
for c in range(0, WIDTH * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, HEIGHT * UNIT
canvas.create_line(x0, y0, x1, y1)
for r in range(0, HEIGHT * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, HEIGHT * UNIT, r
canvas.create_line(x0, y0, x1, y1)
canvas.config(width=300,height=300)
canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
canvas.bind_all("<MouseWheel>", lambda event: self.canvas.yview_scroll(int(-1*(event.delta/120)), "units"))
canvas.pack(side=tk.LEFT,expand=True,fill=tk.BOTH)
#canvas.pack()
return canvas
def load_images(self):
sieofimage = 20
up = PhotoImage(
Image.open("img/up.png").resize((sieofimage, sieofimage)))
down = PhotoImage(
Image.open("img/down.png").resize((sieofimage, sieofimage)))
left = PhotoImage(
Image.open("img/left.png").resize((sieofimage, sieofimage)))
right = PhotoImage(
Image.open("img/right.png").resize((sieofimage, sieofimage)))
rightup = PhotoImage(
Image.open("img/rightup.png").resize((sieofimage, sieofimage)))
rightdown = PhotoImage(
Image.open("img/rightdown.png").resize((sieofimage, sieofimage)))
leftup = PhotoImage(
Image.open("img/leftup.png").resize((sieofimage, sieofimage)))
leftdown = PhotoImage(
Image.open("img/leftdown.png").resize((sieofimage, sieofimage)))
return right, rightup, up, leftup, left, leftdown, down, rightdown
def text_value(self, row, col, contents, action, font='Helvetica', size=7,
style='normal', anchor="nw"):
# right
if action == 0:
origin_x, origin_y = 76, 42
# rightup
elif action == 1:
origin_x, origin_y = 76, 5
# up
elif action == 2:
origin_x, origin_y = 42, 5
# leftup
elif action == 3:
origin_x, origin_y = 7, 5
# left
elif action == 4:
origin_x, origin_y = 7, 42
# leftdown
elif action == 5:
origin_x, origin_y = 7, 77
# down
elif action == 6:
origin_x, origin_y = 42, 77
# rightdown
elif action == 7:
origin_x, origin_y = 76, 77
origin_x = origin_x / 100 * UNIT
origin_y = origin_y / 100 * UNIT
x, y = origin_x + (UNIT * row), origin_y + (UNIT * col)
font = (font, str(size), style)
text = self.canvas.create_text(x, y, fill="black", text=contents,
font=font, anchor=anchor)
return self.texts.append(text)
def text_value_a(self, row, col, contents, font='Helvetica', size=7,
style='normal', anchor="nw"):
x, y = (UNIT * row), (UNIT * col)
font = (font, str(size), style)
text = self.canvas.create_text(x, y, fill="black", text=contents,
font=font, anchor=anchor)
return self.texts.append(text)
def print_value_all(self, policyHeatmap):
for i in self.texts:
self.canvas.delete(i)
self.texts.clear()
for i in range(WIDTH):
for j in range(HEIGHT):
dir = (0, 0)
bal = 0
for action in range(0, 8):
temp = policyHeatmap[i][j][action]
#self.text_value(i, REVERSE-j, round(temp, 2), action)
if dir[1] < temp:
dir = (action, temp)
bal += temp
if bal != 0:
self.canvas.create_image(i*UNIT+UNIT/2, (REVERSE-j)*UNIT+UNIT/2, image=self.shapes[dir[0]])
self.mainloop()
def print_value_all_a(self, policyHeatmap):
for i in self.texts:
self.canvas.delete(i)
self.texts.clear()
for i in range(WIDTH):
for j in range(HEIGHT):
temp = policyHeatmap[i][j]
self.text_value_a(i, REVERSE-j, temp)
self.mainloop()
def coords_to_state(self, coords):
x = int((coords[0] - (UNIT/2)) / UNIT)
y = int((coords[1] - (UNIT/2)) / UNIT)
return [x, y]
def state_to_coords(self, state):
x = int(state[0] * UNIT + (UNIT/2))
y = int(state[1] * UNIT + (UNIT/2))
return [x, y]
def render(self):
time.sleep(0.03)
self.update()
```
#### File: LibPatrolSimLearn/libPatrolSimLearn/Parser.py
```python
import xml.etree.ElementTree as ET
import copy
from Agent import *
def ReadXML(fileName, mapSizeX, mapSizeY, sizeOfGrid):
doc = ET.parse(fileName)
root = doc.getroot()
agentList = list()
for _agentList in root:
if(_agentList.tag == "AgentList"):
for _agent in _agentList: # Agent
# agnet_name = _agent.attrib["name"]
id, sc = _agent.attrib["id"].split('_')
agent_id = int(id)
agent_sc = int(sc)
agent_spd = float(_agent.attrib["spd"])
agent_type = int(_agent.attrib["type"])
agent = Agent(agent_id, agent_sc, "None", agent_type, agent_spd, mapSizeX, mapSizeY, sizeOfGrid)
waypoints = _agent.find('Waypoints')
#print(agent_id, agent_spd, agent_type, waypoints)
for _point in waypoints:
x = float(_point.attrib["x"])
y = float(_point.attrib["y"])
z = float(_point.attrib["z"])
if x > 100:
x = 100
if y > 100:
y = 100
if z > 100:
z = 100
#y = 100 - y
point = Point(x, y, z)
agent.addWayPoint(point)
agentList.append(agent)
return agentList
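# The XML layout this parser expects, reconstructed from the attribute reads
# above (element names other than AgentList/Waypoints are illustrative):
# <Scenario>
#   <AgentList>
#     <Agent id="3_0" spd="1.5" type="0">
#       <Waypoints>
#         <Point x="10.0" y="20.0" z="0.0"/>
#       </Waypoints>
#     </Agent>
#   </AgentList>
# </Scenario>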
``` |
{
"source": "jiyoojeong/gamma-ai",
"score": 2
} |
#### File: gamma-ai/energylossidentification/EnergyLoss.py
```python
import ROOT
import array
import os
import sys
###################################################################################################
class EnergyLossIdentification:
"""
This class performs energy loss training. A typical usage would look like this:
AI = EnergyLossIdentification("Ling2.seq3.quality.root", "Results", "MLP,BDT", 1000000)
AI.train()
AI.test()
"""
###################################################################################################
def __init__(self, FileName, Output, Algorithm, MaxEvents):
"""
The default constructor for class EnergyLossIdentification
Attributes
----------
FileName : string
Data file name (something like: X.maxhits2.eventclusterizer.root)
OutputPrefix: string
Output filename prefix as well as output directory name
Algorithms: string
The algorithms used during training. Separate multiples by comma (e.g. "MLP,DNNCPU")
MaxEvents: integer
The maximum number of events to use
"""
self.FileName = FileName
self.OutputPrefix = Output
self.Algorithms = Algorithm
self.MaxEvents = MaxEvents
###################################################################################################
def train(self):
"""
Switch between the various machine-learning libraries based on self.Algorithm
"""
if self.Algorithms.startswith("TMVA:"):
self.trainTMVAMethods()
elif self.Algorithms.startswith("SKL:"):
self.trainSKLMethods()
elif self.Algorithms.startswith("TF:"):
self.trainTFMethods()
else:
print("ERROR: Unknown algorithm: {}".format(self.Algorithms))
return
###################################################################################################
def loadData(self):
"""
Prepare numpy array dataset for scikit-learn and tensorflow models
"""
import time
import numpy as np
from sklearn.model_selection import train_test_split
print("{}: retrieve from ROOT tree".format(time.time()))
# Open the file
DataFile = ROOT.TFile(self.FileName)
if DataFile.IsOpen() == False:
print("Error opening data file")
return False
# Get the data tree
DataTree = DataFile.Get("Quality")
if DataTree == 0:
print("Error reading data tree from root file")
return False
Branches = DataTree.GetListOfBranches()
VariableMap = {}
# Create a map of the branches, i.e. the columns
for B in list(Branches):
if B.GetName() == "EvaluationIsCompletelyAbsorbed":
VariableMap[B.GetName()] = array.array('i', [0])
else:
VariableMap[B.GetName()] = array.array('f', [0])
DataTree.SetBranchAddress(B.GetName(), VariableMap[B.GetName()])
# transform data into numpy array
total_data = min(self.MaxEvents, DataTree.GetEntries())
X_data = np.zeros((total_data, 40)) # space holder
if self.Algorithms.startswith("TF:"):
y_data = np.zeros((total_data, 2))
else:
y_data = np.zeros((total_data, 1))
all_features = list(VariableMap.keys())
all_features.remove("SequenceLength")
all_features.remove("SimulationID")
all_features.remove("EvaluationIsReconstructable")
all_features.remove("EvaluationZenithAngle")
all_features.remove("EvaluationIsCompletelyAbsorbed") #y
print("{}: start formatting array".format(time.time()))
for x in range(0, total_data):
if x%1000 == 0 and x > 0:
print("{}: Progress: {}/{}".format(time.time(), x, total_data))
DataTree.GetEntry(x) # Get row x
new_row=[VariableMap[feature][0] for feature in all_features]
X_data[x]=np.array(new_row)
if VariableMap["EvaluationIsCompletelyAbsorbed"][0] == 1:
target=1.0
else:
target=0.0
if self.Algorithms.startswith("TF:"):
y_data[x][0]= target
y_data[x][1]= 1-target
else:
y_data[x]= target
print("{}: finish formatting array".format(time.time()))
# Split training and testing data
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size = 0.5, random_state = 0)
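# At this point X_* are (n_events, 40) float arrays holding the remaining
# branch values and y_* hold the completely-absorbed label: two one-hot
# columns for the "TF:" models, a single column otherwise.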
return X_train, X_test, y_train, y_test
def trainSKLMethods(self):
import time
import numpy as np
from sklearn import datasets
#from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.metrics import classification_report, roc_auc_score
from sklearn.metrics import classification_report,confusion_matrix
# load training and testing data
X_train, X_test, y_train, y_test = self.loadData()
# SVM
if self.Algorithms == "SKL:SVM":
print("Running support vector machine ... please stand by...")
from sklearn.svm import SVC
svclassifier = SVC(kernel='linear')
svclassifier.fit(X_train, y_train)
y_predicted = svclassifier.predict(X_test)
print(svclassifier)
# print("Training set score: %f" % mlp.score(X_train, y_train))
# print("Test set score: %f" % mlp.score(X_test, y_test))
print(confusion_matrix(y_test, y_predicted))
# Run the multi-layer perceptron
elif self.Algorithms == "SKL:MLP":
print("Running multi-layer perceptron ... please stand by...")
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(solver='lbfgs', alpha=1e-5, activation='logistic', hidden_layer_sizes=(100, 50, 30), random_state=0)
# MLPClassifier supports only the Cross-Entropy loss function
# feature scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Fit only to the training data
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
mlp.fit(X_train, y_train)
y_predicted=mlp.predict(X_test)
#[coef.shape for coef in mlp.coefs_]
print(mlp)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
print(confusion_matrix(y_test,y_predicted))
#print(classification_report(y_test,predictions))
# Run the random forest
elif self.Algorithms == "SKL:RF":
print("Running random forest ... please stand by...")
rf=RandomForestClassifier(n_estimators=1400, criterion ='entropy', random_state=0,bootstrap=False, min_samples_leaf=0.01, max_features='sqrt', min_samples_split=5, max_depth=11)
rf.fit(X_train, y_train)
y_predicted = rf.predict(X_test)
# ADABoosting decision tree
elif self.Algorithms == "SKL:ADABDC":
print("Running ADABoost'ed decision tree ... please stand by...")
dt = DecisionTreeClassifier(max_depth=8, min_samples_leaf=0.01)
bdt = AdaBoostClassifier(dt,
algorithm='SAMME',
n_estimators=800,
learning_rate=0.1)
"""from sklearn.model_selection import GridSearchCV
parameters = {"max_depth":range(3,20),"min_samples_leaf":np.arange(0.01,0.5, 0.03)}
clf = GridSearchCV(DecisionTreeClassifier(), parameters, n_jobs=4)
clf.fit(X=X_data, y=y_data)
tree_model = clf.best_estimator_
print (clf.best_score_, clf.best_params_)
return"""
#cross_val_score(clf, iris.data, iris.target, cv=10)
# train
print("{}: start training".format(time.time()))
bdt.fit(X_train, y_train)
# test
print("{}: start testing".format(time.time()))
y_predicted = bdt.predict(X_test)
# parameter adjustments
# - learning rate
# - scaling? energy value is larger but only around 1k~10k times
else:
print("ERROR: Unknown algorithm: {}".format(self.Algorithms))
return
# evaluate (roc curve)
print(classification_report(y_test, y_predicted, target_names=["background", "signal"]))
print("Area under ROC curve: %.4f"%(roc_auc_score(y_test, y_predicted)))
###################################################################################################
def trainTFMethods(self):
import tensorflow as tf
import numpy as np
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
import time
X_train, X_test, y_train, y_test = self.loadData()
# DATA SET PARAMETERS
# Get our dimensions for our different variables and placeholders:
numFeatures = X_train.shape[1]
# numLabels = number of classes we are predicting (here just 2: good or bad)
numLabels = y_train.shape[1]
if self.Algorithms == "TF:NN":
MaxIterations = 500
# Placeholders
InputDataSpaceSize=numFeatures
OutputDataSpaceSize=numLabels
print(" ... placeholders ...")
X = tf.placeholder(tf.float32, [None, InputDataSpaceSize], name="X")
Y = tf.placeholder(tf.float32, [None, OutputDataSpaceSize], name="Y")
# Layers: 1st hidden layer X1, 2nd hidden layer X2, etc.
print(" ... hidden layers ...")
H = tf.contrib.layers.fully_connected(X, 10) #, activation_fn=tf.nn.relu6, weights_initializer=tf.truncated_normal_initializer(0.0, 0.1), biases_initializer=tf.truncated_normal_initializer(0.0, 0.1))
H = tf.contrib.layers.fully_connected(H, 100) #, activation_fn=tf.nn.relu6, weights_initializer=tf.truncated_normal_initializer(0.0, 0.1), biases_initializer=tf.truncated_normal_initializer(0.0, 0.1))
H = tf.contrib.layers.fully_connected(H, 1000) #, activation_fn=tf.nn.relu6, weights_initializer=tf.truncated_normal_initializer(0.0, 0.1), biases_initializer=tf.truncated_normal_initializer(0.0, 0.1))
print(" ... output layer ...")
Output = tf.contrib.layers.fully_connected(H, OutputDataSpaceSize, activation_fn=None)
# Loss function
print(" ... loss function ...")
#LossFunction = tf.reduce_sum(np.abs(Output - Y)/TestBatchSize)
#LossFunction = tf.reduce_sum(tf.pow(Output - Y, 2))/TestBatchSize
LossFunction = tf.nn.l2_loss(Output-Y, name="squared_error_cost")
# Minimizer
print(" ... minimizer ...")
Trainer = tf.train.AdamOptimizer().minimize(LossFunction)
#Accuracy
# argmax(Y, 1) is the correct label
correct_predictions_OP = tf.equal(tf.argmax(Output,1),tf.argmax(Y,1))
accuracy_OP = tf.reduce_mean(tf.cast(correct_predictions_OP, "float"))
# Create and initialize the session
print(" ... session ...")
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(" ... writer ...")
writer = tf.summary.FileWriter("OUT_ToyModel2DGauss", sess.graph)
writer.close()
# Add ops to save and restore all the variables.
print(" ... saver ...")
Saver = tf.train.Saver()
###################################################################################################
# Step 3: Training and evaluating the network
###################################################################################################
print("Info: Training and evaluating the network")
# Train the network
#Timing = time.process_time()
TimesNoImprovement = 0
BestMeanSquaredError = sys.float_info.max
def CheckPerformance():
global TimesNoImprovement
global BestMeanSquaredError
MeanSquaredError = sess.run(tf.nn.l2_loss(Output - y_test), feed_dict={X: X_test})
print("Iteration {} - MSE of test data: {}".format(Iteration, MeanSquaredError))
print("final accuracy on test set: %s" %str(sess.run(accuracy_OP, feed_dict={X: X_test, Y: y_test})))
# Main training and evaluation loop
for Iteration in range(0, MaxIterations):
# Take care of Ctrl-C
#if Interrupted == True: break
# Train
sess.run(Trainer, feed_dict={X: X_train, Y: y_train})
# Check performance: Mean squared error
if Iteration > 0 and Iteration % 20 == 0:
CheckPerformance()
if TimesNoImprovement == 100:
print("No improvement for 30 rounds")
break;
# logistic regression
elif self.Algorithms == "TF:LR":
# TRAINING SESSION PARAMETERS
# number of times we iterate through training data
# tensorboard shows that accuracy plateaus at ~25k epochs
numEpochs = 2700
# a smarter learning rate for gradientOptimizer
learningRate = tf.train.exponential_decay(learning_rate=0.0008,
global_step= 1,
decay_steps=X_train.shape[0],
decay_rate= 0.95,
staircase=True)
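# With staircase=True, tf.train.exponential_decay follows the standard schedule
#   0.0008 * 0.95 ** floor(global_step / decay_steps),
# i.e. the rate would drop by 5% each time global_step advances by decay_steps
# (here the number of training rows).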
# tensors: placeholders
# X = X-matrix / feature-matrix / data-matrix... It's a tensor to hold our
# data. 'None' here means that we can hold any number of emails
X = tf.placeholder(tf.float32, [None, numFeatures])
# yGold = Y-matrix / label-matrix / labels... This will be our correct answers matrix.
yGold = tf.placeholder(tf.float32, [None, numLabels])
# tensors: weights and bias term for regression
# Values are randomly sampled from a Gaussian with a standard deviation of:
# sqrt(6 / (numInputNodes + numOutputNodes + 1))
weights = tf.Variable(tf.random_normal([numFeatures,numLabels],
mean=0,
stddev=np.sqrt(6/(numFeatures+numLabels+1)),
name="weights"))
bias = tf.Variable(tf.random_normal([1,numLabels],
mean=0,
stddev=np.sqrt(6/(numFeatures+numLabels+1)),
name="bias"))
######################
### PREDICTION OPS ###
######################
# INITIALIZE our weights and biases
init_OP = tf.global_variables_initializer()
# PREDICTION ALGORITHM i.e. FEEDFORWARD ALGORITHM
apply_weights_OP = tf.matmul(X, weights, name="apply_weights")
add_bias_OP = tf.add(apply_weights_OP, bias, name="add_bias")
activation_OP = tf.nn.sigmoid(add_bias_OP, name="activation")
#####################
### EVALUATION OP ###
#####################
# COST FUNCTION i.e. MEAN SQUARED ERROR
cost_OP = tf.nn.l2_loss(activation_OP-yGold, name="squared_error_cost")
#######################
### OPTIMIZATION OP ###
#######################
# OPTIMIZATION ALGORITHM i.e. GRADIENT DESCENT
training_OP = tf.train.GradientDescentOptimizer(learningRate).minimize(cost_OP)
# visualization
epoch_values=[]
accuracy_values=[]
cost_values=[]
# Turn on interactive plotting
plt.ion()
# Create the main, super plot
fig = plt.figure()
# Create two subplots on their own axes and give titles
ax1 = plt.subplot("211")
ax1.set_title("TRAINING ACCURACY", fontsize=18)
ax2 = plt.subplot("212")
ax2.set_title("TRAINING COST", fontsize=18)
plt.tight_layout()
#####################
### RUN THE GRAPH ###
#####################
# Create a tensorflow session
sess = tf.Session()
# Initialize all tensorflow variables
sess.run(init_OP)
## Ops for vizualization
# argmax(activation_OP, 1) gives the label our model thought was most likely
# argmax(yGold, 1) is the correct label
correct_predictions_OP = tf.equal(tf.argmax(activation_OP,1),tf.argmax(yGold,1))
# False is 0 and True is 1, what was our average?
accuracy_OP = tf.reduce_mean(tf.cast(correct_predictions_OP, "float"))
# Summary op for regression output
activation_summary_OP = tf.summary.histogram("output", activation_OP)
# Summary op for accuracy
accuracy_summary_OP = tf.summary.scalar("accuracy", accuracy_OP)
# Summary op for cost
cost_summary_OP = tf.summary.scalar("cost", cost_OP)
# Summary ops to check how variables (W, b) are updating after each iteration
weightSummary = tf.summary.histogram("weights", weights.eval(session=sess))
biasSummary = tf.summary.histogram("biases", bias.eval(session=sess))
# Merge all summaries
all_summary_OPS = tf.summary.merge_all()
# Summary writer
writer = tf.summary.FileWriter("summary_logs", sess.graph)
# Initialize reporting variables
cost = 0
diff = 1
# Training epochs
for i in range(numEpochs):
if i > 1 and diff < .0001:
print("change in cost %g; convergence."%diff)
break
else:
# Run training step
step = sess.run(training_OP, feed_dict={X: X_train, yGold: y_train})
# Report occasional stats
if i % 10 == 0:
# Add epoch to epoch_values
epoch_values.append(i)
# Generate accuracy stats on test data
summary_results, train_accuracy, newCost = sess.run(
[all_summary_OPS, accuracy_OP, cost_OP],
feed_dict={X: X_train, yGold: y_train}
)
# Add accuracy to live graphing variable
accuracy_values.append(train_accuracy)
# Add cost to live graphing variable
cost_values.append(newCost)
# Write summary stats to writer
writer.add_summary(summary_results, i)
# Re-assign values for variables
diff = abs(newCost - cost)
cost = newCost
#generate print statements
print("step %d, training accuracy %g"%(i, train_accuracy))
print("step %d, cost %g"%(i, newCost))
print("step %d, change in cost %g"%(i, diff))
# Plot progress to our two subplots
accuracyLine, = ax1.plot(epoch_values, accuracy_values)
costLine, = ax2.plot(epoch_values, cost_values)
fig.canvas.draw()
time.sleep(1)
# How well do we perform on held-out test data?
print("final accuracy on test set: %s" %str(sess.run(accuracy_OP,
feed_dict={X: X_test,
yGold: y_test})))
##############################
### SAVE TRAINED VARIABLES ###
##############################
# Create Saver
saver = tf.train.Saver()
# Save variables to .ckpt file
# saver.save(sess, "trained_variables.ckpt")
# Close tensorflow session
sess.close()
return
###################################################################################################
def trainTMVAMethods(self):
"""
Main training function
Returns
-------
bool
True if everything went well, False in case of an error
"""
# Open the file
DataFile = ROOT.TFile(self.FileName)
if DataFile.IsOpen() == False:
print("Error opening data file")
return False
# Get the data tree
DataTree = DataFile.Get("Quality")
if DataTree == 0:
print("Error reading data tree from root file")
return False
# Limit the number of events:
if DataTree.GetEntries() > self.MaxEvents:
print("Reducing source tree size from " + str(DataTree.GetEntries()) + " to " + str(self.MaxEvents) + " (i.e. the maximum set)")
NewTree = DataTree.CloneTree(0);
NewTree.SetDirectory(0);
for i in range(0, self.MaxEvents):
DataTree.GetEntry(i)
NewTree.Fill()
DataTree = NewTree;
# Initialize TMVA
ROOT.TMVA.Tools.Instance()
FullPrefix = self.OutputPrefix
ResultsFile = ROOT.TFile(FullPrefix + ".root", "RECREATE")
Factory = ROOT.TMVA.Factory("TMVAClassification", ResultsFile, "!V:!Silent:Color:DrawProgressBar:Transformations=I;D;P;G,D:AnalysisType=Classification")
DataLoader = ROOT.TMVA.DataLoader(self.OutputPrefix)
IgnoredBranches = [ 'SimulationID', 'SequenceLength']
Branches = DataTree.GetListOfBranches()
for Name in IgnoredBranches:
DataLoader.AddSpectator(Name, "F")
for B in list(Branches):
if not B.GetName() in IgnoredBranches:
if not B.GetName().startswith("Evaluation"):
DataLoader.AddVariable(B.GetName(), "F")
SignalCut = ROOT.TCut("EvaluationIsCompletelyAbsorbed >= 0.5")
BackgroundCut = ROOT.TCut("EvaluationIsCompletelyAbsorbed < 0.5")
DataLoader.SetInputTrees(DataTree, SignalCut, BackgroundCut)
DataLoader.PrepareTrainingAndTestTree(SignalCut, BackgroundCut, "nTrain_Signal=0:nTrain_Background=0:SplitMode=Random:NormMode=NumEvents:!V")
# Neural Networks
if 'MLP' in self.Algorithms:
method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kMLP, "MLP", "H:!V:NeuronType=tanh:VarTransform=N:NCycles=100:HiddenLayers=N+10,N-5:TestRate=5:TrainingMethod=BFGS:!UseRegulator")
#method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kMLP, "MLP", "H:!V:NeuronType=tanh:VarTransform=N:NCycles=100:HiddenLayers=N+10,N-5:TestRate=5:!UseRegulator")
#method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kMLP, "MLP", "H:!V:NeuronType=tanh:VarTransform=N:NCycles=100:HiddenLayers=N+10,N-5:TestRate=6:TrainingMethod=BFGS:Sampling=0.3:SamplingEpoch=0.8:ConvergenceImprove=1e-6:ConvergenceTests=15:!UseRegulator")
# PDEFoamBoost
if 'PDEFoamBoost' in self.Algorithms:
method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kPDEFoam, "PDEFoamBoost", "!H:!V:Boost_Num=100:Boost_Transform=linear:SigBgSeparate=F:MaxDepth=4:UseYesNoCell=T:DTLogic=MisClassificationError:FillFoamWithOrigWeights=F:TailCut=0:nActiveCells=2000:nBin=50:Nmin=200:Kernel=None:Compress=T")
#method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kPDEFoam, "PDEFoamBoost", "!H:!V:Boost_Num=30:Boost_Transform=linear:SigBgSeparate=F:MaxDepth=4:UseYesNoCell=T:DTLogic=MisClassificationError:FillFoamWithOrigWeights=F:TailCut=0:nActiveCells=500:nBin=20:Nmin=400:Kernel=None:Compress=T")
# PDERSPCA
if 'PDERSPCA' in self.Algorithms:
method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kPDERS, "PDERSPCA", "!H:!V:VolumeRangeMode=Adaptive:KernelEstimator=Gauss:GaussSigma=0.3:NEventsMin=400:NEventsMax=600:VarTransform=PCA")
# Random Forest Boosted Decision Trees
if 'BDT' in self.Algorithms:
method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kBDT, "BDT", "!H:!V:NTrees=1000:MinNodeSize=1%:MaxDepth=3:BoostType=AdaBoost:AdaBoostBeta=0.4:SeparationType=CrossEntropy:nCuts=100:PruneMethod=NoPruning")
#method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kBDT, "BDT", "!H:!V:NTrees=850:nEventsMin=150:MaxDepth=3:BoostType=AdaBoost:AdaBoostBeta=0.5:SeparationType=GiniIndex:nCuts=20:PruneMethod=NoPruning")
#method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kBDT, "BDT", "!H:!V:NTrees=1000:nEventsMin=1000:MaxDepth=4:BoostType=AdaBoost:AdaBoostBeta=0.5:SeparationType=GiniIndex:nCuts=20:PruneMethod=NoPruning")
# Support Vector Machine
if 'SVM' in self.Algorithms:
method = Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kSVM, "SVM", "Gamma=0.25:Tol=0.001:VarTransform=Norm");
# DNN
if 'DNN_CPU' in self.Algorithms:
Layout = "Layout=TANH|N,TANH|N/2,LINEAR"
Training0 = "LearningRate=1e-1,Momentum=0.9,Repetitions=1,ConvergenceSteps=30,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.5+0.5+0.5,Multithreading=True"
Training1 = "LearningRate=1e-2,Momentum=0.9,Repetitions=1,ConvergenceSteps=30,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.0+0.0+0.0,Multithreading=True"
Training2 = "LearningRate=1e-3,Momentum=0.0,Repetitions=1,ConvergenceSteps=30,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.0+0.0+0.0,Multithreading=True"
TrainingStrategy = "TrainingStrategy=" + Training0 + "|" + Training1 + "|" + Training2
Options = "!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=N:WeightInitialization=XAVIERUNIFORM:" + Layout + ":" + TrainingStrategy
Options += ":Architecture=CPU"
Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kDNN, "DNN_CPU", Options)
# DNN
if 'DNN_GPU' in self.Algorithms:
Layout = "Layout=TANH|N,TANH|N/2,LINEAR"
Training0 = "LearningRate=1e-1,Momentum=0.9,Repetitions=1,ConvergenceSteps=100,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.5+0.5+0.5,Multithreading=True"
Training1 = "LearningRate=1e-2,Momentum=0.9,Repetitions=1,ConvergenceSteps=100,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.0+0.0+0.0,Multithreading=True"
Training2 = "LearningRate=1e-3,Momentum=0.0,Repetitions=1,ConvergenceSteps=100,BatchSize=256,TestRepetitions=10,WeightDecay=1e-4,Regularization=L2,DropConfig=0.0+0.0+0.0+0.0,Multithreading=True"
TrainingStrategy = "TrainingStrategy=" + Training0 + "|" + Training1 + "|" + Training2
Options = "!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=N:WeightInitialization=XAVIERUNIFORM:" + Layout + ":" + TrainingStrategy
Options += ":Architecture=GPU"
Factory.BookMethod(DataLoader, ROOT.TMVA.Types.kDNN, "DNN_GPU", Options)
# Finally test, train & evaluate all methods
print("Started training")
Factory.TrainAllMethods()
Factory.TestAllMethods()
Factory.EvaluateAllMethods()
return True
###################################################################################################
def test(self):
"""
Main test function
Returns
-------
bool
True if everything went well, False in case of an error
"""
return True
# END
###################################################################################################
``` |
{
"source": "Jiyooung/ALGORITHM",
"score": 3
} |
#### File: programmers/Level 1/JY_Recommend_new_ID.py
```python
def solution(new_id):
new = list(new_id)
spectial = ['-', '_', '.']
point = -1
for idx, n in enumerate(new[:]):
# Step 1: convert every uppercase letter in new_id to lowercase.
if ord(n) >= 65 and ord(n) <= 90:
new[idx] = chr(ord(n) + 32)
# Step 2: remove every character other than lowercase letters, digits, hyphen (-), underscore (_) and period (.).
elif not((ord(n) >= 97 and ord(n) <= 122) or (ord(n)>=48 and ord(n) <= 57) or n in spectial):
new[idx] = ''
# Step 3: collapse two or more consecutive periods (.) into a single period.
if new[idx] == '.':
if point != idx-1:
point = idx
else:
new[idx] = ''
point += 1
if new[idx] == '' and point == idx-1:
point += 1
new = list(''.join(new)) # drop the '' placeholders
# Step 4: if a period (.) sits at the start or end of new_id, remove it.
if len(new) > 0:
for idx in range(-1,1):
if new[idx] == '.':
new[idx] = ''
new = ''.join(new)
# Step 5: if new_id is an empty string, set new_id to "a".
if len(new) == 0:
new += 'a'
# Step 6: if new_id is 16 characters or longer, keep only the first 15 characters.
# If the truncated new_id then ends with a period (.), remove that trailing period.
if len(new) >= 16:
if new[14] == '.':
new = new[:14]
else:
new = new[:15]
# Step 7: if new_id is 2 characters or shorter, append its last character until the length reaches 3.
if len(new) <= 2:
last = new[-1]
while len(new) < 3:
new += last
return new
# run test
print(solution("...!@BaT#*..y.abcdefghijklm"), "bat.y.abcdefghi")
print(solution("z-+.^."), "z--")
print(solution("=.="), "aaa")
print(solution("123_.def"), "123_.def")
print(solution("abcdefghijklmn.p"), "abcdefghijklmn")
```
#### File: programmers/Level 2/JY_Find_prime_numbers.py
```python
import itertools
def find(num): # return True if num is prime
i = 2
while i*i <= num:
if num % i == 0:
return False
i += 1
return True
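# Quick sanity check of the trial division above: find(7) -> True (no divisor
# up to sqrt(7)), find(9) -> False (divisible by 3); 0 and 1 are filtered out
# separately in solution().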
def solution(numbers):
answer = 0
number = []
for i in range(1, len(numbers)+1):
number += map(int, map(''.join, itertools.permutations(numbers, i)))
number = set(number)
for n in number:
if n != 0 and n != 1 and find(n) == True:
answer += 1
return answer
# run test
print(solution("17")) #3
```
#### File: programmers/Level 2/JY_lifeboat_1.py
```python
def solution(people, limit):
answer = 0
check = 0
start = 0
end = -1
people.sort(reverse=True)
while check < len(people):
if people[start] + people[end] <= limit: # the lightest remaining person can share the boat
check += 1
end -= 1
check += 1
start += 1
answer += 1
return answer
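# Greedy idea above: with people sorted heaviest-first, every boat carries the
# heaviest remaining person, plus the lightest one if their sum fits the limit,
# e.g. [70, 50, 80, 50] with limit 100 -> 3 boats (80, 70, 50+50).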
# run test
# people = [70,50,80,50]
people = [70, 80, 50]
limit = 100
print(solution(people, limit))
```
#### File: programmers/Level 2/JY_More_spicy_2.py
```python
import heapq
def solution(scoville, K):
answer = 0
pq = []
for s in scoville:
heapq.heappush(pq, s)
while pq[0] < K:
if len(pq) == 1:
answer = -1
break
min1 = heapq.heappop(pq)
min2 = heapq.heappop(pq)
heapq.heappush(pq, min1 + (min2 * 2))
answer += 1
return answer
# run test
print(solution([2, 3, 1, 9, 10, 12], 7)) # 2
print(solution([0, 0], 7)) # -1
print(solution([1, 1, 1, 1, 1, 1], 10)) # -1
```
#### File: programmers/Level 2/JY_Rank_search_2.py
```python
import collections
import bisect
store = collections.defaultdict(list)
def div(st): # expand st into all 16 query keys (each field kept or replaced by '-') and append the score to store
l, e, n, f, s = st.split(' ')
for l1 in [l, '-']:
for e1 in [e, '-']:
for n1 in [n, '-']:
for f1 in [f, '-']:
store[l1+' '+e1+' '+n1+' '+f1].append(int(s))
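# e.g. "java backend junior pizza 150" pushes 150 into all 16 keys, from
# "java backend junior pizza" down to "- - - -", so any query pattern becomes
# a direct dictionary lookup.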
def solution(info, query):
answer = []
for st in info:
div(st)
for s in store: # sort the scores stored under each key
store[s].sort()
for q in query:
l, _, e, _, n, _, f, s = q.split(' ')
st = l+' '+e+' '+n+' '+f
idx = bisect.bisect_left(store[st],int(s)) # binary search for the first score >= s
answer.append(len(store[st])-idx) # number of applicants with at least that score
return answer
# run test
print(solution(["java backend junior pizza 150","python frontend senior chicken 210","python frontend senior chicken 150","cpp backend senior pizza 260","java backend junior chicken 80","python backend senior chicken 50"], ["java and backend and junior and pizza 100","python and frontend and senior and chicken 200","cpp and - and senior and pizza 250","- and backend and senior and - 150","- and - and - and chicken 100","- and - and - and - 150"]), [1,1,1,1,2,4])
# print(solution())
```
#### File: programmers/Level 2/JY_Truck_passing_bridge.py
```python
import collections
def solution(bridge_length, weight, truck_weights):
answer = bridge_length
que = collections.deque()
que.append(truck_weights[0])
w = truck_weights[0]
idx = 1
while len(que) > 0:
        if idx < len(truck_weights) and len(que) < bridge_length: # condition 1: there is still room on the bridge
            if weight >= w + truck_weights[idx]: # condition 2: the bridge can carry the extra weight
que.append(truck_weights[idx])
w += truck_weights[idx]
idx += 1
else:
que.append(0)
        if len(que) == bridge_length or idx >= len(truck_weights): # no truck can be added this second
p = que.popleft()
            if p != 0: # a truck left the bridge; reduce the current load
w -= p
answer += 1
return answer
# run test
print(solution(2, 10, [7,4,5,6])) # 8
print(solution(100, 100, [10])) # 101
print(solution(100, 100, [10,10,10,10,10,10,10,10,10,10])) # 110
print(solution(5, 5, [2, 2, 2, 2, 1, 1, 1, 1, 1])) # 19
```
#### File: programmers/Level 3/JY_enforcement_camera.py
```python
def solution(routes):
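    # Sort routes and greedily keep the intersection of routes that can share one camera;
    # whenever a route misses that intersection, count one more camera and restart from it.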
answer = 1
routes.sort()
check = routes[0]
for r in routes[1:]:
        if check[1] >= r[0]: # the route overlaps the current shared interval
            check[0], check[1] = r[0], min(check[1], r[1])
        else: # no overlap, so another camera is needed
answer += 1
check = r
return answer
# run test
print(solution([[-20,15], [-14,-5], [-18,-13], [-5,-3]])) # 2
# print(solution([[-20,-15], [-10,0], [5, 10]])) # 3
```
#### File: programmers/Level 4/JY_thievery.py
```python
def solution(money):
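    # The houses form a circle, so split into two cases: rob starting from the 1st house
    # (the last house then becomes unavailable) or start from the 2nd house.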
dp = [[0]*(len(money) + 1) for _ in range(2)]
    dp[0][0], dp[0][1], dp[0][2] = 0, money[0], money[0] # case 1: rob starting from the 1st house
for idx in range(3, len(money)):
dp[0][idx] = max(dp[0][idx-2] + money[idx-1], dp[0][idx-1])
    dp[1][0], dp[1][1], dp[1][2] = 0, 0, money[1] # case 2: rob starting from the 2nd house
for idx in range(3, len(money) + 1):
dp[1][idx] = max(dp[1][idx-2] + money[idx-1], dp[1][idx-1])
return max(dp[0][-2], dp[1][-1])
# run test
print(solution([1, 2, 3, 1]), 4)
``` |
{
"source": "Jiyooung/TensorFlow_Certificate",
"score": 3
} |
#### File: 2. Convolutional Neural Networks in TensorFlow/Week 2/Exercise_2_Cats_vs_Dogs_using_augmentation_Question-FINAL.py
```python
import os
import zipfile
import random
import shutil
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from shutil import copyfile
from os import getcwd
# This code block unzips the full Cats-v-Dogs dataset to /tmp
# which will create a tmp/PetImages directory containing subdirectories
# called 'Cat' and 'Dog' (that's how the original researchers structured it)
# # Unzip the dataset archive; this only needs to be run once
# path_cats_and_dogs = f"{getcwd()}/../cats-and-dogs.zip"
# # shutil.rmtree('/tmp')  # caution: this deletes the entire /tmp folder!
#
# local_zip = path_cats_and_dogs
# zip_ref = zipfile.ZipFile(local_zip, 'r')
# zip_ref.extractall('/tmp')
# zip_ref.close()
print(len(os.listdir('/tmp/PetImages/Cat/')))
print(len(os.listdir('/tmp/PetImages/Dog/')))
# Expected Output:
# 12501
# 12501
# Use os.mkdir to create your directories
# You will need a directory for cats-v-dogs, and subdirectories for training
# and testing. These in turn will need subdirectories for 'cats' and 'dogs'
try:
# YOUR CODE GOES HERE
shutil.rmtree('/tmp/cats-v-dogs')
    os.mkdir('/tmp/cats-v-dogs')
    os.mkdir('/tmp/cats-v-dogs/training')
    os.mkdir('/tmp/cats-v-dogs/training/cats')
    os.mkdir('/tmp/cats-v-dogs/training/dogs')
    os.mkdir('/tmp/cats-v-dogs/testing')
    os.mkdir('/tmp/cats-v-dogs/testing/cats')
    os.mkdir('/tmp/cats-v-dogs/testing/dogs')
except OSError:
pass
# Write a python function called split_data which takes
# a SOURCE directory containing the files
# a TRAINING directory that a portion of the files will be copied to
# a TESTING directory that a portion of the files will be copied to
# a SPLIT SIZE to determine the portion
# The files should also be randomized, so that the training set is a random
# X% of the files, and the test set is the remaining files
# SO, for example, if SOURCE is PetImages/Cat, and SPLIT SIZE is .9
# Then 90% of the images in PetImages/Cat will be copied to the TRAINING dir
# and 10% of the images will be copied to the TESTING dir
# Also -- All images should be checked, and if they have a zero file length,
# they will not be copied over
#
# os.listdir(DIRECTORY) gives you a listing of the contents of that directory
# os.path.getsize(PATH) gives you the size of the file
# copyfile(source, destination) copies a file from source to destination
# random.sample(list, len(list)) shuffles a list
def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE):
# YOUR CODE STARTS HERE
source = os.listdir(SOURCE)
dataset = []
source_size = 1500
random_source = random.sample(source, source_size)
for file in random_source:
if (os.path.getsize(SOURCE + file) > 0):
dataset.append(file)
random_data = random.sample(dataset, len(dataset))
    training_size = int(len(dataset) * SPLIT_SIZE)
train_set = random_data[:training_size]
test_set = random_data[training_size:]
for file in train_set:
copyfile(SOURCE + file, TRAINING + file)
for file in test_set:
copyfile(SOURCE + file, TESTING + file)
# YOUR CODE ENDS HERE
CAT_SOURCE_DIR = "/tmp/PetImages/Cat/"
TRAINING_CATS_DIR = "/tmp/cats-v-dogs/training/cats/"
TESTING_CATS_DIR = "/tmp/cats-v-dogs/testing/cats/"
DOG_SOURCE_DIR = "/tmp/PetImages/Dog/"
TRAINING_DOGS_DIR = "/tmp/cats-v-dogs/training/dogs/"
TESTING_DOGS_DIR = "/tmp/cats-v-dogs/testing/dogs/"
split_size = .9
split_data(CAT_SOURCE_DIR, TRAINING_CATS_DIR, TESTING_CATS_DIR, split_size)
split_data(DOG_SOURCE_DIR, TRAINING_DOGS_DIR, TESTING_DOGS_DIR, split_size)
print(len(os.listdir('/tmp/cats-v-dogs/training/cats/')))
print(len(os.listdir('/tmp/cats-v-dogs/training/dogs/')))
print(len(os.listdir('/tmp/cats-v-dogs/testing/cats/')))
print(len(os.listdir('/tmp/cats-v-dogs/testing/dogs/')))
# Files with zero size are skipped, so the counts may deviate slightly from the expected output
# Expected output:
# 1350
# 1350
# 150
# 150
# DEFINE A KERAS MODEL TO CLASSIFY CATS V DOGS
# USE AT LEAST 3 CONVOLUTION LAYERS
model = tf.keras.models.Sequential([
# YOUR CODE HERE
tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150,150,3)),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer=RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['acc'])
TRAINING_DIR = '/tmp/cats-v-dogs/training'#YOUR CODE HERE
train_datagen = ImageDataGenerator(#YOUR CODE HERE
rescale=1./255,
zoom_range=0.2
)
# NOTE: YOU MUST USE A BATCH SIZE OF 10 (batch_size=10) FOR THE
# TRAIN GENERATOR.
train_generator = train_datagen.flow_from_directory (#YOUR CODE HERE
TRAINING_DIR,
target_size=(150,150),
batch_size=10,
class_mode='binary'
)
VALIDATION_DIR = '/tmp/cats-v-dogs/testing'#YOUR CODE HERE
validation_datagen = ImageDataGenerator(#YOUR CODE HERE
rescale=1./255,
zoom_range=0.2
)
# NOTE: YOU MUST USE A BATCH SIZE OF 10 (batch_size=10) FOR THE
# VALIDATION GENERATOR.
validation_generator = validation_datagen.flow_from_directory(#YOUR CODE HERE
VALIDATION_DIR,
target_size=(150,150),
batch_size=10,
class_mode='binary'
)
# Expected Output:
# Found 2700 images belonging to 2 classes.
# Found 300 images belonging to 2 classes.
history = model.fit_generator(train_generator,
epochs=2,
verbose=1,
validation_data=validation_generator)
# PLOT LOSS AND ACCURACY
# %matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
#-----------------------------------------------------------
# Retrieve a list of list results on training and test data
# sets for each training epoch
#-----------------------------------------------------------
acc=history.history['acc']
val_acc=history.history['val_acc']
loss=history.history['loss']
val_loss=history.history['val_loss']
epochs=range(len(acc)) # Get number of epochs
#------------------------------------------------
# Plot training and validation accuracy per epoch
#------------------------------------------------
plt.plot(epochs, acc, 'r', "Training Accuracy")
plt.plot(epochs, val_acc, 'b', "Validation Accuracy")
plt.title('Training and validation accuracy')
plt.figure()
#------------------------------------------------
# Plot training and validation loss per epoch
#------------------------------------------------
plt.plot(epochs, loss, 'r', "Training Loss")
plt.plot(epochs, val_loss, 'b', "Validation Loss")
plt.title('Training and validation loss')
# Desired output. Charts with training and validation metrics. No crash :)
``` |
{
"source": "JiYou/libfacedetection",
"score": 3
} |
#### File: opencv_dnn/python/detect.py
```python
import os
import sys
import argparse
from typing import Tuple
import cv2
import numpy as np
from priorbox import PriorBox
from utils import draw
backends = (cv2.dnn.DNN_BACKEND_DEFAULT,
cv2.dnn.DNN_BACKEND_HALIDE,
cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE,
cv2.dnn.DNN_BACKEND_OPENCV)
targets = (cv2.dnn.DNN_TARGET_CPU,
cv2.dnn.DNN_TARGET_OPENCL,
cv2.dnn.DNN_TARGET_OPENCL_FP16,
cv2.dnn.DNN_TARGET_MYRIAD)
def str2bool(v: str) -> bool:
if v.lower() in ['true', 'yes', 'on', 'y', 't']:
return True
elif v.lower() in ['false', 'no', 'off', 'n', 'f']:
return False
else:
raise NotImplementedError
parser = argparse.ArgumentParser(description='A demo for running libfacedetection using OpenCV\'s DNN module.')
# OpenCV DNN
parser.add_argument('--backend', choices=backends, default=cv2.dnn.DNN_BACKEND_DEFAULT, type=int,
help="Choose one of computation backends: "
"%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
"%d: OpenCV implementation" % backends)
parser.add_argument('--target', choices=targets, default=cv2.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: VPU' % targets)
# Location
parser.add_argument('--image', help='Path to the image.')
parser.add_argument('--model', type=str, help='Path to .onnx model file.')
# Inference
parser.add_argument('--conf_thresh', default=0.6, type=float, help='Threshold for filtering out faces with conf < conf_thresh.')
parser.add_argument('--nms_thresh', default=0.3, type=float, help='Threshold for non-max suppression.')
parser.add_argument('--keep_top_k', default=750, type=int, help='Keep at most keep_top_k results for output.')
# Result
parser.add_argument('--vis', default=True, type=str2bool, help='Set True to visualize the result image.')
parser.add_argument('--save', default='result.jpg', type=str, help='Path to save the result image.')
args = parser.parse_args()
# Build the blob
assert os.path.exists(args.image), 'File {} does not exist!'.format(args.image)
img = cv2.imread(args.image, cv2.IMREAD_COLOR)
h, w, _ = img.shape
print('Image size: h={}, w={}'.format(h, w))
blob = cv2.dnn.blobFromImage(img) # 'size' param resize the output to the given shape
# Load the net
net = cv2.dnn.readNet(args.model)
net.setPreferableBackend(args.backend)
net.setPreferableTarget(args.target)
# Run the net
output_names = ['loc', 'conf', 'iou']
net.setInput(blob)
loc, conf, iou = net.forward(output_names)
# Decode bboxes and landmarks
pb = PriorBox(input_shape=(w, h), output_shape=(w, h))
dets = pb.decode(loc, conf, iou, args.conf_thresh)
# NMS
if dets.shape[0] > 0:
# NMS from OpenCV
keep_idx = cv2.dnn.NMSBoxes(
bboxes=dets[:, 0:4].tolist(),
scores=dets[:, -1].tolist(),
score_threshold=args.conf_thresh,
nms_threshold=args.nms_thresh,
eta=1,
top_k=args.keep_top_k) # returns [box_num, class_num]
keep_idx = np.squeeze(keep_idx, axis=1) # [box_num, class_num] -> [box_num]
dets = dets[keep_idx]
print('Detection results: {} faces found'.format(dets.shape[0]))
for d in dets:
print('[{x1:.1f}, {y1:.1f}] [{x2:.1f}, {y2:.1f}] {score:.2f}'.format(
x1=d[0], y1=d[1], x2=d[2], y2=d[3], score=d[-1]))
else:
print('No faces found.')
exit()
# Draw bounding boxes and landmarks on the original image
img_res = draw(
img=img,
bboxes=dets[:, :4],
landmarks=np.reshape(dets[:, 4:14], (-1, 5, 2)),
scores=dets[:, -1]
)
if args.vis:
cv2.imshow('Detection Results on {}'.format(args.image), img_res)
cv2.resizeWindow(args.image, w, h)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Save the result image
cv2.imwrite(args.save, img_res)
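# Example invocation (the image and model paths below are placeholders):
#   python detect.py --image ./sample.jpg --model ./yunet.onnx --conf_thresh 0.6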
``` |
{
"source": "jiyounglee-0523/FourierModel",
"score": 3
} |
#### File: FourierModel/models/baseline_models.py
```python
import torch
import torch.nn as nn
from torchdiffeq import odeint
# RNN Decoder
class GRUDecoder(nn.Module):
def __init__(self, args):
super(GRUDecoder, self).__init__()
self.decoder_layers = args.decoder_layers
self.decoder_hidden_dim = args.decoder_hidden_dim
self.input_embedding = nn.Linear(1, 128)
self.init_hidden_embedding = nn.Linear(args.latent_dimension+args.num_label, args.decoder_hidden_dim)
self.GRU = nn.GRU(input_size=128, hidden_size=args.decoder_hidden_dim, num_layers=args.decoder_layers, batch_first=True, dropout=args.dropout)
self.output_fc = nn.Linear(args.decoder_hidden_dim, 1)
def forward(self, target_x, memory, x):
# target_x = (B, S, 1), memory = (B, E), x = (B, S, 1)
B = target_x.size(0)
x = torch.cat((torch.ones(B, 1, 1).cuda(), x), dim=1) # (B, S+1, 1)
x = self.input_embedding(x) # (B, E)
memory = self.init_hidden_embedding(memory)
memory = torch.broadcast_to(memory.unsqueeze(0), (self.decoder_layers, B, self.decoder_hidden_dim)) # (num_layers, B, hidden)
memory = memory.contiguous()
output, _ = self.GRU(x, memory)
output = self.output_fc(output).squeeze(-1) # (B, S+1, 1)
return output[:, :-1]
def auto_regressive(self, target_x, memory):
# target_x = (B, S, 1) z = (B, E)
B, S, _ = target_x.size()
xx = self.input_embedding(torch.ones(B, 1, 1).cuda())
memory = self.init_hidden_embedding(memory)
memory = torch.broadcast_to(memory.unsqueeze(0), (self.decoder_layers, B, self.decoder_hidden_dim))
memory = memory.contiguous()
outputs = []
for i in range(500):
output, _ = self.GRU(xx, memory)
output = self.output_fc(output)[:, -1, :]
outputs.append(output)
xx = torch.cat((xx, self.input_embedding(output).unsqueeze(1)), dim=1)
outputs = torch.stack(outputs)
outputs = outputs.permute(1, 0, 2)
return outputs
# NP
class NeuralProcess(nn.Module):
def __init__(self, args):
super(NeuralProcess, self).__init__()
layers = []
layers.append(nn.Linear(args.latent_dimension + args.num_label+ 1, 2 * args.latent_dimension))
# layers.append(nn.Linear(args.latent_dimension + 1, 2*args.latent_dimension))
layers.append(nn.SiLU())
for _ in range(args.decoder_layers):
layers.append(nn.Linear(2*args.latent_dimension, 2*args.latent_dimension))
layers.append(nn.SiLU())
layers.append(nn.Linear(2*args.latent_dimension, 1))
self.model = nn.Sequential(*layers)
def forward(self, target_x, z, x):
# target_x = (B, S, 1) z = (B, E) index (B, N)
B, S, _ = target_x.size()
E = z.size(-1)
memory = torch.broadcast_to(z.unsqueeze(1), (B, S, E))
target_x = torch.cat((memory, target_x), dim=-1) # (B, S, E+1)
output = self.model(target_x).squeeze(-1) # (B, S, 1)
return output
class ODEFunc(nn.Module):
def __init__(self, latent_dimension, decoder_layers):
super(ODEFunc, self).__init__()
layers = []
for _ in range(decoder_layers):
layers.append(nn.Linear(2 * latent_dimension, 2 * latent_dimension))
layers.append(nn.SiLU())
self.net = nn.Sequential(*layers)
def forward(self, t, x):
return self.net(x)
class ODEDecoder(nn.Module):
def __init__(self, args):
super(ODEDecoder, self).__init__()
self.fc1 = nn.Linear(args.latent_dimension + args.num_label, 2 * args.latent_dimension)
self.odenet = ODEFunc(args.latent_dimension, args.decoder_layers)
self.fc2 = nn.Linear(2*args.latent_dimension, 1)
def forward(self, target_x, z, x):
# target_x = (B, S, 1) z = (B, E)
z = self.fc1(z)
pred_y = odeint(self.odenet, z, target_x[0].squeeze(-1), method='rk4')
pred_y = self.fc2(pred_y).permute(1, 0, 2).squeeze(-1)
return pred_y
class TransformerDecoder(nn.Module):
def __init__(self, args):
super(TransformerDecoder, self).__init__()
self.dropout = nn.Dropout(p=args.dropout)
self.embedding = nn.Linear(1, 128, bias=False)
self.label_embedding = nn.Linear(args.num_label + args.latent_dimension, 128, bias=False)
# model
self.pos_embedding = nn.Linear(1, 128)
decoder_layer = nn.TransformerEncoderLayer(d_model=128, nhead=4, dim_feedforward=args.decoder_hidden_dim,
dropout = args.dropout)
self.model = nn.TransformerEncoder(decoder_layer, num_layers=args.decoder_layers)
self.output_fc = nn.Linear(128, 1, bias=False)
def forward(self, target_x, r, x):
# x (B, S, 1) target_x (B, S, 1) r (B, E)
B = r.size(0)
r = self.label_embedding(r).unsqueeze(1) # (B, 1, E)
x = self.embedding(x) # (B, S, E)
x = torch.cat((r, x), dim=1) # (B, S+1, E)
target_x = torch.cat((torch.zeros(B, 1, 1).cuda(), target_x), dim=1) # (B, S+1, 1)
target_x = self.pos_embedding(target_x) # (B, S+1, E)
x = x + target_x # (B, S+1, E)
x = self.dropout(x)
x = x.permute(1, 0, 2) # (S+1, B, E)
mask = self.generate_square_subsequent_mask(x.size(0))
output = self.model(src=x, mask=mask).permute(1, 0, 2) # (B, S+1, E)
output = self.output_fc(output).squeeze(-1)
return output[:, :-1]
def generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones((sz, sz))) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask.cuda()
def auto_regressive(self, r, target_x):
# r (B, E) target_x (B, S, 1)
with torch.no_grad():
B = r.size(0)
S = target_x.size(1)
r = self.label_embedding(r).unsqueeze(1) # (B, 1, E)
target_x = torch.cat((torch.zeros(B, 1, 1).cuda(), target_x), dim=1) # (B, S+1, 1)
dec_x = r.permute(1, 0, 2) # (1, B, E)
outputs = []
for i in range(S):
mask = self.generate_square_subsequent_mask(dec_x.size(0))
dec_span = self.pos_embedding(target_x[:, :i+1, :]).permute(1, 0, 2) # (i, B, E)
x = dec_x + dec_span
output = self.model(src=x, mask=mask) # (i, B, E)
output = self.output_fc(output)[-1] # (B, 1)
outputs.append(output)
dec_x = torch.cat((dec_x, self.embedding(output.unsqueeze(0))), dim=0)
return outputs
```
#### File: FourierModel/trainer/ConditionalTrainer.py
```python
import torch
import os
import wandb
import time
from datetime import datetime
from datasets.cond_dataset import get_dataloader
from models.latentmodel import ConditionalQueryFNP
from utils.model_utils import count_parameters, EarlyStopping
from utils.trainer_utils import log
class ConditionalBaseTrainer():
def __init__(self, args):
self.train_dataloader = get_dataloader(args, 'train')
self.eval_dataloader = get_dataloader(args, 'eval')
self.n_epochs = args.n_epochs
self.debug = args.debug
self.dataset_type = args.dataset_type
self.n_harmonics = args.n_harmonics
NP = 'NP' if args.NP else 'nonNP'
filename = f'{datetime.now().date()}_{args.dataset_type}_{args.dataset_name}_{NP}_{args.lower_bound}_{args.upper_bound}_{args.encoder}_{args.encoder_blocks}_{args.encoder_hidden_dim}_decoder_{args.decoder}_{args.decoder_layers}'
args.filename = filename
self.path = args.path + filename
self.file_path = self.path + '/' + filename
print(f'Model will be saved at {self.path}')
os.mkdir(self.path)
self.logger = log(path=self.path + '/', file=filename + '.logs')
class ConditionalNPTrainer(ConditionalBaseTrainer):
def __init__(self, args):
super(ConditionalNPTrainer, self).__init__(args)
self.model = ConditionalQueryFNP(args).cuda()
self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=args.lr)
self.alpha = 1
self.max_num = 0
if not self.debug:
wandb.init(project='FourierDecoder', config=args)
self.logger.info(f'Number of parameters: {count_parameters(self.model)}')
            self.logger.info(f'Wandb Project Name: {args.dataset_type + args.dataset_name}')
print(f'Number of parameters: {count_parameters(self.model)}')
def train(self):
best_mse = float('inf')
for n_epoch in range(self.n_epochs):
starttime = time.time()
for it, sample in enumerate(self.train_dataloader):
self.model.train()
self.optimizer.zero_grad(set_to_none=True)
samp_sin = sample['sin'].cuda() # B, S, 1
label = sample['label'].squeeze(-1).cuda() # B
orig_ts = sample['orig_ts'].cuda() # B, S
index = sample['index'].cuda() # B, N
mse_loss, kl_loss = self.model(orig_ts, samp_sin, label, index)
loss = mse_loss + self.alpha * kl_loss
# loss = mse_loss
loss.backward()
self.optimizer.step()
if not self.debug:
wandb.log({'train_loss': loss,
'train_kl_loss': kl_loss,
'train_mse_loss': mse_loss,
'epoch': n_epoch,
'alpha': self.alpha})
self.logger.info(f'[Train Loss]: {loss:.4f} [Train MSE]: {mse_loss:.4f} [Train KL]: {kl_loss:.4f}')
else:
print(f'[Train Loss]: {loss:.4f} [Train MSE]: {mse_loss:.4f} [Train KL]: {kl_loss:.4f}')
endtime = time.time()
if not self.debug:
self.logger.info(f'[Time] : {endtime-starttime}')
else:
print(f'[Time] : {endtime-starttime}')
eval_loss, eval_mse, eval_kl = self.evaluation()
if not self.debug:
wandb.log({'eval_loss': eval_loss,
'eval_mse': eval_mse,
'eval_kl': eval_kl,
'epoch': n_epoch,
'alpha': self.alpha})
self.logger.info(f'[Eval Loss]: {eval_loss:.4f} [Eval MSE]: {eval_mse:.4f} [Eval KL]: {eval_kl:.4f}')
else:
print(f'[Eval Loss]: {eval_loss:.4f} [Eval MSE]: {eval_mse:.4f} [Eval KL]: {eval_kl:.4f}')
if best_mse > eval_loss:
best_mse = eval_loss
if not self.debug:
torch.save({'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'loss': best_mse}, self.file_path+'_best.pt')
self.logger.info(f'Model parameter saved at {n_epoch}')
                    if n_epoch % 50 == 0:  # save a checkpoint every 50 epochs
torch.save({'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'loss': eval_loss}, self.file_path + f'_{n_epoch + self.max_num}.pt')
def evaluation(self):
self.model.eval()
avg_eval_loss = 0.
avg_eval_mse = 0.
avg_kl = 0.
with torch.no_grad():
for it, sample in enumerate(self.eval_dataloader):
samp_sin = sample['sin'].cuda()
label = sample['label'].squeeze(-1).cuda()
orig_ts = sample['orig_ts'].cuda()
index = sample['index'].cuda()
mse_loss, kl_loss = self.model(orig_ts, samp_sin, label, index)
loss = mse_loss + self.alpha * kl_loss
# loss = mse_loss
avg_eval_loss += (loss.item() * samp_sin.size(0))
avg_eval_mse += (mse_loss.item() * samp_sin.size(0))
avg_kl += (kl_loss * samp_sin.size(0))
avg_eval_loss /= self.eval_dataloader.dataset.__len__()
avg_eval_mse /= self.eval_dataloader.dataset.__len__()
avg_kl /= self.eval_dataloader.dataset.__len__()
return avg_eval_loss, avg_eval_mse, avg_kl
def test(self):
self.model.eval()
ckpt = torch.load(self.path)
self.model.load_state_dict(ckpt['model_state_dict'])
avg_test_loss = 0.
avg_test_mse = 0.
avg_kl = 0.
with torch.no_grad():
for it, sample in enumerate(self.test_dataloder):
samp_sin = sample['sin'].cuda()
label = sample['label'].squeeze(-1).cuda()
orig_ts = sample['orig_ts'].cuda()
mse_loss, kl_loss = self.model(orig_ts, samp_sin, label, sampling=False)
loss = mse_loss + kl_loss
avg_test_loss += (loss.item() / len(self.test_dataloder))
avg_test_mse += (mse_loss.item() / len(self.test_dataloder))
avg_kl += (kl_loss.item() / len(self.test_dataloder))
if not self.debug:
wandb.log({'test_loss': avg_test_loss,
'test_mse': avg_test_mse,
'test_kl': avg_kl})
``` |
{
"source": "jiyoungsin/fastcdc-py",
"score": 2
} |
#### File: fastcdc-py/fastcdc/cli.py
```python
import click
from click_default_group import DefaultGroup
from fastcdc import __version__
from fastcdc import chunkify
from fastcdc import benchmark
from fastcdc import split
from fastcdc import scan
@click.group(cls=DefaultGroup, default="chunkify", default_if_no_args=False)
@click.version_option(version=__version__, message="fastcdc - %(version)s")
def cli():
pass
cli.add_command(chunkify.chunkify)
cli.add_command(benchmark.benchmark)
cli.add_command(scan.scan)
cli.add_command(split.split)
if __name__ == "__main__":
cli()
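# Example invocation (the file path is a placeholder; "chunkify" is the default subcommand):
#   python -m fastcdc.cli chunkify ./some_large_file.bin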
``` |
{
"source": "jiyoungsin/Mission_Python",
"score": 2
} |
#### File: Mission_Python/listings/listing1-1.py
```python
WIDTH = 800
HEIGHT = 600
player_x = 500
player_y = 550
def draw():
screen.blit(images.backdrop, (0, 0))
``` |
{
"source": "JiYuanFeng/MCTrans",
"score": 3
} |
#### File: mctrans/utils/misc.py
```python
import random
from typing import Optional
import numpy as np
import torch
import torch.distributed as dist
from torch import Tensor
def sync_param(input, reduction='mean'):
if isinstance(input, np.ndarray):
sync_input = torch.from_numpy(input).cuda()
elif isinstance(input, torch.Tensor):
sync_input = input.clone()
else:
raise ValueError('input should be torch tensor or ndarray')
dist.all_reduce(sync_input)
if reduction == 'mean':
sync_input.div_(dist.get_world_size())
return sync_input
def is_distributed():
if dist.is_available() and dist.is_initialized():
return True
else:
return False
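# Minimal usage sketch (assumes torch.distributed has already been initialized elsewhere):
#   avg_loss = sync_param(loss.detach(), reduction='mean')  # value averaged across all ranks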
``` |
{
"source": "JiYuanFeng/mmclassification",
"score": 2
} |
#### File: tests/test_data/test_builder.py
```python
import os.path as osp
from copy import deepcopy
from unittest.mock import patch
import torch
from mmcv.utils import digit_version
from mmcls.datasets import ImageNet, build_dataloader, build_dataset
from mmcls.datasets.dataset_wrappers import (ClassBalancedDataset,
ConcatDataset, KFoldDataset,
RepeatDataset)
class TestDataloaderBuilder():
@classmethod
def setup_class(cls):
cls.data = list(range(20))
cls.samples_per_gpu = 5
cls.workers_per_gpu = 1
@patch('mmcls.datasets.builder.get_dist_info', return_value=(0, 1))
def test_single_gpu(self, _):
common_cfg = dict(
dataset=self.data,
samples_per_gpu=self.samples_per_gpu,
workers_per_gpu=self.workers_per_gpu,
dist=False)
# Test default config
dataloader = build_dataloader(**common_cfg)
if digit_version(torch.__version__) >= digit_version('1.8.0'):
assert dataloader.persistent_workers
elif hasattr(dataloader, 'persistent_workers'):
assert not dataloader.persistent_workers
assert dataloader.batch_size == self.samples_per_gpu
assert dataloader.num_workers == self.workers_per_gpu
assert not all(
torch.cat(list(iter(dataloader))) == torch.tensor(self.data))
# Test without shuffle
dataloader = build_dataloader(**common_cfg, shuffle=False)
assert all(
torch.cat(list(iter(dataloader))) == torch.tensor(self.data))
# Test with custom sampler_cfg
dataloader = build_dataloader(
**common_cfg,
sampler_cfg=dict(type='RepeatAugSampler', selected_round=0),
shuffle=False)
expect = [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6]
assert all(torch.cat(list(iter(dataloader))) == torch.tensor(expect))
@patch('mmcls.datasets.builder.get_dist_info', return_value=(0, 1))
def test_multi_gpu(self, _):
common_cfg = dict(
dataset=self.data,
samples_per_gpu=self.samples_per_gpu,
workers_per_gpu=self.workers_per_gpu,
num_gpus=2,
dist=False)
# Test default config
dataloader = build_dataloader(**common_cfg)
if digit_version(torch.__version__) >= digit_version('1.8.0'):
assert dataloader.persistent_workers
elif hasattr(dataloader, 'persistent_workers'):
assert not dataloader.persistent_workers
assert dataloader.batch_size == self.samples_per_gpu * 2
assert dataloader.num_workers == self.workers_per_gpu * 2
assert not all(
torch.cat(list(iter(dataloader))) == torch.tensor(self.data))
# Test without shuffle
dataloader = build_dataloader(**common_cfg, shuffle=False)
assert all(
torch.cat(list(iter(dataloader))) == torch.tensor(self.data))
# Test with custom sampler_cfg
dataloader = build_dataloader(
**common_cfg,
sampler_cfg=dict(type='RepeatAugSampler', selected_round=0),
shuffle=False)
expect = torch.tensor(
[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6])
assert all(torch.cat(list(iter(dataloader))) == expect)
@patch('mmcls.datasets.builder.get_dist_info', return_value=(1, 2))
def test_distributed(self, _):
common_cfg = dict(
dataset=self.data,
samples_per_gpu=self.samples_per_gpu,
workers_per_gpu=self.workers_per_gpu,
num_gpus=2, # num_gpus will be ignored in distributed environment.
dist=True)
# Test default config
dataloader = build_dataloader(**common_cfg)
if digit_version(torch.__version__) >= digit_version('1.8.0'):
assert dataloader.persistent_workers
elif hasattr(dataloader, 'persistent_workers'):
assert not dataloader.persistent_workers
assert dataloader.batch_size == self.samples_per_gpu
assert dataloader.num_workers == self.workers_per_gpu
non_expect = torch.tensor(self.data[1::2])
assert not all(torch.cat(list(iter(dataloader))) == non_expect)
# Test without shuffle
dataloader = build_dataloader(**common_cfg, shuffle=False)
expect = torch.tensor(self.data[1::2])
assert all(torch.cat(list(iter(dataloader))) == expect)
# Test with custom sampler_cfg
dataloader = build_dataloader(
**common_cfg,
sampler_cfg=dict(type='RepeatAugSampler', selected_round=0),
shuffle=False)
expect = torch.tensor(
[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6][1::2])
assert all(torch.cat(list(iter(dataloader))) == expect)
class TestDatasetBuilder():
@classmethod
def setup_class(cls):
data_prefix = osp.join(osp.dirname(__file__), '../data/dataset')
cls.dataset_cfg = dict(
type='ImageNet',
data_prefix=data_prefix,
ann_file=osp.join(data_prefix, 'ann.txt'),
pipeline=[],
test_mode=False,
)
def test_normal_dataset(self):
# Test build
dataset = build_dataset(self.dataset_cfg)
assert isinstance(dataset, ImageNet)
assert dataset.test_mode == self.dataset_cfg['test_mode']
# Test default_args
dataset = build_dataset(self.dataset_cfg, {'test_mode': True})
assert dataset.test_mode == self.dataset_cfg['test_mode']
cp_cfg = deepcopy(self.dataset_cfg)
cp_cfg.pop('test_mode')
dataset = build_dataset(cp_cfg, {'test_mode': True})
assert dataset.test_mode
def test_concat_dataset(self):
# Test build
dataset = build_dataset([self.dataset_cfg, self.dataset_cfg])
assert isinstance(dataset, ConcatDataset)
assert dataset.datasets[0].test_mode == self.dataset_cfg['test_mode']
# Test default_args
dataset = build_dataset([self.dataset_cfg, self.dataset_cfg],
{'test_mode': True})
assert dataset.datasets[0].test_mode == self.dataset_cfg['test_mode']
cp_cfg = deepcopy(self.dataset_cfg)
cp_cfg.pop('test_mode')
dataset = build_dataset([cp_cfg, cp_cfg], {'test_mode': True})
assert dataset.datasets[0].test_mode
def test_repeat_dataset(self):
# Test build
dataset = build_dataset(
dict(type='RepeatDataset', dataset=self.dataset_cfg, times=3))
assert isinstance(dataset, RepeatDataset)
assert dataset.dataset.test_mode == self.dataset_cfg['test_mode']
# Test default_args
dataset = build_dataset(
dict(type='RepeatDataset', dataset=self.dataset_cfg, times=3),
{'test_mode': True})
assert dataset.dataset.test_mode == self.dataset_cfg['test_mode']
cp_cfg = deepcopy(self.dataset_cfg)
cp_cfg.pop('test_mode')
dataset = build_dataset(
dict(type='RepeatDataset', dataset=cp_cfg, times=3),
{'test_mode': True})
assert dataset.dataset.test_mode
def test_class_balance_dataset(self):
# Test build
dataset = build_dataset(
dict(
type='ClassBalancedDataset',
dataset=self.dataset_cfg,
oversample_thr=1.,
))
assert isinstance(dataset, ClassBalancedDataset)
assert dataset.dataset.test_mode == self.dataset_cfg['test_mode']
# Test default_args
dataset = build_dataset(
dict(
type='ClassBalancedDataset',
dataset=self.dataset_cfg,
oversample_thr=1.,
), {'test_mode': True})
assert dataset.dataset.test_mode == self.dataset_cfg['test_mode']
cp_cfg = deepcopy(self.dataset_cfg)
cp_cfg.pop('test_mode')
dataset = build_dataset(
dict(
type='ClassBalancedDataset',
dataset=cp_cfg,
oversample_thr=1.,
), {'test_mode': True})
assert dataset.dataset.test_mode
def test_kfold_dataset(self):
# Test build
dataset = build_dataset(
dict(
type='KFoldDataset',
dataset=self.dataset_cfg,
fold=0,
num_splits=5,
test_mode=False,
))
assert isinstance(dataset, KFoldDataset)
assert not dataset.test_mode
assert dataset.dataset.test_mode == self.dataset_cfg['test_mode']
# Test default_args
dataset = build_dataset(
dict(
type='KFoldDataset',
dataset=self.dataset_cfg,
fold=0,
num_splits=5,
test_mode=False,
),
default_args={
'test_mode': True,
'classes': [1, 2, 3]
})
assert not dataset.test_mode
assert dataset.dataset.test_mode == self.dataset_cfg['test_mode']
assert dataset.dataset.CLASSES == [1, 2, 3]
cp_cfg = deepcopy(self.dataset_cfg)
cp_cfg.pop('test_mode')
dataset = build_dataset(
dict(
type='KFoldDataset',
dataset=self.dataset_cfg,
fold=0,
num_splits=5,
),
default_args={
'test_mode': True,
'classes': [1, 2, 3]
})
# The test_mode in default_args will be passed to KFoldDataset
assert dataset.test_mode
assert not dataset.dataset.test_mode
# Other default_args will be passed to child dataset.
assert dataset.dataset.CLASSES == [1, 2, 3]
```
#### File: test_models/test_backbones/test_vision_transformer.py
```python
import math
import os
import tempfile
from copy import deepcopy
import pytest
import torch
import torch.nn.functional as F
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.backbones import VisionTransformer
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
def test_vit_backbone():
cfg_ori = dict(
arch='b',
img_size=224,
patch_size=16,
drop_rate=0.1,
init_cfg=[
dict(
type='Kaiming',
layer='Conv2d',
mode='fan_in',
nonlinearity='linear')
])
with pytest.raises(AssertionError):
# test invalid arch
cfg = deepcopy(cfg_ori)
cfg['arch'] = 'unknown'
VisionTransformer(**cfg)
with pytest.raises(AssertionError):
# test arch without essential keys
cfg = deepcopy(cfg_ori)
cfg['arch'] = {
'num_layers': 24,
'num_heads': 16,
'feedforward_channels': 4096
}
VisionTransformer(**cfg)
# Test ViT base model with input size of 224
# and patch size of 16
model = VisionTransformer(**cfg_ori)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), True)
imgs = torch.randn(3, 3, 224, 224)
patch_token, cls_token = model(imgs)[-1]
assert cls_token.shape == (3, 768)
assert patch_token.shape == (3, 768, 14, 14)
# Test custom arch ViT without output cls token
cfg = deepcopy(cfg_ori)
cfg['arch'] = {
'embed_dims': 128,
'num_layers': 24,
'num_heads': 16,
'feedforward_channels': 1024
}
cfg['output_cls_token'] = False
model = VisionTransformer(**cfg)
patch_token = model(imgs)[-1]
assert patch_token.shape == (3, 128, 14, 14)
# Test ViT with multi out indices
cfg = deepcopy(cfg_ori)
cfg['out_indices'] = [-3, -2, -1]
model = VisionTransformer(**cfg)
for out in model(imgs):
assert out[0].shape == (3, 768, 14, 14)
assert out[1].shape == (3, 768)
def timm_resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):
# Timm version pos embed resize function.
# Refers to https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py # noqa:E501
ntok_new = posemb_new.shape[1]
if num_tokens:
posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0,
num_tokens:]
ntok_new -= num_tokens
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
if not len(gs_new): # backwards compatibility
gs_new = [int(math.sqrt(ntok_new))] * 2
assert len(gs_new) >= 2
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old,
-1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(
posemb_grid, size=gs_new, mode='bicubic', align_corners=False)
posemb_grid = posemb_grid.permute(0, 2, 3,
1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
def test_vit_weight_init():
# test weight init cfg
pretrain_cfg = dict(
arch='b',
img_size=224,
patch_size=16,
init_cfg=[dict(type='Constant', val=1., layer='Conv2d')])
pretrain_model = VisionTransformer(**pretrain_cfg)
pretrain_model.init_weights()
assert torch.allclose(pretrain_model.patch_embed.projection.weight,
torch.tensor(1.))
assert pretrain_model.pos_embed.abs().sum() > 0
pos_embed_weight = pretrain_model.pos_embed.detach()
tmpdir = tempfile.gettempdir()
checkpoint = os.path.join(tmpdir, 'test.pth')
torch.save(pretrain_model.state_dict(), checkpoint)
# test load checkpoint
finetune_cfg = dict(
arch='b',
img_size=224,
patch_size=16,
init_cfg=dict(type='Pretrained', checkpoint=checkpoint))
finetune_model = VisionTransformer(**finetune_cfg)
finetune_model.init_weights()
assert torch.allclose(finetune_model.pos_embed, pos_embed_weight)
# test load checkpoint with different img_size
finetune_cfg = dict(
arch='b',
img_size=384,
patch_size=16,
init_cfg=dict(type='Pretrained', checkpoint=checkpoint))
finetune_model = VisionTransformer(**finetune_cfg)
finetune_model.init_weights()
resized_pos_embed = timm_resize_pos_embed(pos_embed_weight,
finetune_model.pos_embed)
assert torch.allclose(finetune_model.pos_embed, resized_pos_embed)
os.remove(checkpoint)
```
#### File: mmclassification/tools/kfold-cross-valid.py
```python
import argparse
import copy
import os
import os.path as osp
import time
from datetime import datetime
from pathlib import Path
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcls import __version__
from mmcls.apis import init_random_seed, set_random_seed, train_model
from mmcls.datasets import build_dataset
from mmcls.models import build_classifier
from mmcls.utils import collect_env, get_root_logger, load_json_log
TEST_METRICS = ('precision', 'recall', 'f1_score', 'support', 'mAP', 'CP',
'CR', 'CF1', 'OP', 'OR', 'OF1', 'accuracy')
prog_description = """K-Fold cross-validation.
To start a 5-fold cross-validation experiment:
python tools/kfold-cross-valid.py $CONFIG --num-splits 5
To resume a 5-fold cross-validation from an interrupted experiment:
python tools/kfold-cross-valid.py $CONFIG --num-splits 5 --resume-from work_dirs/fold2/latest.pth
To summarize a 5-fold cross-validation:
python tools/kfold-cross-valid.py $CONFIG --num-splits 5 --summary
""" # noqa: E501
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=prog_description)
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--num-splits', type=int, help='The number of all folds.')
parser.add_argument(
'--fold',
type=int,
help='The fold used to do validation. '
'If specify, only do an experiment of the specified fold.')
parser.add_argument(
'--summary',
action='store_true',
help='Summarize the k-fold cross-validation results.')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--device', help='device used for training')
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def copy_config(old_cfg):
"""deepcopy a Config object."""
new_cfg = Config()
_cfg_dict = copy.deepcopy(old_cfg._cfg_dict)
_filename = copy.deepcopy(old_cfg._filename)
_text = copy.deepcopy(old_cfg._text)
super(Config, new_cfg).__setattr__('_cfg_dict', _cfg_dict)
super(Config, new_cfg).__setattr__('_filename', _filename)
super(Config, new_cfg).__setattr__('_text', _text)
return new_cfg
def train_single_fold(args, cfg, fold, distributed, seed):
# create the work_dir for the fold
work_dir = osp.join(cfg.work_dir, f'fold{fold}')
cfg.work_dir = work_dir
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# wrap the dataset cfg
train_dataset = dict(
type='KFoldDataset',
fold=fold,
dataset=cfg.data.train,
num_splits=args.num_splits,
seed=seed,
)
val_dataset = dict(
type='KFoldDataset',
fold=fold,
# Use the same dataset with training.
dataset=copy.deepcopy(cfg.data.train),
num_splits=args.num_splits,
seed=seed,
test_mode=True,
)
val_dataset['dataset']['pipeline'] = cfg.data.val.pipeline
cfg.data.train = train_dataset
cfg.data.val = val_dataset
cfg.data.test = val_dataset
# dump config
stem, suffix = osp.basename(args.config).rsplit('.', 1)
cfg.dump(osp.join(cfg.work_dir, f'{stem}_fold{fold}.{suffix}'))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
logger.info(
f'-------- Cross-validation: [{fold+1}/{args.num_splits}] -------- ')
# set random seeds
# Use different seed in different folds
logger.info(f'Set random seed to {seed + fold}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed + fold, deterministic=args.deterministic)
cfg.seed = seed + fold
meta['seed'] = seed + fold
model = build_classifier(cfg.model)
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
meta.update(
dict(
mmcls_version=__version__,
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES,
kfold=dict(fold=fold, num_splits=args.num_splits)))
# add an attribute for visualization convenience
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
device='cpu' if args.device == 'cpu' else 'cuda',
meta=meta)
def summary(args, cfg):
summary = dict()
for fold in range(args.num_splits):
work_dir = Path(cfg.work_dir) / f'fold{fold}'
# Find the latest training log
log_files = list(work_dir.glob('*.log.json'))
if len(log_files) == 0:
continue
log_file = sorted(log_files)[-1]
date = datetime.fromtimestamp(log_file.lstat().st_mtime)
summary[fold] = {'date': date.strftime('%Y-%m-%d %H:%M:%S')}
# Find the latest eval log
json_log = load_json_log(log_file)
epochs = sorted(list(json_log.keys()))
eval_log = {}
def is_metric_key(key):
for metric in TEST_METRICS:
if metric in key:
return True
return False
for epoch in epochs[::-1]:
if any(is_metric_key(k) for k in json_log[epoch].keys()):
eval_log = json_log[epoch]
break
summary[fold]['epoch'] = epoch
summary[fold]['metric'] = {
k: v[0] # the value is a list with only one item.
for k, v in eval_log.items() if is_metric_key(k)
}
show_summary(args, summary)
def show_summary(args, summary_data):
try:
from rich.console import Console
from rich.table import Table
except ImportError:
raise ImportError('Please run `pip install rich` to install '
'package `rich` to draw the table.')
console = Console()
table = Table(title=f'{args.num_splits}-fold Cross-validation Summary')
table.add_column('Fold')
metrics = summary_data[0]['metric'].keys()
for metric in metrics:
table.add_column(metric)
table.add_column('Epoch')
table.add_column('Date')
for fold in range(args.num_splits):
row = [f'{fold+1}']
if fold not in summary_data:
table.add_row(*row)
continue
for metric in metrics:
metric_value = summary_data[fold]['metric'].get(metric, '')
def format_value(value):
if isinstance(value, float):
return f'{value:.2f}'
if isinstance(value, (list, tuple)):
return str([format_value(i) for i in value])
else:
return str(value)
row.append(format_value(metric_value))
row.append(str(summary_data[fold]['epoch']))
row.append(summary_data[fold]['date'])
table.add_row(*row)
console.print(table)
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.summary:
summary(args, cfg)
return
# resume from the previous experiment
if args.resume_from is not None:
cfg.resume_from = args.resume_from
resume_kfold = torch.load(cfg.resume_from).get('meta',
{}).get('kfold', None)
if resume_kfold is None:
raise RuntimeError(
'No "meta" key in checkpoints or no "kfold" in the meta dict. '
'Please check if the resume checkpoint from a k-fold '
'cross-valid experiment.')
resume_fold = resume_kfold['fold']
assert args.num_splits == resume_kfold['num_splits']
else:
resume_fold = 0
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# init a unified random seed
seed = init_random_seed(args.seed)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
if args.fold is not None:
folds = [args.fold]
else:
folds = range(resume_fold, args.num_splits)
for fold in folds:
cfg_ = copy_config(cfg)
if fold != resume_fold:
cfg_.resume_from = None
train_single_fold(args, cfg_, fold, distributed, seed)
if args.fold is None:
summary(args, cfg)
if __name__ == '__main__':
main()
``` |
{
"source": "Jiyuan-Yang/paper_lookup",
"score": 3
} |
#### File: utils/parser/condition_parser.py
```python
def condition_check(string: str, condition: str) -> bool:
condition = chop_redundant_bracket(condition.strip()).strip()
level_dict = get_level_dict(condition)
if level_dict == {}:
return has_sub_string(string, condition)
else:
min_level = min(level_dict.keys())
min_level_dict = level_dict[min_level]
if min_level_dict['|']:
check_left = condition_check(string, condition[0:min_level_dict['|'][0]])
if check_left:
return True
check_right = condition_check(string, condition[min_level_dict['|'][0] + 1:])
return check_right
else:
check_left = condition_check(string, condition[0:min_level_dict['&'][0]])
if not check_left:
return False
check_right = condition_check(string, condition[min_level_dict['&'][0] + 1:])
return check_right
def get_level_dict(condition: str) -> dict:
current_level = 0
level_dict = {}
for idx, ch in enumerate(condition):
if ch == '(':
current_level += 1
elif ch == ')':
current_level -= 1
elif ch == '&' or ch == '|':
current_level_dict = level_dict.get(current_level, None)
if not current_level_dict:
level_dict[current_level] = {'&': [], '|': []}
current_level_dict = level_dict[current_level]
if ch == '&':
current_level_dict['&'].append(idx)
else:
current_level_dict['|'].append(idx)
return level_dict
def has_sub_string(string: str, sub_string: str) -> bool:
try:
_ = string.index(sub_string)
return True
except ValueError:
return False
def chop_redundant_bracket(string: str) -> str:
while True:
check, left_or_right = has_right_level(string)
if check:
break
elif left_or_right == 0:
string = string[1:]
else:
string = string[:-1]
while True:
if len(string) > 2 and string[0] == '(' and string[-1] == ')':
string = string[1:-1]
else:
return string
def has_right_level(string: str) -> (bool, int or None):
# 0 for left, 1 for right
level = 0
for ch in string:
if ch == '(':
level += 1
elif ch == ')':
level -= 1
if level == 0:
return True, None
elif level > 0:
return False, 0
else:
return False, 1
if __name__ == '__main__':
pass
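    # Minimal usage sketch (the strings below are made-up examples): a condition
    # combines substring checks with '&', '|' and parentheses.
    print(condition_check("deep learning for vision", "deep&(vision|nlp)"))   # True
    print(condition_check("graph neural networks", "deep&(vision|nlp)"))      # False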
``` |
{
"source": "jiyuanzFB/pytorch",
"score": 2
} |
#### File: distributed/fsdp/test_fsdp_state_dict.py
```python
import sys
from copy import deepcopy
from functools import partial
from typing import Any, Dict
import torch
from torch import distributed as dist
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
StateDictType,
CPUOffload
)
from torch.nn import Linear, Module
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from torch.optim import SGD
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
FSDPTest,
get_full_params,
_get_full_detached_param,
_zero_model,
_get_state_dict,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
INNER_SHAPE = [4, 4]
OUTER_SHAPE = [4, 5]
_SUPPORTED_STATE_DICT_IMPLS = ["state_dict", "local_state_dict"]
STATE_DICT_MAPPING = {
"state_dict": StateDictType.FULL_STATE_DICT,
"local_state_dict": StateDictType.LOCAL_STATE_DICT,
"sharded_state_dict": StateDictType.SHARDED_STATE_DICT,
}
class Model(Module):
def __init__(self, wrap_fsdp):
super().__init__()
self.inner = Linear(*INNER_SHAPE)
if wrap_fsdp:
self.inner = FSDP(self.inner)
self.outer = Linear(*OUTER_SHAPE)
def forward(self, x):
# Forward twice.
i = self.inner(x)
j = self.inner(x)
return self.outer(i + j)
class TestFSDPStateDict(FSDPTest):
@property
def world_size(self):
return 2
def _get_simple_nested_model(self, *fsdp_args, **fsdp_kwargs):
model = FSDP(
nn.Sequential(
FSDP(nn.Linear(10, 10, bias=False), *fsdp_args, **fsdp_kwargs),
nn.Linear(10, 10, bias=False),
),
*fsdp_args,
**fsdp_kwargs,
)
return model
def _get_simple_model(self, *fsdp_args, **fsdp_kwargs):
model = FSDP(nn.Linear(10, 10, bias=False), *fsdp_args, **fsdp_kwargs)
return model
@skip_if_lt_x_gpu(2)
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=True), CPUOffload(offload_params=False)],
)
@parametrize("fp16", [True, False])
def test_basic_save_and_load_state_dict(self, cpu_offload, fp16):
"""
Tests that we can save a state_dict and load it into a blank model
with various configs such as fp16 and cpu offload and parameters
match as expected.
"""
for model_call in [
partial(self._get_simple_nested_model, cpu_offload=cpu_offload),
partial(self._get_simple_model, cpu_offload=cpu_offload),
]:
model = model_call()
fsdp_state_dict = _get_state_dict(model, cpu_offload.offload_params, fp16)
if fp16:
# Verify fp16 is the type
for tensor in fsdp_state_dict.values():
self.assertEqual(tensor.dtype, torch.float16)
model_new = model_call()
if not cpu_offload.offload_params:
model_new = model_new.cuda()
if fp16:
model_new.half()
# zero the model to ensure parameters are different.
_zero_model(model_new)
with model.summon_full_params(), model_new.summon_full_params():
params = list(model.parameters())
params_new = list(model_new.parameters())
self.assertNotEqual(params, params_new)
# Verify parameters are the same in the new model.
model_new.load_state_dict(fsdp_state_dict)
with model_new.summon_full_params():
with model.summon_full_params():
params = list(model.parameters())
params_new = list(model_new.parameters())
self.assertEqual(params, params_new)
if fp16:
for tensor in model_new.parameters():
self.assertEqual(tensor.dtype, torch.float16)
@skip_if_lt_x_gpu(2)
def test_save_and_load_after_forward_state_dict(self):
"""
Test that saving after some training results in params being updated as
expected.
"""
torch.cuda.set_device(self.rank)
model = self._get_wrapped_model(group=torch.distributed.distributed_c10d._get_default_group())
optim = torch.optim.SGD(model.parameters(), lr=0.1)
initial_params = _get_full_detached_param(model)
for _ in range(6):
inp = model.module.get_input(torch.device("cuda"))
output = model(*inp)
loss = model.module.get_loss(inp, output).cuda()
model.module.run_backward(loss)
optim.step()
trained_params = _get_full_detached_param(model)
        # Ensure some training occurred
self.assertNotEqual(initial_params, trained_params)
# Save a copy of the state_dict
state_dict = {k: v.clone() for k, v in model.state_dict().items()}
_zero_model(model)
# Load state_dict into zeroed model
model.load_state_dict(state_dict)
loaded_params = _get_full_detached_param(model)
self.assertEqual(loaded_params, trained_params)
def _initialize_model(self, wrap_fsdp: bool, wrap_ddp: bool = True):
# keep everything deterministic for input data
torch.manual_seed(0)
model = Model(wrap_fsdp).cuda()
if wrap_fsdp:
model = FSDP(model)
elif wrap_ddp:
model = DistributedDataParallel(model, device_ids=[self.rank])
return model
@staticmethod
def _state_dict(model: Module, state_dict_type: str):
try:
enum_val = STATE_DICT_MAPPING[state_dict_type]
except KeyError:
raise ValueError(f"No state_dict type for {state_dict_type}")
with model.state_dict_type(enum_val):
return model.state_dict()
@staticmethod
def _load_state_dict(
model: Module, state_dict_type: str, state_dict: Dict[str, Any]
):
try:
enum_val = STATE_DICT_MAPPING[state_dict_type]
except KeyError:
raise ValueError(f"No state_dict for {state_dict_type}")
with model.state_dict_type(enum_val):
return model.load_state_dict(state_dict)
def _dist_train(self, wrap_fsdp: bool, state_dict_type: str = ""):
# TODO: Move this test to common_fsdp.
model = self._initialize_model(wrap_fsdp)
optim = SGD(model.parameters(), lr=0.1)
in_data = torch.rand(64, 4, requires_grad=True, device=torch.device("cuda"))
for _ in range(3):
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
if wrap_fsdp:
blank_model = FSDP(Model(True).cuda())
_zero_model(blank_model)
state_dict = self._state_dict(model, state_dict_type)
self._load_state_dict(blank_model, state_dict_type, state_dict)
return get_full_params(blank_model)
else:
return list(model.parameters())
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
def test_state_dict_save_load_flow(self, state_dict_type):
fsdp_params = self._dist_train(wrap_fsdp=True, state_dict_type=state_dict_type)
ddp_params = self._dist_train(wrap_fsdp=False)
self.assertEqual(ddp_params, fsdp_params)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
def test_fsdp_state_dict_keys(self, state_dict_type):
state_dict = self._state_dict(self._initialize_model(True), state_dict_type)
if state_dict_type == "local_state_dict":
self.assertEqual(set(["flat_param", "inner.flat_param"]), state_dict.keys())
elif state_dict_type == "state_dict":
# Keys should match local model.
local_model = self._initialize_model(wrap_fsdp=False, wrap_ddp=False)
local_keys = local_model.state_dict().keys()
self.assertEqual(state_dict.keys(), local_keys)
else:
raise NotImplementedError(f"No test for {state_dict_type}!")
@skip_if_lt_x_gpu(2)
def test_state_dict_load_into_local_module(self):
"""
Tests that FSDP's state_dict can be loaded into a local model.
"""
model = self._initialize_model(wrap_fsdp=True)
optim = SGD(model.parameters(), lr=0.1)
in_data = torch.rand(64, 4, requires_grad=True, device=torch.device("cuda"))
for _ in range(3):
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
with model.summon_full_params():
fsdp_params = deepcopy(list(model.parameters()))
# get FSDP state_dict. Note that by default we return state_dict.
fsdp_state_dict = model.state_dict()
# Create zeroed local model
blank_local_model = self._initialize_model(wrap_fsdp=False, wrap_ddp=False)
for param in blank_local_model.parameters():
with torch.no_grad():
param.zero_()
# Load fsdp's full state dict into the local and verify params are as
# expected.
blank_local_model.load_state_dict(fsdp_state_dict)
local_params = list(blank_local_model.parameters())
for fsdp_param, local_param in zip(fsdp_params, local_params):
self.assertEqual(fsdp_param, local_param)
instantiate_parametrized_tests(TestFSDPStateDict)
if __name__ == "__main__":
run_tests()
```
#### File: torch/nested/_nestedtensor.py
```python
import torch
from functools import wraps
@wraps(torch._nested_tensor)
def nested_tensor(*args, **kwargs):
return NestedTensor(torch._nested_tensor(*args, **kwargs))
# TODO: This entire class is not really necessary now that NestedTensor lives
# in tree; before it lived out of tree and there was no way to conveniently
# override the string printing behavior. Now that we are in tree, we can
# directly override _tensor_str to capture this behavior, and the wrapper subclass
# is not necessary. See also https://github.com/pytorch/pytorch/issues/73506
class NestedTensor:
# data is a torch.Tensor backed by a NestedTensorImpl
def __init__(self, impl):
self._impl = impl
@property
def dtype(self):
"""
The data type of ```self``` NestedTensor.
"""
return self._impl.dtype
@property
def layout(self):
"""
The layout of ```self``` NestedTensor.
"""
return self._impl.layout
@property
def device(self):
"""
The device of ```self``` NestedTensor.
"""
return self._impl.device
@property
def requires_grad(self):
"""
Is ```True``` if gradients need to be computed for this Tensor.
"""
return self._impl.requires_grad
def stride(self):
"""
NestedTensor currently does not have a stride. This will throw.
"""
return self._impl.stride()
def size(self):
"""
NestedTensor currently does not have a size. This will throw.
"""
return self._impl.size()
def dim(self):
"""
The dimension of ```self``` NestedTensor.
"""
return self._impl.dim()
def numel(self):
"""
The number of elements of ```self``` NestedTensor.
"""
return self._impl.numel()
def is_contiguous(self):
"""
Returns true if ```self``` NestedTensor is contiguous.
"""
return self._impl.is_contiguous()
def __str__(self):
def _str(x, indent=0, tab=" "):
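            # recursively unbind each nested level and indent it by one extra tab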
s = indent * tab + "[\n"
strs = list(map(str, x.unbind()))
strs = list(
map(
lambda xi: "\n".join(
map(lambda xij: (indent + 1) * tab + xij, xi.split("\n"))
),
strs,
)
)
s += ",\n".join(strs)
s += "\n" + indent * tab + "]"
return s
return "nested_tensor(" + _str(self) + ")"
def __repr__(self):
return self.__str__()
def unbind(self, dim=None):
if dim is None:
unbound = torch.ops.aten.unbind.int(self._impl, 0)
if len(unbound) == 0:
return ()
return unbound
return torch.ops.aten.unbind.int(self._impl, dim)
``` |
{
"source": "jiyucho9145/bfcm",
"score": 2
} |
#### File: jiyucho9145/bfcm/app.py
```python
import sys
import os
import bfcm
class App:
def run(self):
args = sys.argv
if len(args) < 3:
sys.stderr.write('usage:%s %s %s\n' % (args[0], 'command', 'config_path'))
return 1
command = args[1]
config = args[2]
if not os.path.exists(config):
            sys.stderr.write('Config file not found: %s\n' % args[2])
return 2
cg = bfcm.logic.ConfigLogic()
cm = cg.create_config_manager(config)
commander = bfcm.commander.Commander(cm)
commander.execute(command, args[3:])
return 0
if __name__ == '__main__':
app = App()
sys.exit(app.run())
``` |
{
"source": "jiyu-indeed/django-proctor",
"score": 3
} |
#### File: proctor/tests/test_context_processors.py
```python
from unittest import TestCase
from mock import Mock
from proctor import context_processors
class ContextProcessorsTest(TestCase):
def test_proc_returns_proc(self):
request = Mock(proc='test')
proc = context_processors.proc(request)
self.assertEqual({'proc': 'test'}, proc)
def test_empty_request_proc_returns_empty_dict(self):
proc = context_processors.proc(None)
self.assertEqual({}, proc)
``` |
{
"source": "jiyulongxu/playwell",
"score": 2
} |
#### File: scripts/benchmark/kafka_message_bus.py
```python
import time
import json
from kafka import KafkaProducer
PRODUCER = KafkaProducer(
bootstrap_servers="localhost:9092",
acks=1,
retries=3,
value_serializer=str.encode
)
def main():
try:
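        # user behaviors, kept in the original Chinese: "registration succeeded", "profile completed", "browsed product", "submitted order"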
for behavior in ("注册成功", "完善资料", "浏览商品", "提交订单"):
for i in range(0, 1000000):
_upload_event(str(i), behavior)
finally:
PRODUCER.close()
def _upload_event(user_id, behavior):
event = {
"type": "user_behavior",
"attr": {
"user_id": user_id,
"behavior": behavior
},
"time": int(time.time() * 1000)
}
PRODUCER.send("playwell", key=None, value=json.dumps(event))
if __name__ == "__main__":
main()
```
#### File: playwell-client/playwell/playwell.py
```python
import sys
import inspect
import argparse
from playwell import (
init_client,
API
)
def main():
init_client()
# all api modules
from playwell import (
definition,
activity,
activity_runner,
thread,
clock,
domain,
message_bus,
service,
service_runner,
slots,
system
)
all_locals = locals()
modules = [all_locals[var_name] for var_name in all_locals
if inspect.ismodule(all_locals[var_name])]
module_name, api_name, exec_args = _check_args(sys.argv, modules)
# get module
if module_name not in all_locals:
print("The module %s is not exist" % module_name)
exit(1)
module = all_locals[module_name]
if not inspect.ismodule(module):
print("The %s is not a module" % module_name)
exit(1)
if api_name == "help":
_show_help(module)
exit(0)
if not hasattr(module, api_name.upper()):
print("The api %s is not exist" % api_name)
exit(1)
api = getattr(module, api_name.upper())
parser = argparse.ArgumentParser(description='Call playwell API')
for arg_declare in api.args_declare:
parser.add_argument("--%s" % arg_declare.name, **arg_declare.meta)
args = parser.parse_args(exec_args)
api.execute(args.__dict__)
def _check_args(args, modules):
if len(args) < 3:
print("Invalid command arguments, eg.")
print(" playwell definition validate --codec yaml --file ./definition.yml")
print(" playwell activity create --definition test --display_name 'Test activity' --config '{}'")
print(" playwell activity pause --id 1")
print()
print("All modules:")
for module in modules:
print(" ", module.__name__[9:], " - ", module.__doc__.strip())
exit(1)
return args[1], args[2], args[3:]
def _show_help(module):
for element_name in dir(module):
element = getattr(module, element_name)
if not isinstance(element, API):
continue
print("%s: [%s] %s" % (element_name.lower(), element.method, element.url))
if element.args_declare:
for arg in element.args_declare:
print(" --%s %s" % (arg.name, arg.meta))
else:
print(" No need arguments")
print()
if __name__ == "__main__":
main()
```
#### File: service/message/http.py
```python
import logging
import queue
from urllib.parse import urlparse
import requests
from bottle import request as web_request
from playwell.service import Result
from playwell.service.message.bus import MessageBus
class HttpMessageBus(MessageBus):
"""HttpMessageBus
"""
CLASS_NAME = "playwell.message.bus.HttpMessageBus"
CONFIG_URL = "url"
def __init__(self, name, clazz, alive, opened, available, config):
super().__init__(name, clazz, alive, opened, available, config)
self._url = config[HttpMessageBus.CONFIG_URL]
self._buffer = queue.Queue()
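        # queue.Queue is thread-safe, so the web handler and read() can share this buffer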
def init_web_server(self):
from playwell.service.resource.web import web_server
parse_result = urlparse(self._url)
@web_server.app.post(parse_result.path)
def _post_handler():
try:
message_data_seq = web_request.json
for message_data in message_data_seq:
self._buffer.put_nowait(self._decode_message(message_data))
return Result.ok().to_dict()
except Exception as e:
logging.exception(e)
return Result.fail(
error_code="service_error",
message=str(e)
).to_dict()
def write(self, messages):
message_data_seq = self._encode_messages(messages)
if not message_data_seq:
return
requests.post(self._url, json=message_data_seq)
def read(self, max_fetch_num: int):
messages = []
while True:
try:
messages.append(self._buffer.get_nowait())
except queue.Empty:
return messages
else:
if len(messages) >= max_fetch_num:
return messages
```
#### File: message/proxy/launcher.py
```python
import atexit
from playwell.service.message.proxy.config import load_config_from_yaml
def launch(config_file_path: str):
"""初始化配置 & 开始运行
"""
load_config_from_yaml(config_file_path)
    # initialize logging
from playwell.service.log import init_logging
init_logging()
    # initialize the web server
from playwell.service.resource.web import start_web_server
start_web_server()
    # initialize the proxy
from playwell.service.message.proxy.http import HttpServiceRequestProxy
proxy = HttpServiceRequestProxy.build_with_config()
atexit.register(proxy.close)
proxy.start()
def main():
import sys
if len(sys.argv) < 2:
print("Need config file path: playwell_service_proxy ./config.yaml")
exit(1)
launch(sys.argv[1])
```
#### File: playwell_rpa/browser/form.py
```python
import logging
from playwell.service import (
Result,
single_request_service
)
from playwell.service.message import ServiceRequestMessage
@single_request_service
def submit_form(request: ServiceRequestMessage):
"""该Action可以用于自动填充表单并提交
- name: submit_form
type: browser.form
args:
request:
session_id: var("session_id")
input:
- css_selector: str(".username")
input: str("Sam")
- css_selector: str(".password")
input: str("<PASSWORD>")
submit:
css_selector: str(".submit")
"""
from playwell_rpa.browser import (
web_driver_manager,
select_element
)
args = request.args
driver = web_driver_manager.get_driver(args["session_id"])
# handle input elements
input_elements = args["input"]
for input_element_arg in input_elements:
selector, selector_expr, input_value = None, None, None
for k, v in input_element_arg.items():
if k.endswith("_selector"):
selector = k
selector_expr = v
elif k == "input":
input_value = v
input_element = select_element(driver, selector, selector_expr)
input_element.click()
input_element.send_keys(input_value)
# click submit button
submit_element_args = args["submit"]
selector, selector_expr = submit_element_args.popitem()
try:
select_element(driver, selector, selector_expr).submit()
except Exception as e:
logging.warning("Submit error: %s, try click" % str(e))
select_element(driver, selector, selector_expr).click()
return Result.ok()
```
#### File: playwell-rpa/playwell_rpa/mysql.py
```python
import logging
import pymysql
import pymysql.cursors
from typing import Sequence
from playwell.service import Result
from playwell.service.message import ServiceRequestMessage
class ExecuteSQL:
"""执行SQL语句
"""
def __init__(self):
self._config = {}
def init(self, **config):
self._config = config
def __call__(self, requests: Sequence[ServiceRequestMessage]):
connection = pymysql.connect(
cursorclass=pymysql.cursors.DictCursor,
**self._config
)
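        # DictCursor (configured above) returns each row as a dict keyed by column name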
try:
with connection.cursor() as cursor:
for req in requests:
sql = req.args["sql"]
params = req.args["params"]
cursor.execute(sql, params)
connection.commit()
return [Result.ok()] * len(requests)
except Exception as e:
logging.exception(e)
return [Result.fail(
error_code="sys_error",
message=str(e)
)] * len(requests)
finally:
connection.close()
```
#### File: playwell-rpa/playwell_rpa/system.py
```python
import os
from playwell.service import (
Result,
single_request_service
)
from playwell.service.message import ServiceRequestMessage
@single_request_service
def exec_cmd(request: ServiceRequestMessage):
cmd = request.args["cmd"]
os.system(cmd)
return Result.ok()
``` |
{
"source": "jiyulongxu/sql-tricks",
"score": 3
} |
#### File: sql-tricks/sqltricks/create.py
```python
import logging
from sqltricks.drop import DropTable
class Create(object):
CREATE = "CREATE"
class CreateTable(Create):
"""
    Usage: CreateTable(name, drop=False, runner=None)(Field, Field, Field)
"""
TYPE = "TABLE"
runner = None
drop = None
def __init__(self, name, drop=False, runner=None):
self.name = name
self.runner = runner
self.drop = drop
def __call__(self, *fields):
_ = " ".join((self.CREATE, self.TYPE, "IF NOT EXISTS", "`{}`".format(self.name),
"(\n" + ",\n".join([field.raw for field in fields]) + "\n)"
)) + ';'
if self.drop:
DropTable(self.name, runner=self.runner)()
logging.debug('\n'+_)
if callable(self.runner):
self.runner(_)
logging.info("Table created successfully")
return _
```
#### File: sql-tricks/sqltricks/insert.py
```python
import logging
import json
from collections import OrderedDict
class INSERT(object):
INSERT = "INSERT INTO"
class InsertTable(INSERT):
"""
    Usage: InsertTable(name, drop=False, runner=None)(**data)
"""
runner = None
drop = None
def __init__(self, name, drop=False, runner=None):
self.name = name
self.runner = runner
self.drop = drop
def __call__(self, **data):
names, values = [i for i in zip(*sorted(data.items(), key=lambda x:x[0]))]
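        # sorting by column name keeps names and values in a matching, deterministic order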
_ = " ".join((self.INSERT, "`{}`".format(self.name),
"(" + ",".join([name for name in names]) + ")",
"values({})".format(json.dumps(values)[1:-1])
)) + ';'
logging.debug(_)
if callable(self.runner):
self.runner(_)
logging.info("Table insert successfully")
return _
```
#### File: sql-tricks/tests/test_create.py
```python
from __future__ import with_statement
from sqltricks.create import *
from sqltricks.fields import *
import db
import logging
logging.getLogger().setLevel(logging.DEBUG)
def test_create():
print('run')
assert CreateTable('test', drop=True, runner=db.conn.execute)(
VARCHAR(128, name='name', primary_key=True, not_null=True),
INT(name='age', not_null=True),
FLOAT(name='money'),
DATE(name='update'),
) == """CREATE TABLE IF NOT EXISTS `test` (
`name` VARCHAR(128) PRIMARY KEY NOT NULL,
`age` INT NOT NULL,
`money` FLOAT,
`update` DATE
);"""
if __name__ == '__main__':
test_create()
``` |
{
"source": "jiyuuchc/cellcutter",
"score": 2
} |
#### File: alpha/ops/clustering.py
```python
import tensorflow as tf
import numpy as np
from sklearn.cluster import DBSCAN
from .common import *
def _pred_labels(locations, weights, eps, min_samples, min_weight):
''' generating mask proposals using dbscan
'''
all_labels = []
n_batches = tf.shape(locations)[0]
eps = tf.broadcast_to(tf.constant(eps), (n_batches,)).numpy()
min_samples = tf.broadcast_to(tf.constant(min_samples), (n_batches,)).numpy()
min_weight = tf.broadcast_to(tf.constant(min_weight), (n_batches,)).numpy()
weights = weights.numpy()
locations = locations.numpy()
for k in range(n_batches):
sel = weights[k] > min_weight[k]
labels = np.ones_like(weights[k], dtype=np.int32)*(-1)
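        # -1 marks pixels not assigned to any cluster (DBSCAN's noise label)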
if np.any(sel):
dbscan = DBSCAN(eps[k], min_samples = min_samples[k])
labels[sel] = dbscan.fit_predict(locations[k][sel,:], sample_weight=weights[k][sel])
all_labels.append(labels)
return tf.constant(all_labels, dtype=tf.int32)
def pred_labels(offsets, weights, eps = 0.9, min_samples = 4, min_weight=.1, from_logits=True):
_, h, w, _ = offsets.get_shape()
locations = decode_offsets(offsets)
if from_logits:
weights = tf.sigmoid(weights)
weights = weights[...,0]
preds = tf.py_function(
_pred_labels,
[locations, weights, eps, min_samples, min_weight],
tf.int32,
)
preds = tf.ensure_shape(preds, [None, h, w])
return preds
```
#### File: cellcutter/cellcutter/markers.py
```python
import numpy as np
from skimage.feature import blob_doh
from skimage.measure import regionprops
from sklearn.preprocessing import StandardScaler
from skimage.filters import gaussian
from .extra import expand_labels
def label_with_blob_detection(img, max_sigma = 10, min_sigma = 3, threshold = .01):
''' Generate maker label from nucleus imag using blob detection
'''
img = np.array(img, dtype = np.double)
img = StandardScaler().fit_transform(img.reshape(-1,1)).reshape(img.shape)
blobs = blob_doh(img, max_sigma = max_sigma, min_sigma = min_sigma, threshold = threshold)
# remove dark on bright blobs
xs, ys = np.round(blobs[:,:2]).astype(int).transpose()
blobs_p = blobs[np.greater(gaussian(img)[(xs, ys)],0), :]
xs, ys = np.round(blobs_p[:,:2]).astype(int).transpose()
label = np.zeros(shape = img.shape, dtype=int)
label[(xs,ys)] = np.arange(len(xs)) + 1
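    # labels start at 1 so that 0 remains the background value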
label = expand_labels(label, max_sigma - 2)
return label
def blob_detection(img, max_sigma = 10, min_sigma = 3, threshold = 0.01):
''' Located blobs
'''
img = np.array(img, dtype = np.double)
img = StandardScaler().fit_transform(img.reshape(-1,1)).reshape(img.shape)
blobs = blob_doh(img, max_sigma = max_sigma, min_sigma = min_sigma, threshold = threshold)
xs, ys = np.round(blobs[:,:2]).astype(int).transpose()
blobs_p = blobs[np.greater(gaussian(img)[(xs, ys)],0), :]
return np.round(blobs_p[:,:2]).astype(int)
``` |
{
"source": "jiyuuchc/mpscript",
"score": 3
} |
#### File: jiyuuchc/mpscript/mvp.py
```python
import serial
class Mvp:
stx, etx, eot, enq, ack, nak = (b'\x02',b'\x03',b'\x04',b'\x05',b'\x06',b'\x15')
def __init__(self, port):
self.ser = serial.Serial(port, bytesize=7, parity=serial.PARITY_EVEN, stopbits=2, timeout=1.0)
def getBcc(self, s):
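        # block check character: XOR of all payload bytes, then XOR with ETX (0x03), then a bitwise complement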
bcc = 0
for b in s:
bcc = bcc ^ b
bcc = bcc ^ 3
bcc = bcc ^ 255
return bytes([bcc])
def sendCmd(self, s):
self.ser.write(self.stx)
self.ser.write(s)
self.ser.write(self.etx)
self.ser.write(self.getBcc(s))
return self.ser.read()
def selectDev(self, n):
self.ser.write(b'0%i'%n)
self.ser.write(self.enq)
return self.ser.read()
def readResp(self):
ans = self.ser.read()
if (ans != self.stx) :
return None
s = b''
ans = b''
while (ans != self.etx):
s += ans
ans = self.ser.read()
        ans = self.ser.read()
        if self.getBcc(s) != ans:
return None
else:
return s
def setValvePosition(self, dev, pos):
self.ser.write(self.eot)
self.selectDev(dev)
self.sendCmd(b'Vn%i'%pos)
self.sendCmd(b'Ur')
self.sendCmd(self.eot)
``` |
{
"source": "jiyuw/bench-marking-k-means",
"score": 3
} |
#### File: jiyuw/bench-marking-k-means/003-kmeans_spark.py
```python
import sys
from pyspark import SparkContext
from pyspark.sql import *
import os
import pandas as pd
import numpy as np
import time
from tqdm import trange
def load_dataset_sp(sc, file):
"""
load dataset
:param sc: spark session
:param file: path to file
:return: RDD of data points
"""
data = sc.textFile(file).map(lambda row:row.strip().split()).map(lambda row: (int(row[0]), int(row[1])))
return data
def euc_dist_sp(point, centroids):
"""
    find the closest centroid to a data point using squared euclidean distance
    :param point: data point
    :param centroids: array of centroids
    :return: index of the closest centroid
"""
point = np.array(point)
    min_dist = float('inf')
    idx = 0
    for i in range(len(centroids)):
        dist = np.sum((point-centroids[i])**2)
        if dist < min_dist:
            min_dist = dist
            idx = i
    return idx
def flatten_tuple(t):
"""
flatten tuple output
:param t: (cen_idx, (point, 1))
:return:
"""
return tuple(list(t[1][0])+[t[0]])
def kmeans_sp(con, X, cen, max_iter):
"""
k means with spark
:param con: spark session
:param X: RDD, input data
:param cen: RDD, initial centroids
:param max_iter: max iteration number
    :return: final_assign - array of data points with the assigned cluster index in the last column
             final_cen - array of final centroids
             cost - always None (kept so the return signature matches the other implementations)
"""
for i in trange(max_iter):
# collect centroids
centroids = np.array(cen.collect())
if i == max_iter - 1:
final_cen = centroids
# calculate distance between all points to all centroids and find closest one: (cen_idx, (point, 1))
closest_cen = X.map(lambda pair: (euc_dist_sp(pair, centroids), (np.array(pair), 1)))
if i == max_iter-1:
final_assign = closest_cen.map(lambda pair: flatten_tuple(pair)).collect()
break
# re-calculate centroids
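        # reduceByKey sums coordinates and counts per cluster; the map divides to get the new centroid means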
cen = closest_cen.reduceByKey(lambda a, b: (a[0]+b[0],a[1]+b[1]))\
.map(lambda pair: tuple(pair[1][0]/pair[1][1]))
return np.array(final_assign), np.array(final_cen), None
def millitime(t):
return round(t*1000, 3)
class dataset():
def __init__(self, name, location, con=None, output_dir="results/python"):
if location not in ['python', 'spark', 'snowflake']:
raise ValueError("wrong type")
if location in ['spark', 'snowflake'] and not con:
raise ValueError("need connector for spark or snowflake")
self.name = name
self.con = con
self.location = location
self.output_file = {'label': os.path.join(output_dir, self.name + f"-label_{self.location}.txt"),
'centroid': os.path.join(output_dir, self.name + f"-cen_{self.location}.txt"),
'cost': os.path.join(output_dir, self.name + f'-cost_{self.location}.txt')}
print("#### "+name)
print(f"- {name} initiated")
def load_dataset(self, load_fn, root_dir="datasets"):
if self.con:
self.data = load_fn(self.con, os.path.join(root_dir, self.name + '.txt'))
self.gt_par = load_fn(self.con, os.path.join(root_dir, self.name + '-pa.txt'))
self.gt_cen = load_fn(self.con, os.path.join(root_dir, self.name + '-c.txt'))
self.init_cen = load_fn(self.con, os.path.join(root_dir, self.name + '-ic.txt'))
else:
self.data = load_fn(os.path.join(root_dir, self.name + '.txt'))
self.gt_par = load_fn(os.path.join(root_dir, self.name + '-pa.txt'))
self.gt_cen = load_fn(os.path.join(root_dir, self.name + '-c.txt'))
self.init_cen = load_fn(os.path.join(root_dir, self.name + '-ic.txt'))
print(f"- {self.name} data loaded")
def dataset_info(self):
n_sample, n_feature = self.data.shape
n_cluster = self.gt_cen.shape[0]
return n_sample, n_feature, n_cluster
def kmeans_train(self, train_fn, max_iter):
print(f"- {self.name} training start")
start = time.time()
if self.location == 'python':
self.label, self.centroid, self.cost = train_fn(self.data, self.init_cen, max_iter)
else:
self.label, self.centroid, self.cost = train_fn(self.con, self.data, self.init_cen, max_iter)
end = time.time()
print(f"- {self.name} trained")
t = millitime(end-start)
print(f"time used: {t}ms")
return t
def save_output(self):
np.savetxt(self.output_file['label'], self.label)
if self.cost:
np.savetxt(self.output_file['cost'], self.cost)
if self.location == 'python':
np.savetxt(self.output_file['centroid'], self.centroid.reshape(self.centroid.shape[0], -1), fmt='%d')
else:
np.savetxt(self.output_file['centroid'], self.centroid)
print(f"- {self.name} saved")
def load_output(self):
self.label = np.loadtxt(self.output_file['label'])
self.cost = np.loadtxt(self.output_file['cost'])
self.centroid = np.loadtxt(self.output_file['centroid'])
if self.location == 'python':
self.centroid = self.centroid.reshape((self.centroid.shape[0], -1, 2))
print(f"- {self.name} output loaded")
def eval_output(self):
if self.cost:
print(f"The final cost reduction is {round((self.cost[-2]-self.cost[-1])/self.cost[-2]*100, 2)}%")
def main(name, sc, mode):
cols = ['dataset', 'time']
if not os.path.exists(f'results/time_spark-{mode}.csv'):
df = pd.DataFrame(columns=cols)
else:
df = pd.read_csv(f'results/time_spark-{mode}.csv')
names = [name, name+'e1', name+'e2', name+'e3']
for n in names:
if n in list(df['dataset']):
print("#### "+n+" skipped")
continue
d = dataset(n, 'spark', sc, 'results')
d.load_dataset(load_dataset_sp, 's3://data516project/datasets')
t = d.kmeans_train(kmeans_sp, 100)
d.save_output()
tmp = pd.DataFrame([[n, t]], columns=cols)
df = df.append(tmp, ignore_index=True)
del d
df.to_csv(f'results/time_spark-{mode}.csv', index=False)
def test(name, sc):
cols = ['dataset', 'time']
if not os.path.exists('results/time_spark.csv'):
df = pd.DataFrame(columns=cols)
else:
df = pd.read_csv('results/time_spark.csv')
n = name
d = dataset(n, 'spark', sc, 'results')
d.load_dataset(load_dataset_sp, 's3://data516project/datasets')
t = d.kmeans_train(kmeans_sp, 2)
d.save_output()
tmp = pd.DataFrame([[n, t]], columns=cols)
df = df.append(tmp, ignore_index=True)
del d
print(df)
if __name__ == '__main__':
name = sys.argv[1]
mode = sys.argv[2]
sc = SparkContext()
spark = SparkSession.builder.getOrCreate()
if mode == 'test':
test(name, sc)
else:
        main(name, sc, mode)
sc.stop()
``` |