metadata | text |
---|---|
{
"source": "joway/PyAlgorithm",
"score": 3
} |
#### File: algorithm/hash/horner_hash.py
```python
from algorithm.calculation.horner import horner
def horner_hash(key, size):
_BASE = 32
return horner(key, _BASE) % size
if __name__ == '__main__':
print(horner_hash('test', 10))
```
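The `horner` helper imported above is defined elsewhere in the repository and not shown here. As a hedged sketch only, a Horner-rule string hash is usually the polynomial evaluation below; the body of `horner` is an assumption about what `algorithm.calculation.horner` computes, not a copy of it.
```python
def horner(key, base):
    # Horner's rule: evaluate c0*base^(n-1) + c1*base^(n-2) + ... + c(n-1)
    # with one multiply-add per character, no explicit powers.
    value = 0
    for ch in key:
        value = value * base + ord(ch)
    return value


def horner_hash(key, size, base=32):
    # Fold the polynomial value into one of `size` buckets.
    return horner(key, base) % size


if __name__ == '__main__':
    print(horner_hash('test', 10))  # same call as in the file above
```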
#### File: algorithm/searches/binary_search.py
```python
def binary_search(val, sorted_data):
length = len(sorted_data)
# [start, stop]
start = 0
stop = length - 1
while start <= stop:
mid = (stop + start) // 2
if sorted_data[mid] < val:
start = mid + 1
elif sorted_data[mid] > val:
stop = mid - 1
else:
return mid
return -1
```
#### File: datastructure/graphs/dfs.py
```python
from .graph import Graph
'''
Depth-first search built on the Graph base class (traversal not yet implemented).
'''
class DFS(Graph):
def __init__(self, size=10, graph=None):
super().__init__(size=size, graph=graph)
@classmethod
def dsf(cls, graph):
pass
```
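The `dsf` classmethod above is an empty stub, and the `Graph` base class is not shown in this excerpt. For illustration only, here is a self-contained iterative depth-first traversal over a plain adjacency matrix (a list of lists where a truthy entry marks an edge); it is a sketch of the technique, not the repository's Graph/DFS API.
```python
def dfs_order(matrix, start=0):
    """Return vertices in depth-first order for an adjacency-matrix graph.

    `matrix[u][v]` is truthy when there is an edge u -> v.
    """
    visited = set()
    order = []
    stack = [start]
    while stack:
        u = stack.pop()
        if u in visited:
            continue
        visited.add(u)
        order.append(u)
        # Push neighbours in reverse so lower-numbered vertices are visited first.
        for v in range(len(matrix[u]) - 1, -1, -1):
            if matrix[u][v] and v not in visited:
                stack.append(v)
    return order


if __name__ == '__main__':
    adj = [
        [0, 1, 1, 0],
        [0, 0, 0, 1],
        [0, 0, 0, 1],
        [0, 0, 0, 0],
    ]
    print(dfs_order(adj))  # [0, 1, 3, 2]
```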
#### File: datastructure/graphs/dijkstra.py
```python
import copy
from .graph import Graph
'''
Single-source shortest path (Dijkstra).
Computes the shortest paths from one vertex to all other vertices in a single run,
expanding outward from the source in a breadth-first fashion.
'''
class Dijkstra(Graph):
def __init__(self, size=10, graph=None):
super().__init__(size=size, graph=graph)
self.result = copy.deepcopy(self.graph)
def shortest_path(self, start=None, end=None):
S = {start: 0}
# initialize U with the distances from start to every other vertex
U = {v: self.graph[start][v] for v in range(0, self.size) if v != start}
while len(S) < self.size:
# the key argument of min() takes a function;
# its return value is used as the basis for comparison/ordering
min_key = min(U, key=U.get)
S[min_key] = U.get(min_key)
U = {v: min(self.graph[min_key][v] + S[min_key],
U[v]) for v in U.keys() if v != min_key}
return S.get(end or 0)
```
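To make the algorithm above runnable in isolation, the sketch below mirrors `shortest_path` on a plain adjacency matrix. It assumes, as the comprehension over `self.graph[start][v]` suggests, that the graph is stored as a matrix of edge weights with a very large value (here `float('inf')`) for missing edges; the example weights are placeholder data.
```python
INF = float('inf')


def dijkstra_matrix(graph, start, end):
    """Mirror of Dijkstra.shortest_path above, on a plain adjacency matrix.

    `graph[u][v]` is the edge weight, or INF when there is no edge.
    """
    size = len(graph)
    S = {start: 0}                                   # settled vertices -> distance
    U = {v: graph[start][v] for v in range(size) if v != start}
    while len(S) < size:
        u = min(U, key=U.get)                        # closest unsettled vertex
        S[u] = U[u]
        U = {v: min(graph[u][v] + S[u], U[v]) for v in U if v != u}
    return S[end]


if __name__ == '__main__':
    g = [
        [0,   7,   9,   INF, INF, 14],
        [7,   0,   10,  15,  INF, INF],
        [9,   10,  0,   11,  INF, 2],
        [INF, 15,  11,  0,   6,   INF],
        [INF, INF, INF, 6,   0,   9],
        [14,  INF, 2,   INF, 9,   0],
    ]
    print(dijkstra_matrix(g, 0, 4))  # 20, via 0 -> 2 -> 5 -> 4
```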
#### File: array/medium/combinationSum2.py
```python
class Solution(object):
"""
Problem:
https://leetcode.com/problems/combination-sum-ii/
Example:
given candidate set [10, 1, 2, 7, 6, 1, 5] and target 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
"""
def combinationSum2(self, candidates, target, init=True):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
if init:
candidates.sort()
rets = []
for i in candidates:
if i > target:
break
elif i == target:
rets.append([i])
else:
_temp = list(candidates)
_temp.remove(i)
rets += ([sorted([i] + x) for x in self.combinationSum2(_temp, target - i, False)])
result = []
for r in rets:
if r not in result:
result.append(r)
return result
if __name__ == '__main__':
candidates = [10, 1, 2, 7, 6, 1, 5]
target = 8
result = Solution().combinationSum2(candidates, target)
print(result)
```
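The recursion above copies and re-sorts candidate lists and then removes duplicate combinations at the end, which is correct but does redundant work. For comparison only, a common alternative is index-based backtracking over the sorted candidates that skips equal values at the same depth; this is a generic sketch, not code from the repository.
```python
def combination_sum2(candidates, target):
    candidates = sorted(candidates)
    results = []

    def backtrack(start, remaining, path):
        if remaining == 0:
            results.append(list(path))
            return
        for i in range(start, len(candidates)):
            # Skip duplicates at the same recursion depth.
            if i > start and candidates[i] == candidates[i - 1]:
                continue
            if candidates[i] > remaining:
                break
            path.append(candidates[i])
            backtrack(i + 1, remaining - candidates[i], path)
            path.pop()

    backtrack(0, target, [])
    return results


if __name__ == '__main__':
    print(combination_sum2([10, 1, 2, 7, 6, 1, 5], 8))
    # [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]
```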
#### File: array/medium/combinationSum3.py
```python
class Solution(object):
"""
Problem:
https://leetcode.com/problems/combination-sum-iii/
Example:
Example 1:
Input: k = 3, n = 7
Output:
[[1,2,4]]
Example 2:
Input: k = 3, n = 9
Output:
[[1,2,6], [1,3,5], [2,3,4]]
"""
def search(self, pre_nums, k, n):
rets = []
_start = max(pre_nums) + 1 if pre_nums else 1
for i in range(_start, 10):
_cur = pre_nums + [i]
if k == 1 and i == n:
rets += [_cur]
break
if k > 1 and i < n:
rets += self.search(_cur, k - 1, n - i)
return rets
def combinationSum3(self, k, n):
"""
:type k: int
:type n: int
:rtype: List[List[int]]
"""
return self.search([], k, n)
if __name__ == '__main__':
result = Solution().combinationSum3(3, 9)
print(result)
```
#### File: array/medium/searchRange.py
```python
class Solution(object):
"""
Problem:
https://leetcode.com/problems/search-for-a-range/
Example:
Given [5, 7, 7, 8, 8, 10] and target value 8,
return [3, 4]
"""
def binary_search(self, val, sorted_data):
length = len(sorted_data)
# [start, stop]
start = 0
stop = length - 1
while start <= stop:
mid = (stop + start) // 2
if sorted_data[mid] < val:
start = mid + 1
elif sorted_data[mid] > val:
stop = mid - 1
else:
return mid
return -1
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
pos = self.binary_search(target, nums)
if pos == -1:
return [-1, -1]
start = pos
end = pos
left = 1
right = 1
while left or right:
if left and pos - left >= 0 and nums[pos - left] == target:
start = pos - left
left += 1
else:
left = 0
if right and pos + right < len(nums) and nums[pos + right] == target:
end = pos + right
right += 1
else:
right = 0
return [start, end]
if __name__ == '__main__':
# nums = [5, 7, 7, 8, 8, 10]
nums = [2, 2]
result = Solution().searchRange(nums, 3)
print(result)
```
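The expansion loop above walks outward from one matched index, which is linear in the number of equal elements. As a sketch of the usual O(log n) alternative (not the repository's solution), the two range boundaries can be found directly with binary searches via the standard-library `bisect` module:
```python
import bisect


def search_range(nums, target):
    """Return [first, last] index of target in sorted nums, or [-1, -1]."""
    left = bisect.bisect_left(nums, target)
    if left == len(nums) or nums[left] != target:
        return [-1, -1]
    right = bisect.bisect_right(nums, target) - 1
    return [left, right]


if __name__ == '__main__':
    print(search_range([5, 7, 7, 8, 8, 10], 8))  # [3, 4]
    print(search_range([2, 2], 3))               # [-1, -1]
```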
#### File: tests/number/test_number.py
```python
from algorithm.numberTheory.fibonacci import fibonacci_log_n
from tests.base_test_case import BaseTestCase
class SearchTest(BaseTestCase):
def setUp(self):
super().setUp()
self.num = 100
def test_fibonacci_log_n(self):
print(fibonacci_log_n(self.num))
```
#### File: tests/problem/test_eightqueen.py
```python
from algorithm.problem import eightqueen
from tests.base_test_case import BaseTestCase
class EightQueenTestCase(BaseTestCase):
def setUp(self):
super().setUp()
def test_eight_queen(self):
self.loop(eightqueen, [1, 2, 4])
```
#### File: tests/sort/test_merge_sort.py
```python
import math
from algorithm.sorts.merge_sort import merge_sort
from algorithm.sorts.merge_withInsert import merge_with_insert
from tests.sort.base_sort import BaseSortTestCase
class MergeBaseSortTestCase(BaseSortTestCase):
def test_merge(self):
self.loop(merge_sort, self.data)
def test_merge_worst(self):
self.loop(merge_sort, self.reverse_data)
def test_merge_with_insert(self):
self.loop(merge_with_insert, self.data, int(math.log(self.length, 2)))
def test_merge_with_insert_worst(self):
self.loop(merge_with_insert, self.reverse_data, int(math.log(self.length, 2)))
```
#### File: tests/tree/test_binary_heap.py
```python
from datastructure.trees.binary_heap import BinaryHeap
from tests.base_test_case import BaseTestCase
class BinaryHeapTestCase(BaseTestCase):
def setUp(self):
super().setUp()
def test_binary_tree(self):
data = [x for x in range(0, 15)]
heap = BinaryHeap(data, False)
print(heap)
heap.del_last()
print(heap)
``` |
{
"source": "joway/python-china",
"score": 2
} |
#### File: python-china/users/views.py
```python
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from users.models import User
from users.services import AuthService
def redirect(request):
""" redirect to auth server url and get the code
"""
return HttpResponseRedirect(AuthService.get_auth_url())
@csrf_exempt
def login(request):
""" login with code
"""
access_token, refresh_token = AuthService.get_tokens(code=request.POST['code'])
user_info = AuthService.get_user_info(access_token=access_token)
try:
user = User.objects.get(email=user_info['email'])
except User.DoesNotExist:
user = User.objects.create_user(email=user_info['email'],
username=user_info['username'],
avatar=user_info['avatar'])
return HttpResponse(access_token)
def callback(request):
""" get the code and request for jwt token
"""
access_token, refresh_token = AuthService.get_tokens(code=request.GET['code'])
return HttpResponse(access_token)
``` |
{
"source": "joway/statsd-server",
"score": 3
} |
#### File: joway/statsd-server/monitor.py
```python
import socket
import psutil
import statsd as statsd_lib
from settings import STATSD_HOST, STATSD_PORT
hostname = socket.gethostname()
statsd = statsd_lib.StatsClient(host=STATSD_HOST, port=STATSD_PORT, prefix=hostname)
def monitoring_gauge(key, obj):
for item in obj.__dict__:
statsd.gauge('%s.%s' % (key, item), int(obj.__getattribute__(item)))
def monitoring_mem():
monitoring_gauge('mem', psutil.virtual_memory())
def monitoring_swap():
monitoring_gauge('swap', psutil.swap_memory())
def monitoring_cpu():
monitoring_gauge('cpu', psutil.cpu_times())
def monitoring_net():
monitoring_gauge('net', psutil.net_io_counters())
``` |
{
"source": "joweeba/mrtaskman",
"score": 2
} |
#### File: mrtaskman/client/package_cache_test.py
```python
__author__ = '<EMAIL> (<NAME>)'
import json
import logging
import os
import threading
import time
import unittest
from client import package_cache
class PackageCacheTest(unittest.TestCase):
def setUp(self):
self.path = '/tmp/cache_test'
self.min_duration_seconds = 0
self.max_size_bytes = 100 * 1024
self.low_watermark_percentage = 0.6
self.high_watermark_percentage = 0.8
# Clear old cache, if any.
os.system('rm -rf %s' % self.path)
self.cache = package_cache.PackageCache(
self.min_duration_seconds,
self.max_size_bytes,
self.path,
self.low_watermark_percentage,
self.high_watermark_percentage)
def tearDown(self):
os.system('rm -rf %s' % self.path)
def tearDownTestCase(self):
os.system('rm -rf %s' % self.path)
def testSetUpAndTearDown(self):
logging.info('setUp done. tearing down...')
def testDotCacheInfoMatchesSettings(self):
dot_cache_info_file = open(os.path.join(self.path, '.cache_info'))
cache_info = json.load(dot_cache_info_file)
self.assertEqual(cache_info['min_duration_seconds'],
self.min_duration_seconds)
self.assertEqual(cache_info['max_size_bytes'], self.max_size_bytes)
self.assertEqual(cache_info['low_watermark_percentage'],
self.low_watermark_percentage)
self.assertEqual(cache_info['high_watermark_percentage'],
self.high_watermark_percentage)
def testSecondCacheClientDoesNotTrampleCacheInfoFile(self):
second_cache = package_cache.PackageCache(
20, 100, self.path, self.low_watermark_percentage,
self.high_watermark_percentage)
self.testDotCacheInfoMatchesSettings()
def testSecondCacheClientObeysLock(self):
second_cache = package_cache.PackageCache(
20, 100, self.path, self.low_watermark_percentage,
self.high_watermark_percentage)
# Acquire lock via first cache.
self.cache._Lock()
event = threading.Event()
def DoSecondThread(second_cache, event):
second_cache._Lock()
second_cache._Unlock()
event.set()
# Tell second cache to attempt to acquire the lock (should block.)
second_thread = threading.Thread(target=DoSecondThread,
args=(second_cache, event))
second_thread.start()
self.assertFalse(event.is_set())
time.sleep(0.5)
self.assertFalse(event.is_set())
# Release the lock and expect first thread to finish.
self.cache._Unlock()
self.assertTrue(event.wait(10.0))
second_thread.join(10.0)
def testCopyToDirectory_NeedsDownload(self):
name = 'name'
version = 1
tmp_dir = os.tempnam('/tmp', 'testCopyToDirectory_NeedsDownload')
def MockOnCacheMiss(n, v, p):
self.assertEqual(name, n)
self.assertEqual(version, v)
try:
os.system('mkdir -p %s' % tmp_dir)
self.cache.CopyToDirectory(
{
'name': name,
'version': version,
},
tmp_dir,
MockOnCacheMiss)
finally:
os.system('rm -rf %s' % tmp_dir)
def testAddToDownloading(self):
name = 'name'
version = 1
tmp_dir = os.tempnam('/tmp', 'testCopyToDirectory_NeedsDownload')
pid = os.getpid()
def MockOnCacheMiss(n, v, p):
self.assertEqual(name, n)
self.assertEqual(version, v)
downloading = open(os.path.join(self.path, '.downloading'), 'r')
records = json.load(downloading)
downloading.close()
record = records[package_cache.MakePackageString(name, version)]
self.assertEqual(pid, record['pid'])
self.assertTrue(record['timestamp'] <= time.mktime(time.gmtime()))
try:
os.system('mkdir -p %s' % tmp_dir)
self.cache.CopyToDirectory(
{
'name': name,
'version': version,
},
tmp_dir,
MockOnCacheMiss)
finally:
os.system('rm -rf %s' % tmp_dir)
def testRemoveFromDownloading(self):
name = 'name'
version = 1
tmp_dir = os.tempnam('/tmp', 'testCopyToDirectory_NeedsDownload')
pid = os.getpid()
def MockOnCacheMiss(n, v, p):
self.assertEqual(name, n)
self.assertEqual(version, v)
downloading = open(os.path.join(self.path, '.downloading'), 'r')
records = json.load(downloading)
downloading.close()
record = records[package_cache.MakePackageString(name, version)]
self.assertEqual(pid, record['pid'])
self.assertTrue(record['timestamp'] <= time.mktime(time.gmtime()))
try:
os.system('mkdir -p %s' % tmp_dir)
self.cache.CopyToDirectory(
{
'name': name,
'version': version,
},
tmp_dir,
MockOnCacheMiss)
finally:
os.system('rm -rf %s' % tmp_dir)
downloading = open(os.path.join(self.path, '.downloading'), 'r')
records = json.load(downloading)
downloading.close()
record = records.get(package_cache.MakePackageString(name, version), None)
self.assertIsNone(record)
def testRemoveFromIndex(self):
name = 'name'
version = 1
tmp_dir = os.tempnam('/tmp', 'testCopyToDirectory_NeedsDownload')
pid = os.getpid()
def MockOnCacheMiss(n, v, p):
self.assertEqual(name, n)
self.assertEqual(version, v)
try:
os.system('mkdir -p %s' % tmp_dir)
self.cache.CopyToDirectory(
{
'name': name,
'version': version,
},
tmp_dir,
MockOnCacheMiss)
finally:
os.system('rm -rf %s' % tmp_dir)
index = open(os.path.join(self.path, '.index'), 'r')
records = json.load(index)
index.close()
record = records[package_cache.MakePackageString(name, version)]
self.assertEqual(pid, record['pid'])
self.assertNotEqual(tmp_dir, record['cache_dir'])
self.assertTrue(self.path in record['cache_dir'])
self.assertTrue(record['timestamp'] <= time.mktime(time.gmtime()))
self.assertEqual(0, record['size_bytes'])
self.cache._Lock()
self.cache._RemoveFromIndex(package_cache.MakePackageString(name, version))
self.cache._Unlock()
index = open(os.path.join(self.path, '.index'), 'r')
records = json.load(index)
index.close()
record = records.get(package_cache.MakePackageString(name, version), None)
self.assertIsNone(record)
def testWaitOnDownload(self):
name = 'name'
version = 1
tmp_dir = os.tempnam('/tmp', 'testWaitOnDownload')
tmp_dir2 = os.tempnam('/tmp', 'testWaitOnDownload2')
os.system('mkdir -p %s' % tmp_dir)
os.system('mkdir -p %s' % tmp_dir2)
second_cache = package_cache.PackageCache(
20, 100, self.path, self.low_watermark_percentage,
self.high_watermark_percentage)
event = threading.Event()
def DoSecondThread(second_cache, event):
try:
# Should block.
second_cache.CopyToDirectory({'name': name, 'version': version},
tmp_dir2, None)
finally:
event.set()
# Tell second cache to attempt to acquire the lock (should block.)
second_thread = threading.Thread(target=DoSecondThread,
args=(second_cache, event))
def MockOnCacheMiss(n, v, p):
self.assertEqual(name, n)
self.assertEqual(version, v)
os.system('echo "nothing" > %s' % os.path.join(p, 'file.txt'))
second_thread.start()
time.sleep(1.5)
try:
self.cache.CopyToDirectory(
{
'name': name,
'version': version,
},
tmp_dir,
MockOnCacheMiss)
finally:
event.wait()
filename = os.path.join(tmp_dir2, 'file.txt')
try:
with open(filename): pass
except IOError:
self.fail('%s did not exist' % filename)
os.system('rm -rf %s' % tmp_dir)
os.system('rm -rf %s' % tmp_dir2)
# TODO(jeff.carollo): testCopyDirectory.
def main():
# Set up logging.
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO)
unittest.main()
if __name__ == '__main__':
main()
```
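The tests above lean on `os.tempnam` and `os.system('rm -rf ...')`, which are Python 2 era idioms; `os.tempnam` no longer exists in Python 3. If the tests were ever ported, the standard-library `tempfile` and `shutil` modules cover the same ground. The snippet below is a generic sketch of that pattern, not a change to the MrTaskman code.
```python
import os
import shutil
import tempfile
import unittest


class TempDirTestCase(unittest.TestCase):
    def setUp(self):
        # mkdtemp() creates a unique directory and returns its path, replacing
        # the os.tempnam('/tmp', ...) + os.system('mkdir -p ...') pattern.
        self.tmp_dir = tempfile.mkdtemp(prefix='package_cache_test_')

    def tearDown(self):
        # Equivalent of os.system('rm -rf %s' % self.tmp_dir), without a shell.
        shutil.rmtree(self.tmp_dir, ignore_errors=True)

    def test_directory_exists(self):
        self.assertTrue(os.path.isdir(self.tmp_dir))


if __name__ == '__main__':
    unittest.main()
```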
#### File: server/handlers/api_handlers.py
```python
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext.db import datastore_query
from google.appengine.ext.webapp import blobstore_handlers
from models import tasks
from util import device_info
import csv
import datetime
import json
import StringIO
import urllib
import webapp2
def FormatModelDataAsCsv(models, keys):
"""Takes list of db.Models and returns CSV representation as StingIO."""
data_csv = StringIO.StringIO()
data_writer = csv.DictWriter(data_csv, keys)
for model in models:
d = db.to_dict(model)
encoded_d = {}
for (key, value) in d.iteritems():
if isinstance(value, basestring):
encoded_d[key] = value.encode('utf-8', 'ignore')
else:
encoded_d[key] = value
data_writer.writerow(encoded_d)
data_csv.flush()
return data_csv
def Utf8Encode(string):
return string.encode('utf-8', 'ignore')
def GetExecutor(executor):
device_data = device_info.GetDeviceInfo(executor)
if device_data is not None:
executor = device_data['device_name']
return executor
TASK_RESULT_KEYS = [
'task_id',
'executor',
'assigned_worker',
'assigned_time',
'completed_time',
'exit_code',
'stderr_url',
'stdout_url',
'stderr_blobkey',
'stdout_blobkey',
]
def FormatTaskResultsAsCsv(task_list):
"""Specially format Tasks with TaskResults for CSV transmission."""
data_csv = StringIO.StringIO()
data_writer = csv.DictWriter(data_csv, TASK_RESULT_KEYS)
for task in task_list:
if task.executor_requirements[0] == 'macos':
continue
executor = GetExecutor(task.executor_requirements[0])
encoded_d = {}
encoded_d['task_id'] = task.key().id()
encoded_d['executor'] = Utf8Encode(executor)
encoded_d['assigned_worker'] = Utf8Encode(task.assigned_worker)
encoded_d['assigned_time'] = task.assigned_time
encoded_d['completed_time'] = task.completed_time
encoded_d['exit_code'] = task.result.exit_code
encoded_d['stderr_url'] = Utf8Encode(task.result.stderr_download_url)
encoded_d['stdout_url'] = Utf8Encode(task.result.stdout_download_url)
encoded_d['stderr_blobkey'] = Utf8Encode(task.result.stderr.key().__str__())
encoded_d['stdout_blobkey'] = Utf8Encode(task.result.stdout.key().__str__())
data_writer.writerow(encoded_d)
data_csv.flush()
return data_csv
class ListResultsAfterDate(webapp2.RequestHandler):
"""Retrieves a special-formatted list of results newer than after_date."""
def get(self, after_date):
cursor = self.request.get('cursor', None)
limit = int(self.request.get('limit', 1000))
try:
after_date = int(urllib.unquote(after_date))
except:
self.response.out.write('after_date must be an integer timestamp.')
self.response.set_status(400)
return
after_date = datetime.datetime.fromtimestamp(after_date)
# Fetch data.
(results, next_cursor) = tasks.GetResultsAfterDate(
after_date, limit, cursor)
# Format data as CSV.
data_csv = FormatTaskResultsAsCsv(results)
# Create response.
response = {}
response['kind'] = 'csv_data#task_results'
response['headers'] = TASK_RESULT_KEYS
response['data'] = data_csv.getvalue()
response['next_cursor'] = next_cursor
# Write response.
json.dump(response, self.response.out, check_circular=False)
self.response.headers['Content-Type'] = 'application/json'
app = webapp2.WSGIApplication([
('/api/task_results/list_after_date/after_date/(.+)', ListResultsAfterDate),
], debug=True)
```
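Note that `FormatModelDataAsCsv` and `FormatTaskResultsAsCsv` never call `writeheader()`, so the CSV payload carries no header row; the column names travel separately in `response['headers']`. A client-side sketch for reassembling the `ListResultsAfterDate` payload follows; the URL is a hypothetical placeholder, since the deployment host is not given in this file.
```python
import csv
import io
import json
import urllib.request

# Hypothetical endpoint; the real deployment host is not shown in this file.
URL = 'https://example.appspot.com/api/task_results/list_after_date/after_date/0'

with urllib.request.urlopen(URL) as resp:
    payload = json.loads(resp.read().decode('utf-8'))

# The CSV body has no header row, so supply response['headers'] as fieldnames.
reader = csv.DictReader(io.StringIO(payload['data']),
                        fieldnames=payload['headers'])
for row in reader:
    print(row['task_id'], row['exit_code'])
```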
#### File: server/index/index_handlers.py
```python
__author__ = '<NAME> (<EMAIL>)'
import logging
import webapp2
from index import migrate_tasks_pipeline
from index import task_results_pipeline
class IndexHandler(webapp2.RequestHandler):
def get(self):
self.response.out.write('''
<html>
<head><title>Index</title></head>
<body>
<form method="POST" action='/index'>
<input type="hidden" name="action" value="migrate_tasks"></input>
<input type="submit" value="Migrate Tasks"></input>
</form>
<form method="POST" action='/index'>
<input type="hidden" name="action" value="task_results"></input>
<input type="submit" value="Task Results"></input>
</form>
</body>
</html>
''')
self.response.headers['Content-Type'] = 'text/html'
def post(self):
logging.info('request: %s', self.request.body.decode('utf-8'))
action = self.request.get('action', None)
if not action:
self.response.out.write('Need to give an action.')
self.response.set_status(400)
return
if action == 'migrate_tasks':
pipeline = migrate_tasks_pipeline.MigrateTasksPipeline()
pipeline.start()
self.redirect(
pipeline.base_path + "/status?root=" + pipeline.pipeline_id)
return
if action == 'task_results':
pipeline = task_results_pipeline.TaskResultsPipeline()
pipeline.start()
self.redirect(
pipeline.base_path + "/status?root=" + pipeline.pipeline_id)
return
self.response.out.write('Invalid action: %s' % action)
self.response.set_status(400)
app = webapp2.WSGIApplication([
('/index', IndexHandler),
], debug=True)
```
#### File: server/index/migrate_tasks_pipeline.py
```python
__author__ = '<NAME> (<EMAIL>)'
import logging
from google.appengine.ext import db  # used below by Map() for db.to_dict / db.Key
from mapreduce import base_handler
from mapreduce import mapreduce_pipeline
from mapreduce import operation
from models import tasks
class MigrateTasksPipeline(base_handler.PipelineBase):
def run(self):
yield mapreduce_pipeline.MapperPipeline(
'MigrateTasks',
'index.migrate_tasks_pipeline.Map',
'mapreduce.input_readers.DatastoreInputReader',
params={
'entity_kind': 'models.tasks.Task',
'batch_size': 100,
},
shards=100)
def Map(model):
if model.parent() is not None:
model_dict = db.to_dict(model)
model2 = tasks.Task(key=db.Key.from_path('Task', model.key().id()),
**model_dict)
operation.db.Put(model2)
```
#### File: workers/macos/worker.py
```python
__author__ = '<EMAIL> (<NAME>)'
import cStringIO
import datetime
import httplib
import json
import logging
import os
import socket
import StringIO
import subprocess
import sys
import time
import urllib2
import gflags
from client import mrtaskman_api
from client import package_installer
from client import package_cache
from common import device_info
from common import http_file_upload
from common import parsetime
from common import split_stream
FLAGS = gflags.FLAGS
gflags.DEFINE_string('log_filename', '', 'Where to log stuff. Required.')
gflags.DEFINE_string('worker_name', '', 'Unique worker name.')
gflags.DEFINE_list('worker_capabilities', ['macos', 'android'],
'Things this worker can do.')
# Package cache flags.
gflags.DEFINE_boolean('use_cache', True, 'Whether or not to use package cache.')
gflags.DEFINE_string('cache_path',
'/usr/local/worker_cache',
'Where to cache packages.')
gflags.DEFINE_integer('min_duration_seconds', 60,
'Minimum time to cache something.')
gflags.DEFINE_integer('max_cache_size_bytes', 2 * 1024 * 1024 * 1024,
'Maximum size of the cache in bytes.')
gflags.DEFINE_float('low_watermark_percentage', 0.6,
'When cleaning up, keeps at least this much cache.')
gflags.DEFINE_float('high_watermark_percentage', 0.8,
'When cleaning up, deletes to below this line.')
class TaskError(Exception):
pass
class MrTaskmanUnrecoverableHttpError(TaskError):
pass
class MrTaskmanRecoverableHttpError(TaskError):
pass
def GetHostname():
return socket.gethostname()
class MacOsWorker(object):
"""Executes macos tasks."""
def __init__(self, worker_name, log_stream):
self.worker_name_ = worker_name
self.log_stream_ = log_stream
self.api_ = mrtaskman_api.MrTaskmanApi()
self.hostname_ = GetHostname()
self.capabilities_ = {'executor': self.GetCapabilities()}
self.executors_ = {}
for capability in self.capabilities_['executor']:
self.executors_[capability] = self.ExecuteTask
self.use_cache_ = FLAGS.use_cache
if self.use_cache_:
self.package_cache_ = package_cache.PackageCache(
FLAGS.min_duration_seconds,
FLAGS.max_cache_size_bytes,
FLAGS.cache_path,
FLAGS.low_watermark_percentage,
FLAGS.high_watermark_percentage)
def GetCapabilities(self):
capabilities = device_info.GetCapabilities()
capabilities.append('macos')
capabilities.append(self.worker_name_)
return capabilities
def AssignTask(self):
"""Makes a request to /tasks/assign to get assigned a task.
Returns:
Task if a task was assigned, or None.
"""
try:
task = self.api_.AssignTask(self.worker_name_, self.hostname_,
self.capabilities_)
return task
except urllib2.HTTPError, e:
logging.info('Got %d HTTP response from MrTaskman on AssignTask.',
e.code)
return None
except urllib2.URLError, e:
logging.info('Got URLError trying to reach MrTaskman: %s', e)
return None
def SendResponse(self, task_id, stdout, stderr, task_result):
while True:
try:
# TODO(jeff.carollo): Refactor.
device_sn = device_info.GetDeviceSerialNumber()
task_result['device_serial_number'] = device_sn
response_url = self.api_.GetTaskCompleteUrl(task_id)
if not response_url:
logging.info('No task complete url for task_id %s', task_id)
return
response_url = response_url.get('task_complete_url', None)
if not response_url:
logging.info('No task complete url for task_id %s', task_id)
return
self.api_.SendTaskResult(response_url, stdout, stderr, task_result)
logging.info('Successfully sent response for task %s: %s',
task_id, self.api_.MakeTaskUrl(task_id))
return
except urllib2.HTTPError, error_response:
body = error_response.read()
code = error_response.code
if code == 404:
logging.warning('TaskCompleteUrl timed out.')
continue
logging.warning('SendResponse HTTPError code %d\n%s',
code, body)
return
except urllib2.URLError, e:
logging.info(
'Got URLError trying to send response to MrTaskman: %s', e)
logging.info('Retrying in 10 seconds')
time.sleep(10)
continue
def GetTaskCompleteUrl(self, task_id):
try:
return self.api_.GetTaskCompleteUrl(task_id)
except urllib2.HTTPError, error_response:
body = error_response.read()
code = error_response.code
logging.warning('GetTaskCompleteUrl HTTPError code %d\n%s',
code, body)
def ShouldWaitForDevice(self):
"""Returns True iff this worker controls a device which is offline."""
if not device_info.DEVICE_SN:
return False
return not device_info.DeviceIsConnected()
def PollAndExecute(self):
logging.info('Polling for work...')
device_active = True
while True:
try:
if self.ShouldWaitForDevice():
if device_active:
logging.info('Device %s is offline. Waiting for it to come back.',
device_info.DEVICE_SN)
device_active = False
time.sleep(10)
continue
if not device_active:
logging.info('Device came back online.')
device_active = True
# TODO(jeff.carollo): Wrap this in a catch-all Exception handler that
# allows us to continue executing in the face of various task errors.
task = self.AssignTask()
if not task:
time.sleep(10)
continue
except KeyboardInterrupt:
logging.info('Caught CTRL+C. Exiting.')
return
task_stream = cStringIO.StringIO()
task_logs = None
self.log_stream_.AddStream(task_stream)
try:
logging.info('Got a task:\n%s\n', json.dumps(task, 'utf-8', indent=2))
config = task['config']
task_id = int(task['id'])
attempt = task['attempts']
# Figure out which of our executors we can use.
executor = None
allowed_executors = config['task']['requirements']['executor']
for allowed_executor in allowed_executors:
try:
executor = self.executors_[allowed_executor]
except KeyError:
pass
if executor is not None:
break
if executor is None:
# TODO: Send error response to server.
# This is probably our fault - we said we could do something
# that we actually couldn't do.
logging.error('No matching executor from %s', allowed_executors)
raise Exception('No allowed executors matched our executors_:\n' +
'%s\nvs.%s\n' % (allowed_executors, self.executors_))
try:
# We've got a valid executor, so use it.
(results, stdout, stderr) = executor(task_id, attempt, task, config)
except MrTaskmanUnrecoverableHttpError:
logging.error(
'Unrecoverable MrTaskman HTTP error. Aborting task %d.', task_id)
continue
finally:
self.log_stream_.RemoveStream(task_stream)
task_logs = task_stream.getvalue().decode('utf-8')
task_stream.close()
try:
results['worker_log'] = task_logs.encode('utf-8')
self.SendResponse(task_id,
stdout,
stderr,
results)
except MrTaskmanUnrecoverableHttpError:
logging.error(
'Unrecoverable MrTaskman HTTP error. Aborting task %d.', task_id)
logging.info('Polling for work...')
# Loop back up and poll for the next task.
def ExecuteTask(self, task_id, attempt, task, config):
logging.info('Received task %s', task_id)
try:
tmpdir = package_installer.TmpDir()
# Download the files we need from the server.
files = config.get('files', [])
self.DownloadAndStageFiles(files)
# Install any packages we might need.
# TODO(jeff.carollo): Handle any exceptions raised here.
packages = config.get('packages', [])
self.DownloadAndInstallPackages(packages, tmpdir)
# We probably don't want to run forever. Default to 12 minutes.
timeout = config['task'].get('timeout', '12m')
timeout = parsetime.ParseTimeDelta(timeout)
# Get any environment variables to inject.
env = config['task'].get('env', {})
env.update(os.environ)  # dict.update() returns None, so do not reassign
# Get our command and execute it.
command = config['task']['command']
logging.info('Running command %s', command)
(exit_code, stdout, stderr, execution_time, result_metadata) = (
self.RunCommandRedirectingStdoutAndStderrWithTimeout(
command, env, timeout, tmpdir.GetTmpDir()))
logging.info('Executed %s with result %d', command, exit_code)
results = {
'kind': 'mrtaskman#task_complete_request',
'task_id': task_id,
'attempt': attempt,
'exit_code': exit_code,
'execution_time': execution_time.total_seconds(),
'result_metadata': result_metadata
}
return (results, stdout, stderr)
finally:
tmpdir.CleanUp()
def RunCommandRedirectingStdoutAndStderrWithTimeout(
self, command, env, timeout, cwd):
command = ' '.join([command, '>stdout', '2>stderr'])
# TODO: More precise timing through process info.
begin_time = datetime.datetime.now()
timeout_time = begin_time + timeout
process = subprocess.Popen(args=command,
env=env,
shell=True,
cwd=cwd)
ret = None
while None == ret and (datetime.datetime.now() < timeout_time):
time.sleep(0.02)
ret = process.poll()
finished_time = datetime.datetime.now()
if finished_time >= timeout_time and (None == ret):
logging.info('command %s timed out.', command)
process.terminate()
process.wait()
ret = -99
execution_time = finished_time - begin_time
try:
stdout = file(os.path.join(cwd, 'stdout'), 'rb')
except IOError, e:
logging.error('stdout was not written.')
stdout = file(os.path.join(cwd, 'stdout'), 'w')
stdout.write('No stdout.')
stdout.flush()
stdout.close()
stdout = file(os.path.join(cwd, 'stdout'), 'rb')
try:
stderr = file(os.path.join(cwd, 'stderr'), 'rb')
except IOError, e:
logging.error('stderr was not written.')
stderr = file(os.path.join(cwd, 'stderr'), 'w')
stderr.write('No stderr.')
stderr.flush()
stderr.close()
stderr = file(os.path.join(cwd, 'stderr'), 'rb')
try:
result_metadata_file = file(os.path.join(cwd, 'result_metadata'), 'r')
result_metadata = json.loads(result_metadata_file.read().decode('utf-8'))
except:
result_metadata = None
return (ret, stdout, stderr, execution_time, result_metadata)
def DownloadAndStageFiles(self, files):
logging.info('Not staging files: %s', files)
# TODO: Stage files.
def DownloadAndInstallPackages(self, packages, tmpdir):
# TODO(jeff.carollo): Create a package cache if things take off.
for package in packages:
attempts = 0
while True:
try:
# TODO(jeff.carollo): Put package cache code here.
if self.use_cache_:
self.package_cache_.CopyToDirectory(
package, tmpdir.GetTmpDir(),
package_installer.DownloadAndInstallPackage)
else:
package_installer.DownloadAndInstallPackage(
package['name'], package['version'],
tmpdir.GetTmpDir())
break
except urllib2.HTTPError, e:
logging.error('Got HTTPError %d trying to grab package %s.%s: %s',
e.code, package['name'], package['version'], e)
raise MrTaskmanUnrecoverableHttpError(e)
except (urllib2.URLError, httplib.IncompleteRead,
httplib.BadStatusLine, httplib.HTTPException), e:
logging.error('Got URLError trying to grab package %s.%s: %s',
package['name'], package['version'], e)
logging.info('Retrying in 10')
attempts += 1
# TODO(jeff.carollo): Figure out a robust way to do this.
# Likely need to just try a few times to get around Internet blips
# then mark task as failed for package reasons.
if attempts < 10:
time.sleep(10)
continue
else:
logging.error('Failed to grab package after 10 attempts. Aborting.')
raise MrTaskmanUnrecoverableHttpError(e)
except IOError, e:
logging.error('Got IOError trying to grab package %s.%s: %s',
package['name'], package['version'], e)
raise MrTaskmanUnrecoverableHttpError(e)
def main(argv):
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
sys.stderr.write('%s\n' % e)
sys.exit(1)
return
# Set default socket timeout to 2 hours so that we catch missing timeouts.
socket.setdefaulttimeout(2 * 60 * 60)
if not FLAGS.log_filename:
sys.stderr.write('Flag --log_filename is required.\n')
sys.exit(-9)
return
try:
from third_party import portalocker
log_file = file(FLAGS.log_filename, 'a+')
portalocker.lock(log_file, portalocker.LOCK_EX | portalocker.LOCK_NB)
except Exception, e:
logging.exception(e)
print 'Could not get exclusive lock.'
sys.exit(-10)
return
try:
FORMAT = '%(asctime)-15s %(message)s'
log_stream = split_stream.SplitStream(sys.stdout, log_file)
logging.basicConfig(format=FORMAT, level=logging.DEBUG,
stream=log_stream)
macos_worker = MacOsWorker(FLAGS.worker_name, log_stream=log_stream)
# Run forever, executing tasks from the server when available.
macos_worker.PollAndExecute()
finally:
logging.shutdown()
log_file.flush()
portalocker.unlock(log_file)
log_file.close()
if __name__ == '__main__':
main(sys.argv)
``` |
{
"source": "joweenflores/stock_trading_api",
"score": 3
} |
#### File: backend/models/stocks.py
```python
from typing import TYPE_CHECKING
from sqlalchemy import Column, String, Date, ForeignKey, TIMESTAMP, Integer, Boolean, Numeric
from sqlalchemy.sql import func
from sqlalchemy.orm import relationship
from ..db.base_class import Base
if TYPE_CHECKING:
from .traders import Trade, Offer, Portfolio # noqa: F401
# Stocks based on Philippine Stock Exchange
# https://edge.pse.com.ph/companyDirectory/form.do
class Stock(Base):
symbol = Column(String(5), index=True, unique=True)
name = Column(String(256))
sector_id = Column(Integer, ForeignKey('sector.id'), nullable=True)
sub_sector_id = Column(Integer, ForeignKey('subsector.id'), nullable=True)
listing_date = Column(Date(), server_default=func.now())
active = Column(Boolean(), default=True)
# FK Reverse Relations
price_history = relationship("PriceHistory", backref="stock")
trade = relationship("Trade", backref="stock")
offer = relationship("Offer", backref="stock")
portfolio = relationship("Portfolio", backref="stock")
def __repr__(self):
return f"<Stock(symbol='{self.symbol}')>"
class PriceHistory(Base):
stock_id = Column(Integer, ForeignKey('stock.id'))
buy = Column(Numeric(10, 3))
sell = Column(Numeric(10, 3))
timestamp = Column(TIMESTAMP(timezone=True), server_default=func.now(), onupdate=func.current_timestamp())
def __repr__(self):
return f"<PriceHistory(buy='{self.buy}', sell='{self.sell}')>"
class Sector(Base):
name = Column(String(50))
# FK Reverse Relations
stock = relationship('Stock', backref="sector", uselist=False)
def __repr__(self):
return f"<Sector(name='{self.name}')>"
class SubSector(Base):
name = Column(String(50))
# FK Reverse Relations
stock = relationship('Stock', backref="subsector", uselist=False)
def __repr__(self):
return f"<SubSector(name='{self.name}')>"
``` |
{
"source": "joWeiss/iota.lib.py",
"score": 3
} |
#### File: commands/extended/helpers.py
```python
class Helpers(object):
"""
Adds additional helper functions that aren't part of the core or extended
API.
"""
def __init__(self, api):
self.api = api
def is_promotable(self, tail):
# type: (TransactionHash) -> bool
"""
Determines if a tail transaction is promotable.
:param tail:
Transaction hash. Must be a tail transaction.
"""
return self.api.check_consistency(tails=[tail])['state']
```
#### File: commands/extended/send_transfer.py
```python
from __future__ import absolute_import, division, print_function, \
unicode_literals
from typing import List, Optional
import filters as f
from iota import Address, Bundle, ProposedTransaction, TransactionHash
from iota.commands import FilterCommand, RequestFilter
from iota.commands.extended.prepare_transfer import PrepareTransferCommand
from iota.commands.extended.send_trytes import SendTrytesCommand
from iota.crypto.types import Seed
from iota.filters import SecurityLevel, Trytes
__all__ = [
'SendTransferCommand',
]
class SendTransferCommand(FilterCommand):
"""
Executes ``sendTransfer`` extended API command.
See :py:meth:`iota.api.Iota.send_transfer` for more info.
"""
command = 'sendTransfer'
def get_request_filter(self):
return SendTransferRequestFilter()
def get_response_filter(self):
pass
def _execute(self, request):
change_address = request['changeAddress'] # type: Optional[Address]
depth = request['depth'] # type: int
inputs = request['inputs'] # type: Optional[List[Address]]
min_weight_magnitude = request['minWeightMagnitude'] # type: int
seed = request['seed'] # type: Seed
transfers = request['transfers'] # type: List[ProposedTransaction]
reference = request['reference'] # type: Optional[TransactionHash]
security_level = request['securityLevel'] # int
pt_response = PrepareTransferCommand(self.adapter)(
changeAddress = change_address,
inputs = inputs,
seed = seed,
transfers = transfers,
securityLevel = security_level,
)
st_response = SendTrytesCommand(self.adapter)(
depth = depth,
minWeightMagnitude = min_weight_magnitude,
trytes = pt_response['trytes'],
reference = reference,
)
return {
'bundle': Bundle.from_tryte_strings(st_response['trytes']),
}
class SendTransferRequestFilter(RequestFilter):
def __init__(self):
super(SendTransferRequestFilter, self).__init__(
{
# Required parameters.
'depth': f.Required | f.Type(int) | f.Min(1),
'seed': f.Required | Trytes(result_type=Seed),
# Loosely-validated; testnet nodes require a different value
# than mainnet.
'minWeightMagnitude': f.Required | f.Type(int) | f.Min(1),
'transfers': (
f.Required
| f.Array
| f.FilterRepeater(f.Required | f.Type(ProposedTransaction))
),
# Optional parameters.
'changeAddress': Trytes(result_type=Address),
'securityLevel': SecurityLevel,
# Note that ``inputs`` is allowed to be an empty array.
'inputs':
f.Array | f.FilterRepeater(f.Required | Trytes(result_type=Address)),
'reference': Trytes(result_type=TransactionHash),
},
allow_missing_keys = {
'changeAddress',
'inputs',
'reference',
'securityLevel',
},
)
```
#### File: commands/extended/helpers_test.py
```python
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
from iota import Iota, TransactionHash
from iota.adapter import MockAdapter
class HelpersTestCase(TestCase):
def setUp(self):
super(HelpersTestCase, self).setUp()
self.api = api = Iota('mock://')
self.api.adapter = MockAdapter()
# noinspection SpellCheckingInspection
self.transaction = (
'TESTVALUE9DONTUSEINPRODUCTION99999KPZOTR'
'VDB9GZDJGZSSDCBIX9QOK9PAV9RMDBGDXLDTIZTWQ'
)
def test_positive_is_promotable(self):
"""
Transaction is promotable
"""
self.api.adapter.seed_response('checkConsistency', {
'state': True,
})
self.assertTrue(self.api.helpers.is_promotable(tail=self.transaction))
def test_negative_is_promotable(self):
"""
Transaction is not promotable
"""
self.api.adapter.seed_response('checkConsistency', {
'state': False,
'info': 'Inconsistent state',
})
self.assertFalse(self.api.helpers.is_promotable(tail=self.transaction))
``` |
{
"source": "JowellDev/jomoney",
"score": 2
} |
#### File: api/endpoints/transaction_routes.py
```python
from typing import List
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from db.session import get_db
from schemas.transactions import Transaction, TransactionSchema
router = APIRouter()
@router.get('/', response_model=List[Transaction])
def get_all_transactions(db: Session = Depends(get_db)):
return {"message": "get transaction"}
@router.get('/{transaction_id}', response_model=Transaction)
def get_transaction(transaction_id: int, db: Session = Depends(get_db)):
return {"message": "show transaction"}
```
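Both handlers above are stubs that return placeholder dictionaries rather than the declared `Transaction` response model. A minimal sketch of the database-backed versions is below; it assumes a SQLAlchemy model class `models.Transaction` with a primary key, and that the pydantic `Transaction` schema enables ORM mode — neither is shown in this excerpt.
```python
from typing import List

from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session

from db.session import get_db
from schemas.transactions import Transaction
import models  # assumed: models.Transaction is the SQLAlchemy table class

router = APIRouter()


@router.get('/', response_model=List[Transaction])
def get_all_transactions(db: Session = Depends(get_db)):
    # Return every row; response_model converts ORM objects to schemas
    # (assumes the Transaction schema sets orm_mode).
    return db.query(models.Transaction).all()


@router.get('/{transaction_id}', response_model=Transaction)
def get_transaction(transaction_id: int, db: Session = Depends(get_db)):
    # Primary-key lookup; 404 when the row does not exist.
    row = db.query(models.Transaction).get(transaction_id)
    if row is None:
        raise HTTPException(status_code=404, detail="Transaction not found")
    return row
```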
#### File: jomoney/db/session.py
```python
from typing import Generator
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from core.config import Config
SQLALCHEMY_DATABASE_URL = Config.DB_URL
engine = create_engine(SQLALCHEMY_DATABASE_URL)
sessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
def get_db() -> Generator:
try:
db = sessionLocal()
yield db
finally:
db.close()
``` |
{
"source": "J-Owens/soccerdata",
"score": 3
} |
#### File: soccerdata/tests/test_Integration.py
```python
import pandas as pd
import pytest
import soccerdata as foo
# TODO: integration tests
# Names of common leagues equal for all classes
# Number of clubs equal for all common leagues over classes
# Clubnames equal for all common leagues over classes
# Number of games equal for all common leagues/seasons over classes
# Scores per game equal for all common leagues over classes
@pytest.mark.e2e
def test_five38_vs_elo():
"""We should be able to retrieve the Elo history for all teams in these leagues."""
league_sel = [
'ENG-Premier League',
'ESP-La Liga',
'FRA-Ligue 1',
'GER-Bundesliga',
'ITA-Serie A',
]
five38 = foo.FiveThirtyEight(leagues=league_sel, seasons='1819')
five38_games = five38.read_games()
elo = foo.ClubElo()
elo_hist = pd.concat([elo.read_team_history(team) for team in set(five38_games['home_team'])])
assert set(five38_games['home_team']) - set(elo_hist['team']) == set()
```
#### File: soccerdata/tests/test_MatchHistory.py
```python
import pandas as pd
# Unittests -------------------------------------------------------------------
# Reader
def test_epl_2y(match_epl_2y):
df = match_epl_2y.read_games()
assert isinstance(df, pd.DataFrame)
assert len(df.index.get_level_values("season").unique()) == 2
```
#### File: soccerdata/tests/test_SoFIFA.py
```python
import pandas as pd
import pytest
# Unittests -------------------------------------------------------------------
@pytest.mark.fails_gha
def test_sofifa_ratings(sofifa_bundesliga):
assert isinstance(sofifa_bundesliga.read_ratings(), pd.DataFrame)
``` |
{
"source": "jowggernaut/rare-earth-modelling",
"score": 3
} |
#### File: API/algoritmoOtimizacao/ElementoClass.py
```python
from algoritmoOtimizacao.CoisaFisicaClass import Coisa_Fisica
class Elemento(Coisa_Fisica):
def __init__(elemento, nome: str, simbolo: str, preco: float, ca0: float):
super().__init__(nome, preco)
elemento.simbolo = simbolo
elemento.ca0 = ca0
def concentracoes_aquoso(elemento, chutes: list, n_celulas: int):
cas = {'ca1': chutes[0]}
for i in range(2, n_celulas + 1):
cas['ca' + str(i)] = chutes[i - 1]
return cas
```
#### File: API/algoritmoOtimizacao/SubstanciaClass.py
```python
from algoritmoOtimizacao.CoisaFisicaClass import Coisa_Fisica
class Substancia(Coisa_Fisica):
def __init__(substancia, nome, preco, propriedades_fisqui):
assert type(propriedades_fisqui) == dict
super().__init__(nome, preco)
for key in propriedades_fisqui:
setattr(substancia, key, propriedades_fisqui[key])
``` |
{
"source": "jowiho/nand2tetris",
"score": 3
} |
#### File: projects/06/HackAssembler.py
```python
import os, re, sys
class SymbolTable:
symbols = {
'SP': 0,
'LCL': 1,
'ARG': 2,
'THIS': 3,
'THAT': 4,
'R0': 0,
'R1': 1,
'R2': 2,
'R3': 3,
'R4': 4,
'R5': 5,
'R6': 6,
'R7': 7,
'R8': 8,
'R9': 9,
'R10': 10,
'R11': 11,
'R12': 12,
'R13': 13,
'R14': 14,
'R15': 15,
'SCREEN': 0x4000,
'KBD': 0x6000
}
nextVariableAddress = 16
def register_label(self, label, address):
self.symbols[label] = address
def resolve(self, symbol):
if not symbol in self.symbols:
self.symbols[symbol] = self.nextVariableAddress
self.nextVariableAddress += 1
return self.symbols[symbol]
class CodeWriter:
def __init__(self, file, symbol_table):
self.file = file
self.symbol_table = symbol_table
def encode(self, instruction):
if instruction[0] == '@':
self._write(self._encode_a(instruction))
else:
self._write(self._encode_c(instruction))
def _write(self, line):
self.file.write(line + '\n')
def _encode_destination(self, destination):
encoded = ""
encoded += '1' if 'A' in destination else '0'
encoded += '1' if 'D' in destination else '0'
encoded += '1' if 'M' in destination else '0'
return encoded
def _encode_opcode(self, opcode):
if 'M' in opcode:
opcode = opcode.replace('M', 'A')
firstbit = "1"
else:
firstbit = "0"
if opcode == '0': return firstbit + "101010"
if opcode == '1': return firstbit + "111111"
if opcode == '-1': return firstbit + "111010"
if opcode == 'D': return firstbit + "001100"
if opcode == 'A': return firstbit + "110000"
if opcode == '!D': return firstbit + "001101"
if opcode == '!A': return firstbit + "110001"
if opcode == '-D': return firstbit + "001111"
if opcode == '-A': return firstbit + "110011"
if opcode == 'D+1': return firstbit + "011111"
if opcode == 'A+1': return firstbit + "110111"
if opcode == 'D-1': return firstbit + "001110"
if opcode == 'A-1': return firstbit + "110010"
if opcode == 'D+A': return firstbit + "000010"
if opcode == 'D-A': return firstbit + "010011"
if opcode == 'A-D': return firstbit + "000111"
if opcode == 'D&A': return firstbit + "000000"
if opcode == 'D|A': return firstbit + "010101"
raise Exception("Unknown opcode " + opcode)
def _encode_jump(self, jump):
if jump == 'JGT': return "001"
if jump == 'JEQ': return "010"
if jump == 'JGE': return "011"
if jump == 'JLT': return "100"
if jump == 'JNE': return "101"
if jump == 'JLE': return "110"
if jump == 'JMP': return "111"
return "000"
def _encode_a(self, instruction):
address = instruction[1:]
if not address.isdigit():
address = self.symbol_table.resolve(address)
return "{0:016b}".format(int(address))
def _encode_c(self, instruction):
if ';' in instruction:
operation, jump = instruction.split(';')
else:
operation = instruction
jump = ""
if '=' in operation:
destination, opcode = operation.split('=')
else:
destination = ""
opcode = operation
return "111" + self._encode_opcode(opcode) + self._encode_destination(destination) + self._encode_jump(jump)
class Parser:
def __init__(self, code_writer, symbol_table):
self.writer = code_writer
self.symbol_table = symbol_table
def parseFile(self, filename):
# First pass: register labels in symbol table and keep only instructions
instructions = []
with open(filename) as file:
for line in file.readlines():
line = self._strip(line)
if len(line):
if line[0] == '(':
self.symbol_table.register_label(line[1:-1], len(instructions))
else:
instructions.append(line)
# Second pass: encode instructions
for instruction in instructions:
self.writer.encode(instruction)
def _strip(self, line):
line = re.sub('//.*', '', line)
line = re.sub(r'\s', '', line)
return line
def translate_file(asm_filename):
hack_filename = os.path.splitext(asm_filename)[0] + ".hack"
with open(hack_filename, "w") as asm_file:
symbol_table = SymbolTable()
writer = CodeWriter(asm_file, symbol_table)
parser = Parser(writer, symbol_table)
parser.parseFile(asm_filename)
def main(argv):
if len(argv) == 1 and os.path.splitext(argv[0])[1] == ".asm":
translate_file(argv[0])
else:
print("Usage: HackAssembler.py #.asm")
sys.exit(1)
if __name__ == '__main__':
main(sys.argv[1:])
``` |
{
"source": "jowinjohnchemban/Python",
"score": 4
} |
#### File: Python/maths/simple_calculator.py
```python
def add(a,b): #Addition +
return (a+b)
def sub(a,b): #Subtraction -
return (a-b)
def prd(a,b): #Multiplication ×
return (a*b)
def div(a,b): #Division ÷
return (a/b)
def main():
n = 1 #default Addition
while(n != 0):
n=int(input("\n 1. Addition\n 2. Subtraction\n 3. Multiplication\n 4. Division\n 0. Exit\n\n Enter your Choice!\n")) #MENU
if(n == 0):
exit()
a=int(input("\n\n First Number : ")) #First Input
b=int(input("\n\n Second Number : ")) #Second Input
try:
#Output
if(n == 1):
print("\n Sum=",add(a,b))
elif(n == 2):
print("\n Difference=",sub(a,b))
elif(n == 3):
print("\n Product=",prd(a,b))
elif(n == 4):
print("\n Quotient=",div(a,b))
else:
print("Unknown Entry!")
except:
print("\n\n E : Exception Occured")
if __name__ == "__main__":
main()
``` |
{
"source": "Jownao/Clock_RealTIme",
"score": 3
} |
#### File: Jownao/Clock_RealTIme/clock.py
```python
from tkinter import *
from tkinter import ttk
from tkinter import font
import time
import datetime
def quit(*args):
root.destroy()
def clock_time():
time = datetime.datetime.now()
time = (time.strftime("%H:%M:%S"))
txt.set(time)
root.after(1000,clock_time)
root = Tk()
root.attributes("-fullscreen",False)
root.configure(background='black')
root.bind('x',quit)
root.after(1000,clock_time)
fnt = font.Font(family = 'Helvetica',size = 30,weight = 'bold')
txt = StringVar()
lbl = ttk.Label(root, textvariable=txt, font = fnt, foreground = 'white', background = 'black')
lbl.place(relx = 0.5,rely = 0.5,anchor=CENTER)
root.title('Relógio Futurista')
root.geometry("500x300")
root.mainloop()
``` |
{
"source": "Jownao/TelegramBotAPI_jownao",
"score": 3
} |
#### File: TelegramBotAPI_jownao/pBot/telegrambot.py
```python
import telebot
from telebot import types
chave = "<KEY>"
bot = telebot.TeleBot(chave)
@bot.message_handler(commands=["badalo"])
def responder(mensagem):
bot.reply_to(mensagem,"badalado em você")
@bot.message_handler(commands=["redes"])
def responder(mensagem):
bot.reply_to(mensagem,"Jownao se encontra atualmente nessas redes sociais:\n"+
"Instagram: instagram.com/jownao\n"+
"Twitch: twitch.tv/jownao\n"+
"Github: github.com/Jownao")
@bot.message_handler(commands=["amigos"])
def responder(mensagem):
bot.reply_to(mensagem,"Amigos:\n"+
"Carlo\nRulio\nVicsk\nAlbert\nzaboyz ltda")
@bot.message_handler(func=lambda m: True)
def echo_all(message):
if message.text == "oi":
bot.reply_to(message, "Iae de boa ?")
else:
bot.reply_to(message, message.text)
chat_id = "798429701"
# or add KeyboardButton one row at a time:
markup = types.ReplyKeyboardMarkup()
itembtna = types.KeyboardButton('Badalo')
itembtnv = types.KeyboardButton('de')
itembtnc = types.KeyboardButton('Sino')
itembtnd = types.KeyboardButton('Macho')
itembtne = types.KeyboardButton('Veio')
markup.row(itembtna, itembtnv)
markup.row(itembtnc, itembtnd, itembtne)
bot.send_message(chat_id, "Escolha oque desejar meu caro:", reply_markup=markup)
bot.polling()
``` |
{
"source": "jowolf/Django-QuickPages",
"score": 2
} |
#### File: Django-QuickPages/quickpages/models.py
```python
from django.db import models
#from django.utils.safestring import mark_safe
from taggit.managers import TaggableManager
#### Custom Managers
class PublishedManager (models.Manager):
def published (self):
return self.filter (published=True)
def unpublished (self):
return self.filter (published=False)
# This is specific to our site (eRacks.com), but here as an example for those who wish to use it:
class PRManager (models.Manager):
def presspages (self):
return self.filter (template__endswith="press.html").exclude (title__startswith='eRacks').order_by ('-created')
#### Models
class QuickPage (models.Model):
slug = models.CharField (max_length=1024, unique=True, help_text="This is the url. Slashes are OK, but not necessary.")
name = models.CharField (max_length=512, help_text="Name - short, typically one or two words.")
title = models.CharField (max_length=512, help_text="Page title, and optional H1 tag (see Heading)")
published = models.BooleanField (default=True, help_text="Published=shown. If not, gives 404")
javascript = models.CharField (max_length=1024, blank=True, help_text="Comma-separated filenames, relative to MEDIA_ROOT or STATIC_ROOT")
css = models.CharField (max_length=1024, blank=True, help_text="Comma-separated filenames, relative to MEDIA_ROOT or STATIC_ROOT")
heading = models.BooleanField (default=True, help_text='If "Heading" is checked, "Title" is added to the top of the content in an H1 tag.')
meta_title = models.CharField (max_length=512, blank=True, help_text="Meta title")
description = models.TextField (blank=True, help_text="Meta description")
keywords = models.TextField (blank=True, help_text="Meta keywords")
comments = models.TextField (blank=True, help_text="Internal developer/editor comments, not shown")
content = models.TextField (blank=True,
help_text='HTML Content - main content div of the page.')
#help_text='HTML Content - <a href="javascript:mce_setup();">Click Here</a> to edit - see <a href="http://wiki.moxiecode.com/index.php/TinyMCE:Configuration">TinyMCE Configuration</a> for more options.')
template = models.CharField (max_length=100, blank=True,
help_text="Optional template - default is quickpages/base.html, which you can also simply replace in your project tree.")
# site = model.ForeignKey ('Site'...)
updated = models.DateTimeField ('date modified', auto_now=True)
created = models.DateTimeField ('date created', auto_now_add=True)
objects = PublishedManager()
pr_objects = PRManager()
tags = TaggableManager(blank=True)
def get_absolute_url (self):
return '/' + self.slug
def __unicode__ (self):
return self.name
types = (
('raw','Raw - passed thru as is'),
('html','HTML - Use the built-in HTML Editor (warning: will strip certain tags, etc'),
)
class QuickSnippet (models.Model):
name = models.CharField(max_length=100, help_text="Name, identifier-style, no spaces, will be exported into request context")
#slug = models.CharField(max_length=100)
title = models.CharField(max_length=100, blank=True, help_text="Title, verbose name / short description")
js = models.TextField(blank=True, help_text="Javascript for this snippet - <script> tags will be passed through")
css = models.TextField(blank=True, help_text="CSS for this snippet - <link> tags will be passed through")
#meta = models.ManyToManyField (Meta, blank=True)
body = models.TextField(help_text="Body of snippet, available via {{ <name> }} in templates via context processor")
comments = models.TextField (blank=True, help_text="Internal developer/editor comments, not shown")
typ = models.CharField (max_length=20, default='raw', choices = types, help_text="Editor type - raw, html, etc - future: Markdown, etc")
created = models.DateTimeField('date created', auto_now_add=True)
updated = models.DateTimeField('date updated', auto_now=True)
published = models.BooleanField(default=True)
objects = PublishedManager()
tags = TaggableManager(blank=True)
def __unicode__ (self):
#return mark_safe (self.body or self.title or self.name)
return '%s (%s)' % (self.name, self.title)
# TODO: Add template, image, possibly slug
def get_absolute_url (self): return '/%s/' % self.name #slug
url = property (get_absolute_url)
``` |
{
"source": "jowoni/compiler",
"score": 3
} |
#### File: jowoni/compiler/calclex.py
```python
import sys
sys.path.insert(0, "../..")
import ply.lex as lex
# reserved
reserved = (
'INT', 'RETURN', 'VOID', 'FOR', 'ELSE', 'IF', 'WHILE', 'BREAK',
)
# List of token names. This is always required
tokens = reserved + (
# Literals (identifier, integer constant, float constant, string constant,
# char const)
'ID', # 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',
# Operators (+,-,*,/,%, &, ||, &&, !, <, <=, >, >=, ==)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD', 'AND', 'LOR',
'LAND', 'LNOT', 'LT', 'LE', 'GT', 'GE', 'EQ',
# Assignment (=)
'EQUALS',
# Increment/decrement (++,--)
'PLUSPLUS', 'MINUSMINUS',
# Delimeters ( ) [ ] { } , ;
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'SEMI',
)
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MOD = r'%'
t_AND = r'&'
t_LOR = r'\|\|'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
# Assignment operators
t_EQUALS = r'='
# Increment/decrement
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'--'
# Delimeters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_SEMI = r';'
# A regular expression rule with some action code
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
# Define a rule so we can track line numbers
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# Error handling rule
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Identifiers and reserved words
reserved_map = {}
for r in reserved:
reserved_map[r.lower()] = r
def t_ID(t):
r'[A-Za-z_][\w_]*'
t.type = reserved_map.get(t.value, "ID")
return t
# Preprocessor directive (ignored)
def t_preprocessor(t):
r'\#(.)*?\n'
t.lexer.lineno += 1
# Integer literal
t_ICONST = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FCONST = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CCONST = r'(L)?\'([^\\\n]|(\\.))*?\''
# Build the lexer
lexer = lex.lex()
``` |
{
"source": "Joword/DBMerge",
"score": 3
} |
#### File: Joword/DBMerge/utils.py
```python
import pandas as pd
from read_data import pmid_path,evidences
def pmid_groupby(arg:str):
evidences = pd.read_table(arg+".txt",sep='\t',dtype='str',encoding='utf-8')
df1 = pd.DataFrame(pd.DataFrame(evidences).groupby(['variant_id'],as_index=False)['submitter','publication'].agg(lambda x:x.str.cat(sep='|')),dtype='str')
return df1
def make_chgvs():
u'''Normalize c.HGVS: drop the bases that follow "del"/"dup", keep the bases after "ins".
:return:
'''
with open(pmid_path, "r+") as file:
with open("chgvs_index.txt","w+") as g:
next(file)
g.write("variantId\tchr\tstart\tstop\tref\talt\tgene\ttranscript\tchgvs\n")
lines = [i.strip().split("\t") for i in file.readlines()]
for line in lines:
if len(line[8].split("alt: "))>1:
if len(line[8].split("alt: ")[1].replace(" )","").split("del"))>1:
g.write("\t".join([line[i] for i in range(0, 8)]+[str(line[7] + ":" + line[8].split("alt: ")[1].replace(" )","").split("del")[0] + "del")])+"\n")
elif len(line[8].split("alt: ")[1].replace(" )","").split("dup"))>1:
g.write("\t".join([line[i] for i in range(0, 8)]+[str(line[7] + ":" + line[8].split("alt: ")[1].replace(" )","").split("dup")[0] + "dup")])+"\n")
else:
g.write("\t".join([line[i] for i in range(0, 8)] + [str(line[7] + ":" + line[8].split("alt: ")[1].replace(" )",""))]) + "\n")
elif len(line[8].split("alt: "))==1:
if len(line[8].split("del")) >1 and len(line[8].split("ins"))==1:
g.write("\t".join([line[i] for i in range(0, 8)]+[str(line[7]+":"+line[8].split("del")[0]+"del")])+"\n")
elif len(line[8].split("dup"))>1 and len(line[8].split("ins"))==1:
g.write("\t".join([line[i] for i in range(0, 8)]+[str(line[7]+":"+line[8].split("dup")[0]+"dup")])+"\n")
elif len(line[8].split("del")) >1 and len(line[8].split("ins"))>1:
g.write("\t".join([line[i] for i in range(0, 8)] + [str(line[7] + ":" + line[8].split("del")[0] + "del")+"ins"+line[8].split("ins")[1]]) + "\n")
else:
g.write("\t".join([line[i] for i in range(0, 8)] + [str(line[7] + ":" + line[8])]) + "\n")
else:
g.write("\t".join([line[i] for i in range(0, 8)]+[str(line[7]+":"+line[8])])+"\n")
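# --- Illustrative usage sketch (not part of the original script); the file name below is a placeholder.
if __name__ == "__main__":
    # Group evidence rows by variant_id, joining submitter/publication fields with '|'.
    grouped = pmid_groupby("evidences")   # assumes an "evidences.txt" tab-separated file exists
    print(grouped.head())
    # Rebuild the chgvs strings and write them to "chgvs_index.txt".
    make_chgvs()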
``` |
{
"source": "jowr/jopy",
"score": 3
} |
#### File: jopy/jopy/base.py
```python
from __future__ import print_function, division
class JopyBaseClass(object):
"""The base class for all objects
The mother of all classes in the jopy module. Implements
basic functionality for debugging and exception handling.
Extended description of function, just a usage example
for the NumPy style docstrings. See also:
http://sphinx-doc.org/ext/example_numpy.html#example-numpy
"""
def __init__(self):
self.DEBUG = False
@property
def DEBUG(self):
return self._DEBUG
@DEBUG.setter
def DEBUG(self, value):
self._DEBUG = value
@DEBUG.deleter
def DEBUG(self):
del self._DEBUG
def autolog(self, message):
"""Centralised logging facility
Use this function in your code to write to the log files. It can
also be extended to perform some more sophisticated actions
for advanced error detection.
        Function name, file name and line number get appended automatically.
Parameters
----------
message : str
message to log
"""
import inspect, logging
# Get the previous frame in the stack, otherwise it would
# be this function!!!
func = inspect.currentframe().f_back.f_code
msg = "%s: %s in %s:%i" % (
message,
func.co_name,
func.co_filename,
func.co_firstlineno
)
# Dump the message + the name of this function to the log.
logging.debug(msg)
if self.DEBUG:
print(msg)
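# --- Illustrative usage sketch (not part of the original module) ---
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.DEBUG)
    obj = JopyBaseClass()
    obj.DEBUG = True              # echo autolog messages to stdout as well as the log
    obj.autolog("demo message")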
```
#### File: jopy/styles/__init__.py
```python
import matplotlib.pyplot as plt
try:
from .plots import Figure
except:
from jopy.styles.plots import Figure
def get_figure(orientation='landscape',width=110,fig=None,axs=False):
"""Creates a figure with some initial properties
The object can be customised with the parameters. But since it is an
object, it can also be modified later.
Parameters
----------
orientation : str
either landscape or portrait
width : float
width in mm, used for scaling the default A4 format
if width is less than 10, use it as a factor to apply to A4
fig : matplotlib.figure.Figure, jopy.plots.Figure
The figure object to handle, None creates a new figure
axs : boolean
True or False - Should an axis object be added to the figure?
Returns
-------
jopy.plots.Figure, matplotlib.figure.Figure
The figure object
"""
if fig is None: fig = plt.figure()
fig.__class__ = Figure
sideA = 297. # height of A4
sideB = 210. # width of A4
mm_to_inch = 3.93700787401575/100.0 # factor mm to inch
if width < 0: raise ValueError("size cannot be less than zero.")
width *= mm_to_inch
sideA *= mm_to_inch
sideB *= mm_to_inch
if orientation=='landscape':
if width < 10*mm_to_inch: width *= sideA
scale = width/sideA
width = sideA*scale #=width
height = sideB*scale
elif orientation=='portrait':
if width < 10*mm_to_inch: width *= sideB
scale = width/sideB
width = sideB*scale #=width
height = sideA*scale
else:
raise ValueError("Unknown orientation")
fig.set_size_inches(width,height)
if axs: fig.add_subplot(111)
return fig
def plot_axis(data,kind,ax=None):
    # Stub: only resolves the target axis; plotting of `data` according to `kind` is not implemented in this snippet.
    if ax is None: ax = plt.gca()
    return ax
if __name__ == "__main__":
from jopy.utils import module_class_dict
import jopy.styles.mplib as mpl
dic = module_class_dict(mpl)
for i in dic:
line_fig,map_fig = dic[i]()._show_info()
line_fig.savefig(i+"_lines.pdf")
map_fig.savefig(i+"_maps.pdf")
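    # --- Illustrative usage sketch for get_figure (appended to the demo above); the output file name is a placeholder ---
    fig = get_figure(orientation='portrait', width=84, axs=True)   # 84 mm wide figure with A4 aspect ratio
    fig.gca().plot([0, 1], [0, 1])
    fig.savefig("get_figure_demo.pdf")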
```
#### File: jopy/styles/plots.py
```python
from __future__ import print_function, division
import matplotlib
import matplotlib.pyplot as plt
import copy
class Figure(matplotlib.figure.Figure):
def _get_axis(self,**kwargs):
ax = kwargs.pop('ax', self._get_axes()[0])
return ax
def _get_axes(self,**kwargs):
ax = kwargs.pop('ax', [])
ax = kwargs.pop('axs', ax)
if ax is None or len(ax)<1:
try: ax = super(Figure, self)._get_axes()
except: ax = [plt.gca()]; pass
return ax
def get_legend_handles_labels_axis(self,ax=None,axs=None):
"""Extracts the handles and labels from an axis or from a list of axes.
Useful for manual legend processing and customisation.
"""
ax = self._get_axes(ax=ax,axs=axs)
handles = []; labels = []
for a in ax:
handlestmp, labelstmp = a.get_legend_handles_labels()
handles.extend(handlestmp)
labels.extend(labelstmp)
return handles, labels, ax[0]
def draw_legend(self, **kwargs):
"""Puts a legend on the provided axis.
Can be used with kwargs like ncol=2 and alike, which are passed
        on to the corresponding pyplot routines.
"""
tc = kwargs.pop('textcolour', matplotlib.rcParams["text.color"])
tc = kwargs.pop('textcolor', tc)
#kwargs.setdefault('loc', 0)
#kwargs.setdefault('frameon', True)
h, l, a = self.get_legend_handles_labels_axis(ax=kwargs.pop('ax', None),axs=kwargs.pop('axs', None))
#handles = copy.copy(kwargs.pop('handles', handles))
handles = []
for h in kwargs.pop('handles', h):
handles.append(copy.copy(h))
handles[-1].set_alpha(1.0)
labels = []
for l in kwargs.pop('labels', l):
labels.append(copy.copy(l))
legend = a.legend(handles,labels,**kwargs)
try:
rect = legend.get_frame()
rect.set_facecolor(matplotlib.rcParams["grid.color"])
rect.set_linewidth(0)
rect.set_edgecolor(tc)
# Change the alpha value, make sure it is visible
def set_alpha(objList):
for o in objList:
try: o.set_alpha(1.0)
except: matplotlib.artist.setp(o, alpha=1.0); pass
#mpl.artist.setp(o, markersize=6)
#mpl.artist.setp(o, alpha=np.max([1.0,o.get_alpha()]))
# h.set_alpha(np.max([1.0,h.get_alpha()]))
# #mpl.artist.setp(h, alpha=np.max([1.0,h.get_alpha()]))
# mpl.artist.setp(h, markersize=6)
set_alpha(legend.legendHandles)
set_alpha(legend.get_lines())
set_alpha(legend.get_patches())
#
#for h in legend.legendHandles:
# h.set_alpha(np.max([1.0,h.get_alpha()]))
# #mpl.artist.setp(h, alpha=np.max([1.0,h.get_alpha()]))
# mpl.artist.setp(h, markersize=6)
# Change the legend label colors to almost black, too
for t in legend.texts:
t.set_color(tc)
except AttributeError:
            # There are no labeled objects
pass
return legend
def to_file(self, name, **kwargs):
dic = dict(bbox_inches='tight')
dic.update(**kwargs)
self.savefig(name, **dic)
def to_raster(self, name, **kwargs):
dic = dict(dpi=300)
dic.update(**kwargs)
if name.endswith(".png") or name.endswith(".jpg"):
self.to_file(name, **dic)
else:
raise ValueError("You can only save jpg and png images as raster images.")
def to_power_point(self, name, **kwargs):
dic = dict(dpi=600, transparent=True)
dic.update(**kwargs)
if name.endswith(".png"):
self.to_raster(name, **dic)
else:
raise ValueError("You should use png images with MS PowerPoint.")
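# --- Illustrative usage sketch (not part of the original module): exercises draw_legend/to_file
# on a throwaway figure; the output file name is a placeholder.
if __name__ == "__main__":
    demo_fig = plt.figure()
    demo_fig.__class__ = Figure          # same re-classing trick used by jopy.styles.get_figure
    demo_ax = demo_fig.add_subplot(111)
    demo_ax.plot([0, 1], [1, 0], label="falling")
    demo_ax.plot([0, 1], [0, 1], label="rising")
    demo_fig.draw_legend(ncol=2)
    demo_fig.to_file("legend_demo.pdf")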
```
#### File: jopy/jopy/utils.py
```python
from __future__ import print_function, division
import numpy as np
from numpy import pi,e
def module_class_dict(mod):
"""Get all classes from a module in a dict with the names
Parameters
----------
mod : Python module
The module to extract the classes from.
"""
mod_name = str(mod).split("'")[1]
ret = {}
for name, cls in mod.__dict__.items():
if isinstance(cls, type):
if mod_name in str(cls):
ret[name] = cls
return ret
# def module_class_dict(mod):
# """
# Returns a list of names of the abstract base
# classes that should not be instantiated. Can
# be used to build an ignore list.
# """
# mod_name = str(mod).split("'")[1]
# print(mod_name)
# ignList = {}
# for i in inspect.getmembers(mod):
# if inspect.isclass(i[1]):
# ignList[i[0]] = i[1]
# return ignList
#dict([(name, cls) for name, cls in mod.__dict__.items() if isinstance(cls, type)])
def transition_factor(start=0.25, stop=0.75, position=0.5, order=2):
"""Weighting factor for smooth transition (from 0 to 1)
This function returns a value between 0 and 1. A smooth transition
is achieved by means of defining the position and the transition
interval from start to stop parameter. Outside this interval,
the 0 and the 1 are returned, respectively. This transition function
with up to two smooth derivatives was taken from [1]. If you provide
an order higher than 2, the generalised logistic function [2] will be
used to calculated the transition curve.
Parameters
----------
start : float
start of transition interval; default 0.25
stop : float
end of transition interval; default 0.75
position : float
current position; default 0.5
order : integer
Smooth up to which derivative?; default 2
Returns
-------
float
smooth transition between 0 and 1 from start to stop [-]
Use tFactor in an equation like this:
tFactor = transition_factor(start=start,stop=stop,position=position);
smoothed = tFactor*1stValue + (1 - tFactor)*2ndValue;
References
----------
[1] <NAME>, Proposal of New Object-Oriented Equation-Based Model
Libraries for Thermodynamic Systems, PhD thesis, Technical University
<NAME>, 2008
[2] Generalised logistic function on http://en.wikipedia.org/wiki/Generalised_logistic_function
"""
a_map = [-1./2., -2./pi, -3./4., -8./pi] #First parameters
b_map = [ 1./2., 1./2., 1./2., 1./2.] #Second parameters
#Rename variables to match with Richter2008, p.68ff
phi = 0.0 #"current phase";
a = 0.0 # "multiplier";
b = 0.0 # "addition";
x_t = 0.0 # "Start of transition";
x = 0.0 # "Current position";
DELTAx = 0.0 # "Length of transition";
#Parameters for generalised logistic function
A = 0. #"Lower asymptote";
K = 1. #"Upper asymptote";
B = 8. #"Growth rate";
nu= 1. #"Symmetry changes";
Q = nu #"Zero correction";
M = nu #"Maximum growth for Q = nu";
X = 0.
END = 0.
START = 0.
factor = 0.
order = int(order)
if order < 0:
raise ValueError("This function only supports positive values for the order of smooth derivatives.")
swapper = None
if start>stop:
swapper = start
start = stop
stop = swapper
#raise Exception("There is only support for positive differences, please provide start < stop.")
position = np.array(position)
res = np.zeros_like(position)
res[position < start] = 1.0
res[position > stop ] = 0.0
theMask = (position >= start) & (position <= stop)
position = position[theMask]
#0th to 2nd order
if order <= 2:
a = a_map[order];
b = b_map[order];
x = position;
DELTAx = stop - start;
x_t = start + 0.5*DELTAx;
phi = (x - x_t) / DELTAx * pi;
else: #higher order
#We need to do some arbitrary scaling:
END = 4.0
START = -2.0
factor= (END-START) / (stop-start)
X = START + (position - start) * factor
resTMP = np.zeros_like(position)
if (order == 0):
for i in range(len(position)):
resTMP[i] = a * np.sin(phi[i]) + b
elif (order == 1):
for i in range(len(position)):
resTMP[i] = a * ( 1./2. * np.cos(phi[i]) * np.sin(phi[i]) + 1./2.*phi[i]) + b
elif (order == 2):
for i in range(len(position)):
resTMP[i] = a * ( 1./3. * np.cos(phi[i])**2. * np.sin(phi[i]) + 2./3. * np.sin(phi[i])) + b
else:
for i in range(len(position)):
resTMP[i] = 1. - (A + (K-A) / np.power( 1. + Q * np.power(e,(-B*(X[i] - M))),1./nu))
res[theMask] = resTMP
if swapper is None: return 1-res
else: return res
def transition_factor_alt(center = 0.5, length = 1.0, position = 0.0, order = 2):
"""Please see :py:func:`.transition_factor` for documentation
"""
return transition_factor(start=center-0.5*length, stop=center+0.5*length, position=position, order=order)
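# --- Illustrative usage sketch (not part of the original module): blend two values across a
# transition interval, following the weighting recipe quoted in the docstring above.
if __name__ == "__main__":
    positions = np.linspace(0.0, 1.0, 5)
    w = transition_factor(start=0.25, stop=0.75, position=positions, order=2)
    smoothed = w * 10.0 + (1.0 - w) * 20.0   # tFactor*1stValue + (1 - tFactor)*2ndValue
    print(np.round(w, 3))
    print(np.round(smoothed, 2))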
```
#### File: test/test_thermo/__init__.py
```python
from __future__ import print_function, division
import jopy.thermo
class TestUtils(object):
@classmethod
def setup_class(cls):
pass
def test_lmtd(self):
res = jopy.thermo.lmtd(0.0, 0.0)
assert res == 0.0
res = jopy.thermo.lmtd(10.0, 10.0)
print(res,10.0)
@classmethod
def teardown_class(cls):
pass
``` |
{
"source": "jowsey/botflop",
"score": 2
} |
#### File: botflop/cogs/linking_panel.py
```python
import os
import discord
import requests
import json
import logging
import sys
from discord.ext import commands, tasks
from discord.ext.commands import has_permissions, MissingPermissions
from dotenv import load_dotenv
import aiohttp
import asyncio
class Linking(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.guild_id = int(os.getenv('guild_id'))
self.crabwings_role_id = int(os.getenv('crabwings_role_id'))
self.duckfeet_role_id = int(os.getenv('duckfeet_role_id'))
self.elktail_role_id = int(os.getenv('elktail_role_id'))
self.client_role_id = int(os.getenv('client_role_id'))
self.subuser_role_id = int(os.getenv('subuser_role_id'))
self.verified_role_id = int(os.getenv('verified_role_id'))
@commands.Cog.listener()
async def on_message(self, message):
self.guild = self.bot.get_guild(self.guild_id)
if message.author != self.bot.user and message.guild is None and self.guild.get_member(message.author.id) is not None:
if "://" in message.content:
return
if self.guild in message.author.guilds:
channel = message.channel
await channel.send("Processing, please wait...")
# Potential API key, so tries it out
if len(message.content) == 48:
url = "https://panel.birdflop.com/api/client/account"
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + message.content,
}
# response = requests.get(url, headers=headers)
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=headers) as response:
# If API token is verified to be correct:
if response.status == 200:
# Formats response of account in JSON format
json_response = await response.json()
# Loads contents of users.json
file = open('users.json', 'r')
data = json.load(file)
file.close()
# Checks if user exists. If so, skips adding them to users.json
client_id_already_exists = False
discord_id_already_exists = False
for user in data['users']:
if user['client_id'] == json_response['attributes']['id']:
client_id_already_exists = True
logging.info("Client ID already exists")
if user['discord_id'] == message.author.id:
discord_id_already_exists = True
logging.info("Discord ID already exists")
if not client_id_already_exists and not discord_id_already_exists:
data['users'].append({
'discord_id': message.author.id,
'client_id': json_response['attributes']['id'],
'client_api_key': message.content
})
json_dumps = json.dumps(data, indent=2)
# Adds user to users.json
file = open('users.json', 'w')
file.write(json_dumps)
file.close()
member = self.guild.get_member(message.author.id)
if member:
url = "https://panel.birdflop.com/api/client"
headers = {
'Accept': 'application/json',
'Authorization': 'Bearer ' + message.content,
}
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=headers) as response:
# If API token is verified to be correct, continues
if response.status == 200:
# Formats response for servers in JSON format
servers_json_response = await response.json()
user_client = False
user_subuser = False
user_crabwings = False
user_duckfeet = False
user_elktail = False
for server in servers_json_response['data']:
server_owner = server['attributes']['server_owner']
if server_owner:
user_client = True
elif not server_owner:
user_subuser = True
server_node = server['attributes']['node']
if server_node == "Crabwings - NYC":
user_crabwings = True
elif server_node == "Duckfeet - EU":
user_duckfeet = True
elif server_node == "Elktail - EU":
user_elktail = True
if user_client:
role = discord.utils.get(self.guild.roles, id=self.client_role_id)
await member.add_roles(role)
if user_subuser:
role = discord.utils.get(self.guild.roles, id=self.subuser_role_id)
await member.add_roles(role)
if user_crabwings:
role = discord.utils.get(self.guild.roles, id=self.crabwings_role_id)
await member.add_roles(role)
if user_duckfeet:
role = discord.utils.get(self.guild.roles, id=self.duckfeet_role_id)
await member.add_roles(role)
if user_elktail:
role = discord.utils.get(self.guild.roles, id=self.elktail_role_id)
await member.add_roles(role)
role = discord.utils.get(self.guild.roles, id=self.verified_role_id)
await member.add_roles(role)
await channel.send(
'Your Discord account has been linked to your panel account! You may unlink your Discord and panel accounts by reacting in the #verification channel or by deleting your Verification API key.')
logging.info("Success message sent to " + message.author.name + "#" + str(
message.author.discriminator) + " (" + str(
message.author.id) + ")" + ". User linked to API key " + message.content + " and client_id " + str(
json_response['attributes']['id']))
elif discord_id_already_exists:
await channel.send(
'Sorry, your Discord account is already linked to a panel account. If you would like to link your Discord account to a different panel account, please unlink your Discord account first by reacting in the #verification channel.')
logging.info("Duplicate Discord message sent to " + message.author.name + "#" + str(
message.author.discriminator) + " (" + str(
message.author.id) + ")" + " for using API key " + message.content + " linked to client_id " + str(
json_response['attributes']['id']))
elif client_id_already_exists:
await channel.send(
'Sorry, your panel account is already linked to a Discord account. If you would like to link your panel account to a different Discord account, please unlink your panel account first by deleting its Verification API key and waiting up to 10 minutes.')
logging.info("Duplicate panel message sent to " + message.author.name + "#" + str(
message.author.discriminator) + " (" + str(
message.author.id) + ")" + " for using API key " + message.content + " linked to client_id " + str(
json_response['attributes']['id']))
else:
                                # Says this if the API key is the correct # of characters but invalid
await channel.send("Sorry, that appears to be an invalid API key.")
logging.info(
'invalid sent to ' + message.author.name + "#" + str(
message.author.discriminator) + " (" + str(
message.author.id) + ")")
else:
# Says this if API key is incorrect # of characters
await channel.send(
'Sorry, that doesn\'t appear to be an API token. An API token should be a long string resembling this: ```<KEY>```')
logging.info("obvious incorrect sent to " + message.author.name + "#" + str(
message.author.discriminator) + " (" + str(message.author.id) + ")")
def setup(bot):
bot.add_cog(Linking(bot))
``` |
{
"source": "Jowsjw/Image-Classifier-Application",
"score": 2
} |
#### File: Jowsjw/Image-Classifier-Application/predict.py
```python
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms,models
import json
import PIL
from PIL import Image
def main():
in_arg = get_input_args()
model = loadmodel(in_arg.modelpath)
    device = ['cuda', 'cpu']
    gpu = torch.cuda.is_available()
    if gpu:
        model.to(device[0])
    else:
        print('Please turn on the GPU mode')
probs, classes = predict(in_arg.image, model, in_arg.top_k,in_arg.cat_to_name)
    probability, labels = get_flower_name(in_arg.image, model, in_arg.top_k, in_arg.cat_to_name)
print("image to predict:", in_arg.image)
print("\n flower:",labels)
print("\n probability:",probability)
def get_input_args():
parser = argparse.ArgumentParser()
parser.add_argument('--image', type=str, help='image path')
parser.add_argument('--modelpath', type=str, help='the path that we save our model')
parser.add_argument('--top_k', type=int, default=5, help='return top K classes')
parser.add_argument('--cat_to_name', type=str, default='', help='helps us to find the flower names')
return parser.parse_args()
def loadmodel(modelpath):
checkpoint = torch.load(modelpath)
model = checkpoint['model']
model.state_dict=checkpoint['state_dict']
model.classifier=checkpoint['classifier']
model.class_to_idx = checkpoint['class_to_idx']
return model
def process_image(image):
ig = Image.open(image)
ig = ig.resize((256,256))
ig = ig.crop((16,16,240,240))
ig = np.array(ig)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
ig = (ig - mean) / std
ig = ig.transpose(2,0,1)
return ig
def predict(image, model, top_k,cat_to_name):
image = process_image(image)
image = torch.from_numpy(np.array([image])).float()
model.eval()
image = image.cuda()
output = model.forward(image)
    ps = torch.exp(output).topk(top_k)
prob = ps[0][0]
index = ps[1][0]
convert = []
for i in range(len(model.class_to_idx.items())):
convert.append(list(model.class_to_idx.items())[i][0])
label = []
    for i in range(top_k):
label.append(convert[index[i]])
prob = prob.cpu()
prob = prob.detach().numpy()
return prob,label
def get_flower_name(image, model, top_k,cat_to_name):
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
prob,classes = predict(image, model, top_k,cat_to_name)
labels = []
for i in classes:
labels.append(cat_to_name[i])
probability = []
    for i in prob:
probability.append(i)
return probability,labels
if __name__ == '__main__':
main()
``` |
{
"source": "jowslive/face-recog",
"score": 3
} |
#### File: face-recog/Python/Trainer_All.py
```python
import os  # os lets us pass a folder as a path
import cv2  # OpenCV
import numpy as np  # NumPy
from PIL import Image  # PIL Image
LBPHFace = cv2.face.LBPHFaceRecognizer_create(1, 1, 7, 7)  # CREATE THE LBPH FACE-RECOGNITION MODEL
path = 'C:/Users/joaob/Documents/Github/face-recog/Python/dataSet'  # path to the folder containing the photos
def getImageWithID (path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    FaceList = []
    IDs = []
    for imagePath in imagePaths:
        faceImage = Image.open(imagePath).convert('L')  # open the image and convert it to grayscale
        faceImage = faceImage.resize((110,110))  # resize the image so the LBPH recognizer can be trained on uniform inputs
        faceNP = np.array(faceImage, 'uint8')  # convert the image to a NumPy array
        ID = int(os.path.split(imagePath)[-1].split('.')[1])  # retrieve the ID from the file name
        FaceList.append(faceNP)  # append the NumPy array to the face list
        IDs.append(ID)  # append the ID to the list of IDs
        cv2.imshow('Training Set', faceNP)  # show the images as they are loaded
        cv2.waitKey(125)
    return np.array(IDs), FaceList  # the IDs are converted to a NumPy array
IDs, FaceList = getImageWithID(path)
# ------------------------------------ TRAINING THE FACE RECOGNIZER ----------------------------------------
print('TRAINING THE FILES......')
LBPHFace.train(FaceList, IDs)
print('LBPH FACE RECOGNIZER TRAINING COMPLETE...')
LBPHFace.save('Recogniser/trainingDataLBPH.xml')
print('ALL XML FILES HAVE BEEN SAVED...')
cv2.destroyAllWindows()
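# --- Illustrative sketch (not part of the original script): reload the saved model and sanity-check
# it on the first training face. Assumes the loop above found at least one image.
if len(FaceList) > 0:
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read('Recogniser/trainingDataLBPH.xml')
    predicted_id, confidence = recognizer.predict(FaceList[0])
    print('PREDICTED ID:', predicted_id, 'CONFIDENCE:', confidence)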
``` |
{
"source": "joxeankoret/capstone",
"score": 2
} |
#### File: python/capstone/arm.py
```python
import ctypes, ctypes.util
from arm_const import *
# define the API
class arm_op_mem(ctypes.Structure):
_fields_ = (
('base', ctypes.c_uint),
('index', ctypes.c_uint),
('scale', ctypes.c_int),
('disp', ctypes.c_int),
)
class arm_op_shift(ctypes.Structure):
_fields_ = (
('type', ctypes.c_uint),
('value', ctypes.c_uint),
)
class arm_op_value(ctypes.Union):
_fields_ = (
('reg', ctypes.c_uint),
('imm', ctypes.c_int),
('fp', ctypes.c_double),
('mem', arm_op_mem),
)
class arm_op(ctypes.Structure):
_fields_ = (
('shift', arm_op_shift),
('type', ctypes.c_uint),
('value', arm_op_value),
)
class _cs_arm(ctypes.Structure):
_fields_ = (
('cc', ctypes.c_uint),
('update_flags', ctypes.c_bool),
('writeback', ctypes.c_bool),
('op_count', ctypes.c_uint8),
('operands', arm_op * 20),
)
def get_arch_info(a):
return (a.cc, a.update_flags, a.writeback, a.operands[:a.op_count])
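# --- Illustrative usage sketch (not part of this binding module): how the detail API is typically
# consumed through the installed capstone package; the byte string below is a sample ARM instruction
# ("str lr, [sp, #-4]!") and the addresses are arbitrary.
if __name__ == '__main__':
    from capstone import Cs, CS_ARCH_ARM, CS_MODE_ARM
    md = Cs(CS_ARCH_ARM, CS_MODE_ARM)
    md.detail = True                     # required so that insn.operands gets populated
    for insn in md.disasm(b"\x04\xe0\x2d\xe5", 0x1000):
        print("0x%x:\t%s\t%s\t(%d operands)" %
              (insn.address, insn.mnemonic, insn.op_str, len(insn.operands)))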
``` |
{
"source": "Joxis/VKD",
"score": 2
} |
#### File: data/veri/data_manager.py
```python
import os.path as osp
import numpy as np
from copy import deepcopy
class Veri(object):
"""
VeRi
Reference:
"""
def __init__(self, root='/data/datasets/', min_seq_len=0):
self.root = osp.join(root, 'VeRi')
self.train_name_path = osp.join(self.root, 'name_train.txt')
self.query_name_path = osp.join(self.root, 'name_query.txt')
self.track_gallery_info_path = osp.join(self.root, 'test_track.txt')
train_names = self._get_names(self.train_name_path)
query_names = self._get_names(self.query_name_path)
train, num_train_tracklets, num_train_pids, num_train_imgs = self._process_train(train_names)
gallery, num_gallery_tracklets, num_gallery_pids, num_gallery_imgs = self._process_gallery()
query, num_query_tracklets, num_query_pids, num_query_imgs = self._process_query(query_names, gallery)
gallery_img = []
num_gallery_tracklets = 0
for el in gallery:
for fr in el[0]:
gallery_img.append(([fr], el[1], el[2]))
num_gallery_tracklets += 1
query_img, _, _, _ = self._process_query_image(query_names)
num_imgs_per_tracklet = num_train_imgs + num_gallery_imgs + num_query_imgs
total_num = np.sum(num_imgs_per_tracklet)
min_num = np.min(num_imgs_per_tracklet)
max_num = np.max(num_imgs_per_tracklet)
avg_num = np.mean(num_imgs_per_tracklet)
num_total_pids = num_train_pids + num_query_pids
num_total_tracklets = num_train_tracklets + num_gallery_tracklets + num_query_tracklets
print("=> VeRi loaded")
print("Dataset statistics:")
print(" -----------------------------------------")
print(" subset | # ids | # tracklets | # images")
print(" -----------------------------------------")
print(" train | {:5d} | {:8d} | {:8d}".format(num_train_pids, num_train_tracklets, np.sum(num_train_imgs)))
print(" query | {:5d} | {:8d} | {:8d}".format(num_query_pids, num_query_tracklets, np.sum(num_query_imgs)))
print(" gallery | {:5d} | {:8d} | {:8d}".format(num_gallery_pids, num_gallery_tracklets, np.sum(num_gallery_imgs)))
print(" -----------------------------------------")
print(" total | {:5d} | {:8d} | {:8d}".format(num_total_pids, num_total_tracklets, total_num))
print(" -----------------------------------------")
print(" number of images per tracklet: {} ~ {}, average {:.1f}".format(min_num, max_num, avg_num))
print(" -----------------------------------------")
self.train = train
self.query = query
self.gallery = gallery
self.gallery_img = gallery_img
self.query_img = query_img
self.num_train_pids = num_train_pids
self.num_query_pids = num_query_pids
self.num_gallery_pids = num_gallery_pids
def _get_names(self, fpath):
names = []
with open(fpath, 'r') as f:
for line in f:
new_line = line.rstrip()
names.append(new_line)
return names
def _process_gallery(self):
gallery = []
pids = []
num_images = []
with open(self.track_gallery_info_path) as fp:
for line in fp.readlines():
imgs_names = [osp.join(self.root, f'image_test/{el}') for el in line.split(' ')]
imgs_names = imgs_names[1:len(imgs_names)-1]
pid, camera = map(int, line.split(' ')[0].replace('c', '').split('_')[:2])
camera -= 1
gallery.append((imgs_names, pid, camera))
num_images.append(len(imgs_names))
pids.append(pid)
return gallery, len(gallery), len(set(pids)), num_images
def _process_train(self, train_names):
train = []
pids = []
num_images = []
train_names = sorted(train_names)
hs = np.asarray([hash(el[:9]) for el in train_names])
displaces = np.nonzero(hs[1:] - hs[:-1])[0] + 1
displaces = np.concatenate([[0], displaces, [len(hs)]])
for idx in range(len(displaces) - 1):
names = train_names[displaces[idx]: displaces[idx + 1]]
imgs_names = [osp.join(self.root, f'image_train/{el}') for el in names]
pid, camera = map(int, names[0].replace('c', '').split('_')[:2])
camera -= 1
train.append((imgs_names, pid, camera))
num_images.append(len(imgs_names))
pids.append(pid)
        # RE-LABEL
pid_map = {pid: idx for idx, pid in enumerate(np.unique(pids))}
for i in range(len(train)):
pid = pids[i]
train[i] = (train[i][0], pid_map[pid], train[i][2])
pids[i] = pid_map[pid]
return train, len(train), len(set(pids)), num_images
def _process_query(self, query_names, gallery):
queries = []
pids = []
num_images = []
for qn in query_names:
pid, camera = map(int, qn.replace('c', '').split('_')[:2])
camera -= 1
# look into gallery
for el in gallery:
if el[1] == pid and el[2] == camera:
queries.append((deepcopy(el[0]), el[1], el[2]))
num_images.append(len(el[0]))
pids.append(el[1])
break
return queries, len(queries), len(set(pids)), num_images
def _process_query_image(self, query_names):
queries = []
pids = []
num_images = []
for qn in query_names:
imgs_names = [osp.join(self.root, f'image_query/{qn}')]
pid, camera = map(int, qn.replace('c', '').split('_')[:2])
camera -= 1
queries.append((imgs_names, pid, camera))
num_images.append(len(imgs_names))
pids.append(pid)
return queries, len(queries), len(set(pids)), num_images
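# --- Illustrative usage sketch (not part of the original module): the dataset root below is a placeholder path ---
if __name__ == '__main__':
    dataset = Veri(root='/data/datasets/')    # expects <root>/VeRi with name_train.txt, name_query.txt, test_track.txt
    print('train/query/gallery tracklets:',
          len(dataset.train), len(dataset.query), len(dataset.gallery))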
```
#### File: VKD/model/loss.py
```python
import torch
from torch import nn as nn
from torch.nn import functional as F
def l2_norm(x: torch.Tensor):
return x / torch.norm(x, dim=-1, keepdim=True)
class MatrixPairwiseDistances(nn.Module):
def __init__(self):
super(MatrixPairwiseDistances, self).__init__()
def __call__(self, x: torch.Tensor, y: torch.Tensor = None):
if y is not None: # exact form of squared distances
differences = x.unsqueeze(1) - y.unsqueeze(0)
else:
differences = x.unsqueeze(1) - x.unsqueeze(0)
distances = torch.sum(differences * differences, -1)
return distances
class SmoothedCCE(nn.Module):
def __init__(self, num_classes: int, eps: float = 0.1, reduction: str='sum'):
super(SmoothedCCE, self).__init__()
self.reduction = reduction
assert reduction in ['sum', 'mean']
self.eps = eps
self.num_classes = num_classes
self.logsoftmax = nn.LogSoftmax(dim=1)
self.factor_0 = self.eps / self.num_classes
self.factor_1 = 1 - ((self.num_classes - 1) / self.num_classes) * self.eps
def labels_to_one_hot(self, labels):
onehot = torch.ones(len(labels), self.num_classes).to(labels.device) * self.factor_0
onehot[torch.arange(0, len(labels), dtype=torch.long), labels.long()] = self.factor_1
return onehot
def forward(self, feats, target):
"""
target are long in [0, num_classes)!
"""
one_hots = self.labels_to_one_hot(target)
if self.reduction == 'sum':
loss = torch.sum(-torch.sum(one_hots * self.logsoftmax(feats), -1))
else:
loss = torch.mean(-torch.sum(one_hots * self.logsoftmax(feats), -1))
return loss
def __call__(self, *args, **kwargs):
return super(SmoothedCCE, self).__call__(*args, **kwargs)
class KDLoss(nn.Module):
def __init__(self, temp: float, reduction: str):
super(KDLoss, self).__init__()
self.temp = temp
self.reduction = reduction
self.kl_loss = nn.KLDivLoss(reduction=reduction)
def forward(self, teacher_logits: torch.Tensor, student_logits: torch.Tensor):
student_softmax = F.log_softmax(student_logits / self.temp, dim=-1)
teacher_softmax = F.softmax(teacher_logits / self.temp, dim=-1)
kl = nn.KLDivLoss(reduction='none')(student_softmax, teacher_softmax)
kl = kl.sum() if self.reduction == 'sum' else kl.sum(1).mean()
kl = kl * (self.temp ** 2)
return kl
def __call__(self, *args, **kwargs):
return super(KDLoss, self).__call__(*args, **kwargs)
class LogitsMatching(nn.Module):
def __init__(self, reduction: str):
super(LogitsMatching, self).__init__()
self.mse_loss = nn.MSELoss(reduction=reduction)
def forward(self, teacher_logits: torch.Tensor, student_logits: torch.Tensor):
return self.mse_loss(student_logits, teacher_logits)
def __call__(self, *args, **kwargs):
return super(LogitsMatching, self).__call__(*args, **kwargs)
class SimilarityDistillationLoss(nn.Module):
def __init__(self, metric: str):
assert metric in ['l2', 'l1', 'huber']
super(SimilarityDistillationLoss, self).__init__()
self.distances = MatrixPairwiseDistances()
self.metric = metric
def forward(self, teacher_embs: torch.Tensor, student_embs: torch.Tensor):
teacher_distances = self.distances(teacher_embs)
student_distances = self.distances(student_embs)
if self.metric == 'l2':
return 0.5 * nn.MSELoss(reduction='mean')(student_distances, teacher_distances)
if self.metric == 'l1':
return 0.5 * nn.L1Loss(reduction='mean')(student_distances, teacher_distances)
if self.metric == 'huber':
return 0.5 * nn.SmoothL1Loss(reduction='mean')(student_distances, teacher_distances)
raise ValueError()
def __call__(self, *args, **kwargs):
return super(SimilarityDistillationLoss, self).__call__(*args, **kwargs)
class OnlineTripletLoss(nn.Module):
def __init__(self, margin='soft', batch_hard=True, reduction='mean'):
super(OnlineTripletLoss, self).__init__()
self.batch_hard = batch_hard
self.reduction = reduction
if isinstance(margin, float) or margin == 'soft':
self.margin = margin
else:
raise NotImplementedError(
'The margin {} is not recognized in TripletLoss()'.format(margin))
def forward(self, feat, id=None, pos_mask=None, neg_mask=None, mode='id', dis_func='eu',
n_dis=0):
if dis_func == 'cdist':
feat = feat / feat.norm(p=2, dim=1, keepdim=True)
dist = self.cdist(feat, feat)
elif dis_func == 'eu':
dist = self.cdist(feat, feat)
if mode == 'id':
if id is None:
                raise RuntimeError('forward is in id mode, please input id!')
else:
identity_mask = torch.eye(feat.size(0)).byte()
identity_mask = identity_mask.cuda() if id.is_cuda else identity_mask
same_id_mask = torch.eq(id.unsqueeze(1), id.unsqueeze(0))
negative_mask = same_id_mask ^ 1
positive_mask = same_id_mask ^ identity_mask.bool()
elif mode == 'mask':
if pos_mask is None or neg_mask is None:
                raise RuntimeError('forward is in mask mode, please input pos_mask & neg_mask!')
else:
positive_mask = pos_mask
same_id_mask = neg_mask ^ 1
negative_mask = neg_mask
else:
raise ValueError('unrecognized mode')
if self.batch_hard:
if n_dis != 0:
img_dist = dist[:-n_dis, :-n_dis]
max_positive = (img_dist * positive_mask[:-n_dis, :-n_dis].float()).max(1)[0]
min_negative = (img_dist + 1e5 * same_id_mask[:-n_dis, :-n_dis].float()).min(1)[0]
dis_min_negative = dist[:-n_dis, -n_dis:].min(1)[0]
z_origin = max_positive - min_negative
# z_dis = max_positive - dis_min_negative
else:
max_positive = (dist * positive_mask.float()).max(1)[0]
min_negative = (dist + 1e5 * same_id_mask.float()).min(1)[0]
z = max_positive - min_negative
else:
pos = positive_mask.topk(k=1, dim=1)[1].view(-1, 1)
positive = torch.gather(dist, dim=1, index=pos)
pos = negative_mask.topk(k=1, dim=1)[1].view(-1, 1)
negative = torch.gather(dist, dim=1, index=pos)
z = positive - negative
if isinstance(self.margin, float):
b_loss = torch.clamp(z + self.margin, min=0)
elif self.margin == 'soft':
if n_dis != 0:
b_loss = torch.log(1 + torch.exp(
z_origin)) + -0.5 * dis_min_negative # + torch.log(1+torch.exp(z_dis))
else:
b_loss = torch.log(1 + torch.exp(z))
else:
raise NotImplementedError("How do you even get here!")
if self.reduction == 'mean':
return b_loss.mean()
return b_loss.sum()
def cdist(self, a, b):
'''
Returns euclidean distance between a and b
Args:
a (2D Tensor): A batch of vectors shaped (B1, D)
b (2D Tensor): A batch of vectors shaped (B2, D)
Returns:
A matrix of all pairwise distance between all vectors in a and b,
will be shape of (B1, B2)
'''
diff = a.unsqueeze(1) - b.unsqueeze(0)
return ((diff ** 2).sum(2) + 1e-12).sqrt()
def __call__(self, *args, **kwargs):
return super(OnlineTripletLoss, self).__call__(*args, **kwargs)
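# --- Illustrative usage sketch (not part of the original module): random tensors only, shapes chosen for illustration ---
if __name__ == '__main__':
    teacher_logits = torch.randn(8, 100)
    student_logits = torch.randn(8, 100)
    kd = KDLoss(temp=4.0, reduction='mean')
    print('KD loss:', kd(teacher_logits, student_logits).item())

    teacher_embs = torch.randn(8, 128)
    student_embs = torch.randn(8, 128)
    sim = SimilarityDistillationLoss(metric='l2')
    print('similarity distillation loss:', sim(teacher_embs, student_embs).item())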
```
#### File: VKD/utils/saver.py
```python
import subprocess
from pathlib import Path
import cv2
import numpy as np
import torch
import json
from torch.utils.tensorboard import SummaryWriter
from model.net import TriNet
class Saver(object):
"""
"""
def __init__(self, path: str, uuid: str):
self.path = Path(path) / uuid
self.path.mkdir(exist_ok=True, parents=True)
self.chk_path = self.path / 'chk'
self.chk_path.mkdir(exist_ok=True)
self.log_path = self.path / 'logs'
self.log_path.mkdir(exist_ok=True)
self.params_path = self.path / 'params'
self.params_path.mkdir(exist_ok=True)
# TB logs
self.writer = SummaryWriter(str(self.path))
# Dump the `git log` and `git diff`. In this way one can checkout
# the last commit, add the diff and should be in the same state.
for cmd in ['log', 'diff']:
with open(self.path / f'git_{cmd}.txt', mode='wt') as f:
subprocess.run(['git', cmd], stdout=f)
def load_logs(self):
with open(str(self.params_path / 'params.json'), 'r') as fp:
params = json.load(fp)
with open(str(self.params_path / 'hparams.json'), 'r') as fp:
hparams = json.load(fp)
return params, hparams
@staticmethod
def load_net(path: str, chk_name: str, dataset_name: str):
with open(str(Path(path) / 'params' / 'hparams.json'), 'r') as fp:
net_hparams = json.load(fp)
with open(str(Path(path) / 'params' / 'params.json'), 'r') as fp:
net_params = json.load(fp)
assert dataset_name == net_params['dataset_name']
net = TriNet(backbone_type=net_hparams['backbone_type'], pretrained=True,
num_classes=net_hparams['num_classes'])
net_state_dict = torch.load(Path(path) / 'chk' / chk_name)
net.load_state_dict(net_state_dict)
return net
def write_logs(self, model: torch.nn.Module, params: dict):
with open(str(self.params_path / 'params.json'), 'w') as fp:
json.dump(params, fp)
with open(str(self.params_path / 'hparams.json'), 'w') as fp:
json.dump(model.get_hparams(), fp)
def write_image(self, image: np.ndarray, epoch: int, name: str):
out_image_path = self.log_path / f'{epoch:05d}_{name}.jpg'
cv2.imwrite(str(out_image_path), image)
image = image[..., ::-1]
self.writer.add_image(f'{name}', image, epoch, dataformats='HWC')
def dump_metric_tb(self, value: float, epoch: int, m_type: str, m_desc: str):
self.writer.add_scalar(f'{m_type}/{m_desc}', value, epoch)
def save_net(self, net: torch.nn.Module, name: str = 'weights', overwrite: bool = False):
weights_path = self.chk_path / name
if weights_path.exists() and not overwrite:
raise ValueError('PREVENT OVERWRITE WEIGHTS')
torch.save(net.state_dict(), weights_path)
def dump_hparams(self, hparams: dict, metrics: dict):
self.writer.add_hparams(hparams, metrics)
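# --- Illustrative usage sketch (not part of the original module): the path and run id are placeholders ---
if __name__ == '__main__':
    saver = Saver(path='./runs', uuid='demo-run')   # creates ./runs/demo-run/{chk,logs,params}
    saver.dump_metric_tb(0.42, epoch=0, m_type='train', m_desc='loss')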
``` |
{
"source": "joxoby/ign-conan",
"score": 2
} |
#### File: urdfdom_headers/all/conanfile.py
```python
import os
from conans import CMake, ConanFile, tools
class URDFDomHeaders(ConanFile):
name = "urdfdom_headers"
license = "BSD"
author = "<NAME> <EMAIL>"
url = "https://github.com/ros/urdfdom"
description = "Headers for URDF parsers"
topics = ("robotics", "simulation")
settings = "os", "compiler", "build_type", "arch"
generators = "cmake", "cmake_find_package_multi"
@property
def _source_subfolder(self):
return "source_subfolder"
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("urdfdom_headers-1.0.4", self._source_subfolder)
def requirements(self):
pass
# for req in self.conan_data["requirements"]:
# self.requires(req)
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["BUILD_TESTING"] = False
cmake.configure(source_folder=self._source_subfolder)
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
pass
``` |
{
"source": "Joy127690GitHub/Sensor_evaluation",
"score": 3
} |
#### File: Sensor_evaluation/ultrasonic_sm/oscilloscope2.py
```python
import time
import numpy as np
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.widgets import CheckButtons
import pandas as pd
#import PySerialTest.py
class Scope():
def __init__(self, ax, maxt=30):
self.ax = ax
        self.maxt = maxt  # in seconds; length of the time axis
self.data_time = 7*[pd.Series(dtype='int64')]
self.node_addr = 7*[pd.Series(dtype='int64')]
self.data_dist = 7*[pd.Series(dtype='int64')]
self.data_amp = 7*[pd.Series(dtype='int64')]
#self.scat=7*[plt.scatter([],[])]
self.start_time = 7*[0]
self.last_record = 7*[0]
class Scope_line(Scope):
def __init__(self,fig, ax, ax_limit, blind_zone, maxt=30):
super().__init__(ax)
self.color_cycle=['b', 'g', 'c', 'm', 'y', 'k', 'w']
self.fig = fig
self.blind_zone = blind_zone
        # ax parameter setup
        # First axis: main axis `ax` settings (Time vs Distance)
        self.ax.set_title("Time vs Distance(lines on/off via clicking legend)")
        self.ax.set_xlim(0, self.maxt)  # x-axis range
        self.ax.set_ylim(-0.02*ax_limit, 1.1*ax_limit)  # y-axis range; default for UCC2500 test
        self.ax.set_xlabel('Time (second)')
        self.ax.set_ylabel('Distance (mm)')
        #self.ax.axhline(y=blind_zone,color='r', label='Blind zone')
        # Second axis: secondary axis `ax2` settings
        self.ax2 = self.ax.twinx()
        self.ax2.set_ylabel('Amplitude (dB SPL)')
        self.ax2.set_ylim(-0.02*255, 1.1*255)  # y-axis range
        self.line_index = 0
        self.count_win_move = 1  # number of times the time window has shifted
def update(self, i, *frames_data):
        # Buffer for the data displayed inside the current time window
        try:
            if len(frames_data[i]['Timestamp']) > 1:  # empty data frames would cause unpredictable index errors downstream, so filter them out here
if self.count_win_move == 1:
self.start_time[i] = frames_data[i]['Timestamp'][0]
self.data_time[i] = frames_data[i]['Timestamp'][self.last_record[i]:-1]-self.start_time[i]
self.node_addr[i] = frames_data[i]['Node_address'][self.last_record[i]:-1]
self.data_dist[i] = frames_data[i]['Distance'][self.last_record[i]:-1]
self.data_amp[i] = frames_data[i]['Amplitude'][self.last_record[i]:-1]
if self.data_time[i].iloc[-1] > (self.maxt * self.count_win_move): # reset the arrays
                    # Shift the display window
                    self.last_record[i] = len(self.data_time[i])-1
                    #print('self.last_record[i]:',self.last_record[i])
                    self.count_win_move +=1
                    # Move the axis limits along with the window
self.ax.set_xlim(self.maxt*(self.count_win_move-1),
self.maxt*self.count_win_move)
self.ax.figure.canvas.draw()
else:
pass
except IndexError as ex: #for handling empty frames_data problem
print('Error: Frames_data '+ str(ex)+'. Size:', frames_data[i].shape )
time.sleep(1)
else:
            if i == self.line_index:  # first pass for this line: create the artists
self.ax.add_line(Line2D(self.data_time[i], self.data_dist[i],linestyle='-',
color=self.color_cycle[i], animated=True,
label='Node.'+str(int(self.node_addr[i][0]))+' Dist.'))
self.ax2.add_line(Line2D(self.data_time[i],self.data_amp[i],linestyle=':',
color=self.color_cycle[i], animated=True,
label='Node.'+str(int(self.node_addr[i][0]))+' Amp.'))
self.list_lines = (self.ax.lines+self.ax2.lines)
                if i == (len(frames_data)-1):  # wait until all lines have been created, then build the legend
                    self.leg = self.fig.legend(fancybox=True, shadow=True, loc='lower center',ncol=len(self.list_lines))
                    self.ax.axhline(y=self.blind_zone,xmin=0, xmax=1,color='r', label='Blind zone')  # the axhline must be added after the legend is created, otherwise it causes problems
self.fig.canvas.draw()
self.init_legend(self.list_lines)
self.fig.canvas.mpl_connect('pick_event', self.on_pick)
self.line_index +=1
else:
self.ax.lines[i].set_data(self.data_time[i], self.data_dist[i])
self.ax2.lines[i].set_data(self.data_time[i], self.data_amp[i])
self.list_lines = self.ax.lines + self.ax2.lines
return self.list_lines
def init_legend(self,lines):
self.lined = {} # Will map legend lines to original lines.
for legline, origline in zip(self.leg.get_lines(), lines):
legline.set_picker(True) # Enable picking on the legend line.
#print('legline.get_picker:', legline.get_picker())
self.lined[legline] = origline
def on_pick(self,event):
# On the pick event, find the original line corresponding to the legend
# proxy line, and toggle its visibility.
legline = event.artist
origline = self.lined[legline]
visible = not origline.get_visible()
origline.set_visible(visible)
# Change the alpha on the line in the legend so we can see what lines have been toggled.
legline.set_alpha(1.0 if visible else 0.2)
self.fig.canvas.draw()
class Scope_line2(Scope):
def __init__(self, ax, ax_limit, rax, maxt=30):
super().__init__(ax)
self.color_cycle=['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
        # ax parameter setup
        # First axis: main axis `ax` settings (Time vs Distance)
        self.ax.set_title("Time vs Distance")
        self.ax.set_xlim(0, self.maxt)  # x-axis range
        self.ax.set_ylim(-0.02*ax_limit, 1.1*ax_limit)  # y-axis range; default for UCC2500 test
        self.ax.set_xlabel('Time (second)')
        self.ax.set_ylabel('Distance (mm)')
        self.rax = rax
        # Second axis: secondary axis `ax2` settings
        self.ax2 = self.ax.twinx()
        self.ax2.set_ylabel('Amplitude (dB SPL)')
        self.ax2.set_ylim(-0.02*255, 1.1*255)  # y-axis range
        self.line_index = 0
        # number of times the time window has shifted
        self.count_win_move = 1
def update(self, i, *frames_data):
        # Buffer for the data displayed inside the current time window
        try:
            if len(frames_data[i]['Timestamp']) > 1:  # empty data frames would cause unpredictable index errors downstream, so filter them out here
if self.count_win_move == 1:
self.start_time[i] = frames_data[i]['Timestamp'][0]
self.data_time[i] = frames_data[i]['Timestamp'][self.last_record[i]:-1]-self.start_time[i]
self.node_addr[i] = frames_data[i]['Node_address'][self.last_record[i]:-1]
self.data_dist[i] = frames_data[i]['Distance'][self.last_record[i]:-1]
self.data_amp[i] = frames_data[i]['Amplitude'][self.last_record[i]:-1]
if self.data_time[i].iloc[-1] > (self.maxt * self.count_win_move): # reset the arrays
                    # Shift the display window
                    self.last_record[i] = len(self.data_time[i])-1
                    self.count_win_move +=1
                    # Move the axis limits along with the window
self.ax.set_xlim(self.maxt*(self.count_win_move-1),
self.maxt*self.count_win_move)
self.ax.figure.canvas.draw()
else:
pass
except IndexError as ex: #for handling empty frames_data problem
print('Error: Frames_data '+ str(ex)+'. Size:', frames_data[i].shape )
time.sleep(1)
else:
            if i == self.line_index:  # first pass for this line: create the artists
self.ax.add_line(Line2D(self.data_time[i], self.data_dist[i],linestyle='-',
color=self.color_cycle[i], animated=True,
label='Node.'+str(int(self.node_addr[i][0]))+' Dist.'))
self.ax2.add_line(Line2D(self.data_time[i],self.data_amp[i],linestyle=':',
color=self.color_cycle[i], animated=True,
label='Node.'+str(int(self.node_addr[i][0]))+' Amp.',visible=False))
self.list_lines = (self.ax.lines+self.ax2.lines)
                if i == (len(frames_data)-1):  # wait until all lines have been created, then build the checkboxes
self.init_checkbox()
self.check.on_clicked(self.func_checkbox)
self.ax.figure.canvas.draw()
self.line_index +=1
else:
self.ax.lines[i].set_data(self.data_time[i], self.data_dist[i])
self.ax2.lines[i].set_data(self.data_time[i], self.data_amp[i])
self.list_lines = self.ax.lines + self.ax2.lines
return self.list_lines
def init_checkbox(self):
self.line_labels = [str(line.get_label()) for line in self.list_lines]
visibility = [line.get_visible() for line in self.list_lines]
self.check = CheckButtons(self.rax, self.line_labels, visibility)
def func_checkbox(self,label):
index = self.line_labels.index(label)
self.list_lines[index].set_visible(not self.list_lines[index].get_visible())
class Scope_stem(Scope):
def __init__(self,fig, ax, ax_limit, blind_zone):
super().__init__(ax)
self.fig = fig
self.blind_zone = blind_zone
        self.ax.set_xlim(-0.02*ax_limit, 1.1*ax_limit)  # x-axis range; for UCC2500 test
        self.ax.set_ylim(-0.02*255, 1.1*255)  # y-axis range; for amplitude 100%
self.ax.set_xlabel('Distance (mm)')
self.ax.set_ylabel('Amplitude (dB SPL)')
self.ax.set_title("Distance vs Amplitude")
self.colorline_cycle=['C0-', 'C1-', 'C2-', 'C4-', 'C5-', 'C6-', 'C7-']
self.colormarker_cycle=['C0o', 'C1o', 'C2o', 'C4o', 'C5o', 'C6o', 'C7o']
self.basefmt=' '
self.markerline= 7*[None]
self.stemlines= 7*[None]
self.baseline=7*[None]
self.stem_container = 7*[None]
self.container_labels = 7*[None]
        self.count_sample = 5  # limit on displayed samples: only the 5 most recent readings are shown
        self.count_win_move = 1  # number of times the time window has shifted
self.line_index = 0
def update(self, i, *frames_data):
        # Buffer for the data displayed inside the current time window
        try:
            if len(frames_data[i]['Timestamp']) >= self.count_sample:  # empty data frames would cause unpredictable index errors downstream, so filter them out here
if self.count_win_move == 1:
self.start_time[i] = frames_data[i]['Timestamp'][0]
self.data_time[i] = frames_data[i]['Timestamp'][-self.count_sample:-1]-self.start_time[i]
self.node_addr[i] = frames_data[i]['Node_address'][-self.count_sample:-1]
self.data_dist[i] = frames_data[i]['Distance'][-self.count_sample:-1]
self.data_amp[i] = frames_data[i]['Amplitude'][-self.count_sample:-1]
self.count_win_move +=1
self.ax.figure.canvas.draw()
else:
pass
except IndexError as ex: #for handling empty frames_data problem
print('Error: Frames_data '+ str(ex)+'. Size:', frames_data[i].shape )
time.sleep(1)
else:
#self.markerline[i], self.stemlines[i], self.baseline[i]
self.stem_container[i]= self.ax.stem(self.data_dist[i], self.data_amp[i],
linefmt=self.colorline_cycle[i],
markerfmt=self.colormarker_cycle[i],
basefmt=self.basefmt,
label='Node.'+str(int(self.node_addr[i].iloc[-1])),#self.node_addr[i] is Pandas series type, iloc should be used, cannot use self.node_addr[i][-1]
use_line_collection=True)
self.stem_container[i].markerline.set_markersize(3)
            if i == self.line_index:  # first pass for this stem container: record its label
self.container_labels[i] = self.stem_container[i].get_label()
                if i == (len(frames_data)-1):  # wait until all stem containers have been created, then build the legend
                    self.leg = self.fig.legend(self.stem_container[0:len(frames_data)],  # slicing is required here, otherwise the remaining None entries raise a UserWarning
                                               self.container_labels[0:len(frames_data)],
                                               fancybox=True, shadow=True, loc='lower center', ncol=len(frames_data))
                    self.ax.axvline(x=self.blind_zone, ymin=0, ymax=1, linestyle=':', color='r', label='Blind zone')  # the axvline must be added after the legend is created, otherwise it causes problems
self.fig.canvas.draw()
#self.init_legend(self.stem_container[0:len(frames_data)])
#self.fig.canvas.mpl_connect('pick_event', self.on_pick)
self.line_index +=1
else:
pass
#return self.markerline, self.stemlines, self.baseline
return self.stem_container[i]
if __name__ == "__main__":
    def emitter(p=0.03):  # random data-stream generator
"""Return a random value in [0, 1) with probability p, else 0."""
t =0,
while True:
v = np.random.rand(1)
t += v
if v > p:
yield t, 0., 0.
else:
                yield t, np.random.rand(1), 50*(np.random.rand(1)+1),  # note: keep the trailing comma so a tuple is yielded
# Fixing random state for reproducibility
np.random.seed(19680801)
fig, ax = plt.subplots()
scope = Scope(ax)
frames_data = emitter()
print(next(frames_data))
# pass a generator in "emitter" to produce data for the update func
ani = animation.FuncAnimation(fig, scope.update, frames_data, interval=10,
blit=True)
plt.show()
```
#### File: Sensor_evaluation/ultrasonic_sm/sensor_eval_app3.py
```python
import sys
import time
from PyQt5. QtWidgets import QMainWindow
from matplotlib.backends.qt_compat import QtCore, QtWidgets
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib.animation import FFMpegWriter
from comm_serial import SerialRW_SM
from oscilloscope2 import *
#from oscilloscope3 import Record_plot as Rp
from sensor_eval_ui import *
from ultra_sm import Ultra_sm_std as sm
from ultra_sm_sync import Ultra_sm_sync as sm_sync
class My_AppWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
        self.setupUi(self)  # lay out the window widgets; method inherited from Ui_MainWindow
        self.thread_sampling = Thread_sampling()  # data acquisition / import thread
        self.thread_sampling.start()
        self.frames_data = self.thread_sampling.data_records()
        self.ani1 = None  # the animation object must be kept as an attribute, otherwise FuncAnimation silently stops running and nothing is displayed
self.add_fig2tab_1()
self.ani2 = None
self.add_fig2tab_2()
self.ani3 = None
self.add_fig2tab_3()
def add_fig2tab_1(self):
        # Plotting inside the tab 1 container =========
        layout1 = QtWidgets.QVBoxLayout(self.tab_1)  # add a layout object to the tab container
        self.fig1 = Figure(figsize=(5, 3))  # create the Matplotlib figure object
        tab_canvas = FigureCanvas(self.fig1)  # put the Matplotlib figure on a canvas
        self.ax_tab1 = tab_canvas.figure.subplots()  # create the subplot axes; plt.subplots() cannot be used directly here, otherwise a standalone Matplotlib window would appear
        self.fig1.subplots_adjust(bottom=0.12)
        layout1.addWidget(tab_canvas)  # attach the Matplotlib canvas to the tab container's layout
        self.addToolBar(QtCore.Qt.TopToolBarArea,
                        NavigationToolbar(tab_canvas, self))  # add the figure toolbar to the canvas
        # Add the live oscilloscope curves to the axes
scope_line=Scope_line(fig=self.fig1, ax=self.ax_tab1,
ax_limit = self.thread_sampling.ax_limit(),
blind_zone = self.thread_sampling.ax_blind_zone())
try:
self.ani1 = animation.FuncAnimation(self.fig1, scope_line.update,
frames=len(self.frames_data),#init_func=scope_line.init,
fargs=self.frames_data,
interval=100, blit=True)
except Exception as ex:
print('error: Funcanimation:', ex)
'''
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
ani.save('Test.mp4', writer='ffmpeg')
'''
def add_fig2tab_2(self):
        # Plotting inside the tab 2 container ==============
        layout2 = QtWidgets.QVBoxLayout(self.tab_2)  # add a layout object to the tab container
        self.fig2 = Figure(figsize=(5, 3))  # create the Matplotlib figure object
        tab_canvas2 = FigureCanvas(self.fig2)  # put the Matplotlib figure on a canvas
        self.ax_tab2 = tab_canvas2.figure.subplots()  # create the subplot axes; plt.subplots() cannot be used directly here, otherwise a standalone Matplotlib window would appear
        self.fig2.subplots_adjust(left=0.22)
        self.ax_ckbox2 = tab_canvas2.figure.add_axes([0.0, 0.48-0.08*len(self.frames_data)/2,  # rect = [left, bottom, width, height]
                                                      0.13, 0.08*len(self.frames_data)])
        layout2.addWidget(tab_canvas2)  # attach the Matplotlib canvas to the tab container's layout
        self.addToolBar(QtCore.Qt.TopToolBarArea,
                        NavigationToolbar(tab_canvas2, self))  # add the figure toolbar to the canvas
        # Add the live oscilloscope curves to the axes
scope_line2=Scope_line2(self.ax_tab2, ax_limit = self.thread_sampling.ax_limit(),rax=self.ax_ckbox2)
self.ani2 = animation.FuncAnimation(self.fig2, scope_line2.update,
frames=len(self.frames_data),
fargs=self.frames_data,interval=100, blit=True)
def add_fig2tab_3(self):
        # Plotting inside the tab 3 container ==============
        # Stem plot
        layout3 = QtWidgets.QVBoxLayout(self.tab_3)  # add a layout object to the tab container
        self.fig3 = Figure(figsize=(5, 3))  # create the Matplotlib figure object
        tab_canvas3 = FigureCanvas(self.fig3)  # put the Matplotlib figure on a canvas
        self.ax_tab3 = tab_canvas3.figure.subplots()  # create the subplot axes; plt.subplots() cannot be used directly here, otherwise a standalone Matplotlib window would appear
        self.fig3.subplots_adjust(bottom=0.12)
        layout3.addWidget(tab_canvas3)  # attach the Matplotlib canvas to the tab container's layout
        self.addToolBar(QtCore.Qt.TopToolBarArea,
                        NavigationToolbar(tab_canvas3, self))  # add the figure toolbar to the canvas
        # Add the live oscilloscope curves to the axes
scope_stem=Scope_stem(fig=self.fig3, ax=self.ax_tab3,
ax_limit = self.thread_sampling.ax_limit(),
blind_zone = self.thread_sampling.ax_blind_zone())
self.ani3 = animation.FuncAnimation(self.fig3, scope_stem.update,
frames=len(self.frames_data),#init_func=scope_line.init,
fargs=self.frames_data,
interval=100, blit=True)
class Thread_sampling(QtCore.QThread):
def __init__(self):
super().__init__()
self.ser_comm()
self.ax_sensingRange = 2500 #in mm, default
def ser_comm(self):
com_port = 'COM6'
com_cfg_sm = {'baudrate': 19200,
'bytesize': 8,
'parity': 'N',
"stopbits": 1}
# sensor module type
ucc2500_7 = sm(sensing_range=2500,node_addr=0x7)
ucc4000_2 = sm(sensing_range=4000,node_addr=0x2)
ucc4000_3 = sm(sensing_range=4000,node_addr=0x3)
ucc4000_7 = sm(sensing_range=4000,node_addr=0x7)
#self.list_sm = [ucc2500]
#self.list_sm = [ucc4000_7,]
#self.list_sm = [ucc4000_7]
#self.list_sm = [ucc4000_2,ucc4000_3]
#print(len(self.list_sm))
# Sensor module type with sync function
ucc4000_sync2 = sm_sync(sensing_range=4000,node_addr=0x2)
ucc4000_sync3 = sm_sync(sensing_range=4000,node_addr=0x3)
ucc2500_sync7 = sm_sync(sensing_range=2500,node_addr=0x7)
self.list_sm = [ucc4000_sync2,ucc4000_sync3]
#self.list_sm = [ucc4000_sync3]
#self.list_sm = [ucc2500_sync7]
#self.list_sm = [ucc2500]
self.ser_sm = SerialRW_SM(com_port, com_cfg_sm,self.list_sm)
try:
if self.ser_sm.start():
'''
print("main thread wakes up")
btn_connect = True
ser.stop(btn_connect)
'''
else:
pass
except Exception as ex:
print (ex)
def data_records(self):
return self.ser_sm.records_data
def ax_limit(self):
for i in range(len(self.list_sm)):
if self.list_sm[i].sensing_range > self.ax_sensingRange:
self.ax_sensingRange = self.list_sm[i].sensing_range
return self.ax_sensingRange
def ax_blind_zone(self):
return self.list_sm[0].blind_zone
if __name__ == "__main__":
# Check whether there is already a running QApplication (e.g., if running
# from an IDE).
qapp = QtWidgets.QApplication.instance()
if not qapp:
qapp = QtWidgets.QApplication(sys.argv)
app = My_AppWindow()
app.show()
app.activateWindow()
app.raise_()
qapp.exec_()
``` |
{
"source": "joy13975/covidprof_submission",
"score": 2
} |
#### File: adapter/base/messaging.py
```python
import logging
class MessagingAdapter:
def __init__(self,
*kargs,
message_handler=None,
error_handler=None,
**kwargs):
self.message_handler = message_handler
self.error_handler = error_handler
def _listen(self, *args, **kwargs):
pass
def _reply(self, *args, **kwargs):
pass
def _upload_media(self, *args, **kwargs):
pass
def listen(self, *args, **kwargs):
try:
self._listen(*args, **kwargs)
except KeyboardInterrupt:
logging.info('\nStop')
def reply(self, msg, *args, **kwargs):
return self._reply(msg, *args, **kwargs)
def upload_media(self, *args, **kwargs):
return self._upload_media(*args, **kwargs)
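# --- Illustrative sketch (not part of the original project): a minimal console-backed adapter
# showing how the _listen/_reply hooks are meant to be overridden; the echo handler is a placeholder.
class ConsoleAdapter(MessagingAdapter):
    def _listen(self, *args, **kwargs):
        while True:
            text = input('> ')
            if self.message_handler:
                self.message_handler(text)

    def _reply(self, msg, *args, **kwargs):
        print(msg)


if __name__ == '__main__':
    adapter = ConsoleAdapter(message_handler=lambda m: adapter.reply(f'echo: {m}'))
    adapter.listen()   # Ctrl+C stops the loop via the KeyboardInterrupt handler in listen()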
```
#### File: src/adapter/c3aidatalake.py
```python
import requests
import pandas as pd
def read_data_json(typename, api, body):
"""
read_data_json directly accesses the C3.ai COVID-19 Data Lake APIs using the requests library,
and returns the response as a JSON, raising an error if the call fails for any reason.
------
typename: The type you want to access, i.e. 'OutbreakLocation', 'LineListRecord', 'BiblioEntry', etc.
api: The API you want to access, either 'fetch' or 'evalmetrics'.
body: The spec you want to pass. For examples, see the API documentation.
"""
response = requests.post(
"https://api.c3.ai/covid/api/1/" + typename + "/" + api,
json=body,
headers={
'Accept': 'application/json',
'Content-Type': 'application/json'
}
)
# if request failed, show exception
if response.status_code != 200:
raise Exception(response.json()["message"])
return response.json()
def fetch(typename, body, get_all=False, remove_meta=True):
"""
fetch accesses the C3.ai COVID-19 Data Lake using read_data_json, and converts the response into a Pandas dataframe.
fetch is used for all non-timeseries data in the C3.ai COVID-19 Data Lake, and will call read_data as many times
as required to access all of the relevant data for a given typename and body.
------
typename: The type you want to access, i.e. 'OutbreakLocation', 'LineListRecord', 'BiblioEntry', etc.
body: The spec you want to pass. For examples, see the API documentation.
get_all: If True, get all records and ignore any limit argument passed in the body. If False, use the limit argument passed in the body. The default is False.
remove_meta: If True, remove metadata about each record. If False, include it. The default is True.
"""
if get_all:
has_more = True
offset = 0
limit = 2000
df = pd.DataFrame()
while has_more:
body['spec'].update(limit=limit, offset=offset)
response_json = read_data_json(typename, 'fetch', body)
new_df = pd.json_normalize(response_json['objs'])
df = df.append(new_df)
has_more = response_json['hasMore']
offset += limit
else:
response_json = read_data_json(typename, 'fetch', body)
df = pd.json_normalize(response_json['objs'])
if remove_meta:
df = df.drop(columns=[c for c in df.columns if (
'meta' in c) | ('version' in c)])
return df
def evalmetrics(typename, body, get_all=False, remove_meta=True):
"""
evalmetrics accesses the C3.ai COVID-19 Data Lake using read_data_json, and converts the response into a Pandas dataframe.
evalmetrics is used for all timeseries data in the C3.ai COVID-19 Data Lake.
------
typename: The type you want to access, i.e. 'OutbreakLocation', 'LineListRecord', 'BiblioEntry', etc.
body: The spec you want to pass. For examples, see the API documentation.
get_all: If True, get all metrics and ignore limits on number of expressions and ids. If False, consider expressions and ids limits. The default is False.
remove_meta: If True, remove metadata about each record. If False, include it. The default is True.
"""
if get_all:
expressions = body['spec']['expressions']
ids = body['spec']['ids']
df = pd.DataFrame()
for ids_start in range(0, len(ids), 10):
for expressions_start in range(0, len(expressions), 4):
body['spec'].update(
ids=ids[ids_start: ids_start + 10],
expressions=expressions[expressions_start: expressions_start + 4]
)
response_json = read_data_json(typename, 'evalmetrics', body)
new_df = pd.json_normalize(response_json['result'])
new_df = new_df.apply(pd.Series.explode)
df = pd.concat([df, new_df], axis=1)
else:
response_json = read_data_json(typename, 'evalmetrics', body)
df = pd.json_normalize(response_json['result'])
df = df.apply(pd.Series.explode)
# get the useful data out
if remove_meta:
df = df.filter(regex='dates|data|missing')
# only keep one date column
date_cols = [col for col in df.columns if 'dates' in col]
keep_cols = date_cols[:1] + \
[col for col in df.columns if 'dates' not in col]
df = df.filter(items=keep_cols).rename(columns={date_cols[0]: "dates"})
df["dates"] = pd.to_datetime(df["dates"])
return df
def getprojectionhistory(body, remove_meta=True):
"""
getprojectionhistory accesses the C3.ai COVID-19 Data Lake using read_data_json, and converts the response into a Pandas dataframe.
------
body: The spec you want to pass. For examples, see the API documentation.
remove_meta: If True, remove metadata about each record. If False, include it. The default is True.
"""
response_json = read_data_json(
"outbreaklocation", 'getprojectionhistory', body)
df = pd.json_normalize(response_json)
df = df.apply(pd.Series.explode)
# get the useful data out
if remove_meta:
df = df.filter(regex='dates|data|missing|expr')
# only keep one date column
date_cols = [col for col in df.columns if 'dates' in col]
keep_cols = date_cols[:1] + \
[col for col in df.columns if 'dates' not in col]
df = df.filter(items=keep_cols).rename(columns={date_cols[0]: "dates"})
df["dates"] = pd.to_datetime(df["dates"])
# rename columns to simplify naming convention
df = df.rename(columns=lambda x: x.replace(".value", ""))
return df
```
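For orientation, a hedged example of how these helpers might be called: the spec shape below mirrors the one built in `src/numerical.py` (`evalmetrics_request`); the location, metric, and dates are placeholders.
```python
body = {
    'spec': {
        'ids': ['UnitedStates'],
        'expressions': ['JHU_ConfirmedCases'],
        'interval': 'DAY',
        'start': '2020-09-01',
        'end': '2020-10-01',
    }
}
df = evalmetrics('outbreaklocation', body, get_all=True)
print(df.head())
```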
#### File: src/adapter/c3ai.py
```python
import requests
from time import sleep
import json
import logging
from multiprocessing.pool import ThreadPool
class RequestException(Exception):
pass
class C3aiAdapter:
limits = {
'biblioentry': 2000,
'populationdata': 2000,
'labordetail': 2000,
'linelistrecord': 5000,
'sequence': 8000,
'subsequence': 8000,
'biologicalasset': 8000,
}
@classmethod
def request(cls, typename, api, body, max_retries=10, retry_wait=1):
'''
Sends HTTP Post request to C3ai's data lake API.
Retry on server error.
'''
for nth_retry in range(1, max_retries+1):
try:
response = requests.post(
'https://api.c3.ai/covid/api/1/' + typename + '/' + api,
json=body,
headers={
'Accept': 'application/json',
'Content-Type': 'application/json'
})
except Exception as e:
if 'Temporary failure in name resolution' in str(e):
logging.error(f'Request to {typename}/{api} resulted in error '
f'{e}; '
f'retrying in {retry_wait}s '
f'({nth_retry}/{max_retries})')
sleep(retry_wait)
                    continue
                # Unexpected errors should surface here rather than fall
                # through to the response handling below with `response` unset.
                raise
if response.status_code == 200:
# Successful request
return response.json()
elif (500 <= response.status_code < 600) or \
'too many requests' in repr(response).lower():
# Retry on server error or congestion
if nth_retry >= max_retries:
# raise RequestException(repr(response))
return {
'error': f'Max request {max_retries} retries reached.',
'content': json.loads(response.content)
}
logging.error(f'Request to {typename}/{api} resulted in code '
f'{response.status_code}; '
f'retrying in {retry_wait}s '
f'({nth_retry}/{max_retries})')
sleep(retry_wait)
else:
try:
message = response.json()['message']
except Exception:
message = 'Could not parse response JSON message: ' + \
repr(response)
raise RequestException(message)
@classmethod
def fetch(cls, typename, body):
return cls.request(typename, 'fetch', body)
@classmethod
def fetch_all(cls, typename, body, n_threads):
stop_at_offset = -1
def thread_work(offset):
nonlocal stop_at_offset
if stop_at_offset != -1 and stop_at_offset <= offset:
return []
thread_spec = body['spec'].copy()
thread_spec['offset'] = offset
p = cls.fetch(typename, {'spec': thread_spec})
if not p['hasMore']:
stop_at_offset = offset
return p
spec = body['spec']
pagesize = spec.get('limit', cls.limits[typename.lower()])
groupsize = n_threads * pagesize
base_offset = spec.get('offset', 0)
while stop_at_offset == -1:
with ThreadPool(n_threads) as pool:
pages = pool.map(thread_work,
range(base_offset, base_offset + groupsize, pagesize))
yield pages
base_offset += groupsize
@classmethod
def get_all_biblioentry_ids(cls, n_threads=50):
pages = cls.fetch_all(
typename='biblioentry',
body={
'spec': {'include': 'id'}
},
n_threads=n_threads
)
for page in pages:
for subpage in page:
if subpage['count'] > 0:
yield subpage['objs']
@classmethod
def get_all_papers(cls, **kwargs):
pages = cls.fetch_all(
typename='biblioentry',
body={
'spec': {
'include': 'id,title,abstractText,hasFullText,authors,publishTime,url',
**kwargs
}
},
n_threads=1,
)
for page in pages:
for subpage in page:
if subpage['count'] > 0:
yield subpage['objs']
@classmethod
def get_text(cls, biblioentry_ids, n_threads=50):
# Although GetArticleMetadata supports up to 10 ids,
# it does not return the ids themselves, and if we
# query using an id that actually doesn't have full
# text, then we cannot know which paper is missing in
# the result. Therefore just query one by one in parallel.
def thread_work(biblioentry_id):
text = cls.request(
typename='biblioentry',
api='getarticlemetadata',
body={'ids': [biblioentry_id]}
)
if 'value' in text:
text = text['value']['value']
else:
# There was an error and retries couldn't resolve
# it. Just return that error message + content.
text = [text]
if len(text) > 0 and 'body_text' in text[0]:
body_text = text[0]['body_text']
else:
body_text = ''
return body_text
with ThreadPool(n_threads) as pool:
return pool.map(thread_work, biblioentry_ids)
```
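A small illustrative driver for the paginated generator above (the thread count and early stop are arbitrary choices, not from the repository):
```python
# Count BiblioEntry ids streamed by the threaded pager; stop after a few
# pages to keep the example cheap.
total = 0
for page_i, ids_page in enumerate(C3aiAdapter.get_all_biblioentry_ids(n_threads=8)):
    total += len(ids_page)
    if page_i >= 4:
        break
print('BiblioEntry ids seen so far:', total)
```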
#### File: src/adapter/elastic_search.py
```python
from datetime import datetime
from elasticsearch_dsl import Document, Keyword, Text, Date, connections
from elasticsearch_dsl.query import MultiMatch
from .base.search_engine import SearchEngineBase
# define a default article mapping
class DefaultArticleMapping(Document):
title = Text(analyzer='snowball')
authors = Text()
publishTime = Date()
abstract = Text(analyzer='snowball')
body = Text(analyzer='snowball')
class Index:
name = "covidprof"
# set settings and possibly other attributes of the index like
# analyzers
settings = {"number_of_shards": 1, "number_of_replicas": 0}
class ElasticSearchAdapter(SearchEngineBase):
def __init__(self, mapping=DefaultArticleMapping):
# establish a persistent elasticsearch connection
connections.create_connection()
# set the mapping as an instance attribute, for use in the _add method
self.mapping = mapping
# push the mapping template to elasticsearch and initialize the index
self.mapping.init()
def _add(self, documents):
if isinstance(documents, dict):
documents = [documents]
for doc in documents:
doc_without_id = {k: v for (k, v) in doc.items() if k != "id"}
# convert publishTime from string to datetime if it exists
if doc_without_id['publishTime'] != '':
doc_without_id['publishTime'] = datetime.strptime(doc_without_id['publishTime'], '%Y-%m-%dT%H:%M:%S')
else:
doc_without_id['publishTime'] = None
# save the doc
self.mapping(meta={'id': doc['id']}, **doc_without_id).save()
# refresh index to make changes live
self.mapping._index.refresh()
def _search(self, query, n, target_fields=['body', 'abstract'],
frag_size=500, n_frags=3):
# target_fields is in order of importance!!
s = self.mapping.search()
s.query = MultiMatch(
query=query,
fields=target_fields,
)
for f in target_fields:
s = s.highlight(f, pre_tags='', post_tags='',
fragment_size=frag_size,
number_of_fragments=n_frags)
s = s.sort(
{'_score': {'order': 'desc'}},
{'publishTime': {'order': 'desc'}},
)
return s[:n].execute()
@classmethod
def get_highlight_frags(cls, doc, fields=['body', 'abstract']):
'''Extract highlighted fragments'''
for field in fields:
if field in doc.meta.highlight:
return '\n\n'.join(doc.meta.highlight[field])
```
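A hypothetical round trip against a locally running Elasticsearch instance (document values are made up; the public `add`/`search` wrappers come from `SearchEngineBase`):
```python
se = ElasticSearchAdapter()
se.add({
    'id': 'example-1',
    'title': 'Example article',
    'authors': 'A. Author',
    'publishTime': '2020-05-01T00:00:00',
    'abstract': 'Short abstract about transmission.',
    'body': 'Longer body text about COVID-19 transmission routes.',
})
for hit in se.search('transmission routes', 3):
    print(hit.meta.id, hit.title)
```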
#### File: src/adapter/tantivy_search.py
```python
import os
import tantivy
from .base.search_engine import SearchEngineBase
class TantivyAdapter(SearchEngineBase):
def __init__(self,
index_path='tantivy_index',
fields=[
('id', True),
('title', True),
('authors', True),
('publishTime', True),
('abstract', True),
('body', True)]
):
schema_builder = tantivy.SchemaBuilder()
for field, stored in fields:
schema_builder.add_text_field(field, stored=stored)
self.schema = schema_builder.build()
if not os.path.exists(index_path):
os.mkdir(index_path)
self.index = tantivy.Index(self.schema, index_path)
def _add(self, documents):
writer = self.index.writer()
if isinstance(documents, dict):
documents = [documents]
for doc in documents:
writer.add_document(tantivy.Document(**doc))
writer.commit()
def _search(self, query, n=3, target_fields=['title', 'abstract', 'body']):
# Reload the index to ensure it points to the last commit.
self.index.reload()
searcher = self.index.searcher()
query = self.index.parse_query(query, target_fields)
search_result = searcher.search(query, limit=n)
return [searcher.doc(doc_addr) for _, doc_addr in search_result.hits]
```
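The Tantivy-backed variant can be exercised the same way; a minimal sketch (index path and document values are illustrative):
```python
t = TantivyAdapter(index_path='tantivy_index')
t.add({'id': 'example-1', 'title': 'Example', 'authors': 'A. Author',
       'publishTime': '2020-05-01T00:00:00', 'abstract': 'Short abstract.',
       'body': 'Body text about spike protein binding.'})
for doc in t.search('spike protein', n=3):
    print(doc)
```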
#### File: covidprof_submission/src/build_index.py
```python
import sys
import os
import json
from time import time
from glob import glob
from itertools import chain
import logging
from datetime import datetime
from adapter.c3ai import C3aiAdapter
from adapter.elastic_search import ElasticSearchAdapter as SearchEngine
from adapter.web import WebAdapter
def download(skip_to=-1, stop_at=-1, get_paper_args={}, output='file'):
a = C3aiAdapter()
for page_i, papers_page in enumerate(a.get_all_papers(**get_paper_args)):
t0 = time()
if skip_to != -1 and page_i < int(skip_to):
logging.info(f'Skipping page {page_i}')
continue
if stop_at != -1 and page_i >= int(stop_at):
break
logging.info(f'Downloading page {page_i}...')
# Strip unnecessary info
text_papers = []
for p in papers_page:
del p['meta']
del p['version']
if p.get('hasFullText'):
text_papers.append(p)
else:
p['text'] = ''
text_paper_ids = [p['id'] for p in text_papers]
# Fetch paper text
texts = a.get_text(text_paper_ids)
# Assign text to paper
for p, t in zip(text_papers, texts):
p['text'] = t
# Output
if output == 'file':
with open(f'data/page_{page_i}.json', 'w') as f:
json.dump(papers_page, f)
elif output == 'yield':
yield papers_page
else:
raise ValueError(f'Unknown output type {output}')
logging.info(f'Page {page_i} downloaded in {time()-t0:.1f}s')
page_i += 1
logging.info('Done')
def _paper_to_doc(papers):
for paper in papers:
if type(paper['text']) is list:
paper['text'] = '\n'.join(section['text']
for section in paper['text'])
yield {
'id': paper['id'],
'title': paper.get('title', ''),
'authors': paper.get('authors', ''),
'publishTime': paper.get('publishTime', ''),
'abstract': paper.get('abstractText', ''),
'body': paper.get('text', ''),
'url': paper.get('url', ''),
}
def _get_doc_iters():
for path in sorted(glob('data/page_*.json')):
with open(path, 'r') as f:
papers = json.load(f)
logging.info(f'Indexing {path}')
yield _paper_to_doc(papers)
def init_index():
t = SearchEngine()
doc_iter = chain.from_iterable(_get_doc_iters())
t.add(doc_iter)
def update_index():
a = C3aiAdapter()
t = SearchEngine()
# Get (latest ids) - (indexed ids)
for i, ids_page in enumerate(a.get_all_biblioentry_ids()):
new_ids = []
for id_obj in ids_page:
paper_id = id_obj['id']
docs = t.search(paper_id, target_fields=['id'], n=1)
if docs and docs[0]['id'][0] == paper_id:
continue
new_ids.append(paper_id)
if not new_ids:
logging.info(f'IDs page {i} has no new paper IDs.')
continue
logging.info(f'IDs page {i} new paper IDs: {new_ids}')
# Get header and full text for these new papers
pages = download(get_paper_args={'filter': ' || '.join(f'id=="{i}"' for i in new_ids)},
output='yield')
for papers in pages:
t.add(_paper_to_doc(papers))
def add(json_file):
assert os.path.exists(json_file)
with open(json_file, 'r') as f:
page = json.load(f)
t = SearchEngine()
t.add(page)
def add_web(url, page_type='generic'):
# Download single page
t = SearchEngine()
w = WebAdapter()
title, text = w.parse_page(url, page_type=page_type)
print(text)
print('-------------------------------')
yn = input('Above text OK? (y/n)')
    if yn.lower() != 'y':
print('Abort')
exit(0)
date_str = input('Enter page last update date (YYYYmmdd): ')
page_id = input('Enter page id: ')
dt = datetime.strptime(date_str, '%Y%m%d')
dt_str = dt.isoformat()
page = {
'id': page_id,
'title': title,
'body': text,
'url': url,
'publishTime': dt_str,
}
t.add(page)
def debug():
logging.warning(f'Debugging with args: {sys.argv}')
a = C3aiAdapter()
t = SearchEngine()
import code
code.interact(local={**locals(), **globals()})
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
if len(sys.argv) < 2:
logging.error('Specify mode: init or update')
exit(1)
mode = sys.argv[1]
if mode == 'download':
list(download(**dict(v.split('=') for v in sys.argv[2:])))
elif mode == 'init':
init_index()
elif mode == 'update':
update_index()
elif mode == 'add':
assert len(sys.argv) >= 3, 'Provide filename'
add(sys.argv[2])
elif mode == 'addweb':
        assert len(sys.argv) >= 3, 'Provide URL'
add_web(sys.argv[2], **dict(v.split('=') for v in sys.argv[3:]))
else:
debug()
```
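For reference, the modes handled in `__main__` above translate to invocations like these (paths and the URL are placeholders):
```python
# python build_index.py download skip_to=0 stop_at=5   # fetch paper pages into data/
# python build_index.py init                           # index every data/page_*.json
# python build_index.py update                         # index only newly added BiblioEntry ids
# python build_index.py add data/page_0.json           # index one previously downloaded page
# python build_index.py addweb https://example.org page_type=generic
```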
#### File: covidprof_submission/src/excerpt_gen.py
```python
import requests
import re
from time import time
from multiprocessing.pool import ThreadPool
import json
import logging
import subprocess
from time import sleep
from bs4 import BeautifulSoup
from adapter.elastic_search import ElasticSearchAdapter as SearchEngine
from adapter.gpt3 import GPT3Adapter
def get_model(model_name, use_gpu=False, pipeline_type='question-answering'):
from transformers import pipeline
device = -1 # -1 for CPU
if use_gpu:
device = 0 # First GPU device
logging.info(f'Using GPU device: {device}')
else:
logging.info('Using CPU')
    logging.info(f'Initializing model "{model_name}" ...')
    model = pipeline(pipeline_type, model=model_name,
tokenizer=model_name, framework='pt', device=device)
logging.info(f'Model max tokens: {model.tokenizer.model_max_length}')
return model
def preprocess_text(data):
spchar_list = ['\n', '/', '\\', '[', ']']
data = data.translate({ord(x): x for x in spchar_list})
data = data.replace('\xa0', ' ')
data = re.sub(r'(.)([\.\,\!\?\'\"\[\]\)\;\:])([^ ])', r'\1\2 \3', data)
data = re.sub(r'(\[\d+\])+', '', data)
data = re.sub(r' \n', '\n', data)
data = re.sub(r'\n+', ' ', data)
data = re.sub(r' +', ' ', data)
return data
def get_web_data(url):
wiki_data = requests.get(url).text
soup = BeautifulSoup(wiki_data, 'lxml')
data = []
paragraphs = soup.select('p')
excerpts = soup.select('div.excerpt')
for k in excerpts + paragraphs:
data.append(k.getText())
data = '\n'.join([str(s) for s in data])
data = preprocess_text(data)
return data
def split_data(data, part_len):
base = 0
i = 0
parts = []
while True:
part = data[base:base+part_len]
last_period = part.rfind('.')
if last_period == -1:
last_period = part_len
part = part[:last_period+1].strip()
parts.append(part)
base = base + last_period+1
if base >= len(data):
break
i += 1
return parts
def get_find_answer_func(model, question):
def find_answer(part):
return model({
'context': part,
'question': question,
})
return find_answer
def get_excerpts_text(top_answers):
excerpts = []
sentences_before = 1
sentences_after = 1
for i, (ans, part) in top_answers:
# Go n sentences ahead
str_before = part[:ans['start']]
ends_before = [m.end(0) for m in re.finditer(r'[^\.]\. ', str_before)]
if ends_before:
begin = ends_before[-min(len(ends_before), (sentences_before+1))]
else:
begin = ans['start']
str_after = part[ans['end']:]
if len(str_after) < 2:
end = ans['end'] + len(str_after)
else:
end = ans['end']
ends_after = [m.end(0) for m in re.finditer(r'[^\.]\.', str_after)]
if ends_after:
end += ends_after[min(len(ends_after) - 1, sentences_after)]
ans_src = part[begin:end]
excerpts.append((i, ans_src))
return excerpts
class ExcerptGen:
def __init__(self,
model_name='deepset/bert-large-uncased-whole-word-masking-squad2',
accelerator='cpu'):
        self._check_and_start_elastic_server()
self.se = SearchEngine()
self.accelerator = accelerator.lower()
self.gpt3 = GPT3Adapter()
self.model = None
if self.accelerator != 'colab':
self.model = get_model(model_name=model_name,
use_gpu=self.accelerator == 'gpu')
    def _check_and_start_elastic_server(self):
'''Make sure elasticsearch server is up'''
logging.info('Checking Elasticsearch server status...')
cmd = 'sudo service elasticsearch status'.split()
try:
checkstr = subprocess.check_output(cmd).decode('utf-8')
except subprocess.CalledProcessError as e:
if 'elasticsearch is not running' in e.output.decode('utf-8')\
.lower():
logging.info('Elasticsearch server is not running - '
'attempting to start it now...')
cmd = 'sudo service elasticsearch start'.split()
run_result = subprocess.run(cmd)
if run_result.returncode != 0:
logging.error(
'Command failed with return code: '
                        f'{run_result.returncode}')
exit()
# Wait a bit after Elasticsearch server is up because
# making a connection too soon will cause error
sleep(5)
return
raise(e)
if 'elasticsearch is running' in checkstr.lower():
# No need to do anything
            logging.info('Elasticsearch server already running - good.')
def get_excerpts_from_docs(self, question, docs, max_page_size=512*32):
answers = []
find_answer = get_find_answer_func(self.model, question)
logging.info(f'Get excerpts with max_page_size={max_page_size}')
if self.accelerator == 'gpu':
for doc in docs:
# If p is too large it will crash the GPU...
logging.info(
f'Splitting doc with len={len(doc)}')
parts = split_data(doc, max_page_size)
logging.info(
f'Doc len {len(doc)} split into {len(parts)} parts')
part_answers = [find_answer(p) for p in parts]
answers.append(max(part_answers, key=lambda a: a['score']))
elif self.accelerator == 'colab':
answers = self.ask_colab(question, docs)
else:
n_threads = 8
with ThreadPool(n_threads) as pool:
answers = pool.starmap(find_answer, enumerate(docs))
# Extract answer text area
top_answers = sorted(enumerate(zip(answers, docs)),
key=lambda v: v[1][0]['score'],
reverse=True)
return get_excerpts_text(top_answers)
def get_excerpts(self,
question,
part_len=1024*2,
top_n_answers=4,
url='https://en.wikipedia.org/wiki/COVID-19_pandemic'):
t0 = time()
# Combine wikipedia data + c3ai
c3ai_docs = self.se.search(question, 3)
c3ai_texts = [preprocess_text(d['body'] if d['body'] else d['abstract'])
for d in c3ai_docs]
web_text = get_web_data(url)
if self.accelerator in ('gpu', 'colab'):
# GPU/colab is fast enough to not need further text breakdown
parts = [web_text, *c3ai_texts]
logging.info(f'Data ({sum(len(p) for p in parts)}) (no split)')
else:
c3ai_text = '\n\n\n'.join(c3ai_texts)
evidence_pool = '\n\n\n'.join((web_text, c3ai_text))
parts = split_data(evidence_pool, part_len=part_len)
logging.info(
f'Data ({len(evidence_pool)}) split into {len(parts)} parts')
logging.info(f'Data gathering: {time()-t0:.1f}s')
excerpts = self.get_excerpts_from_docs(question, parts)[:top_n_answers]
return excerpts
def ask_colab(self, question, docs,
conn_str='http://192.168.1.9:8899/ask'):
r = requests.post(
conn_str,
headers={'Content-Type': 'application/json',
'Accept': 'application/json'},
json={'question': question, 'docs': docs},
timeout=60
)
if r.status_code == 200:
return json.loads(r.content)
else:
logging.error(f'Colab server returned code {r.status_code}!')
logging.error(r.content)
return r.content
```
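A hypothetical driver for `ExcerptGen`; it assumes the Elasticsearch index built by `build_index.py`, credentials for `GPT3Adapter`, and (for 'cpu'/'gpu') a local download of the HuggingFace QA model:
```python
eg = ExcerptGen(accelerator='cpu')
excerpts = eg.get_excerpts('How does COVID-19 spread?', top_n_answers=3)
for doc_idx, passage in excerpts:
    print(f'[source {doc_idx}] {passage}\n')
```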
#### File: covidprof_submission/src/get_excerpts_client.py
```python
import requests
import json
from time import time
# conn_str = 'http://172.16.17.32:58899/get_excerpts'
conn_str = 'http://localhost:8899/get_excerpts'
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
def get_excerpts(question,
url='https://en.wikipedia.org/wiki/Coronavirus_disease_2019'):
r = requests.post(conn_str, headers=headers,
json={'question': question, 'url': url})
if r.status_code == 200:
return json.loads(r.content.decode('utf-8'))
else:
print(f'Server returned status {r.status_code}')
return r
t0 = time()
es = get_excerpts('is covid lethal or not?')
print(f'get_excerpts(): {time()-t0:.1f}s')
for e in es.split('", "'):
print(e + '\n')
```
#### File: covidprof_submission/src/numerical.py
```python
from adapter.c3aidatalake import evalmetrics
from adapter.gpt3 import GPT3Adapter
from datetime import datetime
from dateutil.relativedelta import relativedelta
import re
import json
import logging
import seaborn as sns
import matplotlib
matplotlib.use('Agg')
sns.set()
def parse_date(date_str):
return datetime.strptime(date_str, '%Y-%m-%d').date()
def pretty_location(ugly_location):
return ', '.join([' '.join(re.findall('[A-Z][^A-Z]*', component))
for component in ugly_location.replace(' ', '_').split('_')])
class Numerical():
def __init__(self):
self.gpt3 = GPT3Adapter()
@classmethod
def _clean_spec_str(cls, spec_str):
res = spec_str.replace('\n', '')
# Add quotes to keys
res = re.sub(r'([\w]+):', r'"\1":', res)
return res
@classmethod
def _parse_spec_dates(cls, spec):
try:
from_date_str = spec['from']
to_date_str = spec['to']
from_date, to_date = cls.resolve_dates(from_date_str, to_date_str)
spec['from'] = from_date
spec['to'] = to_date
except Exception as e:
logging.error(e)
logging.error('Bad input from GPT-3?')
# best thing to do here just might be to ignore exception and tell
# user we couldn't find the data
return None, None
def handle_request(self, text):
spec_str = self.gpt3.parse_numerical_query(text)
spec_json_str = self._clean_spec_str(spec_str)
df = None
try:
# Set default spec values and do light preprocess
spec = {
'type': 'case',
'location': 'UnitedStates',
'from': '1m',
'to': datetime.now().strftime('%Y-%m-%d'),
}
user_spec = json.loads(spec_json_str)
no_user_location = 'location' not in user_spec
no_user_from_date = 'from' not in user_spec
no_user_to_date = 'to' not in user_spec
logging.info(f'Numerical spec parsed from user input: {user_spec}')
user_spec = {
k.lower(): v.strip() for k, v in user_spec.items() if v.strip()
}
spec.update(user_spec)
self._parse_spec_dates(spec)
from_date_str = spec['from'].strftime('%Y-%m-%d')
to_date_str = spec['to'].strftime('%Y-%m-%d')
logging.info(f'Final numerical spec: {spec}')
# Map metric name for labels etc
loc_str = pretty_location(spec['location'])
df, data_source = self.fetch_data(spec)
# Deal with when data couldn't be fetched
metric_name = {
'case': 'Confirmed Cases',
'death': 'Confirmed Deaths',
'recovery': 'Confirmed Recoveries',
}[spec['type']]
loc_output = loc_str + \
(' (you didn\'t specify a location)' if no_user_location else '')
from_date_output = from_date_str + \
(' (you didn\'t specify a FROM date)' if no_user_from_date else '')
to_date_output = to_date_str + \
(' (you didn\'t specify a TO date)' if no_user_to_date else '')
if df is None:
reply_text = \
(f'It looks like I don\'t have {metric_name} data'
f' for {loc_output} from {from_date_output} to '
f'{to_date_output}. Try a different location or period?')
return reply_text, None
except json.JSONDecodeError:
logging.error(f'Could not parse spec_str: {spec_str}')
reply_text = ('I don\'t understand that request. '
'Could you be a bit more specific?')
return reply_text, None
# Remove missing data
missing_col = next(c for c in df.columns if c.endswith('missing'))
df = df[df[missing_col] == 0]
data_col = next(c for c in df.columns if 'data' in c)
df = df.rename(columns={data_col: 'metric'})
df = df.loc[:, ['dates', 'metric']].sort_values('dates')
# Make figure
ax = df.plot.line(x='dates', lw=3, color='orange',
figsize=(7, 4), legend=False)
ax.set_xlabel('Dates', fontsize=12)
ax.set_ylabel(f'No. of {metric_name}', fontsize=12)
ax.set_title(f'{data_source} {metric_name} in {loc_str}', fontsize=16)
f = ax.get_figure()
f.tight_layout()
png_filename = 'stats_graph.png'
f.savefig(png_filename, format='png')
diff = int(df.iloc[-1].metric - df.iloc[0].metric)
if diff != 0:
inc_red_str = 'increased' if diff > 0 else 'reduced'
diff_pct = 100 * ((df.iloc[-1].metric/df.iloc[0].metric) - 1) \
if df.iloc[0].metric > 0 else float('inf')
pct_sign = '+' if diff_pct > 0 else '-'
reply_text = \
(f'This graph shows the number of {metric_name.lower()} '
f'in {loc_output} from {from_date_output} to '
f'{to_date_output}, '
f'sourced from {data_source} (via C3.ai). '
f'Over this period, numbers have {inc_red_str} by '
f'{diff} ({pct_sign}{diff_pct:.1f}%).')
else:
reply_text = (
f'Good news! There are no {metric_name.lower()} during this'
f' period according to {data_source} (via C3.ai).')
return reply_text, png_filename
@classmethod
def fetch_data(cls,
spec,
sources=['NYT', 'JHU', 'ECDC', 'CovidTrackingProject']):
metric = spec['type']
if metric == 'recovery':
sources = ['JHU']
spec_loc = spec['location']
if not spec_loc:
spec_loc = 'UnitedStates'
location = cls.resolve_location(spec_loc)
evalmetrics_ids = cls.gen_evalmetrics_ids(location)
data = None
all_cols = []
stop_searching = False
for source in sources:
evalmetrics_expressions = \
cls.gen_evalmetrics_expressions(metric, source)
for id_ in evalmetrics_ids:
df = cls.evalmetrics_request(
[id_], evalmetrics_expressions, spec['from'], spec['to'])
all_cols.extend(df.columns)
# Discard df if all missing
missing_col = next(
col for col in df.columns if col.endswith('missing'))
if (df[missing_col] == 100).all():
continue
data = df
stop_searching = True
break
if stop_searching:
break
if data is None:
logging.warning(f'No data for {evalmetrics_ids}')
return data, source
@classmethod
def evalmetrics_request(cls, ids, expressions, from_date, to_date):
table_name = "outbreaklocation"
body = {
'spec': {
'ids': ids,
'expressions': expressions,
'interval': "DAY",
'start': from_date.strftime("%Y-%m-%d"),
'end': to_date.strftime("%Y-%m-%d"),
}
}
return evalmetrics(table_name, body, get_all=True)
@classmethod
def gen_evalmetrics_ids(cls, location):
# if the location is in the US and the county and city are both present,
# query evalmetrics with the county first. if it fails, query evalmetrics using
# city as county
# if the location is outside the US and the city and province are both present,
# query evalmetrics with the province first. if it fails, query evalmetrics using city as
# province
if location['country'] == 'UnitedStates':
if 'city' in location:
return [
'_'.join(
[location['county'], location['province'], location['country']]),
'_'.join([location['city'], location['province'], location['country']])]
elif 'county' in location:
return ['_'.join([location['county'], location['province'], location['country']])]
else:
if 'city' in location:
return [
'_'.join([location['province'], location['country']]),
'_'.join([location['city'], location['country']])
]
# the below rules apply to both US and non US countries that did not fulfill any of the conditions above
if 'province' in location:
return ['_'.join([location['province'], location['country']])]
return [location['country']]
@classmethod
def gen_evalmetrics_expressions(cls, metric, source):
if metric == 'death':
return [f'{source}_ConfirmedDeaths']
elif metric == 'recovery':
return [f'{source}_ConfirmedRecoveries']
else:
return [f'{source}_ConfirmedCases']
@classmethod
def resolve_location(cls, location_str):
resolved = {}
components = location_str.replace(' ', '_').split('_')
components.reverse()
resolved['country'] = components[0]
if resolved['country'].lower() == 'UnitedStates'.lower():
if len(components) > 1:
# this means the location at least contains a state
resolved['province'] = components[1]
if len(components) > 2:
# this means the location at least contains a county
resolved['county'] = components[2]
if len(components) > 3:
# this means the location at least contains a city
resolved['city'] = components[3]
else:
if len(components) > 1:
# this means the location at least contains a province
resolved['province'] = components[1]
if len(components) > 2:
# this means the location at least contains a city
resolved['city'] = components[2]
return resolved
@classmethod
def resolve_date_str(cls, date_str):
abs_date = None
delta_date = None
try:
abs_date = parse_date(date_str)
except ValueError:
# must be delta
search = re.search(r'(\d+)([dwmy])', date_str)
n = int(search.group(1))
unit = search.group(2)
delta_date = relativedelta(**{
{
'd': 'days',
'w': 'weeks',
'm': 'months',
'y': 'years',
}[unit]: n
})
return abs_date, delta_date
@classmethod
def resolve_dates(cls, from_date_str, to_date_str):
'''Convert parsed date strings into date range'''
from_date, delta_from = cls.resolve_date_str(from_date_str)
to_date, delta_to = cls.resolve_date_str(to_date_str)
if from_date: # From absolute date
if to_date: # To absolute date
pass # Both absolute, nothing to do
else: # To delta date
to_date = from_date + delta_to
else: # From delta date
if to_date: # To absolute date
from_date = to_date - delta_from
else: # To delta date
# Assume TO is relative to today
to_date = datetime.now().date() - delta_to
from_date = to_date - delta_from
# In some rare cases from_date can be in the future
# e.g. "Thanksgiving" = 11/24 in US, 10/11 in Canada
if from_date > to_date:
from_date -= relativedelta(years=1)
return from_date, to_date
``` |
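The two classmethod parsers at the end are easy to sanity-check in isolation; the expected outputs below were traced by hand from the code above:
```python
print(Numerical.resolve_location('Travis_California_UnitedStates'))
# {'country': 'UnitedStates', 'province': 'California', 'county': 'Travis'}
print(Numerical.resolve_dates('1m', '2020-12-01'))
# (datetime.date(2020, 11, 1), datetime.date(2020, 12, 1))
```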
{
"source": "joy13975/elfin",
"score": 2
} |
#### File: elfin/elfinpy/dbgen.py
```python
import glob
import numpy as np
import codecs
import json
import argparse
import shutil
from collections import defaultdict
from collections import OrderedDict
import Bio.PDB
from utilities import *
from pdb_utilities import *
nested_dict = lambda: defaultdict(nested_dict)
def parse_args(args):
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='Generates the xdb database from preprocessed single and double modules.')
parser.add_argument('--relaxed_pdbs_dir', default='./resources/pdb_prepped/')
parser.add_argument('--metadata_dir', default='./resources/metadata/')
parser.add_argument('--output', default='./resources/xdb.json')
parser.add_argument('--aligned_pdb_dir', default='./resources/pdb_aligned/')
return parser.parse_args(args)
def main(test_args=None):
"""main"""
args = parse_args(sys.argv[1:] if test_args is None else test_args)
XDBGenerator(
args.relaxed_pdbs_dir,
args.metadata_dir,
args.aligned_pdb_dir,
args.output
).run()
class XDBGenerator:
def __init__(
self,
relaxed_pdbs_dir,
metadata_dir,
aligned_pdb_dir,
out_file
):
self.relaxed_pdbs_dir = relaxed_pdbs_dir
module_types = ['doubles', 'singles', 'hubs']
        shutil.move('metadata', 'resources/metadata')
make_dir(aligned_pdb_dir)
for mt in module_types:
make_dir(aligned_pdb_dir + '/{}/'.format(mt))
self.hub_info = read_json(metadata_dir + '/hub_info.json')
self.aligned_pdb_dir = aligned_pdb_dir
self.out_file = out_file
self.si = Bio.PDB.Superimposer()
self.modules = nested_dict()
self.n_to_c_tx = []
self.hub_tx = []
# Cache in memory because disk I/O is really heavy here
self.single_pdbs = defaultdict(dict)
self.double_pdbs = defaultdict(dict)
def find_tip(self, term, struct, chain_id):
term = term.lower()
assert(term in {'c', 'n'})
chain = get_chain(struct, chain_id=chain_id)
residues = chain.child_list
n = len(residues)
divider = 6 # The smaller the divider, the closer to terminus.
assert(n > 0)
if term == 'n':
start_idx, end_idx = 0, n//divider
else:
start_idx, end_idx = (divider-1)*n//divider, n
sum_coord = np.asarray([0., 0., 0.])
for r in residues[start_idx:end_idx]:
sum_coord += r['CA'].get_coord().astype('float64')
tip_vector = sum_coord/(end_idx - start_idx - 1)
return tip_vector.tolist()
def create_tx(self, mod_a, a_chain, mod_b, b_chain, rot, tran):
tx_entry = \
OrderedDict([
('mod_a', mod_a),
('mod_a_chain', a_chain),
('mod_b', mod_b),
('mod_b_chain', b_chain),
('rot', rot.tolist()),
('tran', np.asarray(tran).tolist())
])
return tx_entry
def process_hub(self, file_name):
"""Aligns a hub module to its A component (chain A), then computes the
transform for aligning itself to its other components.
"""
# Load structures
hub = read_pdb(file_name)
# Centre the hub
self.move_to_origin(hub)
hub_fusion_factor = 4
hub_name = os.path.basename(file_name).replace('.pdb', '')
hub_meta = self.hub_info.get(hub_name, None)
        if hub_meta is None:
raise ValueError('Could not get hub metadata for hub {}\n'.format(hub_name))
# Create module entry first
comp_data = hub_meta['component_data']
del hub_meta['component_data']
hub_meta['chains'] = {
c.id: {
'single_name': comp_data[c.id]['single_name'],
'n': nested_dict(),
'n_tip': nested_dict(),
'c': nested_dict(),
'c_tip': nested_dict(),
'n_residues': len(c.child_list)
} for c in hub.get_chains()
}
hub_meta['radii'] = self.get_radii(hub)
self.modules['hubs'][hub_name] = hub_meta
# The current process does not allow hub to hub connections. Maybe this
# need to be changed?
for hub_chain_id in comp_data:
chain_data = comp_data[hub_chain_id]
comp_name = chain_data['single_name']
if chain_data['c_free']:
b_name_gen = (tx['mod_b'] for tx in self.n_to_c_tx if tx['mod_a'] == comp_name)
for single_b_name in b_name_gen:
# Compute the transformation required to move a single
# module B from its aligned position to the current hub's
# "finger tip".
#
# Here we do not use the second quadrant method, because during
# stitching none of the hubs' residues get changed. The stitching
# will take place at the end of the hub's component's terminal.
rc_hub_a = get_chain_residue_count(hub, hub_chain_id)
rc_dbl_a = get_pdb_residue_count(self.single_pdbs[comp_name])
fusion_count = int_ceil(float(rc_dbl_a) / hub_fusion_factor)
double = self.double_pdbs[comp_name][single_b_name]
# Compute transformation matrix.
# Find transform between component single and single b.
hub_single_chain_id = \
list(self.single_pdbs[comp_name].get_chains())[0].id
single_b_chain_id = \
list(self.single_pdbs[single_b_name].get_chains())[0].id
dbl_tx_id = self.modules['singles'][comp_name]['chains'] \
[hub_single_chain_id]['c'] \
[single_b_name][single_b_chain_id]
assert(dbl_tx_id is not None)
dbl_n_to_c = self.n_to_c_tx[dbl_tx_id]
dbl_tx = np.vstack(
(np.hstack((dbl_n_to_c['rot'], np.transpose([dbl_n_to_c['tran']]))),
[0,0,0,1])
)
# Find transform from hub to single A.
rot, tran = self.get_rot_trans(
fixed=hub,
fixed_chain_id=hub_chain_id,
moving=double,
fixed_resi_offset=rc_hub_a - fusion_count,
moving_resi_offset=rc_dbl_a - fusion_count,
match_count=fusion_count
)
# Rotation in BioPython is inversed.
rot = np.transpose(rot)
comp_to_single_tx = np.vstack(
(np.hstack((rot, np.transpose([tran]))),
[0,0,0,1])
)
# 1. Shift to hub's component frame.
# 2. Shift to double B frame.
                    dbl_raised_tx = np.matmul(comp_to_single_tx, dbl_tx)
# Decompose transform.
rot = dbl_raised_tx[:3, :3]
tran = dbl_raised_tx[:3, 3]
tx = self.create_tx(
hub_name,
hub_chain_id,
single_b_name,
single_b_chain_id,
rot,
tran)
tx_id = len(self.n_to_c_tx) + len(self.hub_tx)
self.modules['hubs'][hub_name]['chains'] \
[hub_chain_id]['c'] \
[single_b_name][single_b_chain_id] = tx_id
self.modules['hubs'][hub_name]['chains'] \
[hub_chain_id]['c_tip'] = \
self.find_tip('c', hub, hub_chain_id)
self.modules['singles'][single_b_name]['chains'] \
[single_b_chain_id]['n'] \
[hub_name][hub_chain_id] = tx_id
self.hub_tx.append(tx)
if chain_data['n_free']:
a_name_gen = (tx['mod_a'] for tx in self.n_to_c_tx if tx['mod_b'] == comp_name)
for single_a_name in a_name_gen:
# Same as c_free except comp acts as single b
rc_a = get_pdb_residue_count(self.single_pdbs[single_a_name])
rc_b = get_pdb_residue_count(self.single_pdbs[comp_name])
fusion_count = int_ceil(float(rc_b) / hub_fusion_factor)
double = self.double_pdbs[single_a_name][comp_name]
# Compute transformation matrix.
# Find transform from double component B to hub component.
rot, tran = self.get_rot_trans(
fixed=hub,
fixed_chain_id=hub_chain_id,
moving=double,
fixed_resi_offset=0, # start matching from the n-term of hub component, which is index 0
moving_resi_offset=rc_a, # start matching at the beginning of single b in the double
match_count=fusion_count
)
# Rotation in BioPython is inversed.
rot = np.transpose(rot)
dbl_to_hub_tx = np.vstack(
(np.hstack((rot, np.transpose([tran]))),
[0,0,0,1])
)
# 1. Shift to hub frame - do nothing; just dbl_to_hub_tx.
# Decompose transform.
rot = dbl_to_hub_tx[:3, :3]
tran = dbl_to_hub_tx[:3, 3]
single_a_chain_id = \
list(self.single_pdbs[single_a_name].get_chains())[0].id
tx = self.create_tx(
single_a_name,
single_a_chain_id,
hub_name,
hub_chain_id,
rot,
tran)
tx_id = len(self.n_to_c_tx) + len(self.hub_tx)
self.modules['singles'][single_a_name]['chains'] \
[single_a_chain_id]['c'] \
[hub_name][hub_chain_id] = tx_id
self.modules['hubs'][hub_name]['chains'] \
[hub_chain_id]['n'] \
[single_a_name][single_a_chain_id] = tx_id
self.modules['hubs'][hub_name]['chains'] \
[hub_chain_id]['n_tip'] = \
self.find_tip('n', hub, hub_chain_id)
self.hub_tx.append(tx)
save_pdb(
struct=hub,
path=self.aligned_pdb_dir + '/hubs/' + hub_name + '.pdb'
)
def process_double(self, file_name):
"""Aligns a double module to its A component and then computes the transform
for aligning to its B component. Saves aligned structure to output folder.
"""
# Step 1: Load structures
double = read_pdb(file_name)
# Preprocessed pdbs have only 1 chain
assert(len(list(double.get_chains())) == 1)
double_name = file_name.split('/')[-1].replace('.pdb', '')
single_a_name, single_b_name = double_name.split('-')
single_a = self.single_pdbs[single_a_name]
single_b = self.single_pdbs[single_b_name]
rc_a = get_pdb_residue_count(single_a)
rc_b = get_pdb_residue_count(single_b)
rc_double = get_pdb_residue_count(double)
rc_a_half = int_floor(float(rc_a)/2)
rc_b_half = int_ceil(float(rc_b)/2)
# fusion_factor should be deprecated in favour of "core range".
dbl_fusion_factor = 8
fusion_count_a = int_ceil(float(rc_a) / dbl_fusion_factor)
fusion_count_b = int_ceil(float(rc_b) / dbl_fusion_factor)
# Step 2: Move double to align with the first single.
self.align(
moving=double,
fixed=single_a,
moving_resi_offset=rc_a_half - fusion_count_a,
fixed_resi_offset=rc_a_half - fusion_count_a,
match_count=fusion_count_a
)
# Step 3: Get COM of the single_b as seen in the double.
com_b = self.get_centre_of_mass(
single_b,
mother=double,
child_resi_offset=rc_b_half - fusion_count_b,
mother_resi_offset=rc_a + rc_b_half - fusion_count_b,
match_count=fusion_count_b
)
# Step 4: Get transformation of single B to part B inside double.
#
# Double is already aligned to first single so there is no need for
# the first transformation.
#
# Only align residues starting from the middle of single B because
# the middle suffers the least from interfacing displacements.
rot, tran = self.get_rot_trans(
moving=double,
fixed=single_b,
moving_resi_offset=rc_a + rc_b_half - fusion_count_b,
fixed_resi_offset=rc_b_half - fusion_count_b,
match_count=fusion_count_b
)
# Rotation in BioPython is inversed.
rot = np.transpose(rot)
# Inverse result transform because we want the tx that takes the
# single B module to part B inside double.
tmp_tx = np.vstack(
(np.hstack((rot, np.transpose([tran]))),
[0,0,0,1])
)
        inv_tx = np.linalg.inv(tmp_tx)
# Decompose transform.
rot = inv_tx[:3, :3]
tran = inv_tx[:3, 3]
# Step 5: Save the aligned molecules.
#
# Here the PDB format adds some slight floating point error. PDB is
# already phased out so and we should really consider using mmCIF for
# all modules.
save_pdb(
struct=double,
path=self.aligned_pdb_dir + '/doubles/' + double_name + '.pdb'
)
single_a_chain_id = list(single_a.get_chains())[0].id
single_b_chain_id = list(single_b.get_chains())[0].id
tx = self.create_tx(
single_a_name,
single_a_chain_id,
single_b_name,
single_b_chain_id,
rot,
tran)
tx_id = len(self.n_to_c_tx)
self.modules['singles'][single_a_name]['chains'] \
[single_a_chain_id]['c'][single_b_name][single_b_chain_id] = tx_id
self.modules['singles'][single_b_name]['chains'] \
[single_b_chain_id]['n'][single_a_name][single_a_chain_id] = tx_id
self.n_to_c_tx.append(tx)
# Cache structure in memory
self.double_pdbs[single_a_name][single_b_name] = double
def process_single(self, file_name):
"""Centres a single module and saves to output folder."""
single_name = file_name.split('/')[-1].replace('.pdb', '')
single = read_pdb(file_name)
# Preprocessed pdbs have only 1 chain
assert(len(list(single.get_chains())) == 1)
# Check that there is only one chain
chain_list = list(single.get_chains())
if len(chain_list) != 1:
raise ValueError('Single PDB contains {} chains!\n'.format(len(chain_list)))
self.move_to_origin(single)
save_pdb(
struct=single,
path=self.aligned_pdb_dir + '/singles/' + single_name + '.pdb'
)
self.modules['singles'][single_name] = {
'chains': {
chain_list[0].id: {
'n': nested_dict(),
'c': nested_dict(),
'n_residues': len(chain_list[0].child_list)
}
},
'radii': self.get_radii(single)
}
# Cache structure in memory
self.single_pdbs[single_name] = single
def dump_xdb(self):
"""Writes alignment data to a json file."""
to_dump = \
OrderedDict([
('modules', self.modules),
('n_to_c_tx', self.n_to_c_tx)
])
json.dump(to_dump,
open(self.out_file, 'w'),
separators=(',', ':'),
ensure_ascii=False,
indent=4)
def get_centre_of_mass(
self,
child,
mother=None,
child_resi_offset=0,
mother_resi_offset=0,
match_count=-1
):
"""Computes centre-of-mass coordinate of a Bio.PDB.Structure.Structure.
Args:
- child - Bio.PDB.Structure.Structure for which the centre-of-mass should
be calculated.
- mother - Bio.PDB.Structure.Structure onto which child is to be first
aligned.
- moving_resi_offset - the residue offset of the moving
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- fixed_resi_offset - the residue offset of the fixed
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- match_count - number of residues from which carbon alpha coordinates are
extracted.
Returns:
- com - 3x1 numpy array of the centre-of-mass.
"""
CAs = [r['CA'].get_coord().astype('float64') for r in child.get_residues()]
com = np.mean(CAs, axis=0)
if mother is not None:
# This is for finding COM of a single inside a double
_, tran = self.get_rot_trans(
moving=child,
fixed=mother,
moving_resi_offset=child_resi_offset,
fixed_resi_offset=mother_resi_offset,
match_count=match_count
)
com += tran
return com
def get_radii(self, pose):
"""Computes three different measures of the radius.
Args:
- pose - Bio.PDB.Structure.Structure
Returns:
- _ - an dict containing: average of all atoms distances, max
carbon alpha distance, and max heavy atom distance, each calculated
against the centre-of-mass.
"""
if not pose.at_origin:
raise ValueError('get_radii() must be called with centered modules.')
        natoms = 0
        rg_sum = 0
        max_ca_dist = 0
        nHeavy = 0
        max_heavy_dist = 0
        for a in pose.get_atoms():
            dist = np.linalg.norm(
                a.get_coord().astype('float64'))
            rg_sum += dist
            if a.name == 'CA':
                max_ca_dist = max(max_ca_dist, dist)
            if a.element != 'H':
                max_heavy_dist = max(max_heavy_dist, dist)
                nHeavy = nHeavy + 1
            natoms = natoms + 1
        average_all = rg_sum / natoms
return {
'average_all': average_all,
'max_ca_dist': max_ca_dist,
'max_heavy_dist': max_heavy_dist
}
def move_to_origin(self, pdb):
"""Centres a Bio.PDB.Structure.Structure to the global origin."""
com = self.get_centre_of_mass(pdb)
# No rotation - just move to centre
pdb.transform([[1,0,0],[0,1,0],[0,0,1]], -com)
# Tag the pdb
pdb.at_origin = True
def align(
self,
**kwargs
):
"""Moves the moving Bio.PDB.Structure.Structure to the fixed
Bio.PDB.Structure.Structure.
"""
moving = kwargs.pop('moving')
fixed = kwargs.pop('fixed')
moving_resi_offset = kwargs.pop('moving_resi_offset', 0)
fixed_resi_offset = kwargs.pop('fixed_resi_offset', 0)
match_count = kwargs.pop('match_count', -1)
rot, tran = self.get_rot_trans(
moving=moving,
fixed=fixed,
moving_resi_offset=moving_resi_offset,
fixed_resi_offset=fixed_resi_offset,
match_count=match_count
)
# BioPython's own transform() deals with the inversed rotation
# correctly.
moving.transform(rot, tran)
def get_rot_trans(
self,
**kwargs
):
"""Computes the rotation and transformation matrices using BioPython's
superimposer.
Args:
- moving - the Bio.PDB.Structure.Structure that is to move towards the
other (fixed).
- fixed - the Bio.PDB.Structure.Structure that the other (moving) is to
align to.
- moving_resi_offset - the residue offset of the moving
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- fixed_resi_offset - the residue offset of the fixed
Bio.PDB.Structure.Structure when extracting carbon alpha coordinates.
- match_count - number of residues from which carbon alpha coordinates are
extracted.
        ----IMPORTANT NOTE----
The rotation from BioPython is the second dot operand instead of the
conventional first dot operand.
This means instead of the standard R*v + T, the actual transform is done
with v'*R + T.
Hence, the resultant rotation matrix might need transposing if not
passed back into BioPython.
        ----IMPORTANT NOTE----
Returns:
- (rot, tran) - a tuple containing the rotation and transformation
matrices.
"""
moving = kwargs.pop('moving')
moving_chain_id = kwargs.pop('moving_chain_id', 'A')
fixed = kwargs.pop('fixed')
fixed_chain_id = kwargs.pop('fixed_chain_id', 'A')
moving_resi_offset = kwargs.pop('moving_resi_offset', 0)
fixed_resi_offset = kwargs.pop('fixed_resi_offset', 0)
match_count = kwargs.pop('match_count', -1)
moving_chain = get_chain(moving, chain_id=moving_chain_id)
moving_residues = moving_chain.child_list \
[moving_resi_offset:(moving_resi_offset+match_count)]
ma = [r['CA'] for r in moving_residues]
fixed_chain = get_chain(fixed, chain_id=fixed_chain_id)
fixed_residues = fixed_chain.child_list \
[fixed_resi_offset:(fixed_resi_offset+match_count)]
fa = [r['CA'] for r in fixed_residues]
self.si.set_atoms(fa, ma)
return self.si.rotran
def run(self):
"""Calls the processing functions for singles, doubles, and hubs in that
order. Dumps alignment data into json database.
"""
# Single modules
single_files = glob.glob(self.relaxed_pdbs_dir + '/singles/*.pdb')
n_singles = len(single_files)
for i in range(0, n_singles):
print('Centering single [{}/{}] {}' \
.format(i+1, n_singles, single_files[i]))
self.process_single(single_files[i])
# Double modules
double_files = glob.glob(self.relaxed_pdbs_dir + '/doubles/*.pdb')
nDoubles = len(double_files)
for i in range(0, nDoubles):
print('Aligning double [{}/{}] {}' \
.format(i+1, nDoubles, double_files[i]))
self.process_double(double_files[i])
# Hub modules
hub_files = glob.glob(self.relaxed_pdbs_dir + '/hubs/*.pdb')
nHubs = len(hub_files)
for i in range(0, nHubs):
print('Aligning hub [{}/{}] {}' \
.format(i+1, nHubs, hub_files[i]))
self.process_hub(hub_files[i])
self.n_to_c_tx += self.hub_tx
print('Total: {} singles, {} doubles, {} hubs'.format(n_singles, nDoubles, nHubs))
self.dump_xdb()
if __name__ =='__main__':
safe_exec(main)
```
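The transform bookkeeping in `process_hub`/`process_double` repeatedly packs a rotation and translation into a 4x4 homogeneous matrix so transforms can be chained with `matmul` and split apart again. A standalone sketch of that convention (not taken from the repository):
```python
import numpy as np

def to_homogeneous(rot, tran):
    # Same vstack/hstack packing used by XDBGenerator above.
    return np.vstack((np.hstack((rot, np.transpose([tran]))), [0, 0, 0, 1]))

def decompose(tx):
    return tx[:3, :3], tx[:3, 3]

a = to_homogeneous(np.eye(3), [1.0, 2.0, 3.0])
b = to_homogeneous(np.eye(3), [0.0, 0.0, 4.0])
rot, tran = decompose(np.matmul(a, b))
print(tran)  # [1. 2. 7.]
```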
#### File: obsolete/pymol_helpers/gen_spec_sol_rot.py
```python
import argparse, sys, json
import numpy as np
import kabsch
from utilities import *
def get_spec_sol_rot(spec_file, sol_csv):
if spec_file.rfind('.csv') != -1:
spec_pts = read_csv_points(spec_file)
elif spec_file.rfind('.json') != -1:
with open(spec_file, 'r') as file:
spec_pts = np.asarray(json.load(file)['coms'])
else:
        print('Unknown spec file format')
solPts = read_csv_points(sol_csv)
# Centre both pts to their ends
centred_spec = spec_pts - spec_pts[-1]
centred_sol = solPts - solPts[-1]
# Equalise sample points
sol_up_pts = upsample(centred_spec, centred_sol)
sol_up_pts = sol_up_pts - sol_up_pts[-1]
# Find Kabsch rotation for solution -> spec
    rot = kabsch.kabsch(sol_up_pts, centred_spec)
return gen_pymol_txm(rot)
def main():
ap = argparse.ArgumentParser(
description='Generate spec to solution rotation string for Pymol')
ap.add_argument('spec_file')
ap.add_argument('sol_file')
args = ap.parse_args()
print(get_spec_sol_rot(args.spec_file, args.sol_file))
if __name__ == '__main__':
main()
```
#### File: elfin/elfinpy/pdb_utilities.py
```python
import Bio.PDB
DIRTY_ATOMS = {'1H', '2H', '3H', 'OXT'}
BACKBONE_NAMES = {'N', 'CA', 'C', 'O', 'H', 'HA'}
def get_pdb_residue_count(pdb):
"""Returns the residue count of a Bio.PDB.Structure.Structure."""
return sum([len(c.child_list) for c in pdb.child_list[0].child_list])
def get_chain_residue_count(struct, chain_id):
"""Returns the residue count of a Bio.PDB.Structure.Structure."""
return len(get_chain(struct, chain_id).child_list)
def copy_residues(pdb, chain_ids=None):
return [r.copy() for r in get_residues(pdb, chain_ids)]
def get_residues(pdb, chain_ids=None):
"""Returns returns residues copied from a PDB.
Args:
- pdb - Bio.PDB.Structure.Structure.
- chain_ids - strip residues from these specific chain_ids only.
Returns:
- residues - a list of Bio.PDB.Residue.Residue.
"""
residues = []
for model in pdb:
for chain in model:
if chain_ids == None or chain.id in chain_ids:
residues.extend(chain.child_list)
return residues
def get_chain(struct, chain_id='A'):
"""Returns a specific chain from a Bio.PDB.Structure.Structure."""
return struct.child_list[0].child_dict[chain_id]
def get_chains(struct):
"""Returns all chains of a Bio.PDB.Structure.Structure."""
return struct.child_list[0].child_list
def read_pdb(
read_path,
pdb_name=None
):
"""Reads a PDB file and returns a BioPython structure.
Args:
- read_path - PDB string file path to read from.
- pdb_name - a string to set as the name of the Bio.PDB.Structure.Structure.
Returns:
- structure - Bio.PDB.Structure.Structure.
"""
if pdb_name == None:
pdb_name = read_path.split('/')[-1].replace('.', '_')
parser = Bio.PDB.PDBParser(PERMISSIVE=False)
structure = parser.get_structure(pdb_name, read_path)
return structure
def save_cif(**kwargs):
"""Saves a Bio.PDB.Structure.Structure as a CIF file. Does not automatically
append .cif extension.
Args:
- struct - Bio.PDB.Structure.Structure to be saved.
- path - CIF string file path.
"""
struct = kwargs.pop('struct')
path = kwargs.pop('path')
with open(path, 'w') as file:
io = Bio.PDB.mmcifio.MMCIFIO()
io.set_structure(struct)
io.save(file)
# Temporary fix for CIF files not getting parsed properly by Rosetta: add
# a dummy section at the end. ("Note that the final table in the cif file
# may not be recognized - adding a dummy entry (like `_citation.title
# ""`) to the end of the file may help.")
file.writelines('_citation.title "Elfin"')
def save_pdb(**kwargs):
"""Saves a Bio.PDB.Structure.Structure as a PDB file.
Args:
- struct - Bio.PDB.Structure.Structure to be saved.
- save_path - string file path.
"""
struct = kwargs.pop('struct')
path = kwargs.pop('path')
io = Bio.PDB.PDBIO()
io.set_structure(struct)
io.save(path)
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
```
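A hypothetical round trip with the helpers above (the input path is a placeholder for any single-chain module PDB):
```python
struct = read_pdb('./resources/pdb_aligned/singles/D4.pdb')
first_chain = get_chains(struct)[0]
print(get_pdb_residue_count(struct), 'residues; first chain id:', first_chain.id)
save_cif(struct=struct, path='/tmp/module.cif')
```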
#### File: elfin/elfinpy/stitch.py
```python
from collections import deque
from collections import namedtuple
import sys
import argparse
import numpy as np
import Bio.PDB
import Bio.SubsMat.MatrixInfo
import Bio.PDB.StructureBuilder
try:
import utilities as utils
import pdb_utilities as pdb_utils
except ImportError:
from . import utilities as utils
from . import pdb_utilities as pdb_utils
def parse_args(args):
desc = ('Create CIF atom model from design solution JSON exported '
'by elfin-ui.')
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('input_file')
parser.add_argument('-o', '--out_file', default='')
parser.add_argument('-x', '--xdb', default='./resources/xdb.json')
parser.add_argument('-p', '--pdb_dir', default='./resources/pdb_aligned/')
parser.add_argument('-c', '--cappings_dir',
default='./resources/pdb_cappings')
parser.add_argument('-m', '--metadata_dir',
default='./resources/metadata/')
parser.add_argument('-s', '--show_fusion', action='store_true')
parser.add_argument('-d', '--disable_capping', action='store_true')
parser.add_argument('--skip_unused', action='store_true')
return parser.parse_args(args)
def main(test_args=None):
args = parse_args(sys.argv[1:] if test_args is None else test_args)
input_ext = args.input_file[args.input_file.rfind('.'):].lower()
if input_ext == '.json':
spec = utils.read_json(args.input_file)
xdb = utils.read_json(args.xdb)
struct = Stitcher(
spec,
xdb,
args.pdb_dir,
args.cappings_dir,
args.metadata_dir,
args.show_fusion,
args.disable_capping,
args.skip_unused
).run()
if args.out_file == '':
args.out_file = args.input_file
args.out_file = '.'.join(args.out_file.split('.')[:-1] + ['cif'])
print('Saving to:', args.out_file)
pdb_utils.save_cif(struct=struct, path=args.out_file)
else:
print('Unknown input file type: \"{}\"'.format(input_ext))
exit()
def validate_spec(spec):
if 'networks' not in spec:
return 'No networks object in spec.'
else:
if not spec['networks']:
return 'Spec file has no module networks.'
if 'pg_networks' in spec:
n_pgn = len(spec['pg_networks'])
if n_pgn > 0:
return ('Spec file has {} path guide networks. '
'It should have zero.').format(n_pgn)
def get_node(network, json_name):
node = network[json_name]
utils.check_mod_type(node['module_type'])
return node
TermIdentifierBase = namedtuple(
'TermIdentifierBase', ['ui_name', 'chain_id', 'term'])
class TermIdentifier(TermIdentifierBase):
"""Small class to hold terminus identifier data"""
def __new__(cls, *args, **kwargs):
self = super(TermIdentifier, cls).__new__(cls, *args, **kwargs)
utils.check_term_type(self.term)
return self
def __repr__(self):
return ':'.join((str(getattr(self, f)) for f in self._fields))
def same_chain_as(self, other):
return self.ui_name == other.ui_name and \
self.chain_id == other.chain_id
ChainIdentifierBase = namedtuple('ChainIdentifierBase', ['src', 'dst'])
class ChainIdentifier(ChainIdentifierBase):
"""
Small class to hold source and destination TermIdentifiers
for a chain
"""
def __new__(cls, *args, **kwargs):
self = super(ChainIdentifier, cls).__new__(cls, *args, **kwargs)
assert self.src.term == 'n'
assert self.dst.term == 'c'
return self
def __repr__(self):
return '{}->{}'.format(self.src, self.dst)
# Returns a list of all leaf TermIdentifiers.
#
# A leaf is a terminus that is either unoccupied or on a hub node.
def find_leaves(network, xdb):
try:
res = []
for ui_name in network:
node = get_node(network, ui_name)
mod_type = node['module_type']
mod_name = node['module_name']
chains = xdb['modules'][mod_type + 's'][mod_name]['chains']
cl = node['c_linkage']
nl = node['n_linkage']
if mod_type == 'hub':
for c in chains:
if chains[c]['n']:
res.append(TermIdentifier(ui_name, c, 'c'))
if chains[c]['c']:
res.append(TermIdentifier(ui_name, c, 'n'))
else: # Guaranteed to be 'single' thanks to get_node()
if not nl:
res.append(TermIdentifier(
ui_name, cl[0]['source_chain_id'], 'n'))
if not cl:
res.append(TermIdentifier(
ui_name, nl[0]['source_chain_id'], 'c'))
return res
except KeyError as ke:
print('KeyError:', ke)
print('Probably bad input format.')
exit()
# Walks the a chain starting with the src TermIdentifier, according to the
# network JSON object, yielding each TermIdentifier and next_linkage on the
# fly.
def walk_chain(network, src):
ui_name, chain_id, term = src
while True:
node = get_node(network, ui_name)
# Advance until either a hub or a single with dangling terminus is
# encountered.
next_linkages = [
l for l in node[utils.opposite_term(term) + '_linkage']
if l['source_chain_id'] == chain_id
]
        assert len(next_linkages) <= 1, \
            'Expected next_linkages size <= 1 (since ' \
            'each node has max 2 linkages, one N and one C).'
term_iden = TermIdentifier(ui_name, chain_id, term)
next_linkage = next_linkages[0] if next_linkages else None
yield term_iden, next_linkage
if not next_linkage:
break
ui_name, chain_id = \
next_linkage['target_mod'], \
next_linkage['target_chain_id']
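# Usage sketch: walk_chain() is a generator, typically consumed as
#   for term_iden, next_linkage in walk_chain(network, src):
#       ...  # next_linkage is None once the end of the chain is reached
# which is how decompose_network() below and Stitcher.deposit_chain() use it.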
# Walks the network and returns a generator of ChainIdentifiers.
#
# This method guarantees that src->dst is in the direction of N->C.
def decompose_network(network, xdb, skip_unused=False):
src_q = deque()
visited = set()
# Find entry node to begin walking the network with.
leaves = find_leaves(network, xdb)
    assert leaves, 'No leaf nodes for network.'
src_q.extend(leaves)
while src_q:
src = src_q.popleft()
if src in visited:
# This could happen when termini identifiers on hubs are added
# before the termini on the other end of those chains are popped
# out of the queue.
continue
visited.add(src)
mod_type = None
chain_walker = walk_chain(network, src)
for term_iden, next_linkage in chain_walker:
ui_name, chain_id, term = term_iden
node = get_node(network, ui_name)
mod_type = node['module_type']
if not next_linkage:
dst = TermIdentifier(
ui_name, chain_id, utils.opposite_term(term))
if dst not in visited:
visited.add(dst)
srcdst = (src, dst) if term == 'n' else (dst, src)
chain_iden = ChainIdentifier(*srcdst)
yield chain_iden
if mod_type == 'hub':
# Add unvisited components as new chain sources.
hub = xdb['modules']['hubs'][node['module_name']]
for hub_chain_id in hub['chains']:
hub_chain = hub['chains'][hub_chain_id]
for term in utils.TERM_TYPES:
if hub_chain[term]: # If not dormant.
iden = (ui_name, hub_chain_id, term)
if iden not in visited:
src_q.append(iden)
break
if mod_type == 'hub':
# This is a "bypass" hub, i.e. the current hub component has
# interfaceable N and C terms, and the current chain goes
# through it without ending here.
#
# In this case, check for unused components that might not
# need to be placed since they aren't leaves nor connect to
# any leaf nodes.
hub = xdb['modules']['hubs'][node['module_name']]
for hub_chain_id in hub['chains']:
if hub_chain_id == chain_id:
continue
c_links = len([l for l in node['c_linkage']
if l['source_chain_id'] == hub_chain_id])
n_links = len([l for l in node['n_linkage']
if l['source_chain_id'] == hub_chain_id])
if c_links == n_links == 0:
if skip_unused:
print('Skipping unused hub component:',
ui_name, hub_chain_id)
else:
srcdst = (
TermIdentifier(ui_name, hub_chain_id, 'n'),
TermIdentifier(ui_name, hub_chain_id, 'c')
)
yield ChainIdentifier(*srcdst)
ModInfo = namedtuple('ModInfo', ['mod_type', 'mod_name', 'res', 'res_n'])
def transform_residues(res, rot, tran):
for r in res:
for a in r:
# Do the transform manually because BioPython has non
# standard multiplication order.
a.coord = rot.dot(a.coord) + tran
# Blend residue lists M = (1-w)M + wF, where M is an atom coordinate in
# moving_res, F is an atom coordinate in fixed_res, and w is the corresponding
# weight in weights.
#
# Also removes dirty atoms. If residues are not the same (name), only backbone
# atoms are blended.
def blend_residues(moving_res, fixed_res, weights):
# temporarily disable blending because it's causing horrible
# residue distortions
return
assert len(moving_res) == len(fixed_res)
assert len(moving_res) == len(weights)
for m, f, w in zip(moving_res, fixed_res, weights):
# Remove dirty atoms. They seem to crop up in the process of
# optimizing PDBs even if preprocess.py already removed them once.
#
# Also remove atoms not in fixed residue - this is only known to
# happen to CYS (HG) and HIS (HE1/HE2).
if m.resname == f.resname:
# Complain about absent atoms
for a in m:
if a.name not in pdb_utils.DIRTY_ATOMS and a.name not in f:
print(a.name, 'not in', f.resname)
to_remove = [a for a in m if a.name in
pdb_utils.DIRTY_ATOMS or a.name not in f]
for da in to_remove:
m.detach_child(da.name)
# Compute new position based on combination of two positions.
def compute_coord(a, b): return (1-w)*a.coord + w*b.coord
for ma in m:
if m.resname == f.resname:
# Identical residues should have the same atom positions
assert ma.name in f
ma.coord = compute_coord(ma, f[ma.name])
else:
# Only modify backbone atoms.
if ma.name in pdb_utils.BACKBONE_NAMES and \
ma.name in f:
ma.coord = compute_coord(ma, f[ma.name])
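# Minimal sketch of the blending formula described above, using plain numpy
# arrays in place of Bio.PDB atom coordinates:
#   blended = (1 - w) * moving + w * fixed
# e.g. moving = [0, 0, 0], fixed = [2, 2, 2], w = 0.25 gives [0.5, 0.5, 0.5].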
class Stitcher:
def __init__(
self,
spec,
xdb,
pdb_dir,
cappings_dir,
metadata_dir,
show_fusion=False,
disable_capping=False,
skip_unused=False,
):
spec_complaint = validate_spec(spec)
if spec_complaint:
print('Error:', spec_complaint)
exit()
self.spec = spec
self.xdb = xdb
self.pdb_dir = pdb_dir
self.cr_dir = cappings_dir
self.show_fusion = show_fusion
self.disable_capping = disable_capping
self.skip_unused = skip_unused
self.si = Bio.PDB.Superimposer()
self.chain_id = 0
        # Parse and convert capping repeat indices into a dictionary
self.capping_repeat_idx = {}
meta_csv = utils.read_csv(
metadata_dir + '/repeat_indicies.csv', delim=' ')
for row in meta_csv:
mod_name = row[0].split('.')[0].replace('DHR', 'D')
self.capping_repeat_idx[mod_name] = \
[int(idx) for idx in row[1:]]
def deposit_chain(self, network, chain_iden):
# n -src-> c ... n -dst-> c
print('Deposit chain:', chain_iden)
src, dst = chain_iden
atom_chain = self.new_chain()
# Build context to pass to subroutines.
def context(): return 0
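        # (The function object serves only as a cheap attribute container;
        # its return value is never used.)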
context.atom_chain = atom_chain
context.network = network
context.last_node = None
chain_walker = walk_chain(network, src)
for term_iden, next_linkage in chain_walker:
context.term_iden = term_iden
context.next_linkage = next_linkage
print('Deposit {}->{}'.format(repr(term_iden),
next_linkage['target_mod']
if next_linkage else None))
context.node = get_node(network, term_iden.ui_name)
context.mod_info = self.get_mod_info(
context.node, term_iden.chain_id)
context.pref_res = []
context.main_res = [r.copy() for r in context.mod_info.res]
context.suff_res = []
if context.last_node:
# Midway through the chain - always displace N term.
self.displace_terminus(context, 'n')
else:
# Start of chain on the N side - cap N term.
self.cap_terminus(context, 'n')
if next_linkage:
# There's a next node - always displace C term.
self.displace_terminus(context, 'c')
else:
# There's no next node - cap C term.
self.cap_terminus(context, 'c')
all_res = context.pref_res + context.main_res + context.suff_res
for r in all_res:
r.id = (r.id[0], self.next_residue_id(), r.id[2])
rot = np.transpose(np.asarray(context.node['rot']))
r.transform(rot, context.node['tran'])
atom_chain.add(r)
if self.show_fusion:
# curr_chain = Bio.PDB.Chain.Chain(chain_id)
# chains.append(curr_chain)
print('TODO: show_fusion')
context.last_node = context.node
context.last_term_iden = term_iden
print('')
self.model.add(atom_chain)
def cap_terminus(self, deposit_context, term):
utils.check_term_type(term)
if self.disable_capping:
print('Capping disabled')
return
# Unpack context.
mod_info = deposit_context.mod_info
residues = deposit_context.main_res
chain_id = deposit_context.term_iden.chain_id
if mod_info.mod_type == 'single':
cap_name = mod_info.mod_name.split('_')[0 if term == 'n' else -1]
elif mod_info.mod_type == 'hub':
# If we were to cap hubs, we need to first check whether N
# term is an open terminus in this hub.
hub = self.xdb['modules']['hubs'][mod_info.mod_name]
chain = hub['chains'][chain_id]
cap_name = chain['single_name']
if chain[term]:
# Continue to capping as usual.
pass
else:
# No need to cap a hub component term that is a
# closed interface.
return
print('Capping {}({})'.format(term, cap_name))
pdb_path = '{}/{}_{}.pdb'.format(self.cr_dir, cap_name,
'NI' if term == 'n' else 'IC')
cap_and_repeat = pdb_utils.read_pdb(pdb_path)
cap_res = self.get_capping(
prime_res=residues,
cap_res=pdb_utils.get_residues(cap_and_repeat),
cr_r_ids=self.capping_repeat_idx[cap_name],
term=term
)
if term == 'n':
deposit_context.pref_res = cap_res
else:
deposit_context.suff_res = cap_res
# Computes the capping residues. Displaces primary residues (thus modifies
# the prime_res parameter).
def get_capping(self, prime_res, cap_res, cr_r_ids, term):
utils.check_term_type(term)
# Find residue index at which the residue id[1] matches capping
# start index. Residue id often does not start from 1 and is never
# 0-based.
rid_range = tuple(cr_r_ids[:2]) if term == 'n' else tuple(cr_r_ids[2:])
for i, el in enumerate(cap_res):
if el.id[1] == rid_range[0]:
match_start = i
break
else:
raise ValueError('Could not find residue index {}'.format(
rid_range[0]))
match_len = rid_range[1] - rid_range[0] + 1 # Inclusive
match_end = match_start + match_len
# N: match left, C: match right
prime_align_res = prime_res[:match_len] \
if term == 'n' else \
prime_res[-match_len:]
cap_align_res = cap_res[match_start:match_end]
prim_atoms = [r['CA'] for r in prime_align_res]
cap_atoms = [r['CA'] for r in cap_align_res]
self.si.set_atoms(prim_atoms, cap_atoms)
rot, tran = self.si.rotran
result = []
cap_protrude_res = cap_res[:match_start] + cap_res[match_end:]
for r in cap_protrude_res:
rr = r.copy()
rr.transform(rot, tran)
result.append(rr)
# Also transform cap align res to the right frame.
for r in cap_align_res:
r.transform(rot, tran)
# Displace prime_res using linear weights, the same method as
# displace_terminus().
# Linear weights (0, 1] - default for 'c'.
disp_w = [i/match_len for i in range(1, match_len + 1)]
if term == 'n':
disp_w.reverse() # Want [1, 0) for N term.
blend_residues(prime_align_res, cap_align_res, disp_w)
return result
def displace_terminus(self, deposit_context, term):
utils.check_term_type(term)
if term == 'n':
assert deposit_context.last_node
# Node A is on the C end, so we get the N end node, and swap
# order.
a_node = deposit_context.last_node
a_chain_id = deposit_context.last_term_iden.chain_id
a_info = self.get_mod_info(a_node, a_chain_id)
b_node = deposit_context.node
b_chain_id = deposit_context.term_iden.chain_id
b_info = deposit_context.mod_info
elif term == 'c':
next_linkage = deposit_context.next_linkage
assert next_linkage
# Node A is on the N end, so we get the C end node.
a_node = deposit_context.node
a_chain_id = deposit_context.term_iden.chain_id
a_info = deposit_context.mod_info
b_ui_name, b_chain_id = next_linkage['target_mod'], \
next_linkage['target_chain_id']
b_node = get_node(deposit_context.network, b_ui_name)
b_info = self.get_mod_info(b_node, b_chain_id)
types = (a_info.mod_type, b_info.mod_type)
if types == ('single', 'single'):
a_single_name = a_info.mod_name
b_single_name = b_info.mod_name
elif types == ('hub', 'single'):
hub = self.xdb['modules']['hubs'][a_info.mod_name]
a_single_name = hub['chains'][a_chain_id]['single_name']
b_single_name = b_info.mod_name
elif types == ('single', 'hub'):
a_single_name = a_info.mod_name
hub = self.xdb['modules']['hubs'][b_info.mod_name]
b_single_name = hub['chains'][b_chain_id]['single_name']
else:
raise ValueError('Unknown type tuple:', types)
a_single_len = self.get_single_len(a_single_name)
b_single_len = self.get_single_len(b_single_name)
dbl_name = a_single_name + '-' + b_single_name
dbl_pdb = pdb_utils.read_pdb(
self.pdb_dir + '/doubles/' + dbl_name + '.pdb')
dbl_res = pdb_utils.get_residues(dbl_pdb)
main_res = deposit_context.main_res
if term == 'n':
# Displace N term residues (first half of main_res) based on
# linear weights. In the double, start from B module.
#
# main_res: [n ... | ... c]
# disp_w: [1....0]
# dbl: [n ... | ... c] [n ... | ... c]
if b_info.mod_type == 'hub':
# Lift double (in A frame) to hub arm frame with A at the
# arm's tip.
chains = \
self.xdb['modules']['singles'][a_single_name]['chains']
tx_id = chains[a_chain_id]['c'][b_info.mod_name][b_chain_id]
tx = self.xdb['n_to_c_tx'][tx_id]
rot = np.asarray(tx['rot'])
tran = np.asarray(tx['tran'])
else: # Guaranteed to be 'single' thanks to get_node()
# Drop double to B frame.
rot, tran = self.get_drop_tx(a_single_name, b_single_name)
transform_residues(dbl_res, rot, tran)
disp_n = b_single_len // 2
disp_w = [i/disp_n for i in range(1, disp_n + 1)]
main_disp = main_res[:disp_n]
dbl_part = dbl_res[-b_single_len:-b_single_len+disp_n]
disp_w.reverse() # Make it 1 -> 0
elif term == 'c':
# Displace C term residues (second half of main_res) based on
# linear weights. In the double, start from end of A module and go
# backwards.
#
# main_res: [n ... | ... c]
# disp_w: [0....1]
# dbl: [n ... | ... c] [n ... | ... c]
if a_info.mod_type == 'hub':
# Step 1: Drop double to B frame.
rot, tran = self.get_drop_tx(a_single_name, b_single_name)
# Step 2: Lift double (in B frame) to hub arm frame.
chains = self.xdb['modules']['hubs'][a_info.mod_name]['chains']
tx_id = chains[a_chain_id]['c'][b_single_name][b_chain_id]
tx = self.xdb['n_to_c_tx'][tx_id]
hub_rot = np.asarray(tx['rot'])
hub_tran = np.asarray(tx['tran'])
tran = hub_rot.dot(tran) + hub_tran
rot = hub_rot.dot(rot)
transform_residues(dbl_res, rot, tran)
disp_n = a_single_len // 2
disp_w = [i/disp_n for i in range(1, disp_n + 1)]
main_disp = main_res[-disp_n:]
dbl_part = dbl_res[disp_n:disp_n+disp_n]
blend_residues(main_disp, dbl_part, disp_w)
def get_drop_tx(self, a_single_name, b_single_name):
a_chains = self.xdb['modules']['singles'][a_single_name]['chains']
assert len(a_chains) == 1
a_chain_id = list(a_chains.keys())[0]
a_b_chains = a_chains[a_chain_id]['c'][b_single_name]
assert len(a_b_chains) == 1
b_chain_id = list(a_b_chains.keys())[0]
tx_id = a_b_chains[b_chain_id]
tx = self.xdb['n_to_c_tx'][tx_id]
# Inverse tx because dbgen.py computes the tx that takes the
# single B module to part B inside double.
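        # For a rigid transform (R, t) the inverse is (R^T, -R^T t), which is
        # what the next two lines compute.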
rot = np.transpose(tx['rot'])
tran = rot.dot(-np.asarray(tx['tran']))
return rot, tran
# Returns the number of residues in a single module.
def get_single_len(self, mod_name):
chains = self.xdb['modules']['singles'][mod_name]['chains']
assert len(chains) == 1
chain_id = list(chains.keys())[0]
return chains[chain_id]['n_residues']
def get_mod_info(self, node, chain_id):
mod_type = node['module_type']
mod_name = node['module_name']
# Obtain module residues.
pdb = pdb_utils.read_pdb(self.pdb_dir + '/' + mod_type +
's/' + mod_name + '.pdb')
res = pdb_utils.get_residues(pdb, chain_id)
res_n = len(res)
return ModInfo(mod_type, mod_name, res, res_n)
def deposit_chains(self, network):
chain_iden_gen = decompose_network(network, self.xdb, self.skip_unused)
for chain_iden in chain_iden_gen:
self.deposit_chain(network, chain_iden)
def new_chain(self):
return Bio.PDB.Chain.Chain(self.next_chain_id())
def next_chain_id(self):
cid = str(self.chain_id)
self.chain_id += 1
return cid
def reset_residue_id(self):
self.residue_id = 1
def next_residue_id(self):
rid = self.residue_id
self.residue_id += 1
return rid
def run(self):
self.reset_residue_id()
self.model = Bio.PDB.Model.Model(0)
if self.show_fusion:
print('Note: show_fusion is on')
networks = self.spec['networks']
for nw_name in networks:
print('Processing network \"{}\"'.format(nw_name))
complaint = self.deposit_chains(networks[nw_name])
if complaint:
                print('Error:', complaint)
exit()
# Create output
sb = Bio.PDB.StructureBuilder.StructureBuilder()
sb.init_structure('0')
structure = sb.get_structure()
structure.add(self.model)
return structure
if __name__ == '__main__':
utils.safe_exec(main)
```
#### File: elfin/elfinpy/v1_design_convert.py
```python
import argparse, sys
import copy
try:
from elfin_graph import ElfinGraph
from elfin_node import ElfinNode
from utilities import *
except ImportError as e:
from .elfin_graph import ElfinGraph
from .elfin_node import ElfinNode
from .utilities import *
def compute_old_graph_txm(xdb, graph):
nodes = graph.nodes
double_data = xdb['double_data']
for i in range(len(nodes)-1):
node_a = nodes[i]
node_b = nodes[i+1]
rel = double_data[node_a.name][node_b.name]
for j in range(i+1):
nodes[j].transform(rel['rot'], rel['tran'])
def parse_args(args):
    parser = argparse.ArgumentParser(description='Converts old Elfin core intermediate output into new format')
parser.add_argument('input') # No dash means mandatory
parser.add_argument('--output')
parser.add_argument('--xdb_path', default='resources/xdb.json')
parser.add_argument('--multichain_test', action='store_true')
return parser.parse_args(args)
def v1_to_v2(input_json, xdb_path, multichain_test=False):  # Elfin core output
# Make sure we're working with the old format
keys = input_json.keys()
if not 'nodes' in keys:
print('Error: input file is not a v1 elfin solution file.')
exit(1)
n_nodes = len(input_json['nodes'])
nodes = [
ElfinNode(
id=i,
name=el,
trim=[(False if i == 0 else True), (False if i == n_nodes - 1 else True)],
cterm_node_id=((i+1) if i < n_nodes - 1 else -1)
) for (i, el) in enumerate(input_json['nodes'])
]
graph = ElfinGraph('c1', nodes) # c1 for chain number 1
graphs = [graph]
assert(len(graphs) == 1)
xdb = read_json(xdb_path)
for g in graphs:
compute_old_graph_txm(xdb, g)
if multichain_test:
graphs.append(copy.deepcopy(graph))
# Note: flipping z direction can cause problems in PyMol
# visualisation (can't view as cartoon)
graphs[0].transform([[-1,0,0],[0,-1,0],[0,0,1]],[100,100,0])
graphs[1].transform([[1,0,0],[0,1,0],[0,0,1]],[-100,-100,0])
graphs[1].name = 'c2'
return graphs
def main(test_args=None):
print('Deprecated. For code reference only.')
exit()
args = parse_args(sys.argv[1:] if test_args is None else test_args)
graphs = v1_to_v2(
read_json(args.input),
args.xdb_path,
args.multichain_test)
output_file = args.output
if output_file == None:
output_file = args.input.replace('.json', '.v2.json')
with open(output_file, 'w') as ofp:
json.dump(graphs, ofp, default=lambda o: o.__dict__)
print('Saved to: ' + output_file)
if __name__ == '__main__':
main()
``` |
{
"source": "joy13975/elfin-old",
"score": 2
} |
#### File: scripts/PyMolUtils/ColorByRes.py
```python
import colorsys,sys
from pymol import cmd
aa_1_3 = {
'A': 'ALA',
'C': 'CYS',
'D': 'ASP',
'E': 'GLU',
'F': 'PHE',
'G': 'GLY',
'H': 'HIS',
'I': 'ILE',
'K': 'LYS',
'L': 'LEU',
'M': 'MET',
'N': 'ASN',
'P': 'PRO',
'Q': 'GLN',
'R': 'ARG',
'S': 'SER',
'T': 'THR',
'V': 'VAL',
'W': 'TRP',
'Y': 'TYR',
}
aa_3_1 = {
'ALA' : 'A',
'CYS' : 'C',
'ASP' : 'D',
'GLU' : 'E',
'PHE' : 'F',
'GLY' : 'G',
'HIS' : 'H',
'ILE' : 'I',
'LYS' : 'K',
'LEU' : 'L',
'MET' : 'M',
'ASN' : 'N',
'PRO' : 'P',
'GLN' : 'Q',
'ARG' : 'R',
'SER' : 'S',
'THR' : 'T',
'VAL' : 'V',
'TRP' : 'W',
'TYR' : 'Y',
}
aa_types = {
'A': 'hydrophobic',
'C': 'cysteine',
'D': 'negative',
'E': 'negative',
'F': 'aromatic',
'G': 'glycine',
'H': 'polar',
'I': 'hydrophobic',
'K': 'positive',
'L': 'hydrophobic',
'M': 'hydrophobic',
'N': 'polar',
'P': 'proline',
'Q': 'polar',
'R': 'positive',
'S': 'polar',
'T': 'polar',
'V': 'hydrophobic',
'W': 'aromatic',
'Y': 'aromatic',
}
def color_by_restype(selection="all",
hydrophobic='grey90',
aromatic='lightpink',
polar='palecyan',
positive='blue',
negative='red',
cysteine='paleyellow',
proline='palegreen',
glycine='green',
):
"""
usage: color_by_restype <selection>, <optional overrides of default colors>
e.g. color_by_restype protein and chain A, hydrophobic=wheat
Residue groups: Default colours:
hydrophobic: AILMV grey90
aromatic: FWY lightpink
polar: HNQST palecyan
positive: KR blue
negative: DE red
cysteine: C paleyellow
proline: P palegreen
glycine: G green
"""
colors = {
'hydrophobic': hydrophobic,
'aromatic': aromatic,
'polar': polar,
'positive': positive,
'negative': negative,
'cysteine': cysteine,
'proline': proline,
'glycine': glycine,
}
for aa in aa_types:
sel = selection + " and r. %s" % aa_1_3[aa]
# print sel,"-->", colors[aa_types[aa]]
cmd.color(colors[aa_types[aa]],sel)
cmd.extend("color_by_restype",color_by_restype)
```
#### File: scripts/PyMolUtils/Compare.py
```python
import json
import numpy as np
from pymol import cmd
# Not sure how to just figure out where elfin is located
# So we need to load our library this way
elfinDir = '/Users/joy/src/elfin/'
elfinPyLibDir = elfinDir + '/src/python/'
elfinMovieDir = elfinDir + '/movieOutput/'
import imp
utils = imp.load_source('utils', elfinPyLibDir + '/utils.py')
Kabsch = imp.load_source('Kabsch', elfinPyLibDir + '/Kabsch.py')
def compare_sol(specFile, solCSV):
if specFile.rfind('.csv') != -1:
specPts = utils.readCSVPoints(specFile)
elif specFile.rfind('.json') != -1:
with open(specFile, 'r') as file:
specPts = np.asarray(json.load(file)['coms'])
else:
print 'Unknown spec file format'
solPts = utils.readCSVPoints(solCSV)
# Centre both pts
centredSpec = specPts - np.mean(specPts, axis=0)
centredSol = solPts - np.mean(solPts, axis=0)
# Draw specification
draw_pts(centredSpec, color=[0.7,0,0])
# Equalise sample points
specUpPts = utils.upsample(centredSpec, centredSol)
draw_pts(specUpPts, color=[0.5,0.5,0])
# Find Kabsch rotation for solution -> spec
R = Kabsch.kabsch(centredSpec, specUpPts)
centredSpecR = np.dot(centredSpec, R)
draw_pts(centredSpecR, color=[0,0.5,0.7])
cmd.reset()
cmd.set("depth_cue", 0)
cmd.extend("compare_sol", compare_sol)
```
#### File: scripts/Python/GenPymolTransform.py
```python
import argparse, sys
import numpy as np
from utils import *
def main():
    ap = argparse.ArgumentParser(description='Generate Grid Search configurations')
ap.add_argument('pairName')
ap.add_argument('--xdbPath', default='res/xDB.json')
ap.add_argument('--pairDir', default='/Users/joy/src/elfin/res/aligned/pair/')
args = ap.parse_args()
xDB = readJSON(args.xdbPath)
resetString = \
('delete {}\n' + \
'load {}\n' + \
'hide everything, {}\n' + \
'show cartoon, {}\n') \
.format(
args.pairName,
args.pairDir + '/' + args.pairName + '.pdb',
args.pairName,
args.pairName
)
singleNames = args.pairName.split('-')
rel = xDB['pairsData'][singleNames[0]][singleNames[1]]
rotTp = np.transpose(rel['rot'])
rotTpTran = np.append(rotTp, np.transpose([rel['tran']]), axis=1)
pymolRotMat = np.append(rotTpTran, [[0,0,0,1]], axis=0)
pymolRotMatStr = '[' + ', '.join(map(str, pymolRotMat.ravel())) + ']'
txString = \
'cmd.transform_selection({}, {}, homogenous=0)' \
.format(
"\'" + args.pairName + "\'",
pymolRotMatStr
)
print resetString
print txString
if __name__ == '__main__':
main()
```
#### File: scripts/Python/xDBStat.py
```python
from utils import *
def main():
(avgD, minD, maxD) = getXDBStat(readJSON('res/xDB.json'))
print 'Distances avg: {}, min: {}, max: {}'.format(avgD, minD, maxD)
if __name__ =='__main__': safeExec(main)
```
#### File: elfin-old/tests/TestLv2.py
```python
import glob
from Designer import *
import Greedy
from utils import *
import time
import numpy as np
def main():
xDB = readJSON('res/xDB.json')
bmDir = 'bm/l10'
designers = []
designers.append(Greedy.GreedyDesigner(xDB, 'maxHeavy'))
# MCDesigner
# ...
# Process all benchmarks
for jsonFile in glob.glob(bmDir + '/*.json'):
spec = readJSON(jsonFile)
# we add one point between each pair of com
coms = np.asarray(spec['coms'])
targetLen = len(coms)
mids = np.asarray([np.mean(p, axis=0) for p in zip(coms, np.roll(coms, -1, axis=0))[0:-1]])
spec['coms'] = np.append([val for p in zip(coms, mids) for val in p], [coms[-1]], axis=0)
for designer in designers:
designerName = designer.__class__.__name__
print 'Benchmarking {} on {}, target length={}'.format(
designerName, jsonFile, targetLen)
startTime = time.clock()
(nodes,shape,score) = designer.design(spec, targetLen)
print "{:.2f}s, score: {}".format(time.clock() - startTime, score)
if nodes == spec['nodes']:
print 'Pass'
else:
print 'Failed'
# pauseCode()
makePdbFromNodes(xDB, nodes, 'res/centered_pdb/pair', jsonFile.replace('.json', '_' + designerName + '_LV2.pdb'))
if __name__ =='__main__': safeExec(main)
``` |
{
"source": "joy13975/elfin",
"score": 3
} |
#### File: extensions/deprecated/center_of_mass.py
```python
from __future__ import print_function
from pymol import cmd
def com(selection, state=None, mass=None, object=None, quiet=0, **kwargs):
quiet = int(quiet)
if (object == None):
try:
object = cmd.get_legal_name(selection)
object = cmd.get_unused_name(object + "_COM", 0)
except AttributeError:
object = 'COM'
cmd.delete(object)
if (state != None):
x, y, z = get_com(selection, mass=mass, quiet=quiet)
if not quiet:
print("[%f %f %f]" % (x, y, z))
cmd.pseudoatom(object, pos=[x, y, z], **kwargs)
cmd.show("spheres", object)
else:
for i in range(cmd.count_states()):
x, y, z = get_com(selection, mass=mass, state=i + 1, quiet=quiet)
if not quiet:
# print("State %d:%f %f %f" % (i + 1, x, y, z))
                print("[%f, %f, %f]" % (x, y, z))
cmd.pseudoatom(object, pos=[x, y, z], state=i + 1, **kwargs)
cmd.show("spheres", 'last ' + object)
cmd.extend("com", com)
def get_com(selection, state=1, mass=None, quiet=1):
"""
DESCRIPTION
Calculates the center of mass
Author: <NAME>
Michigan State University
slaw (at) msu . edu
"""
quiet = int(quiet)
totmass = 0.0
if mass != None and not quiet:
print("Calculating mass-weighted COM")
state = int(state)
model = cmd.get_model(selection, state)
x, y, z = 0, 0, 0
for a in model.atom:
if (mass != None):
m = a.get_mass()
x += a.coord[0] * m
y += a.coord[1] * m
z += a.coord[2] * m
totmass += m
else:
x += a.coord[0]
y += a.coord[1]
z += a.coord[2]
if (mass != None):
return [x / totmass, y / totmass, z / totmass]
else:
return [x / len(model.atom), y / len(model.atom), z / len(model.atom)]
cmd.extend("get_com", get_com)
# vi:expandtab:sw=3
```
#### File: extensions/deprecated/compare_solutions.py
```python
from pymol import cmd
import numpy as np
import json
import elfinpy
def compare_solutions(spec_file=None, sol_csv_file=None):
"""
Compares solution center-of-mass points again the specification.
Args:
- spec_file - a csv or json file string path
- sol_csv_file - a csv file string path
"""
if spec_file is None or sol_csv_file is None:
print(compare_solutions.__doc__)
else:
if spec_file.rfind('.csv') != -1:
spec_pts = elfinpy.read_csv_points(spec_file)
elif spec_file.rfind('.json') != -1:
with open(spec_file, 'r') as file:
spec_pts = np.asarray(json.load(file)['coms'])
else:
            print('Unknown spec file format')
            return
sol_pts = elfinpy.read_csv_points(sol_csv_file)
# Centre both pts
centred_spec = spec_pts - np.mean(spec_pts, axis=0)
centred_sol = sol_pts - np.mean(sol_pts, axis=0)
# Draw specification
draw_pts(centred_spec, color=[0.7,0,0])
# Equalise sample points
specUpPts = elfinpy.upsample(centred_spec, centred_sol)
draw_pts(specUpPts, color=[0.5,0.5,0])
# Find Kabsch rotation for solution -> spec
R = kabsch.run_kabsch(centred_spec, specUpPts)
centredSpecR = np.dot(centred_spec, R)
draw_pts(centredSpecR, color=[0,0.5,0.7])
cmd.reset()
cmd.set("depth_cue", 0)
cmd.extend("compare_solutions", compare_solutions)
print('Compare Solutions Loaded')
def main():
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
```
#### File: pymol_scripts/extensions/extension_template.py
```python
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
in_pymol = False
try:
import pymol
in_pymol = True
except ImportError as ie:
main()
if in_pymol:
from pymol import cmd
print('Template Extension Loaded')
``` |
{
"source": "joy13975/elfin-ui",
"score": 2
} |
#### File: elfin-ui/elfin/livebuild_helper.py
```python
import colorsys
import random
import json
import collections
import functools
import bpy
import bmesh
import mathutils
import mathutils.bvhtree
from . import addon_paths
# Global (Const) Variables -----------------------
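# Coordinates from the xdb (PyMol units) are divided by this factor to obtain
# Blender units (see pymol_to_blender_scale() below).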
blender_pymol_unit_conversion = 10.0
# Color Change Placeholder
#
# An option for Place/Extrude operator enums so that user can change the
# color before choosing a module. This makes changing display color fast
# because once a module is selected via the enum list, changing the display
# color causes constant re-linking and that causes lag.
color_change_placeholder = '-Change Color-'
color_change_placeholder_enum_tuple = \
(color_change_placeholder, color_change_placeholder, '')
# Prototype List Empty Placeholder
# An option to inform the user that the prototype list is empty
empty_list_placeholder = '-List Empty-'
empty_list_placeholder_enum_tuple = \
(empty_list_placeholder, empty_list_placeholder, '')
nop_enum_selectors = {
color_change_placeholder,
empty_list_placeholder
}
# Classes ----------------------------------------
# Singleton Metaclass
# Credits to https://stackoverflow.com/questions/6760685/
# creating-a-singleton-in-python
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(
Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
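# Usage note: classes built with this metaclass (LivebuildState and ColorWheel
# below) always return the same instance, e.g.
#   LivebuildState() is LivebuildState()  # -> True
# so cached data such as the xdb and module library is shared across the addon.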
class LivebuildState(metaclass=Singleton):
def __init__(self):
self.reset()
def get_all_extrudables(self, sel_mod):
self.n_extrudables = get_extrusion_prototype_list(sel_mod, 'n')
self.c_extrudables = get_extrusion_prototype_list(sel_mod, 'c')
return self.n_extrudables, self.c_extrudables
def update_derivatives(self):
res = [color_change_placeholder_enum_tuple] + \
[module_enum_tuple(mod_name)
for mod_name in self.get_all_module_names()]
self.placeables = res if len(res) > 1 else [
empty_list_placeholder_enum_tuple]
# Find max hub termini
self.max_hub_branches = 0
for hub_name in self.xdb['modules']['hubs']:
hub_branches = max_hub_free_termini(hub_name, self.xdb)
self.max_hub_branches = max(hub_branches, self.max_hub_branches)
def get_all_module_names(self):
groups = (self.xdb['modules']['singles'], self.xdb['modules']['hubs'])
xdb_mod_names = {k for group in groups for k in group.keys()}
return (mod_name for mod_name in self.library
if mod_name in xdb_mod_names)
def load_xdb(self, skip_derivatives_update=False):
with open(addon_paths.xdb_path, 'r') as file:
self.xdb = collections.OrderedDict(json.load(file))
if not skip_derivatives_update:
self.update_derivatives()
print('{}: Xdb loaded'.format(__class__.__name__))
def load_library(self, skip_derivatives_update=False):
with bpy.types.BlendDataLibraries.load(addon_paths.modlib_path) as \
(data_from, data_to):
self.library = data_from.objects
if not skip_derivatives_update:
self.update_derivatives()
print('{}: Module library loaded'.format(__class__.__name__))
def load_path_guide(self):
with bpy.types.BlendDataLibraries.load(addon_paths.pguide_path) as \
(data_from, data_to):
self.pguide = data_from.objects
print('{}: Path guide library loaded'.format(__class__.__name__))
def reset(self):
self.n_extrudables = [empty_list_placeholder_enum_tuple]
self.c_extrudables = [empty_list_placeholder_enum_tuple]
self.placeables = [empty_list_placeholder_enum_tuple]
self.max_hub_branches = 0
self.load_all()
self.num = 3
def load_all(self):
self.load_xdb(skip_derivatives_update=True)
self.load_library(skip_derivatives_update=True)
self.load_path_guide()
self.update_derivatives()
random.seed()
class ColorWheel(metaclass=Singleton):
hue_diff = 0.14
lightness_base = 0.4
lightness_variance = 0.3
saturation_base = 0.8
saturation_variance = .2
def __init__(self):
self.hue = random.random()
def next_color(self, ):
self.hue += (self.hue_diff / 2) + random.random() * (1 - self.hue_diff)
lightness = self.lightness_base + \
random.random() * self.lightness_variance
saturation = self.saturation_base + \
random.random() * self.saturation_variance
return colorsys.hls_to_rgb(
self.hue % 1.0,
lightness % 1.0,
saturation % 1.0
)
class object_receiver:
"""Decorator for functions that receive a Blender object.
Passes object to func by argument if specified, otherwise use the
selected object.
"""
def __init__(self, func):
self.func = func
functools.update_wrapper(self, func)
def __call__(self, obj=None, *args, **kwargs):
if not obj:
if get_selection_len() == 0:
print('No object specified nor selected.')
return
return [self.func(obj, *args, **kwargs)
for obj in get_selected(-1)]
else:
return self.func(obj, *args, **kwargs)
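# Usage note for the decorator above: a wrapped function may be called with an
# explicit Blender object, or with none, in which case it is mapped over the
# current selection, e.g.
#   get_mirrors(obj)  # single object
#   get_mirrors()     # every selected object (returns a list of results)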
# Quick Access Methods ---------------------------
@object_receiver
def get_mirrors(obj):
return obj.elfin.mirrors
@object_receiver
def get_elfin(obj):
return obj.elfin
@object_receiver
def show_links(obj):
obj.elfin.show_links()
def count_obj():
return len(bpy.data.objects)
def get_xdb():
return LivebuildState().xdb
def hub_is_symmetric(hub_name):
return LivebuildState().xdb['modules']['hubs'][hub_name]['symmetric']
def get_n_to_c_tx(mod_a, chain_a, mod_b, chain_b):
xdb = get_xdb()
if mod_is_single(mod_a):
meta_a = xdb['modules']['singles'][mod_a]
elif mod_is_hub(mod_a):
meta_a = xdb['modules']['hubs'][mod_a]
tx_id = meta_a['chains'][chain_a]['c'][mod_b][chain_b]
tx_json = xdb['n_to_c_tx'][tx_id]
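    # Build a 4x4 homogeneous transform from the stored 3x3 rotation matrix
    # and translation vector.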
tx = mathutils.Matrix(tx_json['rot']).to_4x4()
tx.translation = tx_json['tran']
return tx
def mod_is_hub(mod_name):
return mod_name in get_xdb()['modules']['hubs']
def mod_is_single(mod_name):
return mod_name in get_xdb()['modules']['singles']
def get_selection_len():
return len(bpy.context.selected_objects)
def get_selected(n=1):
"""
Return the first n selected object, or None if nothing is selected.
"""
if get_selection_len():
selection = bpy.context.selected_objects
if n == 1:
return selection[0]
elif n == -1:
return selection[:]
else:
return selection[:n]
else:
return []
# Helpers ----------------------------------------
def add_module(mod_name, color, follow_selection=True):
lmod = import_module(mod_name)
give_module_new_color(lmod, color)
# Cache active_object because it changes with create_network()
location = [0, 0, 0]
if follow_selection and get_selection_len():
location = bpy.context.active_object.\
matrix_world.translation.copy()
for s in get_selected(-1):
s.select = False
# Imported objects are hidden by default.
lmod.hide = False
# Create a new empty object as network parent.
network_parent = create_network('module')
network_parent.matrix_world.translation = location
lmod.parent = network_parent
# Select new object.
lmod.select = True
# If not set to lmod, it would be the parent, which will lead to
# failure when trying to select parent via Shift-G + P
bpy.context.scene.objects.active = lmod
return lmod
def get_ordered_selection():
# Returns objects such that obj_a was selected before obj_b.
obj_a, obj_b = None, None
if get_selection_len() == 2:
obj_a, obj_b = get_selected(-1)
if bpy.context.active_object == obj_a:
obj_a, obj_b = obj_b, obj_a
return obj_a, obj_b
def max_hub_free_termini(mod_name, xdb=None):
free_termini = 0
if not xdb:
xdb = get_xdb()
hub_meta = xdb['modules']['hubs'][mod_name]
for chain_id, termini_meta in hub_meta['chains'].items():
free_termini += \
(len(termini_meta['n']) > 0) + (len(termini_meta['c']) > 0)
return free_termini
def selection_check(
selection=None,
n_modules=0,
n_joints=0,
n_bridges=0,
n_networks=0,
n_pg_networks=0):
"""Counts objects in all (or provided) selection and checks whether all
expected counts are met.
"""
if selection is None:
selection = get_selected(-1)
for obj in selection:
if obj.elfin.is_module():
n_modules -= 1
if n_modules < 0:
return False
elif obj.elfin.is_joint():
n_joints -= 1
if n_joints < 0:
return False
elif obj.elfin.is_bridge():
n_bridges -= 1
if n_bridges < 0:
return False
elif obj.elfin.is_network():
n_networks -= 1
if n_networks < 0:
return False
elif obj.elfin.is_pg_network():
n_pg_networks -= 1
if n_pg_networks < 0:
return False
return n_modules == n_joints == n_bridges == \
n_networks == n_pg_networks == 0
def find_symmetric_hub(network_parents):
"""Returns the symmetric hub center piece if there is one, else None.
"""
xdb = get_xdb()
walkers = [walk_network(np.children[0]) for np in network_parents]
for walker in walkers:
for m in walker:
m_name = m.elfin.module_name
if m_name in xdb['modules']['hubs'] and hub_is_symmetric(m_name):
return m
return None
def transfer_network(mod, existing_network=None):
"""Move all modules or pguides on the same network as mod under a new
network parent object.
"""
old_network = mod.parent
if old_network.elfin.is_network():
network_type = 'module'
elif old_network.elfin.is_pg_network():
network_type = 'pguide'
else:
print('Invalid object passed to transfer_network():', mod)
return
new_network = create_network(network_type)
# Gather all network objects into a list and calculate COM
com = mathutils.Vector([0, 0, 0])
network_obj = []
walker = walk_network if network_type == 'module' else walk_pg_network
# old network must be walked instead of using children because when
# severing, we rely on link information to decide whether to split
# networks.
for m in walker(mod):
network_obj.append(m)
com += m.matrix_world.translation
existing_network_children = existing_network.children \
if existing_network else ()
for c in existing_network_children:
network_obj.append(c)
com += c.matrix_world.translation
com = com / len(network_obj)
new_network.location = com
# Mandatory update to reflect new parent transform
bpy.context.scene.update()
for m in network_obj:
change_parent_preserve_transform(m, new_network)
if not old_network.children:
print('---First network destroy:', old_network.name)
old_network.elfin.destroy()
if existing_network and \
existing_network != old_network and \
not existing_network.children:
print('---Second network destroy:', existing_network.name)
existing_network.elfin.destroy()
def create_network(network_type):
"""Creates and returns a new arrow object as a network parent object,
preserving selection.
"""
selection = get_selected(-1)
for s in selection:
s.select = False
bpy.ops.object.empty_add(type='ARROWS')
nw = get_selected()
nw.select = False
nw.elfin.init_network(nw, network_type)
for s in selection:
s.select = True
return nw
def check_network_integrity(network):
"""Returns the network (list of modules) consists of a single network and
is spatially well formed, meaning all interfaces of the network must be
the way they were found by elfin as elfin had placed them via extrusion.
Network level transformations should not destroy well-formed-ness.
"""
    ...  # Currently not needed
    raise NotImplementedError
def import_joint():
"""Links a bridge object and initializes it using two end joints."""
joint = None
try:
with bpy.data.libraries.load(addon_paths.pguide_path) as \
(data_from, data_to):
data_to.objects = ['joint']
joint = bpy.context.scene.objects.link(data_to.objects[0]).object
joint.elfin.init_joint(joint)
return joint
except Exception as e:
if joint:
# In case something went wrong before this line in try
joint.elfin.obj_ptr = joint
joint.elfin.destroy()
raise e
def import_bridge(joint_a, joint_b):
"""Links a bridge object and initializes it using two end joints."""
bridge = None
try:
with bpy.data.libraries.load(addon_paths.pguide_path) as \
(data_from, data_to):
data_to.objects = ['bridge']
bridge = bpy.context.scene.objects.link(data_to.objects[0]).object
bridge.elfin.init_bridge(bridge, joint_a, joint_b)
return bridge
except Exception as e:
if bridge:
# In case something went wrong before this line in try
bridge.elfin.obj_ptr = bridge
bridge.elfin.destroy()
raise e
def module_menu(self, context):
self.layout.menu("INFO_MT_elfin_add", icon="PLUGIN")
def walk_pg_network(joint, initial=True):
"""A generator that traverses the path guide network depth-first and
yields each object on the way, without repeating.
"""
if not joint.elfin.is_joint():
joint = joint.elfin.pg_neighbors[0].obj
if initial:
for pg in joint.parent.children:
pg.elfin.node_walked = False
yield joint
joint.elfin.node_walked = True
for bridge_nb in joint.elfin.pg_neighbors:
bridge = bridge_nb.obj
for other_end_nb in bridge.elfin.pg_neighbors:
other_end = other_end_nb.obj
if not other_end.elfin.node_walked:
yield from walk_pg_network(other_end, initial=False)
def walk_network(module, initial=True):
"""A generator that traverses the module network depth-first and yields
each object on the way, without repeating.
"""
if not module.elfin.is_module():
return
if initial:
for pg in module.parent.children:
pg.elfin.node_walked = False
yield module
module.elfin.node_walked = True
# Walk n-terminus first
for n_obj in module.elfin.n_linkage:
if not n_obj.target_mod.elfin.node_walked:
yield from walk_network(module=n_obj.target_mod, initial=False)
# Then c-terminus
for c_obj in module.elfin.c_linkage:
if not c_obj.target_mod.elfin.node_walked:
yield from walk_network(module=c_obj.target_mod, initial=False)
IncompatibleModuleError = ValueError('Modules are not compatible!')
def extrude_terminus(which_term, selector, sel_mod, color, reporter):
"""Extrudes selector module at the which_term of sel_mod"""
assert which_term in {'n', 'c'}
all_ext_mods = []
result_signal = {'FINISHED'}
ext_mod = None
try:
sel_mod_name = sel_mod.elfin.module_name
sel_mod.select = False
# Extract chain IDs and module name
c_chain, ext_mod_name, n_chain = \
selector.split('.')
ext_mod = import_module(ext_mod_name)
all_ext_mods.append(ext_mod)
extrude_from = n_chain if which_term == 'n' else c_chain
extrude_into = c_chain if which_term == 'n' else n_chain
sel_ext_type_pair = (sel_mod.elfin.module_type,
ext_mod.elfin.module_type)
print(('Extruding module {to_mod} (chain {to_chain})'
' from {from_mod}\'s {terminus}-Term (chain {from_chain})').
format(to_mod=selector,
to_chain=extrude_into,
from_mod=sel_mod_name,
terminus=which_term.upper(),
from_chain=extrude_from))
def project_extruded_mod(fixed_mod, ext_mod, src_chain=extrude_from):
tx = get_tx(
fixed_mod,
src_chain,
extrude_into,
ext_mod,
which_term,
sel_ext_type_pair
)
if not tx and reporter is not None:
reporter.report({'ERROR'}, str(IncompatibleModuleError))
raise IncompatibleModuleError
ext_mod.matrix_world = tx * ext_mod.matrix_world
# Create link
if which_term == 'n':
n_link = fixed_mod.elfin.new_n_link(
src_chain, ext_mod, extrude_into)
c_link = ext_mod.elfin.new_c_link(
extrude_into, fixed_mod, src_chain)
else:
c_link = fixed_mod.elfin.new_c_link(
src_chain, ext_mod, extrude_into)
n_link = ext_mod.elfin.new_n_link(
extrude_into, fixed_mod, src_chain)
print('Debug: done linking')
# Touch up
bpy.context.scene.update() # Update to get the correct matrices
change_parent_preserve_transform(ext_mod, fixed_mod.parent)
give_module_new_color(ext_mod, color)
ext_mod.hide = False # Unhide (default is hidden)
ext_mod.select = True
# Because ModuleLifetimeWatcher.on_module_enter() runs
# asynchronously, a newly added module can be immediately deleted
# just before or during the routine up to this point. In that
# case, we need to ensure link integrity.
if fixed_mod.elfin.destroy_entered or \
ext_mod.elfin.destroy_entered:
n_link.sever()
c_link.sever()
return [ext_mod] # for mirror linking
xdb = get_xdb()
if sel_ext_type_pair in {('single', 'single'), ('single', 'hub')}:
project_extruded_mod(sel_mod, ext_mod)
if sel_mod.elfin.mirrors:
all_ext_mods += mirrored_extrude(
root_mod=sel_mod,
new_mirrors=[ext_mod],
ext_mod_name=ext_mod_name,
extrude_func=project_extruded_mod)
elif sel_ext_type_pair == ('hub', 'single'):
#
# Extrude from hub to single.
#
hub_meta = xdb['modules']['hubs'][sel_mod_name]
def extrude_hub_single(sel_mod, new_mod):
project_extruded_mod(sel_mod, new_mod, src_chain=extrude_from)
mirrors = [new_mod]
if hub_meta['symmetric']:
# Calculate non-occupied chain IDs
hub_all_chains = set(hub_meta['chains'].keys())
if which_term == 'n':
hub_busy_chains = set(sel_mod.elfin.n_linkage.keys())
else:
hub_busy_chains = set(sel_mod.elfin.c_linkage.keys())
hub_free_chains = hub_all_chains - hub_busy_chains
imported = mirrored_symhub_extrude(
sel_mod,
mirrors,
hub_free_chains,
ext_mod_name,
project_extruded_mod)
all_ext_mods.extend(imported)
return mirrors
first_mirror_group = extrude_hub_single(sel_mod, ext_mod)
if sel_mod.elfin.mirrors:
all_ext_mods += mirrored_extrude(
root_mod=sel_mod,
new_mirrors=first_mirror_group,
ext_mod_name=ext_mod_name,
extrude_func=extrude_hub_single)
elif sel_ext_type_pair == ('hub', 'hub'):
#
# Extrude from hub to hub is NOT allowed.
#
raise NotImplementedError
else:
raise ValueError(
'Invalid sel_ext_type_pair: {}'.format(sel_ext_type_pair))
except Exception as e:
if ext_mod:
# In case something went wrong before this line in try
ext_mod.elfin.obj_ptr = ext_mod
ext_mod.elfin.destroy()
sel_mod.select = True # Restore selection
if e != IncompatibleModuleError:
raise e
result_signal = {'CANCELLED'}
return all_ext_mods, result_signal
def execute_extrusion(which_term, selector, color, reporter):
    """Executes extrusion respecting mirror links and filters mirror selections.
    """
if selector in nop_enum_selectors:
return {'FINISHED'}
filter_mirror_selection()
for sel_mod in get_selected(-1):
_, signal = extrude_terminus(
which_term,
selector,
sel_mod,
color,
reporter)
return signal
return {'FINISHED'}
def get_extrusion_prototype_list(sel_mod, which_term):
"""Generates a prototype list appropriately filtered for extrusion.
"""
assert which_term in {'n', 'c'}
enum_tuples = [color_change_placeholder_enum_tuple]
# Selection length is guranteed by poll()
sel_mod_name = sel_mod.elfin.module_name
sel_mod_type = sel_mod.elfin.module_type
xdb = get_xdb()
if sel_mod_type == 'hub':
hub_meta = xdb['modules']['hubs'][sel_mod_name]
if which_term == 'n':
occupied_termini = sel_mod.elfin.n_linkage.keys()
else:
occupied_termini = sel_mod.elfin.c_linkage.keys()
for src_chain_id, chain_meta in hub_meta['chains'].items():
if src_chain_id in occupied_termini:
continue
for single_name in chain_meta[which_term]:
single_chains = chain_meta[which_term][single_name]
assert(len(single_chains) == 1)
dst_chain_id = list(single_chains.keys())[0]
enum_tuples.append(
module_enum_tuple(
single_name,
extrude_from=src_chain_id,
extrude_into=dst_chain_id,
direction=which_term))
# Only allow one chain to be extruded because other
# "mirrors" will be generated automatically
if hub_meta['symmetric']:
break
elif sel_mod_type == 'single':
# Checks for occupancy by counting n/c termini links
if which_term == 'n':
link_len = len(sel_mod.elfin.n_linkage)
else:
link_len = len(sel_mod.elfin.c_linkage)
if link_len == 0:
single_meta = xdb['modules']['singles'][sel_mod_name]
chain_meta = single_meta['chains']
chain_id_list = list(chain_meta.keys())
assert len(chain_id_list) == 1
single_chain_name = chain_id_list[0]
term_meta = chain_meta[single_chain_name][which_term]
for ext_mod_name in term_meta:
for ext_mod_chain_name in term_meta[ext_mod_name]:
enum_tuples.append(
module_enum_tuple(
ext_mod_name,
extrude_from=single_chain_name,
extrude_into=ext_mod_chain_name,
direction=which_term))
else:
raise ValueError('Unknown module type: ', sel_mod_type)
# Remove color change placeholder if nothing can be extruded
return enum_tuples if len(enum_tuples) > 1 else []
def change_parent_preserve_transform(child, new_parent):
mw = child.matrix_world.copy()
child.parent = new_parent
child.matrix_world = mw
def get_tx(
fixed_mod,
extrude_from,
extrude_into,
ext_mod,
which_term,
mod_types
):
"""Returns the transformation matrix for when ext_mod is extruded from
fixed_mod's which_term.
"""
assert which_term in {'n', 'c'}
fixed_mod_name = fixed_mod.elfin.module_name
ext_mod_name = ext_mod.elfin.module_name
tx = None
try:
if which_term == 'n':
mod_params = (ext_mod_name, extrude_into,
fixed_mod_name, extrude_from)
else:
mod_params = (fixed_mod_name, extrude_from,
ext_mod_name, extrude_into)
n_to_c_tx = get_n_to_c_tx(*mod_params)
if mod_types == ('single', 'single'):
tx = scale_and_shift(n_to_c_tx, which_term == 'n', fixed_mod)
elif mod_types == ('single', 'hub'):
# dbgen.py only creates Hub-to-Single transforms. Single-to-Hub is
# therefore always the inverse.
tx = scale_and_shift(n_to_c_tx, True, fixed_mod)
elif mod_types == ('hub', 'single'):
tx = scale_and_shift(n_to_c_tx, False, fixed_mod)
else:
raise ValueError('Invalid mod_types: ({}, {})'.format(*mod_types))
except KeyError as ke:
tx = None
raise ke
return tx
def unlink_mirror(modules=None):
mods = modules[:] if modules else bpy.context.selected_objects[:]
if not mods:
return
for m in mods:
m.elfin.mirrors = None
def link_by_mirror(modules=None):
mirrors = modules[:] if modules else bpy.context.selected_objects[:]
if not mirrors:
return
m0 = mirrors[0]
for i in range(1, len(mirrors)):
if mirrors[i].elfin.module_name != m0.elfin.module_name:
print('Error: selected modules are not of the same prototype')
return
for m in mirrors:
m.elfin.mirrors = mirrors[:]
def mirrored_symhub_extrude(
root_symhub,
new_mirrors,
hub_free_chains,
ext_mod_name,
extrude_func):
imported = []
for src_chain_id in hub_free_chains:
mirror_mod = import_module(ext_mod_name)
imported.append(mirror_mod)
# Assign to the same network.
mirror_mod.parent = root_symhub.parent
extrude_func(root_symhub, mirror_mod, src_chain_id)
new_mirrors += imported
for m in new_mirrors:
m.elfin.mirrors = new_mirrors
return imported
def mirrored_extrude(
root_mod,
new_mirrors,
ext_mod_name,
extrude_func):
imported = []
for m in root_mod.elfin.mirrors:
if m != root_mod:
mirror_mod = import_module(ext_mod_name)
imported.append(mirror_mod)
# Assign to the same network.
mirror_mod.parent = m.parent
new_mirrors += extrude_func(m, mirror_mod)
for m in new_mirrors:
m.elfin.mirrors = new_mirrors
return imported
def filter_mirror_selection():
for s in bpy.context.selected_objects:
if s.select and s.elfin.mirrors:
for m in s.elfin.mirrors:
# Note that m could be the next s!
if m and m != s:
m.select = False
def suitable_for_extrusion(context):
"""Checks selection is not none and is homogenous.
"""
selection = context.selected_objects
n_objs = len(selection)
if n_objs == 0:
return False
# In object mode?
if selection[0].mode != 'OBJECT':
return False
# Homogenous?
first_mod_name = selection[0].elfin.module_name
for o in selection:
if not o.elfin.is_module() or o.elfin.module_name != first_mod_name:
return False
return True
def give_module_new_color(mod, new_color=None):
mat = bpy.data.materials.new(name='mat_' + mod.name)
mat.diffuse_color = new_color if new_color else ColorWheel().next_color()
mod.data.materials.append(mat)
mod.active_material = mat
def get_module_collision_map():
"""Checks all elfin modules for collision and returns a map of which
modules collide which.
"""
bpy.context.scene.update()
mods = [o for o in bpy.context.scene.objects if o.elfin.is_module()]
collision_map = {mod: [] for mod in mods}
for mod in collision_map:
collision_map[mod] = find_overlap(mod, mods)
return collision_map
def find_overlap(test_obj, obj_list, scale_factor=0.90):
"""
Tests whether an object's mesh overlaps with any mesh in obj_list.
The collision check will be skipped for test_obj itself, and its
immediate neighbors.
Args:
- test_obj - the object under test.
- obj_list - optional; the list of objects to test against.
- scale_factor - optional; the scale to apply before testing.
Returns:
- list of colliding objects
"""
bpy.context.scene.update()
scale = mathutils.Matrix.Scale(scale_factor, 4)
mod_bm = bmesh.new()
mod_bm.from_mesh(test_obj.data)
mod_bm.transform(test_obj.matrix_world * scale)
mod_bvh_tree = mathutils.bvhtree.BVHTree.FromBMesh(mod_bm)
colliding_objs = []
for ob in obj_list:
        # Skip the test subject itself and its immediate neighbors.
if ob == test_obj or test_obj.elfin.find_link(ob):
continue
ob_bm = bmesh.new()
ob_bm.from_mesh(ob.data)
ob_bm.transform(ob.matrix_world * scale)
ob_bvh_tree = mathutils.bvhtree.BVHTree.FromBMesh(ob_bm)
overlaps = mod_bvh_tree.overlap(ob_bvh_tree)
if len(overlaps) > 0:
colliding_objs.append(ob)
return colliding_objs
def scale_and_shift(n_to_c_tx, invert=False, fixed_mod=None):
tx = pymol_to_blender_scale(n_to_c_tx)
if invert:
tran = tx.translation.copy()
tx.translation = [0, 0, 0]
tx.transpose()
tx.translation = tx * -tran
if fixed_mod is not None:
tx = equalize_frame(tx, fixed_mod)
return tx
def equalize_frame(tx, fixed_mod):
trans, rot, _ = fixed_mod.matrix_world.decompose()
delta = rot.to_matrix().to_4x4()
delta.translation = trans
return delta * tx
def scaleless_rot_tran(obj):
mw = obj.matrix_world.copy()
# Decompose matrix_world to remove 0.1 scale
tran = mathutils.Matrix.Translation(mw.translation)
rot = mw.to_euler().to_matrix().to_4x4()
return rot, tran
def pymol_to_blender_scale(n_to_c_tx):
tx = mathutils.Matrix(n_to_c_tx)
for i in range(0, 3):
tx[i][3] /= blender_pymol_unit_conversion
return tx
def get_compatible_hub_chains(hub_name, single_term, single_name):
assert single_term in {'n', 'c'}
hub_term = {'n': 'c', 'c': 'n'}[single_term]
chain_meta = get_xdb()['modules']['hubs'][hub_name]['chains']
compat_hub_chains = []
for chain_name in chain_meta:
if single_name in chain_meta[chain_name][hub_term].keys():
compat_hub_chains.append(chain_name)
return compat_hub_chains
def module_enum_tuple(mod_name,
extrude_from=None,
extrude_into=None,
direction=None):
"""Creates an enum tuple storing the single module selector, prefixed or
suffixed by the terminus of a hub from/to which the single module is
extruded.
Enum selector format: C-Chain ID, Module, N-Chain ID
Example context:
        Let module A receive an extrusion operation which attempts to add B
to A's n-terminus.
args:
- mod_name: module B's name.
- extrude_from: module A's chain ID that is receiving extrusion.
- extrude_into: module B's chain ID that is complementing the extrusion.
- direction: is B being added to A's c-terminus or n-terminus.
"""
if direction is not None:
assert direction in {'n', 'c'}
assert extrude_from is not None
if extrude_into is None:
extrude_into = ''
# Keep the selector format: n_chain, mod, c_chain
if direction == 'c':
mod_sel = '.'.join([extrude_from, mod_name, extrude_into])
display = ':{}(C) -> (N){}:{}.'.format(extrude_from,
extrude_into, mod_name)
elif direction == 'n':
mod_sel = '.'.join([extrude_into, mod_name, extrude_from])
display = ':{}(N) -> (C){}:{}.'.format(extrude_from,
extrude_into, mod_name)
else:
mod_sel = '.'.join(['', mod_name, ''])
display = mod_sel
return (mod_sel, display, '')
def import_module(mod_name):
"""Links a module object from library.blend. Supports all module types."""
lmod = None
try:
with bpy.data.libraries.load(addon_paths.modlib_path) as \
(data_from, data_to):
data_to.objects = [mod_name]
lmod = bpy.context.scene.objects.link(data_to.objects[0]).object
lmod.elfin.init_module(lmod, mod_name)
# Force newly loaded module to not be in selected status
lmod.select = False
return lmod
except Exception as e:
if lmod:
# In case something went wrong before this line in try
lmod.elfin.obj_ptr = lmod
lmod.elfin.destroy()
raise e
``` |
{
"source": "joy13975/enconv",
"score": 3
} |
#### File: enconv/enconv/encheck.py
```python
import argparse
import os
from time import sleep
from .encoding_converter import EncodingConverter
exclude_files = ['.DS_Store']
include_extensions = ['txt']
def main(*args, **kwargs):
ap = argparse.ArgumentParser(description='Guess text encoding of files in folder')
ap.add_argument('input_dir',
help='Input directory',
type=str)
    ap.add_argument('-gl', '--guess_length',
                    help='Number of bytes to use for guessing. -1=all',
                    type=int,
                    default=1024)
args = ap.parse_args()
if not os.path.isdir(args.input_dir):
print('Input directory is not a directory')
exit(1)
args_dict = vars(args)
filenames = [f for f in os.listdir(args.input_dir)
if os.path.isfile(os.path.join(args.input_dir, f))]
for filename in filenames:
if filename in exclude_files or filename.split('.')[-1] not in include_extensions:
continue
ec_args = args_dict.copy()
input_path = os.path.join(args.input_dir, filename)
ec_args['input_file'] = input_path
ec = EncodingConverter(**ec_args)
if ec.input_encoding.lower() == 'guess':
guess_result = ec.guess()
print(f'Guess for {filename}:\n{guess_result}')
ec.input_encoding = guess_result['encoding']
if __name__ == '__main__':
main()
``` |
{
"source": "joy13975/Orchestock",
"score": 3
} |
#### File: Orchestock/Orchestonks/notes.py
```python
import re
A4 = 440
C0 = A4 * (2.0 ** -4.75)
notes = 'CDEFGAB'
scale = ['C', 'SC', 'D', 'SD', 'E', 'F', 'SF', 'G', 'SG', 'A', 'SA', 'B']
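# Equal-temperament relation used below: a note k semitones above C0 has
# frequency C0 * 2 ** (k / 12); note_to_freq() composes k from the octave and
# the index into `scale`.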
def note_to_freq(note):
note = note.upper()
match = re.match(r'^([SF])?([A-G])(\d+)$', note)
assert match is not None
sharpflat, note_name, octave = match.groups()
octave = int(octave)
# Regularize notes to sharp only
note_index = notes.index(note_name)
if sharpflat == 'F':
note_index -= 1
sharpflat = '' if note_name in 'CF' else 'S'
elif sharpflat is None:
sharpflat = ''
scale_index = scale.index(sharpflat + notes[note_index])
return C0 * (2.0 ** (octave + (scale_index / 12)))
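# Usage sketch: quick sanity check of the mapping above (A4 = 440 Hz
# reference); 'SC4' and 'FD4' use the sharp/flat prefix convention parsed by
# the regex in note_to_freq().
if __name__ == '__main__':
    print(note_to_freq('A4'))   # ~440.0 Hz
    print(note_to_freq('SC4'))  # C sharp 4, ~277.18 Hz
    print(note_to_freq('FD4'))  # D flat 4, same pitch as C sharp 4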
```
#### File: Orchestock/Orchestonks/tone_player.py
```python
from time import time, sleep
import math
import sounddevice as sd
import numpy as np
from enum import IntEnum, auto
from notes import note_to_freq
class ToneStatus(IntEnum):
Inactive = 0
Active = 1
Dead = 2
class Tone:
def __init__(self, freq, volume, duration):
self.freq = freq
self.volume = volume # 0~1
self.duration = duration # ms
self.status = ToneStatus.Inactive
self._start_time = -1
def start(self):
if self.status >= ToneStatus.Active:
return
self.status = ToneStatus.Active
self._start_time = time()
def check_expired(self):
if self.duration == -1 or self._start_time == -1:
return
if (time() - self._start_time) * 1000 >= self.duration:
self.status = ToneStatus.Dead
def sample(self):
return self.freq, self.volume, int(self.status)
class TonePlayer:
def __init__(self):
self.chunk_size = 512
self.fs = 2048*10
sd.default.samplerate = self.fs
self.tones = dict()
self.sample_i = 0
def _sample_callback(self, out_data, frame_count, time_info, status):
end = self.sample_i + frame_count
out_data[:] = self._mix_samples(
self.sample_i, end).reshape(frame_count, 1)
self.sample_i = end
def __enter__(self):
self.stream = sd.OutputStream(channels=1,
blocksize=self.chunk_size,
callback=self._sample_callback)
self.stream.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stream.close()
def _logistic(self, x, k=1, x0=1, L=1):
return L/(1+(np.exp(-k*(x - x0))))
def _mix_samples(self, i, j):
self.tones = {k: v for k, v in self.tones.items()
if not v.status == ToneStatus.Dead}
if self.tones:
for t in self.tones.values():
t.check_expired()
freq, volume, status = \
[np.asmatrix(a) for a in
zip(*(t.sample() for t in self.tones.values()))]
sines = np.asmatrix(
np.sin((freq / self.fs).T * 2.0 * np.pi * np.arange(i, j)))
            # Multiply by envelope based on tone status:
# Inactive -> Fade In
# Active -> Identity
# Dead -> Fade Out
k = (6*np.pi)/self.chunk_size
x0 = self.chunk_size / 2
fade_in = self._logistic(np.arange(j-i), x0=x0, k=k)
identity = np.ones(j-i)
fade_out = self._logistic(np.arange(j-i), x0=x0, k=-k)
fade_mat = np.asmatrix([fade_in, identity, fade_out])
fade_mask = np.asmatrix(np.diag(np.ones(3)))[status.A1]
samples = (volume * (np.multiply(fade_mask*fade_mat, sines))).A1
for t in self.tones.values():
t.start()
else:
samples = np.array([0.0]*(j-i))
return samples
def play(self, freq, duration=-1, volume=0.5):
if isinstance(freq, str):
freq = note_to_freq(freq)
tone = Tone(freq, volume, duration)
tone_id = id(tone)
self.tones[tone_id] = tone
return tone_id
def stop(self, tone_id=-1):
if tone_id == -1:
self.tones.clear()
elif tone_id in self.tones:
del self.tones[tone_id]
def test(self):
octave = 4
notes = [f'c{octave}', f'e{octave}', f'g{octave}', f'c{octave+1}']
duration = 4000
n = 2 * len(notes)
interval = (duration / n) / 1000
for i, note in enumerate(notes + notes[::-1]):
d = duration * (n - i)/n
            self.play(note_to_freq(note), d)
sleep(interval)
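    # Rough usage sketch (comment only; assumes a working sounddevice output device):
    #   with TonePlayer() as player:
    #       tid = player.play('SC4', duration=500, volume=0.3)  # C#4 for 0.5 s
    #       sleep(1)
    #       player.stop(tid)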
if __name__ == "__main__":
with TonePlayer() as p:
p.test()
import code
code.interact(local={**locals(), **globals()})
``` |
{
"source": "Joy917/fast-transfer",
"score": 2
} |
#### File: fast-transfer/src/_sever_qt4.py
```python
from PySide import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(798, 732)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.checkBox_time = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_time.setObjectName(_fromUtf8("checkBox_time"))
self.horizontalLayout.addWidget(self.checkBox_time)
self.dateTimeEdit_start = QtGui.QDateTimeEdit(self.groupBox_2)
self.dateTimeEdit_start.setDateTime(QtCore.QDateTime(QtCore.QDate(2017, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateTimeEdit_start.setCalendarPopup(True)
self.dateTimeEdit_start.setObjectName(_fromUtf8("dateTimeEdit_start"))
self.horizontalLayout.addWidget(self.dateTimeEdit_start)
self.label_2 = QtGui.QLabel(self.groupBox_2)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
self.dateTimeEdit_end = QtGui.QDateTimeEdit(self.groupBox_2)
self.dateTimeEdit_end.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateTimeEdit_end.setCalendarPopup(True)
self.dateTimeEdit_end.setObjectName(_fromUtf8("dateTimeEdit_end"))
self.horizontalLayout.addWidget(self.dateTimeEdit_end)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
spacerItem2 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem2)
self.checkBox_ip = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_ip.setObjectName(_fromUtf8("checkBox_ip"))
self.horizontalLayout_3.addWidget(self.checkBox_ip)
self.lineEdit_ip = QtGui.QLineEdit(self.groupBox_2)
self.lineEdit_ip.setObjectName(_fromUtf8("lineEdit_ip"))
self.horizontalLayout_3.addWidget(self.lineEdit_ip)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem3)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
spacerItem4 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem4)
self.checkBox_fuzzy = QtGui.QCheckBox(self.groupBox_2)
self.checkBox_fuzzy.setObjectName(_fromUtf8("checkBox_fuzzy"))
self.horizontalLayout_4.addWidget(self.checkBox_fuzzy)
self.lineEdit_fuzzysearch = QtGui.QLineEdit(self.groupBox_2)
self.lineEdit_fuzzysearch.setObjectName(_fromUtf8("lineEdit_fuzzysearch"))
self.horizontalLayout_4.addWidget(self.lineEdit_fuzzysearch)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem5)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.gridLayout.addWidget(self.groupBox_2, 1, 0, 1, 2)
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.textBrowser_log = QtGui.QTextBrowser(self.groupBox)
self.textBrowser_log.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.IBeamCursor))
self.textBrowser_log.setMouseTracking(True)
self.textBrowser_log.setObjectName(_fromUtf8("textBrowser_log"))
self.verticalLayout.addWidget(self.textBrowser_log)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.lineEdit_pagenumStart = QtGui.QLineEdit(self.groupBox)
self.lineEdit_pagenumStart.setMaximumSize(QtCore.QSize(50, 16777215))
self.lineEdit_pagenumStart.setObjectName(_fromUtf8("lineEdit_pagenumStart"))
self.horizontalLayout_2.addWidget(self.lineEdit_pagenumStart)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setMaximumSize(QtCore.QSize(20, 16777215))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_2.addWidget(self.label_3)
self.lineEdit_pagenumEnd = QtGui.QLineEdit(self.groupBox)
self.lineEdit_pagenumEnd.setMaximumSize(QtCore.QSize(50, 16777215))
self.lineEdit_pagenumEnd.setObjectName(_fromUtf8("lineEdit_pagenumEnd"))
self.horizontalLayout_2.addWidget(self.lineEdit_pagenumEnd)
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem6)
self.pushButton_pageup = QtGui.QPushButton(self.groupBox)
self.pushButton_pageup.setObjectName(_fromUtf8("pushButton_pageup"))
self.horizontalLayout_2.addWidget(self.pushButton_pageup)
self.pushButton_pagedown = QtGui.QPushButton(self.groupBox)
self.pushButton_pagedown.setObjectName(_fromUtf8("pushButton_pagedown"))
self.horizontalLayout_2.addWidget(self.pushButton_pagedown)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 2)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.label_notice = QtGui.QLabel(Form)
self.label_notice.setMinimumSize(QtCore.QSize(600, 0))
self.label_notice.setObjectName(_fromUtf8("label_notice"))
self.horizontalLayout_5.addWidget(self.label_notice)
spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem7)
self.pushButton_check = QtGui.QPushButton(Form)
self.pushButton_check.setObjectName(_fromUtf8("pushButton_check"))
self.horizontalLayout_5.addWidget(self.pushButton_check)
self.gridLayout.addLayout(self.horizontalLayout_5, 2, 0, 1, 2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "LogManager", None))
self.groupBox_2.setTitle(_translate("Form", "Search Setting", None))
        self.checkBox_time.setText(_translate("Form", "time:", None))
self.label_2.setText(_translate("Form", "-----", None))
        self.checkBox_ip.setText(_translate("Form", "IP: ", None))
        self.checkBox_fuzzy.setText(_translate("Form", "fuzzy:", None))
self.groupBox.setTitle(_translate("Form", "Log Display", None))
self.label_3.setText(_translate("Form", "---", None))
self.pushButton_pageup.setText(_translate("Form", "page up ", None))
self.pushButton_pagedown.setText(_translate("Form", "page down", None))
self.label_notice.setText(_translate("Form", "Notice:", None))
self.pushButton_check.setText(_translate("Form", "Check", None))
```
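`Ui_Form` above is machine-generated layout code (the `_fromUtf8`/`_translate` shims are standard pyside-uic boilerplate), so it only builds widgets onto a host widget. A minimal wiring sketch, assuming PySide (Qt4) is installed and this module is importable as `_sever_qt4`:
```python
import sys
from PySide import QtGui
from _sever_qt4 import Ui_Form

app = QtGui.QApplication(sys.argv)
form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(form)   # builds the group boxes, line edits and buttons onto the bare widget
form.show()
sys.exit(app.exec_())
```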
#### File: fast-transfer/test/testLogging.py
```python
import logging,time,os
# helper to obtain a configured logger instance
def getLogger(name):
    # name log files by date so frequent logging does not grow a single file too large
    date = time.strftime("%Y-%m-%d", time.localtime())
    # get the logger instance
    logger = logging.getLogger(name)
    # log message format
    fmt = logging.Formatter("%(asctime)s [%(levelname)s] [%(name)s]: %(message)s\n", datefmt="%Y-%m-%d %H:%M:%S")
    # output handler and log file location
    path = os.getcwd()
    file_handler = logging.FileHandler(r"{}/log/{}_{}.log".format(path,date,name))
    file_handler.setFormatter(fmt)
    # configure the logger instance
    logger.addHandler(file_handler)
    logger.setLevel(logging.INFO)
    return logger
logger = getLogger("Test")
logger.info("This is a test message")
``` |
{
"source": "Joy917/News-Spider",
"score": 2
} |
#### File: News-Spider/spiders/thehill_spider.py
```python
import threading
from bs4 import BeautifulSoup
import re
import time
import utils
import entity
TOTALS = 0
def get_header():
header = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36",
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9",
"cache-control": "max-age=0",
"upgrade-insecure-requests": "1",
"Connection": "close",
"cookie": 'kppid=1a4aac943d4; has_js=1; _ga=GA1.2.2070443439.1621870819; _cb_ls=1; _cb=m_5H0BiRoPOB2AREW; _ntv_uid=3fc25902-f7b9-43c1-946d-ae954e9fecb3; __gads=ID=612340c38c6544d5:T=1621870819:S=ALNI_Mb3_tCo_FRQqwxu6qrFGkd2s5TS0A; ccpaUUID=dd3c619b-abd7-4f46-9c47-de11bb7d07c3; dnsDisplayed=true; ccpaApplies=false; signedLspa=false; ntv_as_us_privacy=1---; cuid=8d4cc250f5752790244f1621870826449_1624462826449; OB-USER-TOKEN=<PASSWORD>; _pbjs_userid_consent_data=3524755945110770; _gid=GA1.2.168308596.1622039723; _cb_svref=null; FCCDCF=[["<KEY>],null,["[[],[],[],[],null,null,true]",1622039970296]]; _gat=1; _chartbeat2=.1621870822200.1622040648167.101.BPo3cED3o9ufDVEs-9B1aCjJBDC-pN.8'
}
return header
def driver_url(driver, url):
driver.get(url)
result = driver.find_element_by_xpath("//body/div[@id='page']/div[@id='main']/div[@id='content']/ol[1]")
soup = BeautifulSoup(result.get_attribute('innerHTML'), "html.parser")
driver.quit()
return soup
def start_crawl(file_path, keywords, start_time, end_time):
keywords_str = "%20".join(keywords)
driver = utils.get_webdriver()
item_set = set()
for page in range(0, 10):
url = f"https://thehill.com/search/query/{keywords_str}?page={page}"
try:
soup = driver_url(driver, url)
search_result = soup.find_all("li", class_=re.compile("search-result"))
if search_result and len(search_result) > 0:
                # Filter results and record each article's date, url and title
for li in search_result:
origin_date = li.find_next("p", class_="date").string.strip()
date_list = origin_date.split("/")
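                    # origin_date is MM/DD/YYYY; reorder to YYYYMMDD so it compares numerically with start_time/end_time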
date = date_list[2] + date_list[0] + date_list[1]
if int(date) < int(start_time):
return item_set
if int(date) > int(end_time):
continue
article = entity.Article()
a = li.find_next("h3", class_="title").find_next("a")
href = a.get("href")
if not href.startswith("https://thehill.com/"):
href = "https://thehill.com" + href
article.url = href
article.title = a.string
article.title_cn = utils.translate(article.title)
article.date = date
                    # Parse the article body
try:
title, publish_date, content = utils.get_title_time_content(href, header=get_header())
article.text = content
article.text_cn = utils.translate(article.text)
except Exception as exc:
pass
time.sleep(1)
item_set.add(article)
else:
return item_set
except Exception as exc:
return item_set
try:
global TOTALS
TOTALS += len(item_set)
utils.write_xlsx_apend(file_path, item_set)
item_set.clear()
except:
pass
driver.quit()
def save_to_excel(file_path, keywords, item_set):
    # Write the data rows to the spreadsheet
utils.write_xlsx_apend(file_path, item_set)
class Task(threading.Thread):
def __init__(self, thread_id, name, dir_name, keywords, start_time, end_time, signal):
super().__init__()
self.thread_id = thread_id
self.name = name
self.dir_name = dir_name
self.keywords = keywords
self.start_time = start_time
self.end_time = end_time
self._signal = signal
def run(self):
try:
self._signal.emit(f"{self.name} start...")
start = time.time()
file_path = f"{self.dir_name}\\{utils.now_timestamp()}-{self.name}.xlsx"
            # Create an empty Excel file and write the header row
utils.create_xlsx_with_head(file_path=file_path, sheet_name='+'.join(self.keywords))
start_crawl(file_path, self.keywords, self.start_time, self.end_time)
end = time.time()
used_time = round((end - start) / 60, 2)
msg = f"{self.name} end, totals:{TOTALS}, used:{used_time} min"
self._signal.emit(msg)
except:
self._signal.emit(f"{self.name} failed end")
if __name__ == '__main__':
keywords = ["Tokyo"]
start_time = "20210623"
end_time = "20210630"
    # Create an empty Excel file and write the header row
utils.create_xlsx_with_head("./TheHill.xlsx", sheet_name='+'.join(keywords))
item_set = start_crawl("./TheHill.xlsx", keywords=keywords, start_time=start_time, end_time=end_time)
``` |
{
"source": "Joy917/ToolScripts",
"score": 3
} |
#### File: ToolScripts/common/transfer.py
```python
import os
import re
def normal_path(*args):
return os.path.normpath(os.path.join(*args))
class Transfer:
def __init__(self):
self.DESKTOP_PATH = normal_path("C:/Users", os.getenv("USERNAME"), "Desktop")
self.DEFAULT_SIZE_PER = 1024 * 1024 * 10
def split_file(self, src_file: str, output_dir=""):
"""
        Split a file into fixed-size chunks
        :param src_file: path of the source file
        :param output_dir: output directory for the chunk files
:return:
"""
if not os.path.isfile(src_file):
raise Exception(f"The given path:{src_file} is not file!")
if not output_dir:
output_dir = self.DESKTOP_PATH
file_size = os.path.getsize(src_file)
print(f"src file size:{file_size // (1024 * 1024)} M")
count = file_size // self.DEFAULT_SIZE_PER
mod = file_size % self.DEFAULT_SIZE_PER
if mod > 0: count += 1
with open(file=src_file, mode="rb") as fr:
            # split the source file into chunks of at most DEFAULT_SIZE_PER bytes
for i in range(0, count):
target_file = normal_path(output_dir, f"{os.path.basename(src_file)}_{i}")
with open(file=target_file, mode="wb") as fw:
fw.write(fr.read(self.DEFAULT_SIZE_PER))
print("sub file name:", target_file)
print("split finished!")
    def fit_file(self, files_dir: str, target_file_name: str, output_dir=""):
        """Merge previously split chunks back into a single file
        :param files_dir: directory containing the chunk files
        :param target_file_name: file name of the merged output (also the chunk-name prefix)
        :param output_dir: output directory for the merged file
:return:
"""
if not (os.path.exists(files_dir) and os.path.isdir(files_dir)):
raise Exception(f"The given path:{files_dir} does not exist or is not a folder!")
if not output_dir:
output_dir = self.DESKTOP_PATH
target_file_path = normal_path(output_dir, target_file_name)
if os.path.exists(target_file_path):
raise Exception("The output folder already exists target file!")
        regex = re.compile(target_file_name + r"_\d+")
        split_files = []
        # collect the chunk files and sort them by numeric suffix so they are
        # concatenated in the original order regardless of os.listdir() ordering
        for file in os.listdir(files_dir):
            if not re.match(regex, file):
                continue
            index = int(file.rsplit("_", 1)[1])
            split_files.append((index, normal_path(files_dir, file)))
        split_files = [path for _, path in sorted(split_files)]
        # reassemble the file from the ordered chunks
with open(file=target_file_path, mode="ab") as fw:
for sub_file in split_files:
with open(file=sub_file, mode="rb") as fr:
fw.write(fr.read())
print(f"{target_file_path} fit finished!")
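    # Round-trip sketch (paths below are illustrative only):
    #   t = Transfer()
    #   t.split_file("D:/data/big.iso", output_dir="D:/chunks")       # -> big.iso_0, big.iso_1, ...
    #   t.fit_file("D:/chunks", "big.iso", output_dir="D:/restored")  # reassembles big.iso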
if __name__ == '__main__':
file = "Hadoopๆๅจๆๅ_็ฌฌๅ็_ไธญๆ็.pdf"
transfer = Transfer()
# transfer.split_file("E:/ๅคฉๅคฉๅไธ/Hadoopๆๅจๆๅ_็ฌฌๅ็_ไธญๆ็.pdf")
transfer.fit_file("C:/Users/Joy/Desktop/", file)
``` |
{
"source": "joyabhatt/jb-inhibition",
"score": 2
} |
#### File: joyabhatt/jb-inhibition/aigame.py
```python
from neuron import h
from pylab import concatenate, figure, show, ion, ioff, pause,xlabel, ylabel, plot, Circle, sqrt, arctan, arctan2, close
from copy import copy, deepcopy
from random import uniform, seed, sample, randint
from matplotlib import pyplot as plt
import random
import numpy as np
from skimage.transform import downscale_local_mean, resize
from skimage.color import rgb2gray
import json
import gym
import sys
from gym import wrappers
from time import time
from collections import OrderedDict
from imgutils import getoptflow
from imgutils import getObjectsBoundingBoxes, getObjectMotionDirection
import cv2
from centroidtracker import CentroidTracker
# make the environment - env is global so that it only gets created on a single node (important when using MPI with > 1 node)
try:
from conf import dconf
env = gym.make(dconf['env']['name'],frameskip=dconf['env']['frameskip'])
if dconf['env']['savemp4']: env = wrappers.Monitor(env, './videos/' + dconf['sim']['name'] + '/',force=True)
env.reset()
except:
print('Exception in makeENV')
env = gym.make('Pong-v0',frameskip=3)
env = wrappers.Monitor(env, './videos/' + str(time()) + '/',force=True)
env.reset()
# get smallest angle difference
def getangdiff (ang1, ang2):
if ang1 > 180.0:
ang1 -= 360.0
if ang2 > 180.0:
ang2 -= 360.0
angdiff = ang1 - ang2
if angdiff > 180.0:
angdiff-=360.0
elif angdiff < -180.0:
angdiff+=360.0
return angdiff
class AIGame:
""" Interface to OpenAI gym game
"""
def __init__ (self,fcfg='sim.json'): # initialize variables
self.env = env
self.countAll = 0
self.ldir = ['E','NE','N', 'NW','W','SW','S','SE']
self.ldirpop = ['EV1D'+Dir for Dir in self.ldir]
self.lratepop = ['ER'] # populations that we calculate rates for
for d in self.ldir: self.lratepop.append('EV1D'+d)
self.dFVec = OrderedDict({pop:h.Vector() for pop in self.lratepop}) # NEURON Vectors for firing rate calculations
self.dFiringRates = OrderedDict({pop:np.zeros(dconf['net'][pop]) for pop in self.lratepop}) # python objects for firing rate calculations
self.dAngPeak = OrderedDict({'EV1DE': 0.0,'EV1DNE': 45.0, # receptive field peak angles for the direction selective populations
'EV1DN': 90.0,'EV1DNW': 135.0,
                                 'EV1DW': 180.0,'EV1DSW': 225.0,
'EV1DS': 270.0,'EV1DSE': 315.0})
self.AngRFSigma2 = dconf['net']['AngRFSigma']**2 # angular receptive field (RF) sigma squared used for dir selective neuron RFs
if self.AngRFSigma2 <= 0.0: self.AngRFSigma2=1.0
self.input_dim = int(np.sqrt(dconf['net']['ER'])) # input image XY plane width,height
self.dirSensitiveNeuronDim = int(np.sqrt(dconf['net']['EV1DE'])) # direction sensitive neuron XY plane width,height
self.dirSensitiveNeuronRate = (dconf['net']['DirMinRate'], dconf['net']['DirMaxRate']) # min, max firing rate (Hz) for dir sensitive neurons
self.intaction = int(dconf['actionsPerPlay']) # integrate this many actions together before returning reward information to model
# these are Pong-specific coordinate ranges; should later move out of this function into Pong-specific functions
self.courtYRng = (34, 194) # court y range
self.courtXRng = (20, 140) # court x range
self.racketXRng = (141, 144) # racket x range
self.dObjPos = {'racket':[], 'ball':[]}
self.last_obs = [] # previous observation
self.last_ball_dir = 0 # last ball direction
self.FullImages = [] # full resolution images from game environment
self.ReducedImages = [] # low resolution images from game environment used as input to neuronal network model
self.ldflow = [] # list of dictionary of optical flow (motion) fields
if dconf['DirectionDetectionAlgo']['CentroidTracker']:
self.ct = CentroidTracker()
self.objects = OrderedDict() # objects detected in current frame
self.last_objects = OrderedDict() # objects detected in previous frame
def updateInputRates (self, dsum_Images):
# update input rates to retinal neurons
#fr_Images = np.where(dsum_Images>1.0,100,dsum_Images) #Using this to check what number would work for firing rate
#fr_Images = np.where(dsum_Images<10.0,0,dsum_Images)
fr_Images = 40/(1+np.exp((np.multiply(-1,dsum_Images)+123)/25))
fr_Images = np.subtract(fr_Images,np.min(fr_Images)) #baseline firing rate subtraction. Instead all excitatory neurons are firing at 5Hz.
#print(np.amax(fr_Images))
self.dFiringRates['ER'] = np.reshape(fr_Images,400) #400 for 20*20
def computeMotionFields (self, UseFull=False):
# compute and store the motion fields and associated data
if UseFull:
limage = self.FullImages
else:
limage = self.ReducedImages
if len(limage) < 2:
flow = np.zeros(shape=(limage[-1].shape[0],limage[-1].shape[1],2))
mag = np.zeros(shape=(limage[-1].shape[0],limage[-1].shape[1]))
ang = np.zeros(shape=(limage[-1].shape[0],limage[-1].shape[1]))
ang[mag == 0] = -100
goodInds = np.zeros(shape=(limage[-1].shape[0],limage[-1].shape[1]))
self.ldflow.append({'flow':flow,'mag':mag,'ang':ang,'goodInds':goodInds,'thang':ang,'thflow':flow})
#return
else:
self.ldflow.append(getoptflow(limage[-2],limage[-1]))
def computeAllObjectsMotionDirections(self, UseFull=False):
#Detect the objects, and initialize the list of bounding box rectangles
if len(self.FullImages)==0: return
if UseFull:
cimage = self.FullImages[-1]
else:
cimage = self.ReducedImages[-1]
rects = getObjectsBoundingBoxes(cimage)
cimage = np.ascontiguousarray(cimage, dtype=np.uint8)
# update our centroid tracker using the computed set of bounding box rectangles
self.objects = self.ct.update(rects)
if len(self.last_objects)==0:
self.last_objects = deepcopy(self.objects)
flow = np.zeros(shape=(self.dirSensitiveNeuronDim,self.dirSensitiveNeuronDim,2))
mag = np.zeros(shape=(self.dirSensitiveNeuronDim,self.dirSensitiveNeuronDim))
ang = np.zeros(shape=(self.dirSensitiveNeuronDim,self.dirSensitiveNeuronDim))
ang[mag == 0] = -100
goodInds = np.zeros(shape=(self.dirSensitiveNeuronDim,self.dirSensitiveNeuronDim))
else:
dirX, dirY = getObjectMotionDirection(self.objects, self.last_objects, rects, dims=np.shape(cimage)[0],\
FlowWidth=dconf['DirectionDetectionAlgo']['FlowWidth'])
if np.shape(cimage)[0] != self.dirSensitiveNeuronDim or np.shape(cimage)[1] != self.dirSensitiveNeuronDim:
dirX = resize(dirX, (self.dirSensitiveNeuronDim, self.dirSensitiveNeuronDim), anti_aliasing=True)
dirY = resize(dirY, (self.dirSensitiveNeuronDim, self.dirSensitiveNeuronDim), anti_aliasing=True)
mag, ang = cv2.cartToPolar(dirX, -1*dirY)
ang = np.rad2deg(ang)
ang[mag == 0] = -100
self.last_objects = deepcopy(self.objects)
flow = np.zeros(shape=(self.dirSensitiveNeuronDim,self.dirSensitiveNeuronDim,2))
flow[:,:,0] = dirX
flow[:,:,1] = dirY
goodInds = np.zeros(shape=(self.dirSensitiveNeuronDim,self.dirSensitiveNeuronDim))
self.ldflow.append({'flow':flow,'mag':mag,'ang':ang,'goodInds':goodInds,'thang':ang,'thflow':flow})
def updateDirSensitiveRates (self):
# update firing rate of dir sensitive neurons using dirs (2D array with motion direction at each coordinate)
if len(self.ldflow) < 1: return
dflow = self.ldflow[-1]
motiondir = dflow['thang'] # angles in degrees, but thresholded for significant motion; negative value means not used
dAngPeak = self.dAngPeak
dirSensitiveNeuronDim = self.dirSensitiveNeuronDim
if motiondir.shape[0] != dirSensitiveNeuronDim or motiondir.shape[1] != dirSensitiveNeuronDim:
motiondir = resize(motiondir, (dirSensitiveNeuronDim, dirSensitiveNeuronDim), anti_aliasing=True)
AngRFSigma2 = self.AngRFSigma2
MaxRate = self.dirSensitiveNeuronRate[1]
for pop in self.ldirpop: self.dFiringRates[pop] = self.dirSensitiveNeuronRate[0] * np.ones(shape=(dirSensitiveNeuronDim,dirSensitiveNeuronDim))
for y in range(motiondir.shape[0]):
for x in range(motiondir.shape[1]):
if motiondir[y,x] >= 0.0: # make sure it's a valid angle
for pop in self.ldirpop:
fctr = np.exp(-1.0*(getangdiff(motiondir[y][x],dAngPeak[pop])**2)/AngRFSigma2)
#print('updateDirRates',pop,x,y,fctr,dAngPeak[pop],motiondir[y][x])
self.dFiringRates[pop][y,x] += MaxRate * fctr
#print('motiondir',motiondir)
for pop in self.ldirpop:
self.dFiringRates[pop]=np.reshape(self.dFiringRates[pop],dirSensitiveNeuronDim**2)
#print(pop,np.amin(self.dFiringRates[pop]),np.amax(self.dFiringRates[pop]),np.mean(self.dFiringRates[pop]))
#print(pop,self.dFiringRates[pop])
def findobj (self, img, xrng, yrng):
# find an object's x, y position in the image (assumes bright object on dark background)
subimg = img[yrng[0]:yrng[1],xrng[0]:xrng[1],:]
sIC = np.sum(subimg,2) #assuming the color of object is uniform, add values or r,g,b to get a single value
pixelVal = np.amax(sIC) #find the pixel value representing object assuming a black background
sIC[sIC<pixelVal]=0 #make binary image
Obj_inds = []
for i in range(sIC.shape[0]):
for j in range(sIC.shape[1]):
if sIC[i,j]>0:
Obj_inds.append([i,j])
if sIC.shape[0]*sIC.shape[1]==np.shape(Obj_inds)[0]: #if there is no object in the subimage
ypos = -1
xpos = -1
else:
ypos = np.median(Obj_inds,0)[0] #y position of the center of mass of the object
xpos = np.median(Obj_inds,0)[1] #x position of the center of mass of the object
return xpos, ypos
def playGame (self, actions, epCount): #actions need to be generated from motor cortex
# PLAY GAME
rewards = []; proposed_actions =[]; total_hits = []; Images = []
input_dim = self.input_dim
done = False
courtYRng, courtXRng, racketXRng = self.courtYRng, self.courtXRng, self.racketXRng # coordinate ranges for different objects (PONG-specific)
if self.intaction==1:
lgwght = [1.0]
else:
lgwght = np.linspace(0.6, 1, self.intaction) # time-decay grayscale image weights (earlier indices with lower weights are from older frames)
lgimage = [] # grayscale down-sampled images with decaying time-lagged input
lgimage_ns = [] #grayscale full images with decaying time-lagged input
if len(self.last_obs)==0: #if its the first action of the episode, there won't be any last_obs, therefore no last image
lobs_gimage_ds = []
else:
lobs_gimage = 255.0*rgb2gray(self.last_obs[courtYRng[0]:courtYRng[1],:,:])
lobs_gimage_ds = downscale_local_mean(lobs_gimage,(8,8))
lobs_gimage_ds = np.where(lobs_gimage_ds>np.min(lobs_gimage_ds)+1,255,lobs_gimage_ds)
lobs_gimage_ds = 0.5*lobs_gimage_ds #use this image for motion computation only
for adx in range(self.intaction):
#for each action generated by the firing rate of the motor cortex, find the suggested-action by comparing the position of the ball and racket
caction = actions[adx] #action generated by the firing rate of the motor cortex
if np.shape(self.last_obs)[0]>0: #if last_obs is not empty
xpos_Ball, ypos_Ball = self.findobj(self.last_obs, courtXRng, courtYRng) # get x,y positions of ball
xpos_Racket, ypos_Racket = self.findobj(self.last_obs, racketXRng, courtYRng) # get x,y positions of racket
#Now we know the position of racket relative to the ball. We can suggest the action for the racket so that it doesn't miss the ball.
#For the time being, I am implementing a simple rule i.e. based on only the ypos of racket relative to the ball
if ypos_Ball==-1: #guess about proposed move can't be made because ball was not visible in the court
proposed_action = -1 #no valid action guessed
elif ypos_Racket>ypos_Ball: #if the racket is lower than the ball the suggestion is to move up
proposed_action = dconf['moves']['UP'] #move up
elif ypos_Racket<ypos_Ball: #if the racket is higher than the ball the suggestion is to move down
proposed_action = dconf['moves']['DOWN'] #move down
elif ypos_Racket==ypos_Ball:
proposed_action = dconf['moves']['NOMOVE'] #no move
#self.FullImages.append(np.sum(self.last_obs[courtYRng[0]:courtYRng[1],:,:],2))
self.dObjPos['ball'].append([courtXRng[0]-1+xpos_Ball,ypos_Ball])
self.dObjPos['racket'].append([racketXRng[0]-1+xpos_Racket,ypos_Racket])
else:
proposed_action = -1 #if there is no last_obs
ypos_Ball = -1 #if there is no last_obs, no position of ball
xpos_Ball = -1 #if there is no last_obs, no position of ball
observation, reward, done, info = self.env.step(caction)
#find position of ball after action
xpos_Ball2, ypos_Ball2 = self.findobj(observation, courtXRng, courtYRng)
ball_moves_towards_racket = False
if xpos_Ball>0 and xpos_Ball2>0:
if xpos_Ball2-xpos_Ball>0:
ball_moves_towards_racket = True # use proposed action for reward only when the ball moves towards the racket
current_ball_dir = 1
elif xpos_Ball2-xpos_Ball<0:
ball_moves_towards_racket = False
current_ball_dir = -1
else:
ball_moves_towards_racket = False
current_ball_dir = 0 #direction can't be determinted prob. because the ball didn't move in x dir.
else:
ball_moves_towards_racket = False
current_ball_dir = 0 #direction can't be determined because either current or last position of the ball is outside the court
if "followOnlyTowards" in dconf:
if dconf["followOnlyTowards"] and not ball_moves_towards_racket:
proposed_action = -1 # no proposed action if ball moving away from racket
ball_hits_racket = 0
# previously I assumed when current_ball_dir is 0 there is no way to find out if the ball hit the racket
if current_ball_dir-self.last_ball_dir<0 and reward==0 and xpos_Ball2>courtXRng[1]-courtXRng[0]-40:
ball_hits_racket = 1
#print('Current_ball_dir', current_ball_dir)
#print('Last ball dir', self.last_ball_dir)
#print('current X pos Ball', xpos_Ball2)
#print('last X pos Ball', xpos_Ball)
#print('Court Range',courtXRng)
print(ball_hits_racket)
self.last_ball_dir = current_ball_dir
total_hits.append(ball_hits_racket) # i dont think this can be more than a single hit in 5 moves. so check if sum is greater than 1, print error
self.env.render()
self.last_obs = observation # current observation will be used as last_obs for the next action
if done:
self.env.reset()
self.last_obs = [] # when the game ends, and new game starts, there is no last observation
self.last_ball_dir=0
done = False
rewards.append(reward)
proposed_actions.append(proposed_action)
gray_Image = 255.0*rgb2gray(observation[courtYRng[0]:courtYRng[1],:,:]) # convert to grayscale; rgb2gray has 0-1 range so mul by 255
gray_ds = downscale_local_mean(gray_Image,(8,8)) # then downsample
gray_ds = np.where(gray_ds>np.min(gray_ds)+1,255,gray_ds) # Different thresholding
gray_ns = np.where(gray_Image>np.min(gray_Image)+1,255,gray_Image)
lgimage_ns.append(lgwght[adx]*gray_ns)
lgimage.append(lgwght[adx]*gray_ds) # save weighted grayscale image from current frame
self.countAll += 1
# NB: previously we merged 2x2 pixels into 1 value. Now we merge 8x8 pixels into 1 value.
# so the original 160x160 pixels will result into 20x20 values instead of previously used 80x80.
if len(lgimage)>1:
dsum_Images = np.maximum(lgimage[0],lgimage[1])
nsum_Images = np.maximum(lgimage_ns[0],lgimage_ns[1])
for gimage in lgimage[2:]: dsum_Images = np.maximum(dsum_Images,gimage)
for gimage in lgimage_ns[2:]: nsum_Images = np.maximum(nsum_Images,gimage)
else:
dsum_Images = lgimage[0]
nsum_Images = lgimage_ns[0]
self.FullImages.append(nsum_Images) # save full images ----> THIS IS JUST USED FOR DIRECTIONS (for accuracy)
self.ReducedImages.append(dsum_Images) # save the input image
self.updateInputRates(dsum_Images) # update input rates to retinal neurons
if self.intaction==1: #if only one frame used per play, then add the downsampled and scaled image from last_obs for direction computation
if len(lobs_gimage_ds)>0:
dsum_Images = np.maximum(dsum_Images,lobs_gimage_ds)
if dconf['DirectionDetectionAlgo']['OpticFlow']:
self.computeMotionFields(UseFull=dconf['DirectionDetectionAlgo']['UseFull']) # compute the motion fields
elif dconf['DirectionDetectionAlgo']['CentroidTracker']:
self.computeAllObjectsMotionDirections(UseFull=dconf['DirectionDetectionAlgo']['UseFull']) # compute the motion field using CetroidTracking
self.updateDirSensitiveRates() # update motion sensitive neuron input rates
if done: # done means that 1 episode of the game finished, so the environment needs to be reset.
epCount.append(self.countAll)
self.env.reset()
self.env.frameskip = 3
self.countAll = 0
if np.sum(total_hits)>1:
print('ERROR COMPUTING NUMBER OF HITS')
for r in range(len(rewards)):
if rewards[r]==-1: total_hits[r]=-1 #when the ball misses the racket, the reward is -1
return rewards, epCount, proposed_actions, total_hits
```
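The retinal drive in `updateInputRates` above is a sigmoid of the summed pixel intensity, `40 / (1 + exp((-I + 123) / 25))`, with the minimum rate then subtracted as a baseline. A small worked example of just that transfer function (a sketch; the constants are copied from the method, everything else is illustrative):
```python
import numpy as np

def retinal_rate(intensity):
    """Firing-rate transfer function used for the ER population (Hz)."""
    return 40.0 / (1.0 + np.exp((-intensity + 123.0) / 25.0))

I = np.array([0.0, 123.0, 255.0])   # dark pixel, sigmoid midpoint, saturated pixel
rates = retinal_rate(I)             # roughly [0.3, 20.0, 39.8] Hz
print(rates - rates.min())          # baseline-subtracted as in updateInputRates: roughly [0.0, 19.7, 39.5]
```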
#### File: joyabhatt/jb-inhibition/imgutils.py
```python
import numpy as np
import cv2 # opencv
# from skimage.registration import optical_flow_tvl1
from scipy import ndimage
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from collections import OrderedDict
import copy
from skimage.transform import downscale_local_mean, rescale
def getoptflow (gimg0, gimg1, winsz=3, pyrscale=0.5, nlayer=3, niter=3, polyn=5, polysigma=1.1):
# gets dense optical flow between two grayscale images (gimg0, gimg1)
    # using OpenCV's implementation of the <NAME> algorithm.
"""
. @param winsz averaging window size; larger values increase the algorithm robustness to image
. noise and give more chances for fast motion detection, but yield more blurred motion field.
. @param pyr_scale parameter, specifying the image scale (\<1) to build pyramids for each image;
. pyrscale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous
. one.
. @param nlayer number of pyramid layers including the initial image; levels=1 means that no extra
. layers are created and only the original images are used.
. @param niter number of iterations the algorithm does at each pyramid level.
. @param polyn size of the pixel neighborhood used to find polynomial expansion in each pixel;
. larger values mean that the image will be approximated with smoother surfaces, yielding more
. robust algorithm and more blurred motion field, typically poly_n =5 or 7.
. @param polysigma standard deviation of the Gaussian that is used to smooth derivatives used as a
. basis for the polynomial expansion; for polyn=5, you can set polysigma=1.1, for polyn=7, a
. good value would be polysigma=1.5.
"""
# see help(cv2.calcOpticalFlowFarneback) for param choices
flow = cv2.calcOpticalFlowFarneback(gimg0,gimg1, None, pyrscale, nlayer, winsz, niter, polyn, polysigma, 0)
mag, ang = cv2.cartToPolar(flow[...,0], -flow[...,1])
ang = np.rad2deg(ang)
thang = np.copy(ang) # now perform thresholding
th = np.mean(mag) + np.std(mag)
goodInds = np.where(mag<th,0,1)
thflow = np.copy(flow)
for y in range(thang.shape[0]):
for x in range(thang.shape[1]):
if mag[y,x] < th:
thang[y,x] = -100 # invalid angle; angles should all be non-negative
thflow[y,x,0] = thflow[y,x,1] = 0 # 0 flow
return {'flow':flow,'mag':mag,'ang':ang,'goodInds':goodInds,'thang':thang,'thflow':thflow}
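# Reading the result (comment-only sketch): for two consecutive grayscale frames f0, f1
# (2-D uint8 arrays),
#   d = getoptflow(f0, f1)
#   d['ang'][y, x]   is the motion direction in degrees at (y, x)
#   d['thang'][y, x] is the same, but set to -100 wherever the flow magnitude falls below
#                    the mean + one standard deviation threshold computed above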
def getoptflowframes (Images,winsz=3, pyrscale=0.5, nlayer=3, niter=3, polyn=5, polysigma=1.1):
# get optical flow between all frames in 3D array of frames; index 0 is frame; next indices are y,x
return [getoptflow(Images[i,:,:],Images[i+1,:,:],winsz=winsz,pyrscale=pyrscale,nlayer=nlayer,niter=niter,polyn=polyn,polysigma=polysigma) for i in range(Images.shape[0]-1)]
# from https://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
def detectpeaks (image):
"""
    Takes an image and detects the peaks using the local maximum filter.
Returns a boolean mask of the peaks (i.e. 1 when
the pixel's value is the neighborhood maximum, 0 otherwise)
"""
# define an 8-connected neighborhood
neighborhood = generate_binary_structure(2,2)
#apply the local maximum filter; all pixel of maximal value
#in their neighborhood are set to 1
local_max = maximum_filter(image, footprint=neighborhood)==image
#local_max is a mask that contains the peaks we are
#looking for, but also the background.
#In order to isolate the peaks we must remove the background from the mask.
#we create the mask of the background
background = (image==0)
#a little technicality: we must erode the background in order to
#successfully subtract it form local_max, otherwise a line will
#appear along the background border (artifact of the local maximum filter)
eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
#we obtain the final mask, containing only peaks,
#by removing the background from the local_max mask (xor operation)
detected_peaks = local_max ^ eroded_background
return detected_peaks
def getObjectsBoundingBoxes(frame):
mask = frame > np.min(frame)
labelim, nlabels = ndimage.label(mask)
# each pixel in labelim contains labels of the object it belongs to.
rects = []
for labels in range(nlabels):
clabel = labels+1
o = ndimage.find_objects(labelim==clabel)
# to get a bounding box
# compute the (x, y)-coordinates of the bounding box for the object
startX = o[0][0].start
startY = o[0][1].start
endX = o[0][0].stop
endY = o[0][1].stop
box = np.array([startX, startY, endX, endY])
#print('box centroid is:',[int((startX + endX) / 2.0),int((startY + endY) / 2.0)])
rects.append(box.astype("int"))
return rects
def getObjectMotionDirection(objects, last_objects, rects, dims, FlowWidth):
dirX = np.zeros(shape=(dims,dims))
dirY = np.zeros(shape=(dims,dims))
MotionAngles = np.zeros(shape=(dims,dims))
objectIDs = list(objects.keys())
objectCentroids = list(objects.values())
last_objectIDs = list(last_objects.keys())
last_objectCentroids = list(last_objects.values())
directions = []
locations = []
for cvalue in objectIDs:
cid = objectIDs.index(cvalue)
cobj_centroid = objectCentroids[cid]
if cvalue in last_objectIDs:
lid = last_objectIDs.index(cvalue)
lobj_centroid = last_objectCentroids[lid]
for i in range(np.shape(rects)[0]):
startX = rects[i][0]
if startX<(FlowWidth/2):
startX = 0
else:
startX = startX-(FlowWidth/2)
startY = rects[i][1]
if startY<(FlowWidth/2):
startY = 0
else:
startY = startY-(FlowWidth/2)
endX = rects[i][2]
if endX>dims-(FlowWidth/2):
endX = dims
else:
endX = endX+(FlowWidth/2)
endY = rects[i][3]
if endY>dims-(FlowWidth/2):
endY = dims
else:
endY = endY+(FlowWidth/2)
if cobj_centroid[1]>=startY and cobj_centroid[1]<=endY and cobj_centroid[0]>=startX and cobj_centroid[0]<=endX:
targetX = range(int(startX),int(endX),1)
targetY = range(int(startY),int(endY),1)
for ix in targetX:
for iy in targetY:
dirX[ix][iy]= cobj_centroid[1]-lobj_centroid[1] #x direction
dirY[ix][iy]= cobj_centroid[0]-lobj_centroid[0] #y direction
cdir = [cobj_centroid[1]-lobj_centroid[1],cobj_centroid[0]-lobj_centroid[0]]
directions.append(cdir)
locations.append([cobj_centroid[1],cobj_centroid[0]])
else:
lobj_centroid = []
return dirX, dirY
``` |
{
"source": "joyalbin/tvm",
"score": 2
} |
#### File: tvm/autotvm/tophub.py
```python
import logging
import os
import json
from .task import ApplyHistoryBest
from .. import target as _target
from ..contrib.util import tempdir
from ..contrib.download import download
AUTOTVM_TOPHUB_ROOT_PATH = os.path.join(os.path.expanduser('~'), ".tvm", "tophub")
logger = logging.getLogger('autotvm')
def _alias(name):
"""convert alias for some packages"""
table = {
'vtacpu': 'vta',
}
return table.get(name, name)
def context(target, extra_files=None):
"""Return the dispatch context with pre-tuned parameters.
The corresponding downloaded *.log files under tophub root path will be loaded.
Users can also add their own files in argument `extra_files`.
Parameters
----------
target: Target
The compilation target
extra_files: list of str, optional
Extra log files to load
"""
rootpath = AUTOTVM_TOPHUB_ROOT_PATH
best_context = ApplyHistoryBest([])
if isinstance(target, str):
target = _target.create(target)
big_target = str(target).split()[0]
if os.path.isfile(os.path.join(rootpath, big_target + ".log")):
best_context.load(os.path.join(rootpath, big_target + ".log"))
for opt in target.options:
if opt.startswith("-device"):
model = _alias(opt[8:])
if os.path.isfile(os.path.join(rootpath, model) + ".log"):
best_context.load(os.path.join(rootpath, model) + ".log")
if extra_files:
for filename in extra_files:
best_context.load(filename)
return best_context
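# Usage sketch (comment only; assumes the pre-tuned logs for the target are already
# under ~/.tvm/tophub, e.g. after check_package()/download_package() below):
#   from tvm import autotvm
#   with autotvm.tophub.context('llvm -device=arm_cpu'):
#       ...  # build here; schedules recorded in the loaded logs get dispatched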
def download_package(backend):
"""Download pre-tuned parameters of operators for a backend
Parameters
----------
backend: str
The name of package
"""
rootpath = AUTOTVM_TOPHUB_ROOT_PATH
if not os.path.isdir(rootpath):
# make directory
splits = os.path.split(rootpath)
for j in range(1, len(splits)+1):
path = os.path.join(*splits[:j])
if not os.path.isdir(path):
os.mkdir(path)
backend = _alias(backend)
logger.info("Download pre-tuned parameters for %s", backend)
download("https://raw.githubusercontent.com/uwsaml/tvm-distro/master/tophub/%s.log" % backend,
os.path.join(rootpath, backend + ".log"), True, verbose=0)
def check_package(backend):
"""Check whether have pre-tuned parameters of the certain target.
If not, will download it.
Parameters
----------
backend: str
The name of package
"""
backend = _alias(backend)
if os.path.isfile(os.path.join(AUTOTVM_TOPHUB_ROOT_PATH, backend + ".log")):
return
download_package(backend)
def list_packages():
"""List all available pre-tuned op parameters for targets
Returns
-------
ret: List
All available packets
"""
path = tempdir()
filename = path.relpath("info.json")
logger.info("Download meta info for pre-tuned parameters")
download("https://raw.githubusercontent.com/uwsaml/tvm-distro/master/tophub/info.json",
filename, True, verbose=0)
with open(filename, "r") as fin:
text = "".join(fin.readlines())
info = json.loads(text)
keys = list(info.keys())
keys.sort()
return [(k, info[k]) for k in keys]
``` |
{
"source": "joyalicegu/botany",
"score": 3
} |
#### File: joyalicegu/botany/menu_screen.py
```python
import curses
import math
import os
import traceback
import threading
import time
import random
import getpass
import json
import sqlite3
import string
import re
import completer
import datetime
class CursedMenu(object):
#TODO: name your plant
'''A class which abstracts the horrors of building a curses-based menu system'''
def __init__(self, this_plant, this_data):
'''Initialization'''
self.initialized = False
self.screen = curses.initscr()
curses.noecho()
curses.raw()
if curses.has_colors():
curses.start_color()
try:
curses.curs_set(0)
except curses.error:
# Not all terminals support this functionality.
# When the error is ignored the screen will look a little uglier, but that's not terrible
            # So in order to keep botany as accessible as possible to everyone, it should be safe to ignore the error.
pass
self.screen.keypad(1)
self.plant = this_plant
self.visited_plant = None
self.user_data = this_data
self.plant_string = self.plant.parse_plant()
self.plant_ticks = str(int(self.plant.ticks))
self.exit = False
self.infotoggle = 0
self.maxy, self.maxx = self.screen.getmaxyx()
# Highlighted and Normal line definitions
if curses.has_colors():
self.define_colors()
self.highlighted = curses.color_pair(1)
else:
self.highlighted = curses.A_REVERSE
self.normal = curses.A_NORMAL
# Threaded screen update for live changes
screen_thread = threading.Thread(target=self.update_plant_live, args=())
screen_thread.daemon = True
screen_thread.start()
        # Recursive lock to prevent both threads from drawing at the same time
self.screen_lock = threading.RLock()
self.screen.clear()
self.show(["water","look","garden","visit", "instructions"], title=' botany ', subtitle='options')
def define_colors(self):
# set curses color pairs manually
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(7, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(8, curses.COLOR_CYAN, curses.COLOR_BLACK)
ESC_CODE_TO_PAIR = {
'' : 0, # normal
'0' : 0, # normal
'30' : 1, # black
'31' : 7, # red
'32' : 3, # green
'33' : 6, # yellow
'34' : 4, # blue
'35' : 5, # magenta
'36' : 8, # cyan
'37' : 2 # white
}
def esc_code_to_color(self, esc_code):
return curses.color_pair(self.ESC_CODE_TO_PAIR.get(esc_code, 0))
def show(self, options, title, subtitle):
# Draws a menu with parameters
self.set_options(options)
self.update_options()
self.title = title
self.subtitle = subtitle
self.selected = 0
self.initialized = True
self.draw_menu()
def update_options(self):
# Makes sure you can get a new plant if it dies
if self.plant.dead or self.plant.stage == 5:
if "harvest" not in self.options:
self.options.insert(-1,"harvest")
else:
if "harvest" in self.options:
self.options.remove("harvest")
def set_options(self, options):
# Validates that the last option is "exit"
if options[-1] != 'exit':
options.append('exit')
self.options = options
def draw(self):
# Draw the menu and lines
self.maxy, self.maxx = self.screen.getmaxyx()
self.screen_lock.acquire()
self.screen.refresh()
try:
self.draw_default()
self.screen.refresh()
except Exception as exception:
# Makes sure data is saved in event of a crash due to window resizing
self.screen.clear()
self.screen.addstr(0, 0, "Enlarge terminal!", curses.A_NORMAL)
self.screen.refresh()
self.__exit__()
traceback.print_exc()
self.screen_lock.release()
def draw_menu(self):
# Actually draws the menu and handles branching
request = ""
try:
while request != "exit":
self.draw()
request = self.get_user_input()
self.handle_request(request)
self.__exit__()
# Also calls __exit__, but adds traceback after
except Exception as exception:
self.screen.clear()
self.screen.addstr(0, 0, "Enlarge terminal!", curses.A_NORMAL)
self.screen.refresh()
self.__exit__()
#traceback.print_exc()
except IOError as exception:
self.screen.clear()
self.screen.refresh()
self.__exit__()
def ascii_render(self, filename, ypos, xpos):
# Prints ASCII art from file at given coordinates
this_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),"art")
this_filename = os.path.join(this_dir,filename + '.txt')
this_file = open(this_filename,"r")
this_string = this_file.readlines()
this_file.close()
self.screen_lock.acquire()
for y, line in enumerate(this_string, 2):
self.screen.addstr(ypos+y, xpos, line, curses.A_NORMAL)
# self.screen.refresh()
self.screen_lock.release()
def ansi_render(self, filename, ypos, xpos):
# Prints ANSI art from file at given coordinates
# Falls back on ASCII if no ANSI version exists
# Assumes curses.has_colors()
this_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),"art")
this_filename = os.path.join(this_dir,filename + '.ansi')
if not os.path.exists(this_filename):
self.ascii_render(filename, ypos, xpos)
return
this_file = open(this_filename,"r")
this_string = this_file.readlines()
this_file.close()
self.screen_lock.acquire()
color = curses.A_NORMAL
for y, line in enumerate(this_string, 2):
code_text_pairs = [tuple(token.split('m', 1)) if 'm' in token else (None, token)
for token in line.rstrip('\r\n').split('\x1b[') ]
color_text_pairs = [(color, text) if code == None else (self.esc_code_to_color(code), text)
for (code, text) in code_text_pairs]
x = 0
for (color, text) in color_text_pairs:
# Handle overflowing art gracefully
text = text[:max(0, self.maxx-(xpos+x))]
if not text:
continue
self.screen.addstr(ypos+y, xpos+x, text, color)
x += len(text)
self.screen_lock.release()
def art_render(self, filename, ypos, xpos):
if curses.has_colors():
self.ansi_render(filename, ypos, xpos)
else:
self.ascii_render(filename, ypos, xpos)
def draw_plant_ascii(self, this_plant):
ypos = 0
xpos = int((self.maxx-37)/2 + 25)
plant_art_list = [
'poppy',
'cactus',
'aloe',
'flytrap',
'jadeplant',
'fern',
'daffodil',
'sunflower',
'baobab',
'lithops',
'hemp',
'pansy',
'iris',
'agave',
'ficus',
'moss',
'sage',
'snapdragon',
'columbine',
'brugmansia',
'palm',
'pachypodium',
]
if this_plant.dead == True:
self.art_render('rip', ypos, xpos)
elif datetime.date.today().month == 10 and datetime.date.today().day == 31:
self.art_render('jackolantern', ypos, xpos)
elif this_plant.stage == 0:
self.art_render('seed', ypos, xpos)
elif this_plant.stage == 1:
self.art_render('seedling', ypos, xpos)
elif this_plant.stage == 2:
this_filename = plant_art_list[this_plant.species]+'1'
self.art_render(this_filename, ypos, xpos)
elif this_plant.stage == 3 or this_plant.stage == 5:
this_filename = plant_art_list[this_plant.species]+'2'
self.art_render(this_filename, ypos, xpos)
elif this_plant.stage == 4:
this_filename = plant_art_list[this_plant.species]+'3'
self.art_render(this_filename, ypos, xpos)
def draw_default(self):
# draws default menu
clear_bar = " " * (int(self.maxx*2/3))
self.screen_lock.acquire()
self.screen.addstr(1, 2, self.title, curses.A_STANDOUT) # Title for this menu
self.screen.addstr(3, 2, self.subtitle, curses.A_BOLD) #Subtitle for this menu
# clear menu on screen
for index in range(len(self.options)+1):
self.screen.addstr(4+index, 4, clear_bar, curses.A_NORMAL)
# display all the menu items, showing the 'pos' item highlighted
for index in range(len(self.options)):
textstyle = self.normal
if index == self.selected:
textstyle = self.highlighted
self.screen.addstr(4+index ,4, clear_bar, curses.A_NORMAL)
self.screen.addstr(4+index ,4, "%d - %s" % (index+1, self.options[index]), textstyle)
self.screen.addstr(12, 2, clear_bar, curses.A_NORMAL)
self.screen.addstr(13, 2, clear_bar, curses.A_NORMAL)
self.screen.addstr(12, 2, "plant: ", curses.A_DIM)
self.screen.addstr(12, 9, self.plant_string, curses.A_NORMAL)
self.screen.addstr(13, 2, "score: ", curses.A_DIM)
self.screen.addstr(13, 9, self.plant_ticks, curses.A_NORMAL)
# display fancy water gauge
if not self.plant.dead:
water_gauge_str = self.water_gauge()
self.screen.addstr(4,14, water_gauge_str, curses.A_NORMAL)
else:
self.screen.addstr(4,13, clear_bar, curses.A_NORMAL)
self.screen.addstr(4,14, "( RIP )", curses.A_NORMAL)
# draw cute ascii from files
if self.visited_plant:
# Needed to prevent drawing over a visited plant
self.draw_plant_ascii(self.visited_plant)
else:
self.draw_plant_ascii(self.plant)
self.screen_lock.release()
def water_gauge(self):
# build nice looking water gauge
water_left_pct = 1 - ((time.time() - self.plant.watered_timestamp)/86400)
# don't allow negative value
water_left_pct = max(0, water_left_pct)
water_left = int(math.ceil(water_left_pct * 10))
water_string = "(" + (")" * water_left) + ("." * (10 - water_left)) + ") " + str(int(water_left_pct * 100)) + "% "
return water_string
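    # worked example: a plant watered 6 hours ago gives water_left_pct = 0.75 and
    # water_left = ceil(7.5) = 8, so the gauge shows 8 ')' marks, 2 '.' marks and "75%"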
def update_plant_live(self):
# updates plant data on menu screen, live!
while not self.exit:
self.plant_string = self.plant.parse_plant()
self.plant_ticks = str(int(self.plant.ticks))
if self.initialized:
self.update_options()
self.draw()
time.sleep(1)
def get_user_input(self):
# gets the user's input
try:
user_in = self.screen.getch() # Gets user input
except Exception as e:
self.__exit__()
if user_in == -1: # Input comes from pipe/file and is closed
raise IOError
## DEBUG KEYS - enable these lines to see curses key codes
# self.screen.addstr(2, 2, str(user_in), curses.A_NORMAL)
# self.screen.refresh()
# Resize sends curses.KEY_RESIZE, update display
if user_in == curses.KEY_RESIZE:
self.maxy,self.maxx = self.screen.getmaxyx()
self.screen.clear()
self.screen.refresh()
# enter, exit, and Q Keys are special cases
if user_in == 10:
return self.options[self.selected]
if user_in == 27:
return self.options[-1]
if user_in == 113:
self.selected = len(self.options) - 1
return
# this is a number; check to see if we can set it
if user_in >= ord('1') and user_in <= ord(str(min(7,len(self.options)))):
self.selected = user_in - ord('0') - 1 # convert keypress back to a number, then subtract 1 to get index
return
# increment or Decrement
down_keys = [curses.KEY_DOWN, 14, ord('j')]
up_keys = [curses.KEY_UP, 16, ord('k')]
if user_in in down_keys: # down arrow
self.selected += 1
if user_in in up_keys: # up arrow
self.selected -=1
# modulo to wrap menu cursor
self.selected = self.selected % len(self.options)
return
def format_garden_data(self,this_garden):
# Returns list of lists (pages) of garden entries
plant_table = []
for plant_id in this_garden:
if this_garden[plant_id]:
if not this_garden[plant_id]["dead"]:
this_plant = this_garden[plant_id]
plant_table.append((this_plant["owner"],
this_plant["age"],
int(this_plant["score"]),
this_plant["description"]))
return plant_table
def format_garden_entry(self, entry):
return "{:14.14} - {:>16} - {:>8}p - {}".format(*entry)
def sort_garden_table(self, table, column, ascending):
""" Sort table in place by a specified column """
def key(entry):
entry = entry[column]
            # When sorting ages, convert to seconds
if column == 1:
coeffs = [24*60*60, 60*60, 60, 1]
nums = [int(n[:-1]) for n in entry.split(":")]
if len(nums) == len(coeffs):
entry = sum(nums[i] * coeffs[i] for i in range(len(nums)))
return entry
return table.sort(key=key, reverse=not ascending)
def filter_garden_table(self, table, pattern):
""" Filter table using a pattern, and return the new table """
def filterfunc(entry):
if len(pattern) == 0:
return True
entry_txt = self.format_garden_entry(entry)
try:
result = bool(re.search(pattern, entry_txt))
except Exception as e:
# In case of invalid regex, don't match anything
result = False
return result
return list(filter(filterfunc, table))
def draw_garden(self):
# draws community garden
# load data from sqlite db
this_garden = self.user_data.retrieve_garden_from_db()
# format data
self.clear_info_pane()
if self.infotoggle == 2:
# the screen IS currently showing the garden (1 page), make the
# text a bunch of blanks to clear it out
self.infotoggle = 0
return
# if infotoggle isn't 2, the screen currently displays other stuff
plant_table_orig = self.format_garden_data(this_garden)
self.infotoggle = 2
# print garden information OR clear it
index = 0
sort_column, sort_ascending = 0, True
sort_keys = ["n", "a", "s", "d"] # Name, Age, Score, Description
plant_table = plant_table_orig
self.sort_garden_table(plant_table, sort_column, sort_ascending)
while True:
entries_per_page = self.maxy - 16
index_max = min(len(plant_table), index + entries_per_page)
plants = plant_table[index:index_max]
page = [self.format_garden_entry(entry) for entry in plants]
self.screen_lock.acquire()
self.draw_info_text(page)
# Multiple pages, paginate and require keypress
page_text = "(%d-%d/%d) | sp/next | bksp/prev | s <col #>/sort | f/filter | q/quit" % (index, index_max, len(plant_table))
self.screen.addstr(self.maxy-2, 2, page_text)
self.screen.refresh()
self.screen_lock.release()
c = self.screen.getch()
if c == -1: # Input comes from pipe/file and is closed
raise IOError
self.infotoggle = 0
# Quit
if c == ord("q") or c == ord("x") or c == 27:
break
# Next page
elif c in [curses.KEY_ENTER, curses.KEY_NPAGE, ord(" "), ord("\n")]:
index += entries_per_page
if index >= len(plant_table):
break
# Previous page
elif c == curses.KEY_BACKSPACE or c == curses.KEY_PPAGE:
index = max(index - entries_per_page, 0)
# Next line
elif c == ord("j") or c == curses.KEY_DOWN:
index = max(min(index + 1, len(plant_table) - 1), 0)
# Previous line
elif c == ord("k") or c == curses.KEY_UP:
index = max(index - 1, 0)
# Sort entries
elif c == ord("s"):
c = self.screen.getch()
if c == -1: # Input comes from pipe/file and is closed
raise IOError
column = -1
if c < 255 and chr(c) in sort_keys:
column = sort_keys.index(chr(c))
elif ord("1") <= c <= ord("4"):
column = c - ord("1")
if column != -1:
if sort_column == column:
sort_ascending = not sort_ascending
else:
sort_column = column
sort_ascending = True
self.sort_garden_table(plant_table, sort_column, sort_ascending)
# Filter entries
elif c == ord("/") or c == ord("f"):
self.screen.addstr(self.maxy-2, 2, "Filter: " + " " * (len(page_text)-8))
pattern = self.get_user_string(10, self.maxy-2, lambda x: x in string.printable)
plant_table = self.filter_garden_table(plant_table_orig, pattern)
self.sort_garden_table(plant_table, sort_column, sort_ascending)
index = 0
# Clear page before drawing next
self.clear_info_pane()
self.clear_info_pane()
def get_plant_description(self, this_plant):
output_text = ""
this_species = this_plant.species_list[this_plant.species]
this_color = this_plant.color_list[this_plant.color]
this_stage = this_plant.stage
stage_descriptions = {
0:[
"You're excited about your new seed.",
"You wonder what kind of plant your seed will grow into.",
"You're ready for a new start with this plant.",
"You're tired of waiting for your seed to grow.",
"You wish your seed could tell you what it needs.",
"You can feel the spirit inside your seed.",
"These pretzels are making you thirsty.",
"Way to plant, Ann!",
"'To see things in the seed, that is genius' - <NAME>",
],
1:[
"The seedling fills you with hope.",
"The seedling shakes in the wind.",
"You can make out a tiny leaf - or is that a thorn?",
"You can feel the seedling looking back at you.",
"You blow a kiss to your seedling.",
"You think about all the seedlings who came before it.",
"You and your seedling make a great team.",
"Your seedling grows slowly and quietly.",
"You meditate on the paths your plant's life could take.",
],
2:[
"The " + this_species + " makes you feel relaxed.",
"You sing a song to your " + this_species + ".",
"You quietly sit with your " + this_species + " for a few minutes.",
"Your " + this_species + " looks pretty good.",
"You play loud techno to your " + this_species + ".",
"You play piano to your " + this_species + ".",
"You play rap music to your " + this_species + ".",
"You whistle a tune to your " + this_species + ".",
"You read a poem to your " + this_species + ".",
"You tell a secret to your " + this_species + ".",
"You play your favorite record for your " + this_species + ".",
],
3:[
"Your " + this_species + " is growing nicely!",
"You're proud of the dedication it took to grow your " + this_species + ".",
"You take a deep breath with your " + this_species + ".",
"You think of all the words that rhyme with " + this_species + ".",
"The " + this_species + " looks full of life.",
"The " + this_species + " inspires you.",
"Your " + this_species + " makes you forget about your problems.",
"Your " + this_species + " gives you a reason to keep going.",
"Looking at your " + this_species + " helps you focus on what matters.",
"You think about how nice this " + this_species + " looks here.",
"The buds of your " + this_species + " might bloom soon.",
],
4:[
"The " + this_color + " flowers look nice on your " + this_species +"!",
"The " + this_color + " flowers have bloomed and fill you with positivity.",
"The " + this_color + " flowers remind you of your childhood.",
"The " + this_color + " flowers remind you of spring mornings.",
"The " + this_color + " flowers remind you of a forgotten memory.",
"The " + this_color + " flowers remind you of your happy place.",
"The aroma of the " + this_color + " flowers energize you.",
"The " + this_species + " has grown beautiful " + this_color + " flowers.",
"The " + this_color + " petals remind you of that favorite shirt you lost.",
"The " + this_color + " flowers remind you of your crush.",
"You smell the " + this_color + " flowers and are filled with peace.",
],
5:[
"You fondly remember the time you spent caring for your " + this_species + ".",
"Seed pods have grown on your " + this_species + ".",
"You feel like your " + this_species + " appreciates your care.",
"The " + this_species + " fills you with love.",
"You're ready for whatever comes after your " + this_species + ".",
"You're excited to start growing your next plant.",
"You reflect on when your " + this_species + " was just a seedling.",
"You grow nostalgic about the early days with your " + this_species + ".",
],
99:[
"You wish you had taken better care of your plant.",
"If only you had watered your plant more often..",
"Your plant is dead, there's always next time.",
"You cry over the withered leaves of your plant.",
"Your plant died. Maybe you need a fresh start.",
],
}
# this_plant.life_stages is a tuple of cumulative tick thresholds, one per stage
# (seed, seedling, young, mature, flowering)
if this_plant.dead:
this_stage = 99
this_stage_descriptions = stage_descriptions[this_stage]
description_num = random.randint(0,len(this_stage_descriptions) - 1)
# If not fully grown
if this_stage <= 4:
# Growth hint
if this_stage >= 1:
last_growth_at = this_plant.life_stages[this_stage - 1]
else:
last_growth_at = 0
ticks_since_last = this_plant.ticks - last_growth_at
ticks_between_stage = this_plant.life_stages[this_stage] - last_growth_at
if ticks_since_last >= ticks_between_stage * 0.8:
output_text += "You notice your plant looks different.\n"
output_text += this_stage_descriptions[description_num] + "\n"
# if seedling
if this_stage == 1:
species_options = [this_plant.species_list[this_plant.species],
this_plant.species_list[(this_plant.species+3) % len(this_plant.species_list)],
this_plant.species_list[(this_plant.species-3) % len(this_plant.species_list)]]
random.shuffle(species_options)
plant_hint = "It could be a(n) " + species_options[0] + ", " + species_options[1] + ", or " + species_options[2]
output_text += plant_hint + ".\n"
# if young plant
if this_stage == 2:
if this_plant.rarity >= 2:
rarity_hint = "You feel like your plant is special."
output_text += rarity_hint + ".\n"
# if mature plant
if this_stage == 3:
color_options = [this_plant.color_list[this_plant.color],
this_plant.color_list[(this_plant.color+3) % len(this_plant.color_list)],
this_plant.color_list[(this_plant.color-3) % len(this_plant.color_list)]]
random.shuffle(color_options)
plant_hint = "You can see the first hints of " + color_options[0] + ", " + color_options[1] + ", or " + color_options[2]
output_text += plant_hint + ".\n"
return output_text
def draw_plant_description(self, this_plant):
# If menu is currently showing something other than the description
self.clear_info_pane()
if self.infotoggle != 1:
# get plant description before printing
output_string = self.get_plant_description(this_plant)
growth_multiplier = 1 + (0.2 * (this_plant.generation-1))
output_string += "Generation: {}\nGrowth rate: {}x".format(this_plant.generation, growth_multiplier)
self.draw_info_text(output_string)
self.infotoggle = 1
else:
# otherwise just set toggle
self.infotoggle = 0
def draw_instructions(self):
# Draw instructions on screen
self.clear_info_pane()
if self.infotoggle != 4:
instructions_txt = ("welcome to botany. you've been given a seed\n"
"that will grow into a beautiful plant. check\n"
"in and water your plant every 24h to keep it\n"
"growing. 5 days without water = death. your\n"
"plant depends on you & your friends to live!\n"
"more info is available in the readme :)\n"
"https://github.com/jifunks/botany/blob/master/README.md\n"
" cheers,\n"
" curio\n"
)
self.draw_info_text(instructions_txt)
self.infotoggle = 4
else:
self.infotoggle = 0
def clear_info_pane(self):
# Clears bottom part of screen
self.screen_lock.acquire()
clear_bar = " " * (self.maxx - 3)
this_y = 14
while this_y < self.maxy:
self.screen.addstr(this_y, 2, clear_bar, curses.A_NORMAL)
this_y += 1
self.screen.refresh()
self.screen_lock.release()
def draw_info_text(self, info_text, y_offset = 0):
# print lines of text to info pane at bottom of screen
self.screen_lock.acquire()
if type(info_text) is str:
info_text = info_text.splitlines()
for y, line in enumerate(info_text, 2):
this_y = y+12 + y_offset
if len(line) > self.maxx - 3:
line = line[:self.maxx-3]
if this_y < self.maxy:
self.screen.addstr(this_y, 2, line, curses.A_NORMAL)
self.screen.refresh()
self.screen_lock.release()
def harvest_confirmation(self):
self.clear_info_pane()
# get plant description before printing
max_stage = len(self.plant.stage_list) - 1
harvest_text = ""
if not self.plant.dead:
if self.plant.stage == max_stage:
harvest_text += "Congratulations! You raised your plant to its final stage of growth.\n"
harvest_text += "Your next plant will grow at a speed of: {}x\n".format(1 + (0.2 * self.plant.generation))
harvest_text += "If you harvest your plant you'll start over from a seed.\nContinue? (Y/n)"
self.draw_info_text(harvest_text)
try:
user_in = self.screen.getch() # Gets user input
except Exception as e:
self.__exit__()
if user_in == -1: # Input comes from pipe/file and is closed
raise IOError
if user_in in [ord('Y'), ord('y')]:
self.plant.start_over()
else:
pass
self.clear_info_pane()
def build_weekly_visitor_output(self, visitors):
visitor_block = ""
visitor_line = ""
for visitor in visitors:
this_visitor_string = str(visitor) + "({}) ".format(visitors[str(visitor)])
if len(visitor_line + this_visitor_string) > self.maxx-3:
visitor_block += '\n'
visitor_line = ""
visitor_block += this_visitor_string
visitor_line += this_visitor_string
return visitor_block
def build_latest_visitor_output(self, visitors):
visitor_line = ""
for visitor in visitors:
if len(visitor_line + visitor) > self.maxx-10:
visitor_line += "and more"
break
visitor_line += visitor + ' '
return [visitor_line]
def get_weekly_visitors(self):
game_dir = os.path.dirname(os.path.realpath(__file__))
garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite')
conn = sqlite3.connect(garden_db_path)
c = conn.cursor()
c.execute("SELECT * FROM visitors WHERE garden_name = '{}' ORDER BY weekly_visits".format(self.plant.owner))
visitor_data = c.fetchall()
conn.close()
visitor_block = ""
visitor_line = ""
if visitor_data:
for visitor in visitor_data:
visitor_name = visitor[2]
weekly_visits = visitor[3]
this_visitor_string = "{}({}) ".format(visitor_name, weekly_visits)
if len(visitor_line + this_visitor_string) > self.maxx-3:
visitor_block += '\n'
visitor_line = ""
visitor_block += this_visitor_string
visitor_line += this_visitor_string
else:
visitor_block = 'nobody :('
return visitor_block
def get_user_string(self, xpos=3, ypos=15, filterfunc=str.isalnum, completer=None):
# filter allowed characters using filterfunc, alphanumeric by default
user_string = ""
user_input = 0
if completer:
completer = completer(self)
while user_input != 10:
user_input = self.screen.getch()
if user_input == -1: # Input comes from pipe/file and is closed
raise IOError
self.screen_lock.acquire()
# osx and unix backspace chars...
if user_input == 127 or user_input == 263:
if len(user_string) > 0:
user_string = user_string[:-1]
if completer:
completer.update_input(user_string)
self.screen.addstr(ypos, xpos, " " * (self.maxx-xpos-1))
elif user_input in [ord('\t'), curses.KEY_BTAB] and completer:
direction = 1 if user_input == ord('\t') else -1
user_string = completer.complete(direction)
self.screen.addstr(ypos, xpos, " " * (self.maxx-xpos-1))
elif user_input < 256 and user_input != 10:
if filterfunc(chr(user_input)) or chr(user_input) == '_':
user_string += chr(user_input)
if completer:
completer.update_input(user_string)
self.screen.addstr(ypos, xpos, str(user_string))
self.screen.refresh()
self.screen_lock.release()
return user_string
def visit_handler(self):
self.clear_info_pane()
self.draw_info_text("whose plant would you like to visit?")
self.screen.addstr(15, 2, '~')
if self.plant.visitors:
latest_visitor_string = self.build_latest_visitor_output(self.plant.visitors)
self.draw_info_text("since last time, you were visited by: ", 3)
self.draw_info_text(latest_visitor_string, 4)
self.plant.visitors = []
weekly_visitor_text = self.get_weekly_visitors()
self.draw_info_text("this week you've been visited by: ", 6)
self.draw_info_text(weekly_visitor_text, 7)
guest_garden = self.get_user_string(completer = completer.LoginCompleter)
if not guest_garden:
self.clear_info_pane()
return None
if guest_garden.lower() == getpass.getuser().lower():
self.screen.addstr(16, 2, "you're already here!")
self.screen.getch()
self.clear_info_pane()
return None
home_folder = os.path.dirname(os.path.expanduser("~"))
guest_json = home_folder + "/{}/.botany/{}_plant_data.json".format(guest_garden, guest_garden)
guest_plant_description = ""
if os.path.isfile(guest_json):
with open(guest_json) as f:
visitor_data = json.load(f)
guest_plant_description = visitor_data['description']
self.visited_plant = self.get_visited_plant(visitor_data)
guest_visitor_file = home_folder + "/{}/.botany/visitors.json".format(guest_garden)
if os.path.isfile(guest_visitor_file):
water_success = self.water_on_visit(guest_visitor_file)
if water_success:
self.screen.addstr(16, 2, "...you watered ~{}'s {}...".format(str(guest_garden), guest_plant_description))
if self.visited_plant:
self.draw_plant_ascii(self.visited_plant)
else:
self.screen.addstr(16, 2, "{}'s garden is locked, but you can see in...".format(guest_garden))
else:
self.screen.addstr(16, 2, "i can't seem to find directions to {}...".format(guest_garden))
try:
self.screen.getch()
self.clear_info_pane()
self.draw_plant_ascii(self.plant)
finally:
self.visited_plant = None
def water_on_visit(self, guest_visitor_file):
visitor_data = {}
# using -1 here so that old running instances can be watered
guest_data = {'user': getpass.getuser(), 'timestamp': int(time.time()) - 1}
if os.path.isfile(guest_visitor_file):
if not os.access(guest_visitor_file, os.W_OK):
return False
with open(guest_visitor_file) as f:
visitor_data = json.load(f)
visitor_data.append(guest_data)
with open(guest_visitor_file, mode='w') as f:
f.write(json.dumps(visitor_data, indent=2))
return True
def get_visited_plant(self, visitor_data):
""" Returns a drawable pseudo plant object from json data """
class VisitedPlant: pass
plant = VisitedPlant()
plant.stage = 0
plant.species = 0
if "is_dead" not in visitor_data:
return None
plant.dead = visitor_data["is_dead"]
if plant.dead:
return plant
if "stage" in visitor_data:
stage = visitor_data["stage"]
if stage in self.plant.stage_list:
plant.stage = self.plant.stage_list.index(stage)
if "species" in visitor_data:
species = visitor_data["species"]
if species in self.plant.species_list:
plant.species = self.plant.species_list.index(species)
else:
return None
elif plant.stage > 1:
return None
return plant
def handle_request(self, request):
# Menu options call functions here
if request == None: return
if request == "harvest":
self.harvest_confirmation()
if request == "water":
self.plant.water()
if request == "look":
try:
self.draw_plant_description(self.plant)
except Exception as exception:
self.screen.refresh()
# traceback.print_exc()
if request == "instructions":
try:
self.draw_instructions()
except Exception as exception:
self.screen.refresh()
# traceback.print_exc()
if request == "visit":
try:
self.visit_handler()
except Exception as exception:
self.screen.refresh()
# traceback.print_exc()
if request == "garden":
try:
self.draw_garden()
except Exception as exception:
self.screen.refresh()
# traceback.print_exc()
def __exit__(self):
self.exit = True
cleanup()
def cleanup():
try:
curses.curs_set(2)
except curses.error:
# cursor not supported; just ignore
pass
curses.endwin()
os.system('clear')
``` |
{
"source": "joyantaDebnath/CERES",
"score": 3
} |
#### File: src/modules/helper.py
```python
def hex_n_bytes_to_int(x):
if x == []:
return None
elif type(x) == list and len(x) > 0:
temp = []
for i in x:
temp.append(int(i, 16))
q = ''.join(format(x, '02X') for x in bytes(temp))
return int(q, 16)
else:
return int(x, 16)
def array_to_bytes(x):
if x == []:
return None
temp = []
for k in x:
temp.append(int(k, 16))
return bytes(temp)
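# Editor's illustrative checks (not in the original source); the helpers above take
# uppercase hex strings, or lists of them, as produced by the parsers below:
#   hex_n_bytes_to_int('1F')         -> 31
#   hex_n_bytes_to_int(['0A', 'FF']) -> 2815 (0x0AFF)
#   array_to_bytes(['0A', 'FF'])     -> b'\n\xff'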
def get_ku_padding_count(elem):
if elem == []:
return 0
temp = int(elem[len(elem) - 1], 16)
bin_temp = "{0:08b}".format(temp)
padding = len(bin_temp) - len(bin_temp.rstrip('0'))
return padding
def map_version(x):
if type(x) == list and len(x) == 1: # DSL
x = x[0]
if x is None:
return [0, 0]
return x
def map_bool(x):
if type(x) == list and len(x) == 1: # DSL
x = x[0]
return bool(x) # bool(None) == False
def map_ku(x):
temp = ""
for k in x:
if type(k) == bytes:
k = k.hex()
temp = temp + "{0:08b}".format(int(k, 16))
kus = []
for j in range(0, len(temp)):
if j == 0 and temp[j] == '1':
kus.append('digitalSignature')
elif j == 1 and temp[j] == '1':
kus.append('nonRepudiation/contentCommitment')
elif j == 2 and temp[j] == '1':
kus.append('keyEncipherment')
elif j == 3 and temp[j] == '1':
kus.append('dataEncipherment')
elif j == 4 and temp[j] == '1':
kus.append('keyAgreement')
elif j == 5 and temp[j] == '1':
kus.append('keyCertSign')
elif j == 6 and temp[j] == '1':
kus.append('cRLSign')
elif j == 7 and temp[j] == '1':
kus.append('encipherOnly')
elif j == 8 and temp[j] == '1':
kus.append('decipherOnly')
return kus
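# Editor's illustrative example (not in the original source): a single KeyUsage byte
# 'A0' is 10100000 in binary, so
#   map_ku(['A0']) -> ['digitalSignature', 'keyEncipherment']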
def map_eku(x):
if len(x) == 1 and type(x[0]) == tuple: # DSL
x = x[0]
temp = []
for oid in x:
if oid[0] == 3100166514561975041:
temp.append('serverAuth')
elif oid[0] == 3100166514561975042:
temp.append('clientAuth')
elif oid[0] == 3100166514561975043:
temp.append('codeSigning')
elif oid[0] == 3100166514561975044:
temp.append('emailProtection')
elif oid[0] == 3100166514561986563:
temp.append('timeStamping')
elif oid[0] == 3100166514561975049:
temp.append('OCSPSigning')
else:
temp.append(oid[0])
return temp
def addtotuple(a, b):
if b is None and a is not None:
return a
elif a is None and b is not None:
return b
elif type(a) == tuple and type(b) == tuple:
return a + b
elif type(a) == tuple and type(b) != tuple:
return a + (b,)
elif type(a) != tuple and type(b) == tuple:
return (a,) + b
else:
return (a, b)
def Timedecoder(Value):
return Value.Year, Value.Month, Value.Day, Value.Hour, Value.Minute, Value.Second
### for dsl_based_parsers
cert = []
cur_index = -1
def initialize(certin):
global cert, cur_index
cert = certin
cur_index = 0
def match(p, flag, i):
global cert, cur_index
if flag:
if i < len(cert) and str(cert[i]) == p:
i = i + 1
cur_index = i
return True, 1, None, i
else:
return False, 0, None, i
else:
if i >= len(cert) or str(cert[i]) != p:
return True, 0, None, i
else:
return False, 0, None, i
def get_ku_padding_count_dsl(y):
elem = y[0]
if elem == None:
return 0
elif type(elem) == tuple:
x = int(elem[len(elem) - 1])
else:
x = int(elem)
lastkuval = "{0:08b}".format(x)
padding = len(lastkuval) - len(lastkuval.rstrip('0'))
return padding
def splituple(y):
x = y[0]
if x == None:
return x
p = list()
p = Func_split(x, p)
return tuple(p)
def Func_split(data, temp):
if data[1] == None:
temp.append(data[0])
return temp
elif type(data[1]) != tuple:
temp.append(data)
return temp
temp.append(data[0])
return Func_split(data[1], temp)
def hex_n_bytes_to_int_dsl(y):
x = y[0]
if x == None:
return None
elif type(x) == tuple:
temp = []
for i in x:
temp.append(int(i))
q = ''.join(format(x, '02X') for x in bytes(temp))
return int(q, 16)
else:
return int(x)
def array_to_bytes_dsl(y):
x = y[0]
if x == None:
return None
elif (type(x) == tuple):
temp = []
for k in x:
temp.append(int(k))
return bytes(temp)
else:
return bytes([int(x)])
def endcheck(y):
op = y[0]
global cur_index
if op == 1:
return cur_index < len(cert)
elif op == 2:
return cur_index == len(cert)
elif op == 3:
return cur_index <= len(cert)
return False
def getintvalue(x):
if len(x) == 1:
if len(x[0]) == 2:
return x[0][0]
return None
def addtotuple_dsl(x):
a, b = x[0], x[1]
if b is None and a is not None:
return a
elif a is None and b is not None:
return b
elif type(a) == tuple and type(b) == tuple:
return a + b
elif type(a) == tuple and type(b) != tuple:
return a + (b,)
elif type(a) != tuple and type(b) == tuple:
return (a,) + b
else:
return (a, b)
def checkKnownExtId(id):
if id == 5578019:
return True, 'auth_key_id'
elif id == 5577998:
return True, 'sub_key_id'
elif id == 5577999:
return True, 'key_usage'
elif id == 5578021:
return True, 'ext_key_usage'
elif id == 5578001:
return True, 'subject_alt_name'
elif id == 5578002:
return True, 'issuer_alt_name'
elif id == 5578003:
return True, 'basic_constraints'
elif id == 5578016:
return True, 'cert_policies'
elif id == 5578015:
return True, 'crl_dist_points'
elif id == 3100166514561974529:
return True, 'authority_info_access'
else:
return False, None
```
#### File: parsers/combinator_based/parser_rsa_signature.py
```python
from modules.helper import *
from modules.x509_ds import *
from parsec import *
@generate('')
def short_length():
x = yield regex(r'[0-7][0-9A-F]').parsecmap(hex_n_bytes_to_int)
return 1, x
@generate('')
def long_length():
x = yield regex(r'8[1-6]')
size = int(x) - 80
fields = yield count(any_byte, size).parsecmap(hex_n_bytes_to_int)
if not (fields >= 128):
raise ParseError("not proper length in RSA_Signature", "", -1)
return 1 + size, fields
@generate('')
def parameter():
tag = yield any_byte.parsecmap(hex_n_bytes_to_int)
s1, size = yield length
fields = yield count(any_byte, size).parsecmap(array_to_bytes)
s = 1 + s1 + size
if not (tag == 5 and size == 0):
raise ParseError("not proper parameter in RSA_Signature", "", -1)
return s, Parameter([tag, fields])
@generate('')
def oid_parser():
yield string('06')
s1, size = yield length
fields = yield count(any_byte, size).parsecmap(hex_n_bytes_to_int)
s = 1 + s1 + size
return s, [fields, size]
@generate('')
def algorithm_identifier():
yield string('30')
s1, size = yield length
s2, oid = yield oid_parser
if size - s2 > 0:
s3, param = yield parameter
else:
s3, param = 0, None
s = 1 + s1 + s2 + s3
if (size != s2 + s3):
raise ParseError("not proper length in RSA_Signature", "", -1)
return s, AlgorithmIdentifier([oid, param])
@generate('')
def rsa_signature():
yield string('00')
yield string('01')
padding = yield regex(r'(FF)*') # at least 8
if (len(padding) / 2) < 8:
raise ParseError("not proper padding in RSA_Signature", "", -1)
yield string('00')
yield string('30')
s1, size1 = yield length
s2, algo_ident = yield algorithm_identifier
yield string('04')
s3, size3 = yield length
digest = yield count(any_byte, size3).parsecmap(array_to_bytes)
s = 1 + 1 + len(padding) / 2 + 1 + 1 + s1 + s2 + 1 + s3 + size3
if size1 != s2 + 1 + s3 + size3:
raise ParseError("not proper length in RSA_Signature", "", -1)
return s, [algo_ident, digest]
def run(inp):
x = program.parse(inp)
if x[0] != len(inp) / 2:
raise ParseError("not proper length in RSA_Signature", "", -1)
return x[1]
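# Editor's note (illustrative, not from the original source): `inp` is expected to be
# the uppercase hex of an EMSA-PKCS1-v1_5 encoded block, i.e.
#   00 01 FF..FF 00 || DigestInfo(AlgorithmIdentifier, OCTET STRING digest)
# and run() returns [AlgorithmIdentifier, digest_bytes] when the structure parses.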
whitespace = regex(r'\s')
ignore = many(whitespace)
any_byte = regex(r'[0-9A-F][0-9A-F]')
length = short_length | long_length
program = rsa_signature
```
#### File: dsl_based/grammar/generate-code.py
```python
from antlr4 import *
from build.dsl_grammarLexer import dsl_grammarLexer
from customVisitor import *
import sys
inputfile = sys.argv[1]
outputfile = 'out.py'
def main():
lexer = dsl_grammarLexer(FileStream(inputfile))
stream = CommonTokenStream(lexer)
parser = dsl_grammarParser(stream)
try:
visitor = customVisitor(outputfile)
tree = parser.start()
visitor.visit(tree)
except:
raise
if __name__ == '__main__':
main()
``` |
{
"source": "joyanujoy/athena-buildings",
"score": 2
} |
#### File: athena-buildings/models/buildings_generator.py
```python
from io import StringIO
import boto3
import pandas as pd
import geopandas as gpd
from shapely.geometry import Polygon
from models.utils import AthenaWaiter, download_file_from_s3
class BuildingsGenerator(object):
def __init__(self, min_x, max_x, min_y, max_y, bucket, folder,
query_id=None):
"""
if query_id is given, we expect postprocessing the output only.
"""
self.min_x, self.max_x = min_x, max_x
self.min_y, self.max_y = min_y, max_y
self.bucket = bucket
self.folder = folder
self.query_id = query_id
def get_query_string(self):
return "WITH nodes_in_bbox AS( "\
" SELECT id, lat, lon, type, tags FROM planet "\
" WHERE type='node' "\
" AND lon BETWEEN {0} AND {1} "\
" AND lat BETWEEN {2} AND {3} "\
"), "\
"ways AS( "\
" SELECT type, id, tags, nds FROM planet "\
" WHERE type='way' "\
"), "\
"relation_ways AS( "\
" SELECT r.id, r.tags, way.ref, way.role, way_position "\
" FROM planet r "\
" CROSS JOIN UNNEST(r.members) "\
" WITH ORDINALITY AS m (way, way_position) "\
" WHERE r.type='relation' "\
" AND element_at(r.tags, 'type')='multipolygon' "\
" AND way.role='outer' AND way.type='way' "\
") "\
"SELECT w.id AS way_id, "\
" n.id AS node_id, "\
" r.id AS relation_id, "\
" COALESCE(r.id, w.id) AS building_id, "\
" n.lon, n.lat, "\
" node_position, "\
" COALESCE(r.tags['name'], w.tags['name']) AS name, "\
" COALESCE(r.tags['building'], w.tags['building']) AS amenity "\
"FROM ways w "\
"CROSS JOIN UNNEST(w.nds) "\
"WITH ORDINALITY AS t (nd, node_position) "\
"JOIN nodes_in_bbox n ON n.id = nd.ref "\
"LEFT OUTER JOIN relation_ways r ON w.id=r.ref "\
"WHERE element_at(COALESCE(r.tags, w.tags), 'building') "\
" IS NOT NULL "\
"ORDER BY relation_id, way_position, way_id, node_position "\
.format(self.min_x, self.max_x, self.min_y, self.max_y)
def get_query_id(self):
client = boto3.client(
'athena',
region_name='us-east-1'
)
response = client.start_query_execution(
QueryString=self.get_query_string(),
QueryExecutionContext={
'Database': 'default'
},
ResultConfiguration={
'OutputLocation': 's3://{0}/{1}'.format(
self.bucket,
self.folder
)
}
)
return response['QueryExecutionId']
def get_results_key(self, query_id):
return '{0}/{1}.csv'.format(self.folder, query_id)
def get_results_df(self, query_id):
waiter = AthenaWaiter(max_tries=100)
waiter.wait(
bucket=self.bucket,
key=self.get_results_key(query_id),
query_id=query_id
)
raw_result = StringIO(
download_file_from_s3(
self.get_results_key(query_id),
self.bucket
)
)
return pd.read_csv(raw_result, encoding='utf-8')
@staticmethod
def create_polygon(way):
node_list = list(zip(way.lon, way.lat))
return Polygon(node_list) if len(node_list) >= 3 \
else None
def generate(self):
all_buildings = gpd.GeoDataFrame()
if not self.query_id:
self.query_id = self.get_query_id()
results = self.get_results_df(self.query_id)
ways = results.groupby(by=['building_id', 'way_id'])
amenity_types = ['civic', 'college', 'commercial', 'hospital',
'industrial', 'office', 'public', 'retail',
'train_station', 'transportation', 'university']
for _, way in ways:
metadata = way.iloc[0]
if metadata['amenity'] not in amenity_types:
continue
polygon = self.create_polygon(way)
if polygon:
metadata = way.iloc[0]
fill_color = '#D4AF37'
building_gdf = gpd.GeoDataFrame(
[[
metadata['name'],
metadata['amenity'],
fill_color,
polygon
]],
columns=[
'name',
'amenity',
'color',
'geometry'
]
)
all_buildings = all_buildings.append(building_gdf)
return all_buildings.to_json(ensure_ascii=False)
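# Editor's usage sketch (bucket/folder names are placeholders, not from this repo):
#   generator = BuildingsGenerator(min_x=-0.20, max_x=-0.10, min_y=51.50, max_y=51.55,
#                                  bucket='my-athena-results', folder='osm-buildings')
#   geojson_str = generator.generate()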
``` |
{
"source": "Joyash23/resume-website",
"score": 3
} |
#### File: resume/accounts/forms.py
```python
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
class UserLoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
def clean(self, *args, **kwargs):
username = self.cleaned_data.get("username")
password = self.cleaned_data.get("password")
if username and password:
user = authenticate(username=username, password=password)
if not user:
raise forms.ValidationError("User Does Not Exist.")
if not user.check_password(password):
raise forms.ValidationError("Password Does not Match.")
if not user.is_active:
raise forms.ValidationError("User is not Active.")
return super(UserLoginForm, self).clean(*args, **kwargs)
class UserRegistrationForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput)
confirm_password = forms.CharField(widget=forms.PasswordInput)
class Meta:
model = User
fields = ("username", "password", "confirm_password")
def clean_confirm_password(self):
password = self.cleaned_data.get("password")
confirm_password = self.cleaned_data.get("confirm_password")
if password != confirm_password:
raise forms.ValidationError("Passwords Must Match")
return password
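# Editor's usage sketch (the surrounding view code is assumed, not part of this module):
#   form = UserRegistrationForm(data={"username": "alice",
#                                     "password": "s3cret!",
#                                     "confirm_password": "s3cret!"})
#   if form.is_valid():
#       user = form.save(commit=False)
#       user.set_password(form.cleaned_data["password"])
#       user.save()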
``` |
{
"source": "joybanerjee08/imgaug",
"score": 2
} |
#### File: imgaug/checks/check_segmentation_maps.py
```python
from __future__ import print_function
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
def main():
quokka = ia.quokka(size=0.5)
h, w = quokka.shape[0:2]
c = 4
segmap = np.zeros((h, w, c), dtype=np.float32)
segmap[70:120, 90:150, 0] = 1.0
segmap[30:70, 50:65, 1] = 1.0
segmap[20:50, 55:85, 2] = 1.0
segmap[120:140, 0:20, 3] = 1.0
segmap = ia.SegmentationMapOnImage(segmap, quokka.shape)
print("Affine...")
aug = iaa.Affine(translate_px={"x": 20}, mode="constant", cval=128)
quokka_aug = aug.augment_image(quokka)
segmaps_aug = aug.augment_segmentation_maps([segmap])[0]
segmaps_drawn = segmap.draw_on_image(quokka)
segmaps_aug_drawn = segmaps_aug.draw_on_image(quokka_aug)
ia.imshow(
np.hstack([
segmaps_drawn,
segmaps_aug_drawn
])
)
print("Affine with mode=edge...")
aug = iaa.Affine(translate_px={"x": 20}, mode="edge")
quokka_aug = aug.augment_image(quokka)
segmaps_aug = aug.augment_segmentation_maps([segmap])[0]
segmaps_drawn = segmap.draw_on_image(quokka)
segmaps_aug_drawn = segmaps_aug.draw_on_image(quokka_aug)
ia.imshow(
np.hstack([
segmaps_drawn,
segmaps_aug_drawn
])
)
print("PiecewiseAffine...")
aug = iaa.PiecewiseAffine(scale=0.04)
aug_det = aug.to_deterministic()
quokka_aug = aug_det.augment_image(quokka)
segmaps_aug = aug_det.augment_segmentation_maps([segmap])[0]
segmaps_drawn = segmap.draw_on_image(quokka)
segmaps_aug_drawn = segmaps_aug.draw_on_image(quokka_aug)
ia.imshow(
np.hstack([
segmaps_drawn,
segmaps_aug_drawn
])
)
print("PerspectiveTransform...")
aug = iaa.PerspectiveTransform(scale=0.04)
aug_det = aug.to_deterministic()
quokka_aug = aug_det.augment_image(quokka)
segmaps_aug = aug_det.augment_segmentation_maps([segmap])[0]
segmaps_drawn = segmap.draw_on_image(quokka)
segmaps_aug_drawn = segmaps_aug.draw_on_image(quokka_aug)
ia.imshow(
np.hstack([
segmaps_drawn,
segmaps_aug_drawn
])
)
print("ElasticTransformation alpha=3, sig=0.5...")
aug = iaa.ElasticTransformation(alpha=3.0, sigma=0.5)
aug_det = aug.to_deterministic()
quokka_aug = aug_det.augment_image(quokka)
segmaps_aug = aug_det.augment_segmentation_maps([segmap])[0]
segmaps_drawn = segmap.draw_on_image(quokka)
segmaps_aug_drawn = segmaps_aug.draw_on_image(quokka_aug)
ia.imshow(
np.hstack([
segmaps_drawn,
segmaps_aug_drawn
])
)
print("ElasticTransformation alpha=10, sig=3...")
aug = iaa.ElasticTransformation(alpha=10.0, sigma=3.0)
aug_det = aug.to_deterministic()
quokka_aug = aug_det.augment_image(quokka)
segmaps_aug = aug_det.augment_segmentation_maps([segmap])[0]
segmaps_drawn = segmap.draw_on_image(quokka)
segmaps_aug_drawn = segmaps_aug.draw_on_image(quokka_aug)
ia.imshow(
np.hstack([
segmaps_drawn,
segmaps_aug_drawn
])
)
print("CopAndPad mode=constant...")
aug = iaa.CropAndPad(px=(-10, 10, 15, -15), pad_mode="constant", pad_cval=128)
aug_det = aug.to_deterministic()
quokka_aug = aug_det.augment_image(quokka)
segmaps_aug = aug_det.augment_segmentation_maps([segmap])[0]
segmaps_drawn = segmap.draw_on_image(quokka)
segmaps_aug_drawn = segmaps_aug.draw_on_image(quokka_aug)
ia.imshow(
np.hstack([
segmaps_drawn,
segmaps_aug_drawn
])
)
print("CropAndPad mode=edge...")
aug = iaa.CropAndPad(px=(-10, 10, 15, -15), pad_mode="edge")
aug_det = aug.to_deterministic()
quokka_aug = aug_det.augment_image(quokka)
segmaps_aug = aug_det.augment_segmentation_maps([segmap])[0]
segmaps_drawn = segmap.draw_on_image(quokka)
segmaps_aug_drawn = segmaps_aug.draw_on_image(quokka_aug)
ia.imshow(
np.hstack([
segmaps_drawn,
segmaps_aug_drawn
])
)
print("Scale...")
aug = iaa.Scale(0.5, interpolation="nearest")
aug_det = aug.to_deterministic()
quokka_aug = aug_det.augment_image(quokka)
segmaps_aug = aug_det.augment_segmentation_maps([segmap])[0]
segmaps_drawn = segmap.draw_on_image(quokka)
segmaps_aug_drawn = segmaps_aug.draw_on_image(quokka_aug)
ia.imshow(ia.draw_grid([segmaps_drawn, segmaps_aug_drawn], cols=2))
print("Alpha...")
aug = iaa.Alpha(0.7, iaa.Affine(rotate=20))
aug_det = aug.to_deterministic()
quokka_aug = aug_det.augment_image(quokka)
segmaps_aug = aug_det.augment_segmentation_maps([segmap])[0]
segmaps_drawn = segmap.draw_on_image(quokka)
segmaps_aug_drawn = segmaps_aug.draw_on_image(quokka_aug)
ia.imshow(
np.hstack([
segmaps_drawn,
segmaps_aug_drawn
])
)
if __name__ == "__main__":
main()
``` |
{
"source": "joybh98/manim",
"score": 3
} |
#### File: manim/mobject/coordinate_systems.py
```python
__all__ = [
"CoordinateSystem",
"Axes",
"ThreeDAxes",
"NumberPlane",
"PolarPlane",
"ComplexPlane",
]
import fractions as fr
import numbers
from typing import Callable, Dict, Iterable, Optional, Sequence, Tuple, Union
import numpy as np
from colour import Color
from manim.mobject.opengl_compatibility import ConvertToOpenGL
from .. import config
from ..constants import *
from ..mobject.functions import ParametricFunction
from ..mobject.geometry import (
Arrow,
Circle,
DashedLine,
Dot,
Line,
Rectangle,
RegularPolygon,
)
from ..mobject.number_line import NumberLine
from ..mobject.svg.tex_mobject import MathTex
from ..mobject.types.vectorized_mobject import (
Mobject,
VDict,
VectorizedPoint,
VGroup,
VMobject,
)
from ..utils.color import (
BLACK,
BLUE,
BLUE_D,
GREEN,
LIGHT_GREY,
WHITE,
YELLOW,
color_gradient,
invert_color,
)
from ..utils.config_ops import merge_dicts_recursively, update_dict_recursively
from ..utils.simple_functions import binary_search
from ..utils.space_ops import angle_of_vector
class CoordinateSystem:
"""
Abstract class for Axes and NumberPlane
Examples
--------
.. manim:: CoordSysExample
:save_last_frame:
class CoordSysExample(Scene):
def construct(self):
# the location of the ticks depends on the x_range and y_range.
grid = Axes(
x_range=[0, 1, 0.05], # step size determines num_decimal_places.
y_range=[0, 1, 0.05],
x_length=9,
y_length=5.5,
axis_config={
"numbers_to_include": np.arange(0, 1 + 0.1, 0.1),
"number_scale_value": 0.5,
},
tips=False,
)
# Labels for the x-axis and y-axis.
y_label = grid.get_y_axis_label("y", edge=LEFT, direction=LEFT, buff=0.4)
x_label = grid.get_x_axis_label("x")
grid_labels = VGroup(x_label, y_label)
graphs = VGroup()
for n in np.arange(1, 20 + 0.5, 0.5):
graphs += grid.get_graph(lambda x: x ** n, color=WHITE)
graphs += grid.get_graph(
lambda x: x ** (1 / n), color=WHITE, use_smoothing=False
)
# Extra lines and labels for point (1,1)
graphs += grid.get_horizontal_line(grid.c2p(1, 1, 0), color=BLUE)
graphs += grid.get_vertical_line(grid.c2p(1, 1, 0), color=BLUE)
graphs += Dot(point=grid.c2p(1, 1, 0), color=YELLOW)
graphs += Tex("(1,1)").scale(0.75).next_to(grid.c2p(1, 1, 0))
title = Title(
# spaces between braces to prevent SyntaxError
r"Graphs of $y=x^{ {1}\over{n} }$ and $y=x^n (n=1,2,3,...,20)$",
include_underline=False,
scale_factor=0.85,
)
self.add(title, graphs, grid, grid_labels)
"""
def __init__(
self,
x_range=None,
y_range=None,
x_length=None,
y_length=None,
dimension=2,
):
self.dimension = dimension
default_step = 1
if x_range is None:
x_range = [
round(-config["frame_x_radius"]),
round(config["frame_x_radius"]),
default_step,
]
elif len(x_range) == 2:
x_range = [*x_range, default_step]
if y_range is None:
y_range = [
round(-config["frame_y_radius"]),
round(config["frame_y_radius"]),
default_step,
]
elif len(y_range) == 2:
y_range = [*y_range, default_step]
self.x_range = x_range
self.y_range = y_range
self.x_length = x_length
self.y_length = y_length
self.num_sampled_graph_points_per_tick = 10
def coords_to_point(self, *coords):
raise NotImplementedError()
def point_to_coords(self, point):
raise NotImplementedError()
def c2p(self, *coords):
"""Abbreviation for coords_to_point"""
return self.coords_to_point(*coords)
def p2c(self, point):
"""Abbreviation for point_to_coords"""
return self.point_to_coords(point)
def get_axes(self):
raise NotImplementedError()
def get_axis(self, index):
return self.get_axes()[index]
def get_x_axis(self):
return self.get_axis(0)
def get_y_axis(self):
return self.get_axis(1)
def get_z_axis(self):
return self.get_axis(2)
def get_x_axis_label(self, label_tex, edge=UR, direction=UR, **kwargs):
return self.get_axis_label(
label_tex, self.get_x_axis(), edge, direction, **kwargs
)
def get_y_axis_label(
self, label_tex, edge=UR, direction=UP * 0.5 + RIGHT, **kwargs
):
return self.get_axis_label(
label_tex, self.get_y_axis(), edge, direction, **kwargs
)
# move to a util_file, or Mobject()??
@staticmethod
def create_label_tex(label_tex) -> "Mobject":
"""Checks if the label is a ``float``, ``int`` or a ``str`` and creates a :class:`~.MathTex` label accordingly.
Parameters
----------
label_tex : The label to be compared against the above types.
Returns
-------
:class:`~.Mobject`
The label.
"""
if (
isinstance(label_tex, float)
or isinstance(label_tex, int)
or isinstance(label_tex, str)
):
label_tex = MathTex(label_tex)
return label_tex
def get_axis_label(
self,
label: Union[float, str, "Mobject"],
axis: "Mobject",
edge: Sequence[float],
direction: Sequence[float],
buff: float = SMALL_BUFF,
) -> "Mobject":
"""Gets the label for an axis.
Parameters
----------
label
The label. Can be any mobject or `int/float/str` to be used with :class:`~.MathTex`
axis
The axis to which the label will be added.
edge
The edge of the axes to which the label will be added. ``RIGHT`` adds to the right side of the axis
direction
Allows for further positioning of the label.
buff
The distance of the label from the line.
Returns
-------
:class:`~.Mobject`
The positioned label along the given axis.
"""
label = self.create_label_tex(label)
label.next_to(axis.get_edge_center(edge), direction, buff=buff)
label.shift_onto_screen(buff=MED_SMALL_BUFF)
return label
def get_axis_labels(
self,
x_label: Union[float, str, "Mobject"] = "x",
y_label: Union[float, str, "Mobject"] = "y",
) -> "VGroup":
"""Defines labels for the x_axis and y_axis of the graph.
Parameters
----------
x_label
The label for the x_axis
y_label
The label for the y_axis
Returns
-------
:class:`~.VGroup`
A :class:`~.VGroup` of the labels for the x_axis and y_axis.
See Also
--------
:class:`get_x_axis_label`
:class:`get_y_axis_label`
"""
self.axis_labels = VGroup(
self.get_x_axis_label(x_label),
self.get_y_axis_label(y_label),
)
return self.axis_labels
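# Editor's illustrative usage (not part of the original source; label strings are arbitrary):
#   ax = Axes(x_range=[-3, 3], y_range=[-2, 2])
#   labels = ax.get_axis_labels(x_label="t", y_label="f(t)")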
def add_coordinates(
self,
*axes_numbers: Union[
Optional[Iterable[float]], Union[Dict[float, Union[str, float, "Mobject"]]]
],
**kwargs,
):
"""Adds labels to the axes.
Parameters
----------
axes_numbers
The numbers to be added to the axes. Use ``None`` to represent an axis with default labels.
Examples
--------
.. code-block:: python
ax = ThreeDAxes()
x_labels = range(-4, 5)
z_labels = range(-4, 4, 2)
ax.add_coordinates(x_labels, None, z_labels) # default y labels, custom x & z labels
ax.add_coordinates(x_labels) # only x labels
.. code-block:: python
# specifically control the position and value of the labels using a dict
ax = Axes(x_range=[0, 7])
x_pos = [x for x in range(1, 8)]
# strings are automatically converted into a `Tex` mobject.
x_vals = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
x_dict = dict(zip(x_pos, x_vals))
ax.add_coordinates(x_dict)
"""
self.coordinate_labels = VGroup()
# if nothing is passed to axes_numbers, produce axes with default labelling
if not axes_numbers:
axes_numbers = [None for _ in range(self.dimension)]
for axis, values in zip(self.axes, axes_numbers):
if isinstance(values, dict):
labels = axis.add_labels(values, **kwargs)
else:
labels = axis.add_numbers(values, **kwargs)
self.coordinate_labels.add(labels)
return self
def get_line_from_axis_to_point(
self,
index: int,
point: Sequence[float],
line_func: Line = DashedLine,
color: Color = LIGHT_GREY,
stroke_width: float = 2,
) -> Line:
"""Returns a straight line from a given axis to a point in the scene.
Parameters
----------
index
Specifies the axis from which to draw the line. `0 = x_axis`, `1 = y_axis`
point
The point to which the line will be drawn.
line_func
The function of the :class:`~.Line` mobject used to construct the line.
color
The color of the line.
stroke_width
The stroke width of the line.
Returns
-------
:class:`~.Line`
The line from an axis to a point.
See Also
--------
:class:`get_vertical_line`
:class:`get_horizontal_line`
"""
axis = self.get_axis(index)
line = line_func(axis.get_projection(point), point)
line.set_stroke(color, stroke_width)
return line
def get_vertical_line(self, point: Sequence[float], **kwargs) -> Line:
"""A vertical line from the x-axis to a given point in the scene.
Parameters
----------
point
The point to which the vertical line will be drawn.
kwargs
Additional parameters to be passed to :class:`get_line_from_axis_to_point`
Returns
-------
:class:`Line`
A vertical line from the x-axis to the point.
"""
return self.get_line_from_axis_to_point(0, point, **kwargs)
def get_horizontal_line(self, point: Sequence[float], **kwargs) -> Line:
"""A horizontal line from the y-axis to a given point in the scene.
Parameters
----------
point
The point to which the horizontal line will be drawn.
kwargs
Additional parameters to be passed to :class:`get_line_from_axis_to_point`
Returns
-------
:class:`Line`
A horizontal line from the y-axis to the point.
"""
return self.get_line_from_axis_to_point(1, point, **kwargs)
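# Editor's illustrative usage (not part of the original source; the coordinates are arbitrary):
#   point = ax.coords_to_point(2, 1.5)
#   v_line = ax.get_vertical_line(point)                # from the x-axis up to the point
#   h_line = ax.get_horizontal_line(point, color=BLUE)  # from the y-axis across to the point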
# graphing
def get_graph(
self,
function: Callable[[float], float],
x_range: Optional[Sequence[float]] = None,
**kwargs,
):
"""Generates a curve based on a function.
Parameters
----------
function
The function used to construct the :class:`~.ParametricFunction`.
x_range
The range of the curve along the axes. ``x_range = [x_min, x_max]``.
kwargs
Additional parameters to be passed to :class:`~.ParametricFunction`.
Returns
-------
:class:`~.ParametricFunction`
The plotted curve.
"""
t_range = np.array(self.x_range, dtype=float)
if x_range is not None:
t_range[: len(x_range)] = x_range
if x_range is None or len(x_range) < 3:
# if t_range has a defined step size, increase the number of sample points per tick
t_range[2] /= self.num_sampled_graph_points_per_tick
# For axes, the third coordinate of x_range indicates
# tick frequency. But for functions, it indicates a
# sample frequency
graph = ParametricFunction(
lambda t: self.coords_to_point(t, function(t)), t_range=t_range, **kwargs
)
graph.underlying_function = function
return graph
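# Editor's illustrative usage (not part of the original source; range and color are arbitrary):
#   ax = Axes(x_range=[-3, 3], y_range=[-1, 9])
#   parabola = ax.get_graph(lambda x: x ** 2, color=BLUE)
#   point_at_2 = ax.input_to_graph_point(2, parabola)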
def get_parametric_curve(self, function, **kwargs):
dim = self.dimension
graph = ParametricFunction(
lambda t: self.coords_to_point(*function(t)[:dim]), **kwargs
)
graph.underlying_function = function
return graph
def input_to_graph_point(self, x: float, graph: "ParametricFunction") -> np.ndarray:
"""Returns the coordinates of the point on the ``graph``
corresponding to the input ``x`` value.
Parameters
----------
x
The x-value for which the coordinates of corresponding point on the :attr:`graph` are to be found.
graph
The :class:`~.ParametricFunction` on which the x-value and y-value lie.
Returns
-------
:class:`np.ndarray`
The coordinates of the point on the :attr:`graph` corresponding to the :attr:`x` value.
"""
if hasattr(graph, "underlying_function"):
return graph.function(x)
else:
alpha = binary_search(
function=lambda a: self.point_to_coords(graph.point_from_proportion(a))[
0
],
target=x,
lower_bound=self.x_range[0],
upper_bound=self.x_range[1],
)
if alpha is not None:
return graph.point_from_proportion(alpha)
else:
return None
def i2gp(self, x, graph):
"""
Alias for :meth:`input_to_graph_point`.
"""
return self.input_to_graph_point(x, graph)
def get_graph_label(
self,
graph: "ParametricFunction",
label: Union[float, str, "Mobject"] = "f(x)",
x_val: Optional[float] = None,
direction: Sequence[float] = RIGHT,
buff: float = MED_SMALL_BUFF,
color: Optional[Color] = None,
dot: bool = False,
dot_config: Optional[dict] = None,
) -> Mobject:
"""Creates a properly positioned label for the passed graph,
styled with parameters and an optional dot.
Parameters
----------
graph
The curve of the function plotted.
label
The label for the function's curve. Written with :class:`MathTex` if not specified otherwise.
x_val
The x_value with which the label should be aligned.
direction
The cartesian position, relative to the curve that the label will be at --> ``LEFT``, ``RIGHT``
buff
The buffer space between the curve and the label.
color
The color of the label.
dot
Adds a dot at the given point on the graph.
dot_config
Additional parameters to be passed to :class:`~.Dot`.
Returns
-------
:class:`Mobject`
The positioned label and :class:`~.Dot`, if applicable.
"""
if dot_config is None:
dot_config = {}
label = self.create_label_tex(label)
color = color or graph.get_color()
label.set_color(color)
if x_val is None:
# Search from right to left
for x in np.linspace(self.x_range[1], self.x_range[0], 100):
point = self.input_to_graph_point(x, graph)
if point[1] < config["frame_y_radius"]:
break
else:
point = self.input_to_graph_point(x_val, graph)
label.next_to(point, direction, buff=buff)
label.shift_onto_screen()
if dot:
label.add(Dot(point=point, **dot_config))
return label
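# Editor's illustrative usage (not part of the original source; `parabola` as sketched above):
#   label = ax.get_graph_label(parabola, label="x^2", x_val=2, dot=True)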
# calculus
def get_riemann_rectangles(
self,
graph: "ParametricFunction",
x_range: Optional[Sequence[float]] = None,
dx: Optional[float] = 0.1,
input_sample_type: str = "left",
stroke_width: float = 1,
stroke_color: Color = BLACK,
fill_opacity: float = 1,
color: Union[Iterable[Color], Color] = np.array((BLUE, GREEN)),
show_signed_area: bool = True,
bounded_graph: "ParametricFunction" = None,
blend: bool = False,
width_scale_factor: float = 1.001,
) -> VGroup:
"""This method returns the :class:`~.VGroup` of the Riemann Rectangles for
a particular curve.
Parameters
----------
graph
The graph whose area will be approximated by Riemann rectangles.
x_range
The minimum and maximum x-values of the rectangles. ``x_range = [x_min, x_max]``.
dx
The change in x-value that separates each rectangle.
input_sample_type
Can be any of ``"left"``, ``"right"`` or ``"center"``. Refers to where
the sample point for the height of each Riemann Rectangle
will be inside the segments of the partition.
stroke_width
The stroke_width of the border of the rectangles.
stroke_color
The color of the border of the rectangle.
fill_opacity
The opacity of the rectangles.
color
The colors of the rectangles. Creates a balanced gradient if multiple colors are passed.
show_signed_area
Indicates negative area when the curve dips below the x-axis by inverting its color.
blend
Sets the :attr:`stroke_color` to :attr:`fill_color`, blending the rectangles without clear separation.
bounded_graph
If a secondary graph is specified, encloses the area between the two curves.
width_scale_factor
The factor by which the width of the rectangles is scaled.
Returns
-------
:class:`~.VGroup`
A :class:`~.VGroup` containing the Riemann Rectangles.
"""
# setting up x_range, overwrite user's third input
if x_range is None:
if bounded_graph is None:
x_range = [graph.t_min, graph.t_max]
else:
x_min = max(graph.t_min, bounded_graph.t_min)
x_max = min(graph.t_max, bounded_graph.t_max)
x_range = [x_min, x_max]
x_range = [*x_range[:2], dx]
rectangles = VGroup()
x_range = np.arange(*x_range)
# allows passing a string to color the graph
if type(color) is str:
colors = [color] * len(x_range)
else:
colors = color_gradient(color, len(x_range))
for x, color in zip(x_range, colors):
if input_sample_type == "left":
sample_input = x
elif input_sample_type == "right":
sample_input = x + dx
elif input_sample_type == "center":
sample_input = x + 0.5 * dx
else:
raise ValueError("Invalid input sample type")
graph_point = self.input_to_graph_point(sample_input, graph)
if bounded_graph is None:
y_point = self.origin_shift(self.y_range)
else:
y_point = bounded_graph.underlying_function(x)
points = VGroup(
*list(
map(
VectorizedPoint,
[
self.coords_to_point(x, y_point),
self.coords_to_point(x + width_scale_factor * dx, y_point),
graph_point,
],
)
)
)
rect = Rectangle().replace(points, stretch=True)
rectangles.add(rect)
# checks if the rectangle is under the x-axis
if self.p2c(graph_point)[1] < y_point and show_signed_area:
color = invert_color(color)
# blends rectangles smoothly
if blend:
stroke_color = color
rect.set_style(
fill_color=color,
fill_opacity=fill_opacity,
stroke_color=stroke_color,
stroke_width=stroke_width,
)
return rectangles
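# Editor's illustrative usage (not part of the original source; dx and x_range are arbitrary):
#   rects = ax.get_riemann_rectangles(parabola, x_range=[0, 2], dx=0.25,
#                                     input_sample_type="center")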
def get_area(
self,
graph: "ParametricFunction",
x_range: Optional[Sequence[float]] = None,
color: Union[Color, Iterable[Color]] = [BLUE, GREEN],
opacity: float = 0.3,
dx_scaling: float = 1,
bounded: "ParametricFunction" = None,
):
"""Returns a :class:`~.VGroup` of Riemann rectangles sufficiently small enough to visually
approximate the area under the graph passed.
Parameters
----------
graph
The graph/curve for which the area needs to be gotten.
x_range
The range of the minimum and maximum x-values of the area. ``x_range = [x_min, x_max]``.
color
The color of the area. Creates a gradient if a list of colors is provided.
opacity
The opacity of the area.
bounded
If a secondary :attr:`graph` is specified, encloses the area between the two curves.
dx_scaling
The factor by which the :attr:`dx` value is scaled.
Returns
-------
:class:`~.VGroup`
The :class:`~.VGroup` containing the Riemann Rectangles.
"""
dx = self.x_range[2] / 500
return self.get_riemann_rectangles(
graph,
x_range=x_range,
dx=dx * dx_scaling,
bounded_graph=bounded,
blend=True,
color=color,
show_signed_area=False,
).set_opacity(opacity=opacity)
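# Editor's illustrative usage (not part of the original source):
#   area = ax.get_area(parabola, x_range=[0, 2], opacity=0.5)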
def angle_of_tangent(
self, x: float, graph: "ParametricFunction", dx: float = 1e-8
) -> float:
"""Returns the angle to the x-axis of the tangent
to the plotted curve at a particular x-value.
Parameters
----------
x
The x-value at which the tangent must touch the curve.
graph
The :class:`~.ParametricFunction` for which to calculate the tangent.
dx
The small change in `x` with which a small change in `y`
will be compared in order to obtain the tangent.
Returns
-------
:class:`float`
The angle of the tangent with the x axis.
"""
p0 = self.input_to_graph_point(x, graph)
p1 = self.input_to_graph_point(x + dx, graph)
return angle_of_vector(p1 - p0)
def slope_of_tangent(
self, x: float, graph: "ParametricFunction", **kwargs
) -> float:
"""Returns the slope of the tangent to the plotted curve
at a particular x-value.
Parameters
----------
x
The x-value at which the tangent must touch the curve.
graph
The :class:`~.ParametricFunction` for which to calculate the tangent.
Returns
-------
:class:`float`
The slope of the tangent with the x axis.
"""
return np.tan(self.angle_of_tangent(x, graph, **kwargs))
def get_derivative_graph(
self, graph: "ParametricFunction", color: Color = GREEN, **kwargs
) -> ParametricFunction:
"""Returns the curve of the derivative of the passed
graph.
Parameters
----------
graph
The graph for which the derivative will be found.
color
The color of the derivative curve.
**kwargs
Any valid keyword argument of :class:`~.ParametricFunction`
Returns
-------
:class:`~.ParametricFunction`
The curve of the derivative.
"""
def deriv(x):
return self.slope_of_tangent(x, graph)
return self.get_graph(deriv, color=color, **kwargs)
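# Editor's illustrative usage (not part of the original source):
#   m = ax.slope_of_tangent(1.0, parabola)      # roughly 2.0 for y = x ** 2
#   deriv_graph = ax.get_derivative_graph(parabola)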
def get_secant_slope_group(
self,
x: float,
graph: ParametricFunction,
dx: Optional[float] = None,
dx_line_color: Color = YELLOW,
dy_line_color: Optional[Color] = None,
dx_label: Optional[Union[float, str]] = None,
dy_label: Optional[Union[float, str]] = None,
include_secant_line: bool = True,
secant_line_color: Color = GREEN,
secant_line_length: float = 10,
) -> VGroup:
"""Creates two lines representing `dx` and `df`, the labels for `dx` and `df`, and
the secant to the curve at a particular x-value.
Parameters
----------
x
The x-value at which the secant intersects the graph for the first time.
graph
The curve for which the secant will be found.
dx
The change in `x` after which the secant exits.
dx_line_color
The color of the line that indicates the change in `x`.
dy_line_color
The color of the line that indicates the change in `y`. Defaults to the color of :attr:`graph`.
dx_label
The label for the `dx` line.
dy_label
The label for the `dy` line.
include_secant_line
Whether or not to include the secant line in the graph,
or just have the df and dx lines and labels.
secant_line_color
The color of the secant line.
secant_line_length
The length of the secant line.
Returns
-------
:class:`~.VGroup`
A group containing the elements: `dx_line`, `df_line`, and
if applicable also :attr:`dx_label`, :attr:`df_label`, `secant_line`.
"""
group = VGroup()
dx = dx or float(self.x_range[1] - self.x_range[0]) / 10
dx_line_color = dx_line_color
dy_line_color = dy_line_color or graph.get_color()
p1 = self.input_to_graph_point(x, graph)
p2 = self.input_to_graph_point(x + dx, graph)
interim_point = p2[0] * RIGHT + p1[1] * UP
group.dx_line = Line(p1, interim_point, color=dx_line_color)
group.df_line = Line(interim_point, p2, color=dy_line_color)
group.add(group.dx_line, group.df_line)
labels = VGroup()
if dx_label is not None:
group.dx_label = self.create_label_tex(dx_label)
labels.add(group.dx_label)
group.add(group.dx_label)
if dy_label is not None:
group.df_label = self.create_label_tex(dy_label)
labels.add(group.df_label)
group.add(group.df_label)
if len(labels) > 0:
max_width = 0.8 * group.dx_line.width
max_height = 0.8 * group.df_line.height
if labels.width > max_width:
labels.width = max_width
if labels.height > max_height:
labels.height = max_height
if dx_label is not None:
group.dx_label.next_to(
group.dx_line, np.sign(dx) * DOWN, buff=group.dx_label.height / 2
)
group.dx_label.set_color(group.dx_line.get_color())
if dy_label is not None:
group.df_label.next_to(
group.df_line, np.sign(dx) * RIGHT, buff=group.df_label.height / 2
)
group.df_label.set_color(group.df_line.get_color())
if include_secant_line:
secant_line_color = secant_line_color
group.secant_line = Line(p1, p2, color=secant_line_color)
group.secant_line.scale_in_place(
secant_line_length / group.secant_line.get_length()
)
group.add(group.secant_line)
return group
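# Editor's illustrative usage (not part of the original source):
#   secant = ax.get_secant_slope_group(x=1, graph=parabola, dx=0.5,
#                                      dx_label="dx", dy_label="dy")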
def get_vertical_lines_to_graph(
self,
graph: ParametricFunction,
x_range: Optional[Sequence[float]] = None,
num_lines: int = 20,
**kwargs,
) -> VGroup:
"""Obtains multiple lines from the x-axis to the curve.
Parameters
----------
graph
The graph on which the line should extend to.
x_range
A list containing the lower and upper bounds of the lines -> ``x_range = [x_min, x_max]``.
num_lines
The number of evenly spaced lines.
Returns
-------
:class:`~.VGroup`
The :class:`~.VGroup` of the evenly spaced lines.
"""
x_range = x_range if x_range is not None else self.x_range
return VGroup(
*[
self.get_vertical_line(self.i2gp(x, graph), **kwargs)
for x in np.linspace(x_range[0], x_range[1], num_lines)
]
)
def get_T_label(
self,
x_val: float,
graph: "ParametricFunction",
label: Optional[Union[float, str, "Mobject"]] = None,
label_color: Color = WHITE,
triangle_size: float = MED_SMALL_BUFF,
triangle_color: Color = WHITE,
line_func: "Line" = Line,
line_color: Color = YELLOW,
) -> VGroup:
"""Creates a labelled triangle marker with a vertical line from the x-axis
to a curve at a given x-value.
Parameters
----------
x_val
The position along the curve at which the label, line and triangle will be constructed.
graph
The :class:`~.ParametricFunction` for which to construct the label.
label
The label of the vertical line and triangle.
label_color
The color of the label.
triangle_size
The size of the triangle.
triangle_color
The color of the triangle.
line_func
The function used to construct the vertical line.
line_color
The color of the vertical line.
Examples
--------
.. manim:: T_labelExample
:save_last_frame:
class T_labelExample(Scene):
def construct(self):
# defines the axes and linear function
axes = Axes(x_range=[-1, 10], y_range=[-1, 10], x_length=9, y_length=6)
func = axes.get_graph(lambda x: x, color=BLUE)
# creates the T_label
t_label = axes.get_T_label(x_val=4, graph=func, label=Tex("x-value"))
self.add(axes, func, t_label)
Returns
-------
:class:`~.VGroup`
A :class:`~.VGroup` of the label, triangle and vertical line mobjects.
"""
T_label_group = VGroup()
triangle = RegularPolygon(n=3, start_angle=np.pi / 2, stroke_width=0).set_fill(
color=triangle_color, opacity=1
)
triangle.height = triangle_size
triangle.move_to(self.coords_to_point(x_val, 0), UP)
if label is not None:
t_label = self.create_label_tex(label).set_color(label_color)
t_label.next_to(triangle, DOWN)
T_label_group.add(t_label)
v_line = self.get_vertical_line(
self.i2gp(x_val, graph), color=line_color, line_func=line_func
)
T_label_group.add(triangle, v_line)
return T_label_group
class Axes(VGroup, CoordinateSystem, metaclass=ConvertToOpenGL):
"""Creates a set of axes.
Parameters
----------
x_range
The :code:`[x_min, x_max, x_step]` values of the x-axis.
y_range
The :code:`[y_min, y_max, y_step]` values of the y-axis.
x_length
The length of the x-axis.
y_length
The length of the y-axis.
axis_config
Arguments to be passed to :class:`~.NumberLine` that influences both axes.
x_axis_config
Arguments to be passed to :class:`~.NumberLine` that influence the x-axis.
y_axis_config
Arguments to be passed to :class:`~.NumberLine` that influence the y-axis.
tips
Whether or not to include the tips on both axes.
kwargs : Any
Additional arguments to be passed to :class:`CoordinateSystem` and :class:`~.VGroup`.
"""
def __init__(
self,
x_range: Optional[Sequence[float]] = None,
y_range: Optional[Sequence[float]] = None,
x_length: Optional[float] = round(config.frame_width) - 2,
y_length: Optional[float] = round(config.frame_height) - 2,
axis_config: Optional[dict] = None,
x_axis_config: Optional[dict] = None,
y_axis_config: Optional[dict] = None,
tips: bool = True,
**kwargs,
):
VGroup.__init__(self, **kwargs)
CoordinateSystem.__init__(self, x_range, y_range, x_length, y_length)
self.axis_config = {
"include_tip": tips,
"numbers_to_exclude": [0],
"exclude_origin_tick": True,
}
self.x_axis_config = {}
self.y_axis_config = {"rotation": 90 * DEGREES, "label_direction": LEFT}
self.update_default_configs(
(self.axis_config, self.x_axis_config, self.y_axis_config),
(axis_config, x_axis_config, y_axis_config),
)
self.x_axis_config = merge_dicts_recursively(
self.axis_config, self.x_axis_config
)
self.y_axis_config = merge_dicts_recursively(
self.axis_config, self.y_axis_config
)
self.x_axis = self.create_axis(self.x_range, self.x_axis_config, self.x_length)
self.y_axis = self.create_axis(self.y_range, self.y_axis_config, self.y_length)
# Add as a separate group in case various other
# mobjects are added to self, as for example in
# NumberPlane below
self.axes = VGroup(self.x_axis, self.y_axis)
self.add(*self.axes)
# finds the middle-point on each axis
lines_center_point = [((axis.x_max + axis.x_min) / 2) for axis in self.axes]
self.shift(-self.coords_to_point(*lines_center_point))
@staticmethod
def update_default_configs(default_configs, passed_configs):
for default_config, passed_config in zip(default_configs, passed_configs):
if passed_config is not None:
update_dict_recursively(default_config, passed_config)
def create_axis(
self,
range_terms: Sequence[float],
axis_config: dict,
length: float,
) -> NumberLine:
"""Creates an axis and dynamically adjusts its position depending on where 0 is located on the line.
Parameters
----------
range_terms
            The range of the axis: ``(x_min, x_max, x_step)``.
axis_config
Additional parameters that are passed to :class:`NumberLine`.
length
The length of the axis.
Returns
-------
:class:`NumberLine`
            Returns a number line with the provided range.
"""
axis_config["length"] = length
axis = NumberLine(range_terms, **axis_config)
# without the call to origin_shift, graph does not exist when min > 0 or max < 0
# shifts the axis so that 0 is centered
axis.shift(-axis.number_to_point(self.origin_shift(range_terms)))
return axis
def coords_to_point(self, *coords: Sequence[float]) -> np.ndarray:
"""Transforms the vector formed from ``coords`` formed by the :class:`Axes`
into the corresponding vector with respect to the default basis.
Returns
-------
np.ndarray
A point that results from a change of basis from the coordinate system
defined by the :class:`Axes` to that of ``manim``'s default coordinate system
"""
origin = self.x_axis.number_to_point(self.origin_shift(self.x_range))
result = np.array(origin)
for axis, coord in zip(self.get_axes(), coords):
result += axis.number_to_point(coord) - origin
return result
    def point_to_coords(self, point: Sequence[float]) -> Tuple:
"""Transforms the coordinates of the point which are with respect to ``manim``'s default
basis into the coordinates of that point with respect to the basis defined by :class:`Axes`.
Parameters
----------
point
The point whose coordinates will be found.
Returns
-------
Tuple
Coordinates of the point with respect to :class:`Axes`'s basis
"""
return tuple([axis.point_to_number(point) for axis in self.get_axes()])
def get_axes(self) -> VGroup:
"""Gets the axes.
Returns
-------
:class:`~.VGroup`
A pair of axes.
"""
return self.axes
def get_line_graph(
self,
x_values: Iterable[float],
y_values: Iterable[float],
z_values: Optional[Iterable[float]] = None,
line_color: Color = YELLOW,
add_vertex_dots: bool = True,
vertex_dot_radius: float = DEFAULT_DOT_RADIUS,
vertex_dot_style: Optional[dict] = None,
**kwargs,
) -> VDict:
"""Draws a line graph.
The graph connects the vertices formed from zipping
``x_values``, ``y_values`` and ``z_values``. Also adds :class:`Dots <.Dot>` at the
vertices if ``add_vertex_dots`` is set to ``True``.
Parameters
----------
x_values
Iterable of values along the x-axis.
y_values
Iterable of values along the y-axis.
z_values
Iterable of values (zeros if z_values is None) along the z-axis.
line_color
Color for the line graph.
add_vertex_dots
Whether or not to add :class:`~.Dot` at each vertex.
vertex_dot_radius
Radius for the :class:`~.Dot` at each vertex.
vertex_dot_style
Style arguments to be passed into :class:`~.Dot` at each vertex.
kwargs
Additional arguments to be passed into :class:`~.VMobject`.
Examples
--------
.. manim:: LineGraphExample
:save_last_frame:
class LineGraphExample(Scene):
def construct(self):
plane = NumberPlane(
x_range = (0, 7),
y_range = (0, 5),
x_length = 7,
axis_config={"include_numbers": True},
)
plane.center()
line_graph = plane.get_line_graph(
x_values = [0, 1.5, 2, 2.8, 4, 6.25],
y_values = [1, 3, 2.25, 4, 2.5, 1.75],
line_color=GOLD_E,
vertex_dot_style=dict(stroke_width=3, fill_color=PURPLE),
stroke_width = 4,
)
self.add(plane, line_graph)
"""
x_values, y_values = map(np.array, (x_values, y_values))
if z_values is None:
z_values = np.zeros(x_values.shape)
line_graph = VDict()
graph = VGroup(color=line_color, **kwargs)
vertices = [
self.coords_to_point(x, y, z)
for x, y, z in zip(x_values, y_values, z_values)
]
graph.set_points_as_corners(vertices)
graph.z_index = -1
line_graph["line_graph"] = graph
if add_vertex_dots:
vertex_dot_style = vertex_dot_style or {}
vertex_dots = VGroup(
*[
Dot(point=vertex, radius=vertex_dot_radius, **vertex_dot_style)
for vertex in vertices
]
)
line_graph["vertex_dots"] = vertex_dots
return line_graph
@staticmethod
def origin_shift(axis_range: Sequence[float]) -> float:
"""Determines how to shift graph mobjects to compensate when 0 is not on the axis.
Parameters
----------
axis_range
The range of the axis : ``(x_min, x_max, x_step)``.
"""
if axis_range[0] > 0:
return axis_range[0]
if axis_range[1] < 0:
return axis_range[1]
else:
return 0
class ThreeDAxes(Axes):
"""A 3-dimensional set of axes.
Parameters
----------
x_range
The :code:`[x_min, x_max, x_step]` values of the x-axis.
y_range
The :code:`[y_min, y_max, y_step]` values of the y-axis.
z_range
The :code:`[z_min, z_max, z_step]` values of the z-axis.
x_length
The length of the x-axis.
y_length
The length of the y-axis.
z_length
The length of the z-axis.
z_axis_config
Arguments to be passed to :class:`~.NumberLine` that influence the z-axis.
z_normal
The direction of the normal.
num_axis_pieces
The number of pieces used to construct the axes.
light_source
The direction of the light source.
depth
Currently non-functional.
gloss
Currently non-functional.
kwargs : Any
Additional arguments to be passed to :class:`Axes`.
"""
def __init__(
self,
x_range: Optional[Sequence[float]] = (-6, 6, 1),
y_range: Optional[Sequence[float]] = (-5, 5, 1),
z_range: Optional[Sequence[float]] = (-4, 4, 1),
x_length: Optional[float] = config.frame_height + 2.5,
y_length: Optional[float] = config.frame_height + 2.5,
z_length: Optional[float] = config.frame_height - 1.5,
z_axis_config: Optional[dict] = None,
z_normal: Sequence[float] = DOWN,
num_axis_pieces: int = 20,
light_source: Sequence[float] = 9 * DOWN + 7 * LEFT + 10 * OUT,
# opengl stuff (?)
depth=None,
gloss=0.5,
**kwargs,
):
Axes.__init__(
self,
x_range=x_range,
x_length=x_length,
y_range=y_range,
y_length=y_length,
**kwargs,
)
self.z_range = z_range
self.z_length = z_length
self.z_axis_config = {}
self.update_default_configs((self.z_axis_config,), (z_axis_config,))
self.z_axis_config = merge_dicts_recursively(
self.axis_config, self.z_axis_config
)
self.z_normal = z_normal
self.num_axis_pieces = num_axis_pieces
self.light_source = light_source
self.dimension = 3
z_axis = self.create_axis(self.z_range, self.z_axis_config, self.z_length)
z_axis.rotate_about_zero(-PI / 2, UP)
z_axis.rotate_about_zero(angle_of_vector(self.z_normal))
z_axis.shift(self.x_axis.number_to_point(self.origin_shift(x_range)))
self.axes.add(z_axis)
self.add(z_axis)
self.z_axis = z_axis
if not config.renderer == "opengl":
self.add_3d_pieces()
self.set_axis_shading()
def add_3d_pieces(self):
for axis in self.axes:
axis.pieces = VGroup(*axis.get_pieces(self.num_axis_pieces))
axis.add(axis.pieces)
axis.set_stroke(width=0, family=False)
axis.set_shade_in_3d(True)
def set_axis_shading(self):
def make_func(axis):
vect = self.light_source
return lambda: (
axis.get_edge_center(-vect),
axis.get_edge_center(vect),
)
for axis in self:
for submob in axis.family_members_with_points():
submob.get_gradient_start_and_end_points = make_func(axis)
submob.get_unit_normal = lambda a: np.ones(3)
submob.set_sheen(0.2)
class NumberPlane(Axes):
"""Creates a cartesian plane with background lines.
Parameters
----------
x_range
The :code:`[x_min, x_max, x_step]` values of the plane in the horizontal direction.
y_range
The :code:`[y_min, y_max, y_step]` values of the plane in the vertical direction.
x_length
The width of the plane.
y_length
The height of the plane.
background_line_style
Arguments that influence the construction of the background lines of the plane.
faded_line_style
        Similar to :attr:`background_line_style`, but affects the construction of the plane's faded background lines.
faded_line_ratio
Determines the number of boxes within the background lines: :code:`2` = 4 boxes, :code:`3` = 9 boxes.
make_smooth_after_applying_functions
Currently non-functional.
kwargs : Any
Additional arguments to be passed to :class:`Axes`.
.. note:: If :attr:`x_length` or :attr:`y_length` are not defined, the plane automatically adjusts its lengths based
on the :attr:`x_range` and :attr:`y_range` values to set the unit_size to 1.
Examples
--------
.. manim:: NumberPlaneExample
:save_last_frame:
class NumberPlaneExample(Scene):
def construct(self):
number_plane = NumberPlane(
x_range=[-10, 10, 1],
y_range=[-10, 10, 1],
background_line_style={
"stroke_color": TEAL,
"stroke_width": 4,
"stroke_opacity": 0.6
}
)
self.add(number_plane)
"""
def __init__(
self,
x_range: Optional[Sequence[float]] = (
-config["frame_x_radius"],
config["frame_x_radius"],
1,
),
y_range: Optional[Sequence[float]] = (
-config["frame_y_radius"],
config["frame_y_radius"],
1,
),
x_length: Optional[float] = None,
y_length: Optional[float] = None,
background_line_style: Optional[dict] = None,
faded_line_style: Optional[dict] = None,
faded_line_ratio: int = 1,
make_smooth_after_applying_functions=True,
**kwargs,
):
# configs
self.axis_config = {
"stroke_color": WHITE,
"stroke_width": 2,
"include_ticks": False,
"include_tip": False,
"line_to_number_buff": SMALL_BUFF,
"label_direction": DR,
"number_scale_value": 0.5,
}
self.y_axis_config = {"label_direction": DR}
self.background_line_style = {
"stroke_color": BLUE_D,
"stroke_width": 2,
"stroke_opacity": 1,
}
self.update_default_configs(
(self.axis_config, self.y_axis_config, self.background_line_style),
(
kwargs.pop("axis_config", None),
kwargs.pop("y_axis_config", None),
background_line_style,
),
)
# Defaults to a faded version of line_config
self.faded_line_style = faded_line_style
self.faded_line_ratio = faded_line_ratio
self.make_smooth_after_applying_functions = make_smooth_after_applying_functions
# init
super().__init__(
x_range=x_range,
y_range=y_range,
x_length=x_length,
y_length=y_length,
axis_config=self.axis_config,
y_axis_config=self.y_axis_config,
**kwargs,
)
# dynamically adjusts x_length and y_length so that the unit_size is one by default
if x_length is None:
x_length = self.x_range[1] - self.x_range[0]
if y_length is None:
y_length = self.y_range[1] - self.y_range[0]
self.init_background_lines()
def init_background_lines(self):
"""Will init all the lines of NumberPlanes (faded or not)"""
if self.faded_line_style is None:
style = dict(self.background_line_style)
# For anything numerical, like stroke_width
# and stroke_opacity, chop it in half
for key in style:
if isinstance(style[key], numbers.Number):
style[key] *= 0.5
self.faded_line_style = style
self.background_lines, self.faded_lines = self.get_lines()
self.background_lines.set_style(
**self.background_line_style,
)
self.faded_lines.set_style(
**self.faded_line_style,
)
self.add_to_back(
self.faded_lines,
self.background_lines,
)
def get_lines(self) -> Tuple[VGroup, VGroup]:
"""Generate all the lines, faded and not faded. Two sets of lines are generated: one parallel to the X-axis, and parallel to the Y-axis.
Returns
-------
Tuple[:class:`~.VGroup`, :class:`~.VGroup`]
The first (i.e the non faded lines) and second (i.e the faded lines) sets of lines, respectively.
"""
x_axis = self.get_x_axis()
y_axis = self.get_y_axis()
x_lines1, x_lines2 = self.get_lines_parallel_to_axis(
x_axis,
y_axis,
self.x_axis.x_step,
self.faded_line_ratio,
)
y_lines1, y_lines2 = self.get_lines_parallel_to_axis(
y_axis,
x_axis,
self.y_axis.x_step,
self.faded_line_ratio,
)
# TODO this was added so that we can run tests on NumberPlane
# In the future these attributes will be tacked onto self.background_lines
self.x_lines = x_lines1
self.y_lines = y_lines1
lines1 = VGroup(*x_lines1, *y_lines1)
lines2 = VGroup(*x_lines2, *y_lines2)
return lines1, lines2
def get_lines_parallel_to_axis(
self,
axis_parallel_to: NumberLine,
axis_perpendicular_to: NumberLine,
freq: float,
ratio_faded_lines: int,
) -> Tuple[VGroup, VGroup]:
"""Generate a set of lines parallel to an axis.
Parameters
----------
axis_parallel_to
The axis with which the lines will be parallel.
axis_perpendicular_to
The axis with which the lines will be perpendicular.
        freq
            Frequency of non-faded lines (number of non-faded lines per graph unit).
        ratio_faded_lines
            The ratio between the space between faded lines and the space between non-faded lines.
Returns
-------
Tuple[:class:`~.VGroup`, :class:`~.VGroup`]
The first (i.e the non-faded lines parallel to `axis_parallel_to`) and second (i.e the faded lines parallel to `axis_parallel_to`) sets of lines, respectively.
"""
line = Line(axis_parallel_to.get_start(), axis_parallel_to.get_end())
if ratio_faded_lines == 0: # don't show faded lines
ratio_faded_lines = 1 # i.e. set ratio to 1
step = (1 / ratio_faded_lines) * freq
lines1 = VGroup()
lines2 = VGroup()
unit_vector_axis_perp_to = axis_perpendicular_to.get_unit_vector()
# min/max used in case range does not include 0. i.e. if (2,6):
# the range becomes (0,4), not (0,6), to produce the correct number of lines
ranges = (
np.arange(
0,
min(
axis_perpendicular_to.x_max - axis_perpendicular_to.x_min,
axis_perpendicular_to.x_max,
),
step,
),
np.arange(
0,
max(
axis_perpendicular_to.x_min - axis_perpendicular_to.x_max,
axis_perpendicular_to.x_min,
),
-step,
),
)
for inputs in ranges:
for k, x in enumerate(inputs):
new_line = line.copy()
new_line.shift(unit_vector_axis_perp_to * x)
if k % ratio_faded_lines == 0:
lines1.add(new_line)
else:
lines2.add(new_line)
return lines1, lines2
def get_center_point(self) -> np.ndarray:
"""Gets the origin of :class:`NumberPlane`.
Returns
-------
np.ndarray
The center point.
"""
return self.coords_to_point(0, 0)
def get_x_unit_size(self):
return self.get_x_axis().get_unit_size()
def get_y_unit_size(self):
        return self.get_y_axis().get_unit_size()
def get_axes(self) -> VGroup:
        # Method already defined at Axes.get_axes, so we could remove this in a later PR.
"""Gets the pair of axes.
Returns
-------
:class:`~.VGroup`
Axes
"""
return self.axes
def get_vector(self, coords, **kwargs):
kwargs["buff"] = 0
return Arrow(
self.coords_to_point(0, 0), self.coords_to_point(*coords), **kwargs
)
def prepare_for_nonlinear_transform(self, num_inserted_curves=50):
for mob in self.family_members_with_points():
num_curves = mob.get_num_curves()
if num_inserted_curves > num_curves:
mob.insert_n_curves(num_inserted_curves - num_curves)
return self
class PolarPlane(Axes):
r"""Creates a polar plane with background lines.
Parameters
----------
azimuth_step
The number of divisions in the azimuth (also known as the `angular coordinate` or `polar angle`). If ``None`` is specified then it will use the default
specified by ``azimuth_units``:
- ``"PI radians"`` or ``"TAU radians"``: 20
- ``"degrees"``: 36
- ``"gradians"``: 40
- ``None``: 1
A non-integer value will result in a partial division at the end of the circle.
size
The diameter of the plane.
radius_step
The distance between faded radius lines.
radius_max
The maximum value of the radius.
azimuth_units
Specifies a default labelling system for the azimuth. Choices are:
- ``"PI radians"``: Fractional labels in the interval :math:`\left[0, 2\pi\right]` with :math:`\pi` as a constant.
- ``"TAU radians"``: Fractional labels in the interval :math:`\left[0, \tau\right]` (where :math:`\tau = 2\pi`) with :math:`\tau` as a constant.
- ``"degrees"``: Decimal labels in the interval :math:`\left[0, 360\right]` with a degree (:math:`^{\circ}`) symbol.
- ``"gradians"``: Decimal labels in the interval :math:`\left[0, 400\right]` with a superscript "g" (:math:`^{g}`).
- ``None``: Decimal labels in the interval :math:`\left[0, 1\right]`.
azimuth_compact_fraction
If the ``azimuth_units`` choice has fractional labels, choose whether to combine the constant in a compact form :math:`\tfrac{xu}{y}` as opposed to :math:`\tfrac{x}{y}u`, where :math:`u` is the constant.
azimuth_offset
The angle offset of the azimuth, expressed in radians.
azimuth_direction
The direction of the azimuth.
- ``"CW"``: Clockwise.
- ``"CCW"``: Anti-clockwise.
azimuth_label_buff
The buffer for the azimuth labels.
azimuth_label_scale
The scale of the azimuth labels.
radius_config
The axis config for the radius.
Examples
--------
.. manim:: PolarPlaneExample
:ref_classes: PolarPlane
:save_last_frame:
class PolarPlaneExample(Scene):
def construct(self):
polarplane_pi = PolarPlane(
azimuth_units="PI radians",
size=6,
azimuth_label_scale=0.7,
radius_config={"number_scale_value": 0.7},
).add_coordinates()
self.add(polarplane_pi)
"""
def __init__(
self,
radius_max: float = config["frame_y_radius"],
size: Optional[float] = None,
radius_step: float = 1,
azimuth_step: Optional[float] = None,
azimuth_units: Optional[str] = "PI radians",
azimuth_compact_fraction: bool = True,
azimuth_offset: float = 0,
azimuth_direction: str = "CCW",
azimuth_label_buff: float = SMALL_BUFF,
azimuth_label_scale: float = 0.5,
radius_config: Optional[dict] = None,
background_line_style: Optional[dict] = None,
faded_line_style: Optional[dict] = None,
faded_line_ratio: int = 1,
make_smooth_after_applying_functions: bool = True,
**kwargs,
):
# error catching
if azimuth_units in ["PI radians", "TAU radians", "degrees", "gradians", None]:
self.azimuth_units = azimuth_units
else:
raise ValueError(
"Invalid azimuth units. Expected one of: PI radians, TAU radians, degrees, gradians or None."
)
if azimuth_direction in ["CW", "CCW"]:
self.azimuth_direction = azimuth_direction
else:
raise ValueError("Invalid azimuth units. Expected one of: CW, CCW.")
# configs
self.radius_config = {
"stroke_color": WHITE,
"stroke_width": 2,
"include_ticks": False,
"include_tip": False,
"line_to_number_buff": SMALL_BUFF,
"label_direction": DL,
"number_scale_value": 0.5,
}
self.background_line_style = {
"stroke_color": BLUE_D,
"stroke_width": 2,
"stroke_opacity": 1,
}
self.azimuth_step = (
(
{
"PI radians": 20,
"TAU radians": 20,
"degrees": 36,
"gradians": 40,
None: 1,
}[azimuth_units]
)
if azimuth_step is None
else azimuth_step
)
self.update_default_configs(
(self.radius_config, self.background_line_style),
(radius_config, background_line_style),
)
# Defaults to a faded version of line_config
self.faded_line_style = faded_line_style
self.faded_line_ratio = faded_line_ratio
self.make_smooth_after_applying_functions = make_smooth_after_applying_functions
self.azimuth_offset = azimuth_offset
self.azimuth_label_buff = azimuth_label_buff
self.azimuth_label_scale = azimuth_label_scale
self.azimuth_compact_fraction = azimuth_compact_fraction
# init
super().__init__(
x_range=np.array((-radius_max, radius_max, radius_step)),
y_range=np.array((-radius_max, radius_max, radius_step)),
x_length=size,
y_length=size,
axis_config=self.radius_config,
**kwargs,
)
# dynamically adjusts size so that the unit_size is one by default
if size is None:
size = 0
self.init_background_lines()
def init_background_lines(self):
"""Will init all the lines of NumberPlanes (faded or not)"""
if self.faded_line_style is None:
style = dict(self.background_line_style)
# For anything numerical, like stroke_width
# and stroke_opacity, chop it in half
for key in style:
if isinstance(style[key], numbers.Number):
style[key] *= 0.5
self.faded_line_style = style
self.background_lines, self.faded_lines = self.get_lines()
self.background_lines.set_style(
**self.background_line_style,
)
self.faded_lines.set_style(
**self.faded_line_style,
)
self.add_to_back(
self.faded_lines,
self.background_lines,
)
def get_lines(self) -> Tuple[VGroup, VGroup]:
"""Generate all the lines and circles, faded and not faded.
Returns
-------
Tuple[:class:`~.VGroup`, :class:`~.VGroup`]
The first (i.e the non faded lines and circles) and second (i.e the faded lines and circles) sets of lines and circles, respectively.
"""
center = self.get_center_point()
ratio_faded_lines = self.faded_line_ratio
offset = self.azimuth_offset
if ratio_faded_lines == 0: # don't show faded lines
ratio_faded_lines = 1 # i.e. set ratio to 1
rstep = (1 / ratio_faded_lines) * self.x_axis.x_step
astep = (1 / ratio_faded_lines) * (TAU * (1 / self.azimuth_step))
rlines1 = VGroup()
rlines2 = VGroup()
alines1 = VGroup()
alines2 = VGroup()
rinput = np.arange(0, self.x_axis.x_max + rstep, rstep)
ainput = np.arange(0, TAU, astep)
unit_vector = self.x_axis.get_unit_vector()[0]
for k, x in enumerate(rinput):
new_line = Circle(radius=x * unit_vector)
if k % ratio_faded_lines == 0:
alines1.add(new_line)
else:
alines2.add(new_line)
line = Line(center, self.get_x_axis().get_end())
for k, x in enumerate(ainput):
new_line = line.copy()
new_line.rotate(x + offset, about_point=center)
if k % ratio_faded_lines == 0:
rlines1.add(new_line)
else:
rlines2.add(new_line)
lines1 = VGroup(*rlines1, *alines1)
lines2 = VGroup(*rlines2, *alines2)
return lines1, lines2
def get_center_point(self):
return self.coords_to_point(0, 0)
def get_x_unit_size(self):
return self.get_x_axis().get_unit_size()
def get_y_unit_size(self):
        return self.get_y_axis().get_unit_size()
def get_axes(self) -> VGroup:
"""Gets the axes.
Returns
-------
:class:`~.VGroup`
A pair of axes.
"""
return self.axes
def get_vector(self, coords, **kwargs):
kwargs["buff"] = 0
return Arrow(
self.coords_to_point(0, 0), self.coords_to_point(*coords), **kwargs
)
def prepare_for_nonlinear_transform(self, num_inserted_curves=50):
for mob in self.family_members_with_points():
num_curves = mob.get_num_curves()
if num_inserted_curves > num_curves:
mob.insert_n_curves(num_inserted_curves - num_curves)
return self
def polar_to_point(self, radius: float, azimuth: float) -> np.ndarray:
r"""Gets a point from polar coordinates.
Parameters
----------
radius
The coordinate radius (:math:`r`).
azimuth
The coordinate azimuth (:math:`\theta`).
Returns
-------
numpy.ndarray
The point.
Examples
--------
.. manim:: PolarToPointExample
:ref_classes: PolarPlane Vector
:save_last_frame:
class PolarToPointExample(Scene):
def construct(self):
polarplane_pi = PolarPlane(azimuth_units="PI radians", size=6)
polartopoint_vector = Vector(polarplane_pi.polar_to_point(3, PI/4))
self.add(polarplane_pi)
self.add(polartopoint_vector)
"""
return self.coords_to_point(radius * np.cos(azimuth), radius * np.sin(azimuth))
def pr2pt(self, radius: float, azimuth: float) -> np.ndarray:
"""Abbreviation for :meth:`polar_to_point`"""
return self.polar_to_point(radius, azimuth)
def point_to_polar(self, point: np.ndarray) -> Tuple[float, float]:
r"""Gets polar coordinates from a point.
Parameters
----------
point
The point.
Returns
-------
Tuple[:class:`float`, :class:`float`]
The coordinate radius (:math:`r`) and the coordinate azimuth (:math:`\theta`).
"""
x, y = self.point_to_coords(point)
return np.sqrt(x ** 2 + y ** 2), np.arctan2(y, x)
def pt2pr(self, point: np.ndarray) -> Tuple[float, float]:
"""Abbreviation for :meth:`point_to_polar`"""
return self.point_to_polar(point)
def get_coordinate_labels(
self,
r_values: Optional[Iterable[float]] = None,
a_values: Optional[Iterable[float]] = None,
**kwargs,
) -> VDict:
"""Gets labels for the coordinates
Parameters
----------
r_values
Iterable of values along the radius, by default None.
a_values
Iterable of values along the azimuth, by default None.
Returns
-------
VDict
Labels for the radius and azimuth values.
"""
if r_values is None:
r_values = [r for r in self.get_x_axis().get_tick_range() if r >= 0]
if a_values is None:
a_values = np.arange(0, 1, 1 / self.azimuth_step)
r_mobs = self.get_x_axis().add_numbers(r_values)
if self.azimuth_direction == "CCW":
d = 1
elif self.azimuth_direction == "CW":
d = -1
else:
raise ValueError("Invalid azimuth direction. Expected one of: CW, CCW")
a_points = [
{
"label": i,
"point": np.array(
[
self.get_right()[0]
* np.cos(d * (i * TAU) + self.azimuth_offset),
self.get_right()[0]
* np.sin(d * (i * TAU) + self.azimuth_offset),
0,
]
),
}
for i in a_values
]
if self.azimuth_units == "PI radians" or self.azimuth_units == "TAU radians":
a_tex = [
self.get_radian_label(i["label"])
.scale(self.azimuth_label_scale)
.next_to(
i["point"],
direction=i["point"],
aligned_edge=i["point"],
buff=self.azimuth_label_buff,
)
for i in a_points
]
elif self.azimuth_units == "degrees":
a_tex = [
MathTex(f'{360 * i["label"]:g}' + r"^{\circ}")
.scale(self.azimuth_label_scale)
.next_to(
i["point"],
direction=i["point"],
aligned_edge=i["point"],
buff=self.azimuth_label_buff,
)
for i in a_points
]
elif self.azimuth_units == "gradians":
a_tex = [
MathTex(f'{400 * i["label"]:g}' + r"^{g}")
.scale(self.azimuth_label_scale)
.next_to(
i["point"],
direction=i["point"],
aligned_edge=i["point"],
buff=self.azimuth_label_buff,
)
for i in a_points
]
elif self.azimuth_units is None:
a_tex = [
MathTex(f'{i["label"]:g}')
.scale(self.azimuth_label_scale)
.next_to(
i["point"],
direction=i["point"],
aligned_edge=i["point"],
buff=self.azimuth_label_buff,
)
for i in a_points
]
a_mobs = VGroup(*a_tex)
self.coordinate_labels = VGroup(r_mobs, a_mobs)
return self.coordinate_labels
def add_coordinates(
self,
r_values: Optional[Iterable[float]] = None,
a_values: Optional[Iterable[float]] = None,
):
"""Adds the coordinates.
Parameters
----------
r_values
Iterable of values along the radius, by default None.
a_values
Iterable of values along the azimuth, by default None.
"""
self.add(self.get_coordinate_labels(r_values, a_values))
return self
def get_radian_label(self, number, stacked=True):
constant_label = {"PI radians": r"\pi", "TAU radians": r"\tau"}[
self.azimuth_units
]
division = number * {"PI radians": 2, "TAU radians": 1}[self.azimuth_units]
frac = fr.Fraction(division).limit_denominator(max_denominator=100)
        if frac.numerator == 0:
return MathTex(r"0")
elif frac.numerator == 1 and frac.denominator == 1:
return MathTex(constant_label)
elif frac.numerator == 1:
if self.azimuth_compact_fraction:
return MathTex(
r"\tfrac{" + constant_label + r"}{" + str(frac.denominator) + "}"
)
else:
return MathTex(
r"\tfrac{1}{" + str(frac.denominator) + "}" + constant_label
)
elif frac.denominator == 1:
return MathTex(str(frac.numerator) + constant_label)
else:
if self.azimuth_compact_fraction:
return MathTex(
r"\tfrac{"
+ str(frac.numerator)
+ constant_label
+ r"}{"
+ str(frac.denominator)
+ r"}"
)
else:
return MathTex(
r"\tfrac{"
+ str(frac.numerator)
+ r"}{"
+ str(frac.denominator)
+ r"}"
+ constant_label
)
class ComplexPlane(NumberPlane):
"""
Examples
--------
.. manim:: ComplexPlaneExample
:save_last_frame:
:ref_classes: Dot MathTex
class ComplexPlaneExample(Scene):
def construct(self):
plane = ComplexPlane().add_coordinates()
self.add(plane)
d1 = Dot(plane.n2p(2 + 1j), color=YELLOW)
d2 = Dot(plane.n2p(-3 - 2j), color=YELLOW)
label1 = MathTex("2+i").next_to(d1, UR, 0.1)
label2 = MathTex("-3-2i").next_to(d2, UR, 0.1)
self.add(
d1,
label1,
d2,
label2,
)
"""
def __init__(self, color=BLUE, **kwargs):
super().__init__(
color=color,
**kwargs,
)
def number_to_point(self, number):
number = complex(number)
return self.coords_to_point(number.real, number.imag)
def n2p(self, number):
return self.number_to_point(number)
def point_to_number(self, point):
x, y = self.point_to_coords(point)
return complex(x, y)
def p2n(self, point):
return self.point_to_number(point)
def get_default_coordinate_values(self):
x_numbers = self.get_x_axis().get_tick_range()
y_numbers = self.get_y_axis().get_tick_range()
y_numbers = [complex(0, y) for y in y_numbers if y != 0]
return [*x_numbers, *y_numbers]
def get_coordinate_labels(self, *numbers, **kwargs):
if len(numbers) == 0:
numbers = self.get_default_coordinate_values()
self.coordinate_labels = VGroup()
for number in numbers:
z = complex(number)
if abs(z.imag) > abs(z.real):
axis = self.get_y_axis()
value = z.imag
kwargs["unit"] = "i"
else:
axis = self.get_x_axis()
value = z.real
number_mob = axis.get_number_mobject(value, **kwargs)
self.coordinate_labels.add(number_mob)
return self.coordinate_labels
def add_coordinates(self, *numbers):
self.add(self.get_coordinate_labels(*numbers))
return self
``` |
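The coordinate-system classes above are easiest to see in action through a small usage sketch. The snippet below assumes the Manim Community package is installed and that `Axes`, `PolarPlane`, `Dot` and `PI` are importable from its top-level namespace; the scene class name is made up for illustration.
```python
# Minimal usage sketch for the classes above (assumes `pip install manim`;
# the scene class name is illustrative, not part of the source file).
from manim import PI, Axes, Dot, PolarPlane, Scene

class CoordinateSystemsSketch(Scene):
    def construct(self):
        axes = Axes(x_range=[-4, 4, 1], y_range=[-3, 3, 1])
        # coords_to_point: change of basis from axes coordinates to scene points
        point = axes.coords_to_point(2, 1)
        # point_to_coords: the inverse mapping back to (x, y)
        x, y = axes.point_to_coords(point)
        # PolarPlane accepts (radius, azimuth) pairs via polar_to_point
        plane = PolarPlane(azimuth_units="PI radians", size=4).add_coordinates()
        dot = Dot(plane.polar_to_point(1.5, PI / 3))
        self.add(axes, plane, dot)
```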
{
"source": "joybhallaa/scrapy",
"score": 3
} |
#### File: http/request/__init__.py
```python
from w3lib.url import safe_url_string
from scrapy.http.headers import Headers
from scrapy.utils.python import to_bytes
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import escape_ajax
from scrapy.http.common import obsolete_setter
from scrapy.utils.curl import curl_to_request_kwargs
class Request(object_ref):
def __init__(self, url, callback=None, method='GET', headers=None, body=None,
cookies=None, meta=None, encoding='utf-8', priority=0,
dont_filter=False, errback=None, flags=None, cb_kwargs=None):
self._encoding = encoding # this one has to be set first
self.method = str(method).upper()
self._set_url(url)
self._set_body(body)
assert isinstance(priority, int), "Request priority not an integer: %r" % priority
self.priority = priority
if callback is not None and not callable(callback):
raise TypeError('callback must be a callable, got %s' % type(callback).__name__)
if errback is not None and not callable(errback):
raise TypeError('errback must be a callable, got %s' % type(errback).__name__)
self.callback = callback
self.errback = errback
self.cookies = cookies or {}
self.headers = Headers(headers or {}, encoding=encoding)
self.dont_filter = dont_filter
self._meta = dict(meta) if meta else None
self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None
self.flags = [] if flags is None else list(flags)
@property
def cb_kwargs(self):
if self._cb_kwargs is None:
self._cb_kwargs = {}
return self._cb_kwargs
@property
def meta(self):
if self._meta is None:
self._meta = {}
return self._meta
def _get_url(self):
return self._url
def _set_url(self, url):
if not isinstance(url, str):
raise TypeError('Request url must be str or unicode, got %s:' % type(url).__name__)
s = safe_url_string(url, self.encoding)
self._url = escape_ajax(s)
if ('://' not in self._url) and (not self._url.startswith('data:')):
raise ValueError('Missing scheme in request url: %s' % self._url)
url = property(_get_url, obsolete_setter(_set_url, 'url'))
def _get_body(self):
return self._body
def _set_body(self, body):
if body is None:
self._body = b''
else:
self._body = to_bytes(body, self.encoding)
body = property(_get_body, obsolete_setter(_set_body, 'body'))
@property
def encoding(self):
return self._encoding
def __str__(self):
return "<%s %s>" % (self.method, self.url)
__repr__ = __str__
def copy(self):
"""Return a copy of this Request"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Request with the same attributes except for those
given new values.
"""
for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags',
'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop('cls', self.__class__)
return cls(*args, **kwargs)
@classmethod
def from_curl(cls, curl_command, ignore_unknown_options=True, **kwargs):
"""Create a Request object from a string containing a `cURL
<https://curl.haxx.se/>`_ command. It populates the HTTP method, the
URL, the headers, the cookies and the body. It accepts the same
arguments as the :class:`Request` class, taking preference and
overriding the values of the same arguments contained in the cURL
command.
Unrecognized options are ignored by default. To raise an error when
finding unknown options call this method by passing
``ignore_unknown_options=False``.
.. caution:: Using :meth:`from_curl` from :class:`~scrapy.http.Request`
subclasses, such as :class:`~scrapy.http.JSONRequest`, or
:class:`~scrapy.http.XmlRpcRequest`, as well as having
:ref:`downloader middlewares <topics-downloader-middleware>`
and
:ref:`spider middlewares <topics-spider-middleware>`
enabled, such as
:class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,
:class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,
or
:class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,
may modify the :class:`~scrapy.http.Request` object.
To translate a cURL command into a Scrapy request,
you may use `curl2scrapy <https://michael-shub.github.io/curl2scrapy/>`_.
"""
request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)
request_kwargs.update(kwargs)
return cls(**request_kwargs)
``` |
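A short, hedged sketch of how the `Request` class above is typically used; the URL, callback and cURL command are placeholders rather than values taken from the repository.
```python
# Usage sketch for the Request class above (assumes scrapy is installed;
# the URL, callback and cURL command below are placeholders).
from scrapy.http import Request

def parse(response):
    pass  # stand-in callback

req = Request("https://example.com/page", callback=parse, meta={"depth": 1})
# replace() keeps every attribute except the ones given new values
post_req = req.replace(method="POST", body=b"payload")
# from_curl() builds a Request from a cURL command string
curl_req = Request.from_curl(
    "curl 'https://example.com/api' -H 'Accept: application/json'"
)
```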
{
"source": "JoyBoyMaLin/no-fish",
"score": 2
} |
#### File: fish/app/models.py
```python
from django import forms
from django.utils import timezone
from django.db import models
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
class Category(models.Model):
class Meta:
verbose_name = _('category')
verbose_name_plural = _('categories')
title = models.CharField(_('title'), max_length=255)
cover = models.ImageField(_('cover'), upload_to='static/uploads/images/photos/', null=False, blank=False)
enabled = models.BooleanField(_('enabled'), default=True)
views = models.BigIntegerField(default=0)
created = models.DateField(_('created'), default=timezone.now)
def cover_data(self):
return format_html(
'<img src="{}" width="100px"/>',
self.cover.url,
)
cover_data.short_description = _('cover')
def __str__(self):
return self.title
class Photo(models.Model):
class Meta:
verbose_name = _('photo')
verbose_name_plural = _('photos')
title = models.CharField(_('title'), max_length=255)
cover = models.ImageField(_('cover'), upload_to='static/uploads/images/photos/', null=False, blank=False)
enabled = models.BooleanField(_('enabled'), default=True)
category = models.ManyToManyField(Category)
def __str__(self):
return self.title
class Slider(models.Model):
class Meta:
verbose_name = _('slider')
verbose_name_plural = _('sliders')
title = models.CharField(_('title'), max_length=255)
image = models.ImageField(_('image'), upload_to='static/uploads/images/photos/', null=False, blank=False)
url = models.URLField(_('url'), null=False)
enabled = models.BooleanField(_('enabled'), default=True)
def __str__(self):
return self.title
class Detail(models.Model):
class Meta:
verbose_name = _('detail')
verbose_name_plural = _('details')
photo = models.ForeignKey(Photo, default=None, on_delete=models.CASCADE)
image = models.ImageField(_('image'), upload_to='static/uploads/images/photos/', null=True, blank=True)
def __str__(self):
return self.photo.title
``` |
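One way these models could be surfaced in the Django admin, using the `cover_data` preview method defined on `Category`; the repository's actual admin configuration is not shown, so treat this as a sketch.
```python
# Hypothetical admin.py for the models above; not taken from the repository.
from django.contrib import admin

from .models import Category, Detail, Photo, Slider

@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    # cover_data renders a small <img> preview via format_html
    list_display = ("title", "cover_data", "enabled", "views", "created")

admin.site.register([Photo, Slider, Detail])
```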
{
"source": "joyccino/GrabnGo",
"score": 3
} |
#### File: joyccino/GrabnGo/main.py
```python
import cv2
import numpy as np
import time
import face_recognition
import os
import os.path
#file transfer
from ftplib import FTP
def crop():
# from cropData import cropData
customer_name = input('Hello customer, Please enter your name.')
# Directory
directory = customer_name
# Parent Directory path
parent_dir = "/home/moon/Desktop/chosenones/1/customers/"
# Path
path = os.path.join(parent_dir, directory)
os.mkdir(path)
print("Directory '% s' created" % directory)
print("done, a directory has been generated for "+customer_name)
print('Your webcam is getting ready...')
# cropData()
webcam = cv2.VideoCapture(0)
if not webcam.isOpened():
print("Could not open webcam")
exit()
sample_num = 0
captured_num = 0
start_time=time.time()
counter = 0
grabngo = 'customer'
# loop through frames
while webcam.isOpened():
# read frame from webcam
status, frame = webcam.read()
sample_num = sample_num + 1
if not status:
break
# display output
cv2.imshow("captured frames", frame)
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(frame)
face_encodings = face_recognition.face_encodings(frame, face_locations)
# Label the results
for (top, right, bottom, left), name in zip(face_locations, grabngo):
if not name:
continue
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
crop_img = frame[top:bottom, left:right]
captured_num = captured_num + 1
cv2.imwrite('/home/moon/Desktop/chosenones/1/customers/'+customer_name+"/"+str(captured_num)+'.jpg', crop_img)
# press "Q" to stop
if cv2.waitKey(1) & 0xFF == ord('q'):
break
end_time=time.time()
elapsed = end_time - start_time
if elapsed > 5:
break
# release resources
webcam.release()
cv2.destroyAllWindows()
prepath = '/home/moon/Desktop/chosenones/1/customers/'
path = prepath+customer_name
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
if num_files < 10:
print('Your data size is less than 10... please inform your staff.')
else:
print('welcome, '+customer_name+'. you are ready to go.')
print('for file transfer')
``` |
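The script imports `FTP` from `ftplib` but never uses it. A sketch of how the cropped frames might be pushed to a server is shown below; the host, credentials and directory are invented for illustration.
```python
# Sketch only: the main script imports ftplib.FTP without using it. This shows
# one plausible upload step for the cropped images; host, credentials and the
# local directory are placeholders, not values from the repository.
import os
from ftplib import FTP

def upload_crops(local_dir, host="ftp.example.com", user="anonymous", passwd=""):
    with FTP(host) as ftp:
        ftp.login(user=user, passwd=passwd)
        for name in sorted(os.listdir(local_dir)):
            path = os.path.join(local_dir, name)
            if os.path.isfile(path):
                with open(path, "rb") as fh:
                    ftp.storbinary("STOR " + name, fh)  # upload under the same name
```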
{
"source": "Joyce1114/National-Park-Crawler",
"score": 3
} |
#### File: quotesbot/spiders/toscrape-xpath.py
```python
import scrapy
import urllib.parse as urlparse  # urljoin is provided by urllib.parse on Python 3

from quotesbot.items import ParkItem
class ToScrapeSpiderXPath(scrapy.Spider):
name = 'toscrape-xpath'
start_urls = [
'https://www.nps.gov/findapark/index.htm',
]
def parse(self, response):
parks = []
for item in response.xpath('.//select[@data-nonselectedtext="Park Name"]//option'):
park = ParkItem()
park["name"] = item.xpath('./text()').extract_first()
park["code"] = item.xpath('./@value').extract_first()
parks.append(park)
for park in parks:
start = "https://www.nps.gov/"
end = "/index.htm"
url = start + str(park.get("code"))[3:7] + end
request = scrapy.Request(url, callback=self.parse_history)
request.meta["park"] = park
yield request
# Get the specific park website callback
def parse_history(self, response):
base_url = "https://www.nps.gov"
park = response.meta["park"]
park["history"] = response.xpath('.//p/text()').extract_first()
hours_url = urlparse.urljoin(base_url, response.xpath('.//li/a[contains(@href, "hours")]/@href').extract_first())
hours_request = scrapy.Request(hours_url, callback=self.parse_hours, meta={"park": park})
yield hours_request
def parse_hours(self, response):
park = response.meta["park"]
park["hours"] = response.xpath(".//p/span/text()[contains(., 'a.m.') or contains(., 'am') "
"or contains(., 'A.M.') or contains(., 'AM')]").extract_first()
fees_url = urlparse.urljoin(response.url, response.xpath('.//li/a[contains(@href, "fees")]/@href').extract_first())
fees_request = scrapy.Request(fees_url, callback=self.parse_fees, meta={"park": park})
yield fees_request
# TODO
def parse_fees(self, response):
park = response.meta["park"]
park["fees"] = response.xpath(".//table//text()[contains(., ' fee ') or contains(., ' charge ')"
"or contains(., ' fees ') or contains(., ' passes ')]").extract_first()
services_url = urlparse.urljoin(response.url, response.xpath('.//li/a[contains(@href, "services")]/@href')
.extract_first())
services_request = scrapy.Request(services_url, callback=self.parse_services, meta={"park": park})
yield services_request
# TODO Where is this??
def parse_services(self, response):
park = response.meta["park"]
park["goodsandservices"] = response.xpath(".//p/span/text()").extract()
things2do_url = urlparse.urljoin(response.url, response.xpath('.//li/a[contains(@href, "things2do")]/@href')
.extract_first())
things2do_request = scrapy.Request(things2do_url, callback=self.parse_things2do, meta={"park": park})
yield things2do_request
# TODO Currently use the figure to locate
def parse_things2do(self, response):
park = response.meta["park"]
park["things2do"] = response.xpath(".//div[figure/@class = '-left']//strong/text()").extract()
news_url = urlparse.urljoin(response.url, response.xpath('.//li/a[contains(@href, "news")]/@href').extract_first())
news_request = scrapy.Request(news_url, callback=self.parse_news, meta={"park": park})
#yield news_request
yield {
'name': park.get('name'),
'things': park.get('things2do')
}
# TODO Currently use table to locate
def parse_news(self, response):
park = response.meta["park"]
park["news"] = response.xpath(".//table//p//text()").extract()
nature_url = urlparse.urljoin(response.url, response.xpath('.//li/a[contains(@href, "nature")]/@href').extract_first())
nature_request = scrapy.Request(nature_url, callback=self.parse_nature, meta={"park": park})
yield nature_request
# TODO Currently use table to locate
def parse_nature(self, response):
park = response.meta["park"]
park["nature"] = response.xpath(".//table//p//text()").extract()
info_url = urlparse.urljoin(response.url, response.xpath('.//li/a[contains(@href, "basicinfo")]/@href').extract_first())
info_request = scrapy.Request(info_url, callback=self.parse_info, meta={"park": park})
yield info_request
# TODO
def parse_info(self, response):
park = response.meta["park"]
park["weather"] = response.xpath(".//p[text()[contains(., ' season ') or contains(., ' weather ') "
"or contains(., ' temperature ')]]").extract_first()
yield {
"name": park.get('name'),
"history": park.get('history'),
"hours": park.get('hours'),
"fees": park.get('fees'),
"goodsandservices": park.get('goodsandservices'),
"things2do": park.get('things2do'),
"news": park.get('news'),
"nature": park.get('nature'),
"weather": park.get('weather')
}
``` |
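The spider imports `ParkItem` from `quotesbot.items`, which is not included above. Judging from the fields it sets, the item definition is probably close to the sketch below (field list inferred, not copied from the repository); the spider itself would be run with `scrapy crawl toscrape-xpath -o parks.json`.
```python
# Inferred items.py sketch: ParkItem is imported by the spider above but not
# shown, so this field list is reconstructed from the attributes it assigns.
import scrapy

class ParkItem(scrapy.Item):
    name = scrapy.Field()
    code = scrapy.Field()
    history = scrapy.Field()
    hours = scrapy.Field()
    fees = scrapy.Field()
    goodsandservices = scrapy.Field()
    things2do = scrapy.Field()
    news = scrapy.Field()
    nature = scrapy.Field()
    weather = scrapy.Field()
```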
{
"source": "Joyce72/free-python-games",
"score": 3
} |
#### File: free-python-games/docs/demo.py
```python
import docutils.frontend
import docutils.parsers.rst
import docutils.utils
import pyautogui
import pynput.keyboard as kb
import queue
import subprocess as sp
import threading
import time
slide = 0
slides = [
'''
# Give the Gift of Python
## <NAME>
# 1. Python trainer for Fortune 100 companies.
# 2. Married to Chemistry teacher, father of two.
# 3. 100s of hours of 4-12th grade instruction.
'''.splitlines(),
'''
## Setup
# 1. Install Python: https://www.python.org/
# 2. Run IDLE: $ python -m idlelib.idle
# 3. Use built-in Turtle module!
'''.splitlines(),
[
'',
'## Open the Turtle Window',
'',
'from turtle import *', 0.5,
'reset()', 0.5,
],
[
'',
'## Basic Commands',
'',
'forward(100)', 1,
'right(90)', 1,
'fd(100)', 1,
'rt(90)', 1,
'backward(-100)', 1,
'left(-90)', 1,
'forward(100)', 1,
'right(90)', 1,
'undo()', 1,
'undo()', 1,
],
[
'',
'## Loops',
'',
'reset()', 0.5,
'for each in range(5):', 0.5,
'bk(100)', 0.5,
'lt(144)', 0.5,
'', 3,
],
[
'',
'## Functions and Shapes',
'',
'def square():', 0.5,
'begin_fill()', 0.5,
'for each in range(4):', 0.5,
'forward(100)', 0.5,
'right(90)', 0.5,
-1,
'end_fill()', 0.5,
'',
'reset()', 0.5,
'square()', 3,
],
[
'',
'## Dots',
'',
'reset()', 0.5,
'help(dot)', 1,
'dot(100)', 1,
],
[
'',
        '## Colors',
'',
'reset()', 0.5,
'from itertools import *', 0.5,
"colors = cycle(['red', 'green', 'blue', 'purple'])", 0.5,
'def present():', 0.5,
'for i in range(4):', 0.5,
'color(next(colors))', 0.5,
'square()', 0.5,
'left(90)', 0.5,
'',
'present()', 5,
],
[
'',
'## Locations',
'',
'reset()', 0.5,
'def line(a, b, x, y):', 0.5,
'up()', 0.5,
'goto(a, b)', 0.5,
'down()', 0.5,
'goto(x, y)', 0.5,
'',
"color('red')", 0.5,
'width(20)', 0.5,
'line(-100, -100, 0, 200)', 1,
'line(0, 200, 100, -100)', 1,
'line(100, -100, -100, -100)', 1,
],
[
'',
'## Mouse Inputs',
'',
'width(10)', 0.5,
"color('green')", 0.5,
'def tap(x, y):', 0.5,
'goto(x, y)', 0.5,
'dot(20)', 0.5,
'',
'onscreenclick(tap)', 0.5,
],
[
'',
'## Keyboard Events',
'',
'reset()', 0.5,
'width(10)', 0.5,
"onkey(lambda: fd(30), 'Up')", 0.5,
"onkey(lambda: bk(30), 'Down')", 0.5,
"onkey(lambda: lt(30), 'Left')", 0.5,
"onkey(lambda: rt(30), 'Right')", 0.5,
'listen()', 0.5,
],
[
'',
'## Animation',
'',
'hideturtle()', 0.5,
'tracer(False)', 0.5,
'running = True', 0.5,
'def draw():', 0.5,
'clear()', 0.5,
'present()', 0.5,
'update()', 0.5,
'left(1)', 0.5,
'if running:', 0.5,
'ontimer(draw, 100)',
'',
'reset()', 0.5,
'draw()', 0.5,
],
'''
## Free Python Games
# 1. Search: Free Python Games
# 2. $ python -m pip install freegames
# 3. http://www.grantjenks.com/docs/freegames/
'''.splitlines(),
]
def worker():
global slide
while True:
key = inputs.get()
if key == kb.Key.esc:
print('Typing slide', slide)
parts = slides[slide]
for part in parts:
if part == '':
pyautogui.press('enter')
time.sleep(0.25)
elif part == -1:
pyautogui.press('backspace')
elif isinstance(part, str):
pyautogui.typewrite(part, interval=0.1)
pyautogui.press('enter')
else:
time.sleep(part)
slide += 1
def ticker():
def on_press(key):
inputs.put(key)
with kb.Listener(on_press=on_press) as listener:
listener.join()
def commander():
global slide
while True:
value = input()
if value == 'q':
exit()
try:
slide = int(value)
except ValueError:
pass
def main():
global inputs
idle = sp.Popen(['python', '-m', 'idlelib.idle'])
inputs = queue.Queue()
work = threading.Thread(target=worker)
work.start()
tick = threading.Thread(target=ticker)
tick.start()
cmdr = threading.Thread(target=commander)
cmdr.start()
cmdr.join()
tick.join()
work.join()
idle.wait()
if __name__ == '__main__':
main()
``` |
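The demo drives IDLE through a queue shared between a pynput keyboard listener and a worker thread. Stripped of the slide data, the pattern looks roughly like the sketch below; the Esc-to-quit handling and the print are illustrative only.
```python
# Reduced sketch of the listener/worker pattern used by demo.py: a pynput
# listener pushes key events onto a queue and a worker consumes them.
import queue
import threading

import pynput.keyboard as kb

events = queue.Queue()

def worker():
    while True:
        key = events.get()
        if key == kb.Key.esc:
            break
        print("pressed", key)

def on_press(key):
    events.put(key)
    if key == kb.Key.esc:
        return False  # returning False stops the pynput listener

threading.Thread(target=worker, daemon=True).start()
with kb.Listener(on_press=on_press) as listener:
    listener.join()  # returns once Esc is pressed
```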
{
"source": "JoyceBabu/SublimeLinter",
"score": 2
} |
#### File: SublimeLinter/lint/backend.py
```python
import sublime
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_EXCEPTION
from contextlib import contextmanager
from itertools import chain, count
from functools import partial
import hashlib
import json
import logging
import multiprocessing
import os
import time
import threading
from . import style, util, linter as linter_module
logger = logging.getLogger(__name__)
WILDCARD_SYNTAX = '*'
MAX_CONCURRENT_TASKS = multiprocessing.cpu_count() or 1
task_count = count(start=1)
counter_lock = threading.Lock()
process_limit = threading.BoundedSemaphore(MAX_CONCURRENT_TASKS)
def lint_view(linters, view, view_has_changed, next):
"""Lint the given view.
This is the top level lint dispatcher. It is called
asynchronously.
"""
lint_tasks = get_lint_tasks(linters, view, view_has_changed)
run_concurrently(
partial(run_tasks, tasks, next=partial(next, linter))
for linter, tasks in lint_tasks
)
def run_tasks(tasks, next):
results = run_concurrently(tasks)
if results is None:
return # ABORT
errors = list(chain.from_iterable(results)) # flatten and consume
# We don't want to guarantee that our consumers/views are thread aware.
# So we merge here into Sublime's shared worker thread. Sublime guarantees
# here to execute all scheduled tasks ordered and sequentially.
sublime.set_timeout_async(lambda: next(errors))
def get_lint_tasks(linters, view, view_has_changed):
total_tasks = 0
for (linter, regions) in get_lint_regions(linters, view):
tasks = _make_tasks(linter, regions, view, view_has_changed)
total_tasks += len(tasks)
yield linter, tasks
if total_tasks > 4:
logger.warning(
"'{}' puts in total {}(!) tasks on the queue."
.format(short_canonical_filename(view), total_tasks)
)
def _make_tasks(linter_, regions, view, view_has_changed):
independent_linters = create_n_independent_linters(linter_, len(regions))
tasks = []
for linter, region in zip(independent_linters, regions):
code = view.substr(region)
offsets = view.rowcol(region.begin()) + (region.begin(),)
# Due to a limitation in python 3.3, we cannot 'name' a thread when
# using the ThreadPoolExecutor. (This feature has been introduced
# in python 3.6.) So, we do this manually.
task_name = make_good_task_name(linter, view)
task = partial(execute_lint_task, linter, code, offsets, view_has_changed)
executor = partial(modify_thread_name, task_name, task)
tasks.append(executor)
if len(tasks) > 3:
logger.warning(
"'{}' puts {} {} tasks on the queue."
.format(short_canonical_filename(view), len(tasks), linter_.name)
)
return tasks
def create_n_independent_linters(linter, n):
return (
[linter]
if n == 1
else [clone_linter(linter) for _ in range(n)]
)
def clone_linter(linter):
# type: (linter_module.Linter) -> linter_module.Linter
return linter.__class__(linter.view, linter.settings.clone())
def short_canonical_filename(view):
return (
os.path.basename(view.file_name())
if view.file_name()
else '<untitled {}>'.format(view.buffer_id())
)
def make_good_task_name(linter, view):
with counter_lock:
task_number = next(task_count)
canonical_filename = short_canonical_filename(view)
return 'LintTask|{}|{}|{}|{}'.format(
task_number, linter.name, canonical_filename, view.id())
def modify_thread_name(name, sink):
original_name = threading.current_thread().name
# We 'name' our threads, for logging purposes.
threading.current_thread().name = name
try:
return sink()
finally:
threading.current_thread().name = original_name
@contextmanager
def reduced_concurrency():
start_time = time.time()
with process_limit:
end_time = time.time()
waittime = end_time - start_time
if waittime > 0.1:
logger.warning('Waited in queue for {:.2f}s'.format(waittime))
yield
@reduced_concurrency()
def execute_lint_task(linter, code, offsets, view_has_changed):
try:
errors = linter.lint(code, view_has_changed) or []
finalize_errors(linter, errors, offsets)
return errors
except linter_module.TransientError:
raise # Raise here to abort in `await_futures` below
except linter_module.PermanentError:
return [] # Empty list here to clear old errors
except Exception:
linter.notify_failure()
# Log while multi-threaded to get a nicer log message
logger.exception('Unhandled exception:\n', extra={'demote': True})
return [] # Empty list here to clear old errors
def error_json_serializer(o):
"""Return a JSON serializable representation of error properties."""
if isinstance(o, sublime.Region):
return (o.a, o.b)
return o
def finalize_errors(linter, errors, offsets):
linter_name = linter.name
view = linter.view
line_offset, col_offset, pt_offset = offsets
for error in errors:
# see if this error belongs to the main file
belongs_to_main_file = True
if 'filename' in error:
if (os.path.normcase(error['filename']) != os.path.normcase(view.file_name() or '') and
error['filename'] != "<untitled {}>".format(view.buffer_id())):
belongs_to_main_file = False
line, start, end = error['line'], error['start'], error['end']
if belongs_to_main_file: # offsets are for the main file only
if line == 0:
start += col_offset
end += col_offset
line += line_offset
try:
region = error['region']
except KeyError:
line_start = view.text_point(line, 0)
region = sublime.Region(line_start + start, line_start + end)
if len(region) == 0:
region.b = region.b + 1
else:
if belongs_to_main_file: # offsets are for the main file only
region = sublime.Region(region.a + pt_offset, region.b + pt_offset)
error.update({
'line': line,
'start': start,
'end': end,
'linter': linter_name,
'region': region
})
uid = hashlib.sha256(
json.dumps(error, sort_keys=True, default=error_json_serializer).encode('utf-8')).hexdigest()
error.update({
'uid': uid,
'priority': style.get_value('priority', error, 0)
})
def get_lint_regions(linters, view):
syntax = util.get_syntax(view)
for linter in linters:
settings = linter.get_view_settings()
selector = settings.get('selector', None)
if selector is not None:
# Inspecting just the first char is faster
if view.score_selector(0, selector):
yield linter, [sublime.Region(0, view.size())]
else:
yield linter, [
region for region in view.find_by_selector(selector)
]
continue
# Fallback using deprecated `cls.syntax` and `cls.selectors`
if (
syntax not in linter.selectors and
WILDCARD_SYNTAX not in linter.selectors
):
yield linter, [sublime.Region(0, view.size())]
else:
yield linter, [
region
for selector in get_selectors(linter, syntax)
for region in view.find_by_selector(selector)
]
def get_selectors(linter, wanted_syntax):
for syntax in [wanted_syntax, WILDCARD_SYNTAX]:
try:
yield linter.selectors[syntax]
except KeyError:
pass
def run_concurrently(tasks, max_workers=MAX_CONCURRENT_TASKS):
with ThreadPoolExecutor(max_workers=max_workers) as executor:
work = [executor.submit(task) for task in tasks]
return await_futures(work)
def await_futures(fs):
done, not_done = wait(fs, return_when=FIRST_EXCEPTION)
try:
return [future.result() for future in done]
except Exception:
for future in not_done:
future.cancel()
return None
``` |
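The core concurrency idea in `run_concurrently`/`await_futures` — submit every task to a bounded pool, stop waiting at the first exception, and cancel whatever has not started — also works outside Sublime Text. A self-contained sketch with placeholder tasks:
```python
# Self-contained sketch of the run_concurrently / await_futures pattern above;
# the lint_chunk task body is a placeholder.
from concurrent.futures import FIRST_EXCEPTION, ThreadPoolExecutor, wait
from functools import partial

def lint_chunk(chunk):
    return [len(chunk)]  # stand-in for a linter returning a list of errors

def run_concurrently(tasks, max_workers=4):
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        work = [executor.submit(task) for task in tasks]
        done, not_done = wait(work, return_when=FIRST_EXCEPTION)
        try:
            return [future.result() for future in done]
        except Exception:
            for future in not_done:
                future.cancel()  # abort tasks that never started
            return None

results = run_concurrently(partial(lint_chunk, c) for c in ("abc", "de", "f"))
print(results)  # [[3], [2], [1]] in no particular order
```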
{
"source": "joycebyang/covidx",
"score": 3
} |
#### File: covidx/pretrain/models.py
```python
from collections import OrderedDict
import efficientnet_pytorch
import torch
import torch.nn as nn
import torchvision
class DenseNetModel(nn.Module):
def __init__(self, arch, num_classes):
super(DenseNetModel, self).__init__()
model_func = getattr(torchvision.models, arch)
self.model = model_func(pretrained=True)
# check if classifier is Linear or Sequential Layer
if isinstance(self.model.classifier, nn.Linear):
in_features = self.model.classifier.in_features
hidden_features = self.model.classifier.out_features
else:
for x in self.model.classifier:
if isinstance(x, nn.Linear):
in_features = x.in_features
hidden_features = x.out_features
break
# build a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
self.model.classifier = nn.Sequential(
nn.Linear(in_features=in_features, out_features=hidden_features),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(in_features=hidden_features, out_features=num_classes),
nn.Sigmoid()
)
def forward(self, x):
return self.model(x)
class ResNetModel(nn.Module):
def __init__(self, arch, num_classes):
super(ResNetModel, self).__init__()
model_func = getattr(torchvision.models, arch)
self.model = model_func(pretrained=True)
# check if classifier is Linear or Sequential Layer
if isinstance(self.model.fc, nn.Linear):
in_features = self.model.fc.in_features
hidden_features = self.model.fc.out_features
else:
for x in self.model.fc:
if isinstance(x, nn.Linear):
in_features = x.in_features
hidden_features = x.out_features
break
# build a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
self.model.fc = nn.Sequential(
nn.Linear(in_features=in_features, out_features=hidden_features),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(in_features=hidden_features, out_features=num_classes),
nn.Sigmoid()
)
def forward(self, x):
return self.model(x)
class EfficientNetModel(nn.Module):
def __init__(self, arch, num_classes):
super(EfficientNetModel, self).__init__()
self.model = efficientnet_pytorch.EfficientNet.from_pretrained(arch)
if isinstance(self.model._fc, nn.Linear):
in_features = self.model._fc.in_features
hidden_features = self.model._fc.out_features
else:
for x in self.model._fc:
if isinstance(x, nn.Linear):
in_features = x.in_features
hidden_features = x.out_features
break
self.model._fc = nn.Sequential(
nn.Linear(in_features=in_features, out_features=hidden_features),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(in_features=hidden_features, out_features=num_classes),
nn.Sigmoid()
)
def forward(self, x):
return self.model(x)
def load_from_checkpoint(self, ckp_path, device=torch.device('cpu')):
model_checkpoint = torch.load(ckp_path, map_location=device)
state_dict = model_checkpoint['state_dict']
new_state_dict = OrderedDict()
for k in state_dict:
short_key = k.replace('module.efficientnet.', '')
new_state_dict[short_key] = state_dict[k]
old_state_dict = self.model.state_dict()
for k in new_state_dict:
if k not in old_state_dict:
print('Unexpected key %s in state_dict' % k)
for k in old_state_dict:
if k not in new_state_dict:
print('Missing key %s in state_dict' % k)
self.model.load_state_dict(new_state_dict, strict=False)
def DenseNet121(num_classes=14):
return DenseNetModel('densenet121', num_classes)
def DenseNet169(num_classes=14):
return DenseNetModel('densenet169', num_classes)
def DenseNet201(num_classes=14):
return DenseNetModel("densenet201", num_classes)
def ResNet34(num_classes=14):
return ResNetModel("resnet34", num_classes)
def ResNet50(num_classes=14):
return ResNetModel('resnet50', num_classes)
def ResNet101(num_classes=14):
return ResNetModel("resnet101", num_classes)
def EfficientNet4(num_classes=14):
return EfficientNetModel("efficientnet-b4", num_classes)
def EfficientNet5(num_classes=14):
return EfficientNetModel('efficientnet-b5', num_classes)
def EfficientNet6(num_classes=14):
return EfficientNetModel('efficientnet-b6', num_classes)
``` |
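A quick, hedged smoke test for the wrappers above: build one model and push a dummy batch through it. The module name `models` and the 224x224 input size are assumptions; constructing a model downloads the pretrained backbone weights.
```python
# Smoke-test sketch for the model wrappers above. Assumes the file is importable
# as `models` and that 224x224 RGB inputs are appropriate.
import torch

import models

net = models.DenseNet121(num_classes=14)
net.eval()
with torch.no_grad():
    batch = torch.randn(2, 3, 224, 224)   # two dummy RGB images
    probs = net(batch)                    # sigmoid outputs, shape (2, 14)
print(probs.shape)
```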
{
"source": "joyce-fang/deep-reinforcement-learning",
"score": 3
} |
#### File: deep-reinforcement-learning/p1_navigation/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
def forward(self, state):
"""Build a network that maps state -> action values."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
return self.fc3(x)
class QNetworkPixels(nn.Module):
def __init__(self, state_size, action_size, seed):
super(QNetworkPixels, self).__init__()
self.seed = torch.manual_seed(seed)
self.conv1 = nn.Conv3d(3, 16, kernel_size=(1, 8, 8), stride=(1, 4, 4))
self.conv2 = nn.Conv3d(16, 32, kernel_size=(4, 4, 4), stride=(1, 2, 2))
conv2_output_size = self._get_conv_output_size(state_size)
fc = [conv2_output_size, 256]
self.fc1 = nn.Linear(fc[0], fc[1])
self.fc2 = nn.Linear(fc[1], action_size)
def forward(self, state):
x = self._cnn(state)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def _get_conv_output_size(self, shape):
x = torch.rand(shape)
x = self._cnn(x)
n_size = x.data.view(1, -1).size(1)
return n_size
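    # Note: _get_conv_output_size infers the flattened feature size by pushing a
    # random tensor shaped like the input state through the conv stack once, so the
    # first fully connected layer can be sized without hand-computing conv output dims.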
def _cnn(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = x.view(x.size(0), -1)
return x
``` |
{
"source": "joyceho/abbr-norm",
"score": 3
} |
#### File: joyceho/abbr-norm/format_nursing_abbr.py
```python
import json
import itertools
import argparse
import collections
import re
def _abbr_filter(abbr):
if len(abbr) < 2:
return False
return True
def _clean_abbr(abbrList):
# append the or to make life a bit easier
abbr = (" or ".join(abbrList)).strip()
if abbr is None or abbr == "":
return None
# split based on comma or ;
tmpAbbr = re.split(",|;", abbr)
# then resplit based on or
tmpAbbr = list(map(lambda x: x.split(" or "), tmpAbbr))
# flatten the list
tmpAbbr = itertools.chain(*tmpAbbr)
# clean the list to remove spaces
tmpAbbr = list(map(lambda x: x.strip(), tmpAbbr))
# clean the list with some filtering
tmpAbbr = list(filter(_abbr_filter, tmpAbbr))
return tmpAbbr
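# Illustrative walk-through of _clean_abbr (hypothetical input, not from the source data):
#   _clean_abbr(["abd, ABD or abdo"])
#   -> joined: "abd, ABD or abdo"
#   -> split on ",|;": ["abd", " ABD or abdo"]
#   -> split on " or " and flatten: ["abd", " ABD", "abdo"]
#   -> strip + filter (len >= 2): ["abd", "ABD", "abdo"]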
def _clean_qual_name(fullList):
full = "".join(fullList)
    # remove any parenthesized text
full = re.sub(r'\([^)]*\)', '', full)
# full = re.sub(r'\ from Latin.+', '', full, flags=re.IGNORECASE)
# full = re.sub(r'\ from the Latin.+', '', full, flags=re.IGNORECASE)
# full = re.sub(r'\ from Middle English.+', '', full, flags=re.IGNORECASE)
return full
def parse_json(abbrDict):
finalAbbr = collections.defaultdict(list)
# setup the dictionary with the final abbreviations
for nl in abbrDict:
abbrList = _clean_abbr(nl['abbr'])
if abbrList is None:
continue
qualName = _clean_qual_name(nl['full'])
if " or " in qualName:
continue
for ab in abbrList:
finalAbbr[ab.strip()].append(qualName)
return finalAbbr
def deleteMultiKeys(abbrDict):
finalAbbr = {}
for k, v in abbrDict.items():
# map the values to lowercase set
syn = set(map(lambda x: x.lower(), v))
if len(syn) == 1:
finalAbbr[k] = v[0].lower()
return finalAbbr
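# Example of the intended behaviour (hypothetical entries): an abbreviation whose
# expansions agree apart from case, e.g. {"abd": ["Abdomen", "abdomen"]}, collapses
# to {"abd": "abdomen"}; an ambiguous one such as {"ac": ["before meals",
# "acromioclavicular"]} is dropped because its lowercased expansions differ.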
def main():
parser = argparse.ArgumentParser()
parser.add_argument("outputFile", help="filename for updated abbreviations")
parser.add_argument("-i", help="input json files to parse", nargs='+', required=True)
args = parser.parse_args()
jsonAbbrFiles = args.i
finalAbbrDict = {}
for jsonFile in jsonAbbrFiles:
print("Loading and cleaning file:" + jsonFile)
# open the json abbreviation file
jsonAbbr = parse_json(json.load(open(jsonFile, 'r')))
        jsonCleanedAbbr = deleteMultiKeys(jsonAbbr)
        finalAbbrDict.update(jsonCleanedAbbr)
with open(args.outputFile, 'w') as outfile:
json.dump(finalAbbrDict, outfile, indent=2)
if __name__ == "__main__":
main()
``` |
{
"source": "joyceho/afib",
"score": 3
} |
#### File: afib/afib/baserisk.py
```python
from abc import ABCMeta, abstractmethod
class BaseRisk(object):
__metaclass__ = ABCMeta
features = None
@abstractmethod
def score(self, row):
"""
Given a pandas row or dictionary representation,
calculate the risk score
Parameters
----------
row : pandas row representation
Returns
----------
float: the score
"""
pass
```
#### File: afib/risk_scores/npoaf.py
```python
import numpy as np
from afib import BaseRisk
NPOAF_PTS = [2,3,1,3,3,1]
def npoaf(age, mvd, lad):
"""Calculates alternative POAF score (New-onset POAF score).
Args:
age: age of the person in years.
mvd: has mitral valve disease, takes inputs 'None', 'Mild', 'Moderate' or 'Severe' (first letter capitalized).
lad: t/f has left atrial dilatation.
Returns:
the new-onset POAF score.
Raises:
TypeError: if inputs are incorrect type.
"""
arr = np.array([65 <= age <= 74,
75 <= age,
                    mvd == "Mild",
                    mvd == "Moderate",
                    mvd == "Severe",
lad], dtype=int)
return arr.dot(NPOAF_PTS)
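# Worked example (illustrative values): npoaf(70, "Moderate", True) builds the
# indicator vector [1, 0, 0, 1, 0, 1] (age 65-74, moderate MVD, LAD present), and
# [1, 0, 0, 1, 0, 1] . [2, 3, 1, 3, 3, 1] = 2 + 3 + 1 = 6.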
class NPoafC(BaseRisk):
def score(self, row):
return npoaf(row["age"],
row["mvd"],
row["lad"])
```
#### File: risk_scores/tests/test_poaf.py
```python
import numpy.testing as npt
from afib.risk_scores import poaf, PoafC
def test_poaf():
tmp = poaf(59, False, 15, False, False, (30/100), False)
npt.assert_equal(tmp, 0)
tmp = poaf(60, False, 15, False, False, (30/100), False)
npt.assert_equal(tmp, 1)
tmp = poaf(70, False, 15, False, False, (30/100), False)
npt.assert_equal(tmp, 2)
tmp = poaf(80, False, 15, False, False, (30/100), False)
npt.assert_equal(tmp, 3)
tmp = poaf(80, True, 15, False, False, (30/100), False)
npt.assert_equal(tmp, 4)
tmp = poaf(80, True, 14, False, False, (30/100), False)
npt.assert_equal(tmp, 5)
tmp = poaf(80, True, 14, True, False, (30/100), False)
npt.assert_equal(tmp, 6)
tmp = poaf(80, True, 14, True, True, (30/100), False)
npt.assert_equal(tmp, 7)
tmp = poaf(80, True, 14, True, True, (29/100), False)
npt.assert_equal(tmp, 8)
tmp = poaf(80, True, 14, True, True, (29/100), True)
npt.assert_equal(tmp, 9)
def test_PoafC():
model = PoafC()
tmp = model.score({"age": 59,
"copd": False,
"egfr": 15,
"emrgncy": False,
"pibp": False,
"lvef": (30/100),
"vs": False})
npt.assert_almost_equal(tmp, 0, decimal=3)
tmp = model.score({"age": 60,
"copd": False,
"egfr": 15,
"emrgncy": False,
"pibp": False,
"lvef": (30/100),
"vs": False})
npt.assert_almost_equal(tmp, 1, decimal=3)
tmp = model.score({"age": 70,
"copd": False,
"egfr": 15,
"emrgncy": False,
"pibp": False,
"lvef": (30/100),
"vs": False})
npt.assert_almost_equal(tmp, 2, decimal=3)
tmp = model.score({"age": 80,
"copd": False,
"egfr": 15,
"emrgncy": False,
"pibp": False,
"lvef": (30/100),
"vs": False})
npt.assert_almost_equal(tmp, 3, decimal=3)
tmp = model.score({"age": 80,
"copd": True,
"egfr": 15,
"emrgncy": False,
"pibp": False,
"lvef": (30/100),
"vs": False})
npt.assert_almost_equal(tmp, 4, decimal=3)
tmp = model.score({"age": 80,
"copd": True,
"egfr": 14,
"emrgncy": False,
"pibp": False,
"lvef": (30/100),
"vs": False})
npt.assert_almost_equal(tmp, 5, decimal=3)
tmp = model.score({"age": 80,
"copd": True,
"egfr": 14,
"emrgncy": True,
"pibp": False,
"lvef": (30/100),
"vs": False})
npt.assert_almost_equal(tmp, 6, decimal=3)
tmp = model.score({"age": 80,
"copd": True,
"egfr": 14,
"emrgncy": True,
"pibp": True,
"lvef": (30/100),
"vs": False})
npt.assert_almost_equal(tmp, 7, decimal=3)
tmp = model.score({"age": 80,
"copd": True,
"egfr": 14,
"emrgncy": True,
"pibp": True,
"lvef": (29/100),
"vs": False})
npt.assert_almost_equal(tmp, 8, decimal=3)
tmp = model.score({"age": 80,
"copd": True,
"egfr": 14,
"emrgncy": True,
"pibp": True,
"lvef": (29/100),
"vs": True})
npt.assert_almost_equal(tmp, 9, decimal=3)
test_PoafC()
``` |
{
"source": "joyceho/cvdm",
"score": 3
} |
#### File: cvdm/score/advance.py
```python
import numpy as np
from cvdm.score import cox_surv, BaseRisk
from cvdm.score import clean_diab_dur, clean_pp, clean_hba1c, clean_acr, clean_nonhdl
# coefficients for survival
BETA = np.array([ 0.06187, # age at diagnosis of diabetes
-0.4736, # risk for females
0.08263, # duration of diabetes
0.00665, # pulse pressure (mmHg)
0.38248, # retinopathy
0.60160, # atrial fibrillation
0.09945, # unit increase in hba1c (%)
                    0.19341, # log of albumin/creatinine ratio (mg/g)
                    0.12619, # non-hdl cholesterol (mmol/L)
0.24219 # treated hypertension
])
S_0 = 0.951044
CONST = 6.52910152
def advance(diab_age, female, diab_dur,
pp, retin, afib,
hba1c, acr, non_hdl, htn_treat):
# add ability to ensure specific values are not negative
xFeat = np.array([diab_age,
female,
clean_diab_dur(diab_dur),
clean_pp(pp),
retin,
afib,
clean_hba1c(hba1c),
np.log(clean_acr(acr)),
clean_nonhdl(non_hdl),
htn_treat])
s = cox_surv(xFeat, BETA, S_0, CONST)
return s
class Advance(BaseRisk):
features = ["diab_age",
"female",
"diab_dur",
"pp",
"retinopathy",
"afib",
"hba1c",
"albumin_creat",
"nonhdl_mmol",
"htn_treat"]
# set them to be the same
feat_key = features
def score(self, row):
return advance(row["diab_age"],
row["female"],
row["diab_dur"],
row["pp"],
row["retinopathy"],
row["afib"],
row["hba1c"],
row["albumin_creat"],
row["nonhdl_mmol"],
row["htn_treat"])
def get_features(self, row):
"""
Get the features associated with this score
"""
feat_dict = super().get_features(row)
# log transform albumin creat
feat_dict["albumin_creat"] = np.log(feat_dict["albumin_creat"])
return feat_dict
```
#### File: cvdm/score/hkdr.py
```python
import numpy as np
from cvdm.score import cox_surv, BaseRisk
from cvdm.score import clean_age, clean_diab_dur, clean_egfr, clean_acr
from cvdm.score import clean_bmi, clean_hba1c, clean_nonhdl, clean_hb
# coefficients for survival
HKDR_CHD = {
"coef": np.array([ 0.0267, # age in years
-0.3536, # if female
0.4373, # current smoker
0.0403, # duration of diabetes
-0.4808, # log_10 of efgr (ml/min/1.73 m^2)
0.1232, # log_10 of (1+spot ACR) (mg/mmol)
                       0.2644 # non-hdl cholesterol (mmol/L)
]),
"sm": 0.9616,
"const": 0.7082,
"shrink": 0.9440
}
HKDR_HF = {
"coef": np.array([ 0.0709, # age in years
0.0627, # bmi
0.1363, # hba1c (%)
0.9915, # log_10 of (1+spot ACR) (mg/mmol
-0.3606, # blood HB (g/dl)
0.8161 # chd during followup
]),
"male_sm": 0.9888,
"female_sm": 0.9809,
"const": 2.3961,
"shrink": 0.9744
}
HKDR_STROKE = {
"coef": np.array([ 0.0634, # age in years
0.0897, # hba1c (%)
0.5314, # log_10 ACR
0.5636 # hist_chd
]),
"const": 4.5674,
"sm": 0.9707
}
def hkdr_chd(age, female, cur_smoker, diab_dur,
egfr, acr, nonhdl_mmol):
"""
Calculate the risk for coronary heart disease
using the coefficients from the HKDR CHD Cohort
Parameters
----------
age : numeric
Age of subject
    female : boolean or int
        Subject is female (True or False)
    cur_smoker : boolean or int
        Subject is a current smoker (True or False)
    diab_dur : numeric
        Number of years of diabetes
    egfr : numeric
        Estimated Glomerular Filtration Rate (ml/min/1.73 m^2)
acr : numeric
Urinary albumin : creatinine ratio in mg/mmol
    nonhdl_mmol : numeric
Non-HDL cholesterol (mmol/L)
"""
xFeat = np.array([clean_age(age),
female,
cur_smoker,
clean_diab_dur(diab_dur),
np.log10(clean_egfr(egfr)),
np.log10(1+clean_acr(acr)),
clean_nonhdl(nonhdl_mmol, meas="mmol")])
return cox_surv(xFeat,
HKDR_CHD["coef"],
HKDR_CHD["sm"],
HKDR_CHD["const"],
HKDR_CHD["shrink"])
class HkdrCHD(BaseRisk):
features = ["female",
"index_age",
"diab_dur",
"cur_smoke",
"nonhdl_mmol"]
feat_key = features + ["egfr",
"albumin_creat_mgmmol"]
def score(self, row):
return hkdr_chd(row["index_age"],
row["female"],
row["cur_smoke"],
row["diab_dur"],
row["egfr"],
row["albumin_creat_mgmmol"],
row["nonhdl_mmol"])
def get_features(self, row):
feat_dict = super().get_features(row)
feat_dict["egfr_log"] = np.log10(row["egfr"])
feat_dict["acr_log"] = np.log10(1+row["albumin_creat_mgmmol"])
return feat_dict
def hkdr_hf(female, age, bmi, hba1c, acr, hb, chdHist):
"""
Calculate the risk for heart failure
using the coefficients from the HKDR Cohort
Parameters
----------
    female : boolean or int
        Subject is female (True or False)
    age : numeric
        Age of subject
bmi : numeric
BMI of the subject (in kg/m^2)
hba1c: numeric
HBA1C (%)
acr : numeric
Urinary albumin : creatinine ratio in mg/mmol
hb : numeric
Blood Hemoglobin (g/dl)
chdHist : boolean or int
Subject had CHD (true or False)
"""
baseSurv = HKDR_HF["male_sm"]
if female:
baseSurv = HKDR_HF["female_sm"]
xFeat = np.array([clean_age(age),
clean_bmi(bmi),
clean_hba1c(hba1c),
np.log10(1+clean_acr(acr)),
clean_hb(hb),
chdHist])
return cox_surv(xFeat,
HKDR_HF["coef"],
baseSurv,
HKDR_HF["const"],
HKDR_HF["shrink"])
class HkdrHF(BaseRisk):
features = ["female",
"index_age",
"bmi",
"hba1c",
"hb",
"chd"]
feat_key = features + ["albumin_creat_mgmmol"]
def score(self, row):
return hkdr_hf(row["female"],
row["index_age"],
row["bmi"],
row["hba1c"],
row["albumin_creat_mgmmol"],
row["hb"],
row["chd"])
def get_features(self, row):
feat_dict = super().get_features(row)
feat_dict["acr_log"] = np.log10(1+row["albumin_creat_mgmmol"])
return feat_dict
def hkdr_stroke(age, hba1c, acr, chd):
xFeat = np.array([clean_age(age),
clean_hba1c(hba1c),
np.log10(clean_acr(acr)),
chd])
return cox_surv(xFeat,
HKDR_STROKE["coef"],
HKDR_STROKE["sm"],
HKDR_STROKE["const"])
class HkdrStroke(BaseRisk):
features = ["index_age",
"hba1c",
"chd"]
feat_key = features + ["albumin_creat_mgmmol"]
def score(self, row):
return hkdr_stroke(row["index_age"],
row["hba1c"],
row["albumin_creat_mgmmol"],
row["chd"])
def get_features(self, row):
feat_dict = super().get_features(row)
feat_dict["acr_log"] = np.log10(1+row["albumin_creat_mgmmol"])
return feat_dict
```
#### File: cvdm/score/recode.py
```python
import argparse
import json
import numpy as np
import tqdm
from cvdm.score import cox_surv, BaseRisk
from cvdm.score import clean_age, clean_bp, clean_hba1c
from cvdm.score import clean_tot_chol, clean_hdl, clean_acr
## Coefficients for CHF Recode
CHD_INFO ={
"coef": np.array([ 0.05268, # age (year)
0.25290, # female
-0.04969, # black ethnicity
0.29050, # smoking
0.00121, # SBP
1.00700, # history of cvd
0.63890, # blood pressure lowering drugs
-0.11750, # statins
0.73650, # anticoagulants
0.20920, # hba1c %
-0.00136, # total cholesterol
-0.01758, # hdl cholesterol
                       0.82104, # serum creatinine (mg/dl)
                       0.00041 # urine albumin:creatinine ratio
]),
"s0": 0.96, # lambda in the equation
"const": 5.15 # the mean to subtract by
}
MI_INFO ={
"coef": np.array([ 0.04363, # age (year)
-0.20660, # female
-0.11630, # black ethnicity
0.23580, # smoking
-0.00514, # SBP
0.96180, # history of cvd
-0.12480, # blood pressure lowering drugs
0.04699, # statins
0.54400, # anticoagulants
0.21350, # hba1c %
0.00019, # total cholesterol
-0.01358, # hdl cholesterol
0.08027, # serum creatine mg/dl
0.00042 # urine albumin:creatine ratio
]),
"s0": 0.96, # lambda in the equation
"const": 5.15 # the mean to subtract by
}
STROKE_INFO ={
"coef": np.array([ 0.02896, # age (year)
-0.00326, # female
0.27160, # black ethnicity
0.16650, # smoking
0.01659, # SBP
0.41380, # history of cvd
0.15980, # blood pressure lowering drugs
-0.18870, # statins
-0.13870, # anticoagulants
0.33650, # hba1c %
0.00171, # total cholesterol
-0.00639, # hdl cholesterol
                       0.59550, # serum creatinine (mg/dl)
                       0.00030 # urine albumin:creatinine ratio
]),
"s0": 0.96, # lambda in the equation
"const": 5.15 # the mean to subtract by
}
def recode(age, female, ethnicity, smoking, sbp,
cvdHist, bpld, statin, anticoag,
hba1c, tchol, hdl, creat, acr,
target="CHF"):
    """
    Calculate the RECODE risk (1 - survival) for the selected outcome;
    `target` picks the coefficient set: "CHF" (default), "MI" or "STROKE".
    """
    coefInfo = CHD_INFO
    if target == "MI":
        coefInfo = MI_INFO
    if target == "STROKE":
        coefInfo = STROKE_INFO
xFeat = np.array([clean_age(age),
female,
ethnicity,
smoking,
clean_bp(sbp),
cvdHist,
bpld, statin, anticoag,
clean_hba1c(hba1c),
clean_tot_chol(tchol),
clean_hdl(hdl),
creat,
clean_acr(acr)])
return cox_surv(xFeat, coefInfo["coef"],
coefInfo["s0"], coefInfo["const"])
class Recode(BaseRisk):
target = None
features = ["index_age",
"female",
"AC",
"cur_smoke",
"sbp",
"cvd_hist",
"bpld",
"statin",
"anticoagulant",
"hba1c",
"chol_tot",
"chol_hdl",
"creat",
"albumin_creat"]
feat_key = features
def __init__(self, target="CHF"):
self.target = target
def score(self, row):
return recode(row["index_age"],
row["female"],
row["AC"],
row["cur_smoke"],
row["sbp"],
row["cvd_hist"],
row["bpld"],
row["statin"],
row["anticoagulant"],
row["hba1c"],
row["chol_tot"],
row["chol_hdl"],
row["creat"],
row["albumin_creat"],
target=self.target)
```
#### File: cvdm/score/score.py
```python
import numpy as np
from cvdm.score import BaseRisk, clean_chol, clean_bp
# coefficients for survival
LOW_RISK_MEN = {'CHD': {"alpha": -22.1, "p": 4.71},
'Non-CHD': {"alpha": -26.7, "p": 5.64}}
LOW_RISK_WOMEN = {'CHD': {"alpha": -29.8, "p": 6.36},
'Non-CHD': {"alpha": -31.0, "p": 6.62}}
HIGH_RISK_MEN = {'CHD': {"alpha": -21.0, "p": 4.62},
'Non-CHD': {"alpha": -25.7, "p": 5.47}}
HIGH_RISK_WOMEN = {'CHD': {"alpha": -28.7, "p": 6.23},
'Non-CHD': {"alpha": -30.0, "p": 6.42}}
# coefficients for measurements
COEFF = {'CHD': [0.71, 0.24, 0.018],
'Non-CHD': [0.63, 0.02, 0.022]}
def _so(age, alpha, p):
"""
s0(age) = exp{-exp(alpha)*(age)^p}
"""
return np.exp(-(np.exp(alpha))*(age)**p)
def _baseline_s0(age, coef):
"""
Calculate the baseline survival probability
for age and age+10 using the formula:
s_0(age) = exp{-exp(alpha)*(age-20)^p}
s_0(age+10) = exp{-exp(alpha)*(age-10)^p}
"""
return {"s_now": _so(age-20, coef["alpha"], coef["p"]),
"s_10": _so(age-10, coef["alpha"], coef["p"])}
def _w(chol, sbp, smoking, coef):
X = np.array([bool(smoking),
chol-6,
sbp-120])
return X.dot(coef)
def _survival(baseline_s0, w):
return {k: v**np.exp(w) for k, v in baseline_s0.items()}
def _risk_10(survival):
s10_age = survival["s_10"] / survival["s_now"]
return 1 - s10_age
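# Putting the pieces together (sketch of the computation, not an extra code path):
# for a given endpoint, S(age) = s0(age)**exp(w), and the 10-year risk is
#   1 - S(age + 10) / S(age)
# i.e. the conditional probability of the event over the next 10 years given
# survival to the current age; _calculate_score sums this over CHD and Non-CHD.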
def _calculate_score(coef_gender, age, chol, sbp, smoking):
# sum up the two risks
cvdRisk = 0
# calculate the two cases, CHD vs non-CHD
for k, v in coef_gender.items():
s0 = _baseline_s0(age, v)
w = _w(chol, sbp, smoking, COEFF[k])
s = _survival(s0, w)
cvdRisk += _risk_10(s)
return cvdRisk
class Score(BaseRisk):
low_risk = None
features = ["index_age",
"female",
"cur_smoke",
"sbp",
"chol_tot_mmol"]
feat_key = features
def __init__(self, low_risk=True):
self.low_risk = low_risk
def score(self, row):
return score(row["female"],
row["index_age"],
row["chol_tot_mmol"],
row["sbp"],
row["cur_smoke"],
self.low_risk)
def score(female, age, chol_mmol, sbp, smoking, low_risk):
sc = None
beta = HIGH_RISK_MEN
if female and low_risk:
beta = LOW_RISK_WOMEN
elif female:
beta = HIGH_RISK_WOMEN
elif low_risk:
beta = LOW_RISK_MEN
sc = _calculate_score(beta,
max(20, age),
clean_chol(chol_mmol),
clean_bp(sbp),
smoking)
return max(0, min(sc, 1))
```
#### File: cvdm/score/survModel.py
```python
import numpy as np
def cox_surv(xFeat, beta, s0, b0=0, shrinkage=1):
    # risk = 1 - s0 ** exp(shrinkage * (xFeat.dot(beta) - b0))
xBeta = xFeat.dot(beta)
return 1 - s0**np.exp(shrinkage*(xBeta - b0))
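# Toy usage sketch (made-up numbers, not coefficients from any published score):
#   xFeat = np.array([1.0, 0.0]); beta = np.array([0.5, 0.2])
#   cox_surv(xFeat, beta, s0=0.95, b0=0.3)
#   == 1 - 0.95 ** np.exp(1.0 * (0.5 - 0.3)) ~= 0.061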
def weibull_atf_surv(xFeat, beta, mu, sigma, t):
a = xFeat.dot(beta) + mu
b = (t / a) ** sigma
return 1 - np.exp(-b)
def weibull_hazard(xFeat, beta, lmbda, t, rho):
# exp(lambda + beta * xFeat) * t^rho
return np.exp(lmbda + xFeat.dot(beta)) * t ** rho
def weibull_surv(xFeat, beta, lmbda, t1, t2, rho):
return 1 - np.exp(weibull_hazard(xFeat, beta, lmbda, t1, rho) - weibull_hazard(xFeat, beta, lmbda, t2, rho))
```
#### File: score/tests/test_aric.py
```python
import numpy.testing as npt
from cvdm.score import aric, Aric
def test_aric():
tmp = aric(53, False, False, 190, 50, 140, False, False)
npt.assert_almost_equal(tmp, 0.03169, decimal=5)
tmp = aric(53, False, True, 190, 50, 140, False, False)
npt.assert_almost_equal(tmp, 0.04571, decimal=5)
tmp = aric(60, True, True, 220, 40, 140, True, False)
npt.assert_almost_equal(tmp, 0.38077, decimal=5)
def test_aric_json():
ar = Aric()
tmp = ar.score({"male": False,
"index_age": 53,
"Cauc": False,
"chol_tot": 190,
"chol_hdl": 50,
"sbp": 140,
"htn_treat": False,
"cur_smoke": False})
npt.assert_almost_equal(tmp, 0.03169, decimal=5)
```
#### File: score/tests/test_frs.py
```python
import numpy.testing as npt
from cvdm.score import frs_primary, frs_simple, FrsPrimary, FrsSimple
def test_frs_primary():
tmp = frs_primary(True, 61, 180, 47, 124, False, True, False)
npt.assert_almost_equal(tmp, 0.1048, decimal=4)
tmp = frs_primary(False, 53, 161, 55, 125, True, False, True)
npt.assert_almost_equal(tmp, 0.1562, decimal=4)
def test_frs_primary_json():
frs = FrsPrimary()
tmp = frs.score({"female": True,
"index_age": 61,
"chol_tot": 180,
"chol_hdl": 47,
"sbp": 124,
"htn_treat": False,
"dm": False,
"cur_smoke": True})
npt.assert_almost_equal(tmp, 0.1048, decimal=4)
tmp = frs.score({"female": False,
"index_age": 53,
"chol_tot": 161,
"chol_hdl": 55,
"sbp": 125,
"htn_treat": True,
"dm": True,
"cur_smoke": False})
npt.assert_almost_equal(tmp, 0.1562, decimal=4)
def test_frs_simple():
score = frs_simple(True, 35, 24.3, 122, False, True, False)
npt.assert_almost_equal(score, 0.029352227213368165, decimal=5)
def test_frs_simple_json():
frs = FrsSimple()
tmp = frs.score({"female": True,
"index_age": 35,
"height": 72.0,
"weight": 190.0,
"bmi": 24.3,
"sbp": 122,
"htn_treat": False,
"dm": False,
"cur_smoke": True})
npt.assert_almost_equal(tmp, 0.029352227213368165, decimal=5)
```
#### File: score/tests/test_hkdr.py
```python
import numpy.testing as npt
from cvdm.score import hkdr_chd, HkdrCHD
from cvdm.score import hkdr_hf, HkdrHF
def test_hkdr_chd():
tmp = hkdr_chd(59, True, False, 5, 105, 2.3, 3.87)
npt.assert_almost_equal(tmp, 0.082, decimal=3)
def test_hkdr_chd_json():
chd = HkdrCHD()
tmp = chd.score({"index_age": 59,
"female": True,
"cur_smoke": False,
"diab_dur": 5,
"egfr": 105,
"albumin_creat_mgmmol": 2.3,
"nonhdl_mmol": 3.87})
npt.assert_almost_equal(tmp, 0.082, decimal=3)
def test_hkdr_hf():
tmp = hkdr_hf(False, 59, 32, 8, 2.5, 13.8, True)
npt.assert_almost_equal(tmp, 0.038, decimal=3)
tmp = hkdr_hf(True, 59, 32, 8, 2.5, 13.8, True)
npt.assert_almost_equal(tmp, 0.064, decimal=3)
tmp = hkdr_hf(False, 59, 24.3, 8, 2.5, 13.8, True)
npt.assert_almost_equal(tmp, 0.024, decimal=3)
def test_hkdr_hf_json():
hf = HkdrHF()
tmp = hf.score({"index_age": 59,
"female": False,
"albumin_creat_mgmmol":2.5,
"bmi": 24.3,
"hba1c": 8,
"hb": 13.8,
"chd": True})
npt.assert_almost_equal(tmp, 0.024, decimal=3)
```
#### File: score/tests/test_qdiabetes.py
```python
import numpy.testing as npt
from cvdm.score import qdiabetes, QDiabetes
def test_qdiabetes():
tmp = qdiabetes(64, False, 27.34, 0.5, False, False,
64, 4.3, 120, False, False, False, False,
False, False, False, tYear=5)
npt.assert_almost_equal(tmp, 0.015, decimal=3)
tmp = qdiabetes(64, False, 27.34, 0.5, False, False,
64, 4.3, 120, False, False, False, False,
False, False, False, tYear=1)
npt.assert_almost_equal(tmp, 0.003, decimal=3)
tmp = qdiabetes(64, False, 27.34, 0.5, False, False,
64, 4.3, 120, False, False, False, False,
False, False, False, tYear=3)
npt.assert_almost_equal(tmp, 0.008, decimal=3)
tmp = qdiabetes(64, False, 27.34, 0.5, False, False,
64, 4.3, 120, False, False, False, False,
True, True, False, tYear=1)
npt.assert_almost_equal(tmp, 0.014, decimal=3)
tmp = qdiabetes(64, False, 27.34, 0.5, False, False,
64, 4.3, 120, False, False, False, True,
True, True, False, tYear=1)
npt.assert_almost_equal(tmp, 0.016, decimal=3)
tmp = qdiabetes(64, False, 27.34, 2, False, False,
64, 4.3, 120, False, False, False, True,
True, True, False, tYear=1)
npt.assert_almost_equal(tmp, 0.0225, decimal=4)
tmp = qdiabetes(64, False, 27.34, 8, False, False,
64, 4.3, 120, False, False, True, False,
True, True, False, tYear=1)
npt.assert_almost_equal(tmp, 0.032, decimal=3)
tmp = qdiabetes(64, False, 27.34, 8, False, True,
64, 4.3, 120, False, False, True, False,
True, True, False, tYear=1)
npt.assert_almost_equal(tmp, 0.031, decimal=3)
tmp = qdiabetes(64, True, 27.34, 0.5, False, False,
64, 4.3, 120, False, False, False, False,
True, False, True, tYear=1)
npt.assert_almost_equal(tmp, 0.016, decimal=3)
tmp = qdiabetes(64, True, 27.34, 0.5, False, False,
64, 4.3, 120, False, False, False, False,
True, False, True, tYear=2)
npt.assert_almost_equal(tmp, 0.032, decimal=3)
tmp = qdiabetes(64, True, 27.34, 0.5, False, False,
64, 4.3, 120, False, False, False, False,
True, False, True, tYear=5)
npt.assert_almost_equal(tmp, 0.084, decimal=3)
tmp = qdiabetes(64, True, 27.34, 4, False, False,
64, 4.3, 120, False, False, False, True,
True, False, True, tYear=1)
npt.assert_almost_equal(tmp, 0.024, decimal=3)
tmp = qdiabetes(64, True, 27.34, 0.5, True, False,
64, 4.3, 120, False, False, False, False,
True, False, True, tYear=1)
npt.assert_almost_equal(tmp, 0.011, decimal=3)
tmp = qdiabetes(64, True, 27.34, 0.5, False, True,
64, 4.3, 120, False, False, False, False,
True, False, True, tYear=1)
npt.assert_almost_equal(tmp, 0.012, decimal=3)
tmp = qdiabetes(64, True, 27.34, 5, False, True,
64, 4.3, 120, False, True, False, False,
True, False, True, tYear=1)
npt.assert_almost_equal(tmp, 0.025, decimal=3)
def test_qdiabetes_json():
model = QDiabetes(1)
tmp = model.score({"index_age": 64,
"male": True,
"bmi": 27.34,
"diab_dur": 5,
"AC": False,
"EAsian": True,
"hba1c_mmol": 64,
"tchdl": 4.3,
"sbp": 120,
"heavy_smoke": False,
"moderate_smoke": True,
"light_smoke": False,
"prev_smoke": False,
"afib": True,
"cvd_hist": False,
"renal": True
})
npt.assert_almost_equal(tmp, 0.025, decimal=3)
```
#### File: cvdm/score/ukpds.py
```python
import numpy as np
from cvdm.score import BaseRisk
from cvdm.score import clean_age, clean_hba1c, clean_bp, clean_tchdl
# coefficients for survival
BETA = np.array([ 1.059, # age at diagnosis of diabetes
0.525, # risk for females
0.390, # Afro-Carribean ethnicity
1.350, # smoking
1.183, # HBA1c
1.088, # 10mmHg increase in systolic blood pressure
3.845 # unit increase in log of lipid ratio
])
Q_0 = 0.0112 # intercept
D = 1.078 # risk ratio for each year increase in duration of diagnosed diabetes
def ukpds(ageDiab, age, female, ac, smoking, hba1c, sbp, tchdl, tYear=10):
"""
    Calculate the UKPDS risk of an event over the next tYear years (default 10),
    given the age at diabetes diagnosis and the current risk-factor values.
"""
xFeat = np.array([clean_age(age)-55,
female,
ac,
bool(smoking),
clean_hba1c(hba1c)-6.72,
(clean_bp(sbp) - 135.7)/10,
np.log(clean_tchdl(tchdl))-1.59])
q = Q_0 * np.prod(np.power(BETA, xFeat))
uscore = 1 - np.exp(-q * D**(age-ageDiab)* (1-D**tYear)/ (1 - D))
return max(uscore, 0.0)
class Ukpds(BaseRisk):
tYear = None
features = ["diab_age",
"index_age",
"female",
"AC",
"cur_smoke",
"hba1c",
"sbp"]
feat_key = features + ["tchdl"]
def __init__(self, tYear=10):
self.tYear = tYear
def score(self, row):
return ukpds(row["diab_age"],
row["index_age"],
row["female"],
row["AC"],
row["cur_smoke"],
row["hba1c"],
row["sbp"],
row["tchdl"],
tYear=self.tYear)
def get_features(self, row):
feat_dict = super().get_features(row)
feat_dict["tchdl_log"] = np.log(row["tchdl"])
return feat_dict
``` |
{
"source": "JoyceIsCodingWow/Snail.png",
"score": 3
} |
#### File: Snail.png/cogs/misc_commands.py
```python
import discord
from discord.ext import commands
from discord.ext.commands import Bot
import math
class misc_commands(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def help(self, ctx):
author = ctx.message.author
embedVar = discord.Embed(title="Misc commands", description="Gives you a list of the misc commands.", color=16714423)
embedVar.set_author(name="Snail.png", icon_url="https://cdn.discordapp.com/avatars/756682052736385184/638737b828dcdd243c1207536547f68b.webp?size=1024")
embedVar.add_field(name="s!invite", value="Gives you the bots invite link. `s!invite`", inline=False)
embedVar.add_field(name="s!server", value="Gives you the invite for the bots support server. `s!server`", inline=False)
embedVar.add_field(name='s!suggest', value="Lets you suggest a feature for the bot. `s!suggest {suggestion}`")
embedVar.add_field(name="s!userinfo", value="Gives you a users information. `s!userinfo {User}`", inline=False)
embedVar.add_field(name="s!pfp", value="Gives you a users profile picture. `s!pfp {User}`", inline=False)
embedVar.add_field(name='s!vote', value=' Gives you the link to upvote snail on discord.ly.', inline=False)
embedVar.add_field(name="Moderator Commands", value="Gives you a list of all moderation comamnds.", inline=False)
embedVar.add_field(name="s!ban", value="Bans a member. `s!ban {User} {reason}`", inline=False)
embedVar.add_field(name='s!kick', value='Kicks a member. `s!kick {User} {reason}`', inline=False)
embedVar.add_field(name='s!purge', value='Purges the given amount of messages. `s!purge {Amount}`', inline=False)
embedVar.add_field(name='s!lockdown', value='locks down a channel. `s!lockdown`, `s!unlockdown`', inline=False)
embedVar.add_field(name='s!setup', value='Lets you setup certain bot features. `s!setup`', inline=False)
embedVar.add_field(name='Voice commands', value='Gives you a list of all voice related commands (currently doesnt do much)', inline=False)
embedVar.add_field(name='s!join', value='Joins a voice channel. `s!join`, `s!leave`', inline=False)
await author.send(embed=embedVar)
await ctx.send('Sent you a DM!')
@commands.command()
async def invite(self, ctx):
response = ('<https://discord.com/oauth2/authorize?client_id=756682052736385184&scope=bot&permissions=8>')
await ctx.send(response)
@commands.command(aliases=['support'])
async def server(self, ctx):
await ctx.send(f"https://discord.gg/C6AnGyr")
@commands.command(aliases=['whois', 'profile'])
    async def userinfo(self, ctx, member: discord.Member = None):
        if member is None:
member = ctx.message.author
account_created = member.created_at.strftime("%b %d, %Y")
account_join = member.joined_at.strftime("%b %d, %Y")
color = member.color
avatar = member.avatar_url
member_name = member.name
member_discrim = member.discriminator
member_mention = member.mention
userid = member.id
hashtag = ('#')
embedVar = discord.Embed(description=member_mention, color=color)
embedVar.set_author(name=member_name + hashtag + member_discrim, icon_url=avatar)
embedVar.set_thumbnail(url=avatar)
embedVar.add_field(name='User ID', value=userid, inline=False)
embedVar.add_field(name='Account Created At', value=account_created, inline=False)
embedVar.add_field(name='Joined Server At', value=account_join, inline=False)
await ctx.send(embed=embedVar)
@commands.command(aliases=['picture', 'profilepicture'])
async def pfp(self, ctx, member: discord.Member):
avatar = member.avatar_url
member_name = member.name
color = member.color
member_discrim = member.discriminator
embedVar = discord.Embed(description=f"{member_name}#{member_discrim}'s Profile picture", color=color)
embedVar.set_image(url=avatar)
await ctx.send(embed=embedVar)
@commands.command(aliases=['upvote'])
async def vote(self, ctx):
await ctx.send(f"You can upvote snail at https://discordbotlist.com/bots/snailpng. Voting currently doesnt do anything, but if you would like to suggest something you can suggest it with `s!suggest`")
def setup(bot):
bot.add_cog(misc_commands(bot))
```
#### File: Snail.png/cogs/mod_events.py
```python
import discord
from discord.ext import commands
from discord.ext.commands import Bot
from datetime import datetime
import sqlite3
now = datetime.now()
class mod_events(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_member_join(self, member):
        if member.guild.id != 724360414829215766:
            return
        channel = self.bot.get_channel(766835252651884544)
        await channel.send(f"hiiiiiii {member.mention}")
@commands.Cog.listener()
async def on_message(self, message):
if message.guild != None:
return
if message.author == self.bot.user:
return
user = message.author.name
user_discrim = message.author.discriminator
user_avatar = message.author.avatar_url
channel = self.bot.get_channel(767970704393371698)
content = message.content
sent_at = message.created_at.strftime("%b %d, %Y")
embedVar = discord.Embed()
embedVar.set_author(name=user + ('#') +user_discrim, icon_url=user_avatar)
embedVar.add_field(name=sent_at, value=content)
await channel.send(embed=embedVar)
@commands.Cog.listener()
async def on_message_edit(self, before, after):
message = after
if message.author == self.bot.user:
return
        if message.guild is None:
return
db = sqlite3.connect('snail.sqlite')
cursor = db.cursor()
cursor.execute(f"SELECT messagelog_channel FROM snail WHERE guild_id = {message.guild.id}")
result = cursor.fetchone()
if result is None:
return
else:
            time = datetime.now().strftime("%m/%d/%y, %H:%M")
embedSex = discord.Embed(title='Message link', description=f"{message.author.mention} Edited a message in {message.channel.mention}", url=f"https://discordapp.com/channels/{message.guild.id}/{message.channel.id}/{message.id}", color=0xFF0000)
embedSex.set_footer(text=f"Author ID: {message.author.id} โ Edited At {time}")
embedSex.set_author(name=message.author, icon_url=message.author.avatar_url)
embedSex.add_field(name='Message before:', value=before.content, inline=False)
embedSex.add_field(name='Message after:', value=after.content, inline=False)
channel = self.bot.get_channel(id=int(result[0]))
await channel.send(embed=embedSex)
@commands.Cog.listener()
async def on_message_delete(self, message):
        if message.guild is None:
return
if message.author == self.bot.user:
return
db = sqlite3.connect('snail.sqlite')
cursor = db.cursor()
cursor.execute(f"SELECT messagelog_channel FROM snail WHERE guild_id = {message.guild.id}")
result = cursor.fetchone()
if result is None:
return
else:
content = message.content
            time = datetime.now().strftime("%m/%d/%y, %H:%M")
embedHi = discord.Embed( description=f"{message.author.mention} deleted a message in {message.channel.mention}", color=0xFF0000)
embedHi.set_author(name=message.author, icon_url=message.author.avatar_url)
embedHi.set_footer(text=f"author ID: {message.author.id} โ message ID: {message.id} โ")
embedHi.add_field(name=f"Message deleted at {time}", value=content, inline=False)
channel = self.bot.get_channel(id=int(result[0]))
await channel.send(embed=embedHi)
@commands.Cog.listener()
async def on_guild_join(self, guild):
channel = self.bot.get_channel(764986104281169940)
await channel.send(f"<@453271030543155210> snail has joined **{guild.name}**")
def setup(bot):
bot.add_cog(mod_events(bot))
``` |
{
"source": "joycejosie/sql_creation_cleanup",
"score": 4
} |
#### File: joycejosie/sql_creation_cleanup/CREATE.py
```python
import sqlite3
from emp import Employee
connection = sqlite3.connect (':memory:')
cursor = connection.cursor()
cursor.execute("""CREATE TABLE employees (
first text,
last text,
pay integer
)""")
def insert_emp(emp):
with connection:
cursor.execute("INSERT INTO employees VALUES (:first, :last, :pay)",
{'first':emp.first, 'last': emp.last, 'pay': emp.pay})
def get_emps_by_name(lastname):
cursor.execute("SELECT * FROM employees WHERE last = :last", {'last': lastname})
return cursor.fetchall()
def update_pay(emp, pay):
with connection:
cursor.execute("""UPDATE employees SET pay = :pay
WHERE first = :first AND last = :last""",
                    {'first': emp.first, 'last': emp.last, 'pay': pay})
def remove_emp(emp):
with connection:
cursor.execute("DELETE from employees WHERE first = :first AND last = :last",
{'first': emp.first, 'last': emp.last})
emp_1 = Employee('Joyce', 'Lafontant', 48080)
emp_2 = Employee('Carl', 'Morcy', 35477)
emp_3 = Employee('Moussa', 'Fomba', 98743)
emp_4 = Employee('Urji', 'Haji', 80000)
emp_5 = Employee('Brodrick', 'Stanley', 73736)
emp_6 = Employee('Jamie', 'Edward', 191929)
emp_7 = Employee('John', 'Doe', 9000)
emp_8 = Employee('Jane', 'Doe', 80000)
insert_emp(emp_1)
insert_emp(emp_2)
# insert_emp(emp_3)
# insert_emp(emp_4)
# insert_emp(emp_5)
# insert_emp(emp_6)
# insert_emp(emp_7)
# insert_emp(emp_8)
show = get_emps_by_name('Lafontant')
print(show)
update_pay(emp_1, 95000)
show = get_emps_by_name('Lafontant')
print(show)
# show3 = remove_emp(emp_1)
# print(show3)
connection.close()
``` |
{
"source": "JoycelynLongdon/Mres_Research_Project",
"score": 2
} |
#### File: src/landcover_classification/rf_landcover_classification.py
```python
from __future__ import print_function, division
# Import GDAL, NumPy, and matplotlib
from osgeo import gdal, gdal_array
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
## Sklearn Libraries
from sklearn import metrics
from sklearn.utils import shuffle
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.metrics import f1_score, confusion_matrix, roc_curve, auc, classification_report, recall_score, precision_recall_curve
from sklearn.metrics import accuracy_score
from pprint import pprint
# ### Preparing The Dataset
# In[2]:
# Read in our satellite and label image
satellite_img = gdal.Open('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/classification_training_data/final_filled_l8_training_data.tif', gdal.GA_ReadOnly)
training_img = gdal.Open('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/classification_training_data/third_remerge_landcover_training_data.tif', gdal.GA_ReadOnly)
# In[3]:
img = np.zeros((satellite_img.RasterYSize, satellite_img.RasterXSize, satellite_img.RasterCount),
gdal_array.GDALTypeCodeToNumericTypeCode(satellite_img.GetRasterBand(1).DataType))
for b in range(img.shape[2]):
img[:, :, b] = satellite_img.GetRasterBand(b + 1).ReadAsArray()
lbls = training_img.GetRasterBand(1).ReadAsArray().astype(np.uint8)
# Display them
plt.subplot(121)
plt.imshow(img[:, :, 6], cmap=plt.cm.tab20b)
plt.title('NIR')
plt.subplot(122)
plt.imshow(lbls, cmap=plt.cm.terrain)
plt.title('Training Data')
plt.show()
# In[4]:
lbls.shape
# In[5]:
img.shape
# In[6]:
unique, counts = np.unique(lbls, return_counts=True)
list(zip(unique, counts))
# Creating the X feature matrix array and the y labels matrix to be fed into the Random Forest
# In[7]:
# Find how many non-zero entries we have -- i.e. how many training data samples?
n_samples = (lbls >0).sum()
print('We have {n} samples'.format(n=n_samples))
# What are our classification labels?
labels = np.unique(lbls[lbls >0])
print('The training data include {n} classes: {classes}'.format(n=labels.size,
classes=labels))
# We will need a "X" matrix containing our features, and a "y" array containing our labels
# These will have n_samples rows
# In other languages we would need to allocate these and them loop to fill them, but NumPy can be faster
#this is a quick numpy trick for flattening
X = img[lbls >0 ] # include 8th band, which is Fmask, for now
y = lbls[lbls >0]
print('Our X matrix is sized: {sz}'.format(sz=X.shape))
print('Our y array is sized: {sz}'.format(sz=y.shape))
# In[8]:
unique, counts = np.unique(y, return_counts=True)
list(zip(unique, counts))
# ## Stratifying Data
# k-fold
# In[ ]:
#stratified k-cross validation to balance the classes
skf = StratifiedKFold(n_splits=10)
skf.get_n_splits(X, y)
# In[ ]:
StratifiedKFold(n_splits=10, random_state=None, shuffle=False)
for train_index, test_index in skf.split(X, y):
print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# shuffle split
# In[9]:
from sklearn.model_selection import StratifiedShuffleSplit
sss = StratifiedShuffleSplit(n_splits=10, test_size=0.3, random_state=0)
StratifiedShuffleSplit(n_splits=10, random_state=0)
for train_index, test_index in sss.split(X, y):
print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# In[10]:
urban = np.count_nonzero(y_test ==1)
urban
# In[ ]:
# ### Training The Random Forest
# In[11]:
# Initialize our model with 500 trees
rf = RandomForestClassifier(n_estimators=500, oob_score=True)
# In[12]:
# Fit our model to training data
train = rf.fit(X_train, y_train)
# ### Training Performance
# In[13]:
print('Our OOB prediction of accuracy is: {oob}%'.format(oob=rf.oob_score_ * 100))
# In[14]:
import pandas as pd
# Setup a dataframe -- just like R
df = pd.DataFrame()
df['truth'] = y_train
df['predict'] = rf.predict(X_train)
# Cross-tabulate predictions
print(pd.crosstab(df['truth'], df['predict'], margins=True))
# ### Validation Performance
# In[15]:
val = rf.predict(X_test)
# In[19]:
target_names = ['Cropland', 'Shrubland', 'Forest', 'Urban', 'Water','Clouds']
# In[20]:
print(classification_report(y_test, val, target_names=target_names))
# In[18]:
disp = metrics.plot_confusion_matrix(rf, X_test, y_test)
disp.figure_.suptitle("Confusion Matrix")
print(f"Confusion matrix:\n{disp.confusion_matrix}")
plt.show()
# ### Band Importance
# In[19]:
bands = [1, 2, 3, 4, 5, 6,7,8,9,10]
for b, imp in zip(bands, rf.feature_importances_):
print('Band {b} importance: {imp}'.format(b=b, imp=imp))
# #### Scientific comment on band importance
# The SWIR bands appear to be the most important features. Several materials show diagnostic absorption features only in the SWIR range of the EM spectrum, which makes these bands useful for assessing vegetation health and for separating soil from decaying canopy.
# In[ ]:
#current parameters in use
# Look at parameters used by our current forest
print('Parameters currently in use:\n')
pprint(rf.get_params())
# ## Applying to Study Region
# In[135]:
# Read in our satellite and label image
new_img_path = gdal.Open('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/Counterfactual_Test_Data/Filled/Counterfactual_2020_Filled.tif', gdal.GA_ReadOnly)
# In[136]:
new_img = np.zeros((new_img_path.RasterYSize, new_img_path.RasterXSize, new_img_path.RasterCount),
gdal_array.GDALTypeCodeToNumericTypeCode(new_img_path.GetRasterBand(1).DataType))
for b in range(new_img.shape[2]):
new_img[:, :, b] = new_img_path.GetRasterBand(b + 1).ReadAsArray()
# In[137]:
new_shape = (new_img.shape[0] * new_img.shape[1], new_img.shape[2])
new_shape
img_as_array = new_img[:, :, :7].reshape(new_shape)
print('Reshaped from {o} to {n}'.format(o=new_img.shape,
n=img_as_array.shape))
# In[138]:
class_prediction = rf.predict(img_as_array)
# In[139]:
# Reshape our classification map
class_prediction_reshaped = class_prediction.reshape(new_img[:, :, 0].shape)
# In[140]:
class_prediction_reshaped.max()
# In[141]:
#save as numpy array ahead of analysis
np.save('/gws/nopw/j04/ai4er/users/jl2182/data/Figures/Counterfactual_Classified_Landcover/Counterfactual_2020_Classified.npy',class_prediction_reshaped)
# In[142]:
# Visualize
# First setup a 5-4-3 composite
def color_stretch(image, index, minmax=(0, 10000)):
colors = image[:, :, index].astype(np.float64)
max_val = minmax[1]
min_val = minmax[0]
# Enforce maximum and minimum values
colors[colors[:, :, :] > max_val] = max_val
colors[colors[:, :, :] < min_val] = min_val
for b in range(colors.shape[2]):
colors[:, :, b] = colors[:, :, b] * 1 / (max_val - min_val)
return colors
img543 = color_stretch(new_img, [5, 4, 3], (0, 7000))
# See https://github.com/matplotlib/matplotlib/issues/844/
n = class_prediction.max()
# Next setup a colormap for our map
colors = dict((
(1, (111, 97, 6,255)), # Cropland (brown)
(2, (135, 198, 42,255)), # Shrubland (light green)
(3, (15, 91, 3,255)), # Forest (dark green)
(4, (255, 26, 0,255)), # Urban (red)
(5, (0, 0, 255,255)), # Water (blue)
(6, (0, 0, 0,0)) #No Data/Clouds
))
# Put 0 - 255 as float 0 - 1
for k in colors:
v = colors[k]
_v = [_v / 255.0 for _v in v]
colors[k] = _v
index_colors = [colors[key] if key in colors else
(255, 255, 255, 0) for key in range(1, n + 1)]
cmap = plt.matplotlib.colors.ListedColormap(index_colors, 'Classification', n)
# Now show the classmap next to the image
plt.figure(figsize = (20,25))
plt.subplot(121)
plt.imshow(img543)
plt.figure(figsize = (20,25))
plt.subplot(122)
#plt.legend(handles=cmap, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
plt.imshow(class_prediction_reshaped, cmap=cmap, interpolation='none')
plt.savefig( '/gws/nopw/j04/ai4er/users/jl2182/data/Figures/Counterfactual_Classified_Landcover/Counterfactual_2020_Classified.tif')
plt.savefig( '/gws/nopw/j04/ai4er/users/jl2182/data/Figures/Counterfactual_Classified_Landcover/Counterfactual_2020_Classified.png')
#plt.show()
# In[ ]:
#img = np.ma.masked_values(img_as_array, 0)
# In[ ]:
#class_pred = np.ones(img_as_array.shape)
#for i in range(img_as_array.shape[0]):
#for j in range(img_as_array.shape[1]):
#if img_as_array[i,j] == 0.:
#class_pred = 0
#if img_as_array[i,j] != 0:
#class_pred = rf.predict(img_as_array)
# In[ ]:
#cp = np.where(img_as_array ==0,0,rf.predict(img_as_array).reshape(new_img[:, :, 0].shape))
# In[ ]:
# In[ ]:
``` |
{
"source": "joycenerd/3D_Augmentation",
"score": 2
} |
#### File: 3D_Augmentation/data_utils/utils.py
```python
import os
import numpy as np
import open3d as o3d
from z_order import *
multizorder_ranges = {5000: 500, 1024: 300, 64: 20, 32: 10}
def read_pcd(path):
# Load data based on different extension
extension = os.path.splitext(path)[-1]
if extension == ".pcd":
pcd = o3d.io.read_point_cloud(path)
pcd = np.array(pcd.points)
elif extension == ".txt":
pcd = np.loadtxt(path)
# pcd = []
# with open(path, "r") as pcd_file:
# for line in pcd_file:
# content = line.strip().split(",")
# pcd.append(list(map(float, content)))
# pcd = np.array(pcd)
# pcd = pcd[:, :3]
elif extension == ".npy":
pcd = np.load(path)
elif extension == ".npz":
pcd = np.load(path)
pcd = pcd["points"]
else:
assert False, extension + " is not supported now !"
return pcd.astype(np.float32)
def write_pcd(point, output_path):
# Convert numpy array to pcd format
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(point)
# Output pcd file
o3d.io.write_point_cloud(output_path, pcd)
def pcd_normalize(pcd):
centroids = np.mean(pcd, axis=0)
pcd = pcd - centroids
max_dist = np.max(np.sqrt(np.sum(pcd**2, axis=1)))
pcd = pcd / max_dist
return pcd
def resample_pcd(points, num_points=1024):
# Drop or duplicate points so that pcd has exactly n points
idx = np.random.permutation(points.shape[0])
if idx.shape[0] < num_points:
idx = np.concatenate([idx, np.random.randint(points.shape[0], size = num_points - points.shape[0])])
return points[idx[:num_points]]
def random_sample(points, num_points=1024):
points = np.random.permutation(points)
points = points[:num_points, :]
return points
def farthest_point_sample(points, num_points=1024):
"""
Input:
points: a point set, in the format of NxM, where N is the number of points, and M is the point dimension
num_points: required number of sampled points
"""
def compute_dist(centroid, points):
return np.sum((centroid - points) ** 2, axis=1)
farthest_pts = np.zeros((num_points, points.shape[1]))
farthest_pts[0] = points[np.random.randint(len(points))] # Random choose one point as starting point
distances = compute_dist(farthest_pts[0], points)
for idx in range(1, num_points):
farthest_pts[idx] = points[np.argmax(distances)]
distances = np.minimum(distances, compute_dist(farthest_pts[idx], points))
return farthest_pts.astype(np.float32)
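# Usage sketch (assumes `points` is an (N, 3) float array already loaded, e.g. via read_pcd):
#   sampled = farthest_point_sample(points, num_points=1024)
# Each iteration keeps the point farthest from the set chosen so far, which tends to
# spread samples more evenly over the shape than random_sample above.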
def get_zorder_sequence(points):
z_values = get_z_values(points)
points_zorder = points[np.argsort(z_values)]
return points_zorder
def get_z_values(points):
# Get z values of points
points_round = round_to_int_32(points) # convert to int
z_values = get_z_order(points_round[:, 0], points_round[:, 1], points_round[:, 2])
return z_values
def keep_zorder(points, num_points=1024):
# Random a start index of z-order sequence
sample_idx = np.random.randint(len(points) - num_points)
points = points[sample_idx:sample_idx+num_points]
return points
def keep_multizorder(points, num_points=1024):
remain = num_points
keep = np.array([], dtype=int)
multizorder_range = multizorder_ranges[num_points]
while len(keep) < num_points:
keep_range = remain if remain <= multizorder_range else np.random.randint(multizorder_range, remain)
keep_idx = np.random.randint(len(points) - keep_range)
keep = np.append(keep, np.arange(keep_idx, keep_idx+keep_range, dtype=int), axis=0)
keep = np.array(list(set(keep)), dtype=int)
remain = num_points - len(keep)
points = points[keep]
return points
def discard_zorder(points, num_in_points=5000, num_out_points=8192):
num_discard_points = num_out_points - num_in_points
idx = np.random.randint(num_in_points)
points = np.concatenate((points[:idx], points[idx+num_discard_points:]), axis=0)
return points
def discard_multizorder(points, num_in_points=5000, num_out_points=8192):
num_discard_points = num_out_points - num_in_points
multizorder_range = multizorder_ranges[num_in_points]
remain = num_discard_points
discard = np.array([])
while len(discard) < num_discard_points:
discard_range = remain if remain <= multizorder_range else np.random.randint(multizorder_range, remain)
discard_idx = np.random.randint(num_out_points - discard_range)
discard = np.append(discard, np.arange(discard_idx, discard_idx + discard_range), axis=0)
discard = np.array(list(set(discard)))
remain = num_discard_points - len(discard)
keep = np.arange(0, num_out_points)
points= points[np.array(list(set(keep) - set(discard)))]
return points
def discard_fps_multizorder(points, num_in_points=1024, num_out_points=10000):
def compute_dist(centroid, points):
return np.sum((centroid - points) ** 2, axis=1)
# Discard multizorder sample setting
num_discard_points = num_out_points - num_in_points
multizorder_range = multizorder_ranges[num_in_points]
remain = num_discard_points
discard = np.array([])
# Farthest point sample setting
fps_points = points[:-num_in_points]
fps = np.zeros((fps_points.shape))
fps[0] = points[np.random.randint(len(fps_points))]
distances = compute_dist(fps[0], fps_points)
# Discard multizorder sample based on farthest point sample
for idx in range(1, len(fps_points)):
# Get farthest point sample
fps[idx] = fps_points[np.argmax(distances)]
fps_idx = np.argmax(distances)
distances = np.minimum(distances, compute_dist(fps[idx], fps_points))
# Discard multizorder sample
discard_range = remain if remain <= multizorder_range else np.random.randint(multizorder_range, num_in_points) if remain > num_in_points else np.random.randint(multizorder_range, remain)
discard_idx = fps_idx
discard = np.append(discard, np.arange(discard_idx, discard_idx + discard_range), axis=0)
discard = np.array(list(set(discard)))
remain = num_discard_points - len(discard)
# Return discard multizorder sampled points if there are enough discard points
if len(discard) >= num_discard_points:
keep = np.arange(0, num_out_points)
points= points[np.array(list(set(keep) - set(discard)))]
return points
```
#### File: PointNet/data_utils/z_order.py
```python
import numpy as np
def round_to_int_32(data):
"""
Takes a Numpy array of float values between
-1 and 1, and rounds them to significant
32-bit integer values, to be used in the
morton code computation
:param data: multidimensional numpy array
:return: same as data but in 32-bit int format
"""
# first we rescale points to 0-512
data = 256*(data + 1)
# now convert to int
data = np.round(2 ** 21 - data).astype(dtype=np.int32)
return data
def split_by_3(x):
"""
Method to separate bits of a 32-bit integer
by 3 positions apart, using the magic bits
https://www.forceflow.be/2013/10/07/morton-encodingdecoding-through-bit-interleaving-implementations/
:param x: 32-bit integer
:return: x with bits separated
"""
# we only look at 21 bits, since we want to generate
# a 64-bit code eventually (3 x 21 bits = 63 bits, which
# is the maximum we can fit in a 64-bit code)
x &= 0x1fffff # only take first 21 bits
# shift left 32 bits, OR with self, and 00011111000000000000000000000000000000001111111111111111
x = (x | (x << 32)) & 0x1f00000000ffff
# shift left 16 bits, OR with self, and 00011111000000000000000011111111000000000000000011111111
x = (x | (x << 16)) & 0x1f0000ff0000ff
# shift left 8 bits, OR with self, and 0001000000001111000000001111000000001111000000001111000000000000
x = (x | (x << 8)) & 0x100f00f00f00f00f
# shift left 4 bits, OR with self, and 0001000011000011000011000011000011000011000011000011000100000000
x = (x | (x << 4)) & 0x10c30c30c30c30c3
# shift left 2 bits, OR with self, and 0001001001001001001001001001001001001001001001001001001001001001
x = (x | (x << 2)) & 0x1249249249249249
return x
def get_z_order(x, y, z):
"""
Given 3 arrays of corresponding x, y, z
coordinates, compute the morton (or z) code for
each point and return an index array
We compute the Morton order as follows:
1- Split all coordinates by 3 (add 2 zeros between bits)
2- Shift bits left by 1 for y and 2 for z
3- Interleave x, shifted y, and shifted z
    The Morton order is the final interleaved bit sequence
:param x: x coordinates
:param y: y coordinates
:param z: z coordinates
:return: index array with morton code
"""
res = 0
res |= split_by_3(x) | split_by_3(y) << 1 | split_by_3(z) << 2
return res
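# Tiny worked example: for x = y = z = 1 (already rounded ints), split_by_3(1) == 1,
# so the code is 1 | (1 << 1) | (1 << 2) == 0b111 == 7; larger coordinates interleave
# their 21 bits the same way, with bit i of x landing at position 3*i.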
```
#### File: 3D_Augmentation/PointNet/show_results_PN.py
```python
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--sparsify_mode", type=str, default="PN", help="Sparsify mode")
parser.add_argument("--trans_feat", action="store_true", default=False, help='Whether to use transform feature')
parser.add_argument("--root_dir", type=str, default="/eva_data/psa/code/outputs/PointNet/classification", help="The result directory of PointNet.")
parser.add_argument("--output_dir", type=str, default="/eva_data/psa/code/outputs/PointNet/classification/chart", help="The output directory of PointNet loss / accuracy charts.")
args = parser.parse_args()
def show_results(result, category):
plt.figure(figsize=(10, 6))
plt.title("PointNet: " + args.sparsify_mode.replace(",", ", ") + "\n" + category)
for label, data in result.items():
if category == "loss":
plt.plot(np.arange(0, len(data)/100), data[::100], label=label)
else:
plt.plot(np.arange(0, len(data)), data, label=label + ": max_acc=%.2f" % (np.max(data) * 100))
if category == "loss":
plt.xlabel("per 100 iterations")
else:
plt.xlabel("epoch")
plt.ylabel(category)
plt.legend()
os.makedirs(os.path.join(args.output_dir, args.sparsify_mode.replace(",", "_")), exist_ok=True)
plt.savefig(os.path.join(args.output_dir, args.sparsify_mode.replace(",", "_"), category.replace(" ", "_") + ".png"))
plt.close()
if __name__ == "__main__":
train_save, test_save = {}, {}
mode_dirs = os.listdir(args.root_dir)
sparsify_modes = args.sparsify_mode.split(",")
for sparsify_mode in sparsify_modes:
for mode_dir in mode_dirs:
if sparsify_mode in mode_dir.split("_")[0]:
if (not args.trans_feat) or ((args.trans_feat) and ("transfeat" in mode_dir)):
train_save[mode_dir] = np.load(os.path.join(args.root_dir, mode_dir, "train_save.npy"), allow_pickle=True)
test_save[mode_dir] = np.load(os.path.join(args.root_dir, mode_dir, "test_save.npy"), allow_pickle=True)
for key_train, key_test in zip(train_save.keys(), test_save.keys()):
train_save[key_train] = train_save[key_train].item()
test_save[key_test] = test_save[key_test].item()
train_instance_acc = {}
train_loss = {}
test_instance_acc = {}
test_class_acc = {}
for key_train, key_test in zip(train_save.keys(), test_save.keys()):
train_instance_acc[key_train] = train_save[key_train]["instance_acc"]
train_loss[key_train] = train_save[key_train]["loss"]
test_instance_acc[key_test] = test_save[key_test]["instance_acc"]
test_class_acc[key_test] = test_save[key_test]["class_acc"]
show_results(train_instance_acc, "training instance accuracy")
show_results(train_loss, "loss")
show_results(test_instance_acc, "testing instance accuracy")
show_results(test_class_acc, "testing class accuracy")
# category = "training instance accuracy"
# plt.figure(figsize=(10, 6))
# plt.title("PointNet: " + args.sparsify_mode.replace(",", "_") + "\n" + category)
# for label, data in train_instance_acc.items():
# plt.plot(np.arange(0, len(data)), data, label=label)
# plt.xlabel("epoch")
# plt.ylabel(category)
# plt.legend(loc=4)
# os.makedirs(os.path.join(args.output_dir, args.sparsify_mode.replace(",", "_")))
# plt.savefig(os.path.join(args.output_dir, args.sparsify_mode.replace(",", "_"), category.replace(" ", "_") + ".png"))
# plt.close()
# plt.figure()
# plt.title("PointNet " + mode + " - " + category)
# for result in [train_save, test_save]:
# for transfeat_key in result.keys():
# content = result[transfeat_key]
# for content_key in content.keys():
# data = content[content_key]
# train_instance_acc = np.load(os.path.join(args.result_dir, "train_instance_accuracy.npy"))
# train_loss = np.load(os.path.join(args.result_dir, "train_loss.npy"))
# test_instance_acc = np.load(os.path.join(args.result_dir, "test_instance_accuracy.npy"))
# test_class_acc = np.load(os.path.join(args.result_dir, "test_class_accuracy.npy"))
# show_results("loss", train_loss, "pretrained_fixed", "training loss", "train_loss.png")
# show_results("accuracy", train_instance_acc, "pretrained_fixed", "training instance accuracy", "train_instance_accuracy.png")
# show_results("accuracy", test_instance_acc, "pretrained_fixed", "testing instance accuracy", "test_instance_accuracy.png")
# show_results("accuracy", test_class_acc, "pretrained_fixed", "testing class accuracy", "test_class_accuracy.png")
```
#### File: 3D_Augmentation/PointNet/train_cls.py
```python
from data_utils.ModelNetDataLoader import ModelNetDataLoader
import os
import argparse
import logging
import importlib
import sys
import torch
import numpy as np
import provider
from tqdm import tqdm
# Add ./models into system path
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(ROOT_DIR, "models"))
# Official
parser = argparse.ArgumentParser("PointNet")
parser.add_argument("--batch_size", type=int, default=256, help="Batch size in training [default: 24]")
parser.add_argument("--model", default="pointnet_cls", help="Model name [default: pointnet_cls]")
parser.add_argument("--epoch", default=200, type=int, help="Number of epoch in training [default: 200]")
parser.add_argument("--learning_rate", default=0.001, type=float, help="Learning rate in training [default: 0.001]")
parser.add_argument("--gpu", type=str, default="0", help="Specify gpu device [default: 0]")
parser.add_argument("--num_point", type=int, default=1024, help="Point Number [default: 1024]")
parser.add_argument("--optimizer", type=str, default="Adam", help="Optimizer for training [default: Adam]")
parser.add_argument("--output_dir", type=str, default="/home/zchin/augmentation_output/PointNet/...", help="Experiment root")
parser.add_argument("--decay_rate", type=float, default=1e-4, help="Decay rate [default: 1e-4]")
# MSN_PointNet
parser.add_argument("--sparsify_mode", type=str, default="random", choices=["PN", "random", "fps", "zorder", "multizorder"], help="Sparsify mode")
parser.add_argument("--dataset_mode", type=str, default="ModelNet40", choices=["ModelNet40", "ModelNet10", "ShapeNet", "ShapeNet_all"], help="Dataset mode. PLZ choose in [ModelNet40, ModelNet10, ShapeNet, ShapeNet_all]")
parser.add_argument("--zorder_mode", type=str, default="keep", choices=["keep", "discard"], help="Zorder sampled mode. PLZ choose in [keep, discard]")
parser.add_argument("--trans_feat", action="store_true", default=False, help='Whether to use transform feature')
parser.add_argument('--data-txt', type=str, help="Training data txt file")
parser.add_argument('--augment_data_dir', type=str, help="Directory that stores the augmented pcd files")
parser.add_argument('--ratio', type=float, default=0.7, help="Real data ratio")
args = parser.parse_args()
# HYPER PARAMETER
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu # GPU devices
num_classes = {"ModelNet40": 40, "ModelNet10": 10, "ShapeNet": 8, "ShapeNet_all": 57}
num_class = num_classes[args.dataset_mode] # Number of class (default for ModelNet40)
train_save = {
"instance_acc": [],
"loss": []
} # Save training accuracy and loss
test_save = {
"instance_acc": [],
"class_acc": []
} # Save testing instance accuracy and class accuracy
def create_output_dir():
    # Create output directory according to sparsify mode, normalize, and trans_feat
trans_feat_dir = "_transfeat" if args.trans_feat else ""
mode_dir = args.sparsify_mode + trans_feat_dir
if args.sparsify_mode == "zorder" or args.sparsify_mode == "multizorder":
mode_dir = args.zorder_mode + "_" + mode_dir
if args.output_dir == "/work/eva0856121/Augmentation/code/outputs/PointNet":
# output_dir = os.path.join(args.output_dir, args.dataset_mode + "_cls", mode_dir)
output_dir = os.path.join(args.output_dir, "aug_" + args.dataset_mode + "_cls", mode_dir + "_MN40_VI_inter_rotate90AugOurs")
else:
output_dir = args.output_dir
os.makedirs(output_dir, exist_ok=True)
return output_dir
def set_logger(log_dir):
# Setup LOG file format
global logger
logger = logging.getLogger(args.model)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
file_handler = logging.FileHandler(os.path.join(log_dir, args.model + ".txt"))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def log_string(message):
# Write message into log.txt
logger.info(message)
print(message)
def backup_python_file(backup_dir):
os.system("cp ./train_cls.py {}".format(backup_dir))
os.system("cp ./models/{}.py {}".format(args.model, backup_dir))
os.system("cp ./models/pointnet_util.py {}".format(backup_dir))
def create_dataloader():
print("Load " + args.dataset_mode + " as dataset ...")
# Create training dataloader
TRAIN_DATASET = ModelNetDataLoader(npoint=args.num_point, split="train", sparsify_mode=args.sparsify_mode, dataset_mode=args.dataset_mode, zorder_mode=args.zorder_mode,data_txt=args.data_txt,augment_data_dir=args.augment_data_dir,ratio=args.ratio)
trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=True)
# Create testing dataloader
TEST_DATASET = ModelNetDataLoader(npoint=args.num_point, split="test", sparsify_mode=args.sparsify_mode, dataset_mode=args.dataset_mode, zorder_mode=args.zorder_mode)
testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)
return trainDataLoader, testDataLoader
def create_network():
# Create network (classifier) and criterion
MODEL = importlib.import_module(args.model) # Load model from args.model (e.g. pointnet_cls.py)
classifier = MODEL.get_model(num_class).cuda()
criterion = MODEL.get_loss(trans_feat_switch=args.trans_feat).cuda()
# Try load pretrained weights
try:
checkpoint = torch.load(os.path.join(checkpoints_dir, "best_model.pth"))
start_epoch = checkpoint["epoch"]
classifier.load_state_dict(checkpoint["model_state_dict"])
log_string("Use pretrain model")
except:
log_string("No existing model, starting training from scratch...")
start_epoch = 0
# Fix encoder initial weight
# for child in classifier.feat.children():
# for param in child.parameters():
# param.requires_grad = False
# Setup optimizer
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(
classifier.parameters(),
lr=args.learning_rate,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=args.decay_rate
)
else:
optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
# Setup scheduler for optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)
return classifier, criterion, optimizer, scheduler, start_epoch
def train(classifier, trainDataLoader, optimizer, scheduler, criterion):
# TRAIN MODE
mean_correct = []
scheduler.step()
for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
# Get points and target from trainDataLoader
points, target = data
points = points.data.numpy()
        # Data augmentation: random point dropout, random scaling, and random shifting
points = provider.random_point_dropout(points)
points[:,:, 0:3] = provider.random_scale_point_cloud(points[:,:, 0:3])
points[:,:, 0:3] = provider.shift_point_cloud(points[:,:, 0:3])
points = torch.Tensor(points)
target = target[:, 0]
points = points.transpose(2, 1)
points, target = points.cuda(), target.cuda()
optimizer.zero_grad()
# Start training
classifier = classifier.train()
pred, trans_feat = classifier(points)
loss = criterion(pred, target.long(), trans_feat)
train_save["loss"].append(loss.item()) # Save training loss
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item() / float(points.size()[0]))
loss.backward()
optimizer.step()
train_instance_acc = np.mean(mean_correct)
train_save["instance_acc"].append(train_instance_acc) # Save training instance accuracy
return train_instance_acc
def test(model, testDataLoader):
mean_correct = []
class_acc = np.zeros((num_class, 3))
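    # class_acc columns: [0] accumulates per-batch accuracy for each class, [1] counts the
    # batches containing that class, [2] holds the resulting mean accuracy per class.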
# print(model.feat.conv1.weight[0][0])
for batch_id, data in tqdm(enumerate(testDataLoader), total=len(testDataLoader)):
# Get points and target from testDataLoader
points, target = data
target = target[:, 0]
points = points.transpose(2, 1)
points, target = points.cuda(), target.cuda()
# Evaluate by PointNet model
classifier = model.eval()
pred, _ = classifier(points)
pred_choice = pred.data.max(1)[1] # prediction results
for cat in np.unique(target.cpu()):
classacc = pred_choice[target == cat].eq(target[target == cat].long().data).cpu().sum()
class_acc[cat, 0] += classacc.item() / float(points[target == cat].size()[0]) # Compute accuracy of certain class
            class_acc[cat, 1] += 1 # Count batches containing this class
correct = pred_choice.eq(target.long().data).cpu().sum() # Total number of correct results
mean_correct.append(correct.item() / float(points.size()[0])) # Mean instance accuracy within one batch size
class_acc[:, 2] = class_acc[:, 0] / class_acc[:, 1] # The class accuracy of each class
class_acc = np.mean(class_acc[:, 2]) # Mean class accuracy (all objects)
instance_acc = np.mean(mean_correct) # Mean instance accuracy (all objects)
# Save testing accuracy (instance and class)
test_save["instance_acc"].append(instance_acc)
test_save["class_acc"].append(class_acc)
return instance_acc, class_acc
if __name__ == "__main__":
    # Create output directory
output_dir = create_output_dir()
backup_dir = os.path.join(output_dir, "backup")
checkpoints_dir = os.path.join(output_dir, "checkpoints")
log_dir = os.path.join(output_dir, "logs")
os.makedirs(backup_dir, exist_ok=True)
os.makedirs(checkpoints_dir, exist_ok=True)
os.makedirs(log_dir, exist_ok=True)
# Backup important .py file
backup_python_file(backup_dir)
# Setup LOG file format
set_logger(log_dir)
log_string("Argument parameter: {}".format(args))
# Create training and testing dataloader
trainDataLoader, testDataLoader = create_dataloader()
# Create network (classifier), optimizer, scheduler
# classifier, criterion, optimizer, scheduler, start_epoch = create_network()
# # Setup parameters for training and testing
# global_epoch = 0
# best_instance_acc = 0.0
# best_class_acc = 0.0
# # Start training
# logger.info("Start training...")
# for epoch in range(start_epoch, args.epoch):
# log_string("Epoch %d (%d/%s):" % (global_epoch+1, epoch+1, args.epoch))
# # TRAIN MODE
# train_instance_acc = train(classifier, trainDataLoader, optimizer, scheduler, criterion)
# log_string("Train Instance Accuracy: %f" % (train_instance_acc))
# # TEST MODE
# with torch.no_grad():
# instance_acc, class_acc = test(classifier.eval(), testDataLoader)
# if instance_acc >= best_instance_acc:
# best_instance_acc = instance_acc
# best_epoch = epoch + 1
# if class_acc >= best_class_acc:
# best_class_acc = class_acc
# log_string("Test Instance Accuracy: %f, Class Accuracy: %f" % (instance_acc, class_acc))
# log_string("Best Instance Accuracy: %f, Class Accuracy: %f" % (best_instance_acc, best_class_acc))
# # Save best training details
# if instance_acc >= best_instance_acc:
# logger.info("Save model...")
# save_path = os.path.join(checkpoints_dir, "best_model.pth")
# log_string("Saving at %s" % (save_path))
# state = {
# "epoch": best_epoch,
# "instance_acc": instance_acc,
# "class_acc": class_acc,
# "model_state_dict": classifier.state_dict(),
# "optimizer_state_dict": optimizer.state_dict(),
# }
# torch.save(state, save_path)
# global_epoch += 1
# # Save weights and [training, testing] results
# # if epoch % 5 == 0:
# # torch.save(state, os.path.join(checkpoints_dir, "model_%d.pth" %(epoch)))
# np.save(os.path.join(output_dir, "train_save.npy"), train_save)
# np.save(os.path.join(output_dir, "test_save.npy"), test_save)
# logger.info("End of training...")
```
#### File: 3D_Augmentation/scene-representation-networks/llff2srn.py
```python
import os
import struct
import collections
import imageio
import numpy as np
from shutil import copyfile
root = "/work/eva0856121/datasets/NeRF/nerf_llff_data/"
dis = "/work/eva0856121/datasets/srn_data/llff_train/"
intrinsic = {
"fern" : [3260.5263328805895, 2016.0, 1512.0, 0.019750463822896944],
"flower" : [3575.0586059510074, 2016.0, 1512.0, 0.025134574603909228],
"fortress" : [3371.3189700388566, 2016.0, 1512.0, 0.007315123920083764],
"horns" :[3368.8237176028883, 2016.0, 1512.0, 0.012300143763124053],
"leaves" : [3428.4755177313386, 2016.0, 1512.0, 0.03738835450733112],
"orchids" : [3124.62276683125, 2016.0, 1512.0, 0.03324880811508114],
"room" : [3070.63827088164, 2016.0, 1512.0, 0.009615941992791891],
"trex" : [3329.8699571672205, 2016.0, 1512.0, 0.01265264013010501]
}
'''
intrinsic = {
"Truck": [1.15882652e+03, 9.60000000e+02, 5.40000000e+02, -2.91480825e-02]
}
'''
BaseImage = collections.namedtuple(
"Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
def qvec2rotmat(qvec):
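    # Convert a COLMAP quaternion (w, x, y, z) into the corresponding 3x3 rotation matrix.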
return np.array([
[1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
[2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
[2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
class Image(BaseImage):
def qvec2rotmat(self):
return qvec2rotmat(self.qvec)
def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
"""Read and unpack the next bytes from a binary file.
    :param fid: Open binary file object to read from.
:param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
:param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
:param endian_character: Any of {@, =, <, >, !}
:return: Tuple of read and unpacked values.
"""
data = fid.read(num_bytes)
return struct.unpack(endian_character + format_char_sequence, data)
def read_images_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for image_index in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi")
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(fid, num_bytes=8,
format_char_sequence="Q")[0]
x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
format_char_sequence="ddq"*num_points2D)
xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3]))])
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
scenes = os.listdir(root)
# scenes = ["Truck"]
for scene in scenes:
print("Now we are create ", scene)
# Create intrinsic file
with open(os.path.join(dis, scene, "intrinsics.txt"), "w") as fp:
for element in intrinsic[scene]:
print(element, end=" ", file=fp)
print("", file=fp)
print("0. 0. 0.", file=fp)
print("1.", file=fp)
img_path = os.path.join(root, scene, "images")
images = sorted(os.listdir(img_path))
img = imageio.imread(os.path.join(img_path, images[0]))[:, :, :3]
print("{} {}".format(img.shape[1], img.shape[0]), file=fp)
# Create poses dir
pose_dir = os.path.join(dis, scene, "pose")
os.makedirs(pose_dir, exist_ok=True)
imagesfile = os.path.join(root, scene, 'sparse/0/images.bin')
imdata = read_images_binary(imagesfile)
w2c_mats = []
bottom = np.array([0,0,0,1.]).reshape([1,4])
names = [imdata[k].name for k in imdata]
print(names)
    print('Images #', len(names))
perm = np.argsort(names)
for k in imdata:
im = imdata[k]
R = im.qvec2rotmat()
t = im.tvec.reshape([3,1])
m = np.concatenate([np.concatenate([R, t], 1), bottom], 0)
m = m.reshape(-1)
with open(os.path.join(pose_dir, names[k-1].replace(".jpg", ".txt").replace(".JPG", ".txt").replace(".png", ".txt").replace(".PNG", ".txt")), "w") as fp:
for element in m:
print(element, end=" ", file=fp)
```
#### File: 3D_Augmentation/scene-representation-networks/train_weight_pred.py
```python
import configargparse
import os, time, datetime
import torch
import numpy as np
import dataio
from torch.utils.data import DataLoader
from srns import *
import util
from tqdm import tqdm
p = configargparse.ArgumentParser()
p.add('-c', '--config_filepath', required=False, is_config_file=True, help='Path to config file.')
# Note: in contrast to training, no multi-resolution!
p.add_argument('--img_sidelength', type=int, default=128, required=False,
help='Sidelength of test images.')
p.add_argument('--data_root', required=True, help='Path to directory with training data.')
p.add_argument('--logging_root', type=str, default='./logs',
required=False, help='Path to directory where checkpoints & tensorboard events will be saved.')
p.add_argument('--batch_size', type=int, default=32, help='Batch size.')
p.add_argument('--preload', action='store_true', default=False, help='Whether to preload data to RAM.')
p.add_argument('--max_num_instances', type=int, default=-1,
help='If \'data_root\' has more instances, only the first max_num_instances are used')
p.add_argument('--specific_observation_idcs', type=str, default=None,
help='Only pick a subset of specific observations for each instance.')
p.add_argument('--has_params', action='store_true', default=False,
help='Whether each object instance already comes with its own parameter vector.')
p.add_argument('--save_out_first_n',type=int, default=250, help='Only saves images of first n object instances.')
p.add_argument('--checkpoint_path', default=None, help='Path to trained model.')
# Model options
p.add_argument('--tracing_steps', type=int, default=10, help='Number of steps of intersection tester.')
p.add_argument('--fit_single_srn', action='store_true', required=False,
help='Only fit a single SRN for a single scene (not a class of SRNs) --> no hypernetwork')
p.add_argument('--use_unet_renderer', action='store_true',
help='Whether to use a DeepVoxels-style unet as rendering network or a per-pixel 1x1 convnet')
p.add_argument('--embedding_size', type=int, default=256,
help='Dimensionality of latent embedding.')
p.add_argument("--num_basis", type=int, required=True, help="Number of basis objs are used")
p.add_argument("--gpu", type=str, required=True, help="Choose GPU number")
opt = p.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
device = torch.device('cuda')
util.cond_mkdir(opt.logging_root)
# Save command-line parameters to log directory.
with open(os.path.join(opt.logging_root, "params.txt"), "w") as out_file:
out_file.write('\n'.join(["%s: %s" % (key, value) for key, value in vars(opt).items()]))
def back_up():
backup_dir = os.path.join(opt.logging_root, 'backup')
util.cond_mkdir(backup_dir)
os.system("cp train_weight_pred.py %s/" %(backup_dir))
os.system("cp srns.py %s/" %(backup_dir))
os.system("cp dataio.py %s/" %(backup_dir))
def create_dataloader():
if opt.specific_observation_idcs is not None:
specific_observation_idcs = list(map(int, opt.specific_observation_idcs.split(',')))
else:
specific_observation_idcs = None
dataset = dataio.SceneClassDataset(root_dir=opt.data_root,
max_num_instances=opt.max_num_instances,
specific_observation_idcs=None,
max_observations_per_instance=10,
samples_per_instance=1,
img_sidelength=opt.img_sidelength,)
dataloader = DataLoader(dataset,
batch_size=16,
shuffle=True,
drop_last=True,
num_workers=6)
test_dataset = dataio.SceneClassDataset(root_dir="/work/eva0856121/datasets/srn_data/cars_test/",
max_num_instances=-1,
specific_observation_idcs=specific_observation_idcs,
max_observations_per_instance=-1,
samples_per_instance=1,
img_sidelength=opt.img_sidelength,)
test_dataloader = DataLoader(test_dataset,
batch_size=16,
shuffle=False,
drop_last=False,
num_workers=6)
return dataset, dataloader, test_dataset, test_dataloader
def create_network(USE_EMBED):
model = SRNsModel(latent_dim=opt.embedding_size,
has_params=opt.has_params,
use_unet_renderer=opt.use_unet_renderer,
tracing_steps=opt.tracing_steps).cuda()
weightPred = AnalogyPred(num_basis= opt.num_basis, USE_EMBED=USE_EMBED, mode="regression").cuda()
optimizer = torch.optim.Adam(list(weightPred.parameters()) , lr=1e-3)
assert (opt.checkpoint_path is not None), "Have to pass checkpoint!"
print("Loading SRN model from %s" % opt.checkpoint_path)
util.custom_load(model, path=opt.checkpoint_path, discriminator=None, overwrite_embeddings=False)
for child in model.children():
for param in child.parameters():
param.requires_grad = False
return model, weightPred, optimizer
def train_weightPred(model, weightPred, dataset, dataloader, test_dataset, test_dataloader, optimizer, USE_EMBED):
fixed_data = dataloader.collate_fn([dataset[0]])
fixed_input = fixed_data[0][0]
if USE_EMBED:
basis_dict = util.read_pickle("./pretrained_model/srn_new/features/cars_train_embedding_all.pkl")
else:
basis_dict = util.read_pickle("./pretrained_model/srn_new/features/cars_train_feature_all.pkl")
selected_id = [i for i in range(200)]
basis_feat = [basis_dict[k].squeeze() for k in selected_id]
basis_feat = torch.stack(basis_feat).cuda() # 10 x 512 training features
print("The shape of basis: ", basis_feat.shape)
print('Begin to train weight prediction branch...')
for epoch in range(50):
weightPred.train()
model.eval()
psnrs, ssims = list(), list()
for batch_id, data in enumerate(dataloader):
model_input, ground_truth = data
model_input = model_input[0]
ground_truth = ground_truth[0]
if USE_EMBED:
feat_sim = weightPred(model.pre_phi(model.resnet(model_input["input_img"].cuda()).squeeze()), basis_feat)
pred_output = model(input = model_input, weighted_ebd = torch.matmul(feat_sim, basis_feat.squeeze()))
else:
feat_sim = weightPred(model.resnet(model_input["input_img"].cuda()).squeeze(), basis_feat)
pred_output = model(input = model_input, weighted_ebd = model.pre_phi(torch.matmul(feat_sim, basis_feat.squeeze())))
optimizer.zero_grad()
dist_loss = model.get_image_loss(pred_output, ground_truth)
reg_loss = model.get_regularization_loss(pred_output, ground_truth)
latent_loss = model.get_latent_loss()
weighted_dist_loss = 200 * dist_loss
weighted_reg_loss = 1 * reg_loss
weighted_latent_loss = 0.001 * latent_loss
total_loss = weighted_dist_loss + weighted_reg_loss + weighted_latent_loss
total_loss.backward()
optimizer.step()
psnr, ssim = model.get_psnr(pred_output, ground_truth)
psnrs.extend(psnr)
ssims.extend(ssim)
print("Training epoch %d. Running mean PSNR %0.6f SSIM %0.6f" % (epoch, np.mean(psnrs), np.mean(ssims)))
with open(os.path.join(opt.logging_root, "results.txt"), "a") as out_file:
out_file.write("Epoch %d. Training Running mean PSNR %0.6f SSIM %0.6f\n" % (epoch, np.mean(psnrs), np.mean(ssims)))
if epoch % 5 == 0:
util.save_model(model_dir, weightPred, 'analogy', epoch)
util.save_opt(model_dir, optimizer, 'opt', epoch)
output_imgs = model.get_output_img(pred_output).cpu().detach().numpy()
comparisons = model.get_comparisons(model_input,
pred_output,
ground_truth)
for idx in range(1):
img_only_path = os.path.join(train_img_dir, "output", "%03d" % epoch)
comp_path = os.path.join(train_img_dir, "compare", "%03d" % epoch)
util.cond_mkdir(img_only_path)
util.cond_mkdir(comp_path)
pred = util.convert_image(output_imgs[idx].squeeze())
comp = util.convert_image(comparisons[idx].squeeze())
util.write_img(pred, os.path.join(img_only_path, "%03d_%06d.png" % (batch_id, idx)))
util.write_img(comp, os.path.join(comp_path, "%03d_%06d.png" % (batch_id, idx)))
if epoch % 5 == 0:
weightPred.eval()
model.eval()
with torch.no_grad():
print('Testing')
psnrs, ssims = list(), list()
for batch_id, data in enumerate(test_dataloader):
model_input, ground_truth = data
model_input = model_input[0]
ground_truth = ground_truth[0]
if USE_EMBED:
feat_sim = weightPred(model.pre_phi(model.resnet(model_input["input_img"].cuda()).squeeze()), basis_feat)
pred_output = model(input = model_input, weighted_ebd = torch.matmul(feat_sim, basis_feat.squeeze()))
else:
feat_sim = weightPred(model.resnet(model_input["input_img"].cuda()).squeeze(), basis_feat)
pred_output = model(input = model_input, weighted_ebd = model.pre_phi(torch.matmul(feat_sim, basis_feat.squeeze())))
psnr, ssim = model.get_psnr(pred_output, ground_truth)
psnrs.extend(psnr)
ssims.extend(ssim)
print("Testing epoch %d. Running mean PSNR %0.6f SSIM %0.6f" % (epoch, np.mean(psnrs), np.mean(ssims)))
with open(os.path.join(opt.logging_root, "results.txt"), "a") as out_file:
out_file.write("Epoch %d. Testing Running mean PSNR %0.6f SSIM %0.6f\n" % (epoch, np.mean(psnrs), np.mean(ssims)))
output_imgs = model.get_output_img(pred_output).cpu().detach().numpy()
comparisons = model.get_comparisons(model_input,
pred_output,
ground_truth)
for idx in range(len(output_imgs)):
img_only_path = os.path.join(test_img_dir, "output", "%03d" % epoch)
comp_path = os.path.join(test_img_dir, "compare", "%03d" % epoch)
util.cond_mkdir(img_only_path)
util.cond_mkdir(comp_path)
pred = util.convert_image(output_imgs[idx].squeeze())
comp = util.convert_image(comparisons[idx].squeeze())
util.write_img(pred, os.path.join(img_only_path, "%03d_%06d.png" % (batch_id, idx)))
util.write_img(comp, os.path.join(comp_path, "%03d_%06d.png" % (batch_id, idx)))
if __name__ == '__main__':
back_up()
USE_EMBED = True
# Create recording place
model_dir = os.path.join(opt.logging_root, 'model')
util.cond_mkdir(model_dir)
train_img_dir = os.path.join(opt.logging_root, 'train_image')
util.cond_mkdir(train_img_dir)
test_img_dir = os.path.join(opt.logging_root, 'test_image')
util.cond_mkdir(test_img_dir)
# Get model, dataset ready
dataset, dataloader, test_dataset, test_dataloader= create_dataloader()
print(len(dataset))
print(len(test_dataset))
model, weightPred, optimizer = create_network(USE_EMBED)
train_weightPred(model, weightPred, dataset, dataloader, test_dataset, test_dataloader, optimizer, USE_EMBED)
``` |
{
"source": "joycenerd/AICUP_MangoClassification",
"score": 3
} |
#### File: joycenerd/AICUP_MangoClassification/dataset.py
```python
from torch.utils.data import Dataset,DataLoader
from pathlib import Path
import numpy as np
from options import opt
from PIL import Image
from torchvision import transforms
import torch
import collections
import numbers
import random
import torch.nn as nn
# from dataset_utils import *
label_dict={
'A':0,
'B':1,
'C':2
}
class MangoDataset(Dataset):
def __init__(self,root_dir,transform=None):
self.root_dir=Path(root_dir)
self.x=[]
self.y=[]
self.transform=transform
self.num_classes=opt.num_classes
if root_dir.name=='C1-P1_Train':
            labels=np.genfromtxt(Path(opt.data_root).joinpath('train.csv'),dtype=str,delimiter=',',skip_header=1)
else:
            labels=np.genfromtxt(Path(opt.data_root).joinpath('dev.csv'),dtype=str,delimiter=',',skip_header=1)
for label in labels:
self.x.append(label[0])
self.y.append(label_dict[label[1]])
def __len__(self):
return len(self.x)
def __getitem__(self, index):
image_path=Path(self.root_dir).joinpath(self.x[index])
image=Image.open(image_path).convert('RGB')
image=image.copy()
if self.transform:
image=self.transform(image)
return image,self.y[index]
def Dataloader(dataset,batch_size,shuffle,num_workers):
data_loader=DataLoader(dataset=dataset,batch_size=batch_size,shuffle=shuffle,num_workers=num_workers)
return data_loader
def _random_colour_space(x):
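    # Reinterpret the RGB image in HSV mode as a simple colour-space augmentation.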
output = x.convert("HSV")
return output
class RandomShift(object):
def __init__(self, shift):
self.shift = shift
@staticmethod
def get_params(shift):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
hshift, vshift = np.random.uniform(-shift, shift, size=2)
return hshift, vshift
def __call__(self, img):
hshift, vshift = self.get_params(self.shift)
return img.transform(img.size, Image.AFFINE, (1,0,hshift,0,1,vshift), resample=Image.BICUBIC, fill=1)
def make_dataset(_dir):
colour_transform = transforms.Lambda(lambda x: _random_colour_space(x))
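    # Pool of candidate augmentations; the whole list is applied with probability 0.5
    # via transforms.RandomApply in the training transform below.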
transform = [
transforms.RandomAffine(degrees=30,shear=50, resample=False, fillcolor=0),
transforms.RandomGrayscale(p=0.5),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomPerspective(distortion_scale=0.5, p=0.5, interpolation=3, fill=0),
transforms.RandomVerticalFlip(p=0.5),
transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
transforms.Grayscale(num_output_channels=3),
RandomShift(3),
transforms.RandomApply([colour_transform]),
]
data_transform_train=transforms.Compose([
transforms.RandomResizedCrop(opt.img_size),
transforms.RandomApply(transform,p=0.5),
transforms.RandomApply([transforms.RandomRotation((-90,90), resample=False, expand=False, center=None)],p=0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485,0.456,0.406],std=[0.229,0.224,0.225])
])
data_transform_dev=transforms.Compose([
transforms.Resize((opt.img_size,opt.img_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485,0.456,0.406],std=[0.229,0.224,0.225])
])
if(_dir=='C1-P1_Train'):
data_set=MangoDataset(Path(opt.data_root).joinpath(_dir),data_transform_train)
elif(_dir=='C1-P1_Dev'):
data_set=MangoDataset(Path(opt.data_root).joinpath(_dir),data_transform_dev)
return data_set
``` |
{
"source": "joycenerd/Computer_Vision_2021",
"score": 3
} |
#### File: Computer_Vision_2021/HW2/hybrid_image.py
```python
from PIL import Image
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
DATA_PATH="./hw2_data/task1,2_hybrid_pyramid/"
def fourier_transform(feature):
F=np.fft.fft2(feature)
F=np.fft.fftshift(F)
return F
def gauss_low_pass(height,width,D0):
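    # Gaussian low-pass filter in the frequency domain: H(u, v) = exp(-D(u, v)^2 / (2 * D0^2)),
    # where D(u, v) is the distance from the spectrum center and D0 is the cutoff frequency.
    # The matching high-pass filter used below is 1 - H.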
H=np.zeros((height,width))
center_point=(height/2,width/2)
for v in range(height):
for u in range(width):
D=np.sqrt((v-center_point[0])**2+(u-center_point[1])**2)
H[v][u]=np.exp(-D**2/(2*D0**2))
return H
def inv_fourier_transform(F):
f=np.fft.ifftshift(F)
f=np.fft.ifft2(f)
return f
if __name__=='__main__':
if not os.path.exists("./results"):
os.mkdir("./results")
num_list=["0","1","2","3","4","5","6","7"]
for num in num_list:
image_pair=[]
plt.figure(figsize=(10,4))
        for image_path in sorted(glob.glob(DATA_PATH+num+"*")):  # sort so the pair order is deterministic
image_pair.append(image_path)
image1=Image.open(image_pair[0])
image1=np.array(image1)
height1,width1,channel=image1.shape
output_image_1=np.zeros((height1,width1,channel))
for c in range(channel):
feature=image1[:,:,c]
# fourier transform
F=fourier_transform(feature)
# H(u,v) -> low pass filter
H=gauss_low_pass(height1,width1,8)
# F(u,v)*H(u,v)
low_pass_image=F*H
# Compute the inverse Fourier transformation
f=inv_fourier_transform(low_pass_image)
# Obtain the real part
real_image=np.abs(f)
output_image_1[:,:,c]=real_image
output_image_1=Image.fromarray(np.uint8(output_image_1)).convert("RGB")
plt.subplot(1,3,1)
plt.imshow(output_image_1)
image2=Image.open(image_pair[1])
image2=np.array(image2)
height2,width2,channel=image2.shape
output_image_2=np.zeros((height2,width2,channel))
for c in range(channel):
feature=image2[:,:,c]
# fourier transform
F=fourier_transform(feature)
# H(u,v) -> high pass filter
H=gauss_low_pass(height2,width2,8)
H=1-H
high_pass_image=F*H
# compute the inverse fourier transform
f=inv_fourier_transform(high_pass_image)
# obtain the real part
real_image=np.abs(f)
output_image_2[:,:,c]=real_image
output_image_2=Image.fromarray(np.uint8(output_image_2)).convert("RGB")
plt.subplot(1,3,2)
plt.imshow(output_image_2)
# merge two image
height=height1 if height1>=height2 else height2
width=width1 if width1>=width2 else width2
output_image_1=output_image_1.resize((width,height))
output_image_2=output_image_2.resize((width,height))
blend_image=Image.blend(output_image_1,output_image_2,0.5)
plt.subplot(1,3,3)
plt.imshow(blend_image)
plt.savefig("./results/"+num+".jpg")
plt.show()
print(f'{num} completed')
```
#### File: HW5/model/model_utils.py
```python
from .resnest.restnest import get_model
from options import opt
from efficientnet_pytorch import EfficientNet
import torch
def get_net(model):
if model == 'resnest50':
model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest50', pretrained=True)
return model
elif model == 'resnest101':
model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest101', pretrained=True)
return model
elif model == 'resnest200':
model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest200', pretrained=True)
return model
elif model == 'efficientnet-b7':
model = EfficientNet.from_pretrained(
'efficientnet-b7', num_classes=opt.num_classes)
return model
elif model == 'efficientnet-b5':
model = EfficientNet.from_pretrained(
'efficientnet-b5', num_classes=opt.num_classes)
return model
elif model == 'efficientnet-b4':
model = EfficientNet.from_pretrained(
'efficientnet-b4', num_classes=opt.num_classes)
return model
elif model == 'efficientnet-b3':
model = EfficientNet.from_pretrained(
'efficientnet-b3', num_classes=opt.num_classes)
return model
```
#### File: Computer_Vision_2021/HW5/test.py
```python
import torchvision.models as models
import torch.nn as nn
from torchvision import transforms
import os
from pathlib import Path
from PIL import Image
import torch
from torch.autograd import Variable
from options import opt
import numpy as np
from dataset import make_dataset,Dataloader
from model.model_utils import get_net
ROOTDIR="/home/zchin/NCTU-CV_2021/HW5"
label_dict = {
0 : 'A',
1 : 'B',
2 : 'C'
}
def test():
classes = opt.num_classes
model_path = opt.weight_path
#model= torch.load(str(model_path))
#model = model.cuda(opt.cuda_devices)
model=get_net(opt.model)
model_dict=torch.load(model_path,map_location="cpu")
model.load_state_dict(model_dict)
model=model.cuda(opt.cuda_devices)
model.eval()
# print(f"Cuda num: {opt.cuda_devices}")
test_set=make_dataset("test")
    test_loader = Dataloader(
        dataset=test_set, batch_size=opt.dev_batch_size, shuffle=True, num_workers=opt.num_workers)
test_loss = 0.0
test_corrects = 0
criterion = nn.CrossEntropyLoss()
for i, (inputs, labels) in enumerate(test_loader):
inputs = Variable(inputs.cuda(opt.cuda_devices))
labels = Variable(labels.cuda(opt.cuda_devices))
outputs = model(inputs)
_, preds = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
test_loss += loss.item()*inputs.size(0)
test_corrects += torch.sum(preds == labels.data)
test_loss = test_loss/len(test_set)
test_acc = float(test_corrects)/len(test_set)
print(f"Test Loss: {test_loss}")
print(f"Test Accuracy: {test_acc}")
if __name__=='__main__':
test()
``` |
{
"source": "joycenerd/Deep_Learning_Practice_labs",
"score": 3
} |
#### File: Deep_Learning_Practice_labs/lab2/dataloader.py
```python
import numpy as np
def read_bci_data():
"""
Read the BCI data
:return train and test data
"""
S4b_train = np.load('./Data/S4b_train.npz')
X11b_train = np.load('./Data/X11b_train.npz')
S4b_test = np.load('./Data/S4b_test.npz')
X11b_test = np.load('./Data/X11b_test.npz')
train_data = np.concatenate((S4b_train['signal'], X11b_train['signal']), axis=0)
train_label = np.concatenate((S4b_train['label'], X11b_train['label']), axis=0)
test_data = np.concatenate((S4b_test['signal'], X11b_test['signal']), axis=0)
test_label = np.concatenate((S4b_test['label'], X11b_test['label']), axis=0)
train_label = train_label - 1
test_label = test_label - 1
train_data = np.transpose(np.expand_dims(train_data, axis=1), (0, 1, 3, 2))
test_data = np.transpose(np.expand_dims(test_data, axis=1), (0, 1, 3, 2))
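    # Replace NaN entries with the mean of the remaining (non-NaN) values of the corresponding split.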
mask = np.where(np.isnan(train_data))
train_data[mask] = np.nanmean(train_data)
mask = np.where(np.isnan(test_data))
test_data[mask] = np.nanmean(test_data)
# print(train_data.shape, train_label.shape, test_data.shape, test_label.shape)
return train_data, train_label, test_data, test_label
```
#### File: Deep_Learning_Practice_labs/lab2/demo.py
```python
from model import EEGNet, DeepConvNet
from dataloader import read_bci_data
from dataset import EEGDataset
from torch.utils.data import DataLoader
import torch.nn as nn
import torch
def demo(checkpoints, X_test, y_test):
"""
In the convenience of demonstrating the best results
:param checkpoints: checkpoint path dict
:param X_test: testing data (signal)
:param y_test: testing label
"""
device = "cuda"
# prepare data
test_set = EEGDataset(X_test, y_test)
test_loader = DataLoader(test_set, batch_size=256, shuffle=True, num_workers=4)
test_size = len(test_set)
for act in checkpoints:
model_path = checkpoints[act]
# model
net = load_model(model_path)
net.to(device)
net.eval()
loss_func = nn.CrossEntropyLoss()
test_loss = 0.0
test_acc = 0.0
for idx, (inputs, targets) in enumerate(test_loader):
inputs = inputs.to(device, dtype=torch.float)
targets = targets.to(device, dtype=torch.long)
outputs = net(inputs)
test_loss += loss_func(outputs, targets).item()
_, predicted = torch.max(outputs.data, 1)
test_acc += (predicted == targets).sum().item()
test_loss /= test_size
test_acc /= test_size
print(f"{act} test_loss: {test_loss:.4}\ttest_acc: {test_acc:.4}")
print("===========================================================\n")
def load_model(model_path):
"""
load the model from checkpoint
:param model_path: checkpoint path
:return model for testing
"""
checkpoint = torch.load(model_path)
model_name = checkpoint['model_name']
act = checkpoint['act']
if model_name == "EEGNet":
net = EEGNet(act)
elif model_name == "DeepConvNet":
net = DeepConvNet(act)
net.load_state_dict(checkpoint['model_state_dict'])
return net
if __name__ == '__main__':
# read data
X_train, y_train, X_test, y_test = read_bci_data()
# EEGNet results
EEGNet_checkpoints = {
'ELU': './checkpoints/EEGNet/EEGNet_elu_5e-3_amsgrad_0.8407.pt',
'ReLU': './checkpoints/EEGNet/EEGNet_relu_1e-3_0.8731.pt',
'LeakyReLU': './checkpoints/EEGNet/EEGNet_leaky_relu_1e-2_init_amsgrad_0.8787.pt'
}
print("EEGNet results")
demo(EEGNet_checkpoints, X_test, y_test)
# DeepConvNet results
DeepConvNet_checkpoints = {
'ELU': './checkpoints/DeepConvNet/DeepConvNet_elu_1e-3_amsgrad_0.7454.pt',
'ReLU': './checkpoints/DeepConvNet/DeepConvNet_relu_1e-2_0.7102.pt',
'LeakyReLU': './checkpoints/DeepConvNet/DeepConvNet_leaky_relu_1e-2_init_amsgrad_0.7352.pt'
}
print("DeepConvNet results")
demo(DeepConvNet_checkpoints, X_test, y_test)
```
#### File: Deep_Learning_Practice_labs/lab3/train.py
```python
from resnet import resnet18, resnet50, pretrained_resnet, initialize_weights, funct
from dataloader import RetinopathyLoader
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torchsummary import summary
from tqdm import tqdm
import torch.nn as nn
import torch
import argparse
import logging
import copy
import os
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="resnet18", help="which model: [resnet18, resnet50]")
parser.add_argument("--pretrain", type=bool, default=False, help="whether to use pretrained weights")
parser.add_argument("--act", type=str, default='relu', help="which activation function to use: [relu, leaky_relu,selu]")
parser.add_argument("--device", type=str, default="cuda:1", help="use which device for training")
parser.add_argument("--batch-size", type=int, default=32, help="batch size for training")
parser.add_argument("--lr", type=float, default=1e-3, help="learning rata")
parser.add_argument("--epochs", type=int, default=10, help="num of epochs for training")
parser.add_argument("--model-path", type=str, default="./checkpoints/resnet50/relu_0.001_0.7335.pt")
parser.add_argument("--load", type=bool, default=True, help="if load the weight param from checkpoint before training")
parser.add_argument("--img-size", type=int, default=256, help="Size to resize the image")
args = parser.parse_args()
def train():
_model = args.model
pretrain = args.pretrain
act = args.act
device = args.device
batch_size = args.batch_size
lr = args.lr
epochs = args.epochs
load = args.load
model_path = args.model_path
img_size = args.img_size
    save_name = f"{act}_{lr}"
writer = SummaryWriter(f"runs/{_model}/{save_name}")
logging.basicConfig(format='%(asctime)s - %(message)s',
level=logging.INFO,
handlers=[logging.FileHandler(f'./record/{save_name}.log', 'w', 'utf-8')])
# data preprocessing
train_set = RetinopathyLoader("./Data/data", "train", img_size)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=4)
test_set = RetinopathyLoader("./Data/data", "test", img_size)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4)
train_size = len(train_set)
test_size = len(test_set)
# initialize model
if load:
model = load_model(model_path)
elif pretrain:
model = pretrained_resnet(_model)
else:
if _model == "resnet18":
model = resnet18(3, 5, act=act)
elif _model == "resnet50":
model = resnet50(3, 5, act=act)
model.apply(initialize_weights)
model = model.to(device)
# initialize loss function
loss_func = nn.CrossEntropyLoss()
# initialize optimizer
optimizer = torch.optim.SGD(model.parameters(), lr, momentum=0.9, weight_decay=5e-4)
# initialize scheduler
scheduler = StepLR(optimizer, step_size=2, gamma=0.5)
best_train_acc = 0.0
best_acc = 0.0
best_model_params = copy.deepcopy(model.state_dict())
for epoch in range(1, epochs + 1):
print(f"Epoch {epoch}/{epochs}")
print("-" * len(f"Epoch {epoch}/{epochs}"))
logging.info(f"Epoch {epoch}/{epochs}")
train_loss = 0.0
train_acc = 0.0
# training
model.train()
for idx, (inputs, targets) in enumerate(tqdm(train_loader)):
inputs = inputs.to(device, dtype=torch.float)
targets = targets.to(device, dtype=torch.long)
# forward pass
outputs = model(inputs)
loss = loss_func(outputs, targets)
train_loss += loss.item() * inputs.shape[0]
_, predicted = torch.max(outputs.data, 1)
train_acc += (predicted == targets).sum().item()
# update the parameters
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss /= train_size
train_acc /= train_size
print(f"train_loss: {train_loss:.4f}\ttrain_acc: {train_acc:.4f}")
logging.info(f"train_loss: {train_loss:.4f}\ttrain_acc: {train_acc:.4f}")
# evaluation
eval_loss, eval_acc = eval(model, test_loader, test_size, loss_func)
logging.info(f"eval_loss: {eval_loss:.4f}\teval_acc: {eval_acc:.4f}")
if epoch >= 4:
scheduler.step()
        # log the result
writer.add_scalar("train_loss", train_loss, epoch)
writer.add_scalar("train_acc", train_acc, epoch)
writer.add_scalar("eval_loss", eval_loss, epoch)
writer.add_scalar("eval_acc", eval_acc, epoch)
writer.flush()
# save model parameters if accuracy is higher
if eval_acc > best_acc:
best_model_params = copy.deepcopy(model.state_dict())
best_acc = eval_acc
best_train_acc = train_acc
# save the best model
print(f"best_acc: {best_acc:.4f}\tbest_train_acc: {best_train_acc:.4f}")
logging.info(f"best_acc: {best_acc:.4f}\tbest_train_acc: {best_train_acc:.4f}")
save_model(_model, best_model_params, pretrain, act, save_name, best_acc)
writer.close()
def eval(model, test_loader, test_size, loss_func):
"""
model evaluation
Args:
model: model for evaluation
test_loader: testing data loader
test_size: size of the testing data
loss_func: loss function we are using
Returns:
evaluation loss and accuracy
"""
device = args.device
model.eval()
eval_loss = 0.0
eval_acc = 0.0
for idx, (inputs, targets) in enumerate(tqdm(test_loader)):
inputs = inputs.to(device, dtype=torch.float)
targets = targets.to(device, dtype=torch.long)
outputs = model(inputs)
eval_loss += loss_func(outputs, targets).item() * inputs.shape[0]
_, predicted = torch.max(outputs.data, 1)
eval_acc += (predicted == targets).sum().item()
eval_loss /= test_size
eval_acc /= test_size
print(f"eval_loss: {eval_loss:.4f}\teval_acc: {eval_acc:.4f}")
return eval_loss, eval_acc
def test():
"""
Test the saving model
"""
model_path = args.model_path
device = args.device
batch_size = args.batch_size
img_size = args.img_size
# prepare data
test_set = RetinopathyLoader("./Data/data", "test", img_size)
test_loader = DataLoader(test_set, batch_size=batch_size, num_workers=4)
test_size = len(test_set)
# load the model
model = load_model(model_path)
model.to(device)
model.eval()
loss_func = nn.CrossEntropyLoss()
test_loss = 0.0
test_acc = 0.0
for idx, (inputs, targets) in enumerate(tqdm(test_loader)):
inputs = inputs.to(device, dtype=torch.float)
targets = targets.to(device, dtype=torch.long)
outputs = model(inputs)
test_loss += loss_func(outputs, targets).item() * inputs.shape[0]
_, predicted = torch.max(outputs.data, 1)
test_acc += (predicted == targets).sum().item()
test_loss /= test_size
test_acc /= test_size
print(f"test_loss: {test_loss:.4}\ttest_acc: {test_acc:.4}")
def save_model(_model, model_params, pretrain, act, save_name, best_acc):
"""
Save the model as checkpoint
Args:
_model: (str) model name
model_params: (state_dict) model parameters
pretrain: (bool) whether this is a pretrained model
act: (str) activation function use in the model
save_name: (str) saving name of this checkpoint file
best_acc: (float) best evaluation accuracy
"""
save_obj = {
'model_state_dict': model_params,
'model_name': _model,
'pretrained': pretrain,
'act': act
}
torch.save(save_obj, f"./checkpoints/{_model}/{save_name}_{best_acc:.4f}.pt")
def load_model(model_path):
"""
Load the model from checkpoint file
Args:
model_path: (str) checkpoint file path
Returns:
model with saving params
"""
checkpoint = torch.load(model_path)
_model = checkpoint['model_name']
pretrain = checkpoint['pretrained']
act = checkpoint['act']
if pretrain:
model = pretrained_resnet(_model)
else:
if _model == "resnet18":
model = resnet18(3, 5, act=act)
elif _model == "resnet50":
model = resnet50(3, 5, act=act)
model.load_state_dict(checkpoint['model_state_dict'])
return model
if __name__ == "__main__":
print(args)
input("Press ENTER if no problem...")
if not os.path.isdir("./checkpoints/resnet18"):
os.makedirs("./checkpoints/resnet18")
if not os.path.isdir("./checkpoints/resnet50"):
os.makedirs("./checkpoints/resnet50")
# train()
test()
```
#### File: Deep_Learning_Practice_labs/lab5/evaluator.py
```python
import torch
import torch.nn as nn
import torchvision.models as models
'''===============================================================
1. Title:
DLP spring 2021 Lab7 classifier
2. Purpose:
For computing the classification accruacy.
3. Details:
The model is based on ResNet18 with only chaning the
last linear layer. The model is trained on iclevr dataset
with 1 to 5 objects and the resolution is the upsampled
64x64 images from 32x32 images.
It will capture the top k highest accuracy indexes on generated
images and compare them with ground truth labels.
4. How to use
You should call eval(images, labels) and to get total accuracy.
images shape: (batch_size, 3, 64, 64)
labels shape: (batch_size, 24) where labels are one-hot vectors
e.g. [[1,1,0,...,0],[0,1,1,0,...],...]
==============================================================='''
class evaluation_model():
def __init__(self,device):
#modify the path to your own path
checkpoint = torch.load('./checkpoints/classifier_weight.pth')
self.resnet18 = models.resnet18(pretrained=False)
self.resnet18.fc = nn.Sequential(
nn.Linear(512,24),
nn.Sigmoid()
)
self.resnet18.load_state_dict(checkpoint['model'])
self.resnet18 = self.resnet18.to(device)
self.resnet18.eval()
self.classnum = 24
def compute_acc(self, out, onehot_labels):
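        # For each sample, take the top-k predicted classes, where k is the number of ground-truth
        # objects (sum of its one-hot label), and count how many of them appear in the ground-truth
        # set; the returned accuracy is matches / total objects.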
batch_size = out.size(0)
acc = 0
total = 0
for i in range(batch_size):
k = int(onehot_labels[i].sum().item())
total += k
outv, outi = out[i].topk(k)
lv, li = onehot_labels[i].topk(k)
for j in outi:
if j in li:
acc += 1
return acc / total
def eval(self, images, labels):
with torch.no_grad():
#your image shape should be (batch, 3, 64, 64)
out = self.resnet18(images)
acc = self.compute_acc(out.cpu(), labels.cpu())
return acc
``` |
{
"source": "joycenerd/genrep_aug",
"score": 2
} |
#### File: genrep_aug/GenRep/main_autoencoder.py
```python
from __future__ import print_function
import argparse
import os
import random
import itertools
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision.utils as vutils
from torch.nn.functional import cosine_similarity
from tensorboardX import SummaryWriter
import oyaml as yaml
# from utils import zdataset, customnet, pbar, util, masking
# from utils import customnet, pbar, util, masking
from utils import pbar, util, masking
import customenet_biggan as customnet
# import zdataset_biggan
from networks import biggan_networks
import numpy as np
import json
import sys
sys.path.append('resources/PerceptualSimilarity') # TODO: just use lpips import
import models
import pdb;
def train(opt):
print("Random Seed: ", opt.seed)
random.seed(opt.seed)
torch.manual_seed(opt.seed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# tensorboard
writer = SummaryWriter(logdir='training/runs/%s' % os.path.basename(opt.outf))
device = torch.device("cuda:0" if opt.cuda else "cpu")
batch_size = int(opt.batchSize)
# load the generator
netG = biggan_networks.load_biggan(opt.netG).to(device).eval() #for biggan, it's model_name, e.g. 'biggan-deep-256'
util.set_requires_grad(False, netG)
# print(netG)
# # find output shape
## Ali: to find output shape, we use biggan_networks.truncated_noise_sample_() instead of zdataset_biggan.z_sample_for_model()
# z = zdataset_biggan.z_sample_for_model(netG, size=1).to(device)
# # Prepare an input for netG
truncation = 1.0
zbs = 1
z = biggan_networks.truncated_noise_sample_(truncation=truncation, batch_size=zbs).to(device)
cls_vector = biggan_networks.one_hot_from_int_(77, batch_size=zbs).to(device)
out_shape = netG(z, cls_vector, truncation).shape
in_shape = z.shape
nz = in_shape[1]
# print(out_shape)
# determine encoder input dim
assert(not (opt.masked and opt.vae_like)), "specify 1 of masked or vae_like"
has_masked_input = opt.masked or opt.vae_like
input_dim = 4 if has_masked_input else 3
modify_input = customnet.modify_layers # adds the to_z layer
# load the encoder
depth = int(opt.netE_type.split('-')[-1])
nz = nz * 2 if opt.vae_like else nz
netE = customnet.CustomResNet(size=depth, halfsize=out_shape[-1]<=150,
num_classes=nz,
modify_sequence=modify_input,
channels_in=input_dim)
netE.to(device)
# print(netE)
# import pdb;
# pdb.set_trace()
last_layer_z = torch.nn.Linear(2048, 128).to(device)
last_layer_y = torch.nn.Linear(2048, opt.num_imagenet_classes).to(device)
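    # Two linear heads on the 2048-d encoder output: last_layer_z regresses the 128-d BigGAN
    # latent z, last_layer_y predicts the ImageNet class used as the conditioning vector.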
# losses + optimizers
mse_loss = nn.MSELoss()
l1_loss = nn.L1Loss()
perceptual_loss = models.PerceptualLoss(model='net-lin', net='vgg',
use_gpu=opt.cuda)
# optimizerE = optim.Adam(netE.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
start_ep = 0
## also loss_y and optim for z and y:
ce_loss = nn.CrossEntropyLoss()
# optimizer_z = optim.Adam(last_layer_z.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
# optimizer_y = optim.Adam(last_layer_y.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerE = optim.Adam(list(netE.parameters()) + list(last_layer_z.parameters()) + list(last_layer_y.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
# z datasets
min_bs = min(16, batch_size)
train_loader = training_loader(truncation, batch_size, opt.seed)
test_zs = biggan_networks.truncated_noise_sample_(truncation=truncation,
batch_size=min_bs,
seed=opt.seed).to(device)
class_name_list = ['robin', 'standard_poodle', 'African_hunting_dog', 'gibbon', 'ambulance', 'boathouse', 'cinema', 'Dutch_oven',
'lampshade', 'laptop', 'mixing_bowl', 'pedestal', 'rotisserie', 'slide_rule', 'tripod', 'chocolate_sauce']
test_class_vectors = biggan_networks.one_hot_from_names_(class_name_list[0:min_bs], batch_size=min_bs).to(device)
# with open('./imagenet100_class_index.json', 'rb') as fid:
# imagenet100_dict = json.load(fid)
test_idx = [15, 267, 275, 368, 407, 449, 498, 544, 619, 620, 659, 708, 766, 798, 872, 960]
test_idx = test_idx[0:min_bs]
# load data from checkpoint
# come back
assert(not (opt.netE and opt.finetune)), "specify 1 of netE or finetune"
if opt.finetune:
checkpoint = torch.load(opt.finetune)
sd = checkpoint['state_dict']
# skip weights with dim mismatch, e.g. if you finetune from
# an RGB encoder
if sd['conv1.weight'].shape[1] != input_dim:
# skip first conv if needed
print("skipping initial conv")
sd = {k: v for k, v in sd.items() if k != 'conv1.weight'}
if sd['fc.bias'].shape[0] != nz:
# skip fc if needed
print("skipping fc layers")
sd = {k: v for k, v in sd.items() if 'fc' not in k}
netE.load_state_dict(sd, strict=False)
if opt.netE:
checkpoint = torch.load(opt.netE)
netE.load_state_dict(checkpoint['state_dict'])
last_layer_z.load_state_dict(checkpoint['state_dict_last_z'])
last_layer_y.load_state_dict(checkpoint['state_dict_last_y'])
optimizerE.load_state_dict(checkpoint['optimizer'])
start_ep = checkpoint['epoch'] + 1
epoch_batches = 1600 // batch_size
for epoch, epoch_loader in enumerate(pbar(
epoch_grouper(train_loader, epoch_batches),
total=(opt.niter-start_ep)), start_ep):
# stopping condition
if epoch > opt.niter:
break
# run a train epoch of epoch_batches batches
for step, (z_batch,) in enumerate(pbar(
epoch_loader, total=epoch_batches), 1):
z_batch = z_batch.to(device)
netE.zero_grad()
last_layer_z.zero_grad()
last_layer_y.zero_grad()
# fake_im = netG(z_batch).detach()
idx = np.random.choice(opt.num_imagenet_classes, z_batch.shape[0]).tolist()
class_vector = biggan_networks.one_hot_from_int_(idx, batch_size=z_batch.shape[0]).to(device)
fake_im = netG(z_batch, class_vector, truncation).detach()
if has_masked_input:
## come back
hints_fake, mask_fake = masking.mask_upsample(fake_im)
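                # Masked setting: the encoder receives the partially revealed image concatenated
                # with its binary mask (4 input channels).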
encoded = netE(torch.cat([hints_fake, mask_fake], dim=1)).view(z_batch.shape)
if opt.masked:
regenerated = netG(encoded, class_vector, truncation)
elif opt.vae_like:
sample = torch.randn_like(encoded[:, nz//2:, :, :])
encoded_mean = encoded[:, nz//2:, :, :]
encoded_sigma = torch.exp(encoded[:, :nz//2, :, :])
reparam = encoded_mean + encoded_sigma * sample
regenerated = netG(reparam, class_vector, truncation)
encoded = encoded_mean # just use mean in z loss
else:
# standard RGB encoding
encoded = netE(fake_im)
z_pred = last_layer_z(encoded)
y_pred = last_layer_y(encoded)
regenerated = netG(z_pred, class_vector, truncation)
# compute loss
loss_y = ce_loss(y_pred, torch.tensor(idx, dtype=torch.int64).to(device))
loss_z = cor_square_error_loss(z_pred, z_batch)
loss_mse = mse_loss(regenerated, fake_im)
loss_perceptual = perceptual_loss.forward(
regenerated, fake_im).mean()
            loss = (opt.lambda_z * loss_y + opt.lambda_z * loss_z + opt.lambda_mse * loss_mse
                    + opt.lambda_lpips * loss_perceptual)
# optimize
loss.backward()
optimizerE.step()
# optimizer_z.step()
# optimizer_y.step()
# send losses to tensorboard
if step % 20 == 0:
total_batches = epoch * epoch_batches + step
writer.add_scalar('loss/train_y', loss_y, total_batches)
writer.add_scalar('loss/train_z', loss_z, total_batches)
writer.add_scalar('loss/train_mse', loss_mse, total_batches)
writer.add_scalar('loss/train_lpips', loss_perceptual,
total_batches)
writer.add_scalar('loss/train_total', loss, total_batches)
# import pdb;
# pdb.set_trace()
# run the fixed test zs for visualization
netE.eval()
last_layer_z.eval()
last_layer_y.eval()
with torch.no_grad():
fake_im = netG(test_zs, test_class_vectors, truncation)
if has_masked_input:
## come back
hints_fake, mask_fake = masking.mask_upsample(fake_im)
encoded = netE(torch.cat([hints_fake, mask_fake], dim=1)).view(test_zs.shape)
if opt.masked:
regenerated = netG(encoded, test_class_vectors, truncation)
elif opt.vae_like:
sample = torch.randn_like(encoded[:, nz//2:, :, :])
encoded_mean = encoded[:, nz//2:, :, :]
encoded_sigma = torch.exp(encoded[:, :nz//2, :, :])
reparam = encoded_mean + encoded_sigma * sample
regenerated = netG(reparam, test_class_vectors, truncation)
encoded = encoded_mean # just use mean in z loss
else:
encoded = netE(fake_im)
pred_z = last_layer_z(encoded)
pred_y = last_layer_y(encoded)
regenerated = netG(pred_z, test_class_vectors, truncation)
# compute loss
            loss_y = ce_loss(pred_y, torch.tensor(test_idx, dtype=torch.int64).to(device))
loss_z = cor_square_error_loss(pred_z, test_zs)
loss_mse = mse_loss(regenerated, fake_im)
loss_perceptual = perceptual_loss.forward(
regenerated, fake_im).mean()
            loss = (opt.lambda_z * loss_y + opt.lambda_z * loss_z)  # total logged at test time uses only the y and z terms
# send to tensorboard
writer.add_scalar('loss/test_y', loss_y, epoch)
writer.add_scalar('loss/test_z', loss_z, epoch)
writer.add_scalar('loss/test_mse', loss_mse, epoch)
writer.add_scalar('loss/test_lpips', loss_perceptual,
epoch)
writer.add_scalar('loss/test_total', loss, epoch)
if has_masked_input:
grid = vutils.make_grid(
torch.cat((fake_im, hints_fake, regenerated)), nrow=8,
normalize=True, scale_each=(-1, 1))
else:
grid = vutils.make_grid(
torch.cat((fake_im, regenerated)), nrow=8,
normalize=True, scale_each=(-1, 1))
writer.add_image('Image', grid, epoch)
netE.train()
# do checkpointing
if epoch % 1000 == 0 or epoch == opt.niter:
sd = {
'state_dict': netE.state_dict(),
'state_dict_last_z': last_layer_z.state_dict(),
'state_dict_last_y': last_layer_y.state_dict(),
'optimizer': optimizerE.state_dict(),
'epoch': epoch
}
torch.save(sd, '%s/netE_epoch_%d.pth' % (opt.outf, epoch))
def cor_square_error_loss(x, y, eps=1e-8):
# Analogous to MSE, but in terms of Pearson's correlation
return (1.0 - cosine_similarity(x, y, eps=eps)).mean()
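# Illustrative check (assumes cosine_similarity is torch.nn.functional.cosine_similarity,
# imported earlier in this file): the loss is 0 when prediction and target are positively
# proportional, e.g. cor_square_error_loss(z, 2 * z) == 0, and approaches 2 when they are
# anti-correlated.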
def training_loader(truncation, batch_size, global_seed=0):
'''
Returns an infinite generator that runs through randomized z
batches, forever.
'''
g_epoch = 1
while True:
z_data = biggan_networks.truncated_noise_dataset(truncation=truncation,
batch_size=10000,
seed=g_epoch + global_seed)
dataloader = torch.utils.data.DataLoader(
z_data,
shuffle=False,
batch_size=batch_size,
num_workers=10,
pin_memory=True)
for batch in dataloader:
yield batch
g_epoch += 1
def epoch_grouper(loader, epoch_size, num_epochs=None):
'''
To use with the infinite training loader: groups the training data
batches into epochs of the given size.
'''
it = iter(loader)
epoch = 0
while True:
chunk_it = itertools.islice(it, epoch_size)
try:
first_el = next(chunk_it)
except StopIteration:
return
yield itertools.chain((first_el,), chunk_it)
epoch += 1
if num_epochs is not None and epoch >= num_epochs:
return
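# Illustrative pairing of the two helpers above (a sketch, not copied from train()):
#   loader = training_loader(truncation=1.0, batch_size=opt.batchSize)
#   for epoch, epoch_loader in enumerate(epoch_grouper(loader, epoch_size=1000)):
#       for z_batch in epoch_loader:
#           ...  # one optimization step per z batch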
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--num_imagenet_classes', type=int, default=1000,
help='e.g., 100 or 1000')
parser.add_argument('--netE_type', type=str, default='resnet-50',
help='type of encoder architecture')
parser.add_argument('--batchSize', type=int, default=8, help='input batch size')
parser.add_argument('--niter', type=int, default=2000, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--netG', default='', help="generator to load")
parser.add_argument('--netE', default='', help="path to netE (to continue training)")
parser.add_argument('--outf', default='./resnet50_zy_pix', help='folder to output model checkpoints')
parser.add_argument('--seed', default=0, type=int, help='manual seed')
parser.add_argument('--lambda_z', default=1.0, type=float, help='loss weighting')
parser.add_argument('--lambda_mse', default=1.0, type=float, help='loss weighting')
parser.add_argument('--lambda_lpips', default=1.0, type=float, help='loss weighting')
parser.add_argument('--finetune', type=str, default='',
help="finetune from these weights")
parser.add_argument('--masked', action='store_true', help="train with masking")
parser.add_argument('--vae_like', action='store_true',
help='train with masking, predict mean and sigma')
opt = parser.parse_args()
opt.outf = '{}_{}'.format(opt.outf, opt.num_imagenet_classes)
print(opt)
assert opt.netE_type == 'resnet-50'
opt.outf = opt.outf.format(**vars(opt))
os.makedirs(opt.outf, exist_ok=True)
# save options
with open(os.path.join(opt.outf, 'optE.yml'), 'w') as f:
yaml.dump(vars(opt), f, default_flow_style=False)
train(opt)
```
#### File: GenRep/online_learning/main_supcon_xavi_online.py
```python
from __future__ import print_function
import os
import sys
import numpy as np
import argparse
import time
import math
import tensorboard_logger as tb_logger
import torch
import torch.backends.cudnn as cudnn
from torchvision import transforms, datasets
from util import TwoCropTransform, AverageMeter, GansetDataset, OnlineGansetDataset
from util import adjust_learning_rate, warmup_learning_rate
from util import set_optimizer, save_model
from networks.resnet_big import SupConResNet
from losses import SupConLoss
import ipdb
try:
import apex
from apex import amp, optimizers
except ImportError:
pass
def parse_option():
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('--print_freq', type=int, default=10,
help='print frequency')
parser.add_argument('--save_freq', type=int, default=20,
help='save frequency')
parser.add_argument('--batch_size', type=int, default=256,
help='batch_size')
parser.add_argument('--num_workers', type=int, default=16,
help='num of workers to use')
parser.add_argument('--epochs', type=int, default=200,
help='number of training epochs')
# optimization
parser.add_argument('--learning_rate', type=float, default=0.03,
help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default='120,160',
help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.1,
help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
# model dataset
parser.add_argument('--model', type=str, default='resnet50')
parser.add_argument('--dataset', type=str, default='biggan',
choices=['biggan', 'cifar10', 'cifar100', 'imagenet100', 'imagenet100K', 'imagenet'], help='dataset')
# method
parser.add_argument('--method', type=str, default='SimCLR',
choices=['SupCon', 'SimCLR'], help='choose method')
parser.add_argument('--ganrndwalk', action='store_true', help='augment by walking in the gan latent space')
parser.add_argument('--online', action='store_true', help='whether we are generating the samples online')
parser.add_argument('--walktype', type=str, help='how should we random walk on latent space',
choices=['gaussian', 'uniform'])
parser.add_argument('--zstd', type=float, default=1.0, help='augment std away from z')
parser.add_argument('--uniformb', type=float, default=1.0, help='bound of the uniform walk away from z')
# temperature
parser.add_argument('--temp', type=float, default=0.1,
help='temperature for loss function')
# other setting
parser.add_argument('--cosine', action='store_true', help='using cosine annealing')
parser.add_argument('--syncBN', action='store_true',
help='using synchronized batch normalization')
parser.add_argument('--warm', action='store_true',
help='warm-up for large batch training')
parser.add_argument('--trial', type=str, default='0',
help='id for recording multiple runs')
# specifying folders
parser.add_argument('-d', '--data_folder', type=str,
default='/data/scratch-oc40/jahanian/ganclr_results/ImageNet100',
help='the data folder')
parser.add_argument('-s', '--cache_folder', type=str,
default='/data/scratch-oc40/jahanian/ganclr_results/',
help='the saving folder')
opt = parser.parse_args()
# set the path according to the environment
opt.data_folder = opt.data_folder
opt.model_path = os.path.join(opt.cache_folder, 'SupCon/{}_models'.format(opt.dataset))
opt.tb_path = os.path.join(opt.cache_folder, 'SupCon/{}_tensorboard'.format(opt.dataset))
iterations = opt.lr_decay_epochs.split(',')
opt.lr_decay_epochs = list([])
for it in iterations:
opt.lr_decay_epochs.append(int(it))
if opt.ganrndwalk:
if opt.walktype == 'gaussian':
walk_type = 'zstd_{}'.format(opt.zstd)
elif opt.walktype == 'uniform':
walk_type = 'uniform_{}'.format(opt.uniformb)
opt.model_name = '{}_{}_ganrndwalkonline_{}_{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'.\
format(opt.method, opt.dataset, walk_type, opt.model, opt.learning_rate,
opt.weight_decay, opt.batch_size, opt.temp, opt.trial)
else:
opt.model_name = '{}_{}_{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'.\
format(opt.method, opt.dataset, opt.model, opt.learning_rate,
opt.weight_decay, opt.batch_size, opt.temp, opt.trial)
if opt.cosine:
opt.model_name = '{}_cosine'.format(opt.model_name)
# warm-up for large-batch training,
if opt.batch_size > 256:
opt.warm = True
if opt.warm:
opt.model_name = '{}_warm'.format(opt.model_name)
opt.warmup_from = 0.01
opt.warm_epochs = 10
if opt.cosine:
eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)
opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (
1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2
else:
opt.warmup_to = opt.learning_rate
opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)
if not os.path.isdir(opt.tb_folder):
os.makedirs(opt.tb_folder)
opt.save_folder = os.path.join(opt.model_path, opt.model_name)
if not os.path.isdir(opt.save_folder):
os.makedirs(opt.save_folder)
if opt.dataset == 'biggan' or opt.dataset == 'imagenet100' or opt.dataset == 'imagenet100K' or opt.dataset == 'imagenet':
# or 256 as you like
opt.img_size = 128
elif opt.dataset == 'cifar10' or opt.dataset == 'cifar100':
opt.img_size = 32
return opt
def set_loader(opt):
# construct data loader
if opt.dataset == 'cifar10':
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
elif opt.dataset == 'cifar100':
mean = (0.5071, 0.4867, 0.4408)
std = (0.2675, 0.2565, 0.2761)
elif opt.dataset == 'biggan' or opt.dataset == 'imagenet100' or opt.dataset == 'imagenet100K' or opt.dataset == 'imagenet':
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
else:
raise ValueError('dataset not supported: {}'.format(opt.dataset))
normalize = transforms.Normalize(mean=mean, std=std)
train_transform = transforms.Compose([
transforms.RandomResizedCrop(size=opt.img_size, scale=(0.2, 1.)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
normalize,
])
train_dataset = OnlineGansetDataset(root_dir=os.path.join(opt.data_folder, 'train'), neighbor_std=opt.zstd,
transform=train_transform, walktype='gaussian', device_id=1)
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=opt.batch_size, shuffle=(train_sampler is None),
num_workers=opt.num_workers, pin_memory=True, sampler=train_sampler)
return train_loader
def set_model(opt):
model = SupConResNet(name=opt.model, img_size=opt.img_size)
criterion = SupConLoss(temperature=opt.temp)
# enable synchronized Batch Normalization
if opt.syncBN:
model = apex.parallel.convert_syncbn_model(model)
if torch.cuda.is_available():
if torch.cuda.device_count() > 1:
model.encoder = torch.nn.DataParallel(model.encoder)
model = model.cuda()
criterion = criterion.cuda()
cudnn.benchmark = True
return model, criterion
def train(train_loader, model, criterion, optimizer, epoch, opt, logger=None):
"""one epoch training"""
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
# for idx, (images, labels) in enumerate(train_loader):
# data_time.update(time.time() - end)
for idx, data_w in enumerate(train_loader):
z, dz, one_hot_index, labels = data_w
data_time.update(time.time() - end)
images_z, im_orig_z = train_loader.dataset.gen_images_transform(z, one_hot_index)
images_dz, im_orig_dz = train_loader.dataset.gen_images_transform(dz, one_hot_index)
images = torch.cat([images_z.unsqueeze(1), images_dz.unsqueeze(1)],
dim=1)
images = images.view(-1, 3, opt.img_size, opt.img_size).cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
bsz = labels.shape[0]
if idx % 1000 == 0:
images_cat = [np.concatenate([np.array(im1), np.array(im2)], 1) for im1, im2 in zip(im_orig_z, im_orig_dz)]
logger.log_images('im_anchor', images_cat[:2], step=epoch)
# warm-up learning rate
warmup_learning_rate(opt, epoch, idx, len(train_loader), optimizer)
# compute loss
features = model(images)
features = features.view(bsz, 2, -1)
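# features is regrouped to (bsz, n_views=2, feat_dim): view 0 is the embedding of the
# anchor z and view 1 that of its latent-walk neighbor dz, the (batch, n_views, dim)
# layout that SupConLoss consumes below.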
if opt.method == 'SupCon':
loss = criterion(features, labels)
elif opt.method == 'SimCLR':
loss = criterion(features)
else:
raise ValueError('contrastive method not supported: {}'.
format(opt.method))
# update metric
losses.update(loss.item(), bsz)
# SGD
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print info
if (idx + 1) % opt.print_freq == 0:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
sys.stdout.flush()
return losses.avg
def main():
opt = parse_option()
# build data loader
train_loader = set_loader(opt)
# build model and criterion
model, criterion = set_model(opt)
# build optimizer
optimizer = set_optimizer(opt, model)
# tensorboard
logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)
# training routine
for epoch in range(1, opt.epochs + 1):
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
time1 = time.time()
loss = train(train_loader, model, criterion, optimizer, epoch, opt, logger)
time2 = time.time()
print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))
# tensorboard logger
logger.log_value('loss', loss, epoch)
logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)
if epoch % opt.save_freq == 0:
save_file = os.path.join(
opt.save_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
save_model(model, optimizer, opt, epoch, save_file)
# save the last model
save_file = os.path.join(
opt.save_folder, 'last.pth')
save_model(model, optimizer, opt, opt.epochs, save_file)
if __name__ == '__main__':
main()
```
#### File: GenRep/utils/generate_dataset_biggan.py
```python
import torch
from pytorch_pretrained_biggan import (
BigGAN,
truncated_noise_sample,
one_hot_from_int
)
import PIL.Image
import numpy as np
import os
import argparse
from tqdm import tqdm
import json
def convert_to_images(obj):
""" Convert an output tensor from BigGAN in a list of images.
"""
# need to fix import, see: https://github.com/huggingface/pytorch-pretrained-BigGAN/pull/14/commits/68a7446951f0b9400ebc7baf466ccc48cdf1b14c
if not isinstance(obj, np.ndarray):
obj = obj.detach().numpy()
obj = obj.transpose((0, 2, 3, 1))
obj = np.clip(((obj + 1) / 2.0) * 256, 0, 255)
img = []
for i, out in enumerate(obj):
out_array = np.asarray(np.uint8(out), dtype=np.uint8)
img.append(PIL.Image.fromarray(out_array))
return img
def sample(opt):
output_path = (os.path.join(opt.out_dir, 'biggan%dtr%d-%s_100classes' %
(opt.size, int(opt.truncation), opt.imformat)))
partition = opt.partition
# start_seed, nimg = constants.get_seed_nimg(partition)
start_seed = opt.start_seed
nimg = opt.num_imgs
model_name = 'biggan-deep-%s' % opt.size
truncation = opt.truncation
imformat = opt.imformat
batch_size = opt.batch_size
with open('./imagenet_class_index.json', 'rb') as fid:
imagenet_class_index_dict = json.load(fid)
list100 = os.listdir('/data/scratch-oc40/jahanian/ganclr_results/ImageNet100/train')
model = BigGAN.from_pretrained(model_name).cuda()
imagenet_class_index_keys = imagenet_class_index_dict.keys()
for key in imagenet_class_index_keys:
if imagenet_class_index_dict[key][0] not in list100:
continue
class_dir_name = os.path.join(output_path, partition, imagenet_class_index_dict[key][0])
os.makedirs(class_dir_name, exist_ok=True)
idx = int(key)
print('Generating images for class {}'.format(idx))
class_vector = one_hot_from_int(idx, batch_size=nimg)
seed = start_seed + idx
noise_vector = truncated_noise_sample(truncation=truncation,
batch_size=nimg,
seed=seed)
class_vector = torch.from_numpy(class_vector).cuda()
noise_vector = torch.from_numpy(noise_vector).cuda()
for batch_start in range(0, nimg, batch_size):
s = slice(batch_start, min(nimg, batch_start + batch_size))
with torch.no_grad():
output = model(noise_vector[s], class_vector[s], truncation)
output = output.cpu()
ims = convert_to_images(output)
for i, im in enumerate(ims):
im.save(os.path.join(class_dir_name, 'seed%04d_sample%05d.%s' % (seed, batch_start+i, imformat)))
if __name__ == '__main__':
parser = argparse.ArgumentParser("Sample from biggan")
parser.add_argument('--out_dir', default='/data/scratch-oc40/jahanian/ganclr_results/', type=str)
parser.add_argument('--partition', default='train', type=str)
parser.add_argument('--truncation', default=1.0, type=float)
parser.add_argument('--size', default=256, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--imformat', default='png', type=str)
parser.add_argument('--num_imgs', default=1300, type=int, help='num imgs per class')
parser.add_argument('--start_seed', default=0, type=int)
opt = parser.parse_args()
sample(opt)
```
#### File: GenRep/utils/generate_dataset_biggan_steer.py
```python
import torch
import tqdm
from pytorch_pretrained_biggan import (
BigGAN,
truncated_noise_sample,
one_hot_from_int
)
import PIL.Image
import numpy as np
import os
import argparse
from tqdm import tqdm
import json
import pickle
from scipy.stats import truncnorm
import random
import pixel_transformations
import oyaml as yaml
def convert_to_images(obj):
""" Convert an output tensor from BigGAN in a list of images.
"""
# need to fix import, see: https://github.com/huggingface/pytorch-pretrained-BigGAN/pull/14/commits/68a7446951f0b9400ebc7baf466ccc48cdf1b14c
if not isinstance(obj, np.ndarray):
obj = obj.detach().numpy()
obj = obj.transpose((0, 2, 3, 1))
obj = np.clip(((obj + 1) / 2.0) * 256, 0, 255)
img = []
for i, out in enumerate(obj):
out_array = np.asarray(np.uint8(out), dtype=np.uint8)
img.append(PIL.Image.fromarray(out_array))
return img
def sample(opt):
model_name = opt.model_name
output_path = opt.output_path
partition = opt.partition
# start_seed, nimg = constants.get_seed_nimg(partition)
start_seed = opt.start_seed
nimg = opt.num_imgs
model_name = 'biggan-deep-%s' % opt.size
truncation = opt.truncation
imformat = opt.imformat
batch_size = opt.batch_size
# with open('./imagenet_class_index.json', 'rb') as fid:
with open('../data/imagenet25_labels.json', 'rb') as fid:
imagenet_class_index_dict = json.load(fid)
imagenet_class_index_keys = list(imagenet_class_index_dict.keys())
print('Loading the model ...')
model = BigGAN.from_pretrained(model_name)
if torch.cuda.device_count() > 1:
print('Using 2 gpus for G')
model = torch.nn.DataParallel(model,device_ids=[0,1])
model.to('cuda')
# load pretrained walk
walk_composed_final_path = ('./walk_weights_biggan_deep/w_composed_final.pth')
walk_composed_final = torch.load(walk_composed_final_path)
walk_color = walk_composed_final['walk_color'].to('cuda')
walk_rot3d = walk_composed_final['walk_rot3d'].to('cuda')
walk_rot2d = walk_composed_final['walk_rot2d'].to('cuda')
walk_zoom = walk_composed_final['walk_zoom'].to('cuda')
walk_shiftx = walk_composed_final['walk_shiftx'].to('cuda')
walk_shifty = walk_composed_final['walk_shifty'].to('cuda')
# transforms
rot3d_transform = pixel_transformations.Rot3dTransform()
rot2d_transform = pixel_transformations.Rot2dTransform()
zxy_transform = pixel_transformations.ZoomShiftXYTransform()
color_transform = pixel_transformations.ColorTransform()
random.shuffle(imagenet_class_index_keys)
for key in tqdm(imagenet_class_index_keys):
class_dir_name = os.path.join(output_path, partition, imagenet_class_index_dict[key][0])
if os.path.isdir(class_dir_name):
continue
os.makedirs(class_dir_name, exist_ok=True)
idx = int(key)
z_dict = dict()
print('Generating images for class {}'.format(idx))
class_vectors = one_hot_from_int(idx, batch_size=nimg)
class_vectors = torch.from_numpy(class_vectors).to('cuda')
seed = start_seed + idx
noise_vectors = truncated_noise_sample(truncation=truncation, batch_size=nimg, seed=seed)
noise_vectors = torch.from_numpy(noise_vectors).to('cuda')
for batch_start in range(0, nimg, batch_size):
s = slice(batch_start, min(nimg, batch_start + batch_size))
ys = class_vectors[s]
zs = noise_vectors[s]
tbs = zs.shape[0]
# get anchors
with torch.no_grad():
out_anchors = model(zs, ys, truncation)
out_anchors = out_anchors.cpu()
ims_anchors = convert_to_images(out_anchors)
# get neighbors
for ii in range(opt.num_neighbors):
# alphas for transforms
# 3D
_, alphas_rot3d_graph = rot3d_transform.get_alphas(tbs)
alphas_rot3d_graph = torch.tensor(alphas_rot3d_graph, device='cuda', dtype=torch.float32)
# 2D
_, alphas_rot2d_graph = rot2d_transform.get_alphas(tbs)
alphas_rot2d_graph = torch.tensor(alphas_rot2d_graph, device='cuda', dtype=torch.float32)
# Zoom, shiftx, shifty
alphas_zxy = zxy_transform.get_alphas(tbs)
alphas_zoom_graph = torch.tensor(alphas_zxy[1], device='cuda', dtype=torch.float32)
alphas_shiftx_graph = torch.tensor(alphas_zxy[3], device='cuda', dtype=torch.float32)
alphas_shifty_graph = torch.tensor(alphas_zxy[5], device='cuda', dtype=torch.float32)
# Color
_, alphas_color_graph = color_transform.get_alphas(tbs)
alphas_color_graph = torch.tensor(alphas_color_graph, device='cuda', dtype=torch.float32)
# generate neighbors
z_new = zs + 5*alphas_rot3d_graph * walk_rot3d + alphas_rot2d_graph * walk_rot2d + \
alphas_zoom_graph * walk_zoom + alphas_shiftx_graph * walk_shiftx + alphas_shifty_graph * walk_shifty
for c in range(color_transform.num_channels):
z_new = z_new + alphas_color_graph[:,c].unsqueeze(1) * walk_color[:,:,c]
with torch.no_grad():
out_neighbors = model(z_new, ys, truncation)
out_neighbors = out_neighbors.cpu()
ims_neighbors = convert_to_images(out_neighbors)
# save anchor and its neighbors
# save anchors
for b in range(tbs):
if ii == 0:
im = ims_anchors[b]
im_name = 'seed%04d_sample%05d_anchor.%s' % (seed, batch_start+b, imformat)
im.save(os.path.join(class_dir_name, im_name))
z_dict[im_name] = [zs[b].cpu().numpy(), idx]
im = ims_neighbors[b]
im_name = 'seed%04d_sample%05d_neighbor_%d.%s' % (seed, batch_start+b, ii, imformat)
im.save(os.path.join(class_dir_name, im_name))
z_dict[im_name] = [z_new[b].detach().cpu().numpy(), idx]
with open(os.path.join(class_dir_name, 'z_dataset.pkl'), 'wb') as fid:
pickle.dump(z_dict,fid)
pix_transforms_alphas_dict = {'rot3d_alpha_max': rot3d_transform.alpha_max,
'rot2d_alpha_max': rot2d_transform.alpha_max,
'zoom_alpha_max': zxy_transform.alpha_max_zoom,
'shiftxy_alpha_max': zxy_transform.alpha_max_shift,
'color_alpha_max': color_transform.alpha_max}
return pix_transforms_alphas_dict
if __name__ == '__main__':
parser = argparse.ArgumentParser("Sample from biggan")
parser.add_argument('--out_dir', default='/eva_data/zchin/steer_img', type=str)
parser.add_argument('--partition', default='train', type=str)
parser.add_argument('--truncation', default=1.0, type=float)
parser.add_argument('--size', default=256, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--imformat', default='png', type=str)
parser.add_argument('--num_imgs', default=1300, type=int, help='num imgs per class')
parser.add_argument('--start_seed', default=0, type=int)
parser.add_argument('--num_neighbors', default=20, type=int, help='num samples per anchor')
parser.add_argument('--desc', default='steer_pth_imagenet100', type=str, help='this will be the tag of this specfic dataset, added to the end of the dataset name')
opt = parser.parse_args()
print(opt)
model_name = 'biggan-deep-%s' % opt.size
output_path = (os.path.join(opt.out_dir, '{}_tr{}_{}_N{}'.format(model_name,
opt.truncation,
opt.desc,
opt.num_neighbors)))
parser.add_argument('--model_name', default=model_name)
parser.add_argument('--output_path', default=output_path)
opt = parser.parse_args()
print(opt)
pix_transforms_alphas_dict = sample(opt)
with open(os.path.join(opt.output_path, opt.partition, 'steer_alphas_config.yml'), 'w') as fid:
yaml.dump(pix_transforms_alphas_dict, fid, default_flow_style=False)
```
#### File: GenRep/utils/vis_html.py
```python
from os import listdir
import pdb
from os.path import isfile, join, basename
import numpy as np
import random
def make_html(home_dir):
random.seed(0)
std_list = ['0.1', '0.2', '0.5', '1.0']
try:
index_html_path = join(home_dir, "index.html")
print('Saving index file in', index_html_path)
fid = open(index_html_path, 'w', encoding = 'utf-8')
fid.write('<table style="text-align:center;">')
header_str = '<tr><td>anchor filename</td><td>anchor</td>'
for i in range(len(std_list)):
header_str += '<td>std_'+std_list[i]+'</td>'
header_str += '</tr>'
fid.write(header_str)
dir_list = sorted(listdir(home_dir))
#random.shuffle(dir_list)
# for ii in range(len(dir_list)):
for ii in range(min(100, len(dir_list))):
print(ii)
images_dir = join(home_dir, dir_list[ii])
if isfile(images_dir):
continue
file_names = sorted([f for f in listdir(images_dir) if join(images_dir, f).endswith('_anchor.png')])
file_names = file_names[:3]
for i in range(len(file_names)):
fid.write('<tr>')
anchor_file_name = join('.', basename(images_dir), file_names[i])
fid.write('<td>' + anchor_file_name + '</td>')
fid.write('<td><a href="' + anchor_file_name + '"><img src="' +
anchor_file_name + '"/></a></td>')
for j in range(len(std_list)):
std_file_name = anchor_file_name.replace('anchor', std_list[j])
fid.write('<td><a href="' + std_file_name + '"><img src="' +
std_file_name + '"/></a></td>')
fid.write('</tr>')
fid.write('</table>')
finally:
fid.close()
make_html('/data/vision/torralba/scratch/xavierpuig/ganclr_results/gan_samples/biggan256tr1-png_steer_rndball_100/train')
make_html('/data/vision/torralba/scratch/xavierpuig/ganclr_results/gan_samples/biggan256tr1-png_steer_rndgaussian_100/train')
# make_html('./utils/val')
``` |
{
"source": "joycenerd/image-super-resolution",
"score": 2
} |
#### File: SRFBN_CVPR19/scripts/Prepare_TrainData_HR_LR.py
```python
from glob import glob
from flags import *
import os
# NOTE: scipy.misc.imresize/imrotate/imsave were removed from newer SciPy releases;
# this script assumes an older SciPy (< 1.2) with Pillow installed.
from scipy import misc
import numpy as np
import datetime
import imageio
from multiprocessing.dummy import Pool as ThreadPool
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--save-dir', type=str, default='/eva_data/zchin/srfbn_data',
help='save directory of images after pre-processing')
parser.add_argument('--dataroot', type=str, default='/eva_data/zchin/vrdl_hw4_data', help='raw hr training images')
parser.add_argument('--mode', type=str, default='train', help='all_train, train or val')
args = parser.parse_args()
starttime = datetime.datetime.now()
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
save_HR_path = os.path.join(args.save_dir, args.mode, 'HR_x3')
save_LR_path = os.path.join(args.save_dir, args.mode, 'LR_x3')
os.makedirs(save_HR_path, exist_ok=True)
os.makedirs(save_LR_path, exist_ok=True)
train_HR_dir = os.path.join(args.dataroot, args.mode)
file_list = sorted(glob(os.path.join(train_HR_dir, '*.png')))
# print(file_list)
HR_size = [100, 0.8, 0.7, 0.6, 0.5]
def save_HR_LR(img, size, path, idx):
HR_img = misc.imresize(img, size, interp='bicubic')
HR_img = modcrop(HR_img, 3)
rot180_img = misc.imrotate(HR_img, 180)
x4_img = misc.imresize(HR_img, 1 / 3, interp='bicubic')
x4_rot180_img = misc.imresize(rot180_img, 1 / 3, interp='bicubic')
img_path = path.split('/')[-1].split('.')[0] + '_rot0_' + 'ds' + str(idx) + '.png'
rot180img_path = path.split('/')[-1].split('.')[0] + '_rot180_' + 'ds' + str(idx) + '.png'
x4_img_path = path.split('/')[-1].split('.')[0] + '_rot0_' + 'ds' + str(idx) + '.png'
x4_rot180img_path = path.split('/')[-1].split('.')[0] + '_rot180_' + 'ds' + str(idx) + '.png'
misc.imsave(save_HR_path + '/' + img_path, HR_img)
misc.imsave(save_HR_path + '/' + rot180img_path, rot180_img)
misc.imsave(save_LR_path + '/' + x4_img_path, x4_img)
misc.imsave(save_LR_path + '/' + x4_rot180img_path, x4_rot180_img)
def modcrop(image, scale=3):
if len(image.shape) == 3:
h, w, _ = image.shape
h = h - np.mod(h, scale)
w = w - np.mod(w, scale)
image = image[0:h, 0:w, :]
else:
h, w = image.shape
h = h - np.mod(h, scale)
w = w - np.mod(w, scale)
image = image[0:h, 0:w]
return image
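# modcrop trims height and width to multiples of `scale` so that the 1/3 bicubic
# downscale yields an LR image whose size is exactly HR_size / 3, keeping the saved
# HR/LR pairs aligned without rounding mismatches.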
def main(path):
print('Processing-----{}/0800'.format(path.split('/')[-1].split('.')[0]))
img = imageio.imread(path)
idx = 0
for size in HR_size:
save_HR_LR(img, size, path, idx)
idx += 1
items = file_list
pool = ThreadPool()
pool.map(main, items)
pool.close()
pool.join()
endtime = datetime.datetime.now()
print((endtime - starttime).seconds)
``` |
{
"source": "joycenerd/Machine_Learning_2021",
"score": 3
} |
#### File: Machine_Learning_2021/HW5/gaussian_process.py
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
INPUT_SIZE = 34
DATA_PATH = "./data/"
BETA = 5
RESULT_PATH = "./results/"
def get_kernel(x, x_prime, theta):
# rational quadratic kernel: sigma^2*(1+(x-x_prime)^2/(2*alpha*length_scale^2))^(-alpha)
sigma, alpha, length_scale = theta
x = x.reshape(-1, 1)
x_prime = x_prime.reshape(1, -1)
sq_dist = (x-x_prime)*(x-x_prime)
kernel = sigma**2*np.power(1+sq_dist/(2*alpha*length_scale**2), -alpha)
return kernel
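# Illustrative sanity check: k(x, x) = sigma**2 for any x, e.g.
#   get_kernel(np.array([1.0]), np.array([1.0]), (2.0, 1.0, 1.0)) -> [[4.0]];
# as alpha grows large the rational quadratic kernel approaches an RBF kernel with the
# same sigma and length scale.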
def predict(test_x, train_x, train_y, theta):
kernel = get_kernel(train_x, train_x, theta) # k(x,x)
C = kernel + 1/BETA*np.eye(INPUT_SIZE)
k_x_xstar = get_kernel(train_x, test_x, theta) # k(x,x*)
k_xstar_xstar = get_kernel(test_x, test_x, theta) # k(x*,x*)
# predictive distribution
# predictive_mean=k(x,x*)^T*C^(-1)*y
pred_mean = [email protected](C)@train_y
pred_mean = pred_mean.reshape(-1)
# predictive_variance=k(x*,x*)-k(x,x*)^T*C^(-1)*k(x,x*)
pred_var = [email protected](C)@k_x_xstar
pred_var = np.sqrt(np.diag(pred_var))
return pred_mean, pred_var
def get_log_likelihood(theta, *args):
train_x, train_y = args
kernel = get_kernel(train_x, train_x, theta)
C = kernel + 1/BETA*np.eye(INPUT_SIZE)
# log(p(y|X)) = min( 0.5 * (y.T*C^(-1)*y + log(det(C)) + N*log(2*pi)))
log_likelihood = [email protected](C)@train_y+np.sum(
np.log(np.diagonal(np.linalg.cholesky(kernel))))+INPUT_SIZE*np.log(2*np.pi)
log_likelihood /= 2.0
return log_likelihood
if __name__ == "__main__":
train_x = np.zeros(INPUT_SIZE)
train_y = np.zeros(INPUT_SIZE)
data = open(DATA_PATH+"input.data")
for i, coordinate in enumerate(data):
train_x[i], train_y[i] = coordinate.strip("\n").split()
test_x = np.linspace(-60, 60, 500)
theta = np.ones(3)
pred_mean, pred_var = predict(test_x, train_x, train_y, theta)
# plot the result
plt.figure(figsize=(10, 10))
plt.scatter(train_x, train_y)
plt.plot(test_x, pred_mean)
plt.fill_between(test_x, pred_mean+2*pred_var,
pred_mean-2*pred_var, alpha=0.3)
plt.title(
f"Initial Gaussian Process sigma={theta[0]}, alpha={theta[1]}, length scale={theta[2]}")
plt.savefig(RESULT_PATH+"initial_gaussian_process.jpg")
plt.show()
# Optimize the kernel parameters
x0 = np.ones(3)
opt_param = scipy.optimize.minimize(
get_log_likelihood, args=(train_x, train_y), x0=x0, method='CG').x
pred_mean, pred_var = predict(test_x, train_x, train_y, opt_param)
# plot the result
plt.figure(figsize=(10, 10))
plt.scatter(train_x, train_y)
plt.plot(test_x, pred_mean)
plt.fill_between(test_x, pred_mean+2*pred_var,
pred_mean-2*pred_var, alpha=0.3)
plt.title(
f"Optimize Gaussian Process sigma={opt_param[0]}, alpha={opt_param[1]}, length scale={opt_param[2]}")
plt.savefig(RESULT_PATH+"optimize_gaussian_process.jpg")
plt.show()
```
#### File: Machine_Learning_2021/HW6/kernel_K_means.py
```python
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
from glob import glob
import argparse
import time
import os
parser = argparse.ArgumentParser()
parser.add_argument("--clusters", type=int, default=4, help="Number of clusters")
parser.add_argument("--gamma-s", type=float, default=2.5, help="hyperparameter gamma_s in the kernel")
parser.add_argument("--gamma-c", type=float, default=2.5, help="hyperparameter gamma_c in the kernel")
parser.add_argument("--iterations", type=int, default=50, help="Maximum iteration for K-means")
parser.add_argument("--init-mode", type=str, default="k-means++", help="initialize cluster mode")
parser.add_argument("--kernel-type", type=str, default="tanh", help="kernel function type")
parser.add_argument("--kappa", type=float, default=0.25, help="kappa value for hyperbolic tangent kernal")
parser.add_argument("--c", type=float, default=0.3, help="constant value for hyperbolic tangent kernel")
args = parser.parse_args()
print("".join(f"{k}={v}\n" for k, v in vars(args).items()))
DATA_PATH = "./data/"
SAVE_PATH = "./results/"
def get_kernel(img, h, w):
img = img.reshape(h * w, 3)
img = img / 255.0
coor = []
for i in range(w):
for j in range(h):
coor.append([i, j])
coor = np.array(coor, dtype=float)
coor = coor / 100.0
if args.kernel_type == "rbf":
pix_dist = cdist(img, img, "sqeuclidean")
spatial_dist = cdist(coor, coor, "sqeuclidean")
# e^-gamma_s*spatial_dist x e^-gamma_c*color_dist
g_s = args.gamma_s
g_c = args.gamma_c
gram_matrix = np.multiply(np.exp(-g_s * spatial_dist), np.exp(-g_c * pix_dist))
elif args.kernel_type == "tanh":
kappa = args.kappa
c = args.c
# tanh(kappa*xi*xj+c)
pix_dist = np.tanh(kappa * img @ img.T + c)
spatial_dist = np.tanh(kappa * coor @ coor.T + c)
gram_matrix = np.multiply(pix_dist, spatial_dist)
return gram_matrix
def init_cluster(h, w, img):
if args.init_mode == "random":
cluster = np.random.randint(args.clusters, size=h * w)
elif args.init_mode == "nearest_neighbor":
coor = []
for i in range(w):
for j in range(h):
coor.append([i, j])
coor = np.array(coor, dtype=float)
coor = coor / 100.0
center = np.random.choice(h * w, size=args.clusters)
center_idx = coor[center]
dist = cdist(center_idx, coor, metric="sqeuclidean")
cluster = np.argmin(dist, axis=0)
elif args.init_mode == "k-means++":
# 1. Choose one center uniformly at random among the data points.
# 2. For each data point x not chosen yet, compute D(x), the distance between x and the nearest center that has already been chosen.
# 3. Choose one new data point at random as a new center, using a weighted probability distribution where a point x is chosen with probability proportional to D(x)2.
# 4. Repeat Steps 2 and 3 until k centers have been chosen.
img = img.reshape(-1, 3)
img = img / 255.0
first_mean = np.random.choice(h * w, size=1)
center = np.full(args.clusters, first_mean, dtype=int)
center_val = img[center]
for i in range(1, args.clusters):
dist = cdist(center_val, img, metric="sqeuclidean")
min_dist = np.min(dist, axis=0)
center[i] = np.random.choice(h * w, size=1, p=min_dist ** 2 / np.sum(min_dist ** 2))
center_val = img[center]
dist = cdist(center_val, img, metric="sqeuclidean")
cluster = np.argmin(dist, axis=0)
return cluster
def run(h, w, gram_matrix, img):
K = args.clusters
all_alpha = []
# initialize the clusters
alpha = init_cluster(h, w, img)
all_alpha.append(alpha.reshape(h, w))
# Kernel K-means
for iter in range(1, args.iterations + 1):
first_term = np.diag(gram_matrix).reshape(-1, 1)
# 2/|C_k|*sum_n(alpha_kn*k(xj,xn))
C = np.zeros(K, dtype=float)
for i in range(K):
C[i] = np.count_nonzero(alpha == i)
if C[i] == 0:
C[i] = 1
second_term = np.zeros((h * w, K), dtype=float)
for k in range(K):
second_term[:, k] = np.sum(gram_matrix[:, alpha == k], axis=1)
second_term *= -2.0 / C
# 1/|C_k|^2 alpha_kp*alpha_kq*k(xp,xq)
third_term = np.zeros(K, dtype=float)
for k in range(K):
third_term[k] = np.sum(gram_matrix[alpha == k, :][:, alpha == k])
third_term = third_term / (C ** 2)
new_alpha = np.argmin(first_term + second_term + third_term, axis=1)
all_alpha.append(new_alpha)
if np.array_equal(alpha, new_alpha):
print(f"Converge in {iter}th iterations!")
break
alpha = new_alpha
# print(f"Iteration #{iter} complete...")
return all_alpha
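# For reference, the assignment above evaluates the kernelized squared distance
#   ||phi(x_j) - mu_k||^2 = k(x_j, x_j) - (2/|C_k|) * sum_{n in C_k} k(x_j, x_n)
#                           + (1/|C_k|^2) * sum_{p,q in C_k} k(x_p, x_q)
# (first_term / second_term / third_term above); the k(x_j, x_j) term is constant over
# clusters, so it does not affect the argmin but is kept for readability.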
def plot_result(all_alpha, img_name):
save_dir = SAVE_PATH + img_name
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
color = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0]], dtype=float)
imgs = []
for i in range(len(all_alpha)):
out_img = color[all_alpha[i]]
out_img = out_img.reshape((h, w, 3))
plt.imsave(f"{save_dir}/{img_name}_{i}.png", out_img)
imgs.append(Image.fromarray(np.uint8(out_img * 255)))
video_path = SAVE_PATH + "video/" + img_name + ".gif"
imgs[0].save(video_path, format="GIF", append_images=imgs[1:], loop=0, save_all=True, duration=300)
if __name__ == "__main__":
start_time = time.time()
for img_path in glob(DATA_PATH + "*"):
print(f"Start processing {img_path}")
img = Image.open(img_path)
img_path = os.path.normpath(img_path)
path_list = img_path.split(os.sep)
img_name = path_list[-1][:-4]
img = np.array(img)
h, w, _ = img.shape
gram_matrix = get_kernel(img, h, w)
print("Get gram matrix complete...")
all_alpha = run(h, w, gram_matrix, img)
img_name += f"_k{args.clusters}_{args.init_mode}_{args.kernel_type}"
plot_result(all_alpha, img_name)
print("Plotting complete...")
print(f"--- {time.time()-start_time} seconds ---")
```
#### File: Machine_Learning_2021/HW6/spectral_clustering.py
```python
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from numpy.linalg import eig
from PIL import Image
import numpy as np
import argparse
import glob
import time
import os
parser = argparse.ArgumentParser()
parser.add_argument("--kernel-type", type=str,
default="rbf", help="kernel function type")
parser.add_argument("--gamma-s", type=float, default=2.5,
help="hyperparameter gamma_s in the rbf kernel")
parser.add_argument("--gamma-c", type=float, default=2.5,
help="hyperparameter gamma_c in the rbf kernel")
parser.add_argument("--sigma", type=float, default=0.1,
help="Sigma value for Laplace rbf kernel")
parser.add_argument("--cut", type=str, default="normalized",
help="ratio or normalized cut")
parser.add_argument("--K", type=int, default=2, help="number of clusters")
parser.add_argument("--init-mode", type=str, default="k-means++",
help="initialize cluster mode")
parser.add_argument("--iterations", type=str, default=50,
help="Maximum iterations for K-means to run")
args = parser.parse_args()
print("".join(f"{k}={v}\n" for k, v in vars(args).items()))
DATA_PATH = "./data/"
SAVE_PATH = "./results/"
def get_kernel(img, h, w):
img = img.reshape(h * w, 3)
img = img / 255.0
coor = []
for i in range(w):
for j in range(h):
coor.append([i, j])
coor = np.array(coor, dtype=float)
coor = coor / 100.0
if args.kernel_type == "rbf":
pix_dist = cdist(img, img, "sqeuclidean")
spatial_dist = cdist(coor, coor, "sqeuclidean")
# e^-gamma_s*spatial_dist x e^-gamma_c*color_dist
g_s = args.gamma_s
g_c = args.gamma_c
gram_matrix = np.multiply(
np.exp(-g_s * spatial_dist), np.exp(-g_c * pix_dist))
elif args.kernel_type == "Laplace_rbf":
sigma = args.sigma
pix_dist = cdist(img, img, metric="minkowski", p=1)
spatial_dist = cdist(coor, coor, metric="minkowski", p=1)
gram_matrix = np.multiply(
np.exp(-1 / sigma * spatial_dist), np.exp(-1/sigma * pix_dist))
return gram_matrix
def get_img_name(img_path):
img_path = os.path.normpath(img_path)
path_list = img_path.split(os.sep)
img_name = path_list[-1][:-4]
return img_name
def get_graph_Laplacian(W):
cut_type = args.cut
d = np.sum(W, axis=1)
D = np.diag(d) # degree matrix D=[dii]
if cut_type == "ratio":
L = D - W
elif cut_type == "normalized":
L = np.sqrt(D) @ (D - W) @ np.sqrt(D)
return L
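# Reference for the two branches above: minimizing RatioCut relaxes to the smallest
# eigenvectors of the unnormalized Laplacian L = D - W, while minimizing Ncut relaxes
# to those of the symmetric normalized Laplacian L_sym = D^(-1/2) (D - W) D^(-1/2);
# eigen_decomposition() below row-normalizes the eigenvector matrix in the normalized
# case, following Ng-Jordan-Weiss spectral clustering.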
def eigen_decomposition(img_name, L):
cut = args.cut
K = args.K
kernel_type = args.kernel_type
eigval_f = DATA_PATH + f"eigval_{img_name}_{cut}_{kernel_type}.npy"
eigvec_f = DATA_PATH + f"eigvec_{img_name}_{cut}_{kernel_type}.npy"
if os.path.exists(eigval_f):
eigval = np.load(eigval_f)
eigvec = np.load(eigvec_f)
else:
eigval, eigvec = eig(L)
np.save(eigval_f, eigval)
np.save(eigvec_f, eigvec)
order = np.argsort(eigval)
sorted_eigvec = eigvec[:, order]
U = sorted_eigvec[:, 1: K + 1]
T = U.copy()
if cut == "normalized":
for i, u in enumerate(U):
T[i, :] = u / np.sqrt(np.sum(u ** 2))
return T
def init_cluster(data, img):
K = args.K
mode = args.init_mode
if mode == "random":
rand_idx = np.random.choice(data.shape[0], size=K)
mean = data[rand_idx]
dist = cdist(mean, data, metric="sqeuclidean")
cluster = np.argmin(dist, axis=0)
elif mode == "k-means++":
# 1. Choose one center uniformly at random among the data points.
# 2. For each data point x not chosen yet, compute D(x), the distance between x and the nearest center that has already been chosen.
# 3. Choose one new data point at random as a new center, using a weighted probability distribution where a point x is chosen with probability proportional to D(x)2.
# 4. Repeat Steps 2 and 3 until k centers have been chosen.
img = img.reshape(-1, 3)
img = img / 255.0
first_mean = np.random.choice(h * w, size=1)
center = np.full(K, first_mean, dtype=int)
center_val = img[center]
for i in range(1, K):
dist = cdist(center_val, img, metric="sqeuclidean")
min_dist = np.min(dist, axis=0)
center[i] = np.random.choice(
h * w, size=1, p=min_dist ** 2 / np.sum(min_dist ** 2))
center_val = img[center]
dist = cdist(center_val, img, metric="sqeuclidean")
cluster = np.argmin(dist, axis=0)
return cluster
def run(data, h, w, img):
iterations = args.iterations
K = args.K
all_alpha = []
alpha = init_cluster(data, img)
all_alpha.append(alpha.reshape(h, w))
for iter in range(iterations):
cnt = np.zeros(K, dtype=float)
for i in range(K):
cnt[i] = np.count_nonzero(alpha == i)
if cnt[i] == 0:
cnt[i] = 1
mean = np.zeros((K, K), dtype=float)
for i in range(K):
mean[i] = np.sum(data[alpha == i, :], axis=0)
mean[i] = mean[i]/cnt[i]
dist = cdist(mean, data, metric="sqeuclidean")
new_alpha = np.argmin(dist, axis=0)
all_alpha.append(new_alpha.reshape(h, w))
if np.array_equal(alpha, new_alpha):
print(f"Converge in {iter+1}th iterations!")
break
alpha = new_alpha
all_alpha = np.array(all_alpha)
return all_alpha
def plot_result(all_alpha, img_name, data):
K = args.K
mode = args.init_mode
kernel_type = args.kernel_type
cut = args.cut
img_name += f"_{cut}_k{K}_{kernel_type}_{mode}"
# export video .gif
save_dir = SAVE_PATH + img_name
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
color = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0]], dtype=float)
imgs = []
for i in range(len(all_alpha)):
out_img = color[all_alpha[i]]
out_img = out_img.reshape((h, w, 3))
plt.imsave(f"{save_dir}/{img_name}_{i}.png", out_img)
imgs.append(Image.fromarray(np.uint8(out_img * 255)))
video_path = SAVE_PATH + "spectral_video/" + img_name + ".gif"
imgs[0].save(video_path, format="GIF", append_images=imgs[1:],
loop=0, save_all=True, duration=300)
# plot eigenspace
alpha = all_alpha[-1]
alpha = np.array(alpha)
alpha = alpha.reshape(-1)
if K == 2:
plt.figure(figsize=(10, 10))
plt.scatter(data[alpha == 0, 0], data[alpha == 0, 1], c='yellow')
plt.scatter(data[alpha == 1, 0], data[alpha == 1, 1], c='blue')
plt.title(f"Eigendspace {cut} K={K} {kernel_type} {mode}")
eigen_path = SAVE_PATH+"eigenspace/"+img_name+".png"
plt.savefig(eigen_path)
plt.show()
if __name__ == "__main__":
start_time = time.time()
for img_path in glob.glob(DATA_PATH + "*.png"):
img_name = get_img_name(img_path)
img = Image.open(img_path, "r")
img = np.array(img)
h, w, _ = img.shape
W = get_kernel(img, h, w)
L = get_graph_Laplacian(W)
T = eigen_decomposition(img_name, L)
all_alpha = run(T, h, w, img)
plot_result(all_alpha, img_name, T)
print(f"--- {time.time()-start_time} seconds ---")
```
#### File: Machine_Learning_2021/HW7/kernel_eigenface.py
```python
from scipy.spatial.distance import cdist
from numpy.linalg import eig, norm, pinv
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import argparse
import ntpath
import glob
import os
parser = argparse.ArgumentParser()
parser.add_argument("--option", type=str, default="PCA",
help="Choose which task to do: [PCA, LDA]")
parser.add_argument("--img-size", type=int, default=50,
help="image resize shape")
parser.add_argument("--kernel-type", type=str, default="linear",
help="kernel type for PCA/LDA: [linear, polynomial, rbf]")
parser.add_argument("--gamma", type=float, default=1,
help="gamma value for polynomial or rbf kernel")
parser.add_argument("--coeff", type=int, default=2,
help="coeff value for polynomial kernel")
parser.add_argument("--degree", type=int, default=20,
help="degree value for polynomial kernel")
args = parser.parse_args()
DATA_PATH = "./Yale_Face_Database/"
SAVE_PATH = "./results/"
def read_data(data_path):
img_size = args.img_size
data = []
filepath = []
label = []
for file in glob.glob(data_path+"*"):
# file path (135,)
filepath.append(file)
# data (135,10000)
image = Image.open(file)
image = image.resize((img_size, img_size), Image.ANTIALIAS)
image = np.array(image)
data.append(image.ravel())
# label (135,)
_, tail = ntpath.split(file)
label.append(int(tail[7:9]))
return np.array(data), filepath, np.array(label)
def get_eig(data, method, kernel_type="none"):
# get eigenvalue and eigenvector by np.linalg.eig()
eigval, eigvec = eig(data)
# sort by decreasing order of eigenvalues
idx = eigval.argsort()[::-1]
eigval = eigval[idx]
eigvec = eigvec[:, idx]
return eigval, eigvec
def get_kernel(X):
kernel_type = args.kernel_type
gamma = args.gamma
coeff = args.coeff
degree = args.degree
if kernel_type == "linear":
kernel = [email protected]
elif kernel_type == "polynomial":
kernel = np.power(gamma*([email protected])+coeff, degree)
elif kernel_type == "rbf":
kernel = np.exp(-gamma*cdist(X, X, metric="sqeuclidean"))
return kernel
def pca(x, kernel_type=None, kernel=None):
if kernel_type == None:
x_bar = np.mean(x, axis=0)
cov = (x-x_bar)@(x-x_bar).T
eigval, eigvec = get_eig(cov, "pca")
# project data
eigvec = (x-x_bar).T@eigvec
else:
x_bar = 0
# cetralize the kernel
n = kernel.shape[0]
one = np.ones((n, n), dtype=float)
one *= 1.0/n
kernel = kernel - one @ kernel - kernel @ one + one @ kernel @ one
eigval, eigvec = get_eig(kernel, "pca", kernel_type)
for i in range(eigvec.shape[1]):
eigvec[:, i] *= 1/norm(eigvec[:, i], 1)
# get the top 25 eigenvectors
W = eigvec[:, :25].real
return x_bar, W
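# Note on the linear branch above: eig() is taken on the N x N Gram matrix
# (x - x_bar)(x - x_bar)^T rather than the much larger d x d covariance matrix.
# If v is an eigenvector of the Gram matrix, then (x - x_bar)^T v is an (unnormalized)
# eigenvector of the covariance with the same eigenvalue, which is why eigvec is mapped
# back through (x - x_bar).T before keeping the top 25 components.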
def draw_eigenface(W, name):
img_size = args.img_size
# save eigenface in 5x5 grid
for i in range(5):
for j in range(5):
idx = i * 5 + j
plt.subplot(5, 5, idx + 1)
plt.imshow(W[:, idx].reshape((img_size, img_size)), cmap='gray')
plt.axis('off')
plt.savefig(SAVE_PATH+name+".jpg")
def lda(X, label, kernel_type="none", dims=25):
(n, d) = X.shape
label = np.asarray(label)
c = np.unique(label)
mu = np.mean(X, axis=0)
S_w = np.zeros((d, d), dtype=np.float64)
S_b = np.zeros((d, d), dtype=np.float64)
# Sw=(xi-mj)*(xi-mj)^T
# Sb=nj*(mj-m)*(mj-m)^T
for i in c:
X_i = X[np.where(label == i)[0], :]
mu_i = np.mean(X_i, axis=0)
S_w += (X_i - mu_i).T @ (X_i - mu_i)
S_b += X_i.shape[0] * np.outer(mu_i - mu, mu_i - mu)
# get eigenvalues and eigenvectors
S = pinv(S_w) @ S_b
eigen_val, eigen_vec = get_eig(S, "lda", kernel_type)
for i in range(eigen_vec.shape[1]):
eigen_vec[:, i] = eigen_vec[:, i] / norm(eigen_vec[:, i])
W = eigen_vec[:, :25].real
return W
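# The projection above follows the Fisher criterion: the discriminant directions are the
# leading eigenvectors of S_w^(-1) S_b (pinv is used because S_w is typically singular
# when the pixel dimension exceeds the number of training images).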
def reconstruct(data, W, method, m=None):
img_size = args.img_size
if method == "pca":
reconstruction = (data-m)@[email protected]+m
elif method == "lda":
reconstruction = data@[email protected]
idx = 1
for i in range(2):
for j in range(5):
plt.subplot(2, 5, idx)
plt.imshow(reconstruction[idx-1, :].reshape(
(img_size, img_size)), cmap='gray')
plt.axis('off')
idx += 1
plt.savefig(SAVE_PATH+method+"_reconstruction"+".jpg")
def face_recognition(train_data, train_label, test_data, test_label):
num_of_train = train_label.shape[0]
num_of_test = test_label.shape[0]
dist_mat = np.zeros((num_of_test, num_of_train), dtype=float)
# calculate distance
for i in range(num_of_test):
dist = np.zeros(num_of_train, dtype=float)
for j in range(num_of_train):
dist[j] = np.sum((test_data[i, :]-train_data[j, :])**2)
dist = np.argsort(dist)
dist_mat[i, :] = label[dist]
# KNN
K = [1, 3, 5, 7, 9, 11]
best_acc = 0.0
for k in K:
correct = 0.0
for i in range(num_of_test):
dist = dist_mat[i, :]
dist = dist[:k]
val, cnt = np.unique(dist, return_counts=True)
most_cnt = np.argmax(cnt)
pred = val[most_cnt]
if pred == test_label[i]:
correct += 1
acc = correct/num_of_test
print(f"Face recognition accuracy when K={k}: {acc:.4}")
if acc > best_acc:
best_acc = acc
best_K = k
print(f"Best K: {best_K}\tBest accuracy: {best_acc:.4}")
def project(train_data, test_data, W, m=0):
# data dimensionality reductionn
option = args.option
if option == "PCA":
train_proj = (train_data-m)@W
test_proj = (test_data-m)@W
elif option == "LDA":
train_proj = train_data@W
test_proj = test_data@W
return train_proj, test_proj
if __name__ == "__main__":
option = args.option
kernel_type = args.kernel_type
# read training and testing data
train_data, train_filepath, train_label = read_data(DATA_PATH+"Training/")
test_data, test_filepath, test_label = read_data(DATA_PATH+"Testing/")
data = np.vstack((train_data, test_data)) # (165,10000)
filepath = np.hstack((train_filepath, test_filepath)) # (165,)
label = np.hstack((train_label, test_label)) # (165,)
num_of_data = label.shape[0]
print(f"Num of data: {num_of_data}")
if option == "PCA":
rand_idx = np.random.randint(num_of_data, size=10)
samples = data[rand_idx, :] # (10,10000)
x_bar, W = pca(data)
draw_eigenface(W, "eigenface")
print("eigenface completed...")
reconstruct(samples, W, "pca", x_bar)
print("reconstruction completed...")
train_proj, test_proj = project(train_data, test_data, W, x_bar)
face_recognition(train_proj, train_label, test_proj, test_label)
print("pca face recognition completed...\n")
# python kernel_eigenface.py --option PCA --kernel-type polynomial --gamma 5 --coeff 1 --degree 2
# python kernel_eigenface.py --option PCA --kernel-type rbf --gamma 1e-7
kernel = get_kernel(data)
_, W = pca(data, kernel_type, kernel)
train_kernel = kernel[:train_label.shape[0], :]
test_kernel = kernel[train_label.shape[0]:, :]
train_proj, test_proj = project(train_kernel, test_kernel, W)
face_recognition(train_proj, train_label, test_proj, test_label)
print(
f"kernel pca with {kernel_type} kernel face recognition completed...")
if option == "LDA":
rand_idx = np.random.randint(num_of_data, size=10)
samples = data[rand_idx, :] # (10,10000)
W = lda(data, label)
draw_eigenface(W, "fisherface")
print("fisherface completed...")
reconstruct(samples, W, "lda")
print("reconstruction completed...")
train_proj, test_proj = project(train_data, test_data, W)
face_recognition(train_proj, train_label, test_proj, test_label)
print("lda face recognition completed...\n")
# python kernel_eigenface.py --option LDA --kernel-type polynomial --gamma 1 --coeff 2 --degree 20
# python kernel_eigenface.py --option LDA --kernel-type rbf --gamma 1e-4
kernel = get_kernel(data)
W = lda(kernel, label, kernel_type)
train_kernel = kernel[:train_label.shape[0], :]
test_kernel = kernel[train_label.shape[0]:, :]
train_proj, test_proj = project(train_kernel, test_kernel, W)
face_recognition(train_proj, train_label, test_proj, test_label)
print(
f"kernel lda with {kernel_type} kernel face recognition completed...")
``` |
{
"source": "joycenerd/ML_2020",
"score": 2
} |
#### File: ML_2020/project_1/opt.py
```python
import argparse
def parse_args():
parser=argparse.ArgumentParser()
parser.add_argument('--cuda_devices', type=int, default=0, help='gpu device number')
return parser.parse_args()
``` |
{
"source": "joycenerd/MPO_Reimplementation",
"score": 3
} |
#### File: MPO_Reimplementation/my_mpo/actor.py
```python
import torch.nn.functional as F
import torch.nn as nn
import torch
from torch.distributions import MultivariateNormal
from torch.distributions import Categorical
class Actor(nn.Module):
"""
Policy network
:param env: OpenAI gym environment
"""
def __init__(self, env):
super(Actor, self).__init__()
self.env = env
self.ds = env.observation_space.shape[0]
self.da = env.action_space.shape[0]
self.lin1 = nn.Linear(self.ds, 256)
self.lin2 = nn.Linear(256, 256)
self.mean_layer = nn.Linear(256, self.da)
self.cholesky_layer = nn.Linear(256, (self.da * (self.da + 1)) // 2)
def forward(self, state):
"""
forwards input through the network
:param state: (B, ds)
:return: mean vector (B, da) and cholesky factorization of covariance matrix (B, da, da)
"""
device = state.device
B = state.size(0)
ds = self.ds
da = self.da
action_low = torch.from_numpy(self.env.action_space.low)[None, ...].to(device) # (1, da)
action_high = torch.from_numpy(self.env.action_space.high)[None, ...].to(device) # (1, da)
x = F.relu(self.lin1(state))
x = F.relu(self.lin2(x))
mean = torch.sigmoid(self.mean_layer(x)) # (B, da)
mean = action_low + (action_high - action_low) * mean
cholesky_vector = self.cholesky_layer(x) # (B, (da*(da+1))//2)
cholesky_diag_index = torch.arange(da, dtype=torch.long) + 1
cholesky_diag_index = (cholesky_diag_index * (cholesky_diag_index + 1)) // 2 - 1
cholesky_vector[:, cholesky_diag_index] = F.softplus(cholesky_vector[:, cholesky_diag_index])
tril_indices = torch.tril_indices(row=da, col=da, offset=0)
cholesky = torch.zeros(size=(B, da, da), dtype=torch.float32).to(device)
cholesky[:, tril_indices[0], tril_indices[1]] = cholesky_vector
return mean, cholesky
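# Worked example of the Cholesky bookkeeping above (illustrative): for da = 2 the layer
# emits 3 values [l00, l10, l11]; cholesky_diag_index evaluates to [0, 2], so softplus
# keeps the diagonal entries l00 and l11 positive, and tril_indices scatters the vector
# into the lower-triangular matrix [[l00, 0], [l10, l11]].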
def action(self, state):
"""
:param state: (ds,)
:return: an action
"""
with torch.no_grad():
mean, cholesky = self.forward(state[None, ...])
action_distribution = MultivariateNormal(mean, scale_tril=cholesky)
action = action_distribution.sample()
return action[0]
``` |
{
"source": "joycenerd/Reinforcement_Learning_2021",
"score": 2
} |
#### File: MPO/mpo/mpo.py
```python
import os
from time import sleep
import numpy as np
from scipy.optimize import minimize
from tqdm import tqdm
import gym
import torch
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
from torch.distributions import MultivariateNormal, Categorical
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from tensorboardX import SummaryWriter
from mpo.actor import ActorContinuous, ActorDiscrete
from mpo.critic import CriticContinuous, CriticDiscrete
from mpo.replaybuffer import ReplayBuffer
import matplotlib.pyplot as plt
from matplotlib import animation
def bt(m):
return m.transpose(dim0=-2, dim1=-1)
def btr(m):
return m.diagonal(dim1=-2, dim2=-1).sum(-1)
def gaussian_kl(μi, μ, Ai, A):
"""
decoupled KL between two multivariate Gaussian distributions
C_μ = KL(f(x|μi,Σi)||f(x|μ,Σi))
C_Σ = KL(f(x|μi,Σi)||f(x|μi,Σ))
:param μi: (B, n)
:param μ: (B, n)
:param Ai: (B, n, n)
:param A: (B, n, n)
:return: C_μ, C_Σ: scalar
mean and covariance terms of the KL
:return: mean of determinants of Σi, Σ
ref : https://stanford.edu/~jduchi/projects/general_notes.pdf page.13
"""
n = A.size(-1)
μi = μi.unsqueeze(-1) # (B, n, 1)
μ = μ.unsqueeze(-1) # (B, n, 1)
Σi = Ai @ bt(Ai) # (B, n, n)
Σ = A @ bt(A) # (B, n, n)
Σi_det = Σi.det() # (B,)
Σ_det = Σ.det() # (B,)
# determinant can be minus due to numerical calculation error
# https://github.com/daisatojp/mpo/issues/11
Σi_det = torch.clamp_min(Σi_det, 1e-6)
Σ_det = torch.clamp_min(Σ_det, 1e-6)
Σi_inv = Σi.inverse() # (B, n, n)
Σ_inv = Σ.inverse() # (B, n, n)
inner_μ = ((μ - μi).transpose(-2, -1) @ Σi_inv @ (μ - μi)).squeeze() # (B,)
inner_Σ = torch.log(Σ_det / Σi_det) - n + btr(Σ_inv @ Σi) # (B,)
C_μ = 0.5 * torch.mean(inner_μ)
C_Σ = 0.5 * torch.mean(inner_Σ)
return C_μ, C_Σ, torch.mean(Σi_det), torch.mean(Σ_det)
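# Illustrative sanity check (not used in training): for identical Gaussians both terms
# vanish, e.g.
#   mu = torch.zeros(4, 3); A = torch.eye(3).expand(4, 3, 3)
#   C_mu, C_sigma, _, _ = gaussian_kl(mu, mu, A, A)  # both are 0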
def categorical_kl(p1, p2):
"""
calculates KL between two Categorical distributions
:param p1: (B, D)
:param p2: (B, D)
"""
p1 = torch.clamp_min(p1, 0.0001) # actually no need to clamp
p2 = torch.clamp_min(p2, 0.0001) # avoid zero division
return torch.mean((p1 * torch.log(p1 / p2)).sum(dim=-1))
def save_frames_as_gif(frames, path='./', filename='gym_animation.gif'):
#Mess with this to change frame size
plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi=72)
patch = plt.imshow(frames[0])
plt.axis('off')
def animate(i):
patch.set_data(frames[i])
anim = animation.FuncAnimation(plt.gcf(), animate, frames = len(frames), interval=50)
anim.save(path + filename, writer='imagemagick', fps=60)
class MPO(object):
"""
Maximum A Posteriori Policy Optimization (MPO)
:param device:
:param env: gym environment
:param dual_constraint:
(float) hard constraint of the dual formulation in the E-step
correspond to [2] p.4 ε
:param kl_mean_constraint:
(float) hard constraint of the mean in the M-step
correspond to [2] p.6 ε_μ for continuous action space
:param kl_var_constraint:
(float) hard constraint of the covariance in the M-step
correspond to [2] p.6 ε_Σ for continuous action space
:param kl_constraint:
(float) hard constraint in the M-step
correspond to [2] p.6 ε_π for discrete action space
:param discount_factor: (float) discount factor used in Policy Evaluation
:param alpha_scale: (float) scaling factor of the lagrangian multiplier in the M-step
:param sample_episode_num: the number of sampled episodes
:param sample_episode_maxstep: maximum sample steps of an episode
:param sample_action_num:
:param batch_size: (int) size of the sampled mini-batch
:param episode_rerun_num:
:param mstep_iteration_num: (int) the number of iterations of the M-step
:param evaluate_episode_maxstep: maximum evaluate steps of an episode
[1] https://arxiv.org/pdf/1806.06920.pdf
[2] https://arxiv.org/pdf/1812.02256.pdf
"""
def __init__(self,
device,
env,
log_dir,
dual_constraint=0.1,
kl_mean_constraint=0.01,
kl_var_constraint=0.0001,
kl_constraint=0.01,
discount_factor=0.99,
alpha_mean_scale=1.0,
alpha_var_scale=100.0,
alpha_scale=10.0,
alpha_mean_max=0.1,
alpha_var_max=10.0,
alpha_max=1.0,
sample_episode_num=30,
sample_episode_maxstep=200,
sample_action_num=64,
batch_size=256,
episode_rerun_num=3,
mstep_iteration_num=5,
evaluate_period=10,
evaluate_episode_num=100,
evaluate_episode_maxstep=200):
self.device = device
self.env = env
self.log_dir = log_dir
if self.env.action_space.dtype == np.float32:
self.continuous_action_space = True
else: # discrete action space
self.continuous_action_space = False
# the number of dimensions of state space
self.ds = env.observation_space.shape[0]
# the number of dimensions of action space
if self.continuous_action_space:
self.da = env.action_space.shape[0]
else: # discrete action space
self.da = env.action_space.n
self.ฮต_dual = dual_constraint
self.ฮต_kl_ฮผ = kl_mean_constraint
self.ฮต_kl_ฮฃ = kl_var_constraint
self.ฮต_kl = kl_constraint
self.ฮณ = discount_factor
self.ฮฑ_ฮผ_scale = alpha_mean_scale
self.ฮฑ_ฮฃ_scale = alpha_var_scale
self.ฮฑ_scale = alpha_scale
self.ฮฑ_ฮผ_max = alpha_mean_max
self.ฮฑ_ฮฃ_max = alpha_var_max
self.ฮฑ_max = alpha_max
self.sample_episode_num = sample_episode_num
self.sample_episode_maxstep = sample_episode_maxstep
self.sample_action_num = sample_action_num
self.batch_size = batch_size
self.episode_rerun_num = episode_rerun_num
self.mstep_iteration_num = mstep_iteration_num
self.evaluate_period = evaluate_period
self.evaluate_episode_num = evaluate_episode_num
self.evaluate_episode_maxstep = evaluate_episode_maxstep
if not self.continuous_action_space:
self.A_eye = torch.eye(self.da).to(self.device)
if self.continuous_action_space:
self.actor = ActorContinuous(env).to(self.device)
self.critic = CriticContinuous(env).to(self.device)
self.target_actor = ActorContinuous(env).to(self.device)
self.target_critic = CriticContinuous(env).to(self.device)
else: # discrete action space
self.actor = ActorDiscrete(env).to(self.device)
self.critic = CriticDiscrete(env).to(self.device)
self.target_actor = ActorDiscrete(env).to(self.device)
self.target_critic = CriticDiscrete(env).to(self.device)
for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data)
target_param.requires_grad = False
for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
target_param.data.copy_(param.data)
target_param.requires_grad = False
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=5e-4)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=1e-3)
self.norm_loss_q = nn.MSELoss()
self.ฮท = np.random.rand()
self.ฮฑ_ฮผ = 0.0 # lagrangian multiplier for continuous action space in the M-step
self.ฮฑ_ฮฃ = 0.0 # lagrangian multiplier for continuous action space in the M-step
self.ฮฑ = 0.0 # lagrangian multiplier for discrete action space in the M-step
self.replaybuffer = ReplayBuffer()
self.max_return_eval = -np.inf
self.iteration = 1
self.render = False
def train(self,
iteration_num=1000,
log_dir='log',
model_save_period=10,
render=False):
"""
:param iteration_num:
:param log_dir:
:param model_save_period:
:param render:
"""
self.render = render
log_dir = self.log_dir
# model_save_dir = os.path.join(log_dir, 'model')
model_save_dir = "checkpoints"
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
writer = SummaryWriter(os.path.join("runs", log_dir))
for it in range(self.iteration, iteration_num + 1):
self.__sample_trajectory(self.sample_episode_num)
buff_sz = len(self.replaybuffer)
mean_reward = self.replaybuffer.mean_reward()
mean_return = self.replaybuffer.mean_return()
mean_loss_q = []
mean_loss_p = []
mean_loss_l = []
mean_est_q = []
max_kl_ฮผ = []
max_kl_ฮฃ = []
max_kl = []
mean_ฮฃ_det = []
for r in range(self.episode_rerun_num):
for indices in tqdm(
BatchSampler(
SubsetRandomSampler(range(buff_sz)), self.batch_size, drop_last=True),
desc='training {}/{}'.format(r + 1, self.episode_rerun_num)):
K = len(indices) # the sample number of states
N = self.sample_action_num # the sample number of actions per state
ds = self.ds # the number of state space dimensions
da = self.da # the number of action space dimensions
state_batch, action_batch, next_state_batch, reward_batch = zip(
*[self.replaybuffer[index] for index in indices])
state_batch = torch.from_numpy(np.stack(state_batch)).type(torch.float32).to(self.device) # (K, ds)
action_batch = torch.from_numpy(np.stack(action_batch)).type(torch.float32).to(
self.device) # (K, da) or (K,)
next_state_batch = torch.from_numpy(np.stack(next_state_batch)).type(torch.float32).to(
self.device) # (K, ds)
reward_batch = torch.from_numpy(np.stack(reward_batch)).type(torch.float32).to(self.device) # (K,)
# Policy Evaluation
# [2] 3 Policy Evaluation (Step 1)
loss_q, q = self.__update_critic_td(
state_batch=state_batch,
action_batch=action_batch,
next_state_batch=next_state_batch,
reward_batch=reward_batch,
sample_num=self.sample_action_num
)
mean_loss_q.append(loss_q.item())
mean_est_q.append(q.abs().mean().item())
# E-Step of Policy Improvement
# [2] 4.1 Finding action weights (Step 2)
with torch.no_grad():
if self.continuous_action_space:
# sample N actions per state
b_ฮผ, b_A = self.target_actor.forward(state_batch) # (K,)
b = MultivariateNormal(b_ฮผ, scale_tril=b_A) # (K,)
sampled_actions = b.sample((N,)) # (N, K, da)
expanded_states = state_batch[None, ...].expand(N, -1, -1) # (N, K, ds)
target_q = self.target_critic.forward(
expanded_states.reshape(-1, ds), # (N * K, ds)
sampled_actions.reshape(-1, da) # (N * K, da)
).reshape(N, K) # (N, K)
target_q_np = target_q.cpu().transpose(0, 1).numpy() # (K, N)
else: # discrete action spaces
# sample da actions per state
                            # Because the action space is discrete, we can cover all actions per state.
actions = torch.arange(da)[..., None].expand(da, K).to(self.device) # (da, K)
b_p = self.target_actor.forward(state_batch) # (K, da)
b = Categorical(probs=b_p) # (K,)
b_prob = b.expand((da, K)).log_prob(actions).exp() # (da, K)
expanded_actions = self.A_eye[None, ...].expand(K, -1, -1) # (K, da, da)
expanded_states = state_batch.reshape(K, 1, ds).expand((K, da, ds)) # (K, da, ds)
target_q = (
self.target_critic.forward(
expanded_states.reshape(-1, ds), # (K * da, ds)
expanded_actions.reshape(-1, da) # (K * da, da)
).reshape(K, da) # (K, da)
).transpose(0, 1) # (da, K)
b_prob_np = b_prob.cpu().transpose(0, 1).numpy() # (K, da)
target_q_np = target_q.cpu().transpose(0, 1).numpy() # (K, da)
# https://arxiv.org/pdf/1812.02256.pdf
# [2] 4.1 Finding action weights (Step 2)
# Using an exponential transformation of the Q-values
if self.continuous_action_space:
def dual(ฮท):
"""
                            dual function of the non-parametric variational distribution
Q = target_q_np (K, N)
g(ฮท) = ฮท*ฮต + ฮท*mean(log(mean(exp(Q(s, a)/ฮท), along=a)), along=s)
For numerical stabilization, this can be modified to
Qj = max(Q(s, a), along=a)
g(ฮท) = ฮท*ฮต + mean(Qj, along=j) + ฮท*mean(log(mean(exp((Q(s, a)-Qj)/ฮท), along=a)), along=s)
"""
max_q = np.max(target_q_np, 1)
return ฮท * self.ฮต_dual + np.mean(max_q) \
+ ฮท * np.mean(np.log(np.mean(np.exp((target_q_np - max_q[:, None]) / ฮท), axis=1)))
else: # discrete action space
def dual(ฮท):
"""
                            dual function of the non-parametric variational distribution
                            g(ฮท) = ฮท*ฮต + ฮท*mean(log(sum(ฯ(a|s)*exp(Q(s, a)/ฮท))))
                            We multiply by ฯ(a|s) because this is an expectation under ฯ(a|s).
                            This equation corresponds to the last equation of [2] p.15
                            For numerical stabilization, this can be modified to
                            Qj = max(Q(s, a), along=a)
                            g(ฮท) = ฮท*ฮต + mean(Qj, along=j) + ฮท*mean(log(sum(ฯ(a|s)*exp((Q(s, a)-Qj)/ฮท))))
"""
max_q = np.max(target_q_np, 1)
return ฮท * self.ฮต_dual + np.mean(max_q) \
+ ฮท * np.mean(np.log(np.sum(
b_prob_np * np.exp((target_q_np - max_q[:, None]) / ฮท), axis=1)))
bounds = [(1e-6, None)]
res = minimize(dual, np.array([self.ฮท]), method='SLSQP', bounds=bounds)
self.ฮท = res.x[0]
qij = torch.softmax(target_q / self.ฮท, dim=0) # (N, K) or (da, K)
# M-Step of Policy Improvement
# [2] 4.2 Fitting an improved policy (Step 3)
for _ in range(self.mstep_iteration_num):
if self.continuous_action_space:
ฮผ, A = self.actor.forward(state_batch)
# First term of last eq of [2] p.5
# see also [2] 4.2.1 Fitting an improved Gaussian policy
ฯ1 = MultivariateNormal(loc=ฮผ, scale_tril=b_A) # (K,)
ฯ2 = MultivariateNormal(loc=b_ฮผ, scale_tril=A) # (K,)
loss_p = torch.mean(
qij * (
ฯ1.expand((N, K)).log_prob(sampled_actions) # (N, K)
+ ฯ2.expand((N, K)).log_prob(sampled_actions) # (N, K)
)
)
mean_loss_p.append((-loss_p).item())
kl_ฮผ, kl_ฮฃ, ฮฃi_det, ฮฃ_det = gaussian_kl(
ฮผi=b_ฮผ, ฮผ=ฮผ,
Ai=b_A, A=A)
max_kl_ฮผ.append(kl_ฮผ.item())
max_kl_ฮฃ.append(kl_ฮฃ.item())
mean_ฮฃ_det.append(ฮฃ_det.item())
if np.isnan(kl_ฮผ.item()): # This should not happen
raise RuntimeError('kl_ฮผ is nan')
if np.isnan(kl_ฮฃ.item()): # This should not happen
raise RuntimeError('kl_ฮฃ is nan')
# Update lagrange multipliers by gradient descent
# this equation is derived from last eq of [2] p.5,
# just differentiate with respect to ฮฑ
# and update ฮฑ so that the equation is to be minimized.
self.ฮฑ_ฮผ -= self.ฮฑ_ฮผ_scale * (self.ฮต_kl_ฮผ - kl_ฮผ).detach().item()
self.ฮฑ_ฮฃ -= self.ฮฑ_ฮฃ_scale * (self.ฮต_kl_ฮฃ - kl_ฮฃ).detach().item()
                            self.ฮฑ_ฮผ = np.clip(self.ฮฑ_ฮผ, 0.0, self.ฮฑ_ฮผ_max)
                            self.ฮฑ_ฮฃ = np.clip(self.ฮฑ_ฮฃ, 0.0, self.ฮฑ_ฮฃ_max)
self.actor_optimizer.zero_grad()
# last eq of [2] p.5
loss_l = -(
loss_p
+ self.ฮฑ_ฮผ * (self.ฮต_kl_ฮผ - kl_ฮผ)
+ self.ฮฑ_ฮฃ * (self.ฮต_kl_ฮฃ - kl_ฮฃ)
)
mean_loss_l.append(loss_l.item())
loss_l.backward()
clip_grad_norm_(self.actor.parameters(), 0.1)
self.actor_optimizer.step()
else: # discrete action space
ฯ_p = self.actor.forward(state_batch) # (K, da)
# First term of last eq of [2] p.5
ฯ = Categorical(probs=ฯ_p) # (K,)
loss_p = torch.mean(
qij * ฯ.expand((da, K)).log_prob(actions)
)
mean_loss_p.append((-loss_p).item())
kl = categorical_kl(p1=ฯ_p, p2=b_p)
max_kl.append(kl.item())
if np.isnan(kl.item()): # This should not happen
raise RuntimeError('kl is nan')
# Update lagrange multipliers by gradient descent
# this equation is derived from last eq of [2] p.5,
# just differentiate with respect to ฮฑ
# and update ฮฑ so that the equation is to be minimized.
self.ฮฑ -= self.ฮฑ_scale * (self.ฮต_kl - kl).detach().item()
self.ฮฑ = np.clip(self.ฮฑ, 0.0, self.ฮฑ_max)
self.actor_optimizer.zero_grad()
# last eq of [2] p.5
loss_l = -(loss_p + self.ฮฑ * (self.ฮต_kl - kl))
mean_loss_l.append(loss_l.item())
loss_l.backward()
clip_grad_norm_(self.actor.parameters(), 0.1)
self.actor_optimizer.step()
self.__update_param()
return_eval = None
if it % self.evaluate_period == 0:
self.actor.eval()
return_eval = self.__evaluate()
self.actor.train()
self.max_return_eval = max(self.max_return_eval, return_eval)
mean_loss_q = np.mean(mean_loss_q)
mean_loss_p = np.mean(mean_loss_p)
mean_loss_l = np.mean(mean_loss_l)
mean_est_q = np.mean(mean_est_q)
if self.continuous_action_space:
max_kl_ฮผ = np.max(max_kl_ฮผ)
max_kl_ฮฃ = np.max(max_kl_ฮฃ)
mean_ฮฃ_det = np.mean(mean_ฮฃ_det)
else: # discrete action space
max_kl = np.max(max_kl)
print('iteration :', it)
if it % self.evaluate_period == 0:
print(' max_return_eval :', self.max_return_eval)
print(' return_eval :', return_eval)
print(' mean return :', mean_return)
print(' mean reward :', mean_reward)
print(' mean loss_q :', mean_loss_q)
print(' mean loss_p :', mean_loss_p)
print(' mean loss_l :', mean_loss_l)
print(' mean est_q :', mean_est_q)
print(' ฮท :', self.ฮท)
if self.continuous_action_space:
print(' max_kl_ฮผ :', max_kl_ฮผ)
print(' max_kl_ฮฃ :', max_kl_ฮฃ)
print(' mean_ฮฃ_det :', mean_ฮฃ_det)
print(' ฮฑ_ฮผ :', self.ฮฑ_ฮผ)
print(' ฮฑ_ฮฃ :', self.ฮฑ_ฮฃ)
else: # discrete action space
print(' max_kl :', max_kl)
print(' ฮฑ :', self.ฮฑ)
if it%100==0:
self.save_model(os.path.join(model_save_dir, f'{self.log_dir}_{it}ep_{mean_reward:.4}rewards.pt'))
# if it % model_save_period == 0:
# self.save_model(os.path.join(model_save_dir, 'model_{}.pt'.format(it)))
if it % self.evaluate_period == 0:
writer.add_scalar('max_return_eval', self.max_return_eval, it)
writer.add_scalar('return_eval', return_eval, it)
writer.add_scalar('return', mean_return, it)
writer.add_scalar('reward', mean_reward, it)
writer.add_scalar('loss_q', mean_loss_q, it)
writer.add_scalar('loss_p', mean_loss_p, it)
writer.add_scalar('loss_l', mean_loss_l, it)
writer.add_scalar('mean_q', mean_est_q, it)
writer.add_scalar('ฮท', self.ฮท, it)
if self.continuous_action_space:
writer.add_scalar('max_kl_ฮผ', max_kl_ฮผ, it)
writer.add_scalar('max_kl_ฮฃ', max_kl_ฮฃ, it)
writer.add_scalar('mean_ฮฃ_det', mean_ฮฃ_det, it)
writer.add_scalar('ฮฑ_ฮผ', self.ฮฑ_ฮผ, it)
writer.add_scalar('ฮฑ_ฮฃ', self.ฮฑ_ฮฃ, it)
else:
writer.add_scalar('ฮท_kl', max_kl, it)
writer.add_scalar('ฮฑ', self.ฮฑ, it)
writer.flush()
# end training
if writer is not None:
writer.close()
def test(self):
"""
        :return: average return over 10 test episodes (each episode's frames are also saved as a GIF)
"""
with torch.no_grad():
total_rewards = []
for e in tqdm(range(10), desc='testing'):
total_reward = 0.0
state = self.env.reset()
frames=[]
for s in range(self.evaluate_episode_maxstep):
frames.append(self.env.render(mode="rgb_array"))
action = self.actor.action(
torch.from_numpy(state).type(torch.float32).to(self.device)
).cpu().numpy()
state, reward, done, _ = self.env.step(action)
total_reward += reward
if done:
break
total_rewards.append(total_reward)
save_frames_as_gif(frames,"./",f"{self.log_dir}.gif")
return np.mean(total_rewards)
def load_model(self, path=None):
"""
loads a model from a given path
:param path: (str) file path (.pt file)
"""
load_path = path if path is not None else self.save_path
checkpoint = torch.load(load_path)
self.iteration = checkpoint['iteration']
self.critic.load_state_dict(checkpoint['critic_state_dict'])
self.target_critic.load_state_dict(checkpoint['target_critic_state_dict'])
self.actor.load_state_dict(checkpoint['actor_state_dict'])
self.target_actor.load_state_dict(checkpoint['target_actor_state_dict'])
self.critic_optimizer.load_state_dict(checkpoint['critic_optim_state_dict'])
self.actor_optimizer.load_state_dict(checkpoint['actor_optim_state_dict'])
self.critic.train()
self.target_critic.train()
self.actor.train()
self.target_actor.train()
def save_model(self, path=None):
"""
saves a model to a given path
:param path: (str) file path (.pt file)
"""
data = {
'iteration': self.iteration,
'actor_state_dict': self.actor.state_dict(),
'target_actor_state_dict': self.target_actor.state_dict(),
'critic_state_dict': self.critic.state_dict(),
'target_critic_state_dict': self.target_critic.state_dict(),
'actor_optim_state_dict': self.actor_optimizer.state_dict(),
'critic_optim_state_dict': self.critic_optimizer.state_dict()
}
torch.save(data, path)
def __sample_trajectory_worker(self, i):
buff = []
state = self.env.reset()
for steps in range(self.sample_episode_maxstep):
action = self.target_actor.action(
torch.from_numpy(state).type(torch.float32).to(self.device)
).cpu().numpy()
next_state, reward, done, _ = self.env.step(action)
buff.append((state, action, next_state, reward))
if self.render and i == 0:
self.env.render()
sleep(0.01)
if done:
break
else:
state = next_state
return buff
def __sample_trajectory(self, sample_episode_num):
self.replaybuffer.clear()
episodes = [self.__sample_trajectory_worker(i)
for i in tqdm(range(sample_episode_num), desc='sample_trajectory')]
self.replaybuffer.store_episodes(episodes)
def __evaluate(self):
"""
:return: average return over 100 consecutive episodes
"""
with torch.no_grad():
total_rewards = []
for e in tqdm(range(self.evaluate_episode_num), desc='evaluating'):
total_reward = 0.0
state = self.env.reset()
for s in range(self.evaluate_episode_maxstep):
action = self.actor.action(
torch.from_numpy(state).type(torch.float32).to(self.device)
).cpu().numpy()
state, reward, done, _ = self.env.step(action)
total_reward += reward
if done:
break
total_rewards.append(total_reward)
return np.mean(total_rewards)
def __update_critic_td(self,
state_batch,
action_batch,
next_state_batch,
reward_batch,
sample_num=64):
"""
:param state_batch: (B, ds)
:param action_batch: (B, da) or (B,)
:param next_state_batch: (B, ds)
:param reward_batch: (B,)
:param sample_num:
:return:
"""
B = state_batch.size(0)
ds = self.ds
da = self.da
with torch.no_grad():
r = reward_batch # (B,)
if self.continuous_action_space:
ฯ_ฮผ, ฯ_A = self.target_actor.forward(next_state_batch) # (B,)
ฯ = MultivariateNormal(ฯ_ฮผ, scale_tril=ฯ_A) # (B,)
sampled_next_actions = ฯ.sample((sample_num,)).transpose(0, 1) # (B, sample_num, da)
expanded_next_states = next_state_batch[:, None, :].expand(-1, sample_num, -1) # (B, sample_num, ds)
expected_next_q = self.target_critic.forward(
expanded_next_states.reshape(-1, ds), # (B * sample_num, ds)
sampled_next_actions.reshape(-1, da) # (B * sample_num, da)
).reshape(B, sample_num).mean(dim=1) # (B,)
else: # discrete action space
ฯ_p = self.target_actor.forward(next_state_batch) # (B, da)
ฯ = Categorical(probs=ฯ_p) # (B,)
ฯ_prob = ฯ.expand((da, B)).log_prob(
torch.arange(da)[..., None].expand(-1, B).to(self.device) # (da, B)
).exp().transpose(0, 1) # (B, da)
sampled_next_actions = self.A_eye[None, ...].expand(B, -1, -1) # (B, da, da)
expanded_next_states = next_state_batch[:, None, :].expand(-1, da, -1) # (B, da, ds)
expected_next_q = (
self.target_critic.forward(
expanded_next_states.reshape(-1, ds), # (B * da, ds)
sampled_next_actions.reshape(-1, da) # (B * da, da)
).reshape(B, da) * ฯ_prob # (B, da)
).sum(dim=-1) # (B,)
y = r + self.ฮณ * expected_next_q
self.critic_optimizer.zero_grad()
if self.continuous_action_space:
t = self.critic(
state_batch,
action_batch
).squeeze()
else: # discrete action space
t = self.critic(
state_batch,
self.A_eye[action_batch.long()]
).squeeze(-1) # (B,)
loss = self.norm_loss_q(y, t)
loss.backward()
self.critic_optimizer.step()
return loss, y
def __update_param(self):
"""
Sets target parameters to trained parameter
"""
# Update policy parameters
for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data)
# Update critic parameters
for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
target_param.data.copy_(param.data)
``` |
{
"source": "joycenerd/rsna-pneumonia-detection",
"score": 2
} |
#### File: rsna-pneumonia-detection/CheXNet-with-localization/inference.py
```python
import numpy as np
from os import listdir
import skimage.transform
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn import functional as F
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.optim as optim
from torch.autograd import Function
from torchvision import models
from torchvision import utils
import cv2
import sys
import os
import pickle
from collections import defaultdict
from collections import OrderedDict
import skimage
from skimage.io import *
from skimage.transform import *
import scipy
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
from scipy.ndimage import binary_dilation
import matplotlib.patches as patches
import argparse
import glob
import imageio
import tqdm
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
test_X = []
parser=argparse.ArgumentParser()
parser.add_argument('--dataroot',type=str,default='/eva_data/zchin/rsna_data_all/images/test',help='test image save dir')
parser.add_argument('--weights',type=str,default='/eva_data/zchin/rsna_outputs/CheXNet/DenseNet121_aug2_pretrain_noWeight_2_0.8875203559299248.pkl',help='trained model weights')
args=parser.parse_args()
def get_file_names(dataroot):
imgs=[]
for img_path in glob.glob(dataroot+'/*'):
imgs.append(img_path)
return imgs
# imgs = judger.get_file_names()
# f = judger.get_output_file_object()
imgs=get_file_names(args.dataroot)
for img in tqdm.tqdm(imgs):
# img = scipy.misc.imread(img)
img=imageio.imread(img)
if img.shape != (1024,1024):
img = img[:,:,0]
img_resized = skimage.transform.resize(img,(256,256))
test_X.append((np.array(img_resized)).reshape(256,256,1))
test_X = np.array(test_X)
print(test_X.shape)
# model archi
# construct model
class DenseNet121(nn.Module):
"""Model modified.
The architecture of our model is the same as standard DenseNet121
except the classifier layer which has an additional sigmoid function.
"""
def __init__(self, out_size):
super(DenseNet121, self).__init__()
self.densenet121 = torchvision.models.densenet121(pretrained=True)
num_ftrs = self.densenet121.classifier.in_features
self.densenet121.classifier = nn.Sequential(
nn.Linear(num_ftrs, out_size),
nn.Sigmoid()
)
def forward(self, x):
x = self.densenet121(x)
return x
model = DenseNet121(2).cuda()
model = torch.nn.DataParallel(model)
model.load_state_dict(torch.load(args.weights))
print("model loaded")
# build test dataset
class ChestXrayDataSet_plot(Dataset):
def __init__(self, input_X = test_X, transform=None):
        self.X = np.uint8(input_X*255)
self.transform = transform
def __getitem__(self, index):
"""
Args:
index: the index of item
Returns:
image
"""
current_X = np.tile(self.X[index],3)
image = self.transform(current_X)
return image
def __len__(self):
return len(self.X)
test_dataset = ChestXrayDataSet_plot(input_X = test_X,transform=transforms.Compose([
transforms.ToPILImage(),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
]))
thresholds = np.load("thresholds.npy")
print(thresholds)
thres=thresholds[6]
# ======= Grad CAM Function =========
class PropagationBase(object):
def __init__(self, model, cuda=False):
self.model = model
self.model.eval()
if cuda:
self.model.cuda()
self.cuda = cuda
self.all_fmaps = OrderedDict()
self.all_grads = OrderedDict()
self._set_hook_func()
self.image = None
def _set_hook_func(self):
raise NotImplementedError
def _encode_one_hot(self, idx):
one_hot = torch.FloatTensor(1, self.preds.size()[-1]).zero_()
one_hot[0][idx] = 1.0
return one_hot.cuda() if self.cuda else one_hot
def forward(self, image):
self.image = image
self.preds = self.model.forward(self.image)
# self.probs = F.softmax(self.preds)[0]
# self.prob, self.idx = self.preds[0].data.sort(0, True)
return self.preds.cpu().data.numpy()
def backward(self, idx):
self.model.zero_grad()
one_hot = self._encode_one_hot(idx)
self.preds.backward(gradient=one_hot, retain_graph=True)
class GradCAM(PropagationBase):
def _set_hook_func(self):
def func_f(module, input, output):
self.all_fmaps[id(module)] = output.data.cpu()
def func_b(module, grad_in, grad_out):
self.all_grads[id(module)] = grad_out[0].cpu()
for module in self.model.named_modules():
module[1].register_forward_hook(func_f)
module[1].register_backward_hook(func_b)
def _find(self, outputs, target_layer):
for key, value in outputs.items():
for module in self.model.named_modules():
if id(module[1]) == key:
if module[0] == target_layer:
return value
raise ValueError('Invalid layer name: {}'.format(target_layer))
def _normalize(self, grads):
l2_norm = torch.sqrt(torch.mean(torch.pow(grads, 2))) + 1e-5
return grads / l2_norm.item()
def _compute_grad_weights(self, grads):
grads = self._normalize(grads)
self.map_size = grads.size()[2:]
return nn.AvgPool2d(self.map_size)(grads)
def generate(self, target_layer):
fmaps = self._find(self.all_fmaps, target_layer)
grads = self._find(self.all_grads, target_layer)
weights = self._compute_grad_weights(grads)
gcam = torch.FloatTensor(self.map_size).zero_()
for fmap, weight in zip(fmaps[0], weights[0]):
gcam += fmap * weight.data
gcam = F.relu(Variable(gcam))
gcam = gcam.data.cpu().numpy()
gcam -= gcam.min()
gcam /= gcam.max()
gcam = cv2.resize(gcam, (self.image.size(3), self.image.size(2)))
return gcam
def save(self, filename, gcam, raw_image):
gcam = cv2.applyColorMap(np.uint8(gcam * 255.0), cv2.COLORMAP_JET)
        gcam = gcam.astype(float) + raw_image.astype(float)
gcam = gcam / gcam.max() * 255.0
cv2.imwrite(filename, np.uint8(gcam))
# ======== Create heatmap ===========
heatmap_output = []
image_id = []
output_class = []
gcam = GradCAM(model=model, cuda=True)
for index in tqdm.tqdm(range(len(test_dataset))):
input_img = Variable((test_dataset[index]).unsqueeze(0).cuda(), requires_grad=True)
probs = gcam.forward(input_img)
probs=probs[0]
if probs[0]>probs[1]:
heatmap_output.append(np.full((224,224),np.nan))
image_id.append(index)
output_class.append(0)
continue
# activate_classes = np.where((probs > thresholds)[0]==True)[0]
# for activate_class in activate_classes:
gcam.backward(idx=1)
output = gcam.generate(target_layer="module.densenet121.features.denseblock4.denselayer16.conv2")
#### this output is heatmap ####
if np.sum(np.isnan(output)) > 0:
print("fxxx nan")
heatmap_output.append(output)
image_id.append(index)
# output_class.append(activate_class)
output_class.append(1)
print("heatmap output done")
# ======= Plot bounding box =========
img_width, img_height = 224, 224
img_width_exp, img_height_exp = 1024, 1024
crop_del = 16
rescale_factor = 4
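# Rationale for the constants above (a sketch): inference images were resized to 256x256 and
# center-cropped to 224x224, so crop_del = (256 - 224) / 2 = 16; mapping a crop coordinate x back
# to the original 1024x1024 frame uses rescale_factor = 1024 / 256 = 4, i.e. x_orig = (x + crop_del) * rescale_factor.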
# class_index = ['Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass', 'Nodule', 'Pneumonia', 'Pneumothorax']
class_index=['normal','pneumonia']
default_box = np.array([[403.4, 433.4, 197.9, 206.7], [458.0, 337.9, 252.2, 306.4]])
# npy_list = os.listdir(sys.argv[1])
# with open('test.txt', 'r') as f:
# fname_list = f.readlines()
# fname_list = [s.strip('\n') for s in fname_list]
prediction_dict = {}
for i in range(len(imgs)):
prediction_dict[i] = []
for img_id, k, npy in zip(image_id, output_class, heatmap_output):
data = npy
img_fname = imgs[img_id]
# output default_box
prediction_sent = '%s %.1f %.1f %.1f %.1f' % (class_index[k], default_box[k][0], default_box[k][1], default_box[k][2], default_box[k][3])
prediction_dict[img_id].append(prediction_sent)
if np.isnan(data).any() or k==0:
continue
    w_k, h_k = (default_box[k][2:] * (256 / 1024)).astype(int)
# Find local maxima
neighborhood_size = 100
threshold = .1
data_max = filters.maximum_filter(data, neighborhood_size)
maxima = (data == data_max)
data_min = filters.minimum_filter(data, neighborhood_size)
diff = ((data_max - data_min) > threshold)
maxima[diff == 0] = 0
for _ in range(5):
maxima = binary_dilation(maxima)
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
xy = np.array(ndimage.center_of_mass(data, labeled, range(1, num_objects+1)))
for pt in xy:
if data[int(pt[0]), int(pt[1])] > np.max(data)*.9:
upper = int(max(pt[0]-(h_k/2), 0.))
left = int(max(pt[1]-(w_k/2), 0.))
right = int(min(left+w_k, img_width))
lower = int(min(upper+h_k, img_height))
if lower == img_height and not k in [1]:
# avoid bbox touching bottom
continue
elif k in [5]:
# avoid predicting low acc classes
continue
else:
prediction_sent = '%s %.1f %.1f %.1f %.1f' % (class_index[k], (left+crop_del)*rescale_factor, \
(upper+crop_del)*rescale_factor, \
(right-left)*rescale_factor, \
(lower-upper)*rescale_factor)
prediction_dict[img_id].append(prediction_sent)
f=open('output.txt','w')
for i in range(len(prediction_dict)):
fname = imgs[i]
prediction = prediction_dict[i]
box_num = len(prediction)
if box_num <= 10:
print(fname, box_num)
# f.write(('%s %d\n' % (fname, box_num)).encode())
f.write('%s %d\n' % (fname, box_num))
for p in prediction:
print(p)
# f.write((p+"\n").encode())
f.write(p+"\n")
else:
print(fname, 10)
# f.write(('%s %d\n' % (fname, 10)).encode())
f.write('%s %d\n' % (fname, 10))
for p in prediction[:10]:
print(p)
# f.write((p+"\n").encode())
f.write(p+"\n")
f.close()
# score, err = judger.judge()
# if err is not None: # in case we failed to judge your submission
# print (err)
```
#### File: efficientnet/utils/loss.py
```python
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
class NLL_OHEM(torch.nn.NLLLoss):
""" Online hard example mining.
    Needs input from nn.LogSoftmax() """
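    # A usage sketch (assumes log-probabilities from nn.LogSoftmax and integer class targets;
    # the ratio, device and epoch values are illustrative):
    #   criterion = NLL_OHEM(ratio=0.7, device=torch.device('cuda'), total_ep=200)
    #   loss = criterion(log_probs, targets, epoch)  # sched_ratio=True applies step_ratio_sched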
def __init__(self, ratio,device,total_ep):
super(NLL_OHEM, self).__init__(None, True)
self.ratio = ratio
self.device=device
self.total_ep=total_ep
def forward(self, x, y, epoch,sched_ratio=True):
num_inst = x.size(0)
if sched_ratio:
self.step_ratio_sched(epoch)
else:
self.ratio=1
# print(self.ratio)
num_hns = int(self.ratio * num_inst)
if num_hns>0:
x_ = x.clone()
inst_losses = torch.autograd.Variable(torch.zeros(num_inst).to(self.device))
for idx, label in enumerate(y.data):
inst_losses[idx] = -x_.data[idx, label]
_, idxs = inst_losses.topk(num_hns)
x_hn = x.index_select(0, idxs)
y_hn = y.index_select(0, idxs)
loss=torch.nn.functional.nll_loss(x_hn, y_hn,reduction='mean')
else:
loss=torch.nn.functional.nll_loss(x,y,reduction='mean')
return loss
def cyclic_ratio_sched(self,epoch):
half=int(self.total_ep/2)
max_range=int(half*0.2)
if epoch<half:
if epoch<max_range:
self.ratio=1.0
else:
self.ratio=(half-epoch)/float(half-max_range)
else:
if epoch<(half+max_range):
self.ratio=0.5
else:
self.ratio=0.5*(self.total_ep-epoch)/float(half-max_range)
def step_ratio_sched(self,epoch):
if epoch<40:
self.ratio=1
elif epoch>=40 and epoch<60:
self.ratio=0.9
elif epoch>=60 and epoch<90:
self.ratio=0.8
elif epoch>=90 and epoch<130:
self.ratio=0.7
elif epoch>=130 and epoch<170:
self.ratio=0.6
elif epoch>=170:
self.ratio=0.5
if __name__=='__main__':
ratio_list=[]
epoch_list=[]
for epoch in range(0,200):
if epoch<40:
ratio=1
elif epoch>=40 and epoch<60:
ratio=0.9
elif epoch>=60 and epoch<90:
ratio=0.8
elif epoch>=90 and epoch<130:
ratio=0.7
elif epoch>=130 and epoch<170:
ratio=0.6
elif epoch>=170:
ratio=0.5
ratio_list.append(ratio)
epoch_list.append(epoch)
plt.rcParams["font.family"] = "serif"
CB91_Blue = '#2CBDFE'
CB91_Green = '#47DBCD'
CB91_Pink = '#F3A0F2'
CB91_Purple = '#9D2EC5'
CB91_Violet = '#661D98'
CB91_Amber = '#F5B14C'
color_list = [CB91_Blue, CB91_Pink, CB91_Green, CB91_Amber,
CB91_Purple, CB91_Violet]
plt.rcParams['axes.prop_cycle'] = plt.cycler(color=color_list)
plt.plot(epoch_list,ratio_list)
plt.xlabel('epoch')
plt.ylabel('hard mining ratio (k)')
plt.savefig('ohem_step_ratio.png')
``` |
{
"source": "joycenerd/Vehicle-ReID",
"score": 3
} |
#### File: Vehicle-ReID/preprocess/extract_vehicle.py
```python
from path import args
import sys
sys.path.insert(0,str(args.root_path))
from options import opt
from pathlib import Path
import os
import cv2
# [frame, ID, left, top, width, height, 1, -1, -1, -1]
def sort_gt_id(gt_path):
sorted_gt=[]
gt_file = open(str(gt_path), 'r')
lines = gt_file.readlines()
for line in lines:
line = line.strip()
frame_id, vehicle_id, left, top, width, height, _, _, _, _ = line.split(',')
if vehicle_id in car_list:
sorted_gt.append([False, frame_id, vehicle_id, int(left), int(top), int(width), int(height)])
else:
sorted_gt.append([True, frame_id, vehicle_id, int(left), int(top), int(width), int(height)])
car_list.append(vehicle_id)
return sorted_gt
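# Each entry of sorted_gt is [is_query, frame_id, vehicle_id, left, top, width, height]; the first
# occurrence of a vehicle_id is flagged as a query crop, later occurrences go to the train/gallery data.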
def extract_im(sorted_gt, mode, cam_id):
for gt in sorted_gt:
is_query, frame_id, vehicle_id, left, top, width, height = gt
if mode == 'train':
image_name = cam_id + '_' + str(int(frame_id)-1).zfill(4) + '.jpg'
if Path(frames_path).joinpath('train_frame').joinpath(image_name).exists():
image_path = str(Path(frames_path).joinpath('train_frame').joinpath(image_name))
else:
continue
elif mode == 'valid':
image_name = cam_id + '_' + str(int(frame_id)-1).zfill(4) + '.jpg'
if Path(frames_path).joinpath('valid_frame').joinpath(image_name).exists():
image_path = str(Path(frames_path).joinpath('valid_frame').joinpath(image_name))
else:
continue
img = cv2.imread(image_path)
crop_img = img[top:top+height, left:left+width]
crop_img_name = vehicle_id.zfill(5) + '_' + cam_id + '_' + frame_id.zfill(4) + '.jpg'
if is_query == True:
crop_img_path = Path(opt.data_root).joinpath('reid_data/query_data').joinpath(crop_img_name)
elif mode == 'train':
crop_img_path = Path(opt.data_root).joinpath('reid_data/train_data').joinpath(crop_img_name)
elif mode == 'valid':
crop_img_path = Path(opt.data_root).joinpath('reid_data/gallery_data').joinpath(crop_img_name)
cv2.imwrite(str(crop_img_path), crop_img)
def get_frame(frames_dir):
frame_dir_path = Path(frames_path).joinpath(frames_dir)
print(frames_dir + ' start')
if frames_dir == 'train_frame':
for i in range(len(train_S_dirname)):
S_dir = Path(gt_root_path).joinpath('train').joinpath(train_S_dirname[i])
for cam_id in train_S[i]:
gt_path = Path(S_dir).joinpath(cam_id).joinpath('gt/gt.txt')
sorted_gt = sort_gt_id(gt_path)
extract_im(sorted_gt, 'train', cam_id)
print(cam_id + ' complete')
elif frames_dir == 'valid_frame':
for i in range(len(valid_S_dirname)):
S_dir = Path(gt_root_path).joinpath('validation').joinpath(valid_S_dirname[i])
for cam_id in valid_S[i]:
gt_path = Path(S_dir).joinpath(cam_id).joinpath('gt/gt.txt')
sorted_gt = sort_gt_id(gt_path)
extract_im(sorted_gt, 'valid', cam_id)
print(cam_id + ' complete')
print(frames_dir + ' complete\n')
if __name__ == '__main__':
S01 = ['c001', 'c002', 'c003', 'c004', 'c005']
S02 = ['c006', 'c007', 'c008', 'c009']
S03 = ['c010', 'c011', 'c012', 'c013', 'c014', 'c015']
S04 = ['c016', 'c017', 'c018', 'c019', 'c020', 'c021', 'c022', 'c023', 'c024', 'c025', 'c026', 'c027',
'c028', 'c029', 'c030', 'c031', 'c032', 'c033', 'c034', 'c035', 'c036', 'c037', 'c038', 'c039',
'c040']
S05 = ['c010', 'c016', 'c017', 'c018', 'c019', 'c020', 'c021', 'c022', 'c023', 'c024', 'c025', 'c026',
'c027', 'c028', 'c029', 'c033', 'c034', 'c035', 'c036']
S06 = ['c041', 'c042', 'c043', 'c044', 'c045', 'c046']
train_S = [S01, S03, S04, S05]
valid_S = [S02]
train_S_dirname = ['S01', 'S03', 'S04', 'S05']
valid_S_dirname = ['S02']
frames_path = Path(opt.data_root).joinpath('frames')
gt_root_path = opt.raw_data_path
car_list = []
get_frame('train_frame')
get_frame('valid_frame')
```
#### File: Vehicle-ReID/solver/optim.py
```python
from options import opt
from pathlib import Path
import torch
def make_optimizer(model, center_criterion):
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = opt.base_lr
weight_decay = opt.weight_decay
if "bias" in key:
lr = opt.base_lr * opt.bias_lr_factor
weight_decay = opt.weight_decay_bias
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if opt.optim_name == 'SGD':
optimizer = getattr(torch.optim, opt.optim_name)(params, momentum=opt.momentum)
else:
optimizer = getattr(torch.optim, opt.optim_name)(params)
optimizer_center = torch.optim.SGD(center_criterion.parameters(), lr=opt.center_lr)
return optimizer, optimizer_center
"""def optimizer_with_center(model, center_criterion):
normal_lr = opt.base_lr
center_lr = opt.center_lr
weight_decay = opt.weight_decay
optimizer = torch.optim.Adam(model.parameters(), lr=normal_lr, weight_decay=weight_decay, amsgrad=True)
optimizer_center = torch.optim.SGD(center_criterion.parameters(), lr=center_lr)
return optimizer, optimizer_center"""
```
#### File: joycenerd/Vehicle-ReID/train_reid.py
```python
from options import opt
from datasets.dataset import make_reid_dataset
from pathlib import Path
import torch
from modeling import make_model
import numpy as np
import torch.nn as nn
from losses import make_loss
from tqdm import tqdm
from torch.autograd import Variable
from utils.train_reid_utils import accuracy, calc_mAP
import os
import copy
from solver import make_optimizer, WarmupMultiStepLR
from visual import visualization
from utils.metrics import R1_mAP_eval
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
def train():
trainloader,queryloader,galleryloader, num_classes = make_reid_dataset(opt.reid_data_root)
print(f'image height: {opt.image_height}\t image width: {opt.image_width}')
model = make_model(num_classes)
model = model.cuda(opt.cuda_devices)
loss_func, center_criterion = make_loss(num_classes)
# optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-04, betas=(0.9, 0.999), amsgrad=True)
optimizer, optimizer_center = make_optimizer(model, center_criterion)
scheduler = WarmupMultiStepLR(optimizer, opt.milestones, opt.gamma,
opt.warmup_factor,
opt.warmup_epochs, opt.warmup_method)
best_mAP = 0.0
evaluator = R1_mAP_eval(num_classes, max_rank=50, feat_norm=opt.feat_norm,reranking=opt.reranking)
loss_list = []
acc_list = []
mAP_list = []
for epoch in range(opt.num_epochs):
print(f'Epoch: {epoch+1}/{opt.num_epochs}')
print('-'*len(f'Epoch: {epoch+1}/{opt.num_epochs}'))
total_xentropy_loss = 0.0
total_triplet_loss = 0.0
total_center_loss = 0.0
training_loss = 0.0
training_corrects = 0
train_set_size = 0
evaluator.reset()
model = model.train()
for idx, (images, pids, cam_ids, frameids) in enumerate(tqdm(trainloader)):
images = images.cuda(opt.cuda_devices)
pids = pids.cuda(opt.cuda_devices)
optimizer.zero_grad()
optimizer_center.zero_grad()
outputs, features = model(images, pids)
if opt.dataloader_sampler == 'softmax_triplet':
xentropy_loss, triplet_loss, loss = loss_func(outputs, features, pids)
elif opt.dataloader_sampler == 'softmax_triplet_center':
xentropy_loss, triplet_loss, center_loss, loss = loss_func(outputs, features, pids)
loss.backward()
optimizer.step()
if 'center' in opt.metric_loss_type:
for param in center_criterion.parameters():
param.grad.data *= (1. / opt.center_loss_weight)
optimizer_center.step()
total_xentropy_loss += xentropy_loss.item() * images.size(0)
total_triplet_loss += triplet_loss.item() * images.size(0)
if 'center' in opt.metric_loss_type:
total_center_loss += center_loss.item() * images.size(0)
training_loss += loss.item() * images.size(0)
training_corrects += accuracy(outputs, pids)[0] * images.size(0) / 100.0
train_set_size += images.size(0)
avg_xentropy_loss = total_xentropy_loss / train_set_size
avg_triplet_loss = total_triplet_loss / train_set_size
if 'center' in opt.metric_loss_type:
avg_center_loss = total_center_loss / train_set_size
avg_training_loss = training_loss / train_set_size
avg_training_acc = float(training_corrects) / train_set_size
if 'center' in opt.metric_loss_type:
print(f'xentropy_loss: {avg_xentropy_loss:.4f}\ttriplet_loss: {avg_triplet_loss:.4f}\tcenter_loss: {avg_center_loss:.4f}')
else:
print(f'xentropy_loss: {avg_xentropy_loss:.4f}\ttriplet_loss: {avg_triplet_loss:.4f}')
print(f'training_loss: {avg_training_loss:.4f}\ttrain_accuracy: {avg_training_acc:.4f}')
model.eval()
# mAP = calc_mAP(queryloader,galleryloader,model)
for idx, (images, pids, cam_ids,frameids) in enumerate(tqdm(queryloader)):
with torch.no_grad():
images = images.cuda(opt.cuda_devices)
feature = model(images)
evaluator.update((feature, pids, cam_ids))
for idx, (images, pids, cam_ids,frameids) in enumerate(tqdm(galleryloader)):
with torch.no_grad():
images = images.cuda(opt.cuda_devices)
feature = model(images)
evaluator.update((feature, pids, cam_ids))
cmc, mAP, _, _, _, _, _ = evaluator.compute()
print(f'mAP: {mAP}\n')
scheduler.step()
loss_list.append(avg_training_loss)
acc_list.append(avg_training_acc)
mAP_list.append(mAP)
if mAP > best_mAP:
best_mAP = mAP
best_xentropy_loss = avg_xentropy_loss
best_triplet_loss = avg_triplet_loss
if 'center' in opt.metric_loss_type:
best_center_loss = avg_center_loss
best_training_loss = avg_training_loss
best_training_acc = avg_training_acc
best_model_params = copy.deepcopy(model.state_dict())
if (epoch+1)%20 == 0:
model.load_state_dict(best_model_params)
weight_path = Path(opt.checkpoint_dir).joinpath(f'model-{epoch+1}epoch-{best_mAP:.03f}-mAP.pth')
torch.save(model,str(weight_path))
torch.save({'state_dict': model.state_dict()}, str(weight_path)+'.tar')
visualization(loss_list, acc_list, mAP_list, epoch+1)
record = open("record.txt",'w')
if 'center' in opt.metric_loss_type:
print(f'best_xentropy_loss: {best_xentropy_loss:.4f}\tbest_triplet_loss: {best_triplet_loss:.4f}\tbest_center_loss: {best_center_loss:.4f}')
record.write(f'best_xentropy_loss: {best_xentropy_loss:.4f}\tbest_triplet_loss: {best_triplet_loss:.4f}\tbest_center_loss: {best_center_loss:.4f}\n')
else:
print(f'best_xentropy_loss: {best_xentropy_loss:.4f}\tbest_triplet_loss: {best_triplet_loss:.4f}')
record.write(f'best_xentropy_loss: {best_xentropy_loss:.4f}\tbest_triplet_loss: {best_triplet_loss:.4f}\n')
print(f'best_training_loss: {best_training_loss:.4f}\tbest_accuracy: {best_training_acc:.4f}')
record.write(f'best_training_loss: {best_training_loss:.4f}\tbest_accuracy: {best_training_acc:.4f}\n')
print(f'best_mAP: {best_mAP}')
record.write(f'best_mAP: {best_mAP}')
record.close()
model.load_state_dict(best_model_params)
weight_path = Path(opt.checkpoint_dir).joinpath(f'model-{best_mAP:.03f}-best_mAP.pth')
torch.save(model, str(weight_path))
torch.save({'state_dict': model.state_dict()}, str(weight_path)+'.tar')
visualization(loss_list, acc_list, mAP_list, epoch+1)
if __name__ == '__main__':
train()
``` |
{
"source": "Joyce-NL/gep-python-coding-challenge",
"score": 2
} |
#### File: Joyce-NL/gep-python-coding-challenge/setup.py
```python
from __future__ import absolute_import
from __future__ import print_function
from glob import glob
from os.path import basename
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read():
with open("README.md", "r") as f:
return f.read()
setup(
name='gep_python_coding_challenge',
version='0.0.5',
license='MIT',
description='This package contains the beginner level assignments of the GEP Python Coding Challenge.',
long_description=read(),
long_description_content_type="text/markdown",
author='JcS',
author_email='<EMAIL>',
url='https://github.com/Joyce-NL/gep-python-coding-challenge',
py_modules=['Problem01', 'Problem02', 'Problem41'],
package_dir={'': 'src'},
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Other/Nonlisted Topic',
],
keywords=[
'Euler', 'Project Euler'
],
python_requires='>=3.0',
install_requires=[],
extras_require={},
)
```
#### File: src/tests/test_Problem41.py
```python
import unittest
from Problem41 import find_largest_pandigital_prime, is_prime_number
class TestProblem41(unittest.TestCase):
def test_main_function(self):
self.assertEqual(find_largest_pandigital_prime(1, 9), 7652413, 'the result of the function does not match the expected result.')
self.assertEqual(find_largest_pandigital_prime(1, 3), "NA", 'the result of the function does not match the expected result.')
        self.assertEqual(find_largest_pandigital_prime(5, 6), "NA", 'the result of the function does not match the expected result.')
        self.assertEqual(find_largest_pandigital_prime(8, 9), "NA", 'the result of the function does not match the expected result.')
self.assertEqual(find_largest_pandigital_prime(5, 7), 7652413, 'the result of the function does not match the expected result.')
def test_input_type(self):
with self.assertRaises(TypeError):
find_largest_pandigital_prime(5j, 5)
find_largest_pandigital_prime(1, 2j)
find_largest_pandigital_prime("yes", 5)
find_largest_pandigital_prime(1, "no")
def test_input_value(self):
with self.assertRaises(ValueError):
find_largest_pandigital_prime(-1, 2)
find_largest_pandigital_prime(1, 0)
find_largest_pandigital_prime(8, 2)
find_largest_pandigital_prime(1, 10)
find_largest_pandigital_prime(0, 7)
def test_is_prime_number_function(self):
self.assertEqual(is_prime_number(1), False,'the result of the function does not match the expected result.')
self.assertEqual(is_prime_number(2), True, 'the result of the function does not match the expected result.')
self.assertEqual(is_prime_number(3), True, 'the result of the function does not match the expected result.')
self.assertEqual(is_prime_number(4), False, 'the result of the function does not match the expected result.')
self.assertEqual(is_prime_number(7919), True, 'the result of the function does not match the expected result.')
self.assertEqual(is_prime_number(123456), False, 'the result of the function does not match the expected result.')
self.assertEqual(is_prime_number(7652413), True, 'the result of the function does not match the expected result.')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joyceren/Advanced-Lane-Finder",
"score": 3
} |
#### File: joyceren/Advanced-Lane-Finder/calibrate_camera.py
```python
import cv2
import numpy as np
def calibrateCamera():
# chessboard size
nx = 9
ny = 6
# objpoints and imgpoints for all images
objpoints = []
imgpoints = []
# preparing objpoints, formatted (0, 0, 0), (1, 0, 0), (2, 0, 0), etc...
objp = np.zeros((nx * ny, 3), np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1, 2)
# stepping through calibration images
for i in range(1, 20):
img = cv2.imread("camera_cal/calibration" + str(i) + '.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# finding chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
# if corner was found, add obj and img points
if ret == True:
imgpoints.append(corners)
objpoints.append(objp)
# get camera calibration variables
ret, cameraMatrix, distortionCoefficents, rotationVector, translationVector = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
return cameraMatrix, distortionCoefficents
cameraMatrix, distortionCoefficents = calibrateCamera()
def undistort_image(img):
return cv2.undistort(img, cameraMatrix, distortionCoefficents, None, cameraMatrix)
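# A usage sketch (the image path is a hypothetical example):
#   road_img = cv2.imread('test_images/test1.jpg')
#   undistorted = undistort_image(road_img)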
``` |
{
"source": "joyceren/lane-finder",
"score": 2
} |
#### File: joyceren/lane-finder/find_lanes.py
```python
from moviepy.editor import VideoFileClip
import numpy as np
from helper_functions import grayscale, gaussian_blur, canny, \
region_of_interest, hough_lines, weighted_img
def find_lanes_in_image(img):
grey_img = grayscale(img)
blurred_img = gaussian_blur(grey_img, 9)
img_edges = canny(blurred_img, 30, 100)
# defining mask vertices
vertices = np.array([
[
(200/960*img.shape[1], 500/540*img.shape[0]),
(420/960*img.shape[1], 340/540*img.shape[0]),
(550/960*img.shape[1], 340/540*img.shape[0]),
(800/960*img.shape[1], 500/540*img.shape[0]),
]
], dtype=np.int32)
masked_img = region_of_interest(img_edges, vertices)
# defining hough_lines variables
rho = 2
theta = np.pi / 360
threshold = 50
min_line_length = 50
max_line_gap = img.shape[0]/3
img_lines = hough_lines(masked_img, rho, theta, threshold, min_line_length,max_line_gap)
lines_overlay_img = weighted_img(img_lines, img)
# plt.imshow(lines_overlay_img)
# plt.show()
return lines_overlay_img
def find_lanes_in_video(video_address, output_address):
clip = VideoFileClip(video_address)
processed_clip = clip.fl_image(find_lanes_in_image)
processed_clip.write_videofile(output_address, audio=False)
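# A usage sketch (the video paths are hypothetical examples):
#   find_lanes_in_video('test_videos/solidWhiteRight.mp4', 'output_videos/solidWhiteRight.mp4')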
``` |
{
"source": "Joycetyty/leetcode",
"score": 4
} |
#### File: Joycetyty/leetcode/runningSum.py
```python
from typing import List
def runningSum(nums: List[int]) -> List[int]:
for i in range(1, len(nums)):
nums[i] += nums[i - 1]
return nums
if __name__ == "__main__":
nums = [1,2,3,4]
    result = runningSum(nums)
print(result)
``` |
{
"source": "Joyce-yanqiongzhang/proj2_storytelling",
"score": 3
} |
#### File: proj2_storytelling/preprocessing/eliminate_empty_stories.py
```python
import logging
import sys
import argparse
class ParserWithUsage(argparse.ArgumentParser):
""" A custom parser that writes error messages followed by command line usage documentation."""
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
def create_arguments():
parser = ParserWithUsage()
parser.description = "Removes stories that have no keywords"
parser.add_argument("--input_keywords", help="Input file containing keywords", required=True)
parser.add_argument("--input_storylines", help="Input file containing story lines", required=True)
parser.add_argument("--input_stories", help="Input file containing stories", required=True)
parser.add_argument("--output", help="Output directory (will create clean files here)", required=True)
return parser.parse_args()
def get_file_name(file_path: str) -> str:
"""
Returns the name of a file from a file path.
:param file_path: file path
:return: name of file
"""
from pathlib import Path
p = Path(file_path)
return str(p.name)
def main():
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO,
datefmt='%m/%d/%Y %H:%M:%S')
args = create_arguments()
args_input_keywords = args.input_keywords
args_input_storylines = args.input_storylines
args_input_stories = args.input_stories
args_output_dir = args.output
logging.info("STARTED")
logging.info("Identifying empty keyword sets")
lines_to_skip_keywords = get_lines_with_empty_keywords(args_input_keywords)
lines_to_skip_train = get_lines_with_empty_keywords(args_input_storylines)
lines_to_skip = lines_to_skip_keywords.union(lines_to_skip_train)
logging.info("Writing new files")
write_lines_to_keep(args_input_keywords, args_output_dir + "/" + get_file_name(args_input_keywords), lines_to_skip)
write_lines_to_keep(args_input_storylines, args_output_dir + "/" + get_file_name(args_input_storylines), lines_to_skip)
write_lines_to_keep(args_input_stories, args_output_dir + "/" + get_file_name(args_input_stories), lines_to_skip)
logging.info("DONE")
def write_lines_to_keep(file_path: str, output_path: str, lines_to_skip: set) -> None:
"""
Copies only lines from the input file that are not listed in the set of lines to skip.
:param file_path: where to read the input from
:param output_path: where to write the output
:param lines_to_skip: which lines to skip
:return: nothing
"""
with open(output_path, "w") as file_out:
with open(file_path, "r") as file_input:
line_number = -1
for line in file_input:
line_number += 1
if line_number not in lines_to_skip:
file_out.write(line)
def get_lines_with_empty_keywords(file_path: str) -> set:
"""
Identifies lines in the file that are empty.
:param file_path: path to keywords file
:return: set of lines that are empty
"""
empty_keywords = set()
with open(file_path, "r") as file_obj:
line_number = -1
for line in file_obj:
line = line.rstrip()
line_number += 1
if len(line) == 0:
empty_keywords.add(line_number)
return empty_keywords
if __name__ == "__main__":
main()
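# Example invocation (a sketch; the file names are hypothetical):
#   python eliminate_empty_stories.py --input_keywords train.key --input_storylines train.storyline \
#       --input_stories train.story --output cleaned/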
```
#### File: proj2_storytelling/python_src/keywords_incorp.py
```python
import argparse
from itertools import combinations
import numpy as np
import re
import sys
def read_w2v(w2v_path, word2index, n_dims=300, unk_token="unk"):
"""takes tokens from files and returns word vectors
:param w2v_path: path to pretrained embedding file
:param word2index: Counter of tokens from processed files
:param n_dims: embedding dimensions
:param unk_token: this is the unk token for glove <KEY>. Ideally we make this less hardcode-y
:return numpy array of word vectors
"""
print('Getting Word Vectors...', file=sys.stderr)
vocab = set()
# hacky thing to deal with making sure to incorporate unk tokens in the form they are in for a given embedding type
if unk_token not in word2index:
word2index[unk_token] = 0 # hardcoded, this would be better if it was a method of a class
word_vectors = np.zeros((len(word2index), n_dims)) # length of vocab x embedding dimensions
with open(w2v_path) as file:
lc = 0
for line in file:
lc += 1
line = line.strip()
if line:
row = line.split()
token = row[0]
if token in word2index or token == unk_token:
vocab.add(token)
try:
vec_data = [float(x) for x in row[1:]]
word_vectors[word2index[token]] = np.asarray(vec_data)
if lc == 1:
if len(vec_data) != n_dims:
raise RuntimeError("wrong number of dimensions")
except:
print('Error on line {}'.format(lc), file=sys.stderr)
# puts data for a given embedding at an index based on the word2index dict
# end up with a matrix of the entire vocab
tokens_without_embeddings = set(word2index) - vocab
print('Word Vectors ready!', file=sys.stderr)
print('{} tokens from text ({:.2f}%) have no embeddings'.format(
len(tokens_without_embeddings), len(tokens_without_embeddings)*100/len(word2index)), file=sys.stderr)
print('Tokens without embeddings: {}'.format(tokens_without_embeddings), file=sys.stderr)
print('Setting those tokens to unk embedding', file=sys.stderr)
for token in tokens_without_embeddings:
word_vectors[word2index[token]] = word_vectors[word2index[unk_token]]
return word_vectors
def get_tokens(files):
"""take a list of filepaths, returns word2idx dict"""
print('Getting tokens ... ...', file=sys.stderr)
all_tokens = set()
for path in files:
with open(path, 'r') as infile:
all_tokens.update(set(infile.read().strip().split()))
word2index = dict(map(reversed, enumerate(list(all_tokens))))
return word2index
def cos_sim(v1, v2):
return v1.dot(v2) / (np.sqrt(v1.dot(v1)) * np.sqrt(v2.dot(v2)))
def cos_sim_array(vec, vec_array):
"""
take dot product of 2 vectors. which reduces dimensionality and gives me an array of results.
IMPORTANT that vec_array is first arg as a result
:param vec: a vector
:param vec_array: an array of vectors
:return: cosine_sim_array of the cosine similarity between the vector and each vector in the array
"""
dot_prod_array = np.dot(vec_array, vec)
len_vec_array, len_x_d = (vec_array**2).sum(axis=1) ** .5, (vec ** 2).sum() ** .5
cosine_sim_array = np.divide(dot_prod_array, len_vec_array*len_x_d)
return cosine_sim_array
def remove_chars(text: str, remove='#') -> str:
"""take a string and optional chars to remove and returns string without them"""
return re.sub(r'[{}]'.format(remove), '', text)
def make_vec_array(word_list: list, word_vectors, word2index: dict, drop_set={'#', '<EOL>', '<EOT>', '<\s>'}):
"""take a list of strings, an array of word vectors, return a numpy array of word vectors"""
vecs = [np.array(word_vectors[word2index.get(word, 0)])
for word in word_list if word not in drop_set]
return np.array(vecs)
def calc_similarity(storyline_path, story_path, word2index, word_vectors):
"""calculates cosine similarity between keywords in storyline and between keywords in storyline
and corresponding sentence in story. Averaged over all """
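    # A sketch of the two metrics computed below:
    #   keyword_relatedness: per storyline, the average pairwise cos_sim between keyword vectors,
    #       then averaged over all storylines
    #   keyword_incorporation_rate: per storyline, the average over keywords of the maximum cos_sim
    #       between the keyword vector and any word vector of the corresponding story, then averaged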
keyword_relatedness = 0
keyword_incorporation_rate = 0
storylines, stories = [], []
with open(storyline_path, 'r') as infile:
for line in infile:
processed_line = remove_chars(line).strip().split()[:-1] # remove the <EOL> at the end
storylines.append(processed_line)
with open(story_path, 'r') as infile:
for line in infile:
processed_line = remove_chars(line).strip().split()
stories.append(processed_line)
num_storylines = len(storylines)
assert(num_storylines == len(stories)), "Mismatch between number of storylines and number of stories"
# loop through stories and storylines and calc similarities
for i in range(num_storylines):
storyline_word_array = make_vec_array(storylines[i], word_vectors, word2index) # all storyline vectors
story_word_array = make_vec_array(stories[i], word_vectors, word2index) # all story word vectors
# calculate the similarities between the storyline words
# this is the cumulative cosine similarity between each word and all the other words then averaged
num_words_in_storyline = len(storyline_word_array)
storyline_idx_combinations = list(combinations(range(num_words_in_storyline), 2))
this_storyline_relatedness = 0
for kw1, kw2 in storyline_idx_combinations:
this_storyline_relatedness += cos_sim(storyline_word_array[kw1], storyline_word_array[kw2])
#print("KW Relatedness", this_storyline_relatedness/len(storyline_idx_combinations)) # to debug individual lines
keyword_relatedness += this_storyline_relatedness/len(storyline_idx_combinations) # since end up with 2x comparisons as words
# calculate the similarities between the word and the sentence
# this is the maximum cosine sim between each keyword and any other word in the sentence, summed over keywords then averaged
this_incorporation_rate = 0
for kw_vec in storyline_word_array:
cosine_max = np.nanmax(cos_sim_array(kw_vec, story_word_array))
this_incorporation_rate += cosine_max
#print("KW Incorporation", this_incorporation_rate/num_words_in_storyline) # to debug individual lines
keyword_incorporation_rate += this_incorporation_rate/num_words_in_storyline
# report average over all in set
keyword_relatedness /= num_storylines
keyword_incorporation_rate /= num_storylines
print('Metrics for {} samples'.format(num_storylines))
print('dynamic relatedness : {:.2f}'.format(keyword_relatedness))
print('dynamic keyword_incorporation_rate : {:.2f}'.format(keyword_incorporation_rate))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('storyline_file', type=str,
help='location of file with storylines')
parser.add_argument('story_file', type=str, help='location of story file')
parser.add_argument('wordvec_file', type=str, help='path to wordvec file' )
args = parser.parse_args()
word2idx = get_tokens([args.storyline_file, args.story_file]) # takes list of arbitrarily many files
word_vectors = read_w2v(args.wordvec_file, word2idx)
calc_similarity(args.storyline_file, args.story_file, word2idx, word_vectors)
```
#### File: proj2_storytelling/python_src/preprocess.py
```python
import sys
import json
import nltk
from tokenize_it import wtokenizer
from util import tokenize, get_noun
class ROCstory:
def load_data(self, infile):
dataSet = []
with open(infile) as inf:
inf.readline()
for line in inf:
elems = line.strip().split('\t')
try:
assert len(elems) == 7 or len(elems) == 8 or len(elems) == 5
except:
print('wrong format!!', len(elems), elems)
if len(elems) == 7:
dataSet.append(elems[2:])
elif len(elems) == 8:
dataSet.append(elems[1:5] + [elems[4+int(elems[-1])]])
elif len(elems) == 5:
dataSet.append(elems)
assert len(dataSet[-1]) == 5
print ('Finished loading data!!!')
return dataSet
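# Added note (formats inferred from the slicing above, so treat this as an assumption):
# 7-column rows look like ROCStories training files (id, title, 5 sentences),
# 8-column rows look like the Story Cloze format (id, 4 context sentences, 2 candidate
# endings, answer index), and 5-column rows are already just the 5 story sentences.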
def get_alternative_endings(self, infile, outfile):
body, trueEnd, falseEnd = [], [], []
with open(infile) as inf, open(outfile+'.body', 'w') as boutf, open(outfile+'.true', 'w') as toutf, open(outfile+'.false', 'w') as foutf:
inf.readline()
for line in inf:
elems = line.strip().split('\t')
try:
assert len(elems) == 8
except AssertionError:
print('wrong format!!', len(elems), elems)
body.append('\t'.join(elems[1:5]))
trueEnd.append(elems[4+int(elems[-1])])
falseEnd.append(elems[7-int(elems[-1])])
print ('Finished loading data!!!')
assert len(body) == len(trueEnd)
assert len(trueEnd) == len(falseEnd)
for bd, te, fe in zip(body, trueEnd, falseEnd):
boutf.write(wtokenizer(bd)+'\n')
toutf.write(wtokenizer(te)+'\n')
foutf.write(wtokenizer(fe)+'\n')
def gen_pair(self, dataSet, mode='vanilla', look_back=1):
pairs = []
keywords = []
for line in dataSet:
if mode.startswith('pad'):
line.insert(0, '<BOST>')
line.append('<EOST>')
# The vanilla version
if mode.endswith('vanilla'):
pair = (' '.join(line[:-1]).lower(), line[-1].lower())
pairs.append(pair)
kws = [get_noun(tokenize(sent)) for sent in line]
keywords.append(kws)
# all possible context + ending
elif mode.endswith('ending'):
for i in range(len(line)-1):
pair = (' '.join(line[i:-1]).lower(), line[-1].lower())
pairs.append(pair)
# all pair
elif mode.endswith('all'):
for j in range(1, len(line)):
for i in range(j):
pair = (' '.join(line[i:j]).lower(), line[j].lower())
pairs.append(pair)
# n-gram
elif mode.endswith('lookback'):
# hack for generating test data
#for j in range(len(line)-1, len(line)):
for j in range(1, len(line)):
i = max(j-look_back, 0)
pair = (' '.join(line[i:j]).lower(), line[j].lower())
pairs.append(pair)
else:
raise NotImplementedError
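# Illustrative example (added note): for a 5-sentence story [s1, s2, s3, s4, s5],
# 'vanilla' yields the single pair (s1 s2 s3 s4, s5) plus the noun keywords per sentence,
# 'ending' yields (s1 s2 s3 s4, s5), (s2 s3 s4, s5), (s3 s4, s5), (s4, s5),
# and 'lookback' with look_back=1 yields (s1, s2), (s2, s3), (s3, s4), (s4, s5).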
print ('generate %d pairs!' % len(pairs))
return pairs, keywords
def generate_data(self, infile, outfile, mode='vanilla', look_back=1):
dataSet = self.load_data(infile)
pairs, keywords = self.gen_pair(dataSet, mode, look_back)
with open(outfile+'.tok', 'w') as outf, open(outfile+'.key', 'w') as kof:
for pair in pairs:
tpair = map(wtokenizer, pair)
outf.write('\t'.join(tpair) + '\n')
for kws in keywords:
kof.write(' '.join(kws) + '\n')
class VStory(ROCstory):
def load_data(self, infile):
dataSet = []
with open(infile) as inf:
fobj = json.load(inf)
#for i in fobj:
story = fobj['annotations']
temp_data = []
for line in story:
assert len(line) == 1, len(line)
item_dict = line[0]
order = item_dict['worker_arranged_photo_order']
text = item_dict['text']
assert int(order) == len(temp_data)
temp_data.append(text)
if len(temp_data) == 5:
dataSet.append(temp_data)
temp_data = []
return dataSet
if __name__ == '__main__':
infile, outfile, trmode = sys.argv[1:4]
lb = None
if len(sys.argv) > 4:
lb = int(sys.argv[4])
# generate training data
rocstory_proc = ROCstory()
#rocstory_proc.generate_data(infile, outfile, trmode, lb)
rocstory_proc.get_alternative_endings(infile, outfile)
#vstory = VStory()
#vstory.generate_data(infile, outfile, trmode, lb)
```
#### File: storytelling/storytelling/generate_story_from_img.py
```python
from numpy.core.fromnumeric import argmax
from . import predict
img_paths = ['/home/serena/Desktop/proj2_storytelling/storytelling/statics/uploaded_imgs/1609853089.6942558user1.png',
'/home/serena/Desktop/proj2_storytelling/storytelling/statics/uploaded_imgs/1609853095.5605984user2.png',
'/home/serena/Desktop/proj2_storytelling/storytelling/statics/uploaded_imgs/1609853100.6583555user3.png']
# get attributes of the users' faces
class User_face:
def __init__(self, user_number, gender, age1, age2, attributes):
self.user_number = user_number
self.gender = gender
self.age1 = age1
self.age2 = age2
self.attributes = attributes
self.score = []
class Character:
def __init__(self, index, name, gender, gender_weight, age1, age2, age_weight, attributes, attribute_weight):
self.index = index
self.name = name
self.gender = gender
self.gender_weight = gender_weight
self.age1 = age1
self.age2 = age2
self.age_weight = age_weight
self.attributes = attributes
self.attribute_weight = attribute_weight
prince = Character(0, 'prince', 'Male', 50, 5, 25, 25, ['Attractive', 'Young', 'Oval_Face'], 15)
princess = Character(1, 'princess', 'Female', 50, 5, 25, 25, ['Attractive', 'Young', 'Oval_Face'], 15)
wizard = Character(2, 'wizard', 'Male', 10, 30, 80, 50, ['Chubby', 'Eeyeglasses', 'Bags_Under_Eyes', 'Mustache', 'Heavy_Makeup', 'Goatee'], 25)
rabbit = Character(3, 'Rabbit', 'Female', 10, 0, 10, 50, ['Chubby', 'Young', 'Pale_Skin', 'Wearing_Lipstick'], 30)
character_set = [prince, princess, wizard, rabbit]
def calculate_score(user_face):
def calculate_single(user_face, character):
single_score = 0
if user_face.gender == character.gender:
single_score += character.gender_weight
if user_face.age1 in range(character.age1, character.age2+1) or user_face.age2 in range(character.age1, character.age2+1):
single_score += character.age_weight
for attr in user_face.attributes:
if attr in character.attributes:
single_score += character.attribute_weight
return single_score
user_score = []
for cha in character_set:
user_score.append(calculate_single(user_face, cha))
return user_score
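# Worked example (added note with hypothetical attribute values): a user detected as
# ('Male', 20, 24, ['Attractive']) scores prince 50+25+15=90, princess 0+25+15=40,
# wizard 10+0+0=10 and rabbit 0, so this user would be mapped to the prince first.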
def character_mapping(img_paths):
user_face_set = []
for i, img_path in enumerate(img_paths):
gender, age1, age2, attributes = predict.get_attributes(img_path)
user_face_set.append(User_face(i+1, gender, int(age1), int(age2), attributes))
user_scores = []
for user in user_face_set:
user_score = calculate_score(user)
user.score = user_score
print("user " + str(user.user_number) + " got the scores for prince, princess, wizard, rabbit :", user_score)
user_scores.append(user_score)
print(user_scores)
user_characters = []
for user_score in user_scores:
selected_index = user_score.index(max(user_score))
selected = character_set[selected_index]
user_score_co = user_score.copy()
while selected.name in user_characters:
user_score_co[selected_index] = -1
selected_index = user_score_co.index(max(user_score_co))
selected = character_set[selected_index]
user_characters.append(selected.name)
print(user_characters)
return user_characters, user_face_set
``` |
{
"source": "JoyChen1998/Network_PacketCapture",
"score": 2
} |
#### File: JoyChen1998/Network_PacketCapture/Sniffer.py
```python
import socket
import time
import psutil
from struct import *
from multiprocessing import Pool
__AUTHOR__ = 'JoyChan'
__REPO__ = "https://github.com/JoyChen1998/Network_PacketCapture"
# ---* CONFIG *---
INTERVAL = 1 # for default speed to get a packet
HAVE_SAVED = False # control file save
HAVE_FILTER_PROTOCOL = False # control filter rules for protocol
HAVE_FILTER_IP = False # control filter rules for ip
__VERSION__ = '1.3.5'
# ---* CONFIG *---
ackn = []
protocol_filter_list = []
source_ip_filter_list = []
destination_ip_filter_list = []
allows_protocol = ['TCP', 'ICMP', 'UDP'] # allowed protocol names for the filter input
class Sniffer:
def __init__(self):
'''
Basic setup: initialize the filter lists and the per-packet field templates
'''
global protocol_filter_list
global source_ip_filter_list
global destination_ip_filter_list
self.s = None
self.filter_proto = protocol_filter_list
self.filter_in_ip = source_ip_filter_list
self.filter_out_ip = destination_ip_filter_list
self.cnt = 1 # for count packet
self.cnt_merge = 1 # for merge count
self.ack = []
self.Packet_MAC = {
'Source MAC': None,
'Destination MAC': None
}
self.Packet_IP = {
'Version': None,
'IP Header Length': None,
'Differ Service': None,
'All Length': None,
'Identification': None,
'DF': None,
'MF': None,
'Offset': None,
'TTL': None,
'Protocol': None,
'Source Address': None,
'Destination Address': None
}
self.Packet_UDP = {
'Source_port': None,
'Dest_port': None,
'Length': None,
'Checksum': None,
'Data_seg': None,
'Data_length': None
}
self.Packet_TCP = {
'Source_port': None,
'Dest_port': None,
'Sequence': None,
'Acknowledgement': None,
'TCP Header Length': None,
'Data_seg': None,
'Data_length': None
}
self.Packet_ICMP = {
'Type': None,
'Code': None,
'Checksum': None,
'Data_seg': None,
'Data_length': None
}
@staticmethod
def get_netcard():
netcard_info = []
info = psutil.net_if_addrs()
for k, v in info.items():
for item in v:
if item[0] == 2 and not item[1] == '127.0.0.1':
netcard_info.append((k, item[1]))
return netcard_info
@staticmethod
def eth_addr(a):
b = "%.2x-%.2x-%.2x-%.2x-%.2x-%.2x" % (a[0], a[1], a[2], a[3], a[4], a[5])
return b
@staticmethod
def convert_hex_to_ascii(data):
tmp = ""
try:
tmp = data.decode().encode("utf-8").decode("utf-8")
except:
for j in range(0, len(data)):
tmp += chr(int("%.2x" % data[j], 16))
return tmp
def record_http_msg(self, data, acknowledge):
for index in range(0, len(self.ack)):
if str(acknowledge) == str(self.ack[index]):
ackn[index] += data
if len(self.ack) > 0:
with open('HTTP_record.txt', 'w') as f:
f.write('** http record started **\n')
for index_i in ackn:
f.write('new http packet data\n')
f.write(index_i + '\n')
f.write('*'*40 + '\n')
f.write('\n** http record ended **\n')
f.close()
@staticmethod
def get_flag(e):
f = bin(int(e[0], 16))[2:]
o = '0' * (4 - len(f)) + f
return o[1:3]
@staticmethod
def get_offset(e):
f = bin(int(e[0], 16))[2:]
o = '0' * (4 - len(f)) + f
return int(o[3] + e[1:], 16)
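# Illustrative trace (added note): for a typical packet with only the Don't Fragment bit
# set, iph[4].hex() == '4000', so get_flag('4000') returns '10' (DF='1', MF='0') and
# get_offset('4000') returns 0 (no fragment offset).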
@staticmethod
def change_digit_to_word(protocol):
protocols = {
'0': 'IP',
'1': 'ICMP',
'6': 'TCP',
'17': 'UDP'
}
return protocols[str(protocol)]
def soc_establish_conn(self):
'''
To create a socket
:return: nil
'''
'''
(You can skip it)
I just want to say something about the `socket.AF_INET` & `socket.AF_PACKET`.
When I use `socket.AF_INET`, I can only get one protocol at a time, such as `TCP`, `UDP`, or `ICMP`...
So, do I need to use a Multiprocessing Pool??
I have been thinking for a long time about multi-process parallelism ...
But, when I saw the annotation about `AF_PACKET` I got a clear idea. Why not unpack the MAC packet?
Well, you can see the note about AF_PACKET => `When using socket.AF_PACKET to create a socket,
it will be able to capture all Ethernet frames received or sent by the unit.`
So, the final version changed to use AF_PACKET, and I don't need to care about multi-processing!
'''
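# Added note for context: an AF_INET raw socket is tied to one transport protocol, e.g.
#   socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_TCP)
# whereas the AF_PACKET socket with ETH_P_ALL (0x0003) below sees every Ethernet frame,
# so TCP, UDP and ICMP can all be parsed from a single socket.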
try:
# self.s = socket.socket(socket.AF_INET, socket.SOCK_RAW, self.param)
self.s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003)) # set a packet socket conn
except:
print('Socket could not be created')
exit(-1)
print('Socket established success!')
self.unpack_eth_packet()
def unpack_eth_packet(self):
# for i in range(1, 20):
while True:
packet = self.s.recvfrom(65565)
packet = packet[0]
# parse ethernet header
eth_length = 14
eth_header = packet[: eth_length]
eth = unpack('!6s6sH', eth_header)
eth_protocol = socket.ntohs(eth[2]) # Convert a 16-bit integer from network to host byte order.
source_eth_addr = self.eth_addr(packet[6:12])
dest_eth_addr = self.eth_addr(packet[0:6])
self.Packet_MAC['Source MAC'] = source_eth_addr
self.Packet_MAC['Destination MAC'] = dest_eth_addr
if eth_protocol == 8:
self.unpack_ip_packet(packet, eth_length)
# add an interval between captures
time.sleep(INTERVAL)
def unpack_ip_packet(self, packet, eth_len):
'''
Unpacks the IP header of the packet.
:param eth_len: offset of the IP header (i.e. the length of the MAC frame header)
:param packet: the raw packet to be unpacked
:return: nil
'''
# Parse IP header
# take first 20 characters for the ip header
self.cnt += 1
ip_header = packet[eth_len: eth_len + 20]
# ip packet unpack
iph = unpack('!BBHH2sBBH4s4s', ip_header)
version_ihl = iph[0]
differ_service = iph[1]
version = version_ihl >> 4
ihl = version_ihl & 0xF
iph_lenth = ihl * 4
all_lenth = iph[2]
id = iph[3]
flag_N_offset = iph[4].hex()
flags = self.get_flag(flag_N_offset)
MF = flags[1]
DF = flags[0]
offst = self.get_offset(flag_N_offset)
ttl = iph[5]
protocol = iph[6]
self.Packet_IP['Version'] = version
self.Packet_IP['Differ Service'] = differ_service
self.Packet_IP['IP Header Length'] = ihl
self.Packet_IP['All Length'] = all_lenth
self.Packet_IP['Identification'] = id
self.Packet_IP['MF'] = MF
self.Packet_IP['DF'] = DF
self.Packet_IP['Offset'] = offst
self.Packet_IP['TTL'] = ttl
self.Packet_IP['Protocol'] = self.change_digit_to_word(protocol)
self.Packet_IP['Source Address'] = socket.inet_ntoa(iph[8])
self.Packet_IP['Destination Address'] = socket.inet_ntoa(iph[9])
# filter for ip in/out
if len(self.filter_in_ip) > 0 and self.Packet_IP['Source Address'] not in self.filter_in_ip:
return
if len(self.filter_out_ip) > 0 and self.Packet_IP['Destination Address'] not in self.filter_out_ip:
return
new_length = iph_lenth + eth_len # upgrade packet parser start length
if HAVE_SAVED and DF == '0':
if MF == '1':
with open('merge.'+str(self.cnt_merge)+'.txt', 'a') as f:
f.write('packet_cnt=' + str(self.cnt) + '\noffset=' + str(offst) + '\ndata=' + str(packet[new_length + 20:]) + '\n')
f.close()
elif MF == '0':
with open('merge.'+str(self.cnt_merge)+'.txt', 'a') as f:
f.write('packet_cnt=' + str(self.cnt) + '\noffset=' + str(offst) + '\ndata=' + str(packet[new_length + 20:]) + '\n')
f.close()
self.cnt_merge += 1
# classify different kinds of packet
if HAVE_FILTER_PROTOCOL:
if protocol == 6 and protocol in protocol_filter_list:
self.unpack_tcp_packet(new_length, packet)
elif protocol == 17 and protocol in protocol_filter_list:
self.unpack_udp_packet(new_length, packet)
elif protocol == 1 and protocol in protocol_filter_list:
self.unpack_icmp_packet(new_length, packet)
else:
return
else:
if protocol == 6:
self.unpack_tcp_packet(new_length, packet)
elif protocol == 17:
self.unpack_udp_packet(new_length, packet)
elif protocol == 1:
self.unpack_icmp_packet(new_length, packet)
else:
print('This Packet\'s Protocol is not in [ TCP , ICMP , UDP ]')
print()
def unpack_tcp_packet(self, iph_lenth, packet):
'''
Unpacks the TCP header and data segment of the packet.
:param iph_lenth: offset of the TCP header (MAC frame header + IP header length)
:param packet: the raw packet to be unpacked
:return: nil
'''
tcp_header = packet[iph_lenth:iph_lenth + 20]
tcph = unpack('!HHLLBBHHH', tcp_header)
source_port = tcph[0]
dest_port = tcph[1]
sequence = tcph[2]
acknowledgement = tcph[3]
doff_reserved = tcph[4]
tcph_length = doff_reserved >> 4
h_size = iph_lenth + tcph_length * 4
data_size = len(packet) - h_size
# TCP Packet's data segment
data = self.convert_hex_to_ascii(packet[h_size:])
if "HTTP" in data:
self.ack.append(acknowledgement)
ackn.append('')
if acknowledgement in self.ack:
self.record_http_msg(data, acknowledgement)
self.Packet_TCP['Source_port'] = source_port
self.Packet_TCP['Dest_port'] = dest_port
self.Packet_TCP['Sequence'] = sequence
self.Packet_TCP['Acknowledgement'] = acknowledgement
self.Packet_TCP['TCP Header Length'] = tcph_length
self.Packet_TCP['Data_seg'] = data
self.Packet_TCP['Data_length'] = data_size
if HAVE_SAVED:
with open('tcp_packet.txt', 'a') as f:
f.write('----- packet - index: ' + str(self.cnt) + ' -----\n')
for key, value in self.Packet_MAC.items():
f.write(key + ':' + str(value) + '\n')
f.write('\n')
for key, value in self.Packet_IP.items():
f.write(key + ':' + str(value) + '\n')
f.write('\n')
for key, value in self.Packet_TCP.items():
f.write(key + ':' + str(value) + '\n')
f.write('\n***************************\n\n')
print('----- packet - index: ', str(self.cnt), ' -----')
for key, value in self.Packet_MAC.items():
print(key, ':', value)
print()
for key, value in self.Packet_IP.items():
print(key, ':', value)
print()
for key, value in self.Packet_TCP.items():
print(key, ':', value)
print()
print('*' * 35)
print()
def unpack_udp_packet(self, iph_lenth, packet):
'''
Unpacks the UDP header and data segment of the packet.
:param iph_lenth: offset of the UDP header (MAC frame header + IP header length)
:param packet: the raw packet to be unpacked
:return: nil
'''
udph_length = 8
udp_header = packet[iph_lenth:iph_lenth + 8]
udph = unpack('!HHHH', udp_header)
source_port = udph[0]
dest_port = udph[1]
length = udph[2]
checksum = udph[3]
h_size = iph_lenth + udph_length
data_size = len(packet) - h_size
data = self.convert_hex_to_ascii(packet[h_size:])
self.Packet_UDP['Source_port'] = source_port
self.Packet_UDP['Dest_port'] = dest_port
self.Packet_UDP['Length'] = length
self.Packet_UDP['Checksum'] = checksum
self.Packet_UDP['Data_seg'] = data
self.Packet_UDP['Data_length'] = data_size
if HAVE_SAVED:
with open('udp_packet.txt', 'a') as f:
f.write('----- packet - index: ' + str(self.cnt) + ' -----\n')
for key, value in self.Packet_MAC.items():
f.write(key + ':' + str(value) + '\n')
f.write('\n')
for key, value in self.Packet_IP.items():
f.write(key + ':' + str(value) + '\n')
f.write('\n')
for key, value in self.Packet_UDP.items():
f.write(key + ':' + str(value) + '\n')
f.write('\n***************************\n\n')
print('----- packet - index: ', str(self.cnt), ' -----')
for key, value in self.Packet_MAC.items():
print(key, ':', value)
print()
for key, value in self.Packet_IP.items():
print(key, ':', value)
print()
for key, value in self.Packet_UDP.items():
print(key, ':', value)
print()
print('*' * 35)
print()
def unpack_icmp_packet(self, iph_lenth, packet):
'''
Unpacks the ICMP header and data segment of the packet.
:param iph_lenth: offset of the ICMP header (MAC frame header + IP header length)
:param packet: the raw packet to be unpacked
:return: nil
'''
icmph_length = 4
icmp_header = packet[iph_lenth:iph_lenth + 4]
icmph = unpack('!BBH', icmp_header)
icmp_type = icmph[0]
code = icmph[1]
checksum = icmph[2]
h_size = iph_lenth + icmph_length
data_size = len(packet) - h_size
data = self.convert_hex_to_ascii(packet[h_size:])
self.Packet_ICMP['Type'] = icmp_type
self.Packet_ICMP['Code'] = code
self.Packet_ICMP['Checksum'] = checksum
self.Packet_ICMP['Data_seg'] = data
self.Packet_ICMP['Data_length'] = data_size
if HAVE_SAVED:
with open('icmp_packet.txt', 'a') as f:
f.write('----- packet - index: ' + str(self.cnt) + ' -----\n')
for key, value in self.Packet_MAC.items():
f.write(key + ':' + str(value) + '\n')
f.write('\n')
for key, value in self.Packet_IP.items():
f.write(key + ':' + str(value) + '\n')
f.write('\n')
for key, value in self.Packet_ICMP.items():
f.write(key + ':' + str(value) + '\n')
f.write('\n***************************\n\n')
print('----- packet - index: ', str(self.cnt), ' -----')
for key, value in self.Packet_MAC.items():
print(key, ':', value)
print()
for key, value in self.Packet_IP.items():
print(key, ':', value)
print()
for key, value in self.Packet_ICMP.items():
print(key, ':', value)
print()
print('*' * 35)
print()
if __name__ == '__main__':
# pool = Pool()
snif = Sniffer()
print(snif.get_netcard())
if HAVE_FILTER_PROTOCOL:
str_filter = input('Please input protocol filter\n')
protocol_filter_list = str_filter.strip().split(' ')
print(protocol_filter_list)
for i in range(0, len(protocol_filter_list)):
if protocol_filter_list[i] in allows_protocol:
if protocol_filter_list[i] == 'TCP':
protocol_filter_list[i] = 6
elif protocol_filter_list[i] == 'ICMP':
protocol_filter_list[i] = 1
elif protocol_filter_list[i] == 'UDP':
protocol_filter_list[i] = 17
else:
print('Unrecognized protocol in the filter input; clearing the filter list...')
protocol_filter_list = []
# print(protocol_filter_list)
if HAVE_FILTER_IP:
str_filter_in_ip = input('Please input source-ip filter\n')
source_ip_filter_list = str_filter_in_ip.strip().split(' ')
snif.filter_in_ip = source_ip_filter_list # update the already-created Sniffer instance
str_filter_out_ip = input('Please input destination-ip filter\n')
destination_ip_filter_list = str_filter_out_ip.strip().split(' ')
snif.filter_out_ip = destination_ip_filter_list
try:
# pool.map(snif.soc_establish_conn, params) # udp will cause suspended
snif.soc_establish_conn()
except KeyboardInterrupt:
print('HALTED!')
``` |
{
"source": "joychugh/learning-kafka",
"score": 3
} |
#### File: joychugh/learning-kafka/twitter.py
```python
__author__ = 'jchugh'
from simple_oauth.oauth_request import Request
class TwitterStream(object):
"""
A class representing twitter client to get streaming tweets
"""
def __init__(self, oauth, json_parser):
"""
Initialize the twitter streaming client with the Oauth instance
:param oauth: Oauth instance configured for Twitter.
:type oauth: Oauth
:return: `TwitterStream` instance
"""
self.__oauth = oauth
self.__json_parser = json_parser
self.__response_stream = None
def get_tweets(self, request):
"""
Get the tweets from the given request
:param request: the streaming request
:type request: Request
:return: `Tweets` instance
:rtype: Tweets
"""
self.__oauth.make_request(request)
self.__response_stream = self.__oauth.get_response_content_iterator()
return Tweets(self.__response_stream, self.__json_parser)
class Tweets(object):
def __init__(self, data_stream, json_parser):
"""
Initialize the `Tweets` class
:param data_stream: raw data stream iterator from Twitter
:type data_stream: iterator
:param json_parser: json parser to parse the json data
:type json_parser: `Json`
:return: `Tweets` object
:rtype: Tweets
"""
self.__data_stream = data_stream
self.__json_parser = json_parser
def __iter__(self):
return self
def next(self):
"""
Returns decoded json as a dict
:return: the tweet as dict
:rtype: dict
"""
raw_text = next(self.__data_stream)
if len(raw_text) > 0:
try:
message = self.__json_parser.loads(raw_text)
if 'text' in message and 'user' in message:
return message
except ValueError as ve:
return None
else:
return None
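# Sketch of intended usage (added note; assumes an already-configured Oauth object and a
# streaming Request, and uses the stdlib json module as the parser):
#   stream = TwitterStream(oauth, json)
#   for tweet in stream.get_tweets(request):
#       if tweet is not None:
#           print(tweet['text'])
# Note that iteration relies on Python 2's `next()` protocol; under Python 3 the class
# would also need `__next__ = next`.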
``` |
{
"source": "joyc/watchlist",
"score": 2
} |
#### File: joyc/watchlist/test_watchlist.py
```python
import unittest
from watchlist import app, db
from watchlist.models import Movie, User
from watchlist.commands import forge, initdb
class WatchlistTestCase(unittest.TestCase):
def setUp(self):
# Update configuration
app.config.update(
TESTING=True,
SQLALCHEMY_DATABASE_URI='sqlite:///:memory:'
)
# Create the database and tables
db.create_all()
# Create test data: one user and one movie entry
user = User(name='Test', username='test')
user.set_password('<PASSWORD>')
movie = Movie(title='Test Movie Title', year='2019')
# Use add_all() to add multiple model instances at once, passing in a list
db.session.add_all([user, movie])
db.session.commit()
self.client = app.test_client() # Create a test client
self.runner = app.test_cli_runner() # Create a test CLI runner
def tearDown(self):
db.session.remove() # Clear the database session
db.drop_all() # Drop the database tables
def login(self):
self.client.post('/login', data=dict(
username='test',
password='<PASSWORD>'
), follow_redirects=True)
# Test that the app instance exists
def test_app_exist(self):
self.assertIsNotNone(app)
# Test that the app is in testing mode
def test_app_is_testing(self):
self.assertTrue(app.config['TESTING'])
# Test the 404 page
def test_404_page(self):
response = self.client.get('/nothing') # Pass in the target URL
data = response.get_data(as_text=True)
self.assertIn('Page Not Found - 404', data)
self.assertIn('Go Back', data)
self.assertEqual(response.status_code, 404) # Check the response status code
# Test the index page
def test_index_page(self):
response = self.client.get('/')
data = response.get_data(as_text=True)
self.assertIn('Test\'s Watchlist', data)
self.assertIn('Test Movie Title', data)
self.assertEqual(response.status_code, 200)
def test_login_protect(self):
response = self.client.get('/')
data = response.get_data(as_text=True)
self.assertNotIn('Logout', data)
self.assertNotIn('Settings', data)
self.assertNotIn('<form method="post">', data)
self.assertNotIn('Delete', data)
self.assertNotIn('Edit', data)
def test_login(self):
response = self.client.post('/login', data=dict(
username='test',
password='<PASSWORD>'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Login success.', data)
self.assertIn('Logout', data)
self.assertIn('Settings', data)
self.assertIn('Delete', data)
self.assertIn('Edit', data)
self.assertIn('<form method="post">', data)
response = self.client.post('/login', data=dict(
username='test',
password='<PASSWORD>'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Login success.', data)
self.assertIn('Invalid username or password.', data)
response = self.client.post('/login', data=dict(
username='wrong',
password='<PASSWORD>'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Login success.', data)
self.assertIn('Invalid username or password.', data)
response = self.client.post('/login', data=dict(
username='',
password='<PASSWORD>'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Login success.', data)
self.assertIn('Invalid input.', data)
response = self.client.post('/login', data=dict(
username='test',
password=''
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Login success.', data)
self.assertIn('Invalid input.', data)
def test_logout(self):
self.login()
response = self.client.get('/logout', follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Goodbye.', data)
self.assertNotIn('Logout', data)
self.assertNotIn('Settings', data)
self.assertNotIn('Delete', data)
self.assertNotIn('Edit', data)
self.assertNotIn('<form method="post">', data)
def test_settings(self):
self.login()
response = self.client.get('/settings')
data = response.get_data(as_text=True)
self.assertIn('Settings', data)
self.assertIn('Your Name', data)
response = self.client.post('/settings', data=dict(
name='',
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Settings updated.', data)
self.assertIn('Invalid input.', data)
response = self.client.post('/settings', data=dict(
name='Grey Li',
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Settings updated.', data)
self.assertIn('Grey Li', data)
def test_create_item(self):
self.login()
response = self.client.post('/', data=dict(
title='New Movie',
year='2019'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Item created.', data)
self.assertIn('New Movie', data)
response = self.client.post('/', data=dict(
title='',
year='2019'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item created.', data)
self.assertIn('Invalid input.', data)
response = self.client.post('/', data=dict(
title='New Movie',
year=''
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item created.', data)
self.assertIn('Invalid input.', data)
def test_update_item(self):
self.login()
response = self.client.get('/movie/edit/1')
data = response.get_data(as_text=True)
self.assertIn('Edit item', data)
self.assertIn('Test Movie Title', data)
self.assertIn('2019', data)
response = self.client.post('/movie/edit/1', data=dict(
title='New Movie Edited',
year='2019'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Item updated.', data)
self.assertIn('New Movie Edited', data)
response = self.client.post('/movie/edit/1', data=dict(
title='',
year='2019'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item updated.', data)
self.assertIn('Invalid input.', data)
response = self.client.post('/movie/edit/1', data=dict(
title='New Movie Edited Again',
year=''
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item updated.', data)
self.assertNotIn('New Movie Edited Again', data)
self.assertIn('Invalid input.', data)
def test_delete_item(self):
self.login()
response = self.client.post('/movie/delete/1', follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Item deleted.', data)
self.assertNotIn('Test Movie Title', data)
# Test the fake-data (forge) command
def test_forge_command(self):
result = self.runner.invoke(forge)
self.assertIn('Done.', result.output)
self.assertNotEqual(Movie.query.count(), 0)
# Test initializing the database
def test_initdb_command(self):
result = self.runner.invoke(initdb)
self.assertIn('Initialized database.', result.output)
# Test creating the admin account
def test_admin_command(self):
db.drop_all()
db.create_all()
result = self.runner.invoke(args=['admin', '--username', 'grey', '--password', '<PASSWORD>'])
self.assertIn('Creating user...', result.output)
self.assertIn('Done.', result.output)
self.assertEqual(User.query.count(), 1)
self.assertEqual(User.query.first().username, 'grey')
self.assertTrue(User.query.first().validate_password('<PASSWORD>'))
# Test updating the admin account
def test_admin_command_update(self):
# Use the args parameter to pass the full list of command arguments
result = self.runner.invoke(args=['admin', '--username', 'peter', '--password', '<PASSWORD>'])
self.assertIn('Updating user...', result.output)
self.assertIn('Done.', result.output)
self.assertEqual(User.query.count(), 1)
self.assertEqual(User.query.first().username, 'peter')
self.assertTrue(User.query.first().validate_password('<PASSWORD>'))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joydeba/autocount",
"score": 2
} |
#### File: joydeba/autocount/datasetIMG.py
```python
import torch
from torchvision import transforms
from torch.utils.data import Dataset
import glob
import os
from PIL import Image
import numpy as np
from xml.dom import minidom
import cv2
import math
class DataLoaderInstanceSegmentation(Dataset):
def __init__(self, folder_path="ethz_1_all", train = True):
super(DataLoaderInstanceSegmentation, self).__init__()
if train:
folder_path="ethz_1_all"
else:
folder_path="images_testing"
self.train = train
self.img_files = glob.glob(os.path.join(folder_path,"images","*.jpg"))
self.ins_mask_files = []
self.filenames = []
self.to_tensor = transforms.ToTensor()
for img_path in self.img_files:
self.ins_mask_files.append(os.path.join(folder_path,'croped_masks',os.path.basename(img_path)))
self.filenames.append(os.path.basename(img_path))
def __len__(self):
return len(self.img_files)
def __getitem__(self, index):
img_path = self.img_files[index]
ins_mask_path = self.ins_mask_files[index]
filename = self.filenames[index]
data = self.to_tensor(Image.open(img_path).convert('RGB'))
label_ins = self.to_tensor(Image.open(ins_mask_path).convert('RGB'))
if self.train:
return data, label_ins
else:
return data, filename
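# Usage sketch (added note; the folder layout is assumed from the paths above):
#   train_set = DataLoaderInstanceSegmentation(train=True)
#   img, mask = train_set[0]   # both are [3, H, W] float tensors scaled to [0, 1]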
``` |
{
"source": "joydeba/BackportingPR",
"score": 3
} |
#### File: joydeba/BackportingPR/ModelNet.py
```python
import tensorflow as tf
'''
Model class of ReBack.
Initializes the model hyperparameters from the command-line inputs
'''
class ReBack(object):
def __init__(self, max_msg_length, max_meta_length, max_code_length, max_code_line, max_code_hunk, vocab_size_text, vocab_size_meta,
vocab_size_code, embedding_size_text, filter_sizes, num_filters, l2_reg_lambda, num_classes,
hidden_units):
self.max_msg_length = max_msg_length
self.max_meta_length = max_meta_length
self.max_code_length = max_code_length
self.max_code_line = max_code_line
self.max_code_hunk = max_code_hunk
self.vocab_size_text = vocab_size_text
self.vocab_size_meta = vocab_size_meta
self.vocab_size_code = vocab_size_code
self.embedding_size_text = embedding_size_text
self.filter_sizes = filter_sizes
self.num_filters = num_filters
self.l2_reg_lambda = l2_reg_lambda
self.num_classes = num_classes
self.hidden_units = hidden_units
def _create_place_holder(self):
# Placeholders for discussion and code inputs
self.input_msg = tf.placeholder(tf.int32, [None, self.max_msg_length], name='input_msg')
self.input_meta = tf.placeholder(tf.int32, [None, self.max_meta_length], name='input_meta')
self.input_addedcode = tf.placeholder(tf.int32,
[None, self.max_code_hunk, self.max_code_line, self.max_code_length],
name='input_addedcode')
self.input_removedcode = tf.placeholder(tf.int32,
[None, self.max_code_hunk, self.max_code_line, self.max_code_length],
name='input_removedcode')
# Target classes of the ReBack
self.input_y = tf.placeholder(tf.float32, [None, self.num_classes], name="input_y")
# Parameters for the loss function (L2 regularization accumulator)
self.l2_loss = tf.constant(0.0)
self.num_filters_total = self.num_filters * len(self.filter_sizes)
# Dropout placeholder.
self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
'''
This part creates the embedding layers for the discussion message and the commit code
'''
# Embedding vector for each word from discussion and commit code
def _create_embedding_msg_layer(self):
with tf.device('/cpu:0'), tf.name_scope("embedding_msg"):
self.W_msg = tf.Variable(
tf.random_uniform([self.vocab_size_text, self.embedding_size_text], -1.0, 1.0),
name="W_msg")
def _create_embedding_meta_layer(self):
with tf.device('/cpu:0'), tf.name_scope("embedding_meta"):
self.W_meta = tf.Variable(
tf.random_uniform([self.vocab_size_meta, self.embedding_size_text], -1.0, 1.0),
name="W_meta")
def _create_embedding_code_layer(self):
with tf.device('/cpu:0'), tf.name_scope("embedding_code"):
self.W_code = tf.Variable(
tf.random_uniform([self.vocab_size_code, self.embedding_size_text], -1.0, 1.0),
name="W_code")
'''
Mapping between embedding vector - discussion, meta, commit code
'''
def _create_embedding_chars_layer(self, W, input):
embedded_chars = tf.nn.embedding_lookup(W, input)
return tf.expand_dims(embedded_chars, -1)
# Discussion embedding layer
def _create_embedding_chars_msg_layer(self):
self.embedded_chars_expanded_msg = self._create_embedding_chars_layer(W=self.W_msg,
input=self.input_msg)
# Meta embedding layer
def _create_embedding_chars_meta_layer(self):
self.embedded_chars_expanded_meta = self._create_embedding_chars_layer(W=self.W_meta,
input=self.input_meta)
# Commit code embedding layer
def _create_embedding_chars_code_layer(self):
self.embedded_chars_expanded_addedcode = self._create_embedding_chars_layer(W=self.W_code,
input=self.input_addedcode)
self.embedded_chars_expanded_removedcode = self._create_embedding_chars_layer(W=self.W_code,
input=self.input_removedcode)
# Pooling helpers for the 2-D convolution outputs
def pool_outputs_2d(self, embedded_chars_expanded, W, b, max_length, filter_size):
conv = tf.nn.conv2d(
embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Applying nonlinearity by using elu
h = tf.nn.elu(tf.nn.bias_add(conv, b), name="elu")
# Maxpooling on outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, max_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
return pooled
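# Shape sketch (added note): embedded_chars_expanded is [batch, max_length, embedding_size, 1];
# after the VALID conv with a [filter_size, embedding_size, 1, num_filters] kernel the output
# is [batch, max_length - filter_size + 1, 1, num_filters], and the max-pool over the remaining
# length reduces it to [batch, 1, 1, num_filters].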
def h_pool_2d(self, num_filters_total, pooled_outputs):
h_pool_ = tf.reshape(tf.concat(pooled_outputs, 3), [-1, num_filters_total])
return h_pool_
# Weight embedding layer for discussion and meta. Then pooling for discussion and meta
def _create_weight_conv_msg_layer(self):
self.w_filter_text, self.b_filter_text = [], []
for i, filter_size in enumerate(self.filter_sizes):
with tf.device("/cpu:" + str(filter_size)):
with tf.name_scope("weight-conv-maxpool-text-%s" % filter_size):
filter_shape_text = [filter_size, self.embedding_size_text, 1, self.num_filters]
# Convolution layer
w = tf.Variable(tf.truncated_normal(filter_shape_text, stddev=0.1), name="W_filter_text")
b = tf.Variable(tf.constant(0.1, shape=[self.num_filters]), name="b")
self.w_filter_text.append(w)
self.b_filter_text.append(b)
def _create_weight_conv_meta_layer(self):
self.w_filter_text, self.b_filter_text = [], []
for i, filter_size in enumerate(self.filter_sizes):
with tf.device("/cpu:" + str(filter_size)):
with tf.name_scope("weight-conv-maxpool-text-%s" % filter_size):
filter_shape_text = [filter_size, self.embedding_size_text, 1, self.num_filters]
# Convolution layer
w = tf.Variable(tf.truncated_normal(filter_shape_text, stddev=0.1), name="W_filter_text")
b = tf.Variable(tf.constant(0.1, shape=[self.num_filters]), name="b")
self.w_filter_text.append(w)
self.b_filter_text.append(b)
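# Added observation: this method duplicates _create_weight_conv_msg_layer and reuses the same
# w_filter_text / b_filter_text attributes. Because build_graph runs the msg pooling layer
# before this method, each branch still gets its own TF variables, but the attributes point
# at the meta filters afterwards.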
def _create_conv_maxpool_2d_layer(self, filter_sizes, embedded_chars_expanded, W, b, max_msg_length):
pooled_outputs_text = []
for i, filter_size in enumerate(filter_sizes):
with tf.device("/cpu:" + str(filter_size)):
pooled_outputs_text.append(self.pool_outputs_2d(embedded_chars_expanded=embedded_chars_expanded,
W=W[i], b=b[i], max_length=max_msg_length,
filter_size=filter_size))
return pooled_outputs_text
def _create_conv_maxpool_msg_layer(self):
pooled_outputs_discussion = self._create_conv_maxpool_2d_layer(filter_sizes=self.filter_sizes,
embedded_chars_expanded=self.embedded_chars_expanded_msg,
W=self.w_filter_text, b=self.b_filter_text,
max_msg_length=self.max_msg_length)
self.pooled_outputs_discussion = self.h_pool_2d(num_filters_total=len(self.filter_sizes) * self.num_filters,
pooled_outputs=pooled_outputs_discussion)
def _create_conv_maxpool_meta_layer(self):
pooled_outputs_meta = self._create_conv_maxpool_2d_layer(filter_sizes=self.filter_sizes,
embedded_chars_expanded=self.embedded_chars_expanded_meta,
W=self.w_filter_text, b=self.b_filter_text,
max_msg_length=self.max_meta_length)
self.pooled_outputs_meta = self.h_pool_2d(num_filters_total=len(self.filter_sizes) * self.num_filters,
pooled_outputs=pooled_outputs_meta)
'''
Weight embedding layer for lines in commit code
'''
def _create_embedding_code_line(self, embedded_chars_expanded):
return tf.reduce_mean(embedded_chars_expanded, 3)
def _create_embedding_addedcode_line(self):
self.embedded_chars_expanded_addedcode_line = self._create_embedding_code_line(
embedded_chars_expanded=self.embedded_chars_expanded_addedcode)
def _create_embedding_removedcode_line(self):
self.embedded_chars_expanded_removedcode_line = self._create_embedding_code_line(
embedded_chars_expanded=self.embedded_chars_expanded_removedcode)
'''
Weight embedding layer for hunks in commit code
'''
def _create_weight_conv_code_layer(self):
self.w_filter_code, self.b_filter_code = list(), list()
for i, filter_size in enumerate(self.filter_sizes):
with tf.device("/cpu:" + str(filter_size)):
with tf.name_scope("weight-conv-maxpool-lines-%s" % filter_size):
filter_shape_lines_code = [1, filter_size, self.embedding_size_text, 1, self.num_filters]
# Convolution layer
w = tf.Variable(tf.truncated_normal(filter_shape_lines_code, stddev=0.1),
name="W_filter_lines_%s" % filter_size)
b = tf.Variable(tf.constant(0.1, shape=[self.num_filters]), name="b_filter_lines_%s" % filter_size)
self.w_filter_code.append(w)
self.b_filter_code.append(b)
return self.w_filter_code, self.b_filter_code
'''
Weight embedding layer for hunks in commit code
'''
def pool_outputs_3d(self, embedded_chars_expanded, W, b, max_length, filter_size):
conv = tf.nn.conv3d(
embedded_chars_expanded,
W,
strides=[1, 1, 1, 1, 1],
padding="VALID",
name="conv")
# Applying nonlinearity by using elu
h = tf.nn.elu(tf.nn.bias_add(conv, b), name="elu")
pooled = tf.nn.max_pool3d(
h,
ksize=[1, 1, max_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1, 1],
padding='VALID',
name="pool")
return pooled
def h_pool_3d(self, num_filters_total, pooled_outputs, height):
pooled_outputs = tf.concat(pooled_outputs, 4)
h_pool_ = tf.reshape(pooled_outputs, [-1, height, num_filters_total])
return h_pool_
def _create_conv_maxpool_3d_layer(self, filter_sizes, embedded_chars, W, b, max_length):
pool_outputs_linescode = []
for i, filter_size in enumerate(filter_sizes):
with tf.device("/cpu:" + str(filter_size)):
# convolution and maxpool for the commit-code lines
pool_outputs_linescode.append(
self.pool_outputs_3d(embedded_chars_expanded=embedded_chars, W=W[i], b=b[i],
filter_size=filter_size, max_length=max_length))
return pool_outputs_linescode
'''
Embedding vectors for hunks in commit code
'''
def _create_conv_maxpool_hunk_addedcode_layer(self):
pooled_outputs_hunk_addedcode = self._create_conv_maxpool_3d_layer(filter_sizes=self.filter_sizes,
embedded_chars=self.embedded_chars_expanded_addedcode_line,
W=self.w_filter_code,
b=self.b_filter_code,
max_length=self.max_code_line)
self.pooled_outputs_hunk_addedcode = self.h_pool_3d(
num_filters_total=len(self.filter_sizes) * self.num_filters,
pooled_outputs=pooled_outputs_hunk_addedcode,
height=self.max_code_hunk)
def _create_conv_maxpool_hunk_removedcode_layer(self):
pooled_outputs_hunk_removedcode = self._create_conv_maxpool_3d_layer(filter_sizes=self.filter_sizes,
embedded_chars=self.embedded_chars_expanded_removedcode_line,
W=self.w_filter_code,
b=self.b_filter_code,
max_length=self.max_code_line)
self.pooled_outputs_hunk_removedcode = self.h_pool_3d(
num_filters_total=len(self.filter_sizes) * self.num_filters,
pooled_outputs=pooled_outputs_hunk_removedcode,
height=self.max_code_hunk)
'''
Embedding vectors for added code and removed code
'''
def _create_embedding_addedcode(self):
self.embedding_addedcode_layer = tf.contrib.layers.flatten(self.pooled_outputs_hunk_addedcode)
def _create_embedding_removedcode(self):
self.embedding_removedcode_layer = tf.contrib.layers.flatten(self.pooled_outputs_hunk_removedcode)
'''
Fusion layer for discussion, meta and commit code
'''
def _create_fusion_layer(self):
self.fusion_layer = tf.concat(
[self.pooled_outputs_discussion, self.pooled_outputs_meta, self.embedding_addedcode_layer, self.embedding_removedcode_layer], 1)
def _create_fusion_discussion_meta_diffcode_layer(self):
self.diff_code = self.embedding_addedcode_layer - self.embedding_removedcode_layer
self.fusion_layer = tf.concat(
[self.pooled_outputs_discussion, self.pooled_outputs_meta, self.diff_code], 1)
'''
Fusion layer for discussion
'''
def _create_fusion_discussion_layer(self):
self.fusion_layer = self.pooled_outputs_discussion
'''
Fusion layer for meta
'''
def _create_fusion_meta_layer(self):
self.fusion_layer = self.pooled_outputs_meta
'''
Fusion layer for code
'''
def _create_fusion_code_layer(self):
self.fusion_layer = tf.concat(
[self.embedding_addedcode_layer, self.embedding_removedcode_layer], 1)
def _create_fusion_diffcode_layer(self):
self.diff_code = self.embedding_addedcode_layer - self.embedding_removedcode_layer
self.fusion_layer = self.diff_code
# Apply dropout to the fusion layer
def _adding_dropout_fusion_layer(self):
self.fusion_layer_dropout = tf.nn.dropout(self.fusion_layer, self.dropout_keep_prob)
# Weights connecting the fusion layer to a hidden layer and then the output layer
def _create_weight_fusion_hidden_layer(self):
with tf.name_scope("weight_fusion_hidden"):
self.W_hidden = tf.get_variable(
"W_hidden",
shape=[self.fusion_layer_dropout.get_shape()[1], self.hidden_units],
initializer=tf.contrib.layers.xavier_initializer())
self.b_hidden = tf.Variable(tf.constant(0.1, shape=[self.hidden_units]), name="b_hidden")
self.A_hidden = tf.nn.elu(tf.nn.xw_plus_b(self.fusion_layer_dropout, self.W_hidden, self.b_hidden))
self.A_hidden_dropout = tf.nn.dropout(self.A_hidden, self.dropout_keep_prob)
self.W_fusion = tf.get_variable(
"W_fusion",
shape=[self.A_hidden_dropout.get_shape()[1], self.num_classes],
initializer=tf.contrib.layers.xavier_initializer())
self.b_fusion = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name="b")
self.l2_loss += tf.nn.l2_loss(self.W_fusion)
self.l2_loss += tf.nn.l2_loss(self.b_fusion)
def _create_output_fusion_hidden_layer(self):
with tf.name_scope("output"):
self.scores = tf.nn.xw_plus_b(self.A_hidden_dropout, self.W_fusion, self.b_fusion, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# Weights connecting the fusion layer directly to the output layer
def _create_weight_fusion_layer(self):
with tf.name_scope("weight_fusion"):
self.W_fusion = tf.get_variable(
"W_fusion",
shape=[self.fusion_layer_dropout.get_shape()[1], self.num_classes],
initializer=tf.contrib.layers.xavier_initializer())
self.b_fusion = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name="b")
self.l2_loss += tf.nn.l2_loss(self.W_fusion)
self.l2_loss += tf.nn.l2_loss(self.b_fusion)
# Creating output layer (Score and Prediction)
def _create_output_layer(self):
with tf.name_scope("output"):
self.scores = tf.nn.xw_plus_b(self.fusion_layer_dropout, self.W_fusion, self.b_fusion, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
def _create_loss_function(self):
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + self.l2_reg_lambda * self.l2_loss
# Measure definitions
def _measure_accuracy(self):
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
def _measure_precision(self):
with tf.name_scope("precision"):
self.precision = tf.metrics.precision(tf.argmax(self.input_y, 1), self.predictions, name ="precision")
def _measure_recall(self):
with tf.name_scope("recall"):
self.recall = tf.metrics.recall(tf.argmax(self.input_y, 1), self.predictions, name ="recall")
def _measure_f1(self):
with tf.name_scope("f1_score"):
self.f1_score = tf.contrib.metrics.f1_score(tf.argmax(self.input_y, 1), self.predictions, name ="f1_score")
def _measure_auc(self):
with tf.name_scope("auc"):
self.auc = tf.metrics.auc(tf.argmax(self.input_y, 1), self.predictions, name ="auc")
# Building the Net
def build_graph(self, model):
if model == "all":
self._create_place_holder()
self._create_embedding_msg_layer()
self._create_embedding_chars_msg_layer()
self._create_weight_conv_msg_layer()
self._create_conv_maxpool_msg_layer()
self._create_embedding_meta_layer()
self._create_embedding_chars_meta_layer()
self._create_weight_conv_meta_layer()
self._create_conv_maxpool_meta_layer()
self._create_embedding_code_layer()
self._create_embedding_chars_code_layer()
self._create_embedding_addedcode_line()
self._create_embedding_removedcode_line()
self._create_weight_conv_code_layer()
self._create_conv_maxpool_hunk_addedcode_layer()
self._create_embedding_addedcode()
self._create_conv_maxpool_hunk_removedcode_layer()
self._create_embedding_removedcode()
self._create_fusion_discussion_meta_diffcode_layer()
self._adding_dropout_fusion_layer()
self._create_weight_fusion_hidden_layer()
self._create_output_fusion_hidden_layer()
self._create_loss_function()
self._measure_accuracy()
self._measure_precision()
self._measure_recall()
self._measure_f1()
self._measure_auc()
elif model == "discussion":
self._create_place_holder()
self._create_embedding_msg_layer()
self._create_embedding_chars_msg_layer()
self._create_weight_conv_msg_layer()
self._create_conv_maxpool_msg_layer()
self._create_fusion_discussion_layer()
self._adding_dropout_fusion_layer()
self._create_weight_fusion_layer()
self._create_output_layer()
self._create_loss_function()
self._measure_accuracy()
self._measure_precision()
self._measure_recall()
self._measure_f1()
self._measure_auc()
elif model == "meta":
self._create_place_holder()
self._create_embedding_meta_layer()
self._create_embedding_chars_meta_layer()
self._create_weight_conv_meta_layer()
self._create_conv_maxpool_meta_layer()
self._create_fusion_meta_layer()
self._adding_dropout_fusion_layer()
self._create_weight_fusion_layer()
self._create_output_layer()
self._create_loss_function()
self._measure_accuracy()
self._measure_precision()
self._measure_recall()
self._measure_f1()
self._measure_auc()
elif model == "prcodechange":
self._create_place_holder()
self._create_embedding_code_layer()
self._create_embedding_chars_code_layer()
self._create_embedding_addedcode_line()
self._create_embedding_removedcode_line()
self._create_weight_conv_code_layer()
self._create_conv_maxpool_hunk_addedcode_layer()
self._create_embedding_addedcode()
self._create_conv_maxpool_hunk_removedcode_layer()
self._create_embedding_removedcode()
self._create_fusion_diffcode_layer()
self._adding_dropout_fusion_layer()
self._create_weight_fusion_hidden_layer()
self._create_output_fusion_hidden_layer()
self._create_loss_function()
self._measure_accuracy()
self._measure_precision()
self._measure_recall()
self._measure_f1()
self._measure_auc()
```
#### File: joydeba/BackportingPR/predict.py
```python
from Utils import load_dict_file, mini_batches, write_file
from padding import padding_pred_commit
import os
import tensorflow as tf
import numpy as np
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score , roc_auc_score
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x_sum = np.sum(np.exp(x), axis=1)
return np.exp(x) / e_x_sum[:, None]
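# Quick sanity check (added example): softmax(np.array([[1.0, 1.0], [0.0, 2.0]])) returns rows
# that each sum to 1, roughly [[0.5, 0.5], [0.119, 0.881]].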
def predict_model(commits, params):
path_dict = os.path.abspath(os.path.join(os.path.curdir, params.model))
dict_msg = load_dict_file(path_file=path_dict + '/dict_msg.txt')
dict_meta = load_dict_file(path_file=path_dict + '/dict_meta.txt')
dict_code = load_dict_file(path_file=path_dict + '/dict_code.txt')
pad_msg, pad_meta, pad_added_code, pad_removed_code, labels = padding_pred_commit(commits=commits,
params=params, dict_msg=dict_msg, dict_meta = dict_meta,
dict_code=dict_code)
checkpoint_dir = path_dict
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=params.allow_soft_placement,
log_device_placement=params.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Loading saved meta graph and restoring variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
# m_recall = tf.metrics.recall(tf.argmax(input_y, 1), predictions, name ="recall")
# m_f1_score = tf.contrib.metrics.f1_score(tf.argmax(input_y, 1), predictions, name ="f1_score")
# m_auc = tf.metrics.auc(tf.argmax(input_y, 1), predictions, name ="auc")
# m_precision = tf.metrics.precision(tf.argmax(input_y, 1), predictions, name ="precision")
# Getting placeholders from graph by name
input_msg = graph.get_operation_by_name("input_msg").outputs[0]
input_meta = graph.get_operation_by_name("input_meta").outputs[0]
input_addedcode = graph.get_operation_by_name("input_addedcode").outputs[0]
input_removedcode = graph.get_operation_by_name("input_removedcode").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensor to evaluate (output scores)
scores = graph.get_operation_by_name("output/scores").outputs[0]
# Batches for one epoch
batches = mini_batches(X_msg=pad_msg, X_meta =pad_meta, X_added_code=pad_added_code,
X_removed_code=pad_removed_code,
Y=labels, mini_batch_size=params.batch_size)
commits_scores = list()
# accuracy_list = list()
# precision_list = list()
# recall_list = list()
# f1_score_list = list()
# auc_list = list()
# init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
# sess.run(init)
for batch in batches:
batch_input_msg, batch_input_meta, batch_input_added_code, batch_input_removed_code, batch_input_labels = batch
# correct_predictions = tf.equal(predictions, tf.argmax(batch_input_labels, 1))
# m_accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
# batch_scores, accuracy, precision, recall, f1_score, auc = sess.run([scores, m_accuracy, m_precision, m_recall, m_f1_score, m_auc],
# {input_msg: batch_input_msg, input_meta: batch_input_meta, input_addedcode: batch_input_added_code,
# input_removedcode: batch_input_removed_code, input_y:batch_input_labels, dropout_keep_prob: 1.0})
batch_scores = sess.run(scores,
{input_msg: batch_input_msg, input_meta: batch_input_meta, input_addedcode: batch_input_added_code,
input_removedcode: batch_input_removed_code, dropout_keep_prob: 1.0})
batch_scores = np.ravel(softmax(batch_scores)[:, [1]])
commits_scores = np.concatenate([commits_scores, batch_scores])
# accuracy_list = np.concatenate([accuracy_list, [accuracy]])
# precision_list = np.concatenate([precision_list, [precision[0]]])
# recall_list = np.concatenate([recall_list, [recall[0]]])
# f1_score_list = np.concatenate([f1_score_list, [f1_score[0]]])
# auc_list = np.concatenate([auc_list, [auc[0]]])
# print("acc {:g}, preci {}, reca {}, f1 {}, auc {}".format(np.mean(accuracy_list), np.mean(precision_list), np.mean(recall_list), np.mean(f1_score_list), np.mean(auc_list)))
write_file(path_file=os.path.abspath(os.path.join(os.path.curdir)) + '/prediction.txt',
data=commits_scores)
y_test = labels.dot(1 << np.arange(labels.shape[-1] - 1, -1, -1)).tolist()
predicted = [1 if value > 0.5 else 2 for value in commits_scores.tolist()]
precision, recall, fscore, support = score(y_test, predicted)
accuracy = accuracy_score(y_test, predicted)
auc_score = roc_auc_score(y_test, predicted)
print('Accuracy: {}'.format(accuracy))
print('precision: {}'.format(precision))
print('recall: {}'.format(recall))
print('fscore: {}'.format(fscore))
print('AUC: {}'.format(auc_score))
```
#### File: joydeba/BackportingPR/reformating.py
```python
def reformat_file(commits, num_file):
for c in commits:
if len(c['code']) > num_file:
code_files = c['code']
c.update({'code': [code_files[0]]})
return commits
def update_hunk(hunk, num_hunk, num_loc, num_leng):
new_hunk = dict()
for key in hunk:
if key <= num_hunk:
loc_values = hunk[key][:num_loc]
length_values = list()
for v in loc_values:
split_v = v.split(',')[:num_leng]
length_values.append(','.join(split_v))
new_hunk[key] = length_values
return new_hunk
def reformat_hunk(commits, num_hunk, num_loc, num_leng):
for c in commits:
hunk = c['code'][0]
new_added_hunk = update_hunk(hunk=hunk['added'], num_hunk=num_hunk, num_loc=num_loc, num_leng=num_leng)
new_removed_hunk = update_hunk(hunk=hunk['removed'], num_hunk=num_hunk, num_loc=num_loc, num_leng=num_leng)
hunk.update({'added': new_added_hunk})
hunk.update({'removed': new_removed_hunk})
return commits
```
#### File: joydeba/BackportingPR/Utils.py
```python
import numpy as np
import math
import os
from extracting import commit_id, commit_port, commit_msg, commit_date, commit_code, commit_meta
from reformating import reformat_file, reformat_hunk
def load_file(path_file):
lines = list(open(path_file, "r").readlines())
lines = [l.strip() for l in lines]
return lines
def commits_index(commits):
commits_index = [i for i, c in enumerate(commits) if c.startswith("commits:")]
return commits_index
def commit_info(commit):
id = commit_id(commit)
port = commit_port(commit)
date = commit_date(commit)
meta = commit_meta(commit)
msg = commit_msg(commit)
code = commit_code(commit)
return id, port, date, meta, msg, code
def extract_commit(path_file):
commits = load_file(path_file=path_file)
indexes = commits_index(commits=commits)
dicts = list()
for i in range(0, len(indexes)):
dict = {}
if i == len(indexes) - 1:
id, port, date, meta, msg, code = commit_info(commits[indexes[i]:])
else:
id, port, date, meta, msg, code = commit_info(commits[indexes[i]:indexes[i + 1]])
dict["id"] = id
dict["port"] = port
dict["date"] = date
dict["meta"] = meta
dict["msg"] = msg
dict["code"] = code
dicts.append(dict)
return dicts
def reformat_meta(commits):
# Placeholder for future preprocessing
return commits
def reformat_discussion(commits):
# Placeholder for future preprocessing
return commits
def reformat_commit_code(commits, num_file, num_hunk, num_loc, num_leng):
commits = reformat_file(commits=commits, num_file=num_file)
commits = reformat_hunk(commits=commits, num_hunk=num_hunk, num_loc=num_loc, num_leng=num_leng)
return commits
def random_mini_batch(X_msg, X_meta, X_added_code, X_removed_code, Y, mini_batch_size=64, seed=0):
m = X_msg.shape[0] # Number of training samples
mini_batches = []
np.random.seed(seed)
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X_msg = X_msg[permutation, :]
shuffled_X_meta = X_meta[permutation, :]
shuffled_X_added = X_added_code[permutation, :, :, :]
shuffled_X_removed = X_removed_code[permutation, :, :, :]
if len(Y.shape) == 1:
shuffled_Y = Y[permutation]
else:
shuffled_Y = Y[permutation, :]
# Step 2: Partition. Minus the end case.
num_complete_minibatches = math.floor(
m / float(mini_batch_size)) # number of mini batches of size mini_batch_size in your partitioning
num_complete_minibatches = int(num_complete_minibatches)
for k in range(0, num_complete_minibatches):
mini_batch_X_msg = shuffled_X_msg[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
mini_batch_X_meta = shuffled_X_meta[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
mini_batch_X_added = shuffled_X_added[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :, :, :]
mini_batch_X_removed = shuffled_X_removed[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :, :, :]
if len(Y.shape) == 1:
mini_batch_Y = Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size]
else:
mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
mini_batch = (mini_batch_X_msg, mini_batch_X_meta, mini_batch_X_added, mini_batch_X_removed, mini_batch_Y)
mini_batches.append(mini_batch)
# End case
if m % mini_batch_size != 0:
mini_batch_X_msg = shuffled_X_msg[num_complete_minibatches * mini_batch_size: m, :]
mini_batch_X_meta = shuffled_X_meta[num_complete_minibatches * mini_batch_size: m, :]
mini_batch_X_added = shuffled_X_added[num_complete_minibatches * mini_batch_size: m, :, :, :]
mini_batch_X_removed = shuffled_X_removed[num_complete_minibatches * mini_batch_size: m, :, :, :]
if len(Y.shape) == 1:
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m]
else:
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m, :]
mini_batch = (mini_batch_X_msg, mini_batch_X_meta, mini_batch_X_added, mini_batch_X_removed, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
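# Usage note (added): with N training commits this yields ceil(N / mini_batch_size) tuples of
# (msg, meta, added_code, removed_code, labels), shuffled with the given seed; mini_batches()
# below performs the same partitioning without shuffling, which keeps prediction scores aligned
# with the input order.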
def write_dict_file(path_file, dictionary):
split_path = path_file.split("/")
path_ = split_path[:len(split_path) - 1]
path_ = "/".join(path_)
if not os.path.exists(path_):
os.makedirs(path_)
with open(path_file, 'w') as out_file:
for key in dictionary.keys():
out_file.write(str(key) + '\t' + str(dictionary[key]))
out_file.write("\n")
out_file.close()
def mini_batches(X_msg, X_meta, X_added_code, X_removed_code, Y, mini_batch_size=64, seed=0):
m = X_msg.shape[0] # Training examples
mini_batches = []
np.random.seed(seed)
# Step 1: No shuffle (X, Y)
shuffled_X_msg = X_msg
shuffled_X_meta = X_meta
shuffled_X_added = X_added_code
shuffled_X_removed = X_removed_code
shuffled_Y = Y
# Step 2: Partition (X, Y) without the end case.
num_complete_minibatches = math.floor(
        m / float(mini_batch_size))  # number of mini-batches of size mini_batch_size in your partitioning
num_complete_minibatches = int(num_complete_minibatches)
for k in range(0, num_complete_minibatches):
mini_batch_X_msg = shuffled_X_msg[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
mini_batch_X_meta = shuffled_X_meta[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
mini_batch_X_added = shuffled_X_added[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :, :, :]
mini_batch_X_removed = shuffled_X_removed[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :, :, :]
mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
mini_batch = (mini_batch_X_msg, mini_batch_X_meta, mini_batch_X_added, mini_batch_X_removed, mini_batch_Y)
mini_batches.append(mini_batch)
# End case
if m % mini_batch_size != 0:
mini_batch_X_msg = shuffled_X_msg[num_complete_minibatches * mini_batch_size: m, :]
mini_batch_X_meta = shuffled_X_meta[num_complete_minibatches * mini_batch_size: m, :]
mini_batch_X_added = shuffled_X_added[num_complete_minibatches * mini_batch_size: m, :, :, :]
mini_batch_X_removed = shuffled_X_removed[num_complete_minibatches * mini_batch_size: m, :, :, :]
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m, :]
mini_batch = (mini_batch_X_msg, mini_batch_X_meta, mini_batch_X_added, mini_batch_X_removed, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
def load_dict_file(path_file):
lines = list(open(path_file, "r").readlines())
dictionary = dict()
for line in lines:
key, value = line.split('\t')[0], line.split('\t')[1]
dictionary[key] = value
return dictionary
def write_file(path_file, data):
split_path = path_file.split("/")
path_ = split_path[:len(split_path) - 1]
path_ = "/".join(path_)
if not os.path.exists(path_):
os.makedirs(path_)
with open(path_file, 'w') as out_file:
for line in data:
# write line to output file
out_file.write(str(line))
out_file.write("\n")
out_file.close()
``` |
{
"source": "joydeep1701/GSTBull",
"score": 2
} |
#### File: joydeep1701/GSTBull/app.py
```python
from flask import Flask, flash, redirect, render_template, request, session, url_for, Response, make_response
from flask_session import Session
from tempfile import gettempdir
import json
from helper import *
from sql import *
import authenticator
import ledgers
import vouchers
import gstr
import download
import drive
import data
app = Flask(__name__)
app.config["SESSION_FILE_DIR"] = "./session/"
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
db = SQL("sqlite:///watchdog.db")
@app.route('/statecodes')
def dumpstatecodes():
return json.dumps(state_codes)
@app.route('/ledger/add', methods=['GET','POST'])
@login_required
def addledger():
if request.method == "GET":
return render_template('add_ledger.html')
else:
ledgers.create(request.form, session['company_id'])
return render_template('add_ledger.html')
@app.route('/ledger/search/<s>')
@login_required
def searchledger(s):
return json.dumps(ledgers.search(s, session['company_id']))
@app.route('/ledger/data/<ledger_id>')
@login_required
def getledgerdata(ledger_id):
return json.dumps(ledgers.getLedgerById(ledger_id, session['company_id'])[0])
@app.route('/ledger/edit/<name>', methods=['GET','POST'])
@login_required
def editledger(name):
if request.method == "GET":
return render_template('add_ledger.html',
            data=ledgers.search(name, session['company_id'])[0])
else:
ledgers.create(request.form, session['company_id'])
return render_template('add_ledger.html')
@app.route('/sales/add', methods=['GET','POST'])
@login_required
def addsales():
if request.method == 'POST':
vouchers.createSalesVoucher((request.form), session['company_id'])
#return str(dict(request.form))
return render_template('add_voucher.html',voucher_type='Sales',
taxrates=vouchers.getTaxrates())
@app.route('/sales/view')
@login_required
def viewsales():
return render_template('search_voucher.html',view_type='sales')
@app.route('/sales/search/bymonth/<year>/<month>')
@login_required
def searchsalesbymonth(month,year):
return json.dumps(vouchers.getSalesVoucherByMonth(month, year, session['company_id']))
@app.route('/sales/search/byinv/',methods=['GET','POST'])
@login_required
def getsalesvoucherdata():
return json.dumps(
vouchers.getSalesVoucherByInvNo(request.form.get('inv_no'),
session['company_id'])
)
@app.route('/sales/delete/<id>')
@login_required
def deletesales(id):
vouchers.deleteSalesVoucher(id, session['company_id'])
return json.dumps({'status':'ok'})
@app.route('/purchase/add', methods=['GET','POST'])
@login_required
def addpurchase():
if request.method == 'POST':
vouchers.createPurchaseVoucher((request.form), session['company_id'])
#return str(dict(request.form))
return render_template('add_voucher.html',voucher_type='Purchase',
taxrates=vouchers.getTaxrates())
@app.route('/purchase/view')
@login_required
def viewpurchase():
return render_template('search_voucher.html',view_type='purchase')
@app.route('/purchase/search/bymonth/<year>/<month>')
@login_required
def searchpurchasebymonth(month,year):
return json.dumps(vouchers.getPurchaseVoucherByMonth(month, year, session['company_id']))
@app.route('/purchase/search/byinv/',methods=['GET','POST'])
@login_required
def getpurchasevoucherdata():
return json.dumps(
vouchers.getPurchaseVoucherByInvNo(request.form.get('inv_no'),
session['company_id'])
)
@app.route('/purchase/delete/<id>')
@login_required
def deletepurchase(id):
vouchers.deletePurchaseVoucher(id, session['company_id'])
return json.dumps({'status':'ok'})
@app.route('/')
@login_required
def index():
return render_template('dashboard.html')
@app.route('/login', methods=['GET','POST'])
def login():
session.clear()
if request.method == 'GET':
return render_template('validate.html',
authList=authenticator.authenticationList())
else:
if authenticator.setSessionData(request, session):
return redirect(url_for('index'))
return redirect(url_for('login'))
@app.route('/register', methods=['GET','POST'])
def register():
if request.method == 'POST':
authenticator.createCompany(request.form)
return 'Success'
return render_template('register.html')
@app.route('/gstr/')
@login_required
def gstr_main():
return render_template('gstr.html')
@app.route('/gstr1/<y>/<m>')
@login_required
def gstr1(m,y):
    form_1 = gstr.GSTR1(m, y, session['company_id'])
    data = form_1.getData()
    #return json.dumps(data)
    return render_template('gstr1.html',data=data,year=y,month=m)
@app.route('/gstr3b/<y>/<m>')
@login_required
def gstr3b(m,y):
    form_3b = gstr.GSTR3b(m,y,session['company_id'])
    data = form_3b.getData()
    #return json.dumps(data)
    return render_template('gstr3b.html',data=data,year=y,month=m)
@app.route('/download/gstr1/<y>/<m>')
@login_required
def downloadGSTR1(m,y):
filename = session["company_name"].strip()
gstr1 = download.GSTR1(m,y,session['company_id'])
b2b = gstr1.downloadB2b()
# We need to modify the response, so the first thing we
# need to do is create a response out of the CSV string
response = make_response(b2b)
# This is the key: Set the right header for the response
# to be downloaded, instead of just printed on the browser
response.headers["Content-Disposition"] = "attachment;filename="+filename+" "+m+"_b2b"+".csv"
return response
@app.route('/backup/',methods=["GET","POST"])
def backup_gdrive():
backup = drive.GoogleDrive()
if request.method == "GET":
url = backup.OauthURL()
return render_template('backup.html',url=url)
else:
backup.upload(request.form.get('key'))
return "Success"
@app.route('/chart/<type>/<dur>')
@login_required
def chart_data(type,dur):
chartdata = data.ChartData(session["company_id"])
if dur == "monthlytotal":
return json.dumps(chartdata.getMonthlyTotalData(type))
if dur == "dailytotal":
return json.dumps(chartdata.getDailyTotalData(type))
if dur == "partytotal":
return json.dumps(chartdata.getPartyTotalData(type))
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5001)
```
#### File: joydeep1701/GSTBull/drive.py
```python
import pprint
import httplib2
import apiclient.discovery
import apiclient.http
import oauth2client.client
import time
# Check https://developers.google.com/drive/scopes for all available scopes.
OAUTH2_SCOPE = 'https://www.googleapis.com/auth/drive'
# Location of the client secrets.
CLIENT_SECRETS = '../client_secret.json'
# Path to the file to upload.
FILENAME = 'watchdog.db'
# Metadata about the file.
MIMETYPE = 'text/plain'
TITLE = "GSTBull watchdog at {}-{}".format(time.localtime().tm_mday,time.localtime().tm_mon)
DESCRIPTION = 'Backup of watchdog.db'
# OAuth 2.0 scope that will be authorized.
class GoogleDrive(object):
def __init__(self):
# Perform OAuth2.0 authorization flow.
self.flow = oauth2client.client.flow_from_clientsecrets(CLIENT_SECRETS, OAUTH2_SCOPE)
self.flow.redirect_uri = oauth2client.client.OOB_CALLBACK_URN
self.authorize_url = self.flow.step1_get_authorize_url()
def OauthURL(self):
return self.authorize_url
def upload(self,code):
code = code.strip()
credentials = self.flow.step2_exchange(code)
# Create an authorized Drive API client.
http = httplib2.Http()
credentials.authorize(http)
drive_service = apiclient.discovery.build('drive', 'v2', http=http)
# Insert a file. Files are comprised of contents and metadata.
# MediaFileUpload abstracts uploading file contents from a file on disk.
media_body = apiclient.http.MediaFileUpload(
FILENAME,
mimetype=MIMETYPE,
resumable=True
)
# The body contains the metadata for the file.
body = {
'title': TITLE,
'description': DESCRIPTION,
}
# Perform the request and print the result.
new_file = drive_service.files().insert(body=body, media_body=media_body).execute()
pprint.pprint(new_file)
if __name__ == "__main__":
GD = GoogleDrive()
print(GD.OauthURL())
GD.upload(input())
```
#### File: joydeep1701/GSTBull/gstr.py
```python
from flask import redirect, render_template, request, url_for, flash
from sql import *
import ledgers
import vouchers
db = SQL("sqlite:///watchdog.db")
def encode(value):
if value is None:
return 0.00
    if isinstance(value, dict):
        for key in value.keys():
            value[key] = encode(value[key])
    return value
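# encode() normalises values coming back from the DB before they reach the templates:
#   encode(None)                      -> 0.0
#   encode({'igst': None, 'x': 1.5})  -> {'igst': 0.0, 'x': 1.5}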
class GSTR1():
def __init__(self, month, year, company_id):
self.company_id = company_id
self.month = month
self.year = year
self.sales_view = str(company_id) + '_sales_view'
self.sales_master = str(company_id) + '_master_sales'
self.sales_secondary = str(company_id) + '_secondary_sales'
self.ledger_table = str(company_id) + '_ledgers'
def table_b2b(self):
data = {}
summary_count = db.execute("""SELECT count(inv_no) AS no_records,
sum(invoice_value) AS invoice_value,
sum(invoice_taxable_value) AS taxable_value
FROM
(SELECT inv_no,invoice_value,invoice_taxable_value
FROM :table WHERE
un_reg='False'
AND month=:month AND year=:year
GROUP BY inv_no)
""", table=self.sales_view,
month=self.month, year=self.year)
summary_tax_os = db.execute("""SELECT sum(rate*rate_amount) AS igst
FROM :table WHERE
un_reg='False'
AND pos != :hs
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
summary_tax_hs = db.execute("""SELECT sum(rate*rate_amount/2) AS cgst,
sum(rate*rate_amount/2) AS sgst
FROM :table WHERE
un_reg='False'
AND pos = :hs
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
reciever_wise = db.execute("""SELECT
name,gstin,sum(invoice_taxable_value) as taxable_value,
sum(invoice_tax) as tax,pos,count(inv_no) AS no_records
FROM
(SELECT name,gstin,inv_no,invoice_taxable_value,
invoice_tax,pos FROM :table
INNER JOIN
(SELECT id AS l_id, name, gstin
FROM :ledgers)
ON l_id=ledger_id
WHERE un_reg='False' AND month=:month AND year=:year
GROUP BY master_id)
GROUP BY gstin""",table=self.sales_view, ledgers=self.ledger_table,
month=self.month, year=self.year)
#print(summary_count)
data['summary'] = summary_count[0]
data['summary']['igst'] = summary_tax_os[0]['igst']
data['summary']['sgst'] = summary_tax_hs[0]['sgst']
data['summary']['cgst'] = summary_tax_hs[0]['cgst']
data['reciever_wise'] = reciever_wise
#print(data)
return encode(data)
def table_b2cs(self):
data = {}
summary_count = db.execute("""SELECT count(inv_no) AS no_records,
sum(invoice_value) AS invoice_value,
sum(invoice_taxable_value) AS taxable_value
FROM
(SELECT inv_no,invoice_value,invoice_taxable_value
FROM :table WHERE
un_reg='True'
AND month=:month AND year=:year
GROUP BY inv_no)
""", table=self.sales_view,
month=self.month, year=self.year)
summary_tax_os = db.execute("""SELECT sum(rate*rate_amount) AS igst
FROM :table WHERE
un_reg='True'
AND pos != :hs
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
summary_tax_hs = db.execute("""SELECT sum(rate*rate_amount/2) AS cgst,
sum(rate*rate_amount/2) AS sgst
FROM :table WHERE
un_reg='True'
AND pos = :hs
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
details = db.execute("""SELECT rate, sum(rate_amount) AS taxable_value,
sum(rate_amount*rate) AS tax, pos FROM :table
WHERE un_reg='True'
AND month=:month AND year=:year
GROUP BY pos,rate""",
table=self.sales_view, month=self.month, year=self.year)
data['summary'] = summary_count[0]
data['summary']['igst'] = summary_tax_os[0]['igst']
data['summary']['sgst'] = summary_tax_hs[0]['sgst']
data['summary']['cgst'] = summary_tax_hs[0]['cgst']
data['details'] = details
return encode(data)
def table_8(self):
data = {}
summary = db.execute("""SELECT count(rate_amount) AS no_records,
sum(rate_amount) AS taxable_value FROM :table
WHERE rate='0.00'
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year)
hs_reg = db.execute("""SELECT sum(rate_amount) AS taxable_value
FROM :table WHERE rate='0.00'
AND pos=:hs AND un_reg='False'
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
os_reg = db.execute("""SELECT sum(rate_amount) AS taxable_value
FROM :table WHERE rate='0.00'
AND pos!=:hs AND un_reg='False'
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
hs_unreg = db.execute("""SELECT sum(rate_amount) AS taxable_value
FROM :table WHERE rate='0.00'
AND pos=:hs AND un_reg='True'
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
os_unreg = db.execute("""SELECT sum(rate_amount) AS taxable_value
FROM :table WHERE rate='0.00'
AND pos!=:hs AND un_reg='True'
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
data['summary'] = summary[0]
data['details'] = {}
data['details']['nil_rated'] = {'hs_reg':hs_reg[0]['taxable_value'],
'os_reg':os_reg[0]['taxable_value'],
'hs_unreg':hs_unreg[0]['taxable_value'],
'os_unreg':os_unreg[0]['taxable_value'],
}
return encode(data)
def getData(self):
data = {'b2b':self.table_b2b(),
'b2cs':self.table_b2cs(),
'table_8':self.table_8()}
return data
class GSTR3b():
def __init__(self, month, year, company_id):
self.company_id = company_id
self.month = month
self.year = year
self.sales_view = str(company_id) + '_sales_view'
self.sales_master = str(company_id) + '_master_sales'
self.sales_secondary = str(company_id) + '_secondary_sales'
self.purchase_view = str(company_id) + '_purchase_view'
self.purchase_master = str(company_id) + '_master_purchase'
self.purchase_secondary = str(company_id) + '_secondary_purchase'
self.ledger_table = str(company_id) + '_ledgers'
def table3_1(self):
data = {}
rowa_hs = db.execute("""SELECT sum(rate_amount) AS taxable_value,
sum(rate_amount*rate/2) AS cgst, sum(rate_amount*rate/2) AS sgst
FROM :table
WHERE sez='False' AND rate !='0.0'
AND pos = :hs
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
rowa_os = db.execute("""SELECT sum(rate_amount) AS taxable_value,
sum(rate_amount*rate) AS igst
FROM :table
WHERE sez='False' AND rate !='0.0'
AND pos != :hs
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
#print(rowa_hs,rowa_os)
data['row_a'] = {'taxable_value':
encode(rowa_os[0]['taxable_value'])
+
encode(rowa_hs[0]['taxable_value']) ,
'igst':rowa_os[0]['igst'],
'cgst':rowa_hs[0]['cgst'] ,
'sgst':rowa_hs[0]['sgst'] ,
}
rowb = db.execute("""SELECT sum(rate_amount) AS taxable_value,
sum(rate_amount*rate) AS igst FROM :table WHERE sez='True'
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year)
data['row_b'] = {
'taxable_value':
rowb[0]['taxable_value'] ,
'igst': rowb[0]['igst']}
rowc = db.execute("""SELECT sum(rate_amount) AS taxable_value
FROM :table WHERE sez='False' AND rate='0.0'
AND month=:month AND year=:year""",
table=self.sales_view, month=self.month, year=self.year)
data['row_c'] = {'taxable_value': rowc[0]['taxable_value']}
return encode(data)
def table3_2(self):
data = {}
row_ur = db.execute("""SELECT
pos, sum(rate_amount) AS taxable_value,
sum(rate_amount*rate) AS igst FROM :table
WHERE pos != :hs
AND un_reg='True' AND comp='False' AND sez='False'
AND rate !='0.0'
AND month=:month AND year=:year
GROUP BY pos
""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
data['row_ur'] = row_ur
row_cmp = db.execute("""SELECT
pos, sum(rate_amount) AS taxable_value,
sum(rate_amount*rate) AS igst FROM :table
WHERE pos != :hs
AND un_reg='False' AND comp='True' AND sez='False'
AND rate !='0.0'
AND month=:month AND year=:year
GROUP BY pos
""",
table=self.sales_view, month=self.month, year=self.year, hs=19)
data['row_cmp'] = row_cmp
return encode(data)
def table4(self):
data = {}
row_5_os = db.execute("""SELECT sum(rate_amount*rate) AS igst
FROM :table WHERE
sez='False' AND comp='False' AND rate != '0.00' AND pos != :hs
AND month=:month AND year=:year""",
table=self.purchase_view, month=self.month, year=self.year, hs=19)
row_5_hs = db.execute("""SELECT sum(rate_amount*rate/2) AS cgst,
sum(rate_amount*rate/2) AS sgst
FROM :table WHERE
sez='False' AND comp='False' AND rate != '0.00' AND pos = :hs
AND month=:month AND year=:year""",
table=self.purchase_view, month=self.month, year=self.year, hs=19)
data.update(row_5_os[0])
data.update(row_5_hs[0])
return encode(data)
def table5(self):
data = {}
row_hs = db.execute("""SELECT sum(rate_amount) AS intra_state_value
FROM :table WHERE sez='False' AND pos=:hs
AND (rate = '0.00' OR comp='True' )
AND month=:month AND year=:year""",
table=self.purchase_view, month=self.month, year=self.year, hs=19)
row_os = db.execute("""SELECT sum(rate_amount) AS inter_state_value
FROM :table WHERE sez='False' AND pos!=:hs
AND (rate = '0.00' OR comp='True' )
AND month=:month AND year=:year""",
table=self.purchase_view, month=self.month, year=self.year, hs=19)
data.update(row_os[0])
data.update(row_hs[0])
return encode(data)
def getData(self):
data = {'table3_1':self.table3_1(),
'table3_2':self.table3_2(),
'table4' :self.table4(),
'table5' :self.table5(),}
return encode(data)
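# A rough usage sketch; month/year are passed straight through to the SQL filters,
# so the values below are purely illustrative:
#
#   form_3b = GSTR3b('7', '2017', company_id=1)
#   data = form_3b.getData()  # {'table3_1': ..., 'table3_2': ..., 'table4': ..., 'table5': ...}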
```
#### File: joydeep1701/GSTBull/sql.py
```python
import sqlalchemy
class SQL(object):
"""TODO"""
def __init__(self, url):
"""TODO"""
try:
self.engine = sqlalchemy.create_engine(url)
except Exception as e:
raise RuntimeError(e)
def execute(self, text, *multiparams, **params):
"""TODO"""
try:
# http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.text
# https://groups.google.com/forum/#!topic/sqlalchemy/FfLwKT1yQlg
# http://docs.sqlalchemy.org/en/latest/core/connections.html#sqlalchemy.engine.Engine.execute
# http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html#how-do-i-render-sql-expressions-as-strings-possibly-with-bound-parameters-inlined
statement = sqlalchemy.text(text).bindparams(*multiparams, **params)
result = self.engine.execute(str(statement.compile(compile_kwargs={"literal_binds": True})))
# SELECT
if result.returns_rows:
rows = result.fetchall()
return [dict(row) for row in rows]
# INSERT
elif result.lastrowid is not None:
return result.lastrowid
# DELETE, UPDATE
else:
return result.rowcount
except sqlalchemy.exc.IntegrityError:
return None
except Exception as e:
raise RuntimeError(e)
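    # A minimal usage sketch mirroring how this wrapper is used elsewhere in the project
    # (the table/month values are illustrative):
    #
    #   db = SQL("sqlite:///watchdog.db")
    #   rows = db.execute("SELECT * FROM :table WHERE month=:month", table="1_sales_view", month="7")
    #   # SELECT returns a list of dicts; INSERT returns lastrowid; UPDATE/DELETE return rowcount.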
``` |
{
"source": "joydeepmitra/Machine-Learning",
"score": 2
} |
#### File: lab2/python/grepc.py
```python
import apache_beam as beam
import re
def my_grep(line, term):
if re.match( r'^' + re.escape(term), line):
yield line
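# my_grep is a generator that yields the line only when it starts with the term, e.g.:
#   list(my_grep('import re', 'import'))        -> ['import re']
#   list(my_grep('x = 1  # import', 'import'))  -> []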
PROJECT='cloud-training-demos'
BUCKET='cloud-training-demos'
def run():
argv = [
'--project={0}'.format(PROJECT),
'--job_name=examplejob2',
'--save_main_session',
'--staging_location=gs://{0}/staging/'.format(BUCKET),
'--temp_location=gs://{0}/staging/'.format(BUCKET),
'--runner=DataflowRunner'
]
p = beam.Pipeline(argv=argv)
input = 'gs://{0}/javahelp/*.java'.format(BUCKET)
output_prefix = 'gs://{0}/javahelp/output'.format(BUCKET)
searchTerm = 'import'
    # find all lines that start with the searchTerm
(p
| 'GetJava' >> beam.io.ReadFromText(input)
| 'Grep' >> beam.FlatMap(lambda line: my_grep(line, searchTerm) )
| 'write' >> beam.io.WriteToText(output_prefix)
)
p.run()
if __name__ == '__main__':
run()
```
#### File: streaming/publish/send_sensor_data.py
```python
import time
import gzip
import logging
import argparse
import datetime
from google.cloud import pubsub
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
TOPIC = 'sandiego'
INPUT = 'sensor_obs2008.csv.gz'
def publish(publisher, topic, events):
numobs = len(events)
if numobs > 0:
logging.info('Publishing {} events from {}'.format(numobs, get_timestamp(events[0])))
for event_data in events:
publisher.publish(topic,event_data.encode())
def get_timestamp(line):
# look at first field of row
timestamp = line.split(',')[0]
return datetime.datetime.strptime(timestamp, TIME_FORMAT)
def simulate(topic, ifp, firstObsTime, programStart, speedFactor):
# sleep computation
def compute_sleep_secs(obs_time):
time_elapsed = (datetime.datetime.utcnow() - programStart).seconds
sim_time_elapsed = (obs_time - firstObsTime).seconds / speedFactor
to_sleep_secs = sim_time_elapsed - time_elapsed
return to_sleep_secs
topublish = list()
for line in ifp:
event_data = line # entire line of input CSV is the message
obs_time = get_timestamp(line) # from first column
# how much time should we sleep?
if compute_sleep_secs(obs_time) > 1:
# notify the accumulated topublish
publish(publisher, topic, topublish) # notify accumulated messages
topublish = list() # empty out list
# recompute sleep, since notification takes a while
to_sleep_secs = compute_sleep_secs(obs_time)
if to_sleep_secs > 0:
logging.info('Sleeping {} seconds'.format(to_sleep_secs))
time.sleep(to_sleep_secs)
topublish.append(event_data)
# left-over records; notify again
publish(publisher, topic, topublish)
def peek_timestamp(ifp):
# peek ahead to next line, get timestamp and go back
pos = ifp.tell()
line = ifp.readline()
ifp.seek(pos)
return get_timestamp(line)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Send sensor data to Cloud Pub/Sub in small groups, simulating real-time behavior')
parser.add_argument('--speedFactor', help='Example: 60 implies 1 hour of data sent to Cloud Pub/Sub in 1 minute', required=True, type=float)
parser.add_argument('--project', help='Example: --project $DEVSHELL_PROJECT_ID', required=True)
args = parser.parse_args()
# create Pub/Sub notification topic
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
publisher = pubsub.PublisherClient()
event_type = publisher.topic_path(args.project,TOPIC)
try:
publisher.get_topic(event_type)
logging.info('Reusing pub/sub topic {}'.format(TOPIC))
except:
publisher.create_topic(event_type)
logging.info('Creating pub/sub topic {}'.format(TOPIC))
# notify about each line in the input file
programStartTime = datetime.datetime.utcnow()
with gzip.open(INPUT, 'rb') as ifp:
header = ifp.readline() # skip header
firstObsTime = peek_timestamp(ifp)
logging.info('Sending sensor data from {}'.format(firstObsTime))
simulate(event_type, ifp, firstObsTime, programStartTime, args.speedFactor)
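        # Typical invocation (matching the argparse help above):
        #   python send_sensor_data.py --speedFactor=60 --project=$DEVSHELL_PROJECT_ID
        # i.e. replay one hour of sensor_obs2008.csv.gz per minute onto the 'sandiego' topic.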
``` |
{
"source": "joydeepnandi/Algo",
"score": 4
} |
#### File: graph/Young Common Ancestor/YoungCommonAncestor.py
```python
class AncestralTree:
def __init__(self, name):
self.name = name
self.ancestor = None
# O(d) time | O(1) space - where d is the depth (height) of the ancestral tree
def getYoungestCommonAncestor(topAncestor, descendantOne, descendantTwo):
depthOne = getDescendantDepth(descendantOne, topAncestor)
depthTwo = getDescendantDepth(descendantTwo, topAncestor)
if depthOne > depthTwo:
return backtrackAncestralTree(descendantOne, descendantTwo, depthOne - depthTwo)
else:
return backtrackAncestralTree(descendantTwo, descendantOne, depthTwo - depthOne)
def getDescendantDepth(descendant, topAncestor):
depth = 0
while descendant != topAncestor:
depth += 1
descendant = descendant.ancestor
return depth
def backtrackAncestralTree(lowerDescendant, higherDescendant, diff):
while diff > 0:
lowerDescendant = lowerDescendant.ancestor
diff -= 1
while lowerDescendant != higherDescendant:
lowerDescendant = lowerDescendant.ancestor
higherDescendant = higherDescendant.ancestor
return lowerDescendant
```
#### File: Greedy/Tandem Bicycle/TandemBicycle.py
```python
def tandemBicycle(redShirtSpeeds, blueShirtSpeeds, fastest):
redShirtSpeeds.sort()
blueShirtSpeeds.sort()
if not fastest:
reverseArrayInPlace(redShirtSpeeds)
totalSpeed = 0
for idx in range(len(redShirtSpeeds)):
rider1 = redShirtSpeeds[idx]
rider2 = blueShirtSpeeds[len(blueShirtSpeeds) - idx - 1]
totalSpeed += max(rider1, rider2)
return totalSpeed
def reverseArrayInPlace(array):
start = 0
end = len(array) - 1
while start < end:
array[start], array[end] = array[end], array[start]
start += 1
end -= 1
'''
My solution
def tandemBicycle(redShirtSpeeds, blueShirtSpeeds, fastest):
# Write your code here.
redShirtSpeeds.sort()
blueShirtSpeeds.sort()
if not fastest:
rev(redShirtSpeeds)
res=0
for i in range(len(redShirtSpeeds)):
r1=redShirtSpeeds[i]
r2=blueShirtSpeeds[len(blueShirtSpeeds)-1-i]
res+=max(r1,r2)
return res
def rev(arr):
r=len(arr)-1
while l<r:
arr[l],arr[r]=arr[r],arr[l]
l+=1
r-=1
'''
```
#### File: LinkedList/Node Swap/solution.py
```python
class LinkedList:
def __init__(self, value):
self.value = value
self.next = None
# O(n) time | O(n) space - where n is the number of nodes in the Linked List
def nodeSwap(head):
if head is None or head.next is None:
return head
nextNode = head.next
head.next = nodeSwap(head.next.next)
nextNode.next = head
return nextNode
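# A quick self-check of nodeSwap: pairwise-swapping 1 -> 2 -> 3 -> 4 yields 2 -> 1 -> 4 -> 3.
if __name__ == "__main__":
    head = LinkedList(1)
    head.next = LinkedList(2)
    head.next.next = LinkedList(3)
    head.next.next.next = LinkedList(4)
    node = nodeSwap(head)
    values = []
    while node is not None:
        values.append(node.value)
        node = node.next
    print(values)  # [2, 1, 4, 3]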
```
#### File: LinkedList/Reverse LinkedList/Solution.py
```python
class LinkedList:
def __init__(self, value):
self.value = value
self.next = None
# O(n) time | O(1) space - where n is the number of nodes in the Linked List
def reverseLinkedList(head):
previousNode, currentNode = None, head
while currentNode is not None:
nextNode = currentNode.next
currentNode.next = previousNode
previousNode = currentNode
currentNode = nextNode
return previousNode
```
#### File: Recursion/Generate Div Tags/GenerateDivTags.py
```python
def generateDivTags(numberOfTags):
matchedDivTags = []
generateDivTagsFromPrefix(numberOfTags, numberOfTags, "", matchedDivTags)
return matchedDivTags
def generateDivTagsFromPrefix(openingTagsNeeded, closingTagsNeeded, prefix, result):
if openingTagsNeeded > 0:
newPrefix = prefix + "<div>"
generateDivTagsFromPrefix(openingTagsNeeded - 1, closingTagsNeeded, newPrefix, result)
if openingTagsNeeded < closingTagsNeeded:
newPrefix = prefix + "</div>"
generateDivTagsFromPrefix(openingTagsNeeded, closingTagsNeeded - 1, newPrefix, result)
if closingTagsNeeded == 0:
result.append(prefix)
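# A quick example of the output for two tag pairs:
if __name__ == "__main__":
    print(generateDivTags(2))
    # ['<div><div></div></div>', '<div></div><div></div>']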
```
#### File: Recursion/Lowest Common Manager/LowestCommonManager.py
```python
def getLowestCommonManager(topManager, reportOne, reportTwo):
return getOrgInfo(topManager, reportOne, reportTwo).lowestCommonManager
def getOrgInfo(manager, reportOne, reportTwo):
numImportantReports = 0
for directReport in manager.directReports:
orgInfo = getOrgInfo(directReport, reportOne, reportTwo)
if orgInfo.lowestCommonManager is not None:
return orgInfo
numImportantReports += orgInfo.numImportantReports
if manager == reportOne or manager == reportTwo:
numImportantReports += 1
lowestCommonManager = manager if numImportantReports == 2 else None
return OrgInfo(lowestCommonManager, numImportantReports)
class OrgInfo:
def __init__(self, lowestCommonManager, numImportantReports):
self.lowestCommonManager = lowestCommonManager
self.numImportantReports = numImportantReports
# This is the input class.
class OrgChart:
def __init__(self, name):
self.name = name
self.directReports = []
```
#### File: Recursion/Number of Binary Tree Topologies/NumberOfBinaryTreeTopology3.py
```python
def numberOfBinaryTreeTopologies(n):
cache = [1]
for m in range(1, n + 1):
numberOfTrees = 0
for leftTreeSize in range(m):
rightTreeSize = m - 1 - leftTreeSize
numberOfLeftTrees = cache[leftTreeSize]
numberOfRightTrees = cache[rightTreeSize]
numberOfTrees += numberOfLeftTrees * numberOfRightTrees
cache.append(numberOfTrees)
return cache[n]
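# These counts are the Catalan numbers; for example numberOfBinaryTreeTopologies(3) == 5
# (left/right splits of 0+2, 1+1 and 2+0 contribute 2 + 1 + 2 topologies).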
```
#### File: Recursion/Staircase Traversal/StaircaseTraversal1.py
```python
def staircaseTraversal(height, maxSteps):
return numberOfWaysToTop(height, maxSteps)
def numberOfWaysToTop(height, maxSteps):
if height <= 1:
return 1
numberOfWays = 0
for step in range(1, min(maxSteps, height) + 1):
numberOfWays += numberOfWaysToTop(height - step, maxSteps)
return numberOfWays
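# Example: staircaseTraversal(4, 2) == 5, since with steps of 1 or 2 the counts follow
# a Fibonacci-style recurrence ways(h) = ways(h - 1) + ways(h - 2).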
``` |
{
"source": "joydenfew/514-SDN-assignment-",
"score": 3
} |
#### File: onfsdn/faucet/conf.py
```python
class Conf(object):
defaults = {}
def update(self, dictionary):
# TODO: it would be good to warn on keys that are set but arent in
# defaults
self.__dict__.update(dictionary)
def _set_default(self, key, value):
if key not in self.__dict__ or self.__dict__[key] is None:
self.__dict__[key] = value
def _to_conf(self):
result = {}
for k in self.defaults.iterkeys():
if k != 'name':
result[k] = self.__dict__[str(k)]
return result
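    # Subclasses such as Port declare a `defaults` dict, call update() with the parsed
    # config for that object, then use _set_default() to fill anything left unset;
    # _to_conf() serialises those same keys back out (minus 'name').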
```
#### File: onfsdn/faucet/port.py
```python
from conf import Conf
class Port(Conf):
name = None
number = None
enabled = None
permanent_learn = None
unicast_flood = None
mirror = None
mirror_destination = None
native_vlan = None
tagged_vlans = []
acl_in = None
stack = {}
defaults = {
'number': None,
'name': None,
'description': None,
'enabled': True,
'permanent_learn': False,
'unicast_flood': True,
'mirror': None,
'mirror_destination': False,
'native_vlan': None,
'tagged_vlans': None,
'acl_in': None,
'stack': None,
}
def __init__(self, _id, conf=None):
if conf is None:
conf = {}
self._id = _id
self.update(conf)
self.set_defaults()
self.dyn_phys_up = False
def set_defaults(self):
for key, value in self.defaults.iteritems():
self._set_default(key, value)
self._set_default('number', self._id)
self._set_default('name', str(self._id))
self._set_default('description', self.name)
self._set_default('tagged_vlans', [])
@property
def phys_up(self):
return self.dyn_phys_up
@phys_up.setter
def phys_up(self, status):
self.dyn_phys_up = status
def running(self):
return self.enabled and self.phys_up
def to_conf(self):
result = self._to_conf()
if 'stack' in result and result['stack'] is not None:
result['stack'] = {
'dp': str(self.stack['dp']),
'port': str(self.stack['port'])
}
return result
def __eq__(self, other):
return hash(self) == hash(other)
def __hash__(self):
items = [(k,v) for k, v in self.__dict__.iteritems() if 'dyn' not in k]
return hash(frozenset(map(str, items)))
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return self.name
def __repr__(self):
return "Port %s" % self.number
```
#### File: onfsdn/faucet/valve_route.py
```python
import time
import ipaddr
from ryu.lib.packet import arp, icmp, icmpv6, ipv4, ipv6
from ryu.ofproto import ether
from ryu.ofproto import inet
import valve_of
import valve_packet
class NextHop(object):
"""Describes a directly connected (at layer 2) nexthop."""
def __init__(self, eth_src, now):
self.eth_src = eth_src
self.cache_time = now
self.last_retry_time = None
self.resolve_retries = 0
class ValveRouteManager(object):
"""Base class to implement RIB/FIB."""
def __init__(self, logger, faucet_mac, arp_neighbor_timeout,
max_hosts_per_resolve_cycle, max_host_fib_retry_count,
max_resolve_backoff_time,
fib_table, eth_src_table, eth_dst_table, route_priority,
valve_in_match, valve_flowdel, valve_flowmod,
valve_flowcontroller, use_group_table):
self.logger = logger
self.faucet_mac = faucet_mac
self.arp_neighbor_timeout = arp_neighbor_timeout
self.max_hosts_per_resolve_cycle = max_hosts_per_resolve_cycle
self.max_host_fib_retry_count = max_host_fib_retry_count
self.max_resolve_backoff_time = max_resolve_backoff_time
self.fib_table = fib_table
self.eth_src_table = eth_src_table
self.eth_dst_table = eth_dst_table
self.route_priority = route_priority
self.valve_in_match = valve_in_match
self.valve_flowdel = valve_flowdel
self.valve_flowmod = valve_flowmod
self.valve_flowcontroller = valve_flowcontroller
self.use_group_table = use_group_table
self.ip_gw_to_group_id = {}
def _vlan_vid(self, vlan, in_port):
vid = None
if vlan.port_is_tagged(in_port):
vid = vlan.vid
return vid
def _eth_type(self):
"""Return EtherType for FIB entries."""
pass
def _vlan_routes(self, vlan):
pass
def _vlan_nexthop_cache(self, vlan):
pass
def _vlan_nexthop_cache_entry(self, vlan, ip_gw):
nexthop_cache = self._vlan_nexthop_cache(vlan)
if ip_gw in nexthop_cache:
return nexthop_cache[ip_gw]
return None
def _neighbor_resolver_pkt(self, vid, faucet_vip, ip_gw):
pass
def _neighbor_resolver(self, ip_gw, faucet_vip, vlan, ports):
ofmsgs = []
if ports:
port_num = ports[0].number
vid = self._vlan_vid(vlan, port_num)
resolver_pkt = self._neighbor_resolver_pkt(
vid, faucet_vip, ip_gw)
for port in ports:
ofmsgs.append(valve_of.packetout(
port.number, resolver_pkt.data))
return ofmsgs
def _nexthop_actions(self, eth_dst):
return [
valve_of.set_eth_src(self.faucet_mac),
valve_of.set_eth_dst(eth_dst),
valve_of.dec_ip_ttl()]
def _add_resolved_route(self, vlan, ip_gw, ip_dst, eth_dst, is_updated):
ofmsgs = []
in_match = self.valve_in_match(
self.fib_table, vlan=vlan,
eth_type=self._eth_type(), nw_dst=ip_dst)
prefixlen = ipaddr.IPNetwork(ip_dst).prefixlen
priority = self.route_priority + prefixlen
if is_updated:
self.logger.info(
'Updating next hop for route %s via %s (%s)',
ip_dst, ip_gw, eth_dst)
ofmsgs.extend(self.valve_flowdel(
self.fib_table,
in_match,
priority=priority))
else:
self.logger.info(
'Adding new route %s via %s (%s)',
ip_dst, ip_gw, eth_dst)
if self.use_group_table:
inst = [valve_of.apply_actions([valve_of.group_act(
group_id=self.ip_gw_to_group_id[ip_gw])])]
else:
inst = [valve_of.apply_actions(self._nexthop_actions(eth_dst)),
valve_of.goto_table(self.eth_dst_table)]
ofmsgs.append(self.valve_flowmod(
self.fib_table,
in_match,
priority=priority,
inst=inst))
return ofmsgs
def _group_id_from_ip_gw(self, resolved_ip_gw):
return (hash(str(resolved_ip_gw)) + valve_of.ROUTE_GROUP_OFFSET) & ((1<<32) -1)
def _update_nexthop_cache(self, vlan, eth_src, ip_gw):
now = time.time()
nexthop = NextHop(eth_src, now)
nexthop_cache = self._vlan_nexthop_cache(vlan)
nexthop_cache[ip_gw] = nexthop
def _nexthop_group_buckets(self, vlan, in_port, eth_src):
actions = self._nexthop_actions(eth_src)
if not vlan.port_is_tagged(in_port):
actions.append(valve_of.pop_vlan())
actions.append(valve_of.output_port(in_port))
buckets = [valve_of.bucket(actions=actions)]
return buckets
def _update_nexthop_group(self, is_updated, resolved_ip_gw,
vlan, in_port, eth_src):
group_mod_method = None
group_id = None
ofmsgs = []
if is_updated:
group_mod_method = valve_of.groupmod
group_id = self.ip_gw_to_group_id[resolved_ip_gw]
else:
group_mod_method = valve_of.groupadd
group_id = self._group_id_from_ip_gw(resolved_ip_gw)
self.ip_gw_to_group_id[resolved_ip_gw] = group_id
buckets = self._nexthop_group_buckets(vlan, in_port, eth_src)
ofmsgs = [group_mod_method(group_id=group_id, buckets=buckets)]
return ofmsgs
def _update_nexthop(self, vlan, in_port, eth_src, resolved_ip_gw):
is_updated = False
routes = self._vlan_routes(vlan)
cached_eth_dst = self._cached_nexthop_eth_dst(vlan, resolved_ip_gw)
ofmsgs = []
if (cached_eth_dst is not None and cached_eth_dst != eth_src):
is_updated = True
if self.use_group_table:
ofmsgs.extend(
self._update_nexthop_group(
is_updated, resolved_ip_gw,
vlan, in_port, eth_src))
for ip_dst, ip_gw in routes.iteritems():
if ip_gw == resolved_ip_gw:
ofmsgs.extend(self._add_resolved_route(
vlan, ip_gw, ip_dst, eth_src, is_updated))
self._update_nexthop_cache(vlan, eth_src, resolved_ip_gw)
return ofmsgs
def _vlan_ip_gws(self, vlan):
"""Return IP gateways in VLAN.
Args:
vlan (vlan): VLAN containing this RIB/FIB.
Returns:
            list: tuples of (gateway, controller VIP in the same subnet).
"""
routes = self._vlan_routes(vlan)
ip_gws = []
for ip_gw in set(routes.values()):
for faucet_vip in vlan.faucet_vips:
if ip_gw in faucet_vip:
ip_gws.append((ip_gw, faucet_vip))
return ip_gws
def _add_unresolved_nexthops(self, vlan, ip_gws):
"""Populates any missing nexthop cache entries.
Args:
vlan (vlan): VLAN containing this RIB/FIB.
ip_gws (list): tuple, IP gateway and controller IP in same subnet.
"""
for ip_gw, _ in ip_gws:
if self._vlan_nexthop_cache_entry(vlan, ip_gw) is None:
self._update_nexthop_cache(vlan, None, ip_gw)
def _retry_backoff(self, now, resolve_retries, last_retry_time):
backoff_seconds = min(
2**resolve_retries, self.max_resolve_backoff_time)
if now - last_retry_time > backoff_seconds:
return True
return False
def _vlan_unresolved_nexthops(self, vlan, ip_gws, now):
"""Return unresolved or expired IP gateways, never tried/oldest first.
Args:
vlan (vlan): VLAN containing this RIB/FIB.
ip_gws (list): tuple, IP gateway and controller IP in same subnet.
now (float): seconds since epoch.
Returns:
            list: tuples of (gateway, controller VIP in the same subnet, last retry time).
"""
ip_gws_never_tried = []
ip_gws_with_retry_time = []
for ip_gw, faucet_vip in ip_gws:
if self._nexthop_fresh(vlan, ip_gw, now):
continue
nexthop_cache_entry = self._vlan_nexthop_cache_entry(vlan, ip_gw)
last_retry_time = nexthop_cache_entry.last_retry_time
ip_gw_with_retry_time = (ip_gw, faucet_vip, last_retry_time)
if last_retry_time is None:
ip_gws_never_tried.append(ip_gw_with_retry_time)
else:
if self._retry_backoff(
now, nexthop_cache_entry.resolve_retries, last_retry_time):
ip_gws_with_retry_time.append(ip_gw_with_retry_time)
ip_gws_with_retry_time_sorted = list(
sorted(ip_gws_with_retry_time, key=lambda x: x[-1]))
return ip_gws_never_tried + ip_gws_with_retry_time_sorted
def _is_host_fib_route(self, vlan, host_ip):
"""Return True if IP destination is a host FIB route.
Args:
vlan (vlan): VLAN containing this RIB/FIB.
            host_ip (ipaddr.IPAddress): potential host FIB route.
Returns:
True if a host FIB route (and not used as a gateway).
"""
routes = self._vlan_routes(vlan)
for ip_dst, ip_gw in routes.iteritems():
if ip_gw == host_ip:
if ip_dst.prefixlen < ip_dst.max_prefixlen:
return False
return True
def resolve_gateways(self, vlan, now):
"""Re/resolve all gateways.
Args:
vlan (vlan): VLAN containing this RIB/FIB.
now (float): seconds since epoch.
Returns:
list: OpenFlow messages.
"""
untagged_ports = vlan.untagged_flood_ports(False)
tagged_ports = vlan.tagged_flood_ports(False)
ip_gws = self._vlan_ip_gws(vlan)
self._add_unresolved_nexthops(vlan, ip_gws)
all_unresolved_nexthops = self._vlan_unresolved_nexthops(
vlan, ip_gws, now)
cycle_unresolved_nexthops = all_unresolved_nexthops[
:self.max_hosts_per_resolve_cycle]
deferred_unresolved_nexthops = (len(all_unresolved_nexthops) -
len(cycle_unresolved_nexthops))
if deferred_unresolved_nexthops:
self.logger.info('deferring resolution of %u nexthops',
deferred_unresolved_nexthops)
ofmsgs = []
for ip_gw, faucet_vip, last_retry_time in cycle_unresolved_nexthops:
nexthop_cache_entry = self._vlan_nexthop_cache_entry(vlan, ip_gw)
if (self._is_host_fib_route(vlan, ip_gw) and
nexthop_cache_entry.resolve_retries >= self.max_host_fib_retry_count):
self.logger.info(
'expiring dead host FIB route %s (age %us)',
ip_gw,
now - nexthop_cache_entry.cache_time)
ofmsgs.extend(self._del_host_fib_route(vlan, ip_gw))
else:
nexthop_cache_entry.last_retry_time = now
nexthop_cache_entry.resolve_retries += 1
if last_retry_time is None:
self.logger.info('resolving %s', ip_gw)
else:
self.logger.info(
'resolving %s retry %u (last attempt was %us ago)',
ip_gw,
nexthop_cache_entry.resolve_retries,
now - last_retry_time)
for ports in untagged_ports, tagged_ports:
ofmsgs.extend(self._neighbor_resolver(
ip_gw, faucet_vip, vlan, ports))
return ofmsgs
def _cached_nexthop_eth_dst(self, vlan, ip_gw):
nexthop_cache_entry = self._vlan_nexthop_cache_entry(vlan, ip_gw)
if (nexthop_cache_entry is not None and
nexthop_cache_entry.eth_src is not None):
return nexthop_cache_entry.eth_src
return None
def add_route(self, vlan, ip_gw, ip_dst):
"""Add a route to the RIB.
Args:
vlan (vlan): VLAN containing this RIB.
ip_gw (ipaddr.IPAddress): IP address of nexthop.
ip_dst (ipaddr.IPNetwork): destination IP network.
Returns:
list: OpenFlow messages.
"""
ofmsgs = []
routes = self._vlan_routes(vlan)
routes[ip_dst] = ip_gw
cached_eth_dst = self._cached_nexthop_eth_dst(vlan, ip_gw)
if cached_eth_dst is not None:
ofmsgs.extend(self._add_resolved_route(
vlan=vlan,
ip_gw=ip_gw,
ip_dst=ip_dst,
eth_dst=cached_eth_dst,
is_updated=False))
return ofmsgs
def _add_host_fib_route(self, vlan, host_ip):
"""Add a host FIB route.
Args:
vlan (vlan): VLAN containing this RIB.
host_ip (ipaddr.IPAddress): IP address of host.
Returns:
list: OpenFlow messages.
"""
host_route = ipaddr.IPNetwork(host_ip.exploded)
return self.add_route(vlan, host_ip, host_route)
def _del_host_fib_route(self, vlan, host_ip):
"""Delete a host FIB route.
Args:
vlan (vlan): VLAN containing this RIB.
host_ip (ipaddr.IPAddress): IP address of host.
Returns:
list: OpenFlow messages.
"""
host_route = ipaddr.IPNetwork(host_ip.exploded)
return self.del_route(vlan, host_route)
def _ip_pkt(self, pkt):
"""Return an IP packet from an Ethernet packet.
Args:
pkt: ryu.lib.packet from host.
Returns:
IP ryu.lib.packet parsed from pkt.
"""
pass
def _nexthop_fresh(self, vlan, ip_gw, now):
nexthop_cache_entry = self._vlan_nexthop_cache_entry(vlan, ip_gw)
if nexthop_cache_entry is not None:
if nexthop_cache_entry.eth_src is not None:
cache_time = nexthop_cache_entry.cache_time
cache_age = now - cache_time
if cache_age < self.arp_neighbor_timeout:
return True
return False
def add_host_fib_route_from_pkt(self, pkt_meta):
"""Add a host FIB route given packet from host.
Args:
pkt_meta (PacketMeta): received packet.
Returns:
list: OpenFlow messages.
"""
ip_pkt = self._ip_pkt(pkt_meta.pkt)
ofmsgs = []
if ip_pkt:
src_ip = ipaddr.IPAddress(ip_pkt.src)
if src_ip and pkt_meta.vlan.ip_in_vip_subnet(src_ip):
now = time.time()
nexthop_fresh = self._nexthop_fresh(pkt_meta.vlan, src_ip, now)
self._update_nexthop_cache(
pkt_meta.vlan, pkt_meta.eth_src, src_ip)
if not nexthop_fresh:
ofmsgs.extend(
self._add_host_fib_route(pkt_meta.vlan, src_ip))
return ofmsgs
def del_route(self, vlan, ip_dst):
"""Delete a route from the RIB.
Only one route with this exact destination is supported.
Args:
vlan (vlan): VLAN containing this RIB.
ip_dst (ipaddr.IPNetwork): destination IP network.
Returns:
list: OpenFlow messages.
"""
ofmsgs = []
routes = self._vlan_routes(vlan)
if ip_dst in routes:
del routes[ip_dst]
route_match = self.valve_in_match(
self.fib_table, vlan=vlan,
eth_type=self._eth_type(), nw_dst=ip_dst)
ofmsgs.extend(self.valve_flowdel(
self.fib_table, route_match))
return ofmsgs
def control_plane_handler(self, pkt_meta):
pass
class ValveIPv4RouteManager(ValveRouteManager):
"""Implement IPv4 RIB/FIB."""
def _eth_type(self):
return ether.ETH_TYPE_IP
def _vlan_routes(self, vlan):
return vlan.ipv4_routes
def _vlan_nexthop_cache(self, vlan):
return vlan.arp_cache
def _neighbor_resolver_pkt(self, vid, faucet_vip, ip_gw):
return valve_packet.arp_request(
self.faucet_mac, vid, faucet_vip.ip, ip_gw)
def _ip_pkt(self, pkt):
return pkt.get_protocol(ipv4.ipv4)
def add_faucet_vip(self, vlan, faucet_vip):
ofmsgs = []
faucet_vip_net = ipaddr.IPNetwork(faucet_vip.exploded)
faucet_vip_host = ipaddr.IPNetwork(faucet_vip.ip)
max_prefixlen = faucet_vip_host.prefixlen
priority = self.route_priority + max_prefixlen
ofmsgs.append(self.valve_flowmod(
self.eth_src_table,
self.valve_in_match(
self.eth_src_table,
eth_type=ether.ETH_TYPE_ARP,
nw_dst=faucet_vip_host,
vlan=vlan),
priority=priority,
inst=[valve_of.apply_actions([valve_of.output_controller()]),
valve_of.goto_table(self.eth_dst_table)]))
# Initialize IPv4 FIB
ofmsgs.append(self.valve_flowmod(
self.eth_src_table,
self.valve_in_match(
self.eth_src_table,
eth_type=self._eth_type(),
eth_dst=self.faucet_mac,
vlan=vlan),
priority=self.route_priority,
inst=[valve_of.goto_table(self.fib_table)]))
ofmsgs.append(self.valve_flowcontroller(
self.fib_table,
self.valve_in_match(
self.fib_table,
vlan=vlan,
eth_type=self._eth_type(),
nw_proto=inet.IPPROTO_ICMP,
nw_src=faucet_vip_net,
nw_dst=faucet_vip_host),
priority=priority))
return ofmsgs
def _control_plane_arp_handler(self, pkt_meta, arp_pkt):
src_ip = ipaddr.IPv4Address(arp_pkt.src_ip)
dst_ip = ipaddr.IPv4Address(arp_pkt.dst_ip)
vlan = pkt_meta.vlan
opcode = arp_pkt.opcode
ofmsgs = []
if vlan.from_connected_to_vip(src_ip, dst_ip):
in_port = pkt_meta.port.number
eth_src = pkt_meta.eth_src
if opcode == arp.ARP_REQUEST:
ofmsgs.extend(
self._add_host_fib_route(vlan, src_ip))
vid = self._vlan_vid(vlan, in_port)
arp_reply = valve_packet.arp_reply(
self.faucet_mac, eth_src, vid, dst_ip, src_ip)
ofmsgs.append(
valve_of.packetout(in_port, arp_reply.data))
self.logger.info(
'Responded to ARP request for %s from %s (%s)',
dst_ip, src_ip, eth_src)
elif (opcode == arp.ARP_REPLY and
pkt_meta.eth_dst == self.faucet_mac):
ofmsgs.extend(
self._update_nexthop(vlan, in_port, eth_src, src_ip))
self.logger.info(
'ARP response %s (%s)', src_ip, eth_src)
return ofmsgs
def _control_plane_icmp_handler(self, pkt_meta, ipv4_pkt, icmp_pkt):
src_ip = ipaddr.IPv4Address(ipv4_pkt.src)
dst_ip = ipaddr.IPv4Address(ipv4_pkt.dst)
vlan = pkt_meta.vlan
icmpv4_type = icmp_pkt.type
ofmsgs = []
if vlan.from_connected_to_vip(src_ip, dst_ip):
if (icmpv4_type == icmp.ICMP_ECHO_REQUEST and
pkt_meta.eth_dst == self.faucet_mac):
in_port = pkt_meta.port.number
vid = self._vlan_vid(vlan, in_port)
echo_reply = valve_packet.echo_reply(
self.faucet_mac, pkt_meta.eth_src,
vid, dst_ip, src_ip, icmp_pkt.data)
ofmsgs.append(
valve_of.packetout(in_port, echo_reply.data))
return ofmsgs
def control_plane_handler(self, pkt_meta):
arp_pkt = pkt_meta.pkt.get_protocol(arp.arp)
if arp_pkt is not None:
return self._control_plane_arp_handler(pkt_meta, arp_pkt)
ipv4_pkt = pkt_meta.pkt.get_protocol(ipv4.ipv4)
if ipv4_pkt is not None:
icmp_pkt = pkt_meta.pkt.get_protocol(icmp.icmp)
if icmp_pkt is not None:
return self._control_plane_icmp_handler(
pkt_meta, ipv4_pkt, icmp_pkt)
return []
class ValveIPv6RouteManager(ValveRouteManager):
"""Implement IPv6 FIB."""
def _eth_type(self):
return ether.ETH_TYPE_IPV6
def _vlan_routes(self, vlan):
return vlan.ipv6_routes
def _vlan_nexthop_cache(self, vlan):
return vlan.nd_cache
def _neighbor_resolver_pkt(self, vid, faucet_vip, ip_gw):
return valve_packet.nd_request(
self.faucet_mac, vid, faucet_vip.ip, ip_gw)
def _ip_pkt(self, pkt):
return pkt.get_protocol(ipv6.ipv6)
def add_faucet_vip(self, vlan, faucet_vip):
ofmsgs = []
faucet_vip_host = ipaddr.IPNetwork(faucet_vip.ip)
max_prefixlen = faucet_vip_host.prefixlen
priority = self.route_priority + max_prefixlen
ofmsgs.append(self.valve_flowmod(
self.eth_src_table,
self.valve_in_match(
self.eth_src_table,
eth_type=self._eth_type(),
vlan=vlan,
nw_proto=inet.IPPROTO_ICMPV6,
ipv6_nd_target=faucet_vip_host,
icmpv6_type=icmpv6.ND_NEIGHBOR_SOLICIT),
priority=priority,
inst=[valve_of.apply_actions([valve_of.output_controller()]),
valve_of.goto_table(self.eth_dst_table)]))
ofmsgs.append(self.valve_flowmod(
self.eth_src_table,
self.valve_in_match(
self.eth_src_table,
eth_type=self._eth_type(),
eth_dst=self.faucet_mac,
vlan=vlan,
nw_proto=inet.IPPROTO_ICMPV6,
icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT),
priority=priority,
inst=[valve_of.apply_actions([valve_of.output_controller()]),
valve_of.goto_table(self.eth_dst_table)]))
# Initialize IPv6 FIB
ofmsgs.append(self.valve_flowmod(
self.eth_src_table,
self.valve_in_match(
self.eth_src_table,
eth_type=self._eth_type(),
eth_dst=self.faucet_mac,
vlan=vlan),
priority=self.route_priority,
inst=[valve_of.goto_table(self.fib_table)]))
ofmsgs.append(self.valve_flowcontroller(
self.fib_table,
self.valve_in_match(
self.fib_table,
eth_type=self._eth_type(),
vlan=vlan,
nw_proto=inet.IPPROTO_ICMPV6,
nw_dst=faucet_vip_host,
icmpv6_type=icmpv6.ICMPV6_ECHO_REQUEST),
priority=priority))
return ofmsgs
def _control_plane_icmpv6_handler(self, pkt_meta, ipv6_pkt, icmpv6_pkt):
vlan = pkt_meta.vlan
src_ip = ipaddr.IPv6Address(ipv6_pkt.src)
dst_ip = ipaddr.IPv6Address(ipv6_pkt.dst)
icmpv6_type = icmpv6_pkt.type_
ofmsgs = []
if vlan.ip_in_vip_subnet(src_ip):
in_port = pkt_meta.port.number
vid = self._vlan_vid(vlan, in_port)
eth_src = pkt_meta.eth_src
if icmpv6_type == icmpv6.ND_NEIGHBOR_SOLICIT:
solicited_ip = icmpv6_pkt.data.dst
if vlan.is_faucet_vip(ipaddr.IPAddress(solicited_ip)):
ofmsgs.extend(
self._add_host_fib_route(vlan, src_ip))
nd_reply = valve_packet.nd_reply(
self.faucet_mac, eth_src, vid,
solicited_ip, src_ip, ipv6_pkt.hop_limit)
ofmsgs.append(
valve_of.packetout(in_port, nd_reply.data))
self.logger.info(
'Responded to ND solicit for %s to %s (%s)',
solicited_ip, src_ip, eth_src)
elif icmpv6_type == icmpv6.ND_NEIGHBOR_ADVERT:
ofmsgs.extend(self._update_nexthop(
vlan, in_port, eth_src, src_ip))
self.logger.info(
'ND advert %s (%s)', src_ip, eth_src)
elif vlan.from_connected_to_vip(src_ip, dst_ip):
if (icmpv6_type == icmpv6.ICMPV6_ECHO_REQUEST and
pkt_meta.eth_dst == self.faucet_mac):
icmpv6_echo_reply = valve_packet.icmpv6_echo_reply(
self.faucet_mac, eth_src, vid,
dst_ip, src_ip, ipv6_pkt.hop_limit,
icmpv6_pkt.data.id, icmpv6_pkt.data.seq,
icmpv6_pkt.data.data)
ofmsgs.append(
valve_of.packetout(in_port, icmpv6_echo_reply.data))
return ofmsgs
def control_plane_handler(self, pkt_meta):
pkt = pkt_meta.pkt
ipv6_pkt = pkt.get_protocol(ipv6.ipv6)
if ipv6_pkt is not None:
icmpv6_pkt = pkt.get_protocol(icmpv6.icmpv6)
if icmpv6_pkt is not None:
return self._control_plane_icmpv6_handler(
pkt_meta, ipv6_pkt, icmpv6_pkt)
return []
```
#### File: 514-SDN-assignment-/tests/faucet_mininet_test_base.py
```python
import json
import os
import re
import shutil
import tempfile
import time
import unittest
import yaml
import ipaddr
import requests
from mininet.node import Controller
from mininet.node import Host
from mininet.node import OVSSwitch
from mininet.topo import Topo
from ryu.ofproto import ofproto_v1_3 as ofp
import faucet_mininet_test_util
class FAUCET(Controller):
"""Start a FAUCET controller."""
def __init__(self,
name,
ports_sock,
cdir=faucet_mininet_test_util.FAUCET_DIR,
command='ryu-manager ryu.app.ofctl_rest faucet.py',
cargs='--ofp-tcp-listen-port=%s --verbose --use-stderr',
**kwargs):
name = 'faucet-%u' % os.getpid()
self.ofctl_port, _ = faucet_mininet_test_util.find_free_port(
ports_sock)
cargs = '--wsapi-port=%u %s' % (self.ofctl_port, cargs)
Controller.__init__(
self,
name,
cdir=cdir,
command=command,
cargs=cargs,
**kwargs)
class Gauge(Controller):
"""Start a Gauge controller."""
def __init__(self,
name,
cdir=faucet_mininet_test_util.FAUCET_DIR,
command='ryu-manager gauge.py',
cargs='--ofp-tcp-listen-port=%s --verbose --use-stderr',
**kwargs):
name = 'gauge-%u' % os.getpid()
Controller.__init__(
self,
name,
cdir=cdir,
command=command,
cargs=cargs,
**kwargs)
class FaucetAPI(Controller):
'''Start a controller to run the Faucet API tests.'''
def __init__(self,
name,
command='ryu-manager {0}/faucet.py test_api.py'.format(
faucet_mininet_test_util.FAUCET_DIR),
cargs='--ofp-tcp-listen-port=%s --verbose --use-stderr',
**kwargs):
name = 'faucet-api-%u' % os.getpid()
Controller.__init__(
self,
name,
command=command,
cargs=cargs,
**kwargs)
class FaucetSwitch(OVSSwitch):
"""Switch that will be used by all tests (kernel based OVS)."""
def __init__(self, name, **params):
OVSSwitch.__init__(
self, name=name, datapath='kernel', **params)
class VLANHost(Host):
"""Implementation of a Mininet host on a tagged VLAN."""
def config(self, vlan=100, **params):
"""Configure VLANHost according to (optional) parameters:
vlan: VLAN ID for default interface"""
super_config = super(VLANHost, self).config(**params)
intf = self.defaultIntf()
vlan_intf_name = '%s.%d' % (intf, vlan)
self.cmd('ip -4 addr flush dev %s' % intf)
self.cmd('ip -6 addr flush dev %s' % intf)
self.cmd('vconfig add %s %d' % (intf, vlan))
self.cmd('ip link set dev %s up' % vlan_intf_name)
self.cmd('ip -4 addr add %s dev %s' % (params['ip'], vlan_intf_name))
intf.name = vlan_intf_name
self.nameToIntf[vlan_intf_name] = intf
return super_config
class FaucetSwitchTopo(Topo):
"""FAUCET switch topology that contains a software switch."""
def _get_sid_prefix(self, ports_served):
"""Return a unique switch/host prefix for a test."""
# Linux tools require short interface names.
return '%2.2x' % ports_served
def _add_tagged_host(self, sid_prefix, tagged_vid, host_n):
"""Add a single tagged test host."""
host_name = 't%s%1.1u' % (sid_prefix, host_n + 1)
return self.addHost(
name=host_name,
cls=VLANHost,
vlan=tagged_vid)
def _add_untagged_host(self, sid_prefix, host_n):
"""Add a single untagged test host."""
host_name = 'u%s%1.1u' % (sid_prefix, host_n + 1)
return self.addHost(name=host_name)
def _add_faucet_switch(self, sid_prefix, port, dpid):
"""Add a FAUCET switch."""
switch_name = 's%s' % sid_prefix
return self.addSwitch(
name=switch_name,
cls=FaucetSwitch,
listenPort=port,
dpid=faucet_mininet_test_util.mininet_dpid(dpid))
def build(self, ports_sock, dpid=0, n_tagged=0, tagged_vid=100, n_untagged=0):
port, ports_served = faucet_mininet_test_util.find_free_port(ports_sock)
sid_prefix = self._get_sid_prefix(ports_served)
for host_n in range(n_tagged):
self._add_tagged_host(sid_prefix, tagged_vid, host_n)
for host_n in range(n_untagged):
self._add_untagged_host(sid_prefix, host_n)
switch = self._add_faucet_switch(sid_prefix, port, dpid)
for host in self.hosts():
self.addLink(host, switch)
class FaucetHwSwitchTopo(FaucetSwitchTopo):
"""FAUCET switch topology that contains a hardware switch."""
def build(self, ports_sock, dpid=0, n_tagged=0, tagged_vid=100, n_untagged=0):
port, ports_served = faucet_mininet_test_util.find_free_port(ports_sock)
sid_prefix = self._get_sid_prefix(ports_served)
for host_n in range(n_tagged):
self._add_tagged_host(sid_prefix, tagged_vid, host_n)
for host_n in range(n_untagged):
self._add_untagged_host(sid_prefix, host_n)
dpid = str(int(dpid) + 1)
print 'remap switch will use DPID %s (%x)' % (dpid, int(dpid))
switch = self._add_faucet_switch(sid_prefix, port, dpid)
for host in self.hosts():
self.addLink(host, switch)
class FaucetTestBase(unittest.TestCase):
"""Base class for all FAUCET unit tests."""
ONE_GOOD_PING = '1 packets transmitted, 1 received, 0% packet loss'
FAUCET_VIPV4 = ipaddr.IPv4Network('10.0.0.254/24')
FAUCET_VIPV4_2 = ipaddr.IPv4Network('172.16.0.254/24')
FAUCET_VIPV6 = ipaddr.IPv6Network('fc00::1:254/64')
FAUCET_VIPV6_2 = ipaddr.IPv6Network('fc01::1:254/64')
OFCTL = 'ovs-ofctl -OOpenFlow13'
BOGUS_MAC = '01:02:03:04:05:06'
FAUCET_MAC = '0e:00:00:00:00:01'
LADVD = 'timeout 30s ladvd -e lo -f'
CONFIG = ''
CONFIG_GLOBAL = ''
config = None
dpid = None
hardware = 'Open vSwitch'
hw_switch = False
gauge_of_port = None
net = None
of_port = None
port_map = {'port_1': 1, 'port_2': 2, 'port_3': 3, 'port_4': 4}
switch_map = {}
tmpdir = None
def __init__(self, name, config, root_tmpdir, ports_sock):
super(FaucetTestBase, self).__init__(name)
self.config = config
self.root_tmpdir = root_tmpdir
self.ports_sock = ports_sock
def tmpdir_name(self):
test_name = '-'.join(self.id().split('.')[1:])
return tempfile.mkdtemp(
prefix='%s-' % test_name, dir=self.root_tmpdir)
def tearDown(self):
"""Clean up after a test."""
        # There must not be any controller exceptions.
self.assertEquals(
0, os.path.getsize(os.environ['FAUCET_EXCEPTION_LOG']))
controller_names = []
for controller in self.net.controllers:
controller_names.append(controller.name)
if self.net is not None:
self.net.stop()
test_class_name = self.id().split('.')[1]
if (not test_class_name.startswith('FaucetGroup') and
not test_class_name.startswith('FaucetSingleGroup')):
for dp_name, debug_log in self.get_ofchannel_logs():
self.assertFalse(re.search('OFPErrorMsg', open(debug_log).read()),
msg='debug log has OFPErrorMsgs')
# Associate controller log with test results, if we are keeping
# the temporary directory, or effectively delete it if not.
# mininet doesn't have a way to change its log name for the controller.
for controller_name in controller_names:
shutil.move('/tmp/%s.log' % controller_name, self.tmpdir)
def pre_start_net(self):
"""Hook called after Mininet initializtion, before Mininet started."""
return
def get_config_header(self, config_global, debug_log, dpid, hardware):
"""Build v2 FAUCET config header."""
return """
version: 2
%s
dps:
faucet-1:
ofchannel_log: %s
dp_id: 0x%x
hardware: "%s"
""" % (config_global, debug_log, int(dpid), hardware)
def get_gauge_config(self, faucet_config_file,
monitor_stats_file,
monitor_state_file,
monitor_flow_table_file):
"""Build Gauge config."""
return """
version: 2
faucet_configs:
- %s
watchers:
port_stats:
dps: ['faucet-1']
type: 'port_stats'
interval: 5
db: 'stats_file'
port_state:
dps: ['faucet-1']
type: 'port_state'
interval: 5
db: 'state_file'
flow_table:
dps: ['faucet-1']
type: 'flow_table'
interval: 5
db: 'flow_file'
dbs:
stats_file:
type: 'text'
file: %s
state_file:
type: 'text'
file: %s
flow_file:
type: 'text'
file: %s
""" % (faucet_config_file, monitor_stats_file,
monitor_state_file, monitor_flow_table_file)
def get_controller(self):
"""Return the first (only) controller."""
return self.net.controllers[0]
def ofctl_rest_url(self):
"""Return control URL for Ryu ofctl module."""
return 'http://127.0.0.1:%u' % self.get_controller().ofctl_port
def get_all_groups_desc_from_dpid(self, dpid, timeout=2):
int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
for _ in range(timeout):
try:
ofctl_result = json.loads(requests.get(
'%s/stats/groupdesc/%s' % (self.ofctl_rest_url(),
int_dpid)).text)
flow_dump = ofctl_result[int_dpid]
return [json.dumps(flow) for flow in flow_dump]
except (ValueError, requests.exceptions.ConnectionError):
# Didn't get valid JSON, try again
time.sleep(1)
continue
return []
def get_group_id_for_matching_flow(self, exp_flow, timeout=10):
for _ in range(timeout):
flow_dump = self.get_all_flows_from_dpid(self.dpid, timeout)
for flow in flow_dump:
if re.search(exp_flow, flow):
flow = json.loads(flow)
group_id = int(re.findall(r'\d+', str(flow['actions']))[0])
return group_id
time.sleep(1)
self.assertTrue(False,
"Can't find group_id for matching flow %s" % exp_flow)
def wait_matching_in_group_table(self, exp_flow, group_id, timeout=10):
exp_group = '%s.+"group_id": %d' % (exp_flow, group_id)
for _ in range(timeout):
group_dump = self.get_all_groups_desc_from_dpid(self.dpid, 1)
for group_desc in group_dump:
if re.search(exp_group, group_desc):
return True
time.sleep(1)
return False
def get_all_flows_from_dpid(self, dpid, timeout=10):
"""Return all flows from DPID."""
for _ in range(timeout):
try:
ofctl_result = json.loads(requests.get(
'%s/stats/flow/%s' % (self.ofctl_rest_url(), dpid)).text)
except (ValueError, requests.exceptions.ConnectionError):
# Didn't get valid JSON, try again
time.sleep(1)
continue
flow_dump = ofctl_result[dpid]
return [json.dumps(flow) for flow in flow_dump]
return []
def get_matching_flow_on_dpid(self, dpid, exp_flow, timeout=10):
"""Return flow matching an RE from DPID."""
for _ in range(timeout):
flow_dump = self.get_all_flows_from_dpid(dpid)
for flow in flow_dump:
if re.search(exp_flow, flow):
return json.loads(flow)
time.sleep(1)
return {}
def get_matching_flow(self, exp_flow, timeout=10):
"""Return flow matching an RE from default DPID."""
return self.get_matching_flow_on_dpid(self.dpid, exp_flow, timeout)
def matching_flow_present_on_dpid(self, dpid, exp_flow, timeout=10):
"""Return True if matching flow is present on a DPID."""
if self.get_matching_flow_on_dpid(dpid, exp_flow, timeout):
return True
return False
def matching_flow_present(self, exp_flow, timeout=10):
"""Return True if matching flow is present on default DPID."""
return self.matching_flow_present_on_dpid(self.dpid, exp_flow, timeout)
def wait_until_matching_flow(self, exp_flow, timeout=10):
"""Wait (require) for flow to be present on default DPID."""
self.assertTrue(self.matching_flow_present(exp_flow, timeout),
msg=exp_flow)
def host_learned(self, host, timeout=10):
"""Return True if a host has been learned on default DPID."""
return self.matching_flow_present(
'"table_id": 3,.+"dl_src": "%s"' % host.MAC(), timeout)
def host_ipv4(self, host):
"""Return first IPv4/netmask for host's default interface."""
host_ip_cmd = (
r'ip -o -f inet addr show %s|grep -m 1 -Eo "[0-9\\.]+\/[0-9]+"')
return host.cmd(host_ip_cmd % host.defaultIntf()).strip()
def host_ipv6(self, host):
"""Return first IPv6/netmask for host's default interface."""
host_ip_cmd = (
r'ip -o -f inet6 addr show %s|grep -m 1 -Eo "[0-9a-f\:]+\/[0-9]+"')
return host.cmd(host_ip_cmd % host.defaultIntf()).strip()
def require_host_learned(self, host, retries=3):
"""Require a host be learned on default DPID."""
host_ip_net = self.host_ipv4(host)
ping_cmd = 'ping'
if not host_ip_net:
host_ip_net = self.host_ipv6(host)
broadcast = (ipaddr.IPNetwork(host_ip_net).broadcast)
if broadcast.version == 6:
ping_cmd = 'ping6'
for _ in range(retries):
if self.host_learned(host, timeout=1):
return
# stimulate host learning with a broadcast ping
host.cmd('%s -i 0.2 -c 1 -b %s' % (ping_cmd, broadcast))
self.fail('host %s could not be learned' % host)
def get_ofchannel_logs(self):
config = yaml.load(open(os.environ['FAUCET_CONFIG']))
ofchannel_logs = []
for dp_name, dp_config in config['dps'].iteritems():
if 'ofchannel_log' in dp_config:
debug_log = dp_config['ofchannel_log']
ofchannel_logs.append((dp_name, debug_log))
return ofchannel_logs
def wait_debug_log(self):
"""Require all switches to have exchanged flows with controller."""
ofchannel_logs = self.get_ofchannel_logs()
for dp_name, debug_log in ofchannel_logs:
debug_log_present = False
for _ in range(20):
if (os.path.exists(debug_log) and
os.path.getsize(debug_log) > 0):
debug_log_present = True
break
time.sleep(1)
if not debug_log_present:
self.fail(
'no controller debug log for switch %s' % dp_name)
def hup_faucet(self):
"""Send a HUP signal to the controller."""
controller = self.get_controller()
tcp_pattern = '%s/tcp' % controller.port
fuser_out = controller.cmd('fuser %s -k -1' % tcp_pattern)
self.assertTrue(re.search(r'%s:\s+\d+' % tcp_pattern, fuser_out))
def force_faucet_reload(self, new_config):
"""Force FAUCET to reload by adding new line to config file."""
open(os.environ['FAUCET_CONFIG'], 'a').write(new_config)
self.hup_faucet()
def curl_portmod(self, int_dpid, port_no, config, mask):
"""Use curl to send a portmod command via the ofctl module."""
curl_format = ' '.join((
'curl -X POST -d'
'\'{"dpid": %s, "port_no": %u, "config": %u, "mask": %u}\'',
'%s/stats/portdesc/modify'))
return curl_format % (
int_dpid, port_no, config, mask, self.ofctl_rest_url())
def flap_all_switch_ports(self, flap_time=1):
"""Flap all ports on switch."""
for port_no in self.port_map.itervalues():
os.system(self.curl_portmod(
self.dpid,
port_no,
ofp.OFPPC_PORT_DOWN,
ofp.OFPPC_PORT_DOWN))
time.sleep(flap_time)
os.system(self.curl_portmod(
self.dpid,
port_no,
0,
ofp.OFPPC_PORT_DOWN))
def add_host_ipv6_address(self, host, ip_v6):
"""Add an IPv6 address to a Mininet host."""
self.assertEquals(
'',
host.cmd('ip -6 addr add %s dev %s' % (ip_v6, host.intf())))
def add_host_ipv6_route(self, host, ip_dst, ip_gw):
"""Add an IPv6 route to a Mininet host."""
host.cmd('ip -6 route del %s' % ip_dst.masked())
self.assertEquals(
'',
host.cmd('ip -6 route add %s via %s' % (ip_dst.masked(), ip_gw)))
def add_host_ipv4_route(self, host, ip_dst, ip_gw):
"""Add an IPv4 route to a Mininet host."""
host.cmd('ip -4 route del %s' % ip_dst.masked())
self.assertEquals(
'',
host.cmd('ip -4 route add %s via %s' % (ip_dst.masked(), ip_gw)))
def one_ipv4_ping(self, host, dst, retries=3):
"""Ping an IPv4 destination from a host."""
self.require_host_learned(host)
for _ in range(retries):
ping_result = host.cmd('ping -c1 %s' % dst)
if re.search(self.ONE_GOOD_PING, ping_result):
return
self.assertTrue(re.search(self.ONE_GOOD_PING, ping_result))
def one_ipv4_controller_ping(self, host):
"""Ping the controller from a host with IPv4."""
self.one_ipv4_ping(host, self.FAUCET_VIPV4.ip)
self.verify_ipv4_host_learned_mac(
host, self.FAUCET_VIPV4.ip, self.FAUCET_MAC)
def one_ipv6_ping(self, host, dst, retries=3):
"""Ping an IPv6 destination from a host."""
self.require_host_learned(host)
# TODO: retry our one ping. We should not have to retry.
for _ in range(retries):
ping_result = host.cmd('ping6 -c1 %s' % dst)
if re.search(self.ONE_GOOD_PING, ping_result):
return
self.assertTrue(re.search(self.ONE_GOOD_PING, ping_result))
def one_ipv6_controller_ping(self, host):
"""Ping the controller from a host with IPv6."""
self.one_ipv6_ping(host, self.FAUCET_VIPV6.ip)
self.verify_ipv6_host_learned_mac(
host, self.FAUCET_VIPV6.ip, self.FAUCET_MAC)
def wait_for_tcp_listen(self, host, port, timeout=10):
"""Wait for a host to start listening on a port."""
for _ in range(timeout):
fuser_out = host.cmd('fuser -n tcp %u' % port)
if re.search(r'.*%u/tcp.*' % port, fuser_out):
return
time.sleep(1)
self.fail('%s never listened on port %u (%s)' % (host, port, fuser_out))
def serve_hello_on_tcp_port(self, host, port):
"""Serve 'hello' on a TCP port on a host."""
host.cmd('timeout 10s echo hello | nc -l %s %u &' % (host.IP(), port))
self.wait_for_tcp_listen(host, port)
def verify_tp_dst_blocked(self, port, first_host, second_host):
"""Verify that a TCP port on a host is blocked from another host."""
self.serve_hello_on_tcp_port(second_host, port)
self.assertEquals(
'', first_host.cmd('timeout 10s nc %s %u' % (second_host.IP(), port)))
self.wait_until_matching_flow(
r'"packet_count": [1-9]+.+"tp_dst": %u' % port)
def verify_tp_dst_notblocked(self, port, first_host, second_host):
"""Verify that a TCP port on a host is NOT blocked from another host."""
self.serve_hello_on_tcp_port(second_host, port)
self.assertEquals(
'hello\r\n',
first_host.cmd('nc -w 5 %s %u' % (second_host.IP(), port)))
self.wait_until_matching_flow(
r'"packet_count": [1-9]+.+"tp_dst": %u' % port)
def swap_host_macs(self, first_host, second_host):
"""Swap the MAC addresses of two Mininet hosts."""
first_host_mac = first_host.MAC()
second_host_mac = second_host.MAC()
first_host.setMAC(second_host_mac)
second_host.setMAC(first_host_mac)
def start_exabgp(self, exabgp_conf, listen_address='127.0.0.1', port=179):
"""Start exabgp process on controller host."""
self.stop_exabgp(port)
exabgp_conf_file = os.path.join(self.tmpdir, 'exabgp.conf')
exabgp_log = os.path.join(self.tmpdir, 'exabgp.log')
exabgp_err = os.path.join(self.tmpdir, 'exabgp.err')
exabgp_env = ' '.join((
'exabgp.tcp.bind="%s"' % listen_address,
'exabgp.tcp.port=%u' % port,
'exabgp.log.all=true',
'exabgp.log.routes=true',
'exabgp.log.rib=true',
'exabgp.log.packets=true',
'exabgp.log.parser=true',
))
open(exabgp_conf_file, 'w').write(exabgp_conf)
controller = self.get_controller()
controller.cmd(
'env %s timeout -s9 180s '
'stdbuf -o0 -e0 exabgp %s -d 2> %s > %s &' % (
exabgp_env, exabgp_conf_file, exabgp_err, exabgp_log))
self.wait_for_tcp_listen(controller, port)
return exabgp_log
def wait_bgp_up(self, exabgp_log):
"""Wait for BGP to come up."""
for _ in range(60):
exabgp_log_content = open(exabgp_log).read()
if exabgp_log_content.find('OPENCONFIRM') > -1:
return
time.sleep(1)
self.fail('exabgp did not peer with FAUCET')
def stop_exabgp(self, port=179):
"""Stop exabgp process on controller host."""
controller = self.get_controller()
controller.cmd('fuser %s/tcp -k -9' % port)
def exabgp_updates(self, exabgp_log):
"""Verify that exabgp process has received BGP updates."""
controller = self.get_controller()
# exabgp should have received our BGP updates
for _ in range(60):
updates = controller.cmd(
r'grep UPDATE %s |grep -Eo "\S+ next-hop \S+"' % exabgp_log)
if updates:
return updates
time.sleep(1)
self.fail('exabgp did not receive BGP updates')
def wait_exabgp_sent_updates(self, exabgp_log):
"""Verify that exabgp process has sent BGP updates."""
for _ in range(60):
exabgp_log_content = open(exabgp_log).read()
if re.search(r'>> [1-9]+[0-9]* UPDATE', exabgp_log_content):
return
time.sleep(1)
self.fail('exabgp did not send BGP updates')
def ping_all_when_learned(self, retries=3):
"""Verify all hosts can ping each other once FAUCET has learned all."""
# Cause hosts to send traffic that FAUCET can use to learn them.
for _ in range(retries):
loss = self.net.pingAll()
# we should have learned all hosts now, so should have no loss.
for host in self.net.hosts:
self.require_host_learned(host)
if loss == 0:
return
self.assertEquals(0, loss)
def wait_for_route_as_flow(self, nexthop, prefix, timeout=10,
with_group_table=False):
"""Verify a route has been added as a flow."""
if prefix.version == 6:
exp_prefix = '/'.join(
(str(prefix.masked().ip), str(prefix.netmask)))
nw_dst_match = '"ipv6_dst": "%s"' % exp_prefix
else:
exp_prefix = prefix.masked().with_netmask
nw_dst_match = '"nw_dst": "%s"' % exp_prefix
if with_group_table:
group_id = self.get_group_id_for_matching_flow(nw_dst_match)
self.wait_matching_in_group_table('SET_FIELD: {eth_dst:%s}' % nexthop,
group_id, timeout)
else:
self.wait_until_matching_flow(
'SET_FIELD: {eth_dst:%s}.+%s' % (nexthop, nw_dst_match), timeout)
def host_ipv4_alias(self, host, alias_ip):
"""Add an IPv4 alias address to a host."""
del_cmd = 'ip addr del %s/%s dev %s' % (
alias_ip.ip, alias_ip.prefixlen, host.intf())
add_cmd = 'ip addr add %s/%s dev %s label %s:1' % (
alias_ip.ip, alias_ip.prefixlen, host.intf(), host.intf())
host.cmd(del_cmd)
self.assertEquals('', host.cmd(add_cmd))
def verify_ipv4_host_learned_mac(self, host, ip, mac):
learned_mac = host.cmd(
"arp -n %s | grep %s | awk '{ print $3 }'" % (ip, ip))
self.assertEqual(learned_mac.strip(), mac,
msg='MAC learned on host mismatch')
def verify_ipv4_host_learned_host(self, host, learned_host):
learned_ip = ipaddr.IPNetwork(self.host_ipv4(learned_host))
self.verify_ipv4_host_learned_mac(host, learned_ip.ip, learned_host.MAC())
def verify_ipv6_host_learned_mac(self, host, ip6, mac):
learned_mac = host.cmd(
"ip -6 neighbor show %s | awk '{ print $5 }'" % ip6)
self.assertEqual(learned_mac.strip(), mac,
msg='MAC learned on host mismatch')
def verify_ipv6_host_learned_host(self, host, learned_host):
learned_ip6 = ipaddr.IPNetwork(self.host_ipv6(learned_host))
self.verify_ipv6_host_learned_mac(host, learned_ip6.ip, learned_host.MAC())
def verify_ipv4_routing(self, first_host, first_host_routed_ip,
second_host, second_host_routed_ip,
with_group_table=False):
"""Verify one host can IPV4 route to another via FAUCET."""
self.host_ipv4_alias(first_host, first_host_routed_ip)
self.host_ipv4_alias(second_host, second_host_routed_ip)
self.add_host_ipv4_route(
first_host, second_host_routed_ip, self.FAUCET_VIPV4.ip)
self.add_host_ipv4_route(
second_host, first_host_routed_ip, self.FAUCET_VIPV4.ip)
self.net.ping(hosts=(first_host, second_host))
self.wait_for_route_as_flow(
first_host.MAC(), first_host_routed_ip,
with_group_table=with_group_table)
self.wait_for_route_as_flow(
second_host.MAC(), second_host_routed_ip,
with_group_table=with_group_table)
self.one_ipv4_ping(first_host, second_host_routed_ip.ip)
self.one_ipv4_ping(second_host, first_host_routed_ip.ip)
self.verify_ipv4_host_learned_host(first_host, second_host)
self.verify_ipv4_host_learned_host(second_host, first_host)
def verify_ipv4_routing_mesh(self, with_group_table=False):
"""Verify hosts can route to each other via FAUCET."""
host_pair = self.net.hosts[:2]
first_host, second_host = host_pair
first_host_routed_ip = ipaddr.IPv4Network('10.0.1.1/24')
second_host_routed_ip = ipaddr.IPv4Network('10.0.2.1/24')
second_host_routed_ip2 = ipaddr.IPv4Network('10.0.3.1/24')
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip,
with_group_table=with_group_table)
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip2,
with_group_table=with_group_table)
self.swap_host_macs(first_host, second_host)
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip,
with_group_table=with_group_table)
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip2,
with_group_table=with_group_table)
def setup_ipv6_hosts_addresses(self, first_host, first_host_ip,
first_host_routed_ip, second_host,
second_host_ip, second_host_routed_ip):
"""Configure host IPv6 addresses for testing."""
for host in first_host, second_host:
host.cmd('ip -6 addr flush dev %s' % host.intf())
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
self.add_host_ipv6_address(first_host, first_host_routed_ip)
self.add_host_ipv6_address(second_host, second_host_routed_ip)
for host in first_host, second_host:
self.require_host_learned(host)
def verify_ipv6_routing(self, first_host, first_host_ip,
first_host_routed_ip, second_host,
second_host_ip, second_host_routed_ip,
with_group_table=False):
"""Verify one host can IPV6 route to another via FAUCET."""
self.one_ipv6_ping(first_host, second_host_ip.ip)
self.one_ipv6_ping(second_host, first_host_ip.ip)
self.add_host_ipv6_route(
first_host, second_host_routed_ip, self.FAUCET_VIPV6.ip)
self.add_host_ipv6_route(
second_host, first_host_routed_ip, self.FAUCET_VIPV6.ip)
self.wait_for_route_as_flow(
first_host.MAC(), first_host_routed_ip,
with_group_table=with_group_table)
self.wait_for_route_as_flow(
second_host.MAC(), second_host_routed_ip,
with_group_table=with_group_table)
self.one_ipv6_controller_ping(first_host)
self.one_ipv6_controller_ping(second_host)
self.one_ipv6_ping(first_host, second_host_routed_ip.ip)
self.one_ipv6_ping(second_host, first_host_routed_ip.ip)
self.verify_ipv6_host_learned_mac(
first_host, second_host_ip.ip, second_host.MAC())
self.verify_ipv6_host_learned_mac(
second_host, first_host_ip.ip, first_host.MAC())
def verify_ipv6_routing_pair(self, first_host, first_host_ip,
first_host_routed_ip, second_host,
second_host_ip, second_host_routed_ip,
with_group_table=False):
"""Verify hosts can route IPv6 to each other via FAUCET."""
self.setup_ipv6_hosts_addresses(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip)
self.verify_ipv6_routing(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip,
with_group_table=with_group_table)
def verify_ipv6_routing_mesh(self, with_group_table=False):
"""Verify IPv6 routing between hosts and multiple subnets."""
host_pair = self.net.hosts[:2]
first_host, second_host = host_pair
first_host_ip = ipaddr.IPv6Network('fc00::1:1/112')
second_host_ip = ipaddr.IPv6Network('fc00::1:2/112')
first_host_routed_ip = ipaddr.IPv6Network('fc00::10:1/112')
second_host_routed_ip = ipaddr.IPv6Network('fc00::20:1/112')
second_host_routed_ip2 = ipaddr.IPv6Network('fc00::30:1/112')
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip,
with_group_table=with_group_table)
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip2,
with_group_table=with_group_table)
self.swap_host_macs(first_host, second_host)
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip,
with_group_table=with_group_table)
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip2,
with_group_table=with_group_table)
def verify_invalid_bgp_route(self, pattern):
"""Check if we see the pattern in Faucet's Log"""
controller = self.get_controller()
count = controller.cmd(
'grep -c "%s" %s' % (pattern, os.environ['FAUCET_LOG']))
self.assertGreater(count, 0)
```
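One detail of the test base class above that is easy to miss: `curl_portmod` does not talk to the controller itself, it only formats a shell command string that `flap_all_switch_ports` hands to `os.system`. Below is a standalone sketch of the string it builds, with placeholder values for the DPID, port number and Ryu ofctl port (the values are illustrative, not taken from a real run):
```python
# Placeholder inputs; in the tests they come from the DPID/port map and the controller.
int_dpid, port_no, ofctl_port = 1, 1, 8080
OFPPC_PORT_DOWN = 1  # bit 0 of the OpenFlow 1.3 port-config bitmap

# Same construction as FaucetTestBase.curl_portmod(): note the two adjacent string
# literals, which glue the JSON body directly onto curl's -d flag.
curl_format = ' '.join((
    'curl -X POST -d'
    '\'{"dpid": %s, "port_no": %u, "config": %u, "mask": %u}\'',
    '%s/stats/portdesc/modify'))
print(curl_format % (int_dpid, port_no, OFPPC_PORT_DOWN, OFPPC_PORT_DOWN,
                     'http://127.0.0.1:%u' % ofctl_port))
# -> curl -X POST -d'{"dpid": 1, "port_no": 1, "config": 1, "mask": 1}' http://127.0.0.1:8080/stats/portdesc/modify
# Posting this to Ryu's ofctl_rest portdesc/modify endpoint takes the port down;
# repeating it with config=0 (and the same mask) brings the port back up.
```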
#### File: 514-SDN-assignment-/tests/faucet_mininet_test_util.py
```python
import os
import socket
FAUCET_DIR = os.getenv('FAUCET_DIR', '../src/ryu_faucet/org/onfsdn/faucet')
RESERVED_FOR_TESTS_PORTS = (179, 5001, 5002, 6633, 6653, 9179)
def mininet_dpid(int_dpid):
"""Return stringified hex version, of int DPID for mininet."""
return str('%x' % int(int_dpid))
def str_int_dpid(str_dpid):
"""Return stringified int version, of int or hex DPID from YAML."""
str_dpid = str(str_dpid)
if str_dpid.startswith('0x'):
return str(int(str_dpid, 16))
else:
return str(int(str_dpid))
def find_free_port(ports_socket):
"""Retrieve a free TCP port from test server."""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ports_socket)
buf = ''
while not buf.find('\n') > -1:
buf = buf + sock.recv(1024)
return [int(x) for x in buf.strip().split()]
def serve_ports(ports_socket):
"""Implement a TCP server to dispense free TCP ports."""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(ports_socket)
sock.listen(1)
ports_served = set()
while True:
connection, _ = sock.accept()
while True:
free_socket = socket.socket()
free_socket.bind(('', 0))
free_port = free_socket.getsockname()[1]
free_socket.close()
if free_port < 1024:
continue
if free_port in RESERVED_FOR_TESTS_PORTS:
continue
if free_port in ports_served:
continue
break
ports_served.add(free_port)
connection.sendall('%u %u\n' % (free_port, len(ports_served)))
connection.close()
``` |
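The two helpers above form a small port-dispenser protocol: `serve_ports` loops forever on a Unix-domain socket handing out `(free_port, ports_served)` pairs, and `find_free_port` is its client. Here is a minimal sketch of how a harness might wire them together; the import name and socket path are assumptions, and since the module as written is Python 2 style, the sketch presumes a Python 2 interpreter:
```python
import os
import tempfile
import threading
import time

import faucet_mininet_test_util as util  # assumed import name

# Unix-domain socket path shared by the dispenser and its clients (illustrative).
ports_socket = os.path.join(tempfile.mkdtemp(), 'ports-server')

# serve_ports() never returns, so run it in a background daemon thread.
server = threading.Thread(target=util.serve_ports, args=(ports_socket,))
server.daemon = True
server.start()

# Wait for the dispenser to bind its socket before connecting to it.
while not os.path.exists(ports_socket):
    time.sleep(0.1)

# Each call returns a free TCP port plus the running count of ports served;
# the topology code uses that count to build unique switch/host name prefixes.
port, ports_served = util.find_free_port(ports_socket)
print('got free port %u (%u ports served so far)' % (port, ports_served))
```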
{
"source": "joye1503/cocrawler",
"score": 4
} |
#### File: cocrawler/cocrawler/topk.py
```python
from collections import namedtuple
from sortedcollections import ItemSortedDict
def getvaluevalue(k, v):
return -v.value # minus to invert sort
class topK_max:
'''
Given a stream of (key,value,ridealong) tuples, remember the k largest values.
If a key is added repeatedly, use the largest value.
'''
def __init__(self, size):
self.size = size
self.element = namedtuple('topK_max_element', ['value', 'ridealong'])
self.d = ItemSortedDict(getvaluevalue)
def add(self, key, value, ridealong):
if key in self.d:
if value >= self.d[key].value:
self.d[key] = self.element(value, ridealong)
elif len(self.d) < self.size:
self.d[key] = self.element(value, ridealong)
elif value > self.d.peekitem()[1].value:
self.d.popitem()
self.d[key] = self.element(value, ridealong)
def readout(self):
return [(k, list(v)) for k, v in self.d.items()]
# XXX to do: union, update
class topK_sum:
'''
Space-saving heavy hitters.
    Given a stream of (key, value) tuples, remember the k items
with the largest sum of values.
http://www.cse.ust.hk/~raywong/comp5331/References/EfficientComputationOfFrequentAndTop-kElementsInDataStreams.pdf
'''
def __init__(self, size):
self.size = size
        self.element = namedtuple('topK_sum_element', ['value', 'ridealong', 'fake'])
self.d = ItemSortedDict(getvaluevalue)
def add(self, key, value, ridealong):
if key in self.d:
self.d[key] = self.element(self.d[key].value + value, ridealong, self.d[key].fake)
elif len(self.d) < self.size:
self.d[key] = self.element(value, ridealong, 0)
elif value >= self.d.peekitem()[1].value:
self.d.popitem()
self.d[key] = self.element(value, ridealong, 0)
else:
evicted = self.d.popitem()
oldvalue = evicted[1].value
newvalue = max(value, oldvalue + 1)
fake = max(newvalue - value, 0)
self.d[key] = self.element(newvalue, ridealong, fake)
def readout(self):
ret = []
for i in self.d.items():
if i[1].value > 2 * i[1].fake:
ret.append((i[0], [i[1].value, i[1].ridealong]))
return ret
# XXX to do: union, update
class topK_sum_hhh:
'''
Hierarchical heavy hitter.
Given a stream of (key, (parts,), value) tuples, remember which
parts have the largest sum of values.
    Example: given a list of URL paths within a website, remember the most popular ones
'''
class topK_sum_hll:
'''
Heavy hitters with hyperloglog.
Given a stream of (key, value) pairs, remember the k keys
with the largest count of unique values. E.g. webpages
in a site with most unique incoming external links
'''
```
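A quick illustration of the two finished classes above; the `cocrawler.topk` import path is an assumption based on the file location. `topK_max` keeps the largest value seen per key, while `topK_sum` keeps the keys with the largest running sums:
```python
from cocrawler.topk import topK_max, topK_sum  # assumed import path

top = topK_max(2)
top.add('a.example', 10, 'ride-a')
top.add('b.example', 5, 'ride-b')
top.add('c.example', 7, 'ride-c')   # evicts b.example, the smallest value held
top.add('a.example', 3, 'ignored')  # smaller value for an existing key: no change
print(top.readout())
# [('a.example', [10, 'ride-a']), ('c.example', [7, 'ride-c'])]

summer = topK_sum(2)
for key, value in (('x', 1), ('y', 1), ('x', 1), ('z', 5)):
    summer.add(key, value, None)
print(summer.readout())
# [('z', [5, None]), ('x', [2, None])]
```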
#### File: cocrawler/scripts/run_burner_bench.py
```python
import sys
import logging
import functools
import asyncio
import cocrawler.burner as burner
import cocrawler.parse as parse
import cocrawler.stats as stats
test_threadcount = 2
loop = asyncio.get_event_loop()
b = burner.Burner(test_threadcount, loop, 'parser')
queue = asyncio.Queue()
def parse_all(name, string):
links1, _ = parse.find_html_links(string, url=name)
links2, embeds2 = parse.find_html_links_and_embeds(string, url=name)
all2 = links2.union(embeds2)
if len(links1) != len(all2):
print('{} had different link counts of {} and {}'.format(name, len(links1), len(all2)))
extra1 = links1.difference(all2)
extra2 = all2.difference(links1)
print(' extra in links: {!r}'.format(extra1))
print(' extra in links and embeds: {!r}'.format(extra2))
return 1,
async def work():
while True:
w = await queue.get()
string = ' ' * 10000
partial = functools.partial(parse_all, w, string)
await b.burn(partial)
queue.task_done()
async def crawl():
workers = [asyncio.Task(work(), loop=loop) for _ in range(test_threadcount)]
print('queue count is {}'.format(queue.qsize()))
await queue.join()
print('join is done')
for w in workers:
if not w.done():
w.cancel()
# Main program:
for i in range(10000):
queue.put_nowait('foo')
print('Queue size is {}, beginning work.'.format(queue.qsize()))
try:
loop.run_until_complete(crawl())
print('exit run until complete')
except KeyboardInterrupt:
sys.stderr.flush()
print('\nInterrupt. Exiting cleanly.\n')
finally:
loop.stop()
loop.run_forever()
loop.close()
levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[3])
stats.report()
parse.report()
```
#### File: tests/unit/test_accumulator.py
```python
from pytest import approx
import cocrawler.accumulator as accumulator
def test_event_accumulator():
A = accumulator.EventAccumulator()
assert A.read() == [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
A.accumulate(1)
assert A.read() == [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
A.accumulate(2.0)
assert A.read() == [1.5, 1.5, 1.5, 1.5, 1.5, 1.5]
[A.accumulate(x) for x in (2.0,)*8]
assert A.read() == [1.9, 1.9, 1.9, 1.9, 1.9, 1.9]
A.accumulate(2.0)
assert A.read() == [2.0, 1.9, 1.9, 1.9, 1.9, 1.9]
[A.accumulate(x) for x in (2.0,)*8]
assert A.read() == [2.0, 1.9, 1.9, 1.9, 1.9, 1.9]
A.accumulate(2.0)
assert A.read() == [2.0, 1.9, 1.9, 1.9, 1.9, 1.9]
# we don't expect it to change until we have 100 values
[A.accumulate(x) for x in (2.0,)*100]
assert A.read() == [2.0, approx(1.99), approx(1.99),
approx(1.99), approx(1.99), approx(1.99)]
A = accumulator.EventAccumulator()
[A.accumulate(x) for x in range(0, 110)]
assert A.read() == [104.5, 49.5, 49.5, 49.5, 49.5, 49.5]
A = accumulator.EventAccumulator()
[A.accumulate(x) for x in range(0, 1100)]
assert A.read() == [1094.5, 1049.5, 499.5, 499.5, 499.5, 499.5]
A = accumulator.EventAccumulator(function='max')
A.accumulate(1.0)
assert A.read() == [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
A.accumulate(2.0)
assert A.read() == [2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
A.accumulate(2.0)
assert A.read() == [2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
[A.accumulate(2.0) for x in range(0, 100)]
assert A.read() == [2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
[A.accumulate(1.0) for x in range(0, 20)]
assert A.read() == [1.0, 2.0, 2.0, 2.0, 2.0, 2.0]
def test_event_accumulator_debug(capsys):
A = accumulator.EventAccumulator(function='max')
A.accumulate(1.0, debug=True)
out, err = capsys.readouterr()
assert len(out) > 85 # not a very good test!
assert len(out) < 200 # not a very good test!
assert err == ''
```
#### File: tests/unit/test_parse.py
```python
from bs4 import BeautifulSoup
import cocrawler.parse as parse
from cocrawler.urls import URL
test_html = '''
<html>
<head><title>Foo</title><link href='link.html'></link></head>
<body>
<a href = "foo1.html">Anchor 1</a>
<a
href = foo2.htm>Anchor 2</a>
<a
href='foo3.html '>Anchor 3</a>
<img src=foo.gif />
<a href='torture"
<url>'>torture
anchor</a>
</body>
'''
test_html_harder = '''
<html>
<head></head>
<body>
<iframe src="iframe.html"></iframe>
<iframe src=""></iframe>
<link href="stylesheet.blah" rel="stylesheet">
<link href="" rel="stylesheet">
<link href="http://example.com" rel="prefetch">
<link href="do-not-crash-1">
<link href="do-not-crash-2" rel="one" rel="two">
<link href="">
</body>
'''
test_html_no_body = '''
<html>
<head><title>Foo</title><link href='link.html'></link></head>
<a href="foo1.html">Anchor 4</a>
<a
href=foo2.htm>Anchor 5</a>
<a
href="foo3.html ">Anchor 6</a>
<img src=foo.gif />
'''
test_html_no_head = '''
<html>
<body>
<a href="foo1.html">Anchor 7</a>
<a
href=foo2.htm>Anchor 8</a>
<a
href="foo3.html ">Anchor 9</a>
<img src=foo.gif />
</body>
'''
test_html_no_nothing = '''
<a href="foo1.html">Anchor 10</a>
<a
href=foo2.htm>Anchor 11</a>
<a
href="foo3.html ">Anchor 12</a>
<img src=foo.gif />
'''
def test_do_burner_work_html():
urlj = URL('http://example.com')
test_html_bytes = test_html.encode(encoding='utf-8', errors='replace')
headers = {}
links, embeds, sha1, facets, base = parse.do_burner_work_html(test_html, test_html_bytes, headers, url=urlj)
assert len(links) == 4
assert len(embeds) == 2
linkset = set(u.url for u in links)
embedset = set(e.url for e in embeds)
assert 'http://example.com/foo3.html' in linkset
assert 'http://example.com/foo.gif' in embedset
assert sha1 == 'sha1:cdcb087d39afd827d5d523e165a6566d65a2e9b3'
assert base is None
# as a handwave, let's expect these defective pages to also work.
test_html_bytes = test_html_no_body.encode(encoding='utf-8', errors='replace')
links, embeds, sha1, facets, base = parse.do_burner_work_html(test_html_no_body, test_html_bytes, headers, url=urlj)
assert len(links) == 3
assert len(embeds) == 2
test_html_bytes = test_html_no_head.encode(encoding='utf-8', errors='replace')
links, embeds, sha1, facets, base = parse.do_burner_work_html(test_html_no_head, test_html_bytes, headers, url=urlj)
assert len(links) == 3
assert len(embeds) == 1
test_html_bytes = test_html_no_nothing.encode(encoding='utf-8', errors='replace')
links, embeds, sha1, facets, base = parse.do_burner_work_html(test_html_no_nothing, test_html_bytes, headers, url=urlj)
assert len(links) == 3
assert len(embeds) == 1
def test_clean_link_objects():
test = [{'href': 'http://example.com'}, {'href': 'data:46532656'}, {'href': 'https://example.com'}]
ret = [{'href': 'http://example.com'}, {'href': 'https://example.com'}]
assert parse.clean_link_objects(test, ('data:', 'javascript:')) == ret
def test_individual_parsers():
links, embeds = parse.find_html_links_re(test_html)
assert len(links) == 6
assert len(embeds) == 0
linkset = set(parse.collapse_links(links))
assert 'foo2.htm' in linkset
assert 'foo3.html ' in linkset
assert 'foo.gif' in linkset
assert 'torture"\n<url>' in linkset
head, body = parse.split_head_body(test_html)
links, embeds = parse.find_body_links_re(body)
assert len(links) == 4
assert len(embeds) == 1
linkset = set(parse.collapse_links(links))
embedset = set(parse.collapse_links(embeds))
assert 'foo2.htm' in linkset
assert 'foo3.html ' in linkset
assert 'torture"\n<url>' in linkset
assert 'foo.gif' in embedset
links, embeds = parse.find_body_links_anchors_re(body)
assert len(links) == 4
assert len(embeds) == 1
linkdict = dict([(l['href'], l['anchor']) for l in links])
# {('foo1.html', 'Anchor 1'), ('foo3.html ', 'Anchor 3'), ('foo2.htm', 'Anchor 2'), ('torture"\n<url>', 'torture\nanchor')}
assert linkdict['foo2.htm'] == 'Anchor 2'
assert linkdict['foo3.html '] == 'Anchor 3'
assert linkdict['torture"\n<url>'] == 'torture\nanchor'
assert 'foo.gif' in embeds[0]['src']
head_soup = BeautifulSoup(head, 'lxml')
links, embeds = parse.find_head_links_soup(head_soup)
embedset = set(parse.collapse_links(embeds))
assert len(links) == 0
assert len(embeds) == 1
assert 'link.html' in embedset
head_soup = BeautifulSoup(head, 'lxml')
body_soup = BeautifulSoup(body, 'lxml')
links, embeds = parse.find_head_links_soup(head_soup)
lbody, ebody = parse.find_body_links_soup(body_soup)
links += lbody
embeds += ebody
linkset = set(parse.collapse_links(links))
embedset = set(parse.collapse_links(embeds))
assert len(links) == 4
assert len(embeds) == 2
assert 'foo2.htm' in linkset
assert 'foo3.html ' in linkset
assert 'torture"\n<url>' in linkset
assert 'link.html' in embedset
assert 'foo.gif' in embedset
head, body = parse.split_head_body(test_html_harder)
body_soup = BeautifulSoup(body, 'lxml')
lbody, ebody = parse.find_body_links_soup(body_soup)
assert len(lbody) == 1
assert len(ebody) == 1
assert 'iframe.html' == lbody[0]['src']
assert 'stylesheet.blah' == ebody[0]['href']
test_css = '''
@import url('foo1.css')
url(images/foo2.png)
url( images/foo3.png )
'''
def test_css_parser():
links, embeds = parse.find_css_links_re(test_css)
assert len(links) == 0
assert len(embeds) == 3
assert 'images/foo3.png' in embeds
def test_split_head_body():
'''
Whitebox test of the heuristics in this function
'''
head, body = parse.split_head_body('x'*100000)
assert head == ''
assert len(body) == 100000
head, body = parse.split_head_body('x' + '<HeAd>' + 'x'*100000)
assert head == ''
assert len(body) == 100007
head, body = parse.split_head_body('x' + '</HeAd>' + 'x'*100000)
assert head == 'x'
assert len(body) == 100000
head, body = parse.split_head_body('x' + '<BoDy>' + 'x'*100000)
assert head == 'x'
assert len(body) == 100000
head, body = parse.split_head_body('x' + '<heAd><boDy>' + 'x'*100000)
assert head == 'x<heAd>'
assert len(body) == 100000
head, body = parse.split_head_body('x' + '<hEad></heAd>' + 'x'*100000)
assert head == 'x<hEad>'
assert len(body) == 100000
head, body = parse.split_head_body('x' + '<heaD></Head><bOdy>' + 'x'*100000)
assert head == 'x<heaD>'
assert len(body) == 100006
def test_parse_refresh():
test = ((('0;foo'), (0, 'foo')),
((';'), (None, None)),
(('1.1.1.1; bar'), (1, 'bar')),
(('2.2, urbaz'), (2, 'urbaz')),
(('3; url=barf'), (3, 'barf')),
(('3; url="barf"asdf'), (3, 'barf')),
(('3; UrL='), (3, '')))
for t in test:
assert parse.parse_refresh(t[0]) == t[1]
def test_regex_out_comments():
t = 'Hello <!-- foo --> world!'
assert parse.regex_out_comments(t) == 'Hello world!'
def test_regex_out_some_scripts():
t = '<script>foo</script> bar'
assert parse.regex_out_some_scripts(t) == ' bar'
def test_regex_out_all_script():
t = '<script>foo</script> bar <script type="baz">barf</script> '
assert parse.regex_out_all_scripts(t) == ' bar '
```
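The `test_parse_refresh` table above pins down the expected handling of `Refresh` header values quite precisely. Purely as a reading aid, here is a small stand-alone function that reproduces exactly those cases; it is a sketch of the contract the tests describe, not the actual `cocrawler.parse.parse_refresh` implementation, which may differ internally:
```python
import re

def parse_refresh_sketch(content):
    '''Return (refresh_seconds, url) for a Refresh header value, or (None, None).'''
    m = re.match(r'\s*(\d+(?:\.\d+)?)?[^;,]*[;,]\s*(.*)$', content)
    if m is None:
        return None, None
    num, rest = m.group(1), m.group(2)
    refresh = int(float(num)) if num else None
    url = rest
    um = re.match(r'url\s*=\s*(.*)$', rest, re.I)
    if um:
        url = um.group(1)
    if url.startswith('"'):
        # keep only the quoted part, dropping anything after the closing quote
        url = url[1:].partition('"')[0]
    if refresh is None and not url:
        url = None
    return refresh, url

# The cases from test_parse_refresh all hold for the sketch:
assert parse_refresh_sketch('0;foo') == (0, 'foo')
assert parse_refresh_sketch(';') == (None, None)
assert parse_refresh_sketch('2.2, urbaz') == (2, 'urbaz')
assert parse_refresh_sketch('3; url="barf"asdf') == (3, 'barf')
```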
#### File: tests/unit/test_seeds.py
```python
import cocrawler.seeds as seeds
def test_special_seed_handling():
specialsh = seeds.special_seed_handling
assert specialsh('foo') == 'http://foo'
assert specialsh('//foo/') == 'http://foo/'
assert specialsh('https://foo') == 'https://foo'
#assert specialsh('mailto:foo') == 'mailto:foo'
```
#### File: tests/warc/warc-diff.py
```python
import sys
import difflib
f1 = sys.argv[1]
f2 = sys.argv[2]
with open(f1, 'r') as fd:
contents1 = fd.read()
with open(f2, 'r') as fd:
contents2 = fd.read()
def munge(s):
'''
Remove things known to differ in WARC files:
uuids
WARC-Date: headers
WARC-Warcinfo-ID headers
'''
out = ''
for line in s.split('\n'):
if line.startswith('WARC-Date:'):
line = 'WARC-Date:'
elif line.startswith('WARC-Warcinfo-ID:'):
line = 'WARC-Warcinfo-ID:'
elif ':uuid:' in line:
line, _, _ = line.partition(':uuid:')
elif line.startswith('software:'):
continue
out += line + '\n'
return out
m1 = munge(contents1)
m2 = munge(contents2)
if m1 == m2:
sys.exit(0)
print('{} and {} differ'.format(f1, f2))
for line in difflib.unified_diff(m1.splitlines(), m2.splitlines(),
fromfile=f1, tofile=f2):
print(line)
sys.exit(1)
``` |
{
"source": "joye1503/colorama",
"score": 3
} |
#### File: colorama/demos/demo08.py
```python
from __future__ import print_function
import fixpath
from colorama import colorama_text, Fore
def main():
"""automatically reset stdout"""
with colorama_text():
print(Fore.GREEN + 'text is green')
print(Fore.RESET + 'text is back to normal')
print('text is back to stdout')
if __name__ == '__main__':
main()
``` |
{
"source": "joye1503/Distributed-Computing-Scripts",
"score": 2
} |
#### File: joye1503/Distributed-Computing-Scripts/primenet.py
```python
from __future__ import division, print_function
import subprocess
from random import getrandbits
from collections import namedtuple
import sys
import os.path
import re
import time
import optparse
from hashlib import sha256
import json
import platform
import logging
try:
import requests
except ImportError:
print("Installing requests as dependency")
subprocess.check_output("pip install requests", shell=True)
print("The Requests library has been installed. Please run the program again")
sys.exit(0)
try:
# Python3
from urllib.parse import urlencode
from requests.exceptions import ConnectionError, HTTPError
except ImportError:
# Python2
from urllib import urlencode
from urllib2 import URLError as ConnectionError
from urllib2 import HTTPError
try:
from configparser import ConfigParser, Error as ConfigParserError
except ImportError:
from ConfigParser import ConfigParser, Error as ConfigParserError # ver. < 3.0
if sys.version_info[:2] >= (3, 7):
    # It is OK to use dict in 3.7+ because insertion order is guaranteed to be preserved
# Since it is also faster, it is better to use raw dict()
OrderedDict = dict
else:
try:
from collections import OrderedDict
except ImportError:
# For python2.6 and before which don't have OrderedDict
try:
from ordereddict import OrderedDict
except ImportError:
            # Tests will not work correctly, but it doesn't affect the functionality
OrderedDict = dict
s = requests.Session() # session that maintains our cookies
# [***] <NAME>'s functions
# get assignment
def ga(guid):
args = primenet_v5_bargs.copy()
args["t"] = "ga" # transaction type
args["g"] = guid
args["c"] = options.cpu
args["a"] = ""
return args
# register assignment
def ra(n):
'''Note: this function is not used'''
args = primenet_v5_bargs.copy()
args["t"] = "ra"
args["g"] = get_guid(config)
args["c"] = options.cpu
args["b"] = 2
args["n"] = n
args["w"] = options.worktype
return args
# unreserve assignment
def au(k):
args = primenet_v5_bargs.copy()
args["t"] = "au"
args["g"] = get_guid(config)
args["k"] = k
return args
# TODO -- have people set their own program options for commented out portions
def program_options(guid):
args = primenet_v5_bargs.copy()
args["t"] = "po"
args["g"] = guid
args["c"] = "" # no value updates all cpu threads with given worktype
args["w"] = options.worktype if config.has_option("primenet", "first_time") is False \
or hasattr(opts_no_defaults, "worktype") else ""
# args["nw"] = 1
# args["Priority"] = 1
args["DaysOfWork"] = options.days_work if config.has_option("primenet", "first_time") is False \
or hasattr(opts_no_defaults, "days_work") else ""
# args["DayMemory"] = 8
# args["NightMemory"] = 8
# args["DayStartTime"] = 0
# args["NightStartTime"] = 0
# args["RunOnBattery"] = 1
result = send_request(guid, args)
config_updated = False
if result is None or int(result["pnErrorResult"]) != 0:
parser.error("Error while setting program options on mersenne.org")
if "w" in result:
config.set("primenet", "worktype", result["w"])
config_updated = True
if "DaysOfWork" in result:
config.set("primenet", "days_work", result["DaysOfWork"])
config_updated = True
if config.has_option("primenet", "first_time") is False:
config.set("primenet", "first_time", "false")
config_updated = True
if "w" in result or "DaysOfWork" in result:
merge_config_and_options(config, options)
if config_updated:
config_write(config)
def unreserve_all(guid):
if guid is None:
debug_print("Cannot unreserve, the registration is not done",
file=sys.stderr)
w = readonly_list_file(workfile)
tasks = greplike(workpattern, w)
for task in tasks:
assignment = get_progress_assignment(task)
args = au(assignment.id)
result = send_request(guid, args)
if result is None or int(result["pnErrorResult"]) != 0:
debug_print("ERROR while releasing assignment on mersenne.org: assignment_id={0}".format(
assignment.id), file=sys.stderr)
# TODO: Delete task from workfile
def return_code(result, aid):
'''Check if the return code is not OKAY and do something about it
@param result - return from send_request()
    @param aid - assignment id (used in the log messages)
'''
rc = int(result["pnErrorResult"])
if rc == primenet_api.ERROR_OK:
debug_print(
"Result correctly send to server: assignment_id={0}".format(aid))
return True
else: # non zero ERROR code
debug_print("ERROR while submitting result on mersenne.org: assignment_id={0}".format(
aid), file=sys.stderr)
        if rc == primenet_api.ERROR_UNREGISTERED_CPU:
            # should register again and retry
            debug_print(
                "ERROR UNREGISTERED CPU: Please remove the guid line from local.ini, run again to re-register, and retry", file=sys.stderr)
            return False
        elif rc == primenet_api.ERROR_INVALID_PARAMETER:
debug_print(
"INVALID PARAMETER: this is a bug in the script, please create an issue: https://github.com/tdulcet/Distributed-Computing-Scripts/issues", file=sys.stderr)
return False
else:
# In all other error case, the submission must not be retried
return True
def get_cpu_signature():
output = ""
if platform.system() == "Windows":
output = subprocess.check_output('wmic cpu list brief').decode()
elif platform.system() == "Darwin":
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + '/usr/sbin'
command = "sysctl -n machdep.cpu.brand_string"
output = subprocess.check_output(command).decode().strip()
elif platform.system() == "Linux":
with open('/proc/cpuinfo', 'r') as f1:
all_info = f1.read()
for line in all_info.split("\n"):
if "model name" in line:
output = re.sub(".*model name.*:", "", line, 1).lstrip()
break
return output
def get_cpu_name(signature):
'''Note: Not used'''
search = re.search(
r'\bPHENOM\b|\bAMD\b|\bATOM\b|\bCore 2\b|\bCore(TM)2\b|\bCORE(TM) i7\b|\bPentium(R) M\b|\bCore\b|\bIntel\b|\bUnknown\b|\bK5\b|\bK6\b', signature)
return search.group(0) if search else ""
cpu_signature = get_cpu_signature()
cpu_brand = get_cpu_name(cpu_signature)
# END Daniel's Functions
primenet_v5_burl = "http://v5.mersenne.org/v5server/?"
PRIMENET_TRANSACTION_API_VERSION = 0.95
VERSION = 19
primenet_v5_bargs = OrderedDict(
(("px", "GIMPS"), ("v", PRIMENET_TRANSACTION_API_VERSION)))
primenet_baseurl = "https://www.mersenne.org/"
primenet_login = False
class primenet_api:
ERROR_OK = 0
ERROR_SERVER_BUSY = 3
ERROR_INVALID_VERSION = 4
ERROR_INVALID_TRANSACTION = 5
# Returned for length, type, or character invalidations.
ERROR_INVALID_PARAMETER = 7
ERROR_ACCESS_DENIED = 9
ERROR_DATABASE_CORRUPT = 11
ERROR_DATABASE_FULL_OR_BROKEN = 13
# Account related errors:
ERROR_INVALID_USER = 21
# Computer cpu/software info related errors:
ERROR_UNREGISTERED_CPU = 30
ERROR_OBSOLETE_CLIENT = 31
ERROR_STALE_CPU_INFO = 32
ERROR_CPU_IDENTITY_MISMATCH = 33
ERROR_CPU_CONFIGURATION_MISMATCH = 34
# Work assignment related errors:
ERROR_NO_ASSIGNMENT = 40
ERROR_INVALID_ASSIGNMENT_KEY = 43
ERROR_INVALID_ASSIGNMENT_TYPE = 44
ERROR_INVALID_RESULT_TYPE = 45
ERROR_INVALID_WORK_TYPE = 46
ERROR_WORK_NO_LONGER_NEEDED = 47
PRIMENET_AR_NO_RESULT = 0 # No result, just sending done msg
PRIMENET_AR_TF_FACTOR = 1 # Trial factoring, factor found
PRIMENET_AR_P1_FACTOR = 2 # P-1, factor found
PRIMENET_AR_ECM_FACTOR = 3 # ECM, factor found
PRIMENET_AR_TF_NOFACTOR = 4 # Trial Factoring no factor found
PRIMENET_AR_P1_NOFACTOR = 5 # P-1 Factoring no factor found
PRIMENET_AR_ECM_NOFACTOR = 6 # ECM Factoring no factor found
PRIMENET_AR_LL_RESULT = 100 # LL result, not prime
PRIMENET_AR_LL_PRIME = 101 # LL result, Mersenne prime
PRIMENET_AR_PRP_RESULT = 150 # PRP result, not prime
PRIMENET_AR_PRP_PRIME = 151 # PRP result, probably prime
# Teal's addition
errors = {primenet_api.ERROR_SERVER_BUSY: "Server busy",
primenet_api.ERROR_INVALID_VERSION: "Invalid version",
primenet_api.ERROR_INVALID_TRANSACTION: "Invalid transaction",
primenet_api.ERROR_INVALID_PARAMETER: "Invalid parameter",
primenet_api.ERROR_ACCESS_DENIED: "Access denied",
primenet_api.ERROR_DATABASE_CORRUPT: "Server database malfunction",
primenet_api.ERROR_DATABASE_FULL_OR_BROKEN: "Server database full or broken",
primenet_api.ERROR_INVALID_USER: "Invalid user",
primenet_api.ERROR_UNREGISTERED_CPU: "CPU not registered",
primenet_api.ERROR_OBSOLETE_CLIENT: "Obsolete client, please upgrade",
primenet_api.ERROR_STALE_CPU_INFO: "Stale cpu info",
primenet_api.ERROR_CPU_IDENTITY_MISMATCH: "CPU identity mismatch",
primenet_api.ERROR_CPU_CONFIGURATION_MISMATCH: "CPU configuration mismatch",
primenet_api.ERROR_NO_ASSIGNMENT: "No assignment",
primenet_api.ERROR_INVALID_ASSIGNMENT_KEY: "Invalid assignment key",
primenet_api.ERROR_INVALID_ASSIGNMENT_TYPE: "Invalid assignment type",
primenet_api.ERROR_INVALID_RESULT_TYPE: "Invalid result type"}
def debug_print(text, file=sys.stdout):
if options.debug or file == sys.stderr:
caller_name = sys._getframe(1).f_code.co_name
if caller_name == '<module>':
caller_name = 'main loop'
caller_string = caller_name + ": "
print(progname + ": " + caller_string + " " + time.strftime("%Y-%m-%d %H:%M") +
" \t" + str(text), file=file)
file.flush()
def greplike(pattern, lines):
output = []
for line in lines:
s = pattern.search(line)
if s:
output.append(s.group(0))
return output
def num_to_fetch(line, targetsize):
num_existing = len(line)
num_needed = targetsize - num_existing
return max(num_needed, 0)
def readonly_list_file(filename, mode="r"):
# Used when there is no intention to write the file back, so don't
    # check or write lockfiles. Returns a list of stripped lines (or an empty list on error).
try:
with open(filename, mode=mode) as File:
contents = File.readlines()
return [x.rstrip() for x in contents]
except (IOError, OSError):
return []
def write_list_file(filename, line, mode="w"):
# A "null append" is meaningful, as we can call this to clear the
# lockfile. In this case the main file need not be touched.
if not ("a" in mode and len(line) == 0):
newline = b'\n' if 'b' in mode else '\n'
content = newline.join(line) + newline
with open(filename, mode) as File:
File.write(content)
def primenet_fetch(num_to_get):
if not options.username:
return []
# As of early 2018, here is the full list of assignment-type codes supported by the Primenet server; Mlucas
# v18 (and thus this script) supports only the subset of these indicated by an asterisk in the left column.
# Supported assignment types may be specified via either their PrimeNet number code or the listed Mnemonic:
# Worktype:
# Code Mnemonic Description
# ---- ----------------- -----------------------
# 0 Whatever makes the most sense
# 1 Trial factoring to low limits
# 2 Trial factoring
# 4 P-1 factoring
# 5 ECM for first factor on Mersenne numbers
# 6 ECM on Fermat numbers
# 8 ECM on mersenne cofactors
# *100 SmallestAvail Smallest available first-time tests
# *101 DoubleCheck Double-checking
# *102 WorldRecord World record primality tests
# *104 100Mdigit 100M digit number to LL test (not recommended)
# *150 SmallestAvailPRP First time PRP tests (Gerbicz)
# *151 DoubleCheckPRP Doublecheck PRP tests (Gerbicz)
# *152 WorldRecordPRP World record sized numbers to PRP test (Gerbicz)
# *153 100MdigitPRP 100M digit number to PRP test (Gerbicz)
# 160 PRP on Mersenne cofactors
# 161 PRP double-checks on Mersenne cofactors
# Convert mnemonic-form worktypes to corresponding numeric value, check worktype value vs supported ones:
option_dict = {"SmallestAvail": "100", "DoubleCheck": "101", "WorldRecord": "102", "100Mdigit": "104",
"SmallestAvailPRP": "150", "DoubleCheckPRP": "151", "WorldRecordPRP": "152", "100MdigitPRP": "153"}
if options.worktype in option_dict: # this and the above line of code enables us to use words or numbers on the cmdline
options.worktype = option_dict[options.worktype]
supported = set(['100', '101', '102', '104', '150', '151', '152', '153']
) if program == "MLucas" else set(['100', '101', '102', '104'])
if options.worktype not in supported:
debug_print("Unsupported/unrecognized worktype = " +
options.worktype + " for " + program)
return []
try:
# Get assignment (Loarer's way)
if options.password:
assignment = OrderedDict((
("cores", "1"),
("num_to_get", num_to_get),
("pref", options.worktype),
("exp_lo", ""),
("exp_hi", ""),
("B1", "Get Assignments")
))
openurl = primenet_baseurl + "manual_assignment/?"
debug_print("Fetching work via URL = " +
openurl + urlencode(assignment))
r = s.post(openurl, data=assignment)
return greplike(workpattern, [line.decode('utf-8', 'replace') for line in r.iter_lines()])
# Get assignment using V5 API
else:
guid = get_guid(config)
assignment = ga(guid) # get assignment
debug_print("Fetching work via V5 Primenet = " +
primenet_v5_burl + urlencode(assignment))
tests = []
for _ in range(num_to_get):
r = send_request(guid, assignment)
if r is None or int(r["pnErrorResult"]) != 0:
debug_print(
"ERROR while requesting an assignment on mersenne.org", file=sys.stderr)
break
if r['w'] not in supported:
debug_print("ERROR: Returned assignment from server is not a supported worktype for " + program + ".", file=sys.stderr)
return []
# if options.worktype == LL
if r['w'] in set(['100', '102', '104']):
tests.append("Test="+",".join([r[i] for i in ['k', 'n', 'sf', 'p1']]))
# if options.worktype == DC
elif r['w'] in set(['101']):
tests.append("DoubleCheck="+",".join([r[i] for i in ['k', 'n', 'sf', 'p1']]))
# if PRP type testing, first time
elif r['w'] in set(['150', '152', '153']):
tests.append("PRP="+",".join([r[i] for i in ['k', 'b', 'n', 'c', 'sf', 'saved']]))
# if PRP-DC (probable-primality double-check) testing
elif r['w'] in set(['151']):
tests.append("PRP="+",".join([r[i] for i in ['k', 'b', 'n', 'c', 'sf', 'saved', 'base', 'rt']]))
return tests
except ConnectionError:
debug_print("URL open error at primenet_fetch")
return []
def get_assignment(progress):
w = readonly_list_file(workfile)
tasks = greplike(workpattern, w)
(percent, time_left) = None, None
if progress is not None and type(progress) == tuple and len(progress) == 2:
(percent, time_left) = progress # unpack update_progress output
num_cache = int(options.num_cache) + 1
if time_left is not None and time_left <= options.days_work*24*3600:
# time_left and percent increase are exclusive (don't want to do += 2)
num_cache += 1
debug_print("Time_left is {0} and smaller than limit ({1}), so num_cache is increased by one to {2}".format(
time_left, options.days_work*24*3600, num_cache))
num_to_get = num_to_fetch(tasks, num_cache)
if num_to_get < 1:
debug_print(workfile + " already has " + str(len(tasks)) +
" >= " + str(num_cache) + " entries, not getting new work")
return 0
debug_print("Fetching " + str(num_to_get) + " assignments")
new_tasks = primenet_fetch(num_to_get)
num_fetched = len(new_tasks)
if num_fetched > 0:
debug_print("Fetched {0} assignments:".format(num_fetched))
for new_task in new_tasks:
debug_print("{0}".format(new_task))
write_list_file(workfile, new_tasks, "a")
if num_fetched < num_to_get:
debug_print("Error: Failed to obtain requested number of new assignments, " +
str(num_to_get) + " requested, " + str(num_fetched) + " successfully retrieved")
return num_fetched
resultpattern = re.compile("[Pp]rogram|CUDALucas")
def mersenne_find(line, complete=True):
# Pre-v19 old-style HRF-formatted result used "Program:..."; starting w/v19 JSON-formatted result uses "program",
return resultpattern.search(line)
try:
from statistics import median_low
except ImportError:
def median_low(mylist):
sorts = sorted(mylist)
length = len(sorts)
return sorts[(length-1)//2]
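# Sanity check on the fallback above: like statistics.median_low, it returns the
# lower of the two middle values for an even-length input, e.g.
# median_low([3, 1, 2, 4]) == 2 and median_low([1, 2, 3]) == 2.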
def parse_stat_file(p):
statfile = 'p' + str(p) + '.stat'
if os.path.exists(statfile) is False:
print("ERROR: stat file does not exist")
return 0, None
w = readonly_list_file(statfile) # appended line by line, no lock needed
found = 0
regex = re.compile("Iter# = (.+?) .*?(\d+\.\d+) (m?sec)/iter")
list_usec_per_iter = []
# get the 5 most recent Iter line
for line in reversed(w):
res = regex.search(line)
if res:
found += 1
# keep the last iteration to compute the percent of progress
if found == 1:
iteration = int(res.group(1))
usec_per_iter = float(res.group(2))
unit = res.group(3)
if unit == "sec":
usec_per_iter *= 1000
list_usec_per_iter.append(usec_per_iter)
if found == 5:
break
if found == 0:
return 0, None # iteration is 0, but don't know the estimated speed yet
    # take the median of the last grepped lines
usec_per_iter = median_low(list_usec_per_iter)
return iteration, usec_per_iter
def parse_v5_resp(r):
ans = dict()
for line in r.splitlines():
if line == "==END==":
break
option, _, value = line.partition("=")
ans[option] = value
return ans
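# For reference, a v5 server response body is a plain "key=value" text block
# terminated by an ==END== marker, for example (illustrative values only):
#
#   pnErrorResult=0
#   pnErrorDetail=SUCCESS
#   ==END==
#
# parse_v5_resp() turns that into {'pnErrorResult': '0', 'pnErrorDetail': 'SUCCESS'};
# only the first '=' on each line separates key from value, so values may themselves
# contain '=' characters.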
def send_request(guid, args):
args["g"] = guid
# to mimic mprime, it is necessary to add safe='"{}:,' argument to urlencode, in
# particular to encode JSON in result submission. But safe is not supported by python2...
url_args = urlencode(args)
url_args += "&ss=19191919&sh=ABCDABCDABCDABCDABCDABCDABCDABCD"
try:
r = requests.get(primenet_v5_burl+url_args)
result = parse_v5_resp(r.text)
rc = int(result["pnErrorResult"])
if rc:
if rc in errors:
resmsg = errors[rc]
else:
resmsg = "Unknown error code"
debug_print("PrimeNet error " + str(rc) +
": " + resmsg, file=sys.stderr)
debug_print(result["pnErrorDetail"], file=sys.stderr)
else:
if result["pnErrorDetail"] != "SUCCESS":
debug_print("PrimeNet success code with additional info:")
debug_print(result["pnErrorDetail"])
except HTTPError as e:
debug_print("ERROR receiving answer to request: " +
str(primenet_v5_burl+url_args), file=sys.stderr)
debug_print(e, file=sys.stderr)
return None
except ConnectionError as e:
debug_print("ERROR connecting to server for request: " +
str(primenet_v5_burl+url_args), file=sys.stderr)
debug_print(e, file=sys.stderr)
return None
return result
def create_new_guid():
guid = hex(getrandbits(128))
if guid[:2] == '0x':
guid = guid[2:] # remove the 0x prefix
if guid[-1] == 'L':
        guid = guid[:-1]  # remove trailing 'L' in python2
    # add missing 0s to the beginning
guid = (32-len(guid))*"0" + guid
return guid
def register_instance(guid):
# register the instance to server, guid is the instance identifier
if options.username is None or options.hostname is None:
parser.error(
"To register the instance, --username and --hostname are required")
hardware_id = sha256(options.cpu_model.encode(
"utf-8")).hexdigest()[:32] # similar as mprime
args = primenet_v5_bargs.copy()
args["t"] = "uc" # update compute command
args["a"] = platform.system() + ('64' if platform.machine().endswith('64')
else '') + ",Mlucas,v" + str(VERSION)
if config.has_option("primenet", "sw_version"):
args["a"] = config.get("primenet", "sw_version")
args["wg"] = "" # only filled on Windows by mprime
args["hd"] = hardware_id # 32 hex char (128 bits)
args["c"] = options.cpu_model[:64] # CPU model (len between 8 and 64)
args["f"] = options.features[:64] # CPU option (like asimd, max len 64)
args["L1"] = options.L1 # L1 cache size in KBytes
args["L2"] = options.L2 # L2 cache size in KBytes
    # if this is less than or equal to 256,
    # the server refuses to give out LL assignments
args["np"] = options.np # number of cores
args["hp"] = options.hp # number of hyperthreading cores
args["m"] = options.memory # number of megabytes of physical memory
args["s"] = options.frequency # CPU frequency
args["h"] = 24 # pretend to run 24h/day
args["r"] = 0 # pretend to run at 100%
args["u"] = options.username #
args["cn"] = options.hostname[:20] # truncate to 20 char max
if guid is None:
guid = create_new_guid()
result = send_request(guid, args)
if result is None or int(result["pnErrorResult"]) != 0:
parser.error("Error while registering on mersenne.org")
# Save program options in case they are changed by the PrimeNet server.
config.set("primenet", "username", result["u"])
config.set("primenet", "name", result["un"])
config.set("primenet", "hostname", result["cn"])
merge_config_and_options(config, options)
config_write(config, guid=guid)
program_options(guid)
print("GUID {guid} correctly registered with the following features:".format(
guid=guid))
print("Username: {0}".format(options.username))
print("Computer name: {0}".format(options.hostname))
print("CPU model: {0}".format(options.cpu_model))
print("CPU features: {0}".format(options.features))
print("CPU L1 Cache size: {0} KIB".format(options.L1))
print("CPU L2 Cache size: {0} KiB".format(options.L2))
print("CPU cores: {0}".format(options.np))
print("CPU threads per core: {0}".format(options.hp))
print("CPU frequency: {0} MHz".format(options.frequency))
print("Total RAM: {0} MiB".format(options.memory))
print(u"If you want to change the value, please edit the โ" +
options.localfile + u"โ file")
print("You can see the result in this page:")
print("https://www.mersenne.org/editcpu/?g={guid}".format(guid=guid))
return
def config_read():
config = ConfigParser(dict_type=OrderedDict)
try:
config.read([localfile])
except ConfigParserError as e:
debug_print("ERROR reading {0} file:".format(
localfile), file=sys.stderr)
debug_print(e, file=sys.stderr)
if not config.has_section("primenet"):
# Create the section to avoid having to test for it later
config.add_section("primenet")
return config
def get_guid(config):
try:
return config.get("primenet", "guid")
except ConfigParserError:
return None
def config_write(config, guid=None):
# generate a new local.ini file
if guid is not None: # update the guid if necessary
config.set("primenet", "guid", guid)
with open(localfile, "w") as configfile:
config.write(configfile)
def merge_config_and_options(config, options):
    # getattr and setattr allow access to the options.xxxx values by name,
    # which makes it possible to copy all of them programmatically instead of having
    # one line per attribute. Only the attr_to_copy list needs to be updated
    # when adding an option you want to copy from the command-line options to the local.ini config.
attr_to_copy = ["workfile", "resultsfile", "username", "password", "worktype", "num_cache", "days_work",
"hostname", "cpu_model", "features", "frequency", "memory", "L1", "L2", "np", "hp", "gpu"]
updated = False
for attr in attr_to_copy:
# if "attr" has its default value in options, copy it from config
attr_val = getattr(options, attr)
if not hasattr(opts_no_defaults, attr) \
and config.has_option("primenet", attr):
# If no option is given and the option exists in local.ini, take it from local.ini
new_val = config.get("primenet", attr)
# config file values are always str()
# they need to be converted to the expected type from options
if attr_val is not None:
new_val = type(attr_val)(new_val)
setattr(options, attr, new_val)
elif attr_val is not None and (not config.has_option("primenet", attr)
or config.get("primenet", attr) != str(attr_val)):
# If an option is given (even default value) and it is not already
# identical in local.ini, update local.ini
debug_print(u"update โ" + options.localfile +
u"โ with {0}={1}".format(attr, attr_val))
config.set("primenet", attr, str(attr_val))
updated = True
global localfile
global workfile
global resultsfile
localfile = os.path.join(workdir, options.localfile)
workfile = os.path.join(workdir, options.workfile)
resultsfile = os.path.join(workdir, options.resultsfile)
return updated
Assignment = namedtuple('Assignment', "id p is_prp iteration usec_per_iter")
def update_progress():
w = readonly_list_file(workfile)
tasks = greplike(workpattern, w)
if not len(tasks):
return # don't update if no worktodo
config_updated = False
    # Treat the first assignment. Only this one is used to save usec_per_iter.
    # The idea is that the first assignment has a .stat file with correct values.
    # Most of the time, a later assignment would not have a .stat file to obtain information from,
    # but if it has one, it may come from another computer if the user moved the files, and so
    # it doesn't have relevant values for speed estimation.
    # Reusing usec_per_iter from one p for another is a good estimate if both p are close enough;
    # if there is a big gap, it will be over- or under-estimated.
    # Any idea for a better estimation of assignment duration when only p and the type (LL or PRP) are known?
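    # Worked example of the arithmetic below (hypothetical numbers, for illustration only):
    # with p = 89459323, iteration = 44729661 (~50% done) and usec_per_iter = 5.0 msec/iter,
    # time_left = 5.0 * (89459323 - 44729661) / 1000 seconds, i.e. roughly 2.6 days.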
assignment = get_progress_assignment(tasks[0])
usec_per_iter = assignment.usec_per_iter
if usec_per_iter is not None:
config.set("primenet", "usec_per_iter",
"{0:.2f}".format(usec_per_iter))
config_updated = True
elif config.has_option("primenet", "usec_per_iter"):
# If not speed available, get it from the local.ini file
usec_per_iter = float(config.get("primenet", "usec_per_iter"))
percent, time_left = compute_progress(
assignment.p, assignment.iteration, usec_per_iter)
debug_print("p:{0} is {1:.2f}% done".format(assignment.p, percent))
if time_left is None:
debug_print("Finish cannot be estimated")
else:
debug_print("Finish estimated in {0:.1f} days (used {1:.1f} msec/iter estimation)".format(
time_left/3600/24, usec_per_iter))
send_progress(assignment.id, assignment.is_prp, percent, time_left)
# Do the other assignment accumulating the time_lefts
cur_time_left = time_left
for task in tasks[1:]:
assignment = get_progress_assignment(task)
percent, time_left = compute_progress(
assignment.p, assignment.iteration, usec_per_iter)
debug_print("p:{0} is {1:.2f}% done".format(assignment.p, percent))
if time_left is None:
debug_print("Finish cannot be estimated")
else:
cur_time_left += time_left
debug_print("Finish estimated in {0:.1f} days (used {1:.1f} msec/iter estimation)".format(
cur_time_left/3600/24, usec_per_iter))
send_progress(assignment.id, assignment.is_prp,
percent, cur_time_left)
if config_updated:
config_write(config)
return percent, cur_time_left
def get_progress_assignment(task):
''' Ex: Test=197ED240A7A41EC575CB408F32DDA661,57600769,74 '''
found = workpattern.search(task)
print(task)
if not found:
debug_print("ERROR: Unable to extract valid PrimeNet assignment ID from entry in " +
workfile + ": " + str(task[0]), file=sys.stderr)
return
assignment_id = found.group(2) # e.g., "197ED240A7A41EC575CB408F32DDA661"
is_prp = found.group(1) == "PRP" # e.g., "Test"
debug_print("type = {0}, assignment_id = {1}".format(
        found.group(1), assignment_id))  # e.g., "Test", "197ED240A7A41EC575CB408F32DDA661"
found = task.split(",")
idx = 3 if is_prp else 1
if len(found) <= idx:
debug_print("Unable to extract valid exponent substring from entry in " +
workfile + ": " + str(task))
return None, None
# Extract the subfield containing the exponent, whose position depends on the assignment type:
p = int(found[idx])
if not options.gpu:
iteration, usec_per_iter = parse_stat_file(p)
else:
iteration, usec_per_iter = parse_stat_file_cuda()
return Assignment(assignment_id, p, is_prp, iteration, usec_per_iter)
def parse_stat_file_cuda():
# CUDALucas only function
# appended line by line, no lock needed
    if not os.path.exists(options.gpu):
print("ERROR: GPU file does not exist")
return 0, None
w = readonly_list_file(options.gpu)
found = 0
iter_regex = re.compile(r'\b\d{5,}\b')
ms_per_regex = re.compile(r'\b\d+\.\d+\b')
list_msec_per_iter = []
    # get the 5 most recent Iter lines
for line in reversed(w):
iter_res = re.findall(iter_regex, line)
ms_res = re.findall(ms_per_regex, line)
# regex matches, but not when cudalucas is continuing
# if iter_res and ms_res and "Compatibility" not in line and "Continuing" not in line and "M(" not in line:
if iter_res and ms_res:
found += 1
# keep the last iteration to compute the percent of progress
if found == 1:
iteration = int(iter_res[1])
elif int(iter_res[1]) > iteration:
break
msec_per_iter = float(ms_res[1])
list_msec_per_iter.append(msec_per_iter)
if found == 5:
break
if found == 0:
return 0, None # iteration is 0, but don't know the estimated speed yet
    # take the median of the last grepped lines
msec_per_iter = median_low(list_msec_per_iter)
return iteration, msec_per_iter
def compute_progress(p, iteration, usec_per_iter):
percent = 100*float(iteration)/float(p)
if usec_per_iter is None:
return percent, None
iteration_left = p - iteration
time_left = int(usec_per_iter * iteration_left / 1000)
return percent, time_left
def send_progress(assignment_id, is_prp, percent, time_left, retry_count=0):
guid = get_guid(config)
if guid is None:
debug_print("Cannot update, the registration is not done",
file=sys.stderr)
return
if retry_count > 5:
return
# Assignment Progress fields:
# g= the machine's GUID (32 chars, assigned by Primenet on 1st-contact from a given machine, stored in 'guid=' entry of local.ini file of rundir)
#
args = primenet_v5_bargs.copy()
args["t"] = "ap" # update compute command
    # k= the assignment ID (32 chars, follows '=' in Primenet-generated workfile entries)
args["k"] = assignment_id
# p= progress in %-done, 4-char format = xy.z
args["p"] = "{0:.1f}".format(percent)
# d= when the client is expected to check in again (in seconds ... )
args["d"] = options.timeout if options.timeout else 24*3600
# e= the ETA of completion in seconds, if unknown, just put 1 week
args["e"] = time_left if time_left is not None else 7*24*3600
# c= the worker thread of the machine ... always sets = 0 for now, elaborate later if desired
args["c"] = options.cpu
# stage= LL in this case, although an LL test may be doing TF or P-1 work first so it's possible to be something besides LL
if not is_prp:
args["stage"] = "LL"
retry = False
result = send_request(guid, args)
if result is None:
debug_print("ERROR while updating on mersenne.org", file=sys.stderr)
# Try again
retry = True
else:
rc = int(result["pnErrorResult"])
if rc == primenet_api.ERROR_OK:
debug_print("Update correctly sent to server")
else:
if rc == primenet_api.ERROR_STALE_CPU_INFO:
debug_print("STALE CPU INFO ERROR: re-send computer update")
register_instance(guid)
retry = True
elif rc == primenet_api.ERROR_UNREGISTERED_CPU:
debug_print(
"UNREGISTERED CPU ERROR: pick a new GUID and register again")
register_instance(None)
retry = True
elif rc == primenet_api.ERROR_SERVER_BUSY:
retry = True
else:
# TODO: treat more errors correctly in all send_request callers
# primenet_api.ERROR_INVALID_ASSIGNMENT_KEY
# primenet_api.ERROR_WORK_NO_LONGER_NEEDED
# drop the assignment
debug_print("ERROR while updating on mersenne.org",
file=sys.stderr)
if retry:
return send_progress(assignment_id, is_prp, percent, time_left, retry_count+1)
return
def get_cuda_ar_object(sendline):
# CUDALucas only function
# sendline example: 'M( 108928711 )C, 0x810d83b6917d846c, offset = 106008371, n = 6272K, CUDALucas v2.06, AID: 02E4F2B14BB23E2E4B95FC138FC715A8'
ar = {}
# args example: ['M( 108928711 )C', '0x810d83b6917d846c', 'offset = 106008371', 'n = 6272K', 'CUDALucas v2.06', 'AID: 02E4F2B14BB23E2E4B95FC138FC715A8']
args = ([x.strip() for x in sendline.split(",")])
ar['aid'] = args[5][5:]
ar['worktype'] = 'LL' # CUDAlucas only does LL tests
# the else does not matter in Loarer's program
ar['status'] = 'P' if int(args[1], 0) == 0 else 'R'
ar['exponent'] = re.search(r'\d{5,}', args[0]).group(0)
ar['res64'] = args[1][2:]
ar['shift-count'] = args[2].strip("offset = ")
ar['error-code'] = "00000000"
ar['fft-length'] = str(int(args[3].strip("n = ").strip("K")) * 1000)
return ar
def submit_one_line(sendline):
"""Submit one line"""
if not options.gpu: # MLucas
try:
ar = json.loads(sendline)
is_json = True
        except json.decoder.JSONDecodeError:
            ar = None  # avoid a NameError below when the line is not valid JSON
            is_json = False
else: # CUDALucas
ar = get_cuda_ar_object(sendline)
guid = get_guid(config)
if guid is not None and ar is not None and (options.gpu or is_json):
        # If registered and the result line was parsed successfully (JSON for Mlucas,
        # or the CUDALucas ar object), submit using the v5 API;
        # the result will be attributed to the registered computer
sent = submit_one_line_v5(sendline, guid, ar)
else:
# The result will be attributed to "Manual testing"
sent = submit_one_line_manually(sendline)
return sent
def announce_prime_to_user(exponent, worktype):
for i in range(3):
print('\a')
time.sleep(.5)
if worktype == 'LL':
print("New Mersenne Prime!!!! M"+exponent+" is prime!")
else:
print("New Probable Prime!!!! "+exponent+" is a probable prime!")
def get_result_type(ar):
"""Extract result type from JSON result"""
if ar['worktype'] == 'LL':
if ar['status'] == 'P':
announce_prime_to_user(ar['exponent'], ar['worktype'])
return primenet_api.PRIMENET_AR_LL_PRIME
else:
return primenet_api.PRIMENET_AR_LL_RESULT
elif ar['worktype'].startswith('PRP'):
if ar['status'] == 'P':
announce_prime_to_user(ar['exponent'], ar['worktype'])
return primenet_api.PRIMENET_AR_PRP_PRIME
else:
return primenet_api.PRIMENET_AR_PRP_RESULT
else:
raise ValueError(
"This is a bug in primenet.py, Unsupported worktype {0}".format(ar['worktype']))
def submit_one_line_v5(sendline, guid, ar):
"""Submit one result line using V5 API, will be attributed to the computed identified by guid"""
"""Return False if the submission should be retried"""
# JSON is required because assignment_id is necessary in that case
# and it is not present in old output format.
debug_print("Submitting using V5 API\n" + sendline)
    aid = ar.get('aid', 0)  # the assignment id may be absent from manually added worktodo entries
result_type = get_result_type(ar)
args = primenet_v5_bargs.copy()
args["t"] = "ar" # assignment result
args["k"] = ar['aid'] if 'aid' in ar else 0 # assignment id
args["m"] = sendline # message is the complete JSON string
args["r"] = result_type # result type
args["d"] = 1 # done: 0 for no closing is used for partial results
args["n"] = ar['exponent']
if result_type in (primenet_api.PRIMENET_AR_LL_RESULT, primenet_api.PRIMENET_AR_LL_PRIME):
if result_type == primenet_api.PRIMENET_AR_LL_RESULT:
args["rd"] = ar['res64']
if 'shift-count' in ar:
args['sc'] = ar['shift-count']
if 'error-code' in ar:
args["ec"] = ar['error-code']
elif result_type in (primenet_api.PRIMENET_AR_PRP_RESULT, primenet_api.PRIMENET_AR_PRP_PRIME):
args.update((("A", 1), ("b", 2), ("c", -1)))
if result_type == primenet_api.PRIMENET_AR_PRP_RESULT:
args["rd"] = ar['res64']
if 'error-code' in ar:
args["ec"] = ar['error-code']
if 'known-factors' in ar:
args['nkf'] = len(ar['known-factors'])
args["base"] = ar['worktype'][4:] # worktype == PRP-base
if 'residue-type' in ar:
args["rt"] = ar['residue-type']
if 'shift-count' in ar:
args['sc'] = ar['shift-count']
if 'errors' in ar:
args['gbz'] = 1
args['fftlen'] = ar['fft-length']
result = send_request(guid, args)
if result is None:
debug_print("ERROR while submitting result on mersenne.org: assignment_id={0}".format(
aid), file=sys.stderr)
# if this happens, the submission can be retried
# since no answer has been received from the server
return False
else:
return return_code(result, aid)
def submit_one_line_manually(sendline):
"""Submit results using manual testing, will be attributed to "Manual Testing" in mersenne.org"""
debug_print("Submitting using manual results\n" + sendline)
try:
url = primenet_baseurl + "manual_result/default.php"
r = s.post(url, data={"data": sendline})
res_str = r.text
if "Error" in res_str:
ibeg = res_str.find("Error")
iend = res_str.find("</div>", ibeg)
print("Submission failed: '{0}'".format(res_str[ibeg:iend]))
elif "Accepted" in res_str:
pass
else:
print("submit_work: Submission of results line '" + sendline +
"' failed for reasons unknown - please try manual resubmission.")
except ConnectionError:
debug_print("URL open ERROR")
return True # EWM: Append entire results_send rather than just sent to avoid resubmitting
# bad results (e.g. previously-submitted duplicates) every time the script executes.
def submit_work():
results_send = readonly_list_file(sentfile)
# Only submit completed work, i.e. the exponent must not exist in worktodo file any more
# appended line by line, no lock needed
results = readonly_list_file(resultsfile)
# EWM: Note that readonly_list_file does not need the file(s) to exist - nonexistent files simply yield 0-length rs-array entries.
# remove nonsubmittable lines from list of possibles
results = filter(mersenne_find, results)
# if a line was previously submitted, discard
results_send = [line for line in results if line not in results_send]
# Only for new results, to be appended to results_sent
sent = []
if len(results_send) == 0:
debug_print("No complete results found to send.")
return
# EWM: Switch to one-result-line-at-a-time submission to support error-message-on-submit handling:
for sendline in results_send:
# case where password is entered (not needed in v5 API since we have a key)
if options.password:
is_sent = submit_one_line_manually(sendline)
else:
is_sent = submit_one_line(sendline)
if is_sent:
sent.append(sendline)
write_list_file(sentfile, sent, "a")
#######################################################################################################
#
# Start main program here
#
#######################################################################################################
parser = optparse.OptionParser(version="%prog 1.0", description=u"""This program will automatically get assignments, report assignment results and optionally progress to PrimeNet for both the CUDALucas and Mlucas GIMPS programs. It also saves its configuration to a โlocal.iniโ file, so it is only necessary to give most of the arguments the first time it is run.
The first time it is run, if a password is NOT provided, it will register the current CUDALucas/Mlucas instance with PrimeNet (see below).
Then, it will get assignments, report the results and progress, if registered, to PrimeNet on a โtimeoutโ interval, or only once if timeout is 0.
"""
)
# options not saved to local.ini
parser.add_option("-d", "--debug", action="count", dest="debug",
default=False, help="Display debugging info")
parser.add_option("-w", "--workdir", dest="workdir", default=".",
help=u"Working directory with โworktodo.iniโ and โresults.txtโ from the GIMPS program, and โlocal.iniโ from this program, Default: %default (current directory)")
parser.add_option("-i", "--workfile", dest="workfile",
default="worktodo.ini", help=u"WorkFile filename, Default: โ%defaultโ")
parser.add_option("-r", "--resultsfile", dest="resultsfile",
default="results.txt", help=u"ResultsFile filename, Default: โ%defaultโ")
parser.add_option("-l", "--localfile", dest="localfile", default="local.ini",
help=u"Local configuration file filename, Default: โ%defaultโ")
# all other options are saved to local.ini
parser.add_option("-u", "--username", dest="username",
help="GIMPS/PrimeNet User ID. Create a GIMPS/PrimeNet account: https://www.mersenne.org/update/. If you do not want a PrimeNet account, you can use ANONYMOUS.")
parser.add_option("-p", "--password", dest="password",
help="GIMPS/PrimeNet Password. Only provide if you want to do manual testing and not report the progress (not recommend). This was the default behavior for old versions of this script.")
# -t is reserved for timeout, instead use -T for assignment-type preference:
parser.add_option("-T", "--worktype", dest="worktype", default="100", help="""Type of work, Default: %default,
100 (smallest available first-time LL),
101 (double-check LL),
102 (world-record-sized first-time LL),
104 (100M digit number to LL test - not recommended),
150 (smallest available first-time PRP),
151 (double-check PRP),
152 (world-record-sized first-time PRP),
153 (100M digit number to PRP test)
"""
)
# parser.add_option("-g", "--gpu", action="store_true", dest="gpu", default=False,
parser.add_option("-g", "--gpu", dest="gpu", help="Get assignments for a GPU (CUDALucas) instead of the CPU (Mlucas). This flag takes as an argument the CUDALucas output filename.")
parser.add_option("-c", "--cpu_num", dest="cpu", type="int", default=0,
help="CPU core or GPU number to get assignments for, Default: %default")
parser.add_option("-n", "--num_cache", dest="num_cache", type="int",
default=0, help="Number of assignments to cache, Default: %default")
parser.add_option("-L", "--days_work", dest="days_work", type="int", default=3,
help="Days of work to queue, Default: %default days. Add one to num_cache when the time left for the current assignment is less then this number of days.")
parser.add_option("-t", "--timeout", dest="timeout", type="int", default=60*60*6,
help="Seconds to wait between network updates, Default: %default seconds (6 hours). Use 0 for a single update without looping.")
parser.add_option("--unreserve_all", action="store_true", dest="unreserve_all", default=False, help="Unreserve all assignments and exit. Requires that the instance is registered with PrimeNet.")
group = optparse.OptionGroup(parser, "Registering Options: sent to PrimeNet/GIMPS when registering. The progress will automatically be sent and the program can then be monitored on the GIMPS website CPUs page (https://www.mersenne.org/cpus/), just like with Prime95/MPrime. This also allows for the program to get much smaller Category 0 and 1 exponents, if it meets the other requirements (https://www.mersenne.org/thresholds/).")
group.add_option("-H", "--hostname", dest="hostname",
default=platform.node()[:20], help="Computer name, Default: %default")
# TODO: add detection for most parameters, including automatic change of the hardware
group.add_option("--cpu_model", dest="cpu_model", default=cpu_signature,
help="Processor (CPU) model, Default: %default")
group.add_option("--features", dest="features", default="",
help="CPU features, Default: '%default'")
group.add_option("--frequency", dest="frequency", type="int",
default=1000, help="CPU frequency (MHz), Default: %default MHz")
group.add_option("-m", "--memory", dest="memory", type="int",
default=0, help="Total memory (RAM) (MiB), Default: %default MiB")
group.add_option("--L1", dest="L1", type="int", default=8,
help="L1 Cache size (KiB), Default: %default KiB")
group.add_option("--L2", dest="L2", type="int", default=512,
help="L2 Cache size (KiB), Default: %default KiB")
group.add_option("--np", dest="np", type="int", default=1,
help="Number of CPU Cores, Default: %default")
group.add_option("--hp", dest="hp", type="int", default=0,
help="Number of CPU threads per core (0 is unknown), Default: %default")
parser.add_option_group(group)
#(options, args) = parser.parse_args()
#print(options)
opts_no_defaults = optparse.Values()
__, args = parser.parse_args(values=opts_no_defaults)
options = optparse.Values(parser.get_default_values().__dict__)
options._update_careful(opts_no_defaults.__dict__)
progname = os.path.basename(sys.argv[0])
workdir = os.path.expanduser(options.workdir)
localfile = os.path.join(workdir, options.localfile)
workfile = os.path.join(workdir, options.workfile)
resultsfile = os.path.join(workdir, options.resultsfile)
# print(opts_no_defaults)
# print(options)
# A cumulative backup
sentfile = os.path.join(workdir, "results_sent.txt")
# Good refs re. Python regexp: https://www.geeksforgeeks.org/pattern-matching-python-regex/, https://www.python-course.eu/re.php
# pre-v19 only handled LL-test assignments starting with either DoubleCheck or Test, followed by =, and ending with 3 ,number pairs:
#
# workpattern = r"(DoubleCheck|Test)=.*(,[0-9]+){3}"
#
# v19 we add PRP-test support - both first-time and DC of these start with PRP=, the DCs tack on 2 more ,number pairs representing
# the PRP base to use and the PRP test-type (the latter is a bit complex to explain here). Sample of the 4 worktypes supported by v19:
#
# Test=7A30B8B6C0FC79C534A271D9561F7DCC,89459323,76,1
# DoubleCheck=92458E009609BD9E10577F83C2E9639C,50549549,73,1
# PRP=BC914675C81023F252E92CF034BEFF6C,1,2,96364649,-1,76,0
# PRP=51D650F0A3566D6C256B1679C178163E,1,2,81348457,-1,75,0,3,1
#
# and the obvious regexp pattern-modification is
#
# workpattern = r"(DoubleCheck|Test|PRP)=.*(,[0-9]+){3}"
#
# Here is where we get to the kind of complication the late baseball-philosopher <NAME> captured via his aphorism,
# "In theory, theory and practice are the same. In practice, they're different". Namely, while the above regexp pattern
# should work on all 4 assignment patterns, since each has a string of at least 3 comma-separated nonnegative ints somewhere
# between the 32-hexchar assignment ID and end of the line, said pattern failed on the 3rd of the above 4 assignments,
# apparently because when the regexp is done via the 'greplike' below, the (,[0-9]+){3} part of the pattern gets implicitly
# tiled to the end of the input line. Assignment # 3 above happens to have a negative number among the final 3, thus the
# grep fails. This weird behavior is not reproducible running Python in console mode:
#
# >>> import re
# >>> s1 = "DoubleCheck=92458E009609BD9E10577F83C2E9639C,50549549,73,1"
# >>> s2 = "Test=7A30B8B6C0FC79C534A271D9561F7DCC,89459323,76,1"
# >>> s3 = "PRP=BC914675C81023F252E92CF034BEFF6C,1,2,96364649,-1,76,0"
# >>> s4 = "PRP=51D650F0A3566D6C256B1679C178163E,1,2,81348457,-1,75,0,3,1"
# >>> print re.search(r"(DoubleCheck|Test|PRP)=.*(,[0-9]+){3}" , s1)
# <_sre.SRE_Match object at 0x1004bd250>
# >>> print re.search(r"(DoubleCheck|Test|PRP)=.*(,[0-9]+){3}" , s2)
# <_sre.SRE_Match object at 0x1004bd250>
# >>> print re.search(r"(DoubleCheck|Test|PRP)=.*(,[0-9]+){3}" , s3)
# <_sre.SRE_Match object at 0x1004bd250>.
# >>> print re.search(r"(DoubleCheck|Test|PRP)=.*(,[0-9]+){3}" , s4)
# <_sre.SRE_Match object at 0x1004bd250>
#
# Anyhow, based on that I modified the grep pattern to work around the weirdness, by appending .* to the pattern, thus
# changing things to "look for 3 comma-separated nonnegative ints somewhere in the assignment, followed by anything",
# also now to specifically look for a 32-hexchar assignment ID preceding such a triplet, and to allow whitespace around
# the =. The latter bit is not needed based on current server assignment format, just a personal aesthetic bias of mine:
#
workpattern = re.compile(
    r"(DoubleCheck|Test|PRP)\s*=\s*([0-9A-F]{32})(,[0-9]+){3}.*")
# mersenne.org limit is about 4 KB; stay on the safe side
sendlimit = 3000 # TODO: enforce this limit
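# Hypothetical helper sketching one way the TODO above could be addressed (not part of the
# original script and not called anywhere): measure a result line before submission and let
# the caller decide what to do with oversized lines.
def exceeds_send_limit(line, limit=sendlimit):
    """Return True if a result line is too large to submit in a single request."""
    return len(line.encode("utf-8")) > limit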
# If debug is requested
# https://stackoverflow.com/questions/10588644/how-can-i-see-the-entire-http-request-thats-being-sent-by-my-python-application
if options.debug > 1:
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = options.debug
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# load local.ini and update options
config = config_read()
config_updated = merge_config_and_options(config, options)
# check options after merging so that if local.ini file is changed by hand,
# values are also checked
# TODO: check that input char are ascii or at least supported by the server
if not (8 <= len(options.cpu_model) <= 64):
parser.error("cpu_model must be between 8 and 64 characters")
if options.hostname is not None and len(options.hostname) > 20:
parser.error("hostname must be less than 21 characters")
if options.features is not None and len(options.features) > 64:
parser.error("features must be less than 64 characters")
# write back local.ini if necessary
if config_updated:
debug_print("write " + options.localfile)
config_write(config)
# if a guid already exists, recover it; this way, one can (re)register to change
# the CPU model (changing the instance name can only be done on the website)
guid = get_guid(config)
if options.username is None:
parser.error("Username must be given")
if options.unreserve_all:
unreserve_all(guid)
sys.exit(0)
program = "CUDALucas" if options.gpu else "MLucas"
while True:
# Carry on with Loarer's style of primenet
try:
if options.password:
login_data = {"user_login": options.username,
"user_password": <PASSWORD>}
url = primenet_baseurl + "default.php"
r = s.post(url, data=login_data)
if options.username + "<br>logged in" not in r.text:
primenet_login = False
debug_print("ERROR: Login failed.")
else:
primenet_login = True
# use the v5 API for registration and program options
else:
if guid is None:
register_instance(guid)
if options.timeout <= 0:
break
# worktype has changed, update worktype preference in program_options()
# if config_updated:
elif config_updated:
program_options(guid)
except HTTPError as e:
debug_print("ERROR: Login failed.")
# branch 1 or branch 2 above was taken
if not options.password or (options.password and primenet_login):
submit_work()
progress = update_progress()
got = get_assignment(progress)
debug_print("Got: " + str(got))
if got > 0 and not options.password:
            debug_print(
                "Redo progress update to update the just obtained assignment(s)")
time.sleep(1)
update_progress()
if options.timeout <= 0:
break
try:
time.sleep(options.timeout)
except KeyboardInterrupt:
break
sys.exit(0)
``` |
{
"source": "joye1503/python-zstd",
"score": 2
} |
#### File: python-zstd/tests/test_compress.py
```python
import os
import unittest
from tests.base import BaseTestZSTD, raise_skip
class TestZSTD(BaseTestZSTD):
def setUp(self):
if os.getenv("LEGACY"):
self.LEGACY = True
if os.getenv("PYZSTD_LEGACY"):
self.PYZSTD_LEGACY = True
def test_compression_random(self):
BaseTestZSTD.helper_compression_random(self)
def test_compression_default_level(self):
BaseTestZSTD.helper_compression_default_level(self)
def test_compression_default_level_zero(self):
BaseTestZSTD.helper_compression_default_level_zero(self)
def test_compression_default_level_default(self):
BaseTestZSTD.helper_compression_default_level_default(self)
def test_compression_negative_level(self):
BaseTestZSTD.helper_compression_negative_level(self)
def test_compression_negative_level_notdefault(self):
BaseTestZSTD.helper_compression_negative_level_notdefault(self)
def test_compression_wrong_level(self):
BaseTestZSTD.helper_compression_wrong_level(self)
def test_compression_multi_thread_one(self):
BaseTestZSTD.helper_compression_multi_thread_one(self)
def test_compression_multi_thread_many(self):
BaseTestZSTD.helper_compression_multi_thread_many(self)
def test_compression_old_default_level(self):
if not self.PYZSTD_LEGACY:
return raise_skip("PyZstd was build without legacy functions support")
BaseTestZSTD.helper_compression_old_default_level(self)
def test_compression_level1(self):
BaseTestZSTD.helper_compression_level1(self)
def test_compression_level6(self):
BaseTestZSTD.helper_compression_level6(self)
def test_compression_level20(self):
BaseTestZSTD.helper_compression_level20(self)
def test_decompression_v036(self):
if self.LEGACY and self.PYZSTD_LEGACY:
BaseTestZSTD.helper_decompression_v036(self)
else:
return raise_skip("PyZstd was build without legacy zstd format and functions support")
def test_decompression_v046(self):
if self.LEGACY and self.PYZSTD_LEGACY:
BaseTestZSTD.helper_decompression_v046(self)
else:
return raise_skip("PyZstd was build without legacy zstd format and functions support")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JoyeBright/Deep-Learning",
"score": 3
} |
#### File: JoyeBright/Deep-Learning/MSE.py
```python
import numpy as np
import Dataset4
from sklearn.metrics import mean_squared_error
def relu(x):
out = max(0, x)
return out
def predict_with_network(inputd, weights):
node0_input = (inputd * weights['node0']).sum()
node0_output = relu(node0_input)
node1_input = (inputd * weights['node1']).sum()
node1_output = relu(node1_input)
hidden_layer_values = np.array([node0_output, node1_output])
output = (hidden_layer_values * weights['output']).sum()
return output
# Create model_output_0
model_output_0 = []
# Create model_output_1
model_output_1 = []
for row in Dataset4.input_data:
model_output_0.append(predict_with_network(row, Dataset4.weights))
model_output_1.append((predict_with_network(row, Dataset4.NewWeights)))
# Calculate the Mean Squared Error for model_output_0: mse0
mse0 = mean_squared_error(model_output_0, Dataset4.ActualTargets)
# Calculate the Mean Squared Error for model_output_1: mse1
mse1 = mean_squared_error(model_output_1, Dataset4.ActualTargets)
print("Mean Squared Error with first series of weights: %f" % mse0)
print("Mean Squared Error with new weights: %f" % mse1)
```
#### File: JoyeBright/Deep-Learning/Multi_Layer_NN.py
```python
from Dataset2 import weights, input_data
import numpy as np
def relu(x):
out = max(0,x)
return out
def predict_with_network(inputd):
node00_input = (inputd * weights['node00']).sum()
node00_output = relu(node00_input)
node01_input = (inputd * weights['node01']).sum()
node01_output = relu(node01_input)
hidden_0_outputs = np.array([node00_output, node01_output])
node10_input = (hidden_0_outputs * weights['node10']).sum()
node10_output = relu(node10_input)
node11_input = (hidden_0_outputs * weights['node11']).sum()
node11_output = relu(node11_input)
hidden_1_outputs = np.array([node10_output, node11_output])
model_output = (hidden_1_outputs * weights['output']).sum()
    # Added by the author: apply ReLU to the final output as well, so that a negative
    # weighted sum yields 0 instead of a negative model output.
model_output = relu(model_output)
return model_output
output = predict_with_network(input_data)
print(output)
``` |
{
"source": "JoyeBright/nlp981",
"score": 3
} |
#### File: nlp981/Week3/TF-IDF.py
```python
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
import math
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# Given Corpus with different documents
corpus = {1: 'i am student of computer engineering at the university of guilan',
2: 'i am studying natural language processing right now'}
# Create a bag of words for each documents
BoW = []
# Split each document
for row in corpus:
BoW.append(corpus[row].split(' '))
    Num_document = row  # last dict key == number of documents
# print(BoW)
# print(Num_document)
# Remove any duplicate words
unique_Words = set(BoW[0]).union(set(BoW[1]))
# print(uniqueWord)
# Create Document-Word matrix
# Sort unique words for better visualization
unique_Words = sorted(unique_Words)
# dict.fromkeys(x=keys, y=value)
Doc1 = dict.fromkeys(unique_Words, 0)
# Count word occurrence in Doc
for word in BoW[0]:
Doc1[word] += 1
Doc2 = dict.fromkeys(unique_Words, 0)
for word in BoW[1]:
Doc2[word] += 1
# print("Doc1:", Doc1)
# print("Doc2:", Doc2)
# document_word = pd.DataFrame([Doc1, Doc2])
# print(document_word)
# Removing stopwords is often recommended, but it depends on your task; a minimal sketch follows.
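# Stopword-removal sketch (an illustrative add-on; it is NOT used by the computations below,
# so the printed results stay unchanged). It assumes sklearn's built-in English stopword list;
# the sklearn section at the bottom could achieve the same with TfidfVectorizer(stop_words='english').
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
BoW_no_stopwords = [[w for w in doc if w not in ENGLISH_STOP_WORDS] for doc in BoW]
# print(BoW_no_stopwords)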
# Compute TF
def tf_computation(document, bag_of_words):
tf_doc = {}
bow_count = len(bag_of_words)
# print(bow_count)
for w, count in document.items():
tf_doc[w] = float(count / bow_count)
return tf_doc
tfDoc1 = tf_computation(Doc1, BoW[0])
tfDoc2 = tf_computation(Doc2, BoW[1])
# print(tfDoc1)
# print(tfDoc2)
# Compute IDF
def idf_computation(docs):
n = len(docs)
idf_dict = dict.fromkeys(docs[0].keys(), 0)
for document in docs:
for w, val in document.items():
if val > 0:
idf_dict[w] += 1
for w, val in idf_dict.items():
idf_dict[w] = math.log(n/float(val))
return idf_dict
idf_s = idf_computation([Doc1, Doc2])
# print(idf_s)
def tf_idf_computation(tf, idfs):
tf_idf = {}
for w, val in tf.items():
tf_idf[w] = val * idfs[w]
return tf_idf
tf_idf_doc1 = tf_idf_computation(tfDoc1, idf_s)
tf_idf_doc2 = tf_idf_computation(tfDoc2, idf_s)
print(tf_idf_doc1)
print(tf_idf_doc2)
# Show in a data frame
data_frame = pd.DataFrame([tf_idf_doc1, tf_idf_doc2])
print(data_frame.head())
# ................. TF-IDF implementation by Sklearn .................
corpus2 = ['i am student of computer engineering at the university of guilan',
'i am studying natural language processing right now']
vectorizer = TfidfVectorizer()
x = vectorizer.fit_transform(corpus2)
feature_names = vectorizer.get_feature_names()
# print(x.shape)
output = x.todense()
output_list = output.tolist()
output_df = pd.DataFrame(output_list, columns=feature_names)
print(output_df.head())
``` |
{
"source": "joyel24/Tidal-Media-Downloader",
"score": 2
} |
#### File: TIDALDL-PY/tidal_dl/__init__.py
```python
import os
import requests
import prettytable
import ssl
import sys
import getopt
from aigpy.stringHelper import isNull
from aigpy.pathHelper import mkdirs
from aigpy.pipHelper import getLastVersion
from aigpy.versionHelper import cmpVersion
from tidal_dl.tidal import TidalAPI
from tidal_dl.settings import Settings, UserSettings
from tidal_dl.printf import Printf, VERSION
from tidal_dl.download import start
from tidal_dl.enum import AudioQuality, VideoQuality
from tidal_dl.lang.language import getLang, setLang, initLang
ssl._create_default_https_context = ssl._create_unverified_context
API = TidalAPI()
USER = UserSettings.read()
CONF = Settings.read()
TOKEN1, TOKEN2 = API.getToken()
LANG = initLang(CONF.language)
def login(username="", password=""):
while True:
if isNull(username) or isNull(password):
print("---------------" + LANG.CHOICE_LOGIN + "-----------------")
username = Printf.enter(LANG.PRINT_USERNAME)
password = Printf.enter(LANG.PRINT_PASSWORD)
msg, check = API.login(username, password, TOKEN1)
if check == False:
Printf.err(msg)
username = ""
password = ""
continue
api2 = TidalAPI()
msg, check = api2.login(username, password, TOKEN2)
break
USER.username = username
USER.password = password
USER.userid = API.key.userId
USER.countryCode = API.key.countryCode
USER.sessionid1 = API.key.sessionId
USER.sessionid2 = api2.key.sessionId
UserSettings.save(USER)
def setAccessToken():
while True:
print("-------------AccessToken---------------")
token = Printf.enter("accessToken('0' go back):")
if token == '0':
return
msg, check = API.loginByAccessToken(token, USER.userid)
if check == False:
Printf.err(msg)
continue
break
USER.assesstoken = token
UserSettings.save(USER)
def checkLogin():
if not isNull(USER.assesstoken):
        msg, check = API.loginByAccessToken(USER.assesstoken)
if check == False:
Printf.err(LANG.MSG_INVAILD_ACCESSTOKEN)
USER.assesstoken = ""
if not isNull(USER.sessionid1) and not API.isValidSessionID(USER.userid, USER.sessionid1):
USER.sessionid1 = ""
    if not isNull(USER.sessionid2) and not API.isValidSessionID(USER.userid, USER.sessionid2):
USER.sessionid2 = ""
if isNull(USER.sessionid1) or isNull(USER.sessionid2):
login(USER.username, USER.password)
def autoGetAccessToken():
array = API.tryGetAccessToken(USER.userid)
if len(array) <= 0:
return
for item in array:
msg, check = API.loginByAccessToken(item, USER.userid)
if check == False:
continue
if item != USER.assesstoken:
USER.assesstoken = item
UserSettings.save(USER)
Printf.info("Auto get accesstoken from tidal cache success!")
return
def changeSettings():
global LANG
Printf.settings(CONF)
choice = Printf.enter(LANG.CHANGE_START_SETTINGS)
if choice == '0':
return
while True:
choice = Printf.enter(LANG.CHANGE_DOWNLOAD_PATH)
if choice == '0':
choice = CONF.downloadPath
elif not os.path.isdir(choice):
if not mkdirs(choice):
Printf.err(LANG.MSG_PATH_ERR)
continue
CONF.downloadPath = choice
break
while True:
choice = Printf.enter(LANG.CHANGE_AUDIO_QUALITY)
if choice != '1' and choice != '2' and choice != '3' and choice != '0':
Printf.err(LANG.MSG_INPUT_ERR)
continue
if choice == '0':
CONF.audioQuality = AudioQuality.Normal
if choice == '1':
CONF.audioQuality = AudioQuality.High
if choice == '2':
CONF.audioQuality = AudioQuality.HiFi
if choice == '3':
CONF.audioQuality = AudioQuality.Master
break
while True:
choice = Printf.enter(LANG.CHANGE_VIDEO_QUALITY)
if choice != '1' and choice != '2' and choice != '3' and choice != '0':
Printf.err(LANG.MSG_INPUT_ERR)
continue
if choice == '0':
CONF.videoQuality = VideoQuality.P1080
if choice == '1':
CONF.videoQuality = VideoQuality.P720
if choice == '2':
CONF.videoQuality = VideoQuality.P480
if choice == '3':
CONF.videoQuality = VideoQuality.P360
break
CONF.onlyM4a = Printf.enter(LANG.CHANGE_ONLYM4A) == '1'
# CONF.addExplicitTag = Printf.enter(LANG.CHANGE_ADD_EXPLICIT_TAG) == '1'
# CONF.addHyphen = Printf.enter(LANG.CHANGE_ADD_HYPHEN) == '1'
# CONF.addYear = Printf.enter(LANG.CHANGE_ADD_YEAR) == '1'
# CONF.useTrackNumber = Printf.enter(LANG.CHANGE_USE_TRACK_NUM) == '1'
CONF.checkExist = Printf.enter(LANG.CHANGE_CHECK_EXIST) == '1'
# CONF.artistBeforeTitle = Printf.enter(LANG.CHANGE_ARTIST_BEFORE_TITLE) == '1'
CONF.includeEP = Printf.enter(LANG.CHANGE_INCLUDE_EP) == '1'
# CONF.addAlbumIDBeforeFolder = Printf.enter(LANG.CHANGE_ALBUMID_BEFORE_FOLDER) == '1'
CONF.saveCovers = Printf.enter(LANG.CHANGE_SAVE_COVERS) == '1'
CONF.showProgress = Printf.enter(LANG.CHANGE_SHOW_PROGRESS) == '1'
CONF.language = Printf.enter(LANG.CHANGE_LANGUAGE +
"('0'-English,'1'-ไธญๆ,'2'-Turkish,'3'-Italiano,'4'-Czech,'5'-Arabic,'6'-Russian,'7'-Filipino,'8'-Croatian,'9'-Spanish,'10'-Portuguese,'11'-Ukrainian,'12'-Vietnamese,'13'-French,'14'-German):")
albumFolderFormat = Printf.enter(LANG.CHANGE_ALBUM_FOLDER_FORMAT)
if albumFolderFormat == '0':
albumFolderFormat = CONF.albumFolderFormat
else:
CONF.albumFolderFormat = albumFolderFormat
trackFileFormat = Printf.enter(LANG.CHANGE_TRACK_FILE_FORMAT)
if trackFileFormat == '0':
trackFileFormat = CONF.trackFileFormat
else:
CONF.trackFileFormat = trackFileFormat
LANG = setLang(CONF.language)
Settings.save(CONF)
def mainCommand():
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:l:v:u:p:a:q:r", ["help", "output=","link=","version","username","password","accessToken","quality","resolution"])
link = None
for opt, val in opts:
if opt in ('-h', '--help'):
Printf.usage()
return
if opt in ('-v', '--version'):
Printf.logo()
return
if opt in ('-l', '--link'):
link = val
if opt in ('-o', '--output'):
CONF.downloadPath = val
if opt in ('-u', '--username'):
USER.username = val
UserSettings.save(USER)
if opt in ('-p', '--password'):
                USER.password = val
UserSettings.save(USER)
if opt in ('-a', '--accessToken'):
USER.assesstoken = val
UserSettings.save(USER)
if opt in ('-q', '--quality'):
CONF.audioQuality = Settings.getAudioQuality(val)
if opt in ('-r', '--resolution'):
CONF.videoQuality = Settings.getVideoQuality(val)
if link is None:
Printf.err("Please enter the link(url/id/path)! Enter 'tidal-dl -h' for help!");
return
if not mkdirs(CONF.downloadPath):
Printf.err(LANG.MSG_PATH_ERR + CONF.downloadPath)
return
checkLogin()
start(USER, CONF, link)
return
except getopt.GetoptError:
Printf.err("Argv error! Enter 'tidal -h' for help!");
def main():
if len(sys.argv) > 1:
mainCommand()
return
Printf.logo()
Printf.settings(CONF)
checkLogin()
autoGetAccessToken()
onlineVer = getLastVersion('tidal-dl')
if not isNull(onlineVer):
icmp = cmpVersion(onlineVer, VERSION)
if icmp > 0:
Printf.info(LANG.PRINT_LATEST_VERSION + ' ' + onlineVer)
while True:
Printf.choices()
choice = Printf.enter(LANG.PRINT_ENTER_CHOICE)
if choice == "0":
return
elif choice == "1":
login()
elif choice == "2":
changeSettings()
elif choice == "3":
setAccessToken()
else:
start(USER, CONF, choice)
if __name__ == "__main__":
main()
# test example
# track 70973230
# video 155608351
# album 58138532 77803199 21993753 79151897 56288918
``` |
{
"source": "JoyelManoj/youtube-dl",
"score": 2
} |
#### File: youtube_dl/extractor/openload.py
```python
from __future__ import unicode_literals
import json
import os
import random
import re
import subprocess
import tempfile
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_kwargs,
)
from ..utils import (
check_executable,
determine_ext,
encodeArgument,
ExtractorError,
get_element_by_id,
get_exe_version,
is_outdated_version,
std_headers,
)
def cookie_to_dict(cookie):
cookie_dict = {
'name': cookie.name,
'value': cookie.value,
}
if cookie.port_specified:
cookie_dict['port'] = cookie.port
if cookie.domain_specified:
cookie_dict['domain'] = cookie.domain
if cookie.path_specified:
cookie_dict['path'] = cookie.path
if cookie.expires is not None:
cookie_dict['expires'] = cookie.expires
if cookie.secure is not None:
cookie_dict['secure'] = cookie.secure
if cookie.discard is not None:
cookie_dict['discard'] = cookie.discard
try:
if (cookie.has_nonstandard_attr('httpOnly')
or cookie.has_nonstandard_attr('httponly')
or cookie.has_nonstandard_attr('HttpOnly')):
cookie_dict['httponly'] = True
except TypeError:
pass
return cookie_dict
def cookie_jar_to_list(cookie_jar):
return [cookie_to_dict(cookie) for cookie in cookie_jar]
class PhantomJSwrapper(object):
"""PhantomJS wrapper class
This class is experimental.
"""
_TEMPLATE = r'''
phantom.onError = function(msg, trace) {{
var msgStack = ['PHANTOM ERROR: ' + msg];
if(trace && trace.length) {{
msgStack.push('TRACE:');
trace.forEach(function(t) {{
msgStack.push(' -> ' + (t.file || t.sourceURL) + ': ' + t.line
+ (t.function ? ' (in function ' + t.function +')' : ''));
}});
}}
console.error(msgStack.join('\n'));
phantom.exit(1);
}};
var page = require('webpage').create();
var fs = require('fs');
var read = {{ mode: 'r', charset: 'utf-8' }};
var write = {{ mode: 'w', charset: 'utf-8' }};
JSON.parse(fs.read("{cookies}", read)).forEach(function(x) {{
phantom.addCookie(x);
}});
page.settings.resourceTimeout = {timeout};
page.settings.userAgent = "{ua}";
page.onLoadStarted = function() {{
page.evaluate(function() {{
delete window._phantom;
delete window.callPhantom;
}});
}};
var saveAndExit = function() {{
fs.write("{html}", page.content, write);
fs.write("{cookies}", JSON.stringify(phantom.cookies), write);
phantom.exit();
}};
page.onLoadFinished = function(status) {{
if(page.url === "") {{
page.setContent(fs.read("{html}", read), "{url}");
}}
else {{
{jscode}
}}
}};
page.open("");
'''
_TMP_FILE_NAMES = ['script', 'html', 'cookies']
@staticmethod
def _version():
return get_exe_version('phantomjs', version_re=r'([0-9.]+)')
def __init__(self, extractor, required_version=None, timeout=10000):
self._TMP_FILES = {}
self.exe = check_executable('phantomjs', ['-v'])
if not self.exe:
raise ExtractorError('PhantomJS executable not found in PATH, '
'download it from http://phantomjs.org',
expected=True)
self.extractor = extractor
if required_version:
version = self._version()
if is_outdated_version(version, required_version):
self.extractor._downloader.report_warning(
'Your copy of PhantomJS is outdated, update it to version '
'%s or newer if you encounter any errors.' % required_version)
self.options = {
'timeout': timeout,
}
for name in self._TMP_FILE_NAMES:
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.close()
self._TMP_FILES[name] = tmp
def __del__(self):
for name in self._TMP_FILE_NAMES:
try:
os.remove(self._TMP_FILES[name].name)
except (IOError, OSError, KeyError):
pass
def _save_cookies(self, url):
cookies = cookie_jar_to_list(self.extractor._downloader.cookiejar)
for cookie in cookies:
if 'path' not in cookie:
cookie['path'] = '/'
if 'domain' not in cookie:
cookie['domain'] = compat_urlparse.urlparse(url).netloc
with open(self._TMP_FILES['cookies'].name, 'wb') as f:
f.write(json.dumps(cookies).encode('utf-8'))
def _load_cookies(self):
with open(self._TMP_FILES['cookies'].name, 'rb') as f:
cookies = json.loads(f.read().decode('utf-8'))
for cookie in cookies:
if cookie['httponly'] is True:
cookie['rest'] = {'httpOnly': None}
if 'expiry' in cookie:
cookie['expire_time'] = cookie['expiry']
self.extractor._set_cookie(**compat_kwargs(cookie))
def get(self, url, html=None, video_id=None, note=None, note2='Executing JS on webpage', headers={}, jscode='saveAndExit();'):
"""
Downloads webpage (if needed) and executes JS
Params:
url: website url
html: optional, html code of website
video_id: video id
note: optional, displayed when downloading webpage
note2: optional, displayed when executing JS
headers: custom http headers
jscode: code to be executed when page is loaded
Returns tuple with:
* downloaded website (after JS execution)
* anything you print with `console.log` (but not inside `page.execute`!)
In most cases you don't need to add any `jscode`.
It is executed in `page.onLoadFinished`.
`saveAndExit();` is mandatory, use it instead of `phantom.exit()`
It is possible to wait for some element on the webpage, for example:
var check = function() {
var elementFound = page.evaluate(function() {
return document.querySelector('#b.done') !== null;
});
if(elementFound)
saveAndExit();
else
window.setTimeout(check, 500);
}
page.evaluate(function(){
document.querySelector('#a').click();
});
check();
"""
if 'saveAndExit();' not in jscode:
raise ExtractorError('`saveAndExit();` not found in `jscode`')
if not html:
html = self.extractor._download_webpage(url, video_id, note=note, headers=headers)
with open(self._TMP_FILES['html'].name, 'wb') as f:
f.write(html.encode('utf-8'))
self._save_cookies(url)
replaces = self.options
replaces['url'] = url
user_agent = headers.get('User-Agent') or std_headers['User-Agent']
replaces['ua'] = user_agent.replace('"', '\\"')
replaces['jscode'] = jscode
for x in self._TMP_FILE_NAMES:
replaces[x] = self._TMP_FILES[x].name.replace('\\', '\\\\').replace('"', '\\"')
with open(self._TMP_FILES['script'].name, 'wb') as f:
f.write(self._TEMPLATE.format(**replaces).encode('utf-8'))
if video_id is None:
self.extractor.to_screen('%s' % (note2,))
else:
self.extractor.to_screen('%s: %s' % (video_id, note2))
p = subprocess.Popen([
self.exe, '--ssl-protocol=any',
self._TMP_FILES['script'].name
], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
raise ExtractorError(
'Executing JS failed\n:' + encodeArgument(err))
with open(self._TMP_FILES['html'].name, 'rb') as f:
html = f.read().decode('utf-8')
self._load_cookies()
return (html, encodeArgument(out))
class OpenloadIE(InfoExtractor):
_DOMAINS = r'(?:openload\.(?:co|io|link|pw)|oload\.(?:tv|stream|site|xyz|win|download|cloud|cc|icu|fun|club|info|press|pw|live|space|services)|oladblock\.(?:services|xyz|me)|openloed\.co)'
_VALID_URL = r'''(?x)
https?://
(?P<host>
(?:www\.)?
%s
)/
(?:f|embed)/
(?P<id>[a-zA-Z0-9-_]+)
''' % _DOMAINS
_EMBED_WORD = 'embed'
_STREAM_WORD = 'f'
_REDIR_WORD = 'stream'
_URL_IDS = ('streamurl', 'streamuri', 'streamurj')
_TESTS = [{
'url': 'https://openload.co/f/kUEfGclsU9o',
'md5': 'bf1c059b004ebc7a256f89408e65c36e',
'info_dict': {
'id': 'kUEfGclsU9o',
'ext': 'mp4',
'title': 'skyrim_no-audio_1080.mp4',
'thumbnail': r're:^https?://.*\.jpg$',
},
}, {
'url': 'https://openload.co/embed/rjC09fkPLYs',
'info_dict': {
'id': 'rjC09fkPLYs',
'ext': 'mp4',
'title': 'movie.mp4',
'thumbnail': r're:^https?://.*\.jpg$',
'subtitles': {
'en': [{
'ext': 'vtt',
}],
},
},
'params': {
'skip_download': True, # test subtitles only
},
}, {
'url': 'https://openload.co/embed/kUEfGclsU9o/skyrim_no-audio_1080.mp4',
'only_matching': True,
}, {
'url': 'https://openload.io/f/ZAn6oz-VZGE/',
'only_matching': True,
}, {
'url': 'https://openload.co/f/_-ztPaZtMhM/',
'only_matching': True,
}, {
# unavailable via https://openload.co/f/Sxz5sADo82g/, different layout
# for title and ext
'url': 'https://openload.co/embed/Sxz5sADo82g/',
'only_matching': True,
}, {
# unavailable via https://openload.co/embed/e-Ixz9ZR5L0/ but available
# via https://openload.co/f/e-Ixz9ZR5L0/
'url': 'https://openload.co/f/e-Ixz9ZR5L0/',
'only_matching': True,
}, {
'url': 'https://oload.tv/embed/KnG-kKZdcfY/',
'only_matching': True,
}, {
'url': 'http://www.openload.link/f/KnG-kKZdcfY',
'only_matching': True,
}, {
'url': 'https://oload.stream/f/KnG-kKZdcfY',
'only_matching': True,
}, {
'url': 'https://oload.xyz/f/WwRBpzW8Wtk',
'only_matching': True,
}, {
'url': 'https://oload.win/f/kUEfGclsU9o',
'only_matching': True,
}, {
'url': 'https://oload.download/f/kUEfGclsU9o',
'only_matching': True,
}, {
'url': 'https://oload.cloud/f/4ZDnBXRWiB8',
'only_matching': True,
}, {
# Its title has not got its extension but url has it
'url': 'https://oload.download/f/N4Otkw39VCw/Tomb.Raider.2018.HDRip.XviD.AC3-EVO.avi.mp4',
'only_matching': True,
}, {
'url': 'https://oload.cc/embed/5NEAbI2BDSk',
'only_matching': True,
}, {
'url': 'https://oload.icu/f/-_i4y_F_Hs8',
'only_matching': True,
}, {
'url': 'https://oload.fun/f/gb6G1H4sHXY',
'only_matching': True,
}, {
'url': 'https://oload.club/f/Nr1L-aZ2dbQ',
'only_matching': True,
}, {
'url': 'https://oload.info/f/5NEAbI2BDSk',
'only_matching': True,
}, {
'url': 'https://openload.pw/f/WyKgK8s94N0',
'only_matching': True,
}, {
'url': 'https://oload.pw/f/WyKgK8s94N0',
'only_matching': True,
}, {
'url': 'https://oload.live/f/-Z58UZ-GR4M',
'only_matching': True,
}, {
'url': 'https://oload.space/f/IY4eZSst3u8/',
'only_matching': True,
}, {
'url': 'https://oload.services/embed/bs1NWj1dCag/',
'only_matching': True,
}, {
'url': 'https://oload.press/embed/drTBl1aOTvk/',
'only_matching': True,
}, {
'url': 'https://oladblock.services/f/b8NWEgkqNLI/',
'only_matching': True,
}, {
'url': 'https://oladblock.xyz/f/b8NWEgkqNLI/',
'only_matching': True,
}, {
'url': 'https://oladblock.me/f/b8NWEgkqNLI/',
'only_matching': True,
}, {
'url': 'https://openloed.co/f/b8NWEgkqNLI/',
'only_matching': True,
}]
_USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
_CHROME_VERSIONS = (
'74.0.3729.129',
'76.0.3780.3',
'76.0.3780.2',
'74.0.3729.128',
'76.0.3780.1',
'76.0.3780.0',
'75.0.3770.15',
'74.0.3729.127',
'74.0.3729.126',
'76.0.3779.1',
'76.0.3779.0',
'75.0.3770.14',
'74.0.3729.125',
'76.0.3778.1',
'76.0.3778.0',
'75.0.3770.13',
'74.0.3729.124',
'74.0.3729.123',
'73.0.3683.121',
'76.0.3777.1',
'76.0.3777.0',
'75.0.3770.12',
'74.0.3729.122',
'76.0.3776.4',
'75.0.3770.11',
'74.0.3729.121',
'76.0.3776.3',
'76.0.3776.2',
'73.0.3683.120',
'74.0.3729.120',
'74.0.3729.119',
'74.0.3729.118',
'76.0.3776.1',
'76.0.3776.0',
'76.0.3775.5',
'75.0.3770.10',
'74.0.3729.117',
'76.0.3775.4',
'76.0.3775.3',
'74.0.3729.116',
'75.0.3770.9',
'76.0.3775.2',
'76.0.3775.1',
'76.0.3775.0',
'75.0.3770.8',
'74.0.3729.115',
'74.0.3729.114',
'76.0.3774.1',
'76.0.3774.0',
'75.0.3770.7',
'74.0.3729.113',
'74.0.3729.112',
'74.0.3729.111',
'76.0.3773.1',
'76.0.3773.0',
'75.0.3770.6',
'74.0.3729.110',
'74.0.3729.109',
'76.0.3772.1',
'76.0.3772.0',
'75.0.3770.5',
'74.0.3729.108',
'74.0.3729.107',
'76.0.3771.1',
'76.0.3771.0',
'75.0.3770.4',
'74.0.3729.106',
'74.0.3729.105',
'75.0.3770.3',
'74.0.3729.104',
'74.0.3729.103',
'74.0.3729.102',
'75.0.3770.2',
'74.0.3729.101',
'75.0.3770.1',
'75.0.3770.0',
'74.0.3729.100',
'75.0.3769.5',
'75.0.3769.4',
'74.0.3729.99',
'75.0.3769.3',
'75.0.3769.2',
'75.0.3768.6',
'74.0.3729.98',
'75.0.3769.1',
'75.0.3769.0',
'74.0.3729.97',
'73.0.3683.119',
'73.0.3683.118',
'74.0.3729.96',
'75.0.3768.5',
'75.0.3768.4',
'75.0.3768.3',
'75.0.3768.2',
'74.0.3729.95',
'74.0.3729.94',
'75.0.3768.1',
'75.0.3768.0',
'74.0.3729.93',
'74.0.3729.92',
'73.0.3683.117',
'74.0.3729.91',
'75.0.3766.3',
'74.0.3729.90',
'75.0.3767.2',
'75.0.3767.1',
'75.0.3767.0',
'74.0.3729.89',
'73.0.3683.116',
'75.0.3766.2',
'74.0.3729.88',
'75.0.3766.1',
'75.0.3766.0',
'74.0.3729.87',
'73.0.3683.115',
'74.0.3729.86',
'75.0.3765.1',
'75.0.3765.0',
'74.0.3729.85',
'73.0.3683.114',
'74.0.3729.84',
'75.0.3764.1',
'75.0.3764.0',
'74.0.3729.83',
'73.0.3683.113',
'75.0.3763.2',
'75.0.3761.4',
'74.0.3729.82',
'75.0.3763.1',
'75.0.3763.0',
'74.0.3729.81',
'73.0.3683.112',
'75.0.3762.1',
'75.0.3762.0',
'74.0.3729.80',
'75.0.3761.3',
'74.0.3729.79',
'73.0.3683.111',
'75.0.3761.2',
'74.0.3729.78',
'74.0.3729.77',
'75.0.3761.1',
'75.0.3761.0',
'73.0.3683.110',
'74.0.3729.76',
'74.0.3729.75',
'75.0.3760.0',
'74.0.3729.74',
'75.0.3759.8',
'75.0.3759.7',
'75.0.3759.6',
'74.0.3729.73',
'75.0.3759.5',
'74.0.3729.72',
'73.0.3683.109',
'75.0.3759.4',
'75.0.3759.3',
'74.0.3729.71',
'75.0.3759.2',
'74.0.3729.70',
'73.0.3683.108',
'74.0.3729.69',
'75.0.3759.1',
'75.0.3759.0',
'74.0.3729.68',
'73.0.3683.107',
'74.0.3729.67',
'75.0.3758.1',
'75.0.3758.0',
'74.0.3729.66',
'73.0.3683.106',
'74.0.3729.65',
'75.0.3757.1',
'75.0.3757.0',
'74.0.3729.64',
'73.0.3683.105',
'74.0.3729.63',
'75.0.3756.1',
'75.0.3756.0',
'74.0.3729.62',
'73.0.3683.104',
'75.0.3755.3',
'75.0.3755.2',
'73.0.3683.103',
'75.0.3755.1',
'75.0.3755.0',
'74.0.3729.61',
'73.0.3683.102',
'74.0.3729.60',
'75.0.3754.2',
'74.0.3729.59',
'75.0.3753.4',
'74.0.3729.58',
'75.0.3754.1',
'75.0.3754.0',
'74.0.3729.57',
'73.0.3683.101',
'75.0.3753.3',
'75.0.3752.2',
'75.0.3753.2',
'74.0.3729.56',
'75.0.3753.1',
'75.0.3753.0',
'74.0.3729.55',
'73.0.3683.100',
'74.0.3729.54',
'75.0.3752.1',
'75.0.3752.0',
'74.0.3729.53',
'73.0.3683.99',
'74.0.3729.52',
'75.0.3751.1',
'75.0.3751.0',
'74.0.3729.51',
'73.0.3683.98',
'74.0.3729.50',
'75.0.3750.0',
'74.0.3729.49',
'74.0.3729.48',
'74.0.3729.47',
'75.0.3749.3',
'74.0.3729.46',
'73.0.3683.97',
'75.0.3749.2',
'74.0.3729.45',
'75.0.3749.1',
'75.0.3749.0',
'74.0.3729.44',
'73.0.3683.96',
'74.0.3729.43',
'74.0.3729.42',
'75.0.3748.1',
'75.0.3748.0',
'74.0.3729.41',
'75.0.3747.1',
'73.0.3683.95',
'75.0.3746.4',
'74.0.3729.40',
'74.0.3729.39',
'75.0.3747.0',
'75.0.3746.3',
'75.0.3746.2',
'74.0.3729.38',
'75.0.3746.1',
'75.0.3746.0',
'74.0.3729.37',
'73.0.3683.94',
'75.0.3745.5',
'75.0.3745.4',
'75.0.3745.3',
'75.0.3745.2',
'74.0.3729.36',
'75.0.3745.1',
'75.0.3745.0',
'75.0.3744.2',
'74.0.3729.35',
'73.0.3683.93',
'74.0.3729.34',
'75.0.3744.1',
'75.0.3744.0',
'74.0.3729.33',
'73.0.3683.92',
'74.0.3729.32',
'74.0.3729.31',
'73.0.3683.91',
'75.0.3741.2',
'75.0.3740.5',
'74.0.3729.30',
'75.0.3741.1',
'75.0.3741.0',
'74.0.3729.29',
'75.0.3740.4',
'73.0.3683.90',
'74.0.3729.28',
'75.0.3740.3',
'73.0.3683.89',
'75.0.3740.2',
'74.0.3729.27',
'75.0.3740.1',
'75.0.3740.0',
'74.0.3729.26',
'73.0.3683.88',
'73.0.3683.87',
'74.0.3729.25',
'75.0.3739.1',
'75.0.3739.0',
'73.0.3683.86',
'74.0.3729.24',
'73.0.3683.85',
'75.0.3738.4',
'75.0.3738.3',
'75.0.3738.2',
'75.0.3738.1',
'75.0.3738.0',
'74.0.3729.23',
'73.0.3683.84',
'74.0.3729.22',
'74.0.3729.21',
'75.0.3737.1',
'75.0.3737.0',
'74.0.3729.20',
'73.0.3683.83',
'74.0.3729.19',
'75.0.3736.1',
'75.0.3736.0',
'74.0.3729.18',
'73.0.3683.82',
'74.0.3729.17',
'75.0.3735.1',
'75.0.3735.0',
'74.0.3729.16',
'73.0.3683.81',
'75.0.3734.1',
'75.0.3734.0',
'74.0.3729.15',
'73.0.3683.80',
'74.0.3729.14',
'75.0.3733.1',
'75.0.3733.0',
'75.0.3732.1',
'74.0.3729.13',
'74.0.3729.12',
'73.0.3683.79',
'74.0.3729.11',
'75.0.3732.0',
'74.0.3729.10',
'73.0.3683.78',
'74.0.3729.9',
'74.0.3729.8',
'74.0.3729.7',
'75.0.3731.3',
'75.0.3731.2',
'75.0.3731.0',
'74.0.3729.6',
'73.0.3683.77',
'73.0.3683.76',
'75.0.3730.5',
'75.0.3730.4',
'73.0.3683.75',
'74.0.3729.5',
'73.0.3683.74',
'75.0.3730.3',
'75.0.3730.2',
'74.0.3729.4',
'73.0.3683.73',
'73.0.3683.72',
'75.0.3730.1',
'75.0.3730.0',
'74.0.3729.3',
'73.0.3683.71',
'74.0.3729.2',
'73.0.3683.70',
'74.0.3729.1',
'74.0.3729.0',
'74.0.3726.4',
'73.0.3683.69',
'74.0.3726.3',
'74.0.3728.0',
'74.0.3726.2',
'73.0.3683.68',
'74.0.3726.1',
'74.0.3726.0',
'74.0.3725.4',
'73.0.3683.67',
'73.0.3683.66',
'74.0.3725.3',
'74.0.3725.2',
'74.0.3725.1',
'74.0.3724.8',
'74.0.3725.0',
'73.0.3683.65',
'74.0.3724.7',
'74.0.3724.6',
'74.0.3724.5',
'74.0.3724.4',
'74.0.3724.3',
'74.0.3724.2',
'74.0.3724.1',
'74.0.3724.0',
'73.0.3683.64',
'74.0.3723.1',
'74.0.3723.0',
'73.0.3683.63',
'74.0.3722.1',
'74.0.3722.0',
'73.0.3683.62',
'74.0.3718.9',
'74.0.3702.3',
'74.0.3721.3',
'74.0.3721.2',
'74.0.3721.1',
'74.0.3721.0',
'74.0.3720.6',
'73.0.3683.61',
'72.0.3626.122',
'73.0.3683.60',
'74.0.3720.5',
'72.0.3626.121',
'74.0.3718.8',
'74.0.3720.4',
'74.0.3720.3',
'74.0.3718.7',
'74.0.3720.2',
'74.0.3720.1',
'74.0.3720.0',
'74.0.3718.6',
'74.0.3719.5',
'73.0.3683.59',
'74.0.3718.5',
'74.0.3718.4',
'74.0.3719.4',
'74.0.3719.3',
'74.0.3719.2',
'74.0.3719.1',
'73.0.3683.58',
'74.0.3719.0',
'73.0.3683.57',
'73.0.3683.56',
'74.0.3718.3',
'73.0.3683.55',
'74.0.3718.2',
'74.0.3718.1',
'74.0.3718.0',
'73.0.3683.54',
'74.0.3717.2',
'73.0.3683.53',
'74.0.3717.1',
'74.0.3717.0',
'73.0.3683.52',
'74.0.3716.1',
'74.0.3716.0',
'73.0.3683.51',
'74.0.3715.1',
'74.0.3715.0',
'73.0.3683.50',
'74.0.3711.2',
'74.0.3714.2',
'74.0.3713.3',
'74.0.3714.1',
'74.0.3714.0',
'73.0.3683.49',
'74.0.3713.1',
'74.0.3713.0',
'72.0.3626.120',
'73.0.3683.48',
'74.0.3712.2',
'74.0.3712.1',
'74.0.3712.0',
'73.0.3683.47',
'72.0.3626.119',
'73.0.3683.46',
'74.0.3710.2',
'72.0.3626.118',
'74.0.3711.1',
'74.0.3711.0',
'73.0.3683.45',
'72.0.3626.117',
'74.0.3710.1',
'74.0.3710.0',
'73.0.3683.44',
'72.0.3626.116',
'74.0.3709.1',
'74.0.3709.0',
'74.0.3704.9',
'73.0.3683.43',
'72.0.3626.115',
'74.0.3704.8',
'74.0.3704.7',
'74.0.3708.0',
'74.0.3706.7',
'74.0.3704.6',
'73.0.3683.42',
'72.0.3626.114',
'74.0.3706.6',
'72.0.3626.113',
'74.0.3704.5',
'74.0.3706.5',
'74.0.3706.4',
'74.0.3706.3',
'74.0.3706.2',
'74.0.3706.1',
'74.0.3706.0',
'73.0.3683.41',
'72.0.3626.112',
'74.0.3705.1',
'74.0.3705.0',
'73.0.3683.40',
'72.0.3626.111',
'73.0.3683.39',
'74.0.3704.4',
'73.0.3683.38',
'74.0.3704.3',
'74.0.3704.2',
'74.0.3704.1',
'74.0.3704.0',
'73.0.3683.37',
'72.0.3626.110',
'72.0.3626.109',
'74.0.3703.3',
'74.0.3703.2',
'73.0.3683.36',
'74.0.3703.1',
'74.0.3703.0',
'73.0.3683.35',
'72.0.3626.108',
'74.0.3702.2',
'74.0.3699.3',
'74.0.3702.1',
'74.0.3702.0',
'73.0.3683.34',
'72.0.3626.107',
'73.0.3683.33',
'74.0.3701.1',
'74.0.3701.0',
'73.0.3683.32',
'73.0.3683.31',
'72.0.3626.105',
'74.0.3700.1',
'74.0.3700.0',
'73.0.3683.29',
'72.0.3626.103',
'74.0.3699.2',
'74.0.3699.1',
'74.0.3699.0',
'73.0.3683.28',
'72.0.3626.102',
'73.0.3683.27',
'73.0.3683.26',
'74.0.3698.0',
'74.0.3696.2',
'72.0.3626.101',
'73.0.3683.25',
'74.0.3696.1',
'74.0.3696.0',
'74.0.3694.8',
'72.0.3626.100',
'74.0.3694.7',
'74.0.3694.6',
'74.0.3694.5',
'74.0.3694.4',
'72.0.3626.99',
'72.0.3626.98',
'74.0.3694.3',
'73.0.3683.24',
'72.0.3626.97',
'72.0.3626.96',
'72.0.3626.95',
'73.0.3683.23',
'72.0.3626.94',
'73.0.3683.22',
'73.0.3683.21',
'72.0.3626.93',
'74.0.3694.2',
'72.0.3626.92',
'74.0.3694.1',
'74.0.3694.0',
'74.0.3693.6',
'73.0.3683.20',
'72.0.3626.91',
'74.0.3693.5',
'74.0.3693.4',
'74.0.3693.3',
'74.0.3693.2',
'73.0.3683.19',
'74.0.3693.1',
'74.0.3693.0',
'73.0.3683.18',
'72.0.3626.90',
'74.0.3692.1',
'74.0.3692.0',
'73.0.3683.17',
'72.0.3626.89',
'74.0.3687.3',
'74.0.3691.1',
'74.0.3691.0',
'73.0.3683.16',
'72.0.3626.88',
'72.0.3626.87',
'73.0.3683.15',
'74.0.3690.1',
'74.0.3690.0',
'73.0.3683.14',
'72.0.3626.86',
'73.0.3683.13',
'73.0.3683.12',
'74.0.3689.1',
'74.0.3689.0',
'73.0.3683.11',
'72.0.3626.85',
'73.0.3683.10',
'72.0.3626.84',
'73.0.3683.9',
'74.0.3688.1',
'74.0.3688.0',
'73.0.3683.8',
'72.0.3626.83',
'74.0.3687.2',
'74.0.3687.1',
'74.0.3687.0',
'73.0.3683.7',
'72.0.3626.82',
'74.0.3686.4',
'72.0.3626.81',
'74.0.3686.3',
'74.0.3686.2',
'74.0.3686.1',
'74.0.3686.0',
'73.0.3683.6',
'72.0.3626.80',
'74.0.3685.1',
'74.0.3685.0',
'73.0.3683.5',
'72.0.3626.79',
'74.0.3684.1',
'74.0.3684.0',
'73.0.3683.4',
'72.0.3626.78',
'72.0.3626.77',
'73.0.3683.3',
'73.0.3683.2',
'72.0.3626.76',
'73.0.3683.1',
'73.0.3683.0',
'72.0.3626.75',
'71.0.3578.141',
'73.0.3682.1',
'73.0.3682.0',
'72.0.3626.74',
'71.0.3578.140',
'73.0.3681.4',
'73.0.3681.3',
'73.0.3681.2',
'73.0.3681.1',
'73.0.3681.0',
'72.0.3626.73',
'71.0.3578.139',
'72.0.3626.72',
'72.0.3626.71',
'73.0.3680.1',
'73.0.3680.0',
'72.0.3626.70',
'71.0.3578.138',
'73.0.3678.2',
'73.0.3679.1',
'73.0.3679.0',
'72.0.3626.69',
'71.0.3578.137',
'73.0.3678.1',
'73.0.3678.0',
'71.0.3578.136',
'73.0.3677.1',
'73.0.3677.0',
'72.0.3626.68',
'72.0.3626.67',
'71.0.3578.135',
'73.0.3676.1',
'73.0.3676.0',
'73.0.3674.2',
'72.0.3626.66',
'71.0.3578.134',
'73.0.3674.1',
'73.0.3674.0',
'72.0.3626.65',
'71.0.3578.133',
'73.0.3673.2',
'73.0.3673.1',
'73.0.3673.0',
'72.0.3626.64',
'71.0.3578.132',
'72.0.3626.63',
'72.0.3626.62',
'72.0.3626.61',
'72.0.3626.60',
'73.0.3672.1',
'73.0.3672.0',
'72.0.3626.59',
'71.0.3578.131',
'73.0.3671.3',
'73.0.3671.2',
'73.0.3671.1',
'73.0.3671.0',
'72.0.3626.58',
'71.0.3578.130',
'73.0.3670.1',
'73.0.3670.0',
'72.0.3626.57',
'71.0.3578.129',
'73.0.3669.1',
'73.0.3669.0',
'72.0.3626.56',
'71.0.3578.128',
'73.0.3668.2',
'73.0.3668.1',
'73.0.3668.0',
'72.0.3626.55',
'71.0.3578.127',
'73.0.3667.2',
'73.0.3667.1',
'73.0.3667.0',
'72.0.3626.54',
'71.0.3578.126',
'73.0.3666.1',
'73.0.3666.0',
'72.0.3626.53',
'71.0.3578.125',
'73.0.3665.4',
'73.0.3665.3',
'72.0.3626.52',
'73.0.3665.2',
'73.0.3664.4',
'73.0.3665.1',
'73.0.3665.0',
'72.0.3626.51',
'71.0.3578.124',
'72.0.3626.50',
'73.0.3664.3',
'73.0.3664.2',
'73.0.3664.1',
'73.0.3664.0',
'73.0.3663.2',
'72.0.3626.49',
'71.0.3578.123',
'73.0.3663.1',
'73.0.3663.0',
'72.0.3626.48',
'71.0.3578.122',
'73.0.3662.1',
'73.0.3662.0',
'72.0.3626.47',
'71.0.3578.121',
'73.0.3661.1',
'72.0.3626.46',
'73.0.3661.0',
'72.0.3626.45',
'71.0.3578.120',
'73.0.3660.2',
'73.0.3660.1',
'73.0.3660.0',
'72.0.3626.44',
'71.0.3578.119',
'73.0.3659.1',
'73.0.3659.0',
'72.0.3626.43',
'71.0.3578.118',
'73.0.3658.1',
'73.0.3658.0',
'72.0.3626.42',
'71.0.3578.117',
'73.0.3657.1',
'73.0.3657.0',
'72.0.3626.41',
'71.0.3578.116',
'73.0.3656.1',
'73.0.3656.0',
'72.0.3626.40',
'71.0.3578.115',
'73.0.3655.1',
'73.0.3655.0',
'72.0.3626.39',
'71.0.3578.114',
'73.0.3654.1',
'73.0.3654.0',
'72.0.3626.38',
'71.0.3578.113',
'73.0.3653.1',
'73.0.3653.0',
'72.0.3626.37',
'71.0.3578.112',
'73.0.3652.1',
'73.0.3652.0',
'72.0.3626.36',
'71.0.3578.111',
'73.0.3651.1',
'73.0.3651.0',
'72.0.3626.35',
'71.0.3578.110',
'73.0.3650.1',
'73.0.3650.0',
'72.0.3626.34',
'71.0.3578.109',
'73.0.3649.1',
'73.0.3649.0',
'72.0.3626.33',
'71.0.3578.108',
'73.0.3648.2',
'73.0.3648.1',
'73.0.3648.0',
'72.0.3626.32',
'71.0.3578.107',
'73.0.3647.2',
'73.0.3647.1',
'73.0.3647.0',
'72.0.3626.31',
'71.0.3578.106',
'73.0.3635.3',
'73.0.3646.2',
'73.0.3646.1',
'73.0.3646.0',
'72.0.3626.30',
'71.0.3578.105',
'72.0.3626.29',
'73.0.3645.2',
'73.0.3645.1',
'73.0.3645.0',
'72.0.3626.28',
'71.0.3578.104',
'72.0.3626.27',
'72.0.3626.26',
'72.0.3626.25',
'72.0.3626.24',
'73.0.3644.0',
'73.0.3643.2',
'72.0.3626.23',
'71.0.3578.103',
'73.0.3643.1',
'73.0.3643.0',
'72.0.3626.22',
'71.0.3578.102',
'73.0.3642.1',
'73.0.3642.0',
'72.0.3626.21',
'71.0.3578.101',
'73.0.3641.1',
'73.0.3641.0',
'72.0.3626.20',
'71.0.3578.100',
'72.0.3626.19',
'73.0.3640.1',
'73.0.3640.0',
'72.0.3626.18',
'73.0.3639.1',
'71.0.3578.99',
'73.0.3639.0',
'72.0.3626.17',
'73.0.3638.2',
'72.0.3626.16',
'73.0.3638.1',
'73.0.3638.0',
'72.0.3626.15',
'71.0.3578.98',
'73.0.3635.2',
'71.0.3578.97',
'73.0.3637.1',
'73.0.3637.0',
'72.0.3626.14',
'71.0.3578.96',
'71.0.3578.95',
'72.0.3626.13',
'71.0.3578.94',
'73.0.3636.2',
'71.0.3578.93',
'73.0.3636.1',
'73.0.3636.0',
'72.0.3626.12',
'71.0.3578.92',
'73.0.3635.1',
'73.0.3635.0',
'72.0.3626.11',
'71.0.3578.91',
'73.0.3634.2',
'73.0.3634.1',
'73.0.3634.0',
'72.0.3626.10',
'71.0.3578.90',
'71.0.3578.89',
'73.0.3633.2',
'73.0.3633.1',
'73.0.3633.0',
'72.0.3610.4',
'72.0.3626.9',
'71.0.3578.88',
'73.0.3632.5',
'73.0.3632.4',
'73.0.3632.3',
'73.0.3632.2',
'73.0.3632.1',
'73.0.3632.0',
'72.0.3626.8',
'71.0.3578.87',
'73.0.3631.2',
'73.0.3631.1',
'73.0.3631.0',
'72.0.3626.7',
'71.0.3578.86',
'72.0.3626.6',
'73.0.3630.1',
'73.0.3630.0',
'72.0.3626.5',
'71.0.3578.85',
'72.0.3626.4',
'73.0.3628.3',
'73.0.3628.2',
'73.0.3629.1',
'73.0.3629.0',
'72.0.3626.3',
'71.0.3578.84',
'73.0.3628.1',
'73.0.3628.0',
'71.0.3578.83',
'73.0.3627.1',
'73.0.3627.0',
'72.0.3626.2',
'71.0.3578.82',
'71.0.3578.81',
'71.0.3578.80',
'72.0.3626.1',
'72.0.3626.0',
'71.0.3578.79',
'70.0.3538.124',
'71.0.3578.78',
'72.0.3623.4',
'72.0.3625.2',
'72.0.3625.1',
'72.0.3625.0',
'71.0.3578.77',
'70.0.3538.123',
'72.0.3624.4',
'72.0.3624.3',
'72.0.3624.2',
'71.0.3578.76',
'72.0.3624.1',
'72.0.3624.0',
'72.0.3623.3',
'71.0.3578.75',
'70.0.3538.122',
'71.0.3578.74',
'72.0.3623.2',
'72.0.3610.3',
'72.0.3623.1',
'72.0.3623.0',
'72.0.3622.3',
'72.0.3622.2',
'71.0.3578.73',
'70.0.3538.121',
'72.0.3622.1',
'72.0.3622.0',
'71.0.3578.72',
'70.0.3538.120',
'72.0.3621.1',
'72.0.3621.0',
'71.0.3578.71',
'70.0.3538.119',
'72.0.3620.1',
'72.0.3620.0',
'71.0.3578.70',
'70.0.3538.118',
'71.0.3578.69',
'72.0.3619.1',
'72.0.3619.0',
'71.0.3578.68',
'70.0.3538.117',
'71.0.3578.67',
'72.0.3618.1',
'72.0.3618.0',
'71.0.3578.66',
'70.0.3538.116',
'72.0.3617.1',
'72.0.3617.0',
'71.0.3578.65',
'70.0.3538.115',
'72.0.3602.3',
'71.0.3578.64',
'72.0.3616.1',
'72.0.3616.0',
'71.0.3578.63',
'70.0.3538.114',
'71.0.3578.62',
'72.0.3615.1',
'72.0.3615.0',
'71.0.3578.61',
'70.0.3538.113',
'72.0.3614.1',
'72.0.3614.0',
'71.0.3578.60',
'70.0.3538.112',
'72.0.3613.1',
'72.0.3613.0',
'71.0.3578.59',
'70.0.3538.111',
'72.0.3612.2',
'72.0.3612.1',
'72.0.3612.0',
'70.0.3538.110',
'71.0.3578.58',
'70.0.3538.109',
'72.0.3611.2',
'72.0.3611.1',
'72.0.3611.0',
'71.0.3578.57',
'70.0.3538.108',
'72.0.3610.2',
'71.0.3578.56',
'71.0.3578.55',
'72.0.3610.1',
'72.0.3610.0',
'71.0.3578.54',
'70.0.3538.107',
'71.0.3578.53',
'72.0.3609.3',
'71.0.3578.52',
'72.0.3609.2',
'71.0.3578.51',
'72.0.3608.5',
'72.0.3609.1',
'72.0.3609.0',
'71.0.3578.50',
'70.0.3538.106',
'72.0.3608.4',
'72.0.3608.3',
'72.0.3608.2',
'71.0.3578.49',
'72.0.3608.1',
'72.0.3608.0',
'70.0.3538.105',
'71.0.3578.48',
'72.0.3607.1',
'72.0.3607.0',
'71.0.3578.47',
'70.0.3538.104',
'72.0.3606.2',
'72.0.3606.1',
'72.0.3606.0',
'71.0.3578.46',
'70.0.3538.103',
'70.0.3538.102',
'72.0.3605.3',
'72.0.3605.2',
'72.0.3605.1',
'72.0.3605.0',
'71.0.3578.45',
'70.0.3538.101',
'71.0.3578.44',
'71.0.3578.43',
'70.0.3538.100',
'70.0.3538.99',
'71.0.3578.42',
'72.0.3604.1',
'72.0.3604.0',
'71.0.3578.41',
'70.0.3538.98',
'71.0.3578.40',
'72.0.3603.2',
'72.0.3603.1',
'72.0.3603.0',
'71.0.3578.39',
'70.0.3538.97',
'72.0.3602.2',
'71.0.3578.38',
'71.0.3578.37',
'72.0.3602.1',
'72.0.3602.0',
'71.0.3578.36',
'70.0.3538.96',
'72.0.3601.1',
'72.0.3601.0',
'71.0.3578.35',
'70.0.3538.95',
'72.0.3600.1',
'72.0.3600.0',
'71.0.3578.34',
'70.0.3538.94',
'72.0.3599.3',
'72.0.3599.2',
'72.0.3599.1',
'72.0.3599.0',
'71.0.3578.33',
'70.0.3538.93',
'72.0.3598.1',
'72.0.3598.0',
'71.0.3578.32',
'70.0.3538.87',
'72.0.3597.1',
'72.0.3597.0',
'72.0.3596.2',
'71.0.3578.31',
'70.0.3538.86',
'71.0.3578.30',
'71.0.3578.29',
'72.0.3596.1',
'72.0.3596.0',
'71.0.3578.28',
'70.0.3538.85',
'72.0.3595.2',
'72.0.3591.3',
'72.0.3595.1',
'72.0.3595.0',
'71.0.3578.27',
'70.0.3538.84',
'72.0.3594.1',
'72.0.3594.0',
'71.0.3578.26',
'70.0.3538.83',
'72.0.3593.2',
'72.0.3593.1',
'72.0.3593.0',
'71.0.3578.25',
'70.0.3538.82',
'72.0.3589.3',
'72.0.3592.2',
'72.0.3592.1',
'72.0.3592.0',
'71.0.3578.24',
'72.0.3589.2',
'70.0.3538.81',
'70.0.3538.80',
'72.0.3591.2',
'72.0.3591.1',
'72.0.3591.0',
'71.0.3578.23',
'70.0.3538.79',
'71.0.3578.22',
'72.0.3590.1',
'72.0.3590.0',
'71.0.3578.21',
'70.0.3538.78',
'70.0.3538.77',
'72.0.3589.1',
'72.0.3589.0',
'71.0.3578.20',
'70.0.3538.76',
'71.0.3578.19',
'70.0.3538.75',
'72.0.3588.1',
'72.0.3588.0',
'71.0.3578.18',
'70.0.3538.74',
'72.0.3586.2',
'72.0.3587.0',
'71.0.3578.17',
'70.0.3538.73',
'72.0.3586.1',
'72.0.3586.0',
'71.0.3578.16',
'70.0.3538.72',
'72.0.3585.1',
'72.0.3585.0',
'71.0.3578.15',
'70.0.3538.71',
'71.0.3578.14',
'72.0.3584.1',
'72.0.3584.0',
'71.0.3578.13',
'70.0.3538.70',
'72.0.3583.2',
'71.0.3578.12',
'72.0.3583.1',
'72.0.3583.0',
'71.0.3578.11',
'70.0.3538.69',
'71.0.3578.10',
'72.0.3582.0',
'72.0.3581.4',
'71.0.3578.9',
'70.0.3538.67',
'72.0.3581.3',
'72.0.3581.2',
'72.0.3581.1',
'72.0.3581.0',
'71.0.3578.8',
'70.0.3538.66',
'72.0.3580.1',
'72.0.3580.0',
'71.0.3578.7',
'70.0.3538.65',
'71.0.3578.6',
'72.0.3579.1',
'72.0.3579.0',
'71.0.3578.5',
'70.0.3538.64',
'71.0.3578.4',
'71.0.3578.3',
'71.0.3578.2',
'71.0.3578.1',
'71.0.3578.0',
'70.0.3538.63',
'69.0.3497.128',
'70.0.3538.62',
'70.0.3538.61',
'70.0.3538.60',
'70.0.3538.59',
'71.0.3577.1',
'71.0.3577.0',
'70.0.3538.58',
'69.0.3497.127',
'71.0.3576.2',
'71.0.3576.1',
'71.0.3576.0',
'70.0.3538.57',
'70.0.3538.56',
'71.0.3575.2',
'70.0.3538.55',
'69.0.3497.126',
'70.0.3538.54',
'71.0.3575.1',
'71.0.3575.0',
'71.0.3574.1',
'71.0.3574.0',
'70.0.3538.53',
'69.0.3497.125',
'70.0.3538.52',
'71.0.3573.1',
'71.0.3573.0',
'70.0.3538.51',
'69.0.3497.124',
'71.0.3572.1',
'71.0.3572.0',
'70.0.3538.50',
'69.0.3497.123',
'71.0.3571.2',
'70.0.3538.49',
'69.0.3497.122',
'71.0.3571.1',
'71.0.3571.0',
'70.0.3538.48',
'69.0.3497.121',
'71.0.3570.1',
'71.0.3570.0',
'70.0.3538.47',
'69.0.3497.120',
'71.0.3568.2',
'71.0.3569.1',
'71.0.3569.0',
'70.0.3538.46',
'69.0.3497.119',
'70.0.3538.45',
'71.0.3568.1',
'71.0.3568.0',
'70.0.3538.44',
'69.0.3497.118',
'70.0.3538.43',
'70.0.3538.42',
'71.0.3567.1',
'71.0.3567.0',
'70.0.3538.41',
'69.0.3497.117',
'71.0.3566.1',
'71.0.3566.0',
'70.0.3538.40',
'69.0.3497.116',
'71.0.3565.1',
'71.0.3565.0',
'70.0.3538.39',
'69.0.3497.115',
'71.0.3564.1',
'71.0.3564.0',
'70.0.3538.38',
'69.0.3497.114',
'71.0.3563.0',
'71.0.3562.2',
'70.0.3538.37',
'69.0.3497.113',
'70.0.3538.36',
'70.0.3538.35',
'71.0.3562.1',
'71.0.3562.0',
'70.0.3538.34',
'69.0.3497.112',
'70.0.3538.33',
'71.0.3561.1',
'71.0.3561.0',
'70.0.3538.32',
'69.0.3497.111',
'71.0.3559.6',
'71.0.3560.1',
'71.0.3560.0',
'71.0.3559.5',
'71.0.3559.4',
'70.0.3538.31',
'69.0.3497.110',
'71.0.3559.3',
'70.0.3538.30',
'69.0.3497.109',
'71.0.3559.2',
'71.0.3559.1',
'71.0.3559.0',
'70.0.3538.29',
'69.0.3497.108',
'71.0.3558.2',
'71.0.3558.1',
'71.0.3558.0',
'70.0.3538.28',
'69.0.3497.107',
'71.0.3557.2',
'71.0.3557.1',
'71.0.3557.0',
'70.0.3538.27',
'69.0.3497.106',
'71.0.3554.4',
'70.0.3538.26',
'71.0.3556.1',
'71.0.3556.0',
'70.0.3538.25',
'71.0.3554.3',
'69.0.3497.105',
'71.0.3554.2',
'70.0.3538.24',
'69.0.3497.104',
'71.0.3555.2',
'70.0.3538.23',
'71.0.3555.1',
'71.0.3555.0',
'70.0.3538.22',
'69.0.3497.103',
'71.0.3554.1',
'71.0.3554.0',
'70.0.3538.21',
'69.0.3497.102',
'71.0.3553.3',
'70.0.3538.20',
'69.0.3497.101',
'71.0.3553.2',
'69.0.3497.100',
'71.0.3553.1',
'71.0.3553.0',
'70.0.3538.19',
'69.0.3497.99',
'69.0.3497.98',
'69.0.3497.97',
'71.0.3552.6',
'71.0.3552.5',
'71.0.3552.4',
'71.0.3552.3',
'71.0.3552.2',
'71.0.3552.1',
'71.0.3552.0',
'70.0.3538.18',
'69.0.3497.96',
'71.0.3551.3',
'71.0.3551.2',
'71.0.3551.1',
'71.0.3551.0',
'70.0.3538.17',
'69.0.3497.95',
'71.0.3550.3',
'71.0.3550.2',
'71.0.3550.1',
'71.0.3550.0',
'70.0.3538.16',
'69.0.3497.94',
'71.0.3549.1',
'71.0.3549.0',
'70.0.3538.15',
'69.0.3497.93',
'69.0.3497.92',
'71.0.3548.1',
'71.0.3548.0',
'70.0.3538.14',
'69.0.3497.91',
'71.0.3547.1',
'71.0.3547.0',
'70.0.3538.13',
'69.0.3497.90',
'71.0.3546.2',
'69.0.3497.89',
'71.0.3546.1',
'71.0.3546.0',
'70.0.3538.12',
'69.0.3497.88',
'71.0.3545.4',
'71.0.3545.3',
'71.0.3545.2',
'71.0.3545.1',
'71.0.3545.0',
'70.0.3538.11',
'69.0.3497.87',
'71.0.3544.5',
'71.0.3544.4',
'71.0.3544.3',
'71.0.3544.2',
'71.0.3544.1',
'71.0.3544.0',
'69.0.3497.86',
'70.0.3538.10',
'69.0.3497.85',
'70.0.3538.9',
'69.0.3497.84',
'71.0.3543.4',
'70.0.3538.8',
'71.0.3543.3',
'71.0.3543.2',
'71.0.3543.1',
'71.0.3543.0',
'70.0.3538.7',
'69.0.3497.83',
'71.0.3542.2',
'71.0.3542.1',
'71.0.3542.0',
'70.0.3538.6',
'69.0.3497.82',
'69.0.3497.81',
'71.0.3541.1',
'71.0.3541.0',
'70.0.3538.5',
'69.0.3497.80',
'71.0.3540.1',
'71.0.3540.0',
'70.0.3538.4',
'69.0.3497.79',
'70.0.3538.3',
'71.0.3539.1',
'71.0.3539.0',
'69.0.3497.78',
'68.0.3440.134',
'69.0.3497.77',
'70.0.3538.2',
'70.0.3538.1',
'70.0.3538.0',
'69.0.3497.76',
'68.0.3440.133',
'69.0.3497.75',
'70.0.3537.2',
'70.0.3537.1',
'70.0.3537.0',
'69.0.3497.74',
'68.0.3440.132',
'70.0.3536.0',
'70.0.3535.5',
'70.0.3535.4',
'70.0.3535.3',
'69.0.3497.73',
'68.0.3440.131',
'70.0.3532.8',
'70.0.3532.7',
'69.0.3497.72',
'69.0.3497.71',
'70.0.3535.2',
'70.0.3535.1',
'70.0.3535.0',
'69.0.3497.70',
'68.0.3440.130',
'69.0.3497.69',
'68.0.3440.129',
'70.0.3534.4',
'70.0.3534.3',
'70.0.3534.2',
'70.0.3534.1',
'70.0.3534.0',
'69.0.3497.68',
'68.0.3440.128',
'70.0.3533.2',
'70.0.3533.1',
'70.0.3533.0',
'69.0.3497.67',
'68.0.3440.127',
'70.0.3532.6',
'70.0.3532.5',
'70.0.3532.4',
'69.0.3497.66',
'68.0.3440.126',
'70.0.3532.3',
'70.0.3532.2',
'70.0.3532.1',
'69.0.3497.60',
'69.0.3497.65',
'69.0.3497.64',
'70.0.3532.0',
'70.0.3531.0',
'70.0.3530.4',
'70.0.3530.3',
'70.0.3530.2',
'69.0.3497.58',
'68.0.3440.125',
'69.0.3497.57',
'69.0.3497.56',
'69.0.3497.55',
'69.0.3497.54',
'70.0.3530.1',
'70.0.3530.0',
'69.0.3497.53',
'68.0.3440.124',
'69.0.3497.52',
'70.0.3529.3',
'70.0.3529.2',
'70.0.3529.1',
'70.0.3529.0',
'69.0.3497.51',
'70.0.3528.4',
'68.0.3440.123',
'70.0.3528.3',
'70.0.3528.2',
'70.0.3528.1',
'70.0.3528.0',
'69.0.3497.50',
'68.0.3440.122',
'70.0.3527.1',
'70.0.3527.0',
'69.0.3497.49',
'68.0.3440.121',
'70.0.3526.1',
'70.0.3526.0',
'68.0.3440.120',
'69.0.3497.48',
'69.0.3497.47',
'68.0.3440.119',
'68.0.3440.118',
'70.0.3525.5',
'70.0.3525.4',
'70.0.3525.3',
'68.0.3440.117',
'69.0.3497.46',
'70.0.3525.2',
'70.0.3525.1',
'70.0.3525.0',
'69.0.3497.45',
'68.0.3440.116',
'70.0.3524.4',
'70.0.3524.3',
'69.0.3497.44',
'70.0.3524.2',
'70.0.3524.1',
'70.0.3524.0',
'70.0.3523.2',
'69.0.3497.43',
'68.0.3440.115',
'70.0.3505.9',
'69.0.3497.42',
'70.0.3505.8',
'70.0.3523.1',
'70.0.3523.0',
'69.0.3497.41',
'68.0.3440.114',
'70.0.3505.7',
'69.0.3497.40',
'70.0.3522.1',
'70.0.3522.0',
'70.0.3521.2',
'69.0.3497.39',
'68.0.3440.113',
'70.0.3505.6',
'70.0.3521.1',
'70.0.3521.0',
'69.0.3497.38',
'68.0.3440.112',
'70.0.3520.1',
'70.0.3520.0',
'69.0.3497.37',
'68.0.3440.111',
'70.0.3519.3',
'70.0.3519.2',
'70.0.3519.1',
'70.0.3519.0',
'69.0.3497.36',
'68.0.3440.110',
'70.0.3518.1',
'70.0.3518.0',
'69.0.3497.35',
'69.0.3497.34',
'68.0.3440.109',
'70.0.3517.1',
'70.0.3517.0',
'69.0.3497.33',
'68.0.3440.108',
'69.0.3497.32',
'70.0.3516.3',
'70.0.3516.2',
'70.0.3516.1',
'70.0.3516.0',
'69.0.3497.31',
'68.0.3440.107',
'70.0.3515.4',
'68.0.3440.106',
'70.0.3515.3',
'70.0.3515.2',
'70.0.3515.1',
'70.0.3515.0',
'69.0.3497.30',
'68.0.3440.105',
'68.0.3440.104',
'70.0.3514.2',
'70.0.3514.1',
'70.0.3514.0',
'69.0.3497.29',
'68.0.3440.103',
'70.0.3513.1',
'70.0.3513.0',
'69.0.3497.28',
)
@classmethod
def _extract_urls(cls, webpage):
return re.findall(
r'<iframe[^>]+src=["\']((?:https?://)?%s/%s/[a-zA-Z0-9-_]+)'
% (cls._DOMAINS, cls._EMBED_WORD), webpage)
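# The stream URL is hidden behind obfuscated client-side script, so the page is
# re-rendered with the PhantomJS wrapper before the decoded id is read out of the DOM.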
def _extract_decrypted_page(self, page_url, webpage, video_id, headers):
phantom = PhantomJSwrapper(self, required_version='2.0')
webpage, _ = phantom.get(page_url, html=webpage, video_id=video_id, headers=headers)
return webpage
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host = mobj.group('host')
video_id = mobj.group('id')
url_pattern = 'https://%s/%%s/%s/' % (host, video_id)
headers = {
'User-Agent': self._USER_AGENT_TPL % random.choice(self._CHROME_VERSIONS),
}
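# Try the embed page first and fall back to the stream page; only the last
# attempt is fatal, and a "File not found" page on that attempt raises immediately.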
for path in (self._EMBED_WORD, self._STREAM_WORD):
page_url = url_pattern % path
last = path == self._STREAM_WORD
webpage = self._download_webpage(
page_url, video_id, 'Downloading %s webpage' % path,
headers=headers, fatal=last)
if not webpage:
continue
if 'File not found' in webpage or 'deleted by the owner' in webpage:
if not last:
continue
raise ExtractorError('File not found', expected=True, video_id=video_id)
break
webpage = self._extract_decrypted_page(page_url, webpage, video_id, headers)
for element_id in self._URL_IDS:
decoded_id = get_element_by_id(element_id, webpage)
if decoded_id:
break
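# If none of the known element ids are present, fall back to regexes that
# match the decoded stream-id formats embedded directly in the page.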
if not decoded_id:
decoded_id = self._search_regex(
(r'>\s*([\w-]+~\d{10,}~\d+\.\d+\.0\.0~[\w-]+)\s*<',
r'>\s*([\w~-]+~\d+\.\d+\.\d+\.\d+~[\w~-]+)',
r'>\s*([\w-]+~\d{10,}~(?:[a-f\d]+:){2}:~[\w-]+)\s*<',
r'>\s*([\w~-]+~[a-f0-9:]+~[\w~-]+)\s*<',
r'>\s*([\w~-]+~[a-f0-9:]+~[\w~-]+)'), webpage,
'stream URL')
video_url = 'https://%s/%s/%s?mime=true' % (host, self._REDIR_WORD, decoded_id)
title = self._og_search_title(webpage, default=None) or self._search_regex(
r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage,
'title', default=None) or self._html_search_meta(
'description', webpage, 'title', fatal=True)
entries = self._parse_html5_media_entries(page_url, webpage, video_id)
entry = entries[0] if entries else {}
subtitles = entry.get('subtitles')
return {
'id': video_id,
'title': title,
'thumbnail': entry.get('thumbnail') or self._og_search_thumbnail(webpage, default=None),
'url': video_url,
'ext': determine_ext(title, None) or determine_ext(url, 'mp4'),
'subtitles': subtitles,
'http_headers': headers,
}
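# Verystream reuses the Openload extractor wholesale: only the domain, the
# embed/stream/redirect words and the element id differ, and no PhantomJS
# decryption pass is needed (see the override below).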
class VerystreamIE(OpenloadIE):
IE_NAME = 'verystream'
_DOMAINS = r'(?:verystream\.com)'
_VALID_URL = r'''(?x)
https?://
(?P<host>
(?:www\.)?
%s
)/
(?:stream|e)/
(?P<id>[a-zA-Z0-9-_]+)
''' % _DOMAINS
_EMBED_WORD = 'e'
_STREAM_WORD = 'stream'
_REDIR_WORD = '<PASSWORD>'
_URL_IDS = ('videolink', )
_TESTS = [{
'url': 'https://verystream.com/stream/c1GWQ9ngBBx/',
'md5': 'd3e8c5628ccb9970b65fd65269886795',
'info_dict': {
'id': 'c1GWQ9ngBBx',
'ext': 'mp4',
'title': 'Big Buck Bunny.mp4',
'thumbnail': r're:^https?://.*\.jpg$',
},
}, {
'url': 'https://verystream.com/e/c1GWQ9ngBBx/',
'only_matching': True,
}]
def _extract_decrypted_page(self, page_url, webpage, video_id, headers):
return webpage # for Verystream, the webpage is already decrypted
```
#### File: youtube_dl/extractor/packtpub.py
```python
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_HTTPError,
)
from ..utils import (
clean_html,
ExtractorError,
remove_end,
strip_or_none,
unified_timestamp,
urljoin,
)
class PacktPubBaseIE(InfoExtractor):
_PACKT_BASE = 'https://www.packtpub.com'
_MAPT_REST = '%s/mapt-rest' % _PACKT_BASE
class PacktPubIE(PacktPubBaseIE):
_VALID_URL = r'https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<course_id>\d+)/(?P<chapter_id>\d+)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215/20528/20530/Project+Intro',
'md5': '1e74bd6cfd45d7d07666f4684ef58f70',
'info_dict': {
'id': '20530',
'ext': 'mp4',
'title': 'Project Intro',
'thumbnail': r're:(?i)^https?://.*\.jpg',
'timestamp': 1490918400,
'upload_date': '20170331',
},
}, {
'url': 'https://subscription.packtpub.com/video/web_development/9781787122215/20528/20530/project-intro',
'only_matching': True,
}]
_NETRC_MACHINE = 'packtpub'
_TOKEN = None
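# If credentials are configured (.netrc machine "packtpub" or --username/--password),
# exchange them for an access token and send it as a Bearer header on API requests.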
def _real_initialize(self):
username, password = self._get_login_info()
if username is None:
return
try:
self._TOKEN = self._download_json(
self._MAPT_REST + '/users/tokens', None,
'Downloading Authorization Token', data=json.dumps({
'email': username,
'password': password,
}).encode())['data']['access']
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 401, 404):
message = self._parse_json(e.cause.read().decode(), None)['message']
raise ExtractorError(message, expected=True)
raise
def _handle_error(self, response):
if response.get('status') != 'success':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, response['message']),
expected=True)
def _download_json(self, *args, **kwargs):
response = super(PacktPubIE, self)._download_json(*args, **kwargs)
self._handle_error(response)
return response
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
course_id, chapter_id, video_id = mobj.group(
'course_id', 'chapter_id', 'id')
headers = {}
if self._TOKEN:
headers['Authorization'] = 'Bearer ' + self._TOKEN
video = self._download_json(
'%s/users/me/products/%s/chapters/%s/sections/%s'
% (self._MAPT_REST, course_id, chapter_id, video_id), video_id,
'Downloading JSON video', headers=headers)['data']
content = video.get('content')
if not content:
self.raise_login_required('This video is locked')
video_url = content['file']
metadata = self._download_json(
'%s/products/%s/chapters/%s/sections/%s/metadata'
% (self._MAPT_REST, course_id, chapter_id, video_id),
video_id)['data']
title = metadata['pageTitle']
course_title = metadata.get('title')
if course_title:
title = remove_end(title, ' - %s' % course_title)
timestamp = unified_timestamp(metadata.get('publicationDate'))
thumbnail = urljoin(self._PACKT_BASE, metadata.get('filepath'))
return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
'timestamp': timestamp,
}
class PacktPubCourseIE(PacktPubBaseIE):
_VALID_URL = r'(?P<url>https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<id>\d+))'
_TESTS = [{
'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215',
'info_dict': {
'id': '9781787122215',
'title': 'Learn Nodejs by building 12 projects [Video]',
},
'playlist_count': 90,
}, {
'url': 'https://subscription.packtpub.com/video/web_development/9781787122215',
'only_matching': True,
}]
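# Course URLs are a prefix of single-video URLs, so defer to PacktPubIE
# whenever the URL also matches the video pattern.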
@classmethod
def suitable(cls, url):
return False if PacktPubIE.suitable(url) else super(
PacktPubCourseIE, cls).suitable(url)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
url, course_id = mobj.group('url', 'id')
course = self._download_json(
'%s/products/%s/metadata' % (self._MAPT_REST, course_id),
course_id)['data']
entries = []
for chapter_num, chapter in enumerate(course['tableOfContents'], 1):
if chapter.get('type') != 'chapter':
continue
children = chapter.get('children')
if not isinstance(children, list):
continue
chapter_info = {
'chapter': chapter.get('title'),
'chapter_number': chapter_num,
'chapter_id': chapter.get('id'),
}
for section in children:
if section.get('type') != 'section':
continue
section_url = section.get('seoUrl')
if not isinstance(section_url, compat_str):
continue
entry = {
'_type': 'url_transparent',
'url': urljoin(url + '/', section_url),
'title': strip_or_none(section.get('title')),
'description': clean_html(section.get('summary')),
'ie_key': PacktPubIE.ie_key(),
}
entry.update(chapter_info)
entries.append(entry)
return self.playlist_result(entries, course_id, course.get('title'))
``` |
{
"source": "joyent/python-manta",
"score": 2
} |
#### File: python-manta/test/common.py
```python
from __future__ import absolute_import
__all__ = ["stor", "MantaTestCase"]
import sys
import os
from posixpath import join as ujoin
import unittest
import subprocess
from subprocess import PIPE
import manta
#---- exports
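# Build a Manta object path under /<MANTA_USER>/stor, joining any subpaths
# and tolerating a leading slash on the first subpath.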
def stor(*subpaths):
MANTA_USER = os.environ['MANTA_USER']
if not subpaths:
return '/%s/stor' % MANTA_USER
subpath = ujoin(*subpaths)
if subpath.startswith("/"):
subpath = subpath[1:]
return "/%s/stor/%s" % (MANTA_USER, subpath)
class MantaTestCase(unittest.TestCase):
def __init__(self, *args):
self.account = os.environ["MANTA_USER"]
self.subuser = os.environ.get("MANTA_SUBUSER", None)
self.role = os.environ.get("MANTA_ROLE", None)
unittest.TestCase.__init__(self, *args)
_client = None
def get_client(self):
MANTA_URL = os.environ['MANTA_URL']
MANTA_KEY_ID = os.environ['MANTA_KEY_ID']
MANTA_TLS_INSECURE = bool(os.environ.get('MANTA_TLS_INSECURE', False))
if not self._client:
signer = manta.CLISigner(key_id=MANTA_KEY_ID)
self._client = manta.MantaClient(
url=MANTA_URL,
account=self.account,
subuser=self.subuser,
role=self.role,
signer=signer,
# Uncomment this for verbose client output for test run.
#verbose=True,
disable_ssl_certificate_validation=MANTA_TLS_INSECURE)
return self._client
def mantash(self, args):
mantash = os.path.realpath(os.path.join(
os.path.dirname(__file__), "..", "bin", "mantash"))
argv = [sys.executable, mantash]
MANTA_INSECURE = bool(os.environ.get('MANTA_INSECURE', False))
if MANTA_INSECURE:
argv.append('-k')
argv += args
p = subprocess.Popen(argv,
shell=False,
stdout=PIPE,
stderr=PIPE,
close_fds=True)
# communicate() reads both pipes and waits, avoiding the deadlock that
# wait() followed by pipe reads can hit when a child fills the pipe buffer
stdout, stderr = p.communicate()
code = p.returncode
return code, stdout, stderr
``` |
{
"source": "joyer7/CDSW-Demos-KOLON",
"score": 3
} |
#### File: CDSW-Demos-KOLON/Example06-ds-for-telco/predict_churn_pyspark.py
```python
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.ml import PipelineModel
spark = SparkSession.builder \
.appName("Telco Customer Churn") \
.master("local[*]") \
.getOrCreate()
model = PipelineModel.load("file:///home/cdsw/models/spark")
# model = PipelineModel.load("hdfs:///user/bmoran/models/telco")
features = ["intl_plan", "account_length", "number_vmail_messages", "total_day_calls",
"total_day_charge", "total_eve_calls", "total_eve_charge",
"total_night_calls", "total_night_charge", "total_intl_calls",
"total_intl_charge","number_customer_service_calls"]
def predict(args):
account=args["feature"].split(",")
feature = spark.createDataFrame([account[:1] + list(map(float,account[1:12]))], features)
result=model.transform(feature).collect()[0].prediction
return {"result" : result}
``` |
{
"source": "joyfeel/leetcode",
"score": 3
} |
#### File: leetcode-algorithms/027. Remove Element/solution.py
```python
class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
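# Two-pointer overwrite: "index" marks where the next kept element goes;
# values equal to val are skipped and the new logical length is returned.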
index = 0
nums_length = range(len(nums))
for i in nums_length:
if nums[i] != val:
nums[index] = nums[i]
index += 1
# nums = nums[0: index]
return index
```
#### File: leetcode-algorithms/066. Plus One/solution.py
```python
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
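# Walk the digits from the end: a 9 becomes 0 and the carry moves left,
# otherwise add one and return. Falling out of the loop means every digit
# was 9, so the result is 1 followed by zeros.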
length = len(digits)
for i in reversed(range(length)):
if digits[i] == 9:
digits[i] = 0
else:
digits[i] += 1
return digits
digits[0] = 1
digits.append(0)
return digits
```
#### File: leetcode-algorithms/709. To Lower Case/solution.py
```python
class Solution:
def toLowerCase(self, str: str) -> str:
return str.lower()
``` |
{
"source": "joyfuI/vlive",
"score": 2
} |
#### File: joyfuI/vlive/main.py
```python
from typing import Optional
import traceback
import os
import sqlite3
import time
import re
from threading import Thread
from flask import render_template, jsonify
import requests
from framework import path_data, scheduler, app, db, celery
from framework.common.plugin import LogicModuleBase, default_route_socketio
from .plugin import P
from .logic_queue import LogicQueue
from .model import ModelScheduler
from .api_youtube_dl import APIYoutubeDL
logger = P.logger
package_name = P.package_name
ModelSetting = P.ModelSetting
class LogicMain(LogicModuleBase):
db_default = {
'db_version': '2',
f'{package_name}_interval': '* * * * *',
f'{package_name}_auto_start': 'False',
'default_save_path': os.path.join(path_data, 'download', package_name),
'default_filename': '%(title)s.%(id)s.%(ext)s',
'cookiefile_path': ''
}
def __init__(self, p):
super(LogicMain, self).__init__(p, None, scheduler_desc='V LIVE ์๋ก์ด ์์ ๋ค์ด๋ก๋')
self.name = package_name  # module name
default_route_socketio(p, self)
def plugin_load(self):
try:
LogicQueue.queue_load()
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
def process_menu(self, sub, req):
try:
arg = {
'package_name': package_name,
'sub': package_name,
'template_name': f'{package_name}_{sub}'
}
if sub == 'setting':
arg.update(ModelSetting.to_dict())
job_id = f'{self.P.package_name}_{self.name}'
arg['scheduler'] = str(scheduler.is_include(job_id))
arg['is_running'] = str(scheduler.is_running(job_id))
arg['path_data'] = path_data
elif sub == 'recent':
arg['url'] = req.args.get('url', '')
arg['recent_html'] = LogicMain.get_recent_html()
arg['save_path'] = ModelSetting.get('default_save_path')
arg['filename'] = ModelSetting.get('default_filename')
elif sub == 'scheduler':
arg['save_path'] = ModelSetting.get('default_save_path')
arg['filename'] = ModelSetting.get('default_filename')
return render_template(f'{package_name}_{sub}.html', arg=arg)
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
return render_template('sample.html', title=f'{package_name} - {sub}')
def process_ajax(self, sub, req):
try:
logger.debug('AJAX: %s, %s', sub, req.values)
ret = {'ret': 'success'}
if sub == 'add_download':
ret['msg'] = f'{LogicMain.download(req.form)}๊ฐ๋ฅผ ํ์ ์ถ๊ฐํ์์ต๋๋ค.'
elif sub == 'list_scheduler':
ret['data'] = LogicMain.get_scheduler()
elif sub == 'add_scheduler':
if LogicMain.add_scheduler(req.form):
ret['msg'] = '์ค์ผ์ค์ ์ ์ฅํ์์ต๋๋ค.'
else:
ret['ret'] = 'warning'
ret['msg'] = 'V LIVE ์ฑ๋์ ๋ถ์ํ์ง ๋ชปํ์ต๋๋ค.'
elif sub == 'del_scheduler':
LogicMain.del_scheduler(req.form['id'])
ret['msg'] = '์ญ์ ํ์์ต๋๋ค.'
return jsonify(ret)
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
return jsonify({'ret': 'danger', 'msg': str(e)})
def scheduler_function(self):
if app.config['config']['use_celery']:
result = LogicMain.task.apply_async()
result.get()
else:
LogicMain.task()
def migration(self):
try:
db_version = ModelSetting.get_int('db_version')
connect = sqlite3.connect(os.path.join(path_data, 'db', f'{package_name}.db'))
if db_version < 2:
cursor = connect.cursor()
cursor.execute(f"SELECT * FROM {package_name}_setting WHERE key = 'interval'")
interval = cursor.fetchone()[2]
cursor.execute(f"UPDATE {package_name}_setting SET value = ? WHERE key = '{package_name}_interval'",
(interval,))
cursor.execute(f"DELETE FROM {package_name}_setting WHERE key = 'interval'")
cursor.execute(f"SELECT * FROM {package_name}_setting WHERE key = 'auto_start'")
auto_start = cursor.fetchone()[2]
cursor.execute(f"UPDATE {package_name}_setting SET value = ? WHERE key = '{package_name}_auto_start'",
(auto_start,))
cursor.execute(f"DELETE FROM {package_name}_setting WHERE key = 'auto_start'")
connect.commit()
connect.close()
ModelSetting.set('db_version', LogicMain.db_default['db_version'])
db.session.flush()
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
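# Scheduler task: for each live-enabled entry, look up the channel's first
# live video and, if it has not been seen yet, hand it to the youtube-dl
# plugin and remember it in download_list.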
@staticmethod
@celery.task
def task():
try:
for entity in ModelScheduler.get_list():
if not entity.is_live:
continue
logger.debug('scheduler download %s', entity.url)
video_url = LogicMain.get_first_live_video(entity.url)  # first live video
if video_url is None or video_url in LogicMain.download_list:
continue
download = APIYoutubeDL.download(package_name, entity.key, video_url, filename=entity.filename,
save_path=entity.save_path, start=True,
cookiefile=ModelSetting.get('cookiefile_path'))
entity.update(LogicMain.get_count_video(entity.url))  # video count
if download['errorCode'] == 0:
LogicMain.download_list.add(video_url)
Thread(target=LogicMain.download_check_function,
args=(video_url, download['index'], entity.key)).start()
entity.update()
else:
logger.debug('scheduler download fail %s', download['errorCode'])
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
download_list = set()
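# download_list tracks video URLs already handed to youtube-dl; the checker
# below re-queries the status after 10 seconds and removes the URL again if
# the download errored, so the scheduler can retry it later.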
@staticmethod
def download_check_function(url: str, index: int, key: str):
time.sleep(10)  # wait 10 seconds
status = APIYoutubeDL.status(package_name, index, key)
if status['status'] == 'ERROR':
LogicMain.download_list.remove(url)
@staticmethod
def download(form) -> int:
options = {
'save_path': form['save_path'],
'filename': form['filename'],
}
for i in form.getlist('download[]'):
LogicQueue.add_queue(i, options)
return len(form.getlist('download[]'))
@staticmethod
def get_scheduler() -> list:
scheduler_list = []
for i in ModelScheduler.get_list(True):
i['last_time'] = i['last_time'].strftime('%m-%d %H:%M:%S')
i['path'] = os.path.join(i['save_path'], i['filename'])
scheduler_list.append(i)
return scheduler_list
@staticmethod
def add_scheduler(form) -> bool:
if form['db_id']:
data = {
'save_path': form['save_path'],
'filename': form['filename'],
'is_live': True
# 'is_live': bool(form['is_live']) if str(form['is_live']).lower() != 'false' else False
}
ModelScheduler.find(form['db_id']).update(data)
else:
info_dict = LogicMain.get_channel_info(form['url'])
if info_dict is None:
return False
data = {
'webpage_url': info_dict['webpage_url'],
'title': info_dict['title'],
'count': info_dict['count'],
'save_path': form['save_path'],
'filename': form['filename'],
'is_live': True
# 'is_live': bool(form['is_live']) if str(form['is_live']).lower() != 'false' else False
}
ModelScheduler.create(data)
return True
@staticmethod
def del_scheduler(db_id: int):
logger.debug('del_scheduler %s', db_id)
ModelScheduler.find(db_id).delete()
@staticmethod
def get_channel_info(channel_url: str) -> Optional[dict]:
channel_id = channel_url.split('/')[-1]
url = f'https://www.vlive.tv/globalv-web/vam-web/member/v1.0/channel-{channel_id}/officialProfiles'
params = {
'appId': '8c6cc7b45d2568fb668be6e05b6e5a3b',
'fields': 'officialName',
'types': 'STAR',
'gcc': 'KR',
'locale': 'ko_KR'
}
headers = {
'Referer': 'https://www.vlive.tv/'
}
try:
json = requests.get(url, params=params, headers=headers).json()[0]
except (IndexError, KeyError):
# ์๋ชป๋ channel_id ๋ฑ์ ์ด์ ๋ก ์๋ฑํ ๊ฐ์ด ๋ฐํ๋๋ฉด
return None
channel_info = {
'webpage_url': f'https://www.vlive.tv/channel/{channel_id}',
'title': json['officialName'],
'count': LogicMain.get_count_video(channel_url),
}
return channel_info
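# Query the channel's starPosts API and return the URL of the first post that
# is a currently-live official video, or None if nothing is live.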
@staticmethod
def get_first_live_video(channel_url: str) -> Optional[str]:
channel_id = channel_url.split('/')[-1]
url = f'https://www.vlive.tv/globalv-web/vam-web/post/v1.0/channel-{channel_id}/starPosts'
params = {
'appId': '8c6cc7b45d2568fb668be6e05b6e5a3b',
'fields': 'contentType,officialVideo,title,url',
'gcc': 'KR',
'locale': 'ko_KR',
'pageSize': 5
}
headers = {
'Referer': 'https://www.vlive.tv/'
}
json = requests.get(url, params=params, headers=headers).json()
video_url = None
for data in json['data']:
if data['contentType'] == 'VIDEO':
if data['officialVideo']['type'] == 'LIVE':
video_url = data['url']
break
return video_url
@staticmethod
def get_count_video(channel_url: str) -> int:
html = requests.get(channel_url).text
pattern = re.compile(r'"videoCountOfStar":(\d+)')
return int(pattern.findall(html)[0])
@staticmethod
def get_recent_html() -> str:
url = 'https://www.vlive.tv/home/video/more'
params = {
'viewType': 'recent',
'pageSize': 20,
'pageNo': 1,
}
headers = {
'Accept-Language': 'ko-KR,ko;q=0.8,en-US;q=0.5,en;q=0.3'
}
html = requests.get(url, params=params, headers=headers).text
html = re.sub(r'href="(.+?)"', r'href="https://www.vlive.tv\1"', html)
html = re.sub(r'onclick="vlive.tv.common.videoGa\(this\);"', r'onclick="link_click(this); return false;"', html)
html = re.sub(r'onclick="vlive.tv.common.chGa\(this\);"|onerror="(.+?)"', '', html)
return html
```
#### File: joyfuI/vlive/plugin.py
```python
import os
import traceback
from flask import Blueprint
from framework import app, path_data
from framework.logger import get_logger
from framework.util import Util
from framework.common.plugin import get_model_setting, Logic, default_route_single_module
class P(object):
package_name = __name__.split('.')[0]
logger = get_logger(package_name)
blueprint = Blueprint(package_name, package_name, url_prefix=f'/{package_name}',
template_folder=os.path.join(os.path.dirname(__file__), 'templates'),
static_folder=os.path.join(os.path.dirname(__file__), 'static'))
# ๋ฉ๋ด ์ ์
menu = {
'main': [package_name, 'V LIVE'],
'sub': [
['setting', '์ค์ '], ['recent', '์ต๊ทผ ๋ฐฉ์ก'], ['scheduler', '์ค์ผ์ค๋ง'], ['log', '๋ก๊ทธ']
],
'category': 'vod'
}
plugin_info = {
'version': '2.0.1',
'name': package_name,
'category_name': 'vod',
'icon': '',
'developer': 'joyfuI',
'description': 'V LIVE ๋ค์ด๋ก๋',
'home': f'https://github.com/joyfuI/{package_name}',
'more': '',
}
ModelSetting = get_model_setting(package_name, logger)
logic = None
module_list = None
home_module = 'recent'  # default module
def initialize():
try:
app.config['SQLALCHEMY_BINDS'][
P.package_name] = f"sqlite:///{os.path.join(path_data, 'db', f'{P.package_name}.db')}"
Util.save_from_dict_to_json(P.plugin_info, os.path.join(os.path.dirname(__file__), 'info.json'))
# ๋ก๋ํ ๋ชจ๋ ์ ์
from .main import LogicMain
P.module_list = [LogicMain(P)]
P.logic = Logic(P)
default_route_single_module(P)
except Exception as e:
P.logger.error('Exception:%s', e)
P.logger.error(traceback.format_exc())
logger = P.logger
initialize()
``` |
{
"source": "joyfuI/youtube",
"score": 2
} |
#### File: joyfuI/youtube/logic.py
```python
import os
import traceback
import time
import sqlite3
from threading import Thread
from framework import db, scheduler, path_data
from framework.logger import get_logger
from framework.job import Job
from framework.util import Util
from .model import ModelSetting, ModelQueue
from .logic_normal import LogicNormal
package_name = __name__.split('.')[0]
logger = get_logger(package_name)
class Logic(object):
db_default = {
'db_version': '4',
'interval': '360',
'auto_start': 'False',
'default_save_path': os.path.join(path_data, 'download', package_name),
'default_filename': '%(title)s-%(id)s.%(ext)s',
'cookiefile_path': ''
}
@staticmethod
def db_init():
try:
for key, value in Logic.db_default.items():
if db.session.query(ModelSetting).filter_by(key=key).count() == 0:
db.session.add(ModelSetting(key, value))
db.session.commit()
Logic.migration()
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
@staticmethod
def plugin_load():
try:
logger.debug('%s plugin_load', package_name)
Logic.db_init()  # initialize the DB
# archive ํ์ผ ์ ์ฅ ํด๋ ์์ฑ
path = os.path.join(path_data, 'db', package_name)
if not os.path.isdir(path):
os.makedirs(path)
if ModelSetting.get_bool('auto_start'):
Logic.scheduler_start()
# ํธ์๋ฅผ ์ํด json ํ์ผ ์์ฑ
from .plugin import plugin_info
Util.save_from_dict_to_json(plugin_info, os.path.join(os.path.dirname(__file__), 'info.json'))
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
@staticmethod
def plugin_unload():
try:
logger.debug('%s plugin_unload', package_name)
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
@staticmethod
def scheduler_start():
try:
logger.debug('%s scheduler_start', package_name)
interval = ModelSetting.get('interval')
job = Job(package_name, package_name, interval, Logic.scheduler_function, '์ ํ๋ธ ์๋ก์ด ์์ ๋ค์ด๋ก๋', False)
scheduler.add_job_instance(job)
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
@staticmethod
def scheduler_stop():
try:
logger.debug('%s scheduler_stop', package_name)
scheduler.remove_job(package_name)
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
@staticmethod
def scheduler_function():
try:
LogicNormal.scheduler_function()
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
@staticmethod
def one_execute():
try:
if scheduler.is_include(package_name):
if scheduler.is_running(package_name):
ret = 'is_running'
else:
scheduler.execute_job(package_name)
ret = 'scheduler'
else:
def func():
time.sleep(2)
Logic.scheduler_function()
Thread(target=func, args=()).start()
ret = 'thread'
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
ret = 'fail'
return ret
@staticmethod
def reset_db():
try:
db.session.query(ModelQueue).delete()
db.session.commit()
return True
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
return False
@staticmethod
def migration():
try:
db_version = ModelSetting.get_int('db_version')
connect = sqlite3.connect(os.path.join(path_data, 'db', '%s.db' % package_name))
if db_version < 2:
cursor = connect.cursor()
cursor.execute("SELECT * FROM youtube_setting WHERE key = 'save_path'")
save_path = cursor.fetchone()[2]
cursor.execute("UPDATE youtube_setting SET value = ? WHERE key = 'default_save_path'", (save_path,))
cursor.execute("DELETE FROM youtube_setting WHERE key = 'save_path'")
cursor.execute("ALTER TABLE youtube_scheduler ADD save_path VARCHAR")
cursor.execute("UPDATE youtube_scheduler SET save_path = ?", (save_path,))
cursor.execute("ALTER TABLE youtube_queue ADD save_path VARCHAR")
cursor.execute("UPDATE youtube_queue SET save_path = ?", (save_path,))
if db_version < 3:
cursor = connect.cursor()
cursor.execute("ALTER TABLE youtube_scheduler ADD date_after DATE")
cursor.execute("UPDATE youtube_scheduler SET date_after = ?", (None,))
cursor.execute("ALTER TABLE youtube_queue ADD date_after DATE")
cursor.execute("UPDATE youtube_queue SET date_after = ?", (None,))
if db_version < 4:
cursor = connect.cursor()
cursor.execute("ALTER TABLE youtube_scheduler ADD subtitle VARCHAR")
cursor.execute("UPDATE youtube_scheduler SET subtitle = ?", (None,))
cursor.execute("ALTER TABLE youtube_queue ADD subtitle VARCHAR")
cursor.execute("UPDATE youtube_queue SET subtitle = ?", (None,))
cursor.execute("ALTER TABLE youtube_scheduler ADD playlistreverse BOOLEAN")
cursor.execute("UPDATE youtube_scheduler SET playlistreverse = ?", (False,))
cursor.execute("ALTER TABLE youtube_queue ADD playlistreverse BOOLEAN")
cursor.execute("UPDATE youtube_queue SET playlistreverse = ?", (False,))
connect.commit()
connect.close()
ModelSetting.set('db_version', Logic.db_default['db_version'])
db.session.flush()
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
```
#### File: joyfuI/youtube/logic_queue.py
```python
import traceback
import time
from threading import Thread
from framework.logger import get_logger
from .model import ModelSetting, ModelQueue
from .api_youtube_dl import APIYoutubeDL
package_name = __name__.split('.')[0]
logger = get_logger(package_name)
class LogicQueue(object):
_thread = None
@staticmethod
def queue_load():
Thread(target=LogicQueue.queue_start).start()
@staticmethod
def queue_start():
try:
time.sleep(10)  # the youtube-dl plugin may not be loaded yet, so wait 10 seconds first
for i in ModelQueue.get_list():
logger.debug('queue add %s', i.url)
date_after = i.date_after.strftime('%Y%m%d') if i.date_after else None
download = APIYoutubeDL.download(package_name, i.key, i.url, filename=i.filename, save_path=i.save_path,
format_code=i.format, preferredcodec='mp3' if i.convert_mp3 else None,
dateafter=date_after,
playlist='reverse' if i.playlistreverse else None, start=False,
cookiefile=ModelSetting.get('cookiefile_path'))
if i.subtitle is not None:
sub = APIYoutubeDL.sub(package_name, i.key, i.url, filename=i.filename, save_path=i.save_path,
all_subs=False, sub_lang=i.subtitle, auto_sub=True, dateafter=date_after,
playlist='reverse' if i.playlistreverse else None, start=True,
cookiefile=ModelSetting.get('cookiefile_path'))
else:
sub = {'errorCode': 0}
if download['errorCode'] == 0 and sub['errorCode'] == 0:
i.set_index(download['index'])
else:
logger.debug('queue add fail %s %s', download['errorCode'], sub['errorCode'])
i.delete()
LogicQueue._thread = Thread(target=LogicQueue.thread_function)
LogicQueue._thread.daemon = True
LogicQueue._thread.start()
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
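# Worker loop: take queue entries in order, start the prepared youtube-dl job
# and poll its status every 10 seconds until it completes, errors or is stopped.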
@staticmethod
def thread_function():
try:
while not ModelQueue.is_empty():
entity = ModelQueue.peek()
logger.debug('queue download %s', entity.url)
start = APIYoutubeDL.start(package_name, entity.index, entity.key)
if start['errorCode'] == 0:
while True:
time.sleep(10)  # wait 10 seconds
status = APIYoutubeDL.status(package_name, entity.index, entity.key)
if status['status'] in ('COMPLETED', 'ERROR', 'STOP'):
break
else:
logger.debug('queue download fail %s', start['errorCode'])
entity.delete()
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
@staticmethod
def add_queue(url, options):
try:
options['webpage_url'] = url
entity = ModelQueue.create(options)
date_after = entity.date_after.strftime('%Y%m%d') if entity.date_after else None
download = APIYoutubeDL.download(package_name, entity.key, url, filename=entity.filename,
save_path=entity.save_path, format_code=entity.format,
preferredcodec='mp3' if entity.convert_mp3 else None, dateafter=date_after,
playlist='reverse' if entity.playlistreverse else None, start=False,
cookiefile=ModelSetting.get('cookiefile_path'))
if entity.subtitle is not None:
sub = APIYoutubeDL.sub(package_name, entity.key, url, filename=entity.filename,
save_path=entity.save_path, all_subs=False, sub_lang=entity.subtitle,
auto_sub=True, dateafter=date_after,
playlist='reverse' if entity.playlistreverse else None, start=True,
cookiefile=ModelSetting.get('cookiefile_path'))
else:
sub = {'errorCode': 0}
if download['errorCode'] == 0 and sub['errorCode'] == 0:
entity.set_index(download['index'])
else:
logger.debug('queue add fail %s %s', download['errorCode'], sub['errorCode'])
entity.delete()
return None
if not LogicQueue._thread.is_alive():
LogicQueue._thread = Thread(target=LogicQueue.thread_function)
LogicQueue._thread.daemon = True
LogicQueue._thread.start()
return entity
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
return None
```
#### File: joyfuI/youtube/plugin.py
```python
import os
import traceback
from datetime import date
from flask import Blueprint, request, render_template, redirect, jsonify
from flask_login import login_required
from framework import scheduler, socketio
from framework.logger import get_logger
from .logic import Logic
from .logic_normal import LogicNormal
from .logic_queue import LogicQueue
from .model import ModelSetting
package_name = __name__.split('.')[0]
logger = get_logger(package_name)
#########################################################
# ํ๋ฌ๊ทธ์ธ ๊ณต์ฉ
#########################################################
blueprint = Blueprint(package_name, package_name, url_prefix='/%s' % package_name,
template_folder=os.path.join(os.path.dirname(__file__), 'templates'),
static_folder=os.path.join(os.path.dirname(__file__), 'static'))
menu = {
'main': [package_name, '์ ํ๋ธ'],
'sub': [
['setting', '์ค์ '], ['request', '์์ฒญ'], ['scheduler', '์ค์ผ์ค๋ง'], ['log', '๋ก๊ทธ']
],
'category': 'vod'
}
plugin_info = {
'version': '2.1.0',
'name': 'youtube',
'category_name': 'vod',
'developer': 'joyfuI',
'description': 'YouTube ๋ค์ด๋ก๋',
'home': 'https://github.com/joyfuI/youtube',
'more': ''
}
def plugin_load():
Logic.plugin_load()
LogicQueue.queue_load()
def plugin_unload():
Logic.plugin_unload()
#########################################################
# WEB Menu
#########################################################
@blueprint.route('/')
def home():
return redirect('/%s/request' % package_name)
@blueprint.route('/<sub>')
@login_required
def first_menu(sub):
try:
arg = {
'package_name': package_name,
'template_name': '%s_%s' % (package_name, sub)
}
if sub == 'setting':
arg.update(ModelSetting.to_dict())
arg['scheduler'] = str(scheduler.is_include(package_name))
arg['is_running'] = str(scheduler.is_running(package_name))
return render_template('%s_%s.html' % (package_name, sub), arg=arg)
elif sub == 'request':
arg['url'] = request.args.get('url', '')
arg['save_path'] = ModelSetting.get('default_save_path')
arg['filename'] = ModelSetting.get('default_filename')
arg['preset_list'] = LogicNormal.get_preset_list()
arg['date_after'] = date.today()
return render_template('%s_%s.html' % (package_name, sub), arg=arg)
elif sub == 'scheduler':
arg['save_path'] = ModelSetting.get('default_save_path')
arg['filename'] = ModelSetting.get('default_filename')
arg['preset_list'] = LogicNormal.get_preset_list()
arg['date_after'] = date.today()
return render_template('%s_%s.html' % (package_name, sub), arg=arg)
elif sub == 'log':
return render_template('log.html', package=package_name)
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
return render_template('sample.html', title='%s - %s' % (package_name, sub))
#########################################################
# For UI
#########################################################
@blueprint.route('/ajax/<sub>', methods=['POST'])
@login_required
def ajax(sub):
logger.debug('AJAX %s %s', package_name, sub)
try:
# ๊ณตํต ์์ฒญ
if sub == 'setting_save':
ret = ModelSetting.setting_save(request)
return jsonify(ret)
elif sub == 'scheduler':
go = request.form['scheduler']
logger.debug('scheduler:%s', go)
if go == 'true':
Logic.scheduler_start()
else:
Logic.scheduler_stop()
return jsonify(go)
elif sub == 'one_execute':
ret = Logic.one_execute()
return jsonify(ret)
elif sub == 'reset_db':
ret = Logic.reset_db()
return jsonify(ret)
# UI ์์ฒญ
elif sub == 'analysis':
url = request.form['url']
ret = LogicNormal.analysis(url)
return jsonify(ret)
elif sub == 'add_download':
ret = LogicNormal.download(request.form)
return jsonify(ret)
elif sub == 'list_scheduler':
ret = LogicNormal.get_scheduler()
return jsonify(ret)
elif sub == 'add_scheduler':
ret = LogicNormal.add_scheduler(request.form)
return jsonify(ret)
elif sub == 'del_scheduler':
ret = LogicNormal.del_scheduler(request.form['id'])
return jsonify(ret)
elif sub == 'del_archive':
LogicNormal.del_archive(request.form['id'])
return jsonify([])
except Exception as e:
logger.error('Exception:%s', e)
logger.error(traceback.format_exc())
#########################################################
# socketio
#########################################################
def socketio_emit(cmd, data):
socketio.emit(cmd, data, namespace='/%s' % package_name, broadcast=True)
``` |
{
"source": "joyfulflyer/billboard-reader",
"score": 3
} |
#### File: billboard-reader/grabber/load_songs.py
```python
import billboard
import os
import time
from . import database_connection
from . import saver
Session = database_connection.connect(os.environ['DATABASE'])
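# Walk forward week by week from the January 1st chart of the given year,
# saving each chart and sleeping between requests, until the chart dates
# leave that year or there is no next chart.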
def scrape_for_year(year):
yearString = str(year)
firstChart = billboard.ChartData('hot-100', date=yearString + '-01-01',
fetch=True)
chart = firstChart
while yearString in chart.date:
print("Saving date" + chart.date)
saver.save_chart(chart, Session)
time.sleep(10) # robots.txt requests 10 seconds per request
try:
chart = billboard.ChartData('hot-100', chart.nextDate)
except AttributeError:
return
```
#### File: billboard-reader/reader/getHot100ByDate.py
```python
import billboard
from flask import Flask
app = Flask(__name__)
# chart = billboard.ChartData('hot-100', date='1965-03-25', fetch=True, timeout=30)
@app.route("/seeChart/<chosenDate>")
def seeChart(chosenDate):
newChart = billboard.ChartData('hot-100', date=chosenDate,
fetch=True, timeout=30)
return str(newChart)
# uncomment for flask
# if __name__ == "__main__":
# app.run()
```
#### File: billboard-reader/reader/getSongsFlaskApp.py
```python
from flask import Flask
import billboardReader.songDatabase as db
import billboardReader.songSearch
from billboardReader.songDataSource import DataSource
import json
app = Flask(__name__)
@app.route("/partialSong/<input>")
def partialSong(input):
data = DataSource().getSongsWithPartialName(input)
return str(json.dumps(data))
def start():
app.run()
# run the Flask app when executed directly
if __name__ == "__main__":
app.run()
```
#### File: reader/models/song.py
```python
from . base import Base
from sqlalchemy import Integer, Column, String
class Song(Base):
__tablename__ = 'songs'
id = Column(Integer, primary_key=True)
name = Column(String)
artist = Column(String)
def __repr__(self):
return "Song: <id='%r', name='%r', artist='%r'>" % (self.id, self.name, self.artist)
```
#### File: billboard-reader/reader/songDatabase.py
```python
import sys
import billboard
import sqlite3
import datetime
import time
def connect():
    print('connecting to db')
    sys.stdout.flush()
    conn = sqlite3.connect('charts.db')
    return conn
def getCursor(conn):
    sys.stdout.flush()
    c = conn.cursor()
    return c
def dropTables(conn):
    c = conn.cursor()
    print('dropping table')
    c.execute(''' DROP TABLE IF EXISTS songs''')
    c.execute(''' DROP TABLE IF EXISTS charts ''')
    c.execute(''' DROP TABLE IF EXISTS entries ''')
    sys.stdout.flush()
def connectAndCreate():
    conn = connect()
    createTables(conn)
    return conn
def createTables(conn):
    c = conn.cursor()
    print("creating table")
    c.execute(''' CREATE TABLE IF NOT EXISTS entries
                  (id integer primary key,
                  name text,
                  artist text,
                  place integer,
                  peak_position integer,
                  last_position integer,
                  weeks_on_chart integer,
                  chart_id integer,
                  song_id integer) ''')
    c.execute(''' CREATE TABLE IF NOT EXISTS charts
                  (id integer primary key,
                  type text,
                  date_string text unique) ''')
    c.execute(''' CREATE TABLE IF NOT EXISTS songs
                  (id integer primary key,
                  name text,
                  artist text)''')
    conn.commit()
    sys.stdout.flush()
def getInitialChart():
    chart = billboard.ChartData('hot-100',
                                fetch=True, timeout=30)
    return chart
def scrapeDataFromChartIntoConnection(chart, conn):
    while chart.previousDate and "2017" not in chart.date:
        saveChart(chart, conn)
        chart = billboard.ChartData('hot-100', chart.previousDate, timeout=45)
def saveChart(chart, conn):
    c = conn.cursor()
    try:
        c.execute(''' INSERT INTO charts(type, date_string)
                      VALUES (?, ?) ''', ("hot-100", chart.date))
    except sqlite3.IntegrityError:
        return
    rowId = c.lastrowid
    for i, entry in enumerate(chart.entries):
        c.execute(''' INSERT INTO entries(
                      name, artist, place, peak_position,
                      last_position, weeks_on_chart, chart_id)
                      VALUES (?, ?, ?, ?, ?, ?, ?) ''',
                  (entry.title, entry.artist, entry.rank,
                   entry.peakPos, entry.lastPos, entry.weeks,
                   rowId))
    conn.commit()
def crawlEntriesForSongs(cursor):
    songs = []
    # SELECT DISTINCT gives the unique name/artist combos; fetchone() takes only the first one
    entries = cursor.execute(''' SELECT DISTINCT name, artist FROM entries ''').fetchone()
    entry = cursor.execute(''' SELECT * FROM entries WHERE name = ? AND artist = ? ''', (entries[0], entries[1])).fetchall()
    return entry
def doesDatabaseContainDate(date, conn):
    c = conn.cursor()
    countTuple = c.execute(''' SELECT count(*) from charts
                               WHERE date_string IS ? ''', (date,)).fetchone()
    count = countTuple[0]
    return count > 0
def scrapeDataForYear(year, conn, onYearDone):
    finalDate = getFinalDate(year)
    lastChart = billboard.ChartData('hot-100', date=finalDate,
                                    fetch=True, timeout=30)
    prevYear = getPreviousYear(year)
    chart = lastChart
    while chart.previousDate and prevYear not in chart.date:
        saveChart(chart, conn)
        onYearDone()
        time.sleep(10)
        chart = billboard.ChartData('hot-100', chart.previousDate, timeout=45)
def getFinalDate(year):
    now = datetime.date.today()
    delt = datetime.timedelta(weeks=1)
    then = now - delt
    if year == now.year:
        finalDate = "{}-{:0>2}-{:0>2}".format(then.year, then.month, then.day)
    else:
        finalDate = str(year) + '-12-31'
    return finalDate
def getPreviousYear(year):
    return str(int(year) - 1)
def hasData(conn):
    c = getCursor(conn)
    c.execute(''' SELECT count(*) FROM songs ''')
    data = c.fetchall()
    # count(*) always returns exactly one row, so check the count value itself
    return data[0][0] != 0
def getSavedSongsFromConnection(conn):
    c = conn.cursor()
    songs = c.execute('SELECT * FROM songs').fetchall()
    return songs
# Grabs all the songs in the database as a list.
# Assumes it's of an ok size to have in memory
def getAllSavedSongs():
    conn = connect()
    c = conn.cursor()
    songs = c.execute('SELECT * FROM songs').fetchall()
    conn.close()
    return songs
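# Hedged usage sketch (not part of the original module): create the schema in the
# local charts.db sqlite file and scrape one year of charts into it.
if __name__ == '__main__':
    connection = connectAndCreate()
    scrapeDataForYear(2016, connection, lambda: print('saved a chart'))
    connection.close()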
``` |
{
"source": "joyfulflyer/billboard-song-converter",
"score": 2
} |
#### File: joyfulflyer/billboard-song-converter/db_saver.py
```python
import asyncio
import functools
import logging
from sqlalchemy import desc, exc
from models.chart import Chart
from models.entry import Entry
from models.song import Song
from models.tiered_song import SONG_TYPE_BASIC, Tiered_Song
from models.tiered_song_entry import Tiered_Song_Entry
from models.tiered_song_link import Tiered_Song_Link
logger = logging.getLogger(__name__)
def create_song(name, artist, session):
    new_song = Song(name=name, artist=artist)
    return _commit_and_return(session, new_song)
async def create_song_async(name, artist, session):
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None, functools.partial(create_song, name, artist, session))
def create_tiered_song(session, name, artist, song_type):
    new_tiered_song = Tiered_Song(name=name,
                                  artist=artist,
                                  song_type=song_type)
    return _commit_and_return(session, new_tiered_song)
def create_tiered_song_link(session, entry, tiered_song):
    new_entry = Tiered_Song_Entry(entry_id=entry.id,
                                  tiered_song_id=tiered_song.id)
    return _commit_and_return(session, new_entry)
def create_link_between_tiered_songs(session, from_id, to_id):
    new_link = Tiered_Song_Link(from_id=from_id, to_id=to_id)
    return _commit_and_return(session, new_link)
def _commit_and_return(session, item):
    session.add(item)
    session.commit()
    return item
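# Hedged usage sketch (not part of the original module): persist one song, assuming
# the project's Session helper can reach the configured database.
if __name__ == '__main__':
    from Session import get_session
    session = get_session()()  # get_session returns a session maker; call it for a session
    saved = create_song('Example Song', 'Example Artist', session)
    print(saved)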
```
#### File: joyfulflyer/billboard-song-converter/entry_generators.py
```python
import db_retriever
from instrument import instrument
STEP = 5000
# Applies a step function to the entry func.
# entry_func returns lists (or generators) of entries.
# Iterate over this to get lists to iterate over.
# We mostly generate a step and an offset, then use those to fetch the lists of entries.
def _step_generator(session, entry_func, limit, batch_size):
    step = batch_size
    if limit < step:
        step = limit
    # step is the lower of limit and batch size
    offset = 0
    while offset <= limit:
        entries = entry_func(session=session, step=step, offset=offset)
        if len(entries) > 0:
            yield entries
        else:
            return
        offset = offset + step
def _calculate_step(limit, batch_size):
    step = batch_size
    if limit < step:
        step = limit
    return step
# Yields offsets, one per step
def _step_2(limit, step):
    offset = 0
    while offset <= limit:
        yield offset
        offset = offset + step
# Yields individual entries.
def entries_with_no_tiered_songs_singular(session,
                                          limit=float('inf'),
                                          batch_size=STEP):
    step = _calculate_step(limit, batch_size)
    for offset in _step_2(limit, step):
        if offset > limit:
            return
        entries = _get_entries_with_no_tiered_song_limited_offset(
            session, step, offset)
        if len(entries) == 0:
            return
        for entry in entries:
            yield entry
def _get_entries_with_no_tiered_song_limited_offset(session, step, offset):
    return db_retriever.get_entries_with_no_tiered_song(session,
                                                        limit=step,
                                                        offset=offset)
def entries_without_song_id_steps(session, limit=float('inf')):
    step = STEP
    if limit < step:
        step = limit
    offset = 0
    while offset + step <= limit:
        entries = db_retriever.get_entries_with_song_id_pagination(
            session, -1, limit=step, offset=offset)
        if len(entries) > 0:
            yield entries
        else:
            return
        offset = offset + step
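# Hedged usage sketch (not part of the original module): stream entries that do not
# yet have a tiered song; the Session helper and db_retriever behaviour are assumed.
if __name__ == '__main__':
    from Session import get_session
    session = get_session()()  # get_session returns a session maker; call it for a session
    for entry in entries_with_no_tiered_songs_singular(session, limit=100):
        print(entry)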
```
#### File: billboard-song-converter/models/tiered_song_link.py
```python
from sqlalchemy import Column, Integer, String, ForeignKey
from models.base import Base
from models.tiered_song import Tiered_Song
class Tiered_Song_Link(Base):
    __tablename__ = 'tiered_song_links'
    id = Column(Integer, primary_key=True)
    from_id = Column(Integer,
                     ForeignKey("%s.id" % (Tiered_Song.__tablename__)))
    to_id = Column(Integer, ForeignKey("%s.id" % (Tiered_Song.__tablename__)))
    def __repr__(self):
        return "Tiered song link: <id=%r, from id=%r, to id=%r>" % \
            (self.id, self.from_id, self.to_id)
```
#### File: joyfulflyer/billboard-song-converter/Session.py
```python
import database_connection
from config import Config as config
_session_makers = {}
def get_session(url=None, timeout=0):
    if url is None:
        url = database_connection.create_url_from_parts(
            username=config.username,
            password=config.password,
            host=config.host,
            dbname=config.db_name)
    if url not in _session_makers:
        Session = database_connection.connect(url)
        if timeout and not database_connection.wait_for_db_connection(
                Session, timeout):
            raise Exception('Unavailable')
        _session_makers[url] = Session
    return _session_makers[url]
# Proxy for the more descriptive name
def get_session_maker(url=None):
    return get_session(url)
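# Hedged usage sketch (not part of the original module): assumes the Config values
# point at a reachable database.
if __name__ == '__main__':
    maker = get_session_maker()
    session = maker()
    session.close()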
``` |
{
"source": "joyfulflyer/billboard-spotify",
"score": 3
} |
#### File: billboard-spotify/models/song.py
```python
from .base import Base
from sqlalchemy import Integer, Column, String, ForeignKey
class Song(Base):
    __tablename__ = 'songs'
    id = Column(Integer, primary_key=True)
    name = Column(String(256), nullable=False)
    artist = Column(String(128), nullable=False)
    spotify_id = Column(Integer, nullable=True)
    search_term = Column(String(256), nullable=True)
    search_results = Column(Integer, nullable=True)
    def __repr__(self):
        return "Song: <id=%r, name=%r, spotify_id=%r>" % \
            (self.id, self.name, self.spotify_id)
```
#### File: joyfulflyer/billboard-spotify/search.py
```python
from spotify_auth import auth
from urllib import parse
import json
def search(track_name, artist, type='track'):
    # Build the query first, then URL-encode it before placing it in the request URL
    query = "artist:{} track:{}".format(artist, track_name)
    parsed = parse.quote_plus(query)
    response = auth.get(
        'https://api.spotify.com/v1/search?q={}&type={}'.format(parsed, type))
    response_object = json.loads(response.text)
    return response_object
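# Hedged usage sketch (not part of the original module): assumes spotify_auth.auth
# is an authenticated, requests-style session holding a valid token.
if __name__ == '__main__':
    results = search('Hey Jude', 'The Beatles')
    print(results.get('tracks', {}).get('total'))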
``` |
{
"source": "joyfulflyer/billboard-viewer",
"score": 3
} |
#### File: reader/elastic/elastic.py
```python
import requests
import json
s = requests.Session()
DEFAULT_URL = "http://localhost:9200/"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = "9200"
DEFAULT_SCHEME = "http"
SEARCH_PATH = "song/_search/"
host = DEFAULT_HOST
scheme = DEFAULT_SCHEME
port = DEFAULT_PORT
def create_url():
    return f"{scheme}://{host}:{port}/"
def search_for_song(query):
    data = {"query": {"match": {"name": {"query": query}}}}
    return s.post(create_url() + SEARCH_PATH, json=data).json()
def results_for_song_search(query):
    response = search_for_song(query)
    hits = response['hits']
    return hits['hits']
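# Hedged usage sketch (not part of the original module): assumes an Elasticsearch
# node on localhost:9200 with a "song" index whose documents carry a "name" field.
if __name__ == '__main__':
    for hit in results_for_song_search('yesterday'):
        print(hit['_source'].get('name'))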
```
#### File: reader/models/chart.py
```python
from .base import Base
from sqlalchemy import Integer, Column, String
class Chart(Base):
    __tablename__ = 'charts'
    id = Column(Integer, primary_key=True)
    chart_type = Column('type', String(128))
    date_string = Column(String(128))
    next_chart_date = Column(String(128))
    def get_year(self):
        return self.date_string.split('-')[0]
    def __repr__(self):
        return "Chart: <id=%r, type=%r, date=%r>" % (self.id, self.chart_type,
                                                     self.date_string)
``` |
{
"source": "joyghosh/tiny",
"score": 3
} |
#### File: src/mapper/mock.py
```python
mock_cache = {}
def insert(key, value):
    # Only insert if the key is not already cached; always return the cached value
    if key not in mock_cache:
        mock_cache[key] = value
    return mock_cache[key]
def remove(key):
    if key in mock_cache:
        del mock_cache[key]
def get(key):
    return mock_cache[key]
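# Hedged usage sketch (not part of the original module): exercise the in-memory mock cache.
if __name__ == '__main__':
    insert('abc123', 'https://example.com/some/long/url')
    print(get('abc123'))
    remove('abc123')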
``` |