{
"source": "Johnson-Su/Certi-Chain",
"score": 4
} |
#### File: Certi-Chain/LocalBlockChain/Blockchain.py
```python
import Block as bl
import time
class Blockchain:
DIFFICULTY = 4
def __init__(self):
self.chain = []
self.unconfirmed_transactions = [] # data not yet validated
def create_genesis_block(self):
genesis_block = bl.Block(0, [], 0, "0")
genesis_block.hash = genesis_block.compute_hash()
self.chain.append(genesis_block)
@property
def last_block(self):
"""
The last block in the chain, i.e. the most recently added block
"""
return self.chain[-1]
@staticmethod
def proof_of_work(block):
"""
Proof of work adds a constraint to a block's hash (a required number
of leading zeros), which makes finding a valid hash computationally
expensive.
"""
block.nonce = 0
computed_hash = block.compute_hash()
while (not computed_hash.startswith('0' * Blockchain.DIFFICULTY)):
block.nonce += 1
computed_hash = block.compute_hash()
return computed_hash
def add_block(self, block, proof):
"""
To add a block to the blockchain, we must check that the block is in
the correct chronological order (its previous_hash must match the hash
of the last block) and that its data has not been tampered with.
"""
previous_hash = self.last_block.hash
# is the block in the right chronological order?
if (previous_hash != block.previous_hash):
return False
# has the block been tampered with
if (not Blockchain.is_valid_proof(block, proof)):
return False
# if the above constraints are satisfied, add the block
block.hash = proof
self.chain.append(block)
return True
@classmethod
def is_valid_proof(cls, block, block_hash):
# does the hash satisfy the constraints?
# does the hash of the block match the proof provided?
return (block_hash.startswith('0' * Blockchain.DIFFICULTY) and
block_hash == block.compute_hash())
def add_transaction(self, transaction):
# Add a transaction to the list
self.unconfirmed_transactions.append(transaction)
def mine(self):
# is the list of unconfirmed transactions empty?
if (not self.unconfirmed_transactions):
return False
# get the last block to determine the index and previous_hash of
# the new block
last_block = self.last_block
new_block = bl.Block(last_block.index + 1,
self.unconfirmed_transactions,
time.time(),
last_block.hash)
# do work to find a valid hash
proof = self.proof_of_work(new_block)
self.add_block(new_block, proof)
# reset the transactions
self.unconfirmed_transactions = []
return True
@classmethod
def check_chain_validity(cls, chain):
result = True
previous_hash = "0"
# Iterate through every block
for block in chain:
block_hash = block.hash
# remove the hash field in order to compute it again
delattr(block, "hash")
if not cls.is_valid_proof(block, block_hash) or \
previous_hash != block.previous_hash:
result = False
break
block.hash, previous_hash = block_hash, block_hash
return result
```
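Block.py is not included in this dump, so the snippet below is a minimal, self-contained sketch of the proof-of-work constraint that `Blockchain.proof_of_work` enforces; the stand-in `Block` class and its `compute_hash` method are assumptions modeled only on how Blockchain.py uses them.
```python
# Hypothetical stand-in for the missing Block class, mirroring only the
# attributes and compute_hash() that Blockchain.py relies on.
import json
import time
from hashlib import sha256

class Block:
    def __init__(self, index, transactions, timestamp, previous_hash, nonce=0):
        self.index = index
        self.transactions = transactions
        self.timestamp = timestamp
        self.previous_hash = previous_hash
        self.nonce = nonce

    def compute_hash(self):
        # hash every field of the block in a deterministic order
        return sha256(json.dumps(self.__dict__, sort_keys=True).encode()).hexdigest()

DIFFICULTY = 4

def proof_of_work(block):
    # brute-force the nonce until the hash has DIFFICULTY leading zeros,
    # exactly as Blockchain.proof_of_work does above
    block.nonce = 0
    computed = block.compute_hash()
    while not computed.startswith("0" * DIFFICULTY):
        block.nonce += 1
        computed = block.compute_hash()
    return computed

genesis = Block(0, [], 0, "0")
genesis.hash = genesis.compute_hash()
nxt = Block(1, [{"name": "demo"}], time.time(), genesis.hash)
print(proof_of_work(nxt))  # a hash starting with "0000"
```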
#### File: Certi-Chain/LocalBlockChain/Node_Server.py
```python
from flask import Flask, request
import requests, json, time, re
import Blockchain, Block
# init flask
app = Flask(__name__)
# init blockchain
blockchain = Blockchain.Blockchain()
blockchain.create_genesis_block()
# The result of the most recent /check lookup
tx_search = None
# List of accepted accounts
accounts = ["1001"]
# Running number assigned to transactions in the block currently being mined
tx_number = 1
# Set of addresses of peers within the network
peers = set()
@app.route('/new_transaction', methods=['POST'])
def new_transaction():
"""
Defines the endpoint to create a new transaction to add to the chain.
Checks if a transaction has the valid information before adding it
to the list of transactions to add
"""
global tx_number
# Get the tx data
tx_data = request.get_json()
# Required information
required_fields = ["name", "description", "materials", "location",
"brand", "year", "journey", "key"]
for field in required_fields:
# If information is missing, do not add this transaction
if (not tx_data.get(field)):
return "Invalid transaction data", 404
if (not tx_data["key"] in accounts):
return "Invalid user", 405
# Time tx was made
tx_data["timestamp"] = time.time()
# Block tx will belong to
tx_data["block_num"] = blockchain.last_block.index + 1
# the number the tx has within the block
tx_data["tx_num"] = tx_number
# increment tx number
tx_number += 1
blockchain.add_transaction(tx_data)
return "Success", 201
@app.route('/chain', methods=['GET'])
def get_chain():
"""
The endpoint for the full chain data.
"""
# Data for every block in the chain
chain_data = []
for block in blockchain.chain:
chain_data.append(block.__dict__)
return json.dumps({"length": len(chain_data),
"chain": chain_data,
"peers": list(peers)})
@app.route('/pending_tx', methods=["GET"])
def get_pending_tx():
"""
The endpoint for the list of pending transactions.
"""
return json.dumps(blockchain.unconfirmed_transactions)
@app.route('/check', methods=['POST'])
def add_check():
"""
Check if the given transaction id is in the blockchain. Set the /check
endpoint to have the information of the given transaction id, if it exists.
Record an error if no transaction exists with the given id, or if the
id is not in the correct format.
"""
global tx_search
# Get the tx id
tx_data = request.get_json()
# If there is no id
if (not tx_data.get("tx_id")):
tx_search = {"error": "inv_tx_id"}
return "Invalid", 404
# If the id is not in the correct format
tx_id = tx_data["tx_id"]
if not re.match("[0-9]+b[0-9]+t", tx_id):
tx_search = {"error": "inv_tx_id"}
return "Invalid", 404
# Parse out the block number from the id
block_num = tx_id[0:tx_id.index("b")]
# Parse out the tx number from the id
tx_num = tx_id[tx_id.index("b") + 1 : tx_id.index("t")]
# Find the block with the given block number
for block in blockchain.chain:
if block.index == int(block_num):
# Find the transaction
for tx in block.transactions:
if tx["tx_num"] == int(tx_num):
# Grab the relevant info
tx_search = tx
tx_search["error"] = "None"
return "Success", 201
# No transaction exists with the given id
tx_search = {"error": "no_tx"}
return "Cannot find", 400
@app.route('/checkout', methods=['GET'])
def output_validity():
"""
The endpoint for the needed product info.
"""
return json.dumps(tx_search)
# Now establish decentralization and consensus
# Endpoint to add new peers
@app.route('/register_node', methods=['POST'])
def register_new_peers():
"""
Add a new peer to the list.
"""
# The host address to the peer node
node_address = request.get_json()["node_address"]
if not node_address:
return "Invalid data", 400
# Add the node to the peer list
peers.add(node_address)
# Return the blockchain to the newly registered node so that it can sync
return get_chain()
@app.route('/register_with', methods=['POST'])
def register_with_existing_node():
"""
Internally calls the `register_node` endpoint to
register current node with the remote node specified in the
request, and sync the blockchain as well with the remote node.
"""
node_address = request.get_json()["node_address"]
if not node_address:
return "Invalid data", 400
data = {"node_address": request.host_url}
headers = {'Content-Type': "application/json"}
# Make a request to register with remote node and obtain information
response = requests.post(node_address + "/register_node",
data=json.dumps(data), headers=headers)
if response.status_code == 200:
global blockchain
global peers
# update chain and the peers
chain_dump = response.json()['chain']
blockchain = create_chain_from_dump(chain_dump)
peers.update(response.json()['peers'])
return "Registration successful", 200
else:
# if something goes wrong, pass it on to the API response
return response.content, response.status_code
def create_chain_from_dump(chain_dump):
"""
Create a blockchain from the JSON dump of a chain (initialize this node's
copy of the parent blockchain).
"""
generated_blockchain = Blockchain.Blockchain()
generated_blockchain.create_genesis_block()
for idx, block_data in enumerate(chain_dump):
if idx == 0:
continue # skip genesis block
block = Block.Block(block_data["index"],
block_data["transactions"],
block_data["timestamp"],
block_data["previous_hash"],
block_data["nonce"])
proof = block_data['hash']
added = generated_blockchain.add_block(block, proof)
if not added:
raise Exception("The chain dump is tampered!!")
return generated_blockchain
def consensus():
"""
Consensus algorithm. If a longer valid chain is
found, our chain is replaced with it.
"""
global blockchain
longest_chain = None
current_len = len(blockchain.chain)
for node in peers:
response = requests.get(f'{node}chain')
length = response.json()['length']
chain = response.json()['chain']
if length > current_len:
    # rebuild Block objects from the dump; check_chain_validity expects
    # Block instances rather than the raw dicts returned by /chain
    candidate = create_chain_from_dump(chain)
    if blockchain.check_chain_validity(candidate.chain):
        # Longer valid chain found!
        current_len = length
        longest_chain = candidate
if longest_chain:
blockchain = longest_chain
return True
return False
@app.route('/add_block', methods=['POST'])
def verify_and_add_block():
"""
Endpoint to accept a block mined by another node; the block's proof is
verified before it is appended to the local chain.
"""
block_data = request.get_json()
block = Block.Block(block_data["index"],
block_data["transactions"],
block_data["timestamp"],
block_data["previous_hash"],
block_data["nonce"])
proof = block_data['hash']
added = blockchain.add_block(block, proof)
if not added:
return "The block was discarded by the node", 400
return "Block added to the chain", 201
def announce_new_block(block):
"""
Announce to the network once a block has been mined.
Other nodes can simply verify the proof of work and add it to their
respective chains.
"""
for peer in peers:
url = f"{peer}add_block"
headers = {'Content-Type': "application/json"}
requests.post(url,
data=json.dumps(block.__dict__, sort_keys=True),
headers=headers)
@app.route('/mine', methods=['GET'])
def mine_unconfirmed_transactions():
"""
Mine all pending transactions and announce to all nodes there is a new
block.
"""
result = blockchain.mine()
global tx_number
tx_number = 1
if not result:
return "No transactions to mine"
else:
# Making sure we have the longest chain before announcing to the network
chain_length = len(blockchain.chain)
consensus()
if chain_length == len(blockchain.chain):
# announce the recently mined block to the network
announce_new_block(blockchain.last_block)
return f"Block #{blockchain.last_block.index} is mined."
# if __name__ == "__main__":
# app.run(debug=True, port=8000)
``` |
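A hedged client-side sketch of how these Flask endpoints might be exercised with `requests`, assuming a node is running locally on port 8000 (per the commented-out `app.run` call above); all field values and the transaction id are illustrative only.
```python
import requests

NODE = "http://127.0.0.1:8000"  # assumed local node address

tx = {
    "name": "T-shirt",
    "description": "Organic cotton tee",
    "materials": "cotton",
    "location": "Toronto",
    "brand": "DemoBrand",
    "year": "2020",
    "journey": "farm -> mill -> store",
    "key": "1001",  # must be one of the accepted accounts
}

# Queue a transaction, mine it into a block, then look it up by its id.
print(requests.post(f"{NODE}/new_transaction", json=tx).status_code)  # 201
print(requests.get(f"{NODE}/mine").text)                              # Block #1 is mined.
print(requests.post(f"{NODE}/check", json={"tx_id": "1b1t"}).text)    # Success
print(requests.get(f"{NODE}/checkout").json())                        # the stored transaction record
```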
{
"source": "johnsons-ux/100-days-of-python",
"score": 4
} |
#### File: 100-days-of-python/Day 12 - Guessing Number Games/day12.py
```python
from art import logo
import random
from replit import clear
# Allow the player to submit a guess for a number between 1 and 100.
def choose_level():
global invalid_level
difficulty = input("Before we start, what difficulty would you like - easy or hard? For easy 🥸, type 'easy' for hard 🥵, type 'hard'. Goodluck! 🎊🎉 ").lower().strip()
if difficulty == 'easy':
invalid_level = True
return 'easy'
elif difficulty == 'hard':
invalid_level = True
return 'hard'
else:
print("Please enter a valid difficulty. 👀 : ")
invalid_level = False
invalid_level = False
game_over = False
def continue_game():
global invalid_level
global game_over
clear()
random_number = random.randint(1,100)
while game_over is not True:
#Welcome the player to the game using ASCII art and a nice welcome message. Player has to choose a difficulty.
print(logo)
print("Welcome to Johnsons' Number Guessing Game!\n I'm thinking of a number between 1 and 100. Can you guess it? ")
print(f"Pssst the number is {random_number}.")
# Loop until a valid difficulty is chosen, to avoid errors from an invalid input.
while invalid_level is not True:
level = choose_level()
if level == 'easy':
lives = 10
elif level == 'hard':
lives = 5
else:
    continue
print(f"You have {lives} attempts remaining to guess the number.")
stil_play = True
while stil_play is True:
user_guess = int(input("Make a guess: "))
if lives > 1 and user_guess != random_number:
if user_guess > random_number:
print("Too high.")
lives -= 1
print(f"You have {lives} attempts remaining to guess the number.")
elif user_guess < random_number:
print("Too low.")
lives -= 1
print(f"You have {lives} attempts remaining to guess the number.")
elif lives >= 1 and user_guess == random_number:
print("You win! 🤯")
stil_play = False
game_over = True
else:
print("You've run out of guesses. You lose. 😭 ")
stil_play = False
game_over = True
if game_over is True:
play_again = input("Would you like to play again? Y for yes or N for no. 🤓 ").lower().strip()
if play_again == 'y':
invalid_level = False
game_over = False
continue_game()
elif play_again == 'n':
game_over = True
continue_game()
```
#### File: Day_17/quiz-game-start/question_model.py
```python
class Question:
def __init__(self, text, answer):
self.text = text
self.answer = answer
new_q = Question("lkajsdkf", "False")
```
#### File: Day_25/us-states-game-start/main.py
```python
import turtle
screen = turtle.Screen()
screen.title("U.S. States Game")
image = "blank_states_img.gif"
screen.addshape(image)
turtle.shape(image)
# The following code is used to get the x and y coordinates of the states in the map image.
# def get_mouse_click_coor(x, y):
# print(x, y)
#
#
# turtle.onscreenclick(get_mouse_click_coor)
# turtle.mainloop()
answer_state = screen.textinput(title="Guess the State", prompt="What's another state's name?")
print(answer_state)
screen.exitonclick()
``` |
{
"source": "johnson-tay09/data-structures-and-algorithms",
"score": 4
} |
#### File: fifo_animal_shelter/fifo_animal_shelter/fifo_animal_shelter.py
```python
# InvalidOperationError is raised below but was not defined or imported in
# this file; define it here so the module is self-contained.
class InvalidOperationError(Exception):
    pass
class Node:
def __init__(self, value, next_=None):
self.value = value
self.next = next_
class Queue:
def __init__(self, front=None, back=None):
self.front = front
self.back = back
def enqueue(self, value):
node = Node(value)
# is the list empty?
if not self.front:
self.front = node
self.back = node
return
# current back next value becomes new node
self.back.next = node
# the new back is the new node
self.back = node
def peek(self):
if not self.front:
raise InvalidOperationError(
"Method not allowed on empty collection")
return self.front.value
def is_empty(self):
# return True when self.front is falsy (i.e. the queue is empty)
return not self.front
def dequeue(self):
if not self.front:
raise InvalidOperationError(
"Method not allowed on empty collection")
# save front node into a variable
target_node = self.front
# reassign front to the old front.next
self.front = target_node.next
# return stored node
return target_node.value
class AnimalShelter:
# initialize the class with one queue each for cats and dogs
def __init__(self):
self.dog_q = Queue()
self.cat_q = Queue()
# add dogs to dog queue and cats to cat queue or return null
def enqueue(self, value):
if value == "dog":
self.dog_q.enqueue(value)
return
elif value == "cat":
self.cat_q.enqueue(value)
return
return "Null"
# return preferred animal
def dequeue(self, preference):
if preference.lower == "dog":
return self.dog_q.dequeue()
elif preference.lower == "cat":
return self.cat_q.dequeue()
return "Null"
```
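A short usage sketch for the shelter above (the classes defined in this file are assumed to be in scope); note that this implementation enqueues the species string itself rather than an animal object.
```python
shelter = AnimalShelter()
shelter.enqueue("dog")
shelter.enqueue("cat")
shelter.enqueue("dog")

print(shelter.dequeue("dog"))   # "dog" (the oldest dog leaves first)
print(shelter.dequeue("cat"))   # "cat"
print(shelter.enqueue("bird"))  # "Null" (only dogs and cats are accepted)
```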
#### File: code_challenges/hashtable/test_hashtable.py
```python
from hashtable import Hashtable
def test_create():
hashtable = Hashtable()
assert hashtable
def test_predictable_hash():
hashtable = Hashtable()
initial = hashtable._hash('spam')
secondary = hashtable._hash('spam')
assert initial == secondary
def test_in_range_hash():
hashtable = Hashtable()
actual = hashtable._hash('spam')
# assert actual >= 0
# assert actual < hashtable._size
assert 0 <= actual < hashtable._size
def test_same_hash():
hashtable = Hashtable()
initial = hashtable._hash('listen')
secondary = hashtable._hash('silent')
assert initial == secondary
def test_different_hash():
hashtable = Hashtable()
initial = hashtable._hash('glisten')
secondary = hashtable._hash('silent')
assert initial != secondary
def test_get_apple():
hashtable = Hashtable()
hashtable.set("apple", "Used for apple sauce")
actual = hashtable.get("apple")
expected = "Used for apple sauce"
assert actual == expected
def test_get_silent_and_listen():
hashtable = Hashtable()
hashtable.set('listen', 'to me')
hashtable.set('silent', 'so quiet')
assert hashtable.get('listen') == 'to me'
assert hashtable.get('silent') == 'so quiet'
```
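The `hashtable` module itself is not part of this dump; the following is a minimal sketch of one implementation that would satisfy the tests above, using an additive character-code hash (so anagrams such as 'listen' and 'silent' land in the same bucket) and separate chaining for collisions. The bucket layout and default size are assumptions.
```python
class Hashtable:
    def __init__(self, size=1024):
        self._size = size
        self._buckets = [[] for _ in range(size)]

    def _hash(self, key):
        # the sum of character codes keeps anagrams in the same bucket
        return sum(ord(ch) for ch in key) % self._size

    def set(self, key, value):
        bucket = self._buckets[self._hash(key)]
        for pair in bucket:
            if pair[0] == key:  # overwrite an existing key
                pair[1] = value
                return
        bucket.append([key, value])

    def get(self, key):
        for k, v in self._buckets[self._hash(key)]:
            if k == key:
                return v
        return None
```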
#### File: code_challenges/pascals-triangle/test_pascal.py
```python
from pascals_triangle import printPascal
def test_zero():
actual = printPascal(0)
expected = "Not a valid input"
assert actual == expected
def test_one():
actual = printPascal(1)
expected = print(1)
assert actual == expected
# def test_two():
# actual = printPascal(2)
# expected = print(1), print("1 1")
# assert actual == expected
# def test_many():
# actual = printPascal(12)
# expected = 1
# assert actual == expected
```
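`pascals_triangle` is also not included here; the sketch below is one possible `printPascal` consistent with these tests: non-positive input returns an error string, otherwise the rows are printed and the function returns None (which is why `test_one` compares against the return value of `print`).
```python
def printPascal(n):
    if n <= 0:
        return "Not a valid input"
    row = [1]
    for _ in range(n):
        print(" ".join(str(v) for v in row))
        # build the next row by summing adjacent pairs of the current row
        row = [1] + [row[i] + row[i + 1] for i in range(len(row) - 1)] + [1]
```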
#### File: stack_and_queue/stacks_and_queues/stacks_and_queues.py
```python
class InvalidOperationError(Exception):
pass
class Node:
def __init__(self, value, next_=None):
self.value = value
self.next = next_
class Stack:
def __init__(self):
self.top = None
def push(self, value):
node = Node(value)
# new node on top of stack gets next value of previous top stack node
node.next = self.top
# new node becomes top
self.top = node
def pop(self):
if self.top:
value = self.top.value
self.top = self.top.next
return value
raise InvalidOperationError("Method not allowed on empty collection")
def is_empty(self):
if self.top:
return False
return True
# (return not self.top) most pythonic
def peek(self):
if not self.top:
raise InvalidOperationError(
"Method not allowed on empty collection")
return self.top.value
class Queue:
def __init__(self, front=None, back=None):
self.front = front
self.back = back
def enqueue(self, value):
node = Node(value)
# is the list empty?
if not self.front:
self.front = node
self.back = node
return
# current back next value becomes new node
self.back.next = node
# the new back is the new node
self.back = node
def peek(self):
if not self.front:
raise InvalidOperationError(
"Method not allowed on empty collection")
return self.front.value
def is_empty(self):
# return boolean true as self.front is falsy
return not self.front
def dequeue(self):
if not self.front:
raise InvalidOperationError(
"Method not allowed on empty collection")
# save front node into a variable
target_node = self.front
# reassign front to the old front.next
self.front = target_node.next
# return stored node
return target_node.value
```
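A quick usage sketch for the Stack and Queue above (classes from this file assumed in scope), including the empty-collection error path.
```python
stack = Stack()
stack.push("a")
stack.push("b")
print(stack.pop())       # "b" (last in, first out)
print(stack.peek())      # "a"

queue = Queue()
queue.enqueue(1)
queue.enqueue(2)
print(queue.dequeue())   # 1 (first in, first out)
print(queue.is_empty())  # False

try:
    Stack().pop()
except InvalidOperationError as exc:
    print(exc)           # Method not allowed on empty collection
```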
#### File: code_challenges/tree-intersection/tree_intersection.py
```python
class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class BinaryTree:
def __init__(self):
self.root = None
def pre_order(self):
# return list of values ordered correctly
values = []
def traverse(root):
if not root:
return
values.append(root.value)
traverse(root.left)
traverse(root.right)
traverse(self.root)
print(values)
return values
def in_order(self):
values = []
def traverse(root):
if not root:
return
traverse(root.left)
values.append(root.value)
traverse(root.right)
traverse(self.root)
print(values)
return values
def post_order(self):
values = []
def traverse(root):
if not root:
return
traverse(root.left)
traverse(root.right)
# print(root.value)
values.append(root.value)
traverse(self.root)
print(values)
return values
class BinarySearchTree(BinaryTree):
def add(self, value):
node = Node(value)
# check if there is no root, if so make the new node root.
if not self.root:
self.root = node
return
def traverse(root):
if value < root.value:
# check if the node already has a left, if not add our new node
if not root.left:
root.left = node
else:
# keep traversing
traverse(root.left)
else:
# check if the node already has a right, if not add our new node
if not root.right:
root.right = node
else:
# keep traversing
traverse(root.right)
traverse(self.root)
    def contains(self, value):
        def search(root):
            if value == root.value:
                return True
            if value < root.value:
                if not root.left:
                    return False
                else:
                    # keep searching down the left subtree
                    return search(root.left)
            else:
                if not root.right:
                    return False
                else:
                    # keep searching down the right subtree
                    return search(root.right)
        # an empty tree contains nothing
        if not self.root:
            return False
        return search(self.root)
def tree_intersection(self, tree_two):
# array to hold the shared values
shared_values = []
# traverse the tree
def traversal(root):
if not root:
return
# use contains method to see if tree two has current value
if tree_two.contains(root.value):
# add value in common to array
shared_values.append(root.value)
# check left branch
traversal(root.left)
# check right branch
traversal(root.right)
traversal(self.root)
return shared_values
``` |
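A small usage sketch for `tree_intersection` (classes above assumed in scope): values present in both binary search trees are collected in the pre-order of the first tree.
```python
tree_a = BinarySearchTree()
tree_b = BinarySearchTree()
for v in [10, 5, 20, 3]:
    tree_a.add(v)
for v in [20, 7, 10, 1]:
    tree_b.add(v)

# 10 and 20 appear in both trees; 5 and 3 do not
print(tree_a.tree_intersection(tree_b))  # [10, 20]
```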
{
"source": "JohnsonWang0319/MystanCodeProjects",
"score": 4
} |
#### File: stanCode_Projects/boggle_game_solver/boggle.py
```python
import time
# This is the file name of the dictionary txt file
# we will be checking if a word exists by searching through it
FILE = 'dictionary.txt'
# Define the Boggle size
LEN_COL = 4
LEN_ROW = 4
# Global Variables
count = 0
boggle = []
now_word_lst = []
class TrieNode:
"""Node in the Trie Structure"""
def __init__(self, char: str):
# the character stored in the structure
self.char = char
# whether the end of a word or not
self.end_of_word = False
# indicate how many times a word is inserted
self.counter = 0
# a dictionary of child nodes
# keys are characters, values are nodes
self.children = {}
class Trie:
"""Trie Object"""
def __init__(self):
self.output_list = []
# the trie root node does not store any character
self.root = TrieNode('')
def insert(self, word):
node = self.root
for char in word:
if char in node.children:
node = node.children[char]
else:
# if a character is not found in trie, create a new node
new_node = TrieNode(char)
node.children[char] = new_node
node = new_node
# mark the end of a word
node.end_of_word = True
node.counter += 1
def find(self, node, prefix):
"""
:param node: TrieNode, the node to start with
:param prefix: string, for tracing a word while traversing the trie structure
"""
if node.end_of_word:
self.output_list.append((prefix, node.counter))
for child_value in node.children.keys():
self.find(node.children[child_value], prefix + child_value)
def query(self, x: str) -> list:
"""
:param x: string, an input prefix
:return: list, retrieve all words stored in the trie with input prefix
"""
node = self.root
for char in x:
if char in node.children:
node = node.children[char]
else:
return []
self.find(node, x)
return sorted(self.output_list, key=lambda z: z[1], reverse=True)
def search(self, word: str):
"""
:param word: string, a complete word
:return: bool, whether this word in the trie structure or not
"""
node = self.root
for char in word:
if char not in node.children:
return False
node = node.children[char]
return node.end_of_word
def main():
"""
Use the Trie structure to find the matched word between dictionary and boggle matrix
"""
print(f'# of Column = {LEN_COL}.')
print(f'# of Row = {LEN_ROW}.')
start = time.time()
t = Trie()
build_boggle()
for row in range(LEN_ROW):
for column in range(LEN_COL):
dict_lst = read_dictionary()
for i in range(len(dict_lst)):
word = dict_lst[i]
if len(word) >= 4 and word[0] == boggle[row][column]:
t.insert(word)
current = t.root
dfs(row, column, [], current.children, '', row, column)
end = time.time()
print(f'There are {count} words in total.')
print('----------------------------------')
print(f'The speed of your boggle algorithm: {end - start} seconds.')
def dfs(row, column, visited, trie_children, now_word, original_row, original_column):
"""
:param row: int, the row index (of the neighbor) visited in this call
:param column: int, the column index (of the neighbor) visited in this call
:param visited: list, [(row, column)], the indices already visited while traversing the boggle
:param trie_children: dict, children of the current trie node to compare against during traversal
:param now_word: str, updated during the recursion; if it marks end_of_word, we found an answer
:param original_row: the row visited in the first round, used to prune the trie structure
:param original_column: the column visited in the first round, used to prune the trie structure
:return:
"""
global count
t = Trie()
dict_lst = read_dictionary()
if (row, column) in visited:
return
letter = boggle[row][column]
original_letter = boggle[original_row][original_column]
visited.append((row, column))
# when now_word length >= 3, start to generate trie structure of the dictionary
if len(now_word) >= 3:
for i in range(len(dict_lst)):
word = dict_lst[i]
if len(word) >= 4 and word[0] == original_letter:
t.insert(word)
# if the letter is in the trie_children, keep finding the next children.
if letter in trie_children and len(now_word) <= LEN_ROW + 2:
now_word += letter
current = trie_children[letter]
# The same word may be formed by different paths through the boggle matrix,
# so check whether this matched word is already in now_word_lst
if len(now_word) >= 4 and now_word not in now_word_lst:
# Use t.search to determine whether now_word is a complete word in trie structure or not
if t.search(now_word):
now_word_lst.append(now_word)
print(f'Found \"{now_word}\"')
count += 1
# Recurse into each neighbor to achieve a depth-first traversal
neighbors = get_neighbors(row, column)
for n in neighbors:
dfs(n[0], n[1], visited[:], current.children, now_word, original_row, original_column)
def build_boggle():
"""
Ask user to input the boggle matrix, user can determine the matrix size by controlling the global variable LEN_ROW
and LEN_COL.
:return: list, [[X, X, X, X],[X, X, X, X],[X, X, X, X],[X, X, X, X]]
"""
count_input = 0
while count_input != LEN_ROW:
row = input(f'{count_input + 1} row of letters: ').split(' ')
if len(row) != LEN_COL:
print('Illegal input.')
exit()
else:
boggle.append(row)
count_input += 1
return boggle
def read_dictionary():
"""
This function reads file "dictionary.txt" stored in FILE
and appends words in each line into a Python list
"""
# dict_list = ['apple', 'banana', 'cat', 'dog' ......]
dict_list = []
with open(FILE, 'r') as f:
for line in f:
line = line.strip('\n')
dict_list.extend(line.split())
return dict_list
def get_neighbors(row, column):
"""
:param row: int, the row index
:param column: int, the column index
:return: list, all the neighbors of a specific index, [(),(),(),(),(),(),(),()]
"""
# Return the neighbors of the given coordinates
neighbors = []
neighbors_index = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
for neighbor in neighbors_index:
new_row = row + neighbor[0]
new_column = column + neighbor[1]
if new_row >= LEN_ROW or new_column >= LEN_COL or new_row < 0 or new_column < 0:
continue
neighbors.append((new_row, new_column))
return neighbors
if __name__ == '__main__':
main()
```
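A standalone sketch of the Trie used above, independent of the boggle board and dictionary file: insert a few words, then query by prefix and test full-word membership.
```python
t = Trie()
for w in ["tare", "tars", "tart", "star"]:
    t.insert(w)

print(t.search("tart"))  # True  (a complete inserted word)
print(t.search("tar"))   # False (prefix only, not marked end_of_word)
print(t.query("tar"))    # [('tare', 1), ('tars', 1), ('tart', 1)]
```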
#### File: stanCode_Projects/hangman_game/similarity.py
```python
def main():
dna = input('Please give me a DNA sequence to search: ')
match = input('What DNA sequence would you like to match? ')
dna = dna.upper()
match = match.upper()
ans = find_match(dna, match)
print('The best match is ' + str(ans) + '.')
def find_match(dna, match):
'''
:param dna: str, the full DNA sequence entered by the user.
:param match: str, the short DNA fragment to search for.
:return: str, the substring of dna most similar to match.
'''
score_list = [] # Through a for loop, we compare each window of the dna
for i in range(len(dna) - len(match) + 1): # with the fragment and score the alignment.
score = 0
for j in range(len(match)):
if match[j] == dna[i + j]:
score += 1
score_list.append(score)
maximum = score_list[0] # Find the highest score, i.e. the most similar
for num in score_list: # fragment of dna.
if maximum <= num:
maximum = num
location = score_list.index(maximum)
ans = dna[location:location+len(match)]
return ans
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == '__main__':
main()
``` |
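A worked example for `find_match`: each alignment of the short sequence against the long one is scored by the number of matching positions, and the highest-scoring window is returned.
```python
print(find_match("ATGCCT", "TGC"))  # "TGC" (perfect match at offset 1)
print(find_match("AAGGTT", "GGA"))  # "GGT" (2 of 3 positions match)
```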
{
"source": "johnsonwj/spirit-island-generator",
"score": 3
} |
#### File: johnsonwj/spirit-island-generator/generator.py
```python
import random
class GameData:
def __init__(self, base_data):
self.spirits = base_data.get('spirits', dict())
self.blight_cards = base_data.get('blight-cards', 0)
self.scenarios = base_data.get('scenarios', dict())
self.adversaries = base_data.get('adversaries', dict())
self.expansions = []
def add_expansion(self, expansion_id, expansion_data):
self.expansions.append(expansion_id)
self.spirits.update(expansion_data.get('spirits', dict()))
self.blight_cards += expansion_data.get('blight-cards', 0)
self.scenarios.update(expansion_data.get('scenarios', dict()))
self.adversaries.update(expansion_data.get('adversaries', dict()))
def has_expansion(self, expansion_id):
return expansion_id in self.expansions
class GamePreferences:
def __init__(self, prefs, game_data):
self.players = prefs.get('players', [None])
self.balance_spirits = prefs.get('balance-spirits', True)
self.game_data = GameData(game_data['base'])
for expansion in prefs.get('expansions', []):
self.game_data.add_expansion(expansion, game_data[expansion])
self.thematic_map = prefs.get('thematic-map', False)
self.use_blight_card = prefs.get('blight-card', True) or self.game_data.has_expansion('branch-and-claw')
self.difficulty_level = prefs.get('difficulty-level', 2)
self.randomize_scenario = not (prefs.get('scenario') or prefs.get('scenario-disabled', False))
self.scenario = prefs.get('scenario')
self.randomize_adversary = not (prefs.get('adversary') or prefs.get('adversary-disabled', False))
self.adversary = prefs.get('adversary')
self.adversary_level = prefs.get('adversary-level', None)
def count_available_blight_cards(self):
if self.use_blight_card:
return self.game_data.blight_cards
return 0
def thematic_map_difficulty(self):
if not self.thematic_map:
return 0
if self.game_data.has_expansion('branch-and-claw'):
return 1
return 3
def generate_invader_deck():
return [random.sample(range(1, 5 + i), 3 + i) for i in range(3)]
def pick_boards(player_count, use_thematic_map):
if use_thematic_map:
return ['NW', 'NE', 'W', 'E'][:player_count]
return random.sample(['A', 'B', 'C', 'D'], player_count)
def pick_spirit(pref, available_spirits, power_balance):
if type(pref) == str:
return pref
if type(pref) == int:
choices = [spirit_id for spirit_id in available_spirits
if available_spirits[spirit_id]['complexity'] <= pref]
if pref is None:
choices = list(available_spirits)
if power_balance:
for p in power_balance[::-1]:
choices = sorted(choices, key=lambda s: available_spirits[s]['powers'][p])
return choices[0]
return random.choice(choices)
class GameConfiguration:
def __init__(self, prefs):
self.prefs = prefs
self.spirits = prefs.players.copy()
self.boards = pick_boards(len(self.spirits), prefs.thematic_map)
self.blight_card = 'default'
self.scenario = None
self.adversary = None
self.adversary_level = None
self.invader_deck = generate_invader_deck()
def pick_blight_card(self):
n_blight_cards = self.prefs.count_available_blight_cards()
if n_blight_cards > 0:
self.blight_card = random.randint(1, n_blight_cards)
def locked_spirits(self):
return [s for s in self.spirits if type(s) == str]
def power_balance(self):
powers = dict()
for spirit_id in self.locked_spirits():
spirit_powers = self.prefs.game_data.spirits[spirit_id]['powers']
for p, v in spirit_powers.items():
powers[p] = powers.get(p, 0) + v
if powers:
return sorted(powers, key=powers.get)
return None
def pick_spirits(self):
available_spirits = self.prefs.game_data.spirits.copy()
print(self.spirits)
for i in range(len(self.spirits)):
current_balance = self.power_balance() if self.prefs.balance_spirits else None
spirit = pick_spirit(self.spirits[i], available_spirits, current_balance)
del available_spirits[spirit]
self.spirits[i] = spirit
def pick_adversary(self):
if not self.prefs.randomize_adversary:
self.adversary = self.prefs.adversary
self.adversary_level = self.prefs.adversary_level
return
max_adversary_difficulty = self.prefs.difficulty_level - self.difficulty_level()
all_adversary_handicaps = dict()
for spirit_id in self.locked_spirits():
spirit_handicaps = self.prefs.game_data.spirits[spirit_id].get('adversary-handicaps', dict())
for s, hc in spirit_handicaps.items():
all_adversary_handicaps[s] = all_adversary_handicaps.get(s, 0) + hc
effective_difficulties = list()
for aid, a in self.prefs.game_data.adversaries.items():
for level in range(len(a['difficulty'])):
base_difficulty = a['difficulty'][level]
handicap = all_adversary_handicaps.get(aid, 0)
effective_difficulties.append((aid, level, base_difficulty + handicap))
possible_adversaries = [(a, l) for (a, l, d) in effective_difficulties
if d <= max_adversary_difficulty]
if not possible_adversaries:
self.adversary = None
self.adversary_level = None
return
a, l = random.choice(possible_adversaries)
self.adversary = a
self.adversary_level = l
def pick_scenario(self):
if not self.prefs.randomize_scenario:
self.scenario = self.prefs.scenario
return
max_scenario_difficulty = self.prefs.difficulty_level - self.difficulty_level()
all_scenario_handicaps = dict()
for spirit_id in self.locked_spirits():
spirit_handicaps = self.prefs.game_data.spirits[spirit_id].get('scenario-handicaps', dict())
for s, hc in spirit_handicaps.items():
all_scenario_handicaps[s] = all_scenario_handicaps.get(s, 0) + hc
effective_difficulties = {sid: s['difficulty'] + all_scenario_handicaps.get(sid, 0)
for sid, s in self.prefs.game_data.scenarios.items()}
possible_scenarios = [s for s, d in effective_difficulties.items()
if d <= max_scenario_difficulty]
if not possible_scenarios:
self.scenario = None
return
self.scenario = random.choice(possible_scenarios)
def difficulty_level(self):
difficulty = 0
if self.adversary:
difficulty += self.prefs.game_data.adversaries[self.adversary]['difficulty'][self.adversary_level]
if self.scenario:
scenario = self.prefs.game_data.scenarios[self.scenario]
difficulty += scenario['difficulty']
if self.adversary:
difficulty += scenario.get('adversary-handicaps', dict()).get(self.adversary, 0)
for spirit_id in self.locked_spirits():
spirit = self.prefs.game_data.spirits[spirit_id]
if self.adversary:
difficulty += spirit.get('adversary-handicaps', dict()).get(self.adversary, 0)
if self.scenario:
difficulty += spirit.get('scenario-handicaps', dict()).get(self.scenario, 0)
difficulty += self.prefs.thematic_map_difficulty()
return difficulty
def dump(self):
return {
'spirits': self.spirits,
'boards': self.boards,
'invader-deck': self.invader_deck,
'blight-card': self.blight_card,
'scenario': self.scenario,
'adversary': self.adversary,
'adversary-level': self.adversary_level,
'thematic-map': self.prefs.thematic_map
}
def generate_game(prefs):
game = GameConfiguration(prefs)
game.pick_blight_card()
game.pick_spirits()
game.pick_adversary()
game.pick_scenario()
return game
```
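A hedged end-to-end sketch of calling the generator above, assuming it is saved as generator.py (as main.py below does). The shape of the `game_data` and preference dictionaries is inferred from how the code reads them; all spirit, scenario, and adversary entries here are made-up placeholders, not real game data.
```python
import json
import generator

game_data = {
    "base": {
        "spirits": {
            "river": {"complexity": 1, "powers": {"offense": 2, "defense": 1}},
            "shadows": {"complexity": 1, "powers": {"offense": 3, "defense": 0}},
            "earth": {"complexity": 2, "powers": {"offense": 1, "defense": 3}},
        },
        "blight-cards": 2,
        "scenarios": {"blitz": {"difficulty": 0}},
        "adversaries": {"brandenburg": {"difficulty": [1, 2, 4, 6, 7, 9, 10]}},
    }
}

# One locked spirit plus one randomized slot, target difficulty 4.
prefs = generator.GamePreferences(
    {"players": ["river", None], "difficulty-level": 4}, game_data
)
game = generator.generate_game(prefs)
print(json.dumps(game.dump(), indent=2))
```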
#### File: johnsonwj/spirit-island-generator/main.py
```python
import generator
import yaml
import json
with open('data.yaml') as data_file:
game_data = yaml.safe_load(data_file)
def handle(request):
prefs = generator.GamePreferences(request.get_json(), game_data)
config = generator.generate_game(prefs)
return json.dumps(config.dump(), indent=2)
``` |
{
"source": "Johnson-yue/lffont",
"score": 3
} |
#### File: models/modules/frn.py
```python
from functools import partial
import torch
import torch.nn as nn
class TLU(nn.Module):
""" Thresholded Linear Unit """
def __init__(self, num_features):
super().__init__()
self.num_features = num_features
self.tau = nn.Parameter(torch.zeros(1, num_features, 1, 1))
def forward(self, x):
return torch.max(x, self.tau)
def extra_repr(self):
return 'num_features={}'.format(self.num_features)
# NOTE generalized version
class FilterResponseNorm(nn.Module):
""" Filter Response Normalization """
def __init__(self, num_features, ndim, eps=None, learnable_eps=False):
"""
Args:
num_features
ndim
eps: if None is given, use the paper value as default.
from paper, fixed_eps=1e-6 and learnable_eps_init=1e-4.
learnable_eps: turn eps to learnable parameter, which is recommended on
fully-connected or 1x1 activation map.
"""
super().__init__()
if eps is None:
if learnable_eps:
eps = 1e-4
else:
eps = 1e-6
self.num_features = num_features
self.init_eps = eps
self.learnable_eps = learnable_eps
self.ndim = ndim
self.mean_dims = list(range(2, 2+ndim))
self.weight = nn.Parameter(torch.ones([1, num_features] + [1]*ndim))
self.bias = nn.Parameter(torch.zeros([1, num_features] + [1]*ndim))
if learnable_eps:
self.eps = nn.Parameter(torch.as_tensor(eps))
else:
self.register_buffer('eps', torch.as_tensor(eps))
def forward(self, x):
# normalize
nu2 = x.pow(2).mean(self.mean_dims, keepdim=True)
x = x * torch.rsqrt(nu2 + self.eps.abs())
# modulation
x = x * self.weight + self.bias
return x
def extra_repr(self):
return 'num_features={}, init_eps={}, ndim={}'.format(
self.num_features, self.init_eps, self.ndim)
FilterResponseNorm1d = partial(FilterResponseNorm, ndim=1, learnable_eps=True)
FilterResponseNorm2d = partial(FilterResponseNorm, ndim=2)
```
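A minimal shape check for the modules above, pairing FRN with TLU as a drop-in replacement for BatchNorm + ReLU on a 4D activation map; inputs are random and purely illustrative.
```python
import torch

frn = FilterResponseNorm2d(num_features=16)
tlu = TLU(num_features=16)

x = torch.randn(8, 16, 32, 32)               # [B, C, H, W]
y = tlu(frn(x))
print(y.shape)                               # torch.Size([8, 16, 32, 32])

# 1x1 / fully-connected case, where a learnable eps is recommended
frn1d = FilterResponseNorm1d(num_features=16)
print(frn1d(torch.randn(8, 16, 1)).shape)    # torch.Size([8, 16, 1])
```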
#### File: lffont/trainer/factorize_trainer.py
```python
import torch
import torch.nn as nn
from .base_trainer import BaseTrainer
from datasets import cyclize
import utils
import copy
from itertools import chain
class FactorizeTrainer(BaseTrainer):
def __init__(self, gen, disc, g_optim, d_optim, aux_clf, ac_optim,
writer, logger, evaluator, cv_loaders, cfg):
super().__init__(gen, disc, g_optim, d_optim, aux_clf, ac_optim,
writer, logger, evaluator, cv_loaders, cfg)
self.frozen_emb_style = copy.deepcopy(self.gen.emb_style)
self.frozen_emb_comp = copy.deepcopy(self.gen.emb_comp)
utils.freeze(self.frozen_emb_style)
utils.freeze(self.frozen_emb_comp)
def sync_g_ema(self, in_style_ids, in_comp_ids, in_imgs, trg_style_ids, trg_comp_ids,
content_imgs):
org_train_mode = self.gen_ema.training
with torch.no_grad():
self.gen_ema.train()
self.gen_ema.encode_write_fact(in_style_ids, in_comp_ids, in_imgs)
self.gen_ema.read_decode(trg_style_ids, trg_comp_ids, content_imgs=content_imgs,
phase="fact")
self.gen_ema.train(org_train_mode)
def train(self, loader, st_step=1, max_step=100000):
self.gen.train()
self.disc.train()
losses = utils.AverageMeters("g_total", "pixel", "disc", "gen", "fm",
"ac", "ac_gen", "dec_const")
discs = utils.AverageMeters("real_font", "real_uni", "fake_font", "fake_uni",
"real_font_acc", "real_uni_acc",
"fake_font_acc", "fake_uni_acc")
# etc stats
stats = utils.AverageMeters("B_style", "B_target", "ac_acc", "ac_gen_acc")
self.step = st_step
self.clear_losses()
self.logger.info("Start training ...")
for (in_style_ids, in_comp_ids, in_imgs,
trg_style_ids, trg_uni_ids, trg_comp_ids, trg_imgs, content_imgs) in cyclize(loader):
epoch = self.step // len(loader)
if self.cfg.use_ddp and (self.step % len(loader)) == 0:
loader.sampler.set_epoch(epoch)
B = trg_imgs.size(0)
stats.updates({
"B_style": in_imgs.size(0),
"B_target": B
})
in_style_ids = in_style_ids.cuda()
in_comp_ids = in_comp_ids.cuda()
in_imgs = in_imgs.cuda()
trg_style_ids = trg_style_ids.cuda()
trg_imgs = trg_imgs.cuda()
content_imgs = content_imgs.cuda()
if self.cfg.use_half:
in_imgs = in_imgs.half()
content_imgs = content_imgs.half()
feat_styles, feat_comps = self.gen.encode_write_fact(
in_style_ids, in_comp_ids, in_imgs, write_comb=True
)
feats_rc = (feat_styles * feat_comps).sum(1)
ac_feats = feats_rc
self.add_dec_const_loss()
out = self.gen.read_decode(
trg_style_ids, trg_comp_ids, content_imgs=content_imgs, phase="fact", try_comb=True
)
trg_uni_disc_ids = trg_uni_ids.cuda()
real_font, real_uni, *real_feats = self.disc(
trg_imgs, trg_style_ids, trg_uni_disc_ids, out_feats=self.cfg['fm_layers']
)
fake_font, fake_uni = self.disc(out.detach(), trg_style_ids, trg_uni_disc_ids)
self.add_gan_d_loss(real_font, real_uni, fake_font, fake_uni)
self.d_optim.zero_grad()
self.d_backward()
self.d_optim.step()
fake_font, fake_uni, *fake_feats = self.disc(
out, trg_style_ids, trg_uni_disc_ids, out_feats=self.cfg['fm_layers']
)
self.add_gan_g_loss(real_font, real_uni, fake_font, fake_uni)
self.add_fm_loss(real_feats, fake_feats)
def racc(x):
return (x > 0.).float().mean().item()
def facc(x):
return (x < 0.).float().mean().item()
discs.updates({
"real_font": real_font.mean().item(),
"real_uni": real_uni.mean().item(),
"fake_font": fake_font.mean().item(),
"fake_uni": fake_uni.mean().item(),
'real_font_acc': racc(real_font),
'real_uni_acc': racc(real_uni),
'fake_font_acc': facc(fake_font),
'fake_uni_acc': facc(fake_uni)
}, B)
self.add_pixel_loss(out, trg_imgs)
self.g_optim.zero_grad()
if self.aux_clf is not None:
self.add_ac_losses_and_update_stats(
ac_feats, in_comp_ids, out, trg_comp_ids, stats
)
self.ac_optim.zero_grad()
self.ac_backward()
self.ac_optim.step()
self.g_backward()
self.g_optim.step()
loss_dic = self.clear_losses()
losses.updates(loss_dic, B) # accum loss stats
self.accum_g()
if self.is_bn_gen:
self.sync_g_ema(in_style_ids, in_comp_ids, in_imgs, trg_style_ids, trg_comp_ids,
content_imgs=content_imgs)
torch.cuda.synchronize()
if self.cfg.gpu <= 0:
if self.step % self.cfg['tb_freq'] == 0:
self.baseplot(losses, discs, stats)
self.plot(losses)
if self.step % self.cfg['print_freq'] == 0:
self.log(losses, discs, stats)
self.logger.debug("GPU Memory usage: max mem_alloc = %.1fM / %.1fM",
torch.cuda.max_memory_allocated() / 1000 / 1000,
torch.cuda.max_memory_cached() / 1000 / 1000)
losses.resets()
discs.resets()
stats.resets()
if self.step % self.cfg['val_freq'] == 0:
epoch = self.step / len(loader)
self.logger.info("Validation at Epoch = {:.3f}".format(epoch))
if not self.is_bn_gen:
self.sync_g_ema(in_style_ids, in_comp_ids, in_imgs, trg_style_ids, trg_comp_ids,
content_imgs=content_imgs)
self.evaluator.cp_validation(self.gen_ema, self.cv_loaders, self.step, phase="fact",
ext_tag="factorize")
self.save(loss_dic['g_total'], self.cfg['save'], self.cfg.get('save_freq', self.cfg['val_freq']))
else:
pass
if self.step >= max_step:
break
self.step += 1
self.logger.info("Iteration finished.")
def add_dec_const_loss(self):
loss = self.gen.get_fact_memory_var()
self.g_losses['dec_const'] = loss * self.cfg["dec_const_w"]
return loss
def add_ac_losses_and_update_stats(self, in_sc_feats, in_comp_ids, generated, trg_comp_ids, stats):
aux_feats, loss, acc = self.infer_ac(in_sc_feats, in_comp_ids)
self.ac_losses['ac'] = loss * self.cfg['ac_w']
stats.ac_acc.update(acc, in_comp_ids.numel())
enc = self.frozen_enc
enc.load_state_dict(self.gen.component_encoder.state_dict())
emb_style = self.frozen_emb_style
emb_comp = self.frozen_emb_comp
emb_style.load_state_dict(self.gen.emb_style.state_dict())
emb_comp.load_state_dict(self.gen.emb_comp.state_dict())
trg_comp_lens = torch.LongTensor([*map(len, trg_comp_ids)]).cuda()
trg_comp_ids = torch.LongTensor([*chain(*trg_comp_ids)]).cuda()
generated = generated.repeat_interleave(trg_comp_lens, dim=0)
feats = enc(generated, trg_comp_ids)
gen_feats = feats["last"]
gen_emb_style = emb_style(gen_feats.unsqueeze(1))
gen_emb_comp = emb_comp(gen_feats.unsqueeze(1))
gen_recon = (gen_emb_style * gen_emb_comp).sum(1)
aux_gen_feats, loss, acc = self.infer_ac(gen_recon, trg_comp_ids)
stats.ac_gen_acc.update(acc, trg_comp_ids.numel())
if self.cfg['ac_gen_w'] > 0.:
self.ac_losses['ac_gen'] = loss * self.cfg['ac_gen_w']
def plot(self, losses):
tag_scalar_dic = {
"train/dec_const_loss": losses.dec_const.val
}
self.writer.add_scalars(tag_scalar_dic, self.step)
def log(self, losses, discs, stats):
self.logger.info(
" Step {step:7d}: L1 {L.pixel.avg:7.4f} D {L.disc.avg:7.3f} G {L.gen.avg:7.3f}"
" Dec_Const {L.dec_const.avg:3.3f} FM {L.fm.avg:7.3f} AC {S.ac_acc.avg:5.1%}"
" R_font {D.real_font_acc.avg:7.3f} F_font {D.fake_font_acc.avg:7.3f}"
" R_uni {D.real_uni_acc.avg:7.3f} F_uni {D.fake_uni_acc.avg:7.3f}"
" B_stl {S.B_style.avg:5.1f} B_trg {S.B_target.avg:5.1f}"
.format(step=self.step, L=losses, D=discs, S=stats))
``` |
{
"source": "Johnson-yue/mmsr",
"score": 2
} |
#### File: models/archs/DUF_arch.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def adapt_official(Rx, scale=4):
'''Adapt the weights translated from the official tensorflow weights
Not necessary if you are training from scratch'''
x = Rx.clone()
x1 = x[:, ::3, :, :]
x2 = x[:, 1::3, :, :]
x3 = x[:, 2::3, :, :]
Rx[:, :scale**2, :, :] = x1
Rx[:, scale**2:2 * (scale**2), :, :] = x2
Rx[:, 2 * (scale**2):, :, :] = x3
return Rx
class DenseBlock(nn.Module):
'''Dense block
for the second dense block, t_reduce=True'''
def __init__(self, nf=64, ng=32, t_reduce=False):
super(DenseBlock, self).__init__()
self.t_reduce = t_reduce
if self.t_reduce:
pad = (0, 1, 1)
else:
pad = (1, 1, 1)
self.bn3d_1 = nn.BatchNorm3d(nf, eps=1e-3, momentum=1e-3)
self.conv3d_1 = nn.Conv3d(nf, nf, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True)
self.bn3d_2 = nn.BatchNorm3d(nf, eps=1e-3, momentum=1e-3)
self.conv3d_2 = nn.Conv3d(nf, ng, (3, 3, 3), stride=(1, 1, 1), padding=pad, bias=True)
self.bn3d_3 = nn.BatchNorm3d(nf + ng, eps=1e-3, momentum=1e-3)
self.conv3d_3 = nn.Conv3d(nf + ng, nf + ng, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0),
bias=True)
self.bn3d_4 = nn.BatchNorm3d(nf + ng, eps=1e-3, momentum=1e-3)
self.conv3d_4 = nn.Conv3d(nf + ng, ng, (3, 3, 3), stride=(1, 1, 1), padding=pad, bias=True)
self.bn3d_5 = nn.BatchNorm3d(nf + 2 * ng, eps=1e-3, momentum=1e-3)
self.conv3d_5 = nn.Conv3d(nf + 2 * ng, nf + 2 * ng, (1, 1, 1), stride=(1, 1, 1),
padding=(0, 0, 0), bias=True)
self.bn3d_6 = nn.BatchNorm3d(nf + 2 * ng, eps=1e-3, momentum=1e-3)
self.conv3d_6 = nn.Conv3d(nf + 2 * ng, ng, (3, 3, 3), stride=(1, 1, 1), padding=pad,
bias=True)
def forward(self, x):
'''x: [B, C, T, H, W]
C: nf -> nf + 3 * ng
T: 1) 7 -> 7 (t_reduce=False);
2) 7 -> 7 - 2 * 3 = 1 (t_reduce=True)'''
x1 = self.conv3d_1(F.relu(self.bn3d_1(x), inplace=True))
x1 = self.conv3d_2(F.relu(self.bn3d_2(x1), inplace=True))
if self.t_reduce:
x1 = torch.cat((x[:, :, 1:-1, :, :], x1), 1)
else:
x1 = torch.cat((x, x1), 1)
x2 = self.conv3d_3(F.relu(self.bn3d_3(x1), inplace=True))
x2 = self.conv3d_4(F.relu(self.bn3d_4(x2), inplace=True))
if self.t_reduce:
x2 = torch.cat((x1[:, :, 1:-1, :, :], x2), 1)
else:
x2 = torch.cat((x1, x2), 1)
x3 = self.conv3d_5(F.relu(self.bn3d_5(x2), inplace=True))
x3 = self.conv3d_6(F.relu(self.bn3d_6(x3), inplace=True))
if self.t_reduce:
x3 = torch.cat((x2[:, :, 1:-1, :, :], x3), 1)
else:
x3 = torch.cat((x2, x3), 1)
return x3
class DynamicUpsamplingFilter_3C(nn.Module):
'''dynamic upsampling filter with 3 channels applying the same filters
filter_size: filter size of the generated filters, shape (C, kH, kW)'''
def __init__(self, filter_size=(1, 5, 5)):
super(DynamicUpsamplingFilter_3C, self).__init__()
# generate a local expansion filter, used similar to im2col
nF = np.prod(filter_size)
expand_filter_np = np.reshape(np.eye(nF, nF),
(nF, filter_size[0], filter_size[1], filter_size[2]))
expand_filter = torch.from_numpy(expand_filter_np).float()
self.expand_filter = torch.cat((expand_filter, expand_filter, expand_filter),
0) # [75, 1, 5, 5]
def forward(self, x, filters):
'''x: input image, [B, 3, H, W]
filters: generate dynamic filters, [B, F, R, H, W], e.g., [B, 25, 16, H, W]
F: prod of filter kernel size, e.g., 5*5 = 25
R: used for upsampling, similar to pixel shuffle, e.g., 4*4 = 16 for x4
Return: filtered image, [B, 3*R, H, W]
'''
B, nF, R, H, W = filters.size()
# using group convolution
input_expand = F.conv2d(x, self.expand_filter.type_as(x), padding=2,
groups=3) # [B, 75, H, W] similar to im2col
input_expand = input_expand.view(B, 3, nF, H, W).permute(0, 3, 4, 1, 2) # [B, H, W, 3, 25]
filters = filters.permute(0, 3, 4, 1, 2) # [B, H, W, 25, 16]
out = torch.matmul(input_expand, filters) # [B, H, W, 3, 16]
return out.permute(0, 3, 4, 1, 2).view(B, 3 * R, H, W) # [B, 3*16, H, W]
class DUF_16L(nn.Module):
'''Official DUF structure with 16 layers'''
def __init__(self, scale=4, adapt_official=False):
super(DUF_16L, self).__init__()
self.conv3d_1 = nn.Conv3d(3, 64, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True)
self.dense_block_1 = DenseBlock(64, 64 // 2, t_reduce=False) # 64 + 32 * 3 = 160, T = 7
self.dense_block_2 = DenseBlock(160, 64 // 2, t_reduce=True) # 160 + 32 * 3 = 256, T = 1
self.bn3d_2 = nn.BatchNorm3d(256, eps=1e-3, momentum=1e-3)
self.conv3d_2 = nn.Conv3d(256, 256, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1),
bias=True)
self.conv3d_r1 = nn.Conv3d(256, 256, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0),
bias=True)
self.conv3d_r2 = nn.Conv3d(256, 3 * (scale**2), (1, 1, 1), stride=(1, 1, 1),
padding=(0, 0, 0), bias=True)
self.conv3d_f1 = nn.Conv3d(256, 512, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0),
bias=True)
self.conv3d_f2 = nn.Conv3d(512, 1 * 5 * 5 * (scale**2), (1, 1, 1), stride=(1, 1, 1),
padding=(0, 0, 0), bias=True)
self.dynamic_filter = DynamicUpsamplingFilter_3C((1, 5, 5))
self.scale = scale
self.adapt_official = adapt_official
def forward(self, x):
'''
x: [B, T, C, H, W], T = 7. reshape to [B, C, T, H, W] for Conv3D
Generate filters and image residual:
Fx: [B, 25, 16, H, W] for DynamicUpsamplingFilter_3C
Rx: [B, 3*16, 1, H, W]
'''
B, T, C, H, W = x.size()
x = x.permute(0, 2, 1, 3, 4) # [B, C, T, H, W] for Conv3D
x_center = x[:, :, T // 2, :, :]
x = self.conv3d_1(x)
x = self.dense_block_1(x)
x = self.dense_block_2(x) # reduce T to 1
x = F.relu(self.conv3d_2(F.relu(self.bn3d_2(x), inplace=True)), inplace=True)
# image residual
Rx = self.conv3d_r2(F.relu(self.conv3d_r1(x), inplace=True)) # [B, 3*16, 1, H, W]
# filter
Fx = self.conv3d_f2(F.relu(self.conv3d_f1(x), inplace=True)) # [B, 25*16, 1, H, W]
Fx = F.softmax(Fx.view(B, 25, self.scale**2, H, W), dim=1)
# Adapt to official model weights
if self.adapt_official:
adapt_official(Rx, scale=self.scale)
# dynamic filter
out = self.dynamic_filter(x_center, Fx) # [B, 3*R, H, W]
out += Rx.squeeze_(2)
out = F.pixel_shuffle(out, self.scale) # [B, 3, H, W]
return out
class DenseBlock_28L(nn.Module):
'''The first part of the dense blocks used in DUF_28L
Temporal dimension remains the same here'''
def __init__(self, nf=64, ng=16):
super(DenseBlock_28L, self).__init__()
pad = (1, 1, 1)
dense_block_l = []
for i in range(0, 9):
dense_block_l.append(nn.BatchNorm3d(nf + i * ng, eps=1e-3, momentum=1e-3))
dense_block_l.append(nn.ReLU())
dense_block_l.append(
nn.Conv3d(nf + i * ng, nf + i * ng, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0),
bias=True))
dense_block_l.append(nn.BatchNorm3d(nf + i * ng, eps=1e-3, momentum=1e-3))
dense_block_l.append(nn.ReLU())
dense_block_l.append(
nn.Conv3d(nf + i * ng, ng, (3, 3, 3), stride=(1, 1, 1), padding=pad, bias=True))
self.dense_blocks = nn.ModuleList(dense_block_l)
def forward(self, x):
'''x: [B, C, T, H, W]
C: 64 -> 208
T: 7 -> 7 (no temporal reduction)'''
for i in range(0, len(self.dense_blocks), 6):
y = x
for j in range(6):
y = self.dense_blocks[i + j](y)
x = torch.cat((x, y), 1)
return x
class DUF_28L(nn.Module):
'''Official DUF structure with 28 layers'''
def __init__(self, scale=4, adapt_official=False):
super(DUF_28L, self).__init__()
self.conv3d_1 = nn.Conv3d(3, 64, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True)
self.dense_block_1 = DenseBlock_28L(64, 16) # 64 + 16 * 9 = 208, T = 7
self.dense_block_2 = DenseBlock(208, 16, t_reduce=True) # 208 + 16 * 3 = 256, T = 1
self.bn3d_2 = nn.BatchNorm3d(256, eps=1e-3, momentum=1e-3)
self.conv3d_2 = nn.Conv3d(256, 256, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1),
bias=True)
self.conv3d_r1 = nn.Conv3d(256, 256, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0),
bias=True)
self.conv3d_r2 = nn.Conv3d(256, 3 * (scale**2), (1, 1, 1), stride=(1, 1, 1),
padding=(0, 0, 0), bias=True)
self.conv3d_f1 = nn.Conv3d(256, 512, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0),
bias=True)
self.conv3d_f2 = nn.Conv3d(512, 1 * 5 * 5 * (scale**2), (1, 1, 1), stride=(1, 1, 1),
padding=(0, 0, 0), bias=True)
self.dynamic_filter = DynamicUpsamplingFilter_3C((1, 5, 5))
self.scale = scale
self.adapt_official = adapt_official
def forward(self, x):
'''
x: [B, T, C, H, W], T = 7. reshape to [B, C, T, H, W] for Conv3D
Generate filters and image residual:
Fx: [B, 25, 16, H, W] for DynamicUpsamplingFilter_3C
Rx: [B, 3*16, 1, H, W]
'''
B, T, C, H, W = x.size()
x = x.permute(0, 2, 1, 3, 4) # [B,C,T,H,W] for Conv3D
x_center = x[:, :, T // 2, :, :]
x = self.conv3d_1(x)
x = self.dense_block_1(x)
x = self.dense_block_2(x) # reduce T to 1
x = F.relu(self.conv3d_2(F.relu(self.bn3d_2(x), inplace=True)), inplace=True)
# image residual
Rx = self.conv3d_r2(F.relu(self.conv3d_r1(x), inplace=True)) # [B, 3*16, 1, H, W]
# filter
Fx = self.conv3d_f2(F.relu(self.conv3d_f1(x), inplace=True)) # [B, 25*16, 1, H, W]
Fx = F.softmax(Fx.view(B, 25, self.scale**2, H, W), dim=1)
# Adapt to official model weights
if self.adapt_official:
adapt_official(Rx, scale=self.scale)
# dynamic filter
out = self.dynamic_filter(x_center, Fx) # [B, 3*R, H, W]
out += Rx.squeeze_(2)
out = F.pixel_shuffle(out, self.scale) # [B, 3, H, W]
return out
class DenseBlock_52L(nn.Module):
'''The first part of the dense blocks used in DUF_52L
Temporal dimension remains the same here'''
def __init__(self, nf=64, ng=16):
super(DenseBlock_52L, self).__init__()
pad = (1, 1, 1)
dense_block_l = []
for i in range(0, 21):
dense_block_l.append(nn.BatchNorm3d(nf + i * ng, eps=1e-3, momentum=1e-3))
dense_block_l.append(nn.ReLU())
dense_block_l.append(
nn.Conv3d(nf + i * ng, nf + i * ng, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0),
bias=True))
dense_block_l.append(nn.BatchNorm3d(nf + i * ng, eps=1e-3, momentum=1e-3))
dense_block_l.append(nn.ReLU())
dense_block_l.append(
nn.Conv3d(nf + i * ng, ng, (3, 3, 3), stride=(1, 1, 1), padding=pad, bias=True))
self.dense_blocks = nn.ModuleList(dense_block_l)
def forward(self, x):
'''x: [B, C, T, H, W]
C: 64 -> 400
T: 7 -> 7 (no temporal reduction)'''
for i in range(0, len(self.dense_blocks), 6):
y = x
for j in range(6):
y = self.dense_blocks[i + j](y)
x = torch.cat((x, y), 1)
return x
class DUF_52L(nn.Module):
'''Official DUF structure with 52 layers'''
def __init__(self, scale=4, adapt_official=False):
super(DUF_52L, self).__init__()
self.conv3d_1 = nn.Conv3d(3, 64, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True)
self.dense_block_1 = DenseBlock_52L(64, 16) # 64 + 16 * 21 = 400, T = 7
self.dense_block_2 = DenseBlock(400, 16, t_reduce=True) # 400 + 16 * 3 = 448, T = 1
self.bn3d_2 = nn.BatchNorm3d(448, eps=1e-3, momentum=1e-3)
self.conv3d_2 = nn.Conv3d(448, 256, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1),
bias=True)
self.conv3d_r1 = nn.Conv3d(256, 256, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0),
bias=True)
self.conv3d_r2 = nn.Conv3d(256, 3 * (scale**2), (1, 1, 1), stride=(1, 1, 1),
padding=(0, 0, 0), bias=True)
self.conv3d_f1 = nn.Conv3d(256, 512, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0),
bias=True)
self.conv3d_f2 = nn.Conv3d(512, 1 * 5 * 5 * (scale**2), (1, 1, 1), stride=(1, 1, 1),
padding=(0, 0, 0), bias=True)
self.dynamic_filter = DynamicUpsamplingFilter_3C((1, 5, 5))
self.scale = scale
self.adapt_official = adapt_official
def forward(self, x):
'''
x: [B, T, C, H, W], T = 7. reshape to [B, C, T, H, W] for Conv3D
Generate filters and image residual:
Fx: [B, 25, 16, H, W] for DynamicUpsamplingFilter_3C
Rx: [B, 3*16, 1, H, W]
'''
B, T, C, H, W = x.size()
x = x.permute(0, 2, 1, 3, 4) # [B,C,T,H,W] for Conv3D
x_center = x[:, :, T // 2, :, :]
x = self.conv3d_1(x)
x = self.dense_block_1(x)
x = self.dense_block_2(x)
x = F.relu(self.conv3d_2(F.relu(self.bn3d_2(x), inplace=True)), inplace=True)
# image residual
Rx = self.conv3d_r2(F.relu(self.conv3d_r1(x), inplace=True)) # [B, 3*16, 1, H, W]
# filter
Fx = self.conv3d_f2(F.relu(self.conv3d_f1(x), inplace=True)) # [B, 25*16, 1, H, W]
Fx = F.softmax(Fx.view(B, 25, self.scale**2, H, W), dim=1)
# Adapt to official model weights
if self.adapt_official:
adapt_official(Rx, scale=self.scale)
# dynamic filter
out = self.dynamic_filter(x_center, Fx) # [B, 3*R, H, W]
out += Rx.squeeze_(2)
out = F.pixel_shuffle(out, self.scale) # [B, 3, H, W]
return out
```
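A quick shape check for the 16-layer variant above: seven random low-resolution frames in, one x4 super-resolved center frame out. The spatial size is kept small and the weights are untrained, so this only verifies tensor shapes.
```python
import torch

model = DUF_16L(scale=4, adapt_official=False).eval()
lr_frames = torch.randn(1, 7, 3, 32, 32)  # [B, T, C, H, W]
with torch.no_grad():
    sr = model(lr_frames)
print(sr.shape)                           # torch.Size([1, 3, 128, 128])
```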
#### File: codes/models/base_model.py
```python
import os
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
class BaseModel():
def __init__(self, opt):
self.opt = opt
self.device = torch.device('cuda' if opt['gpu_ids'] is not None else 'cpu')
self.is_train = opt['is_train']
self.schedulers = []
self.optimizers = []
def feed_data(self, data):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
pass
def get_current_losses(self):
pass
def print_network(self):
pass
def save(self, label):
pass
def load(self):
pass
def _set_lr(self, lr_groups_l):
"""Set learning rate for warmup
lr_groups_l: list for lr_groups. each for a optimizer"""
for optimizer, lr_groups in zip(self.optimizers, lr_groups_l):
for param_group, lr in zip(optimizer.param_groups, lr_groups):
param_group['lr'] = lr
def _get_init_lr(self):
"""Get the initial lr, which is set by the scheduler"""
init_lr_groups_l = []
for optimizer in self.optimizers:
init_lr_groups_l.append([v['initial_lr'] for v in optimizer.param_groups])
return init_lr_groups_l
def update_learning_rate(self, cur_iter, warmup_iter=-1):
for scheduler in self.schedulers:
scheduler.step()
# set up warm-up learning rate
if cur_iter < warmup_iter:
# get initial lr for each group
init_lr_g_l = self._get_init_lr()
# modify warming-up learning rates
warm_up_lr_l = []
for init_lr_g in init_lr_g_l:
warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])
# set learning rate
self._set_lr(warm_up_lr_l)
def get_current_learning_rate(self):
return [param_group['lr'] for param_group in self.optimizers[0].param_groups]
def get_network_description(self, network):
"""Get the string and total parameters of the network"""
if isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel):
network = network.module
return str(network), sum(map(lambda x: x.numel(), network.parameters()))
def save_network(self, network, network_label, iter_label):
save_filename = '{}_{}.pth'.format(iter_label, network_label)
save_path = os.path.join(self.opt['path']['models'], save_filename)
if isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel):
network = network.module
state_dict = network.state_dict()
for key, param in state_dict.items():
state_dict[key] = param.cpu()
torch.save(state_dict, save_path)
def load_network(self, load_path, network, strict=True):
if isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel):
network = network.module
load_net = torch.load(load_path)
load_net_clean = OrderedDict() # remove unnecessary 'module.'
for k, v in load_net.items():
if k.startswith('module.'):
load_net_clean[k[7:]] = v
else:
load_net_clean[k] = v
network.load_state_dict(load_net_clean, strict=strict)
def save_training_state(self, epoch, iter_step):
"""Save training state during training, which will be used for resuming"""
state = {'epoch': epoch, 'iter': iter_step, 'schedulers': [], 'optimizers': []}
for s in self.schedulers:
state['schedulers'].append(s.state_dict())
for o in self.optimizers:
state['optimizers'].append(o.state_dict())
save_filename = '{}.state'.format(iter_step)
save_path = os.path.join(self.opt['path']['training_state'], save_filename)
torch.save(state, save_path)
def resume_training(self, resume_state):
"""Resume the optimizers and schedulers for training"""
resume_optimizers = resume_state['optimizers']
resume_schedulers = resume_state['schedulers']
assert len(resume_optimizers) == len(self.optimizers), 'Wrong lengths of optimizers'
assert len(resume_schedulers) == len(self.schedulers), 'Wrong lengths of schedulers'
for i, o in enumerate(resume_optimizers):
self.optimizers[i].load_state_dict(o)
for i, s in enumerate(resume_schedulers):
self.schedulers[i].load_state_dict(s)
```
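The warm-up branch of `update_learning_rate` rescales every group's initial learning rate linearly in the current iteration. A small standalone sketch of that arithmetic (the numbers below are illustrative, not taken from any config):

```python
# warm_up_lr = initial_lr / warmup_iter * cur_iter, applied while cur_iter < warmup_iter
initial_lr, warmup_iter = 4e-4, 2000
for cur_iter in (1, 500, 1000, 1999, 2000):
    lr = initial_lr / warmup_iter * cur_iter if cur_iter < warmup_iter else initial_lr
    print(cur_iter, lr)   # ramps from ~2e-7 up to 4e-4, after which the scheduler value is used
```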
#### File: mmsr/codes/test_Vid4_REDS4_with_GT.py
```python
import os
import os.path as osp
import glob
import logging
import numpy as np
import cv2
import torch
import utils.util as util
import data.util as data_util
import models.archs.EDVR_arch as EDVR_arch
def main():
#################
# configurations
#################
device = torch.device('cuda')
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
data_mode = 'Vid4' # Vid4 | sharp_bicubic | blur_bicubic | blur | blur_comp
# Vid4: SR
# REDS4: sharp_bicubic (SR-clean), blur_bicubic (SR-blur);
# blur (deblur-clean), blur_comp (deblur-compression).
stage = 1 # 1 or 2, use two stage strategy for REDS dataset.
flip_test = False
############################################################################
#### model
if data_mode == 'Vid4':
if stage == 1:
model_path = '../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth'
else:
raise ValueError('Vid4 does not support stage 2.')
elif data_mode == 'sharp_bicubic':
if stage == 1:
model_path = '../experiments/pretrained_models/EDVR_REDS_SR_L.pth'
else:
model_path = '../experiments/pretrained_models/EDVR_REDS_SR_Stage2.pth'
elif data_mode == 'blur_bicubic':
if stage == 1:
model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_L.pth'
else:
model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_Stage2.pth'
elif data_mode == 'blur':
if stage == 1:
model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_L.pth'
else:
model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_Stage2.pth'
elif data_mode == 'blur_comp':
if stage == 1:
model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_L.pth'
else:
model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_Stage2.pth'
else:
raise NotImplementedError
if data_mode == 'Vid4':
N_in = 7 # use N_in images to restore one HR image
else:
N_in = 5
predeblur, HR_in = False, False
back_RBs = 40
if data_mode == 'blur_bicubic':
predeblur = True
if data_mode == 'blur' or data_mode == 'blur_comp':
predeblur, HR_in = True, True
if stage == 2:
HR_in = True
back_RBs = 20
model = EDVR_arch.EDVR(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in)
#### dataset
if data_mode == 'Vid4':
test_dataset_folder = '../datasets/Vid4/BIx4'
GT_dataset_folder = '../datasets/Vid4/GT'
else:
if stage == 1:
test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode)
else:
test_dataset_folder = '../results/REDS-EDVR_REDS_SR_L_flipx4'
print('You should modify the test_dataset_folder path for stage 2')
GT_dataset_folder = '../datasets/REDS4/GT'
#### evaluation
crop_border = 0
border_frame = N_in // 2 # border frames when evaluate
# temporal padding mode
if data_mode == 'Vid4' or data_mode == 'sharp_bicubic':
padding = 'new_info'
else:
padding = 'replicate'
save_imgs = True
save_folder = '../results/{}'.format(data_mode)
util.mkdirs(save_folder)
util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True)
logger = logging.getLogger('base')
#### log info
logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
logger.info('Padding mode: {}'.format(padding))
logger.info('Model path: {}'.format(model_path))
logger.info('Save images: {}'.format(save_imgs))
logger.info('Flip test: {}'.format(flip_test))
#### set up the models
model.load_state_dict(torch.load(model_path), strict=True)
model.eval()
model = model.to(device)
avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], []
subfolder_name_l = []
subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*')))
subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*')))
# for each subfolder
for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l):
subfolder_name = osp.basename(subfolder)
subfolder_name_l.append(subfolder_name)
save_subfolder = osp.join(save_folder, subfolder_name)
img_path_l = sorted(glob.glob(osp.join(subfolder, '*')))
max_idx = len(img_path_l)
if save_imgs:
util.mkdirs(save_subfolder)
#### read LQ and GT images
imgs_LQ = data_util.read_img_seq(subfolder)
img_GT_l = []
for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))):
img_GT_l.append(data_util.read_img(None, img_GT_path))
avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0
# process each image
for img_idx, img_path in enumerate(img_path_l):
img_name = osp.splitext(osp.basename(img_path))[0]
select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding)
imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).to(device)
if flip_test:
output = util.flipx4_forward(model, imgs_in)
else:
output = util.single_forward(model, imgs_in)
output = util.tensor2img(output.squeeze(0))
if save_imgs:
cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), output)
# calculate PSNR
output = output / 255.
GT = np.copy(img_GT_l[img_idx])
# For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel
if data_mode == 'Vid4': # bgr2y, [0, 1]
GT = data_util.bgr2ycbcr(GT, only_y=True)
output = data_util.bgr2ycbcr(output, only_y=True)
output, GT = util.crop_border([output, GT], crop_border)
crt_psnr = util.calculate_psnr(output * 255, GT * 255)
logger.info('{:3d} - {:25} \tPSNR: {:.6f} dB'.format(img_idx + 1, img_name, crt_psnr))
if img_idx >= border_frame and img_idx < max_idx - border_frame: # center frames
avg_psnr_center += crt_psnr
N_center += 1
else: # border frames
avg_psnr_border += crt_psnr
N_border += 1
avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border)
avg_psnr_center = avg_psnr_center / N_center
avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border
avg_psnr_l.append(avg_psnr)
avg_psnr_center_l.append(avg_psnr_center)
avg_psnr_border_l.append(avg_psnr_border)
logger.info('Folder {} - Average PSNR: {:.6f} dB for {} frames; '
'Center PSNR: {:.6f} dB for {} frames; '
'Border PSNR: {:.6f} dB for {} frames.'.format(subfolder_name, avg_psnr,
(N_center + N_border),
avg_psnr_center, N_center,
avg_psnr_border, N_border))
logger.info('################ Tidy Outputs ################')
for subfolder_name, psnr, psnr_center, psnr_border in zip(subfolder_name_l, avg_psnr_l,
avg_psnr_center_l, avg_psnr_border_l):
logger.info('Folder {} - Average PSNR: {:.6f} dB. '
'Center PSNR: {:.6f} dB. '
'Border PSNR: {:.6f} dB.'.format(subfolder_name, psnr, psnr_center,
psnr_border))
logger.info('################ Final Results ################')
logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
logger.info('Padding mode: {}'.format(padding))
logger.info('Model path: {}'.format(model_path))
logger.info('Save images: {}'.format(save_imgs))
logger.info('Flip test: {}'.format(flip_test))
logger.info('Total Average PSNR: {:.6f} dB for {} clips. '
'Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.'.format(
sum(avg_psnr_l) / len(avg_psnr_l), len(subfolder_l),
sum(avg_psnr_center_l) / len(avg_psnr_center_l),
sum(avg_psnr_border_l) / len(avg_psnr_border_l)))
if __name__ == '__main__':
main()
``` |
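`util.calculate_psnr` is not part of this excerpt; a definition consistent with how the script calls it (both arrays rescaled to [0, 255] before the call) would look roughly like this:

```python
import numpy as np

def calculate_psnr(img1: np.ndarray, img2: np.ndarray) -> float:
    # Hypothetical stand-in for utils.util.calculate_psnr, not the repo's actual code.
    # PSNR for 8-bit images; inputs are float arrays in the [0, 255] range.
    mse = np.mean((img1.astype(np.float64) - img2.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 20 * np.log10(255.0 / np.sqrt(mse))
```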
{
"source": "Johnson-yue/PyTorch-StudioGAN",
"score": 2
} |
#### File: PyTorch-StudioGAN/utils/sample.py
```python
import numpy as np
from numpy import random, linalg
from math import sin,cos,sqrt
import random
import torch
import torch.nn.functional as F
def sample_latents(dist, n_samples, noise_dim, truncated_factor=1, n_categories=None, perturb=None, device=torch.device("cpu")):
if n_categories:
y_fake = torch.randint(low=0, high=n_categories, size=(n_samples,), dtype=torch.long, device=device)
else:
y_fake = None
if isinstance(perturb, float) and perturb > 0.0:
if dist == "gaussian":
noise = torch.randn(n_samples, noise_dim, device=device)/truncated_factor
e = perturb*torch.randn(n_samples, noise_dim, device=device)
noise_perturb = noise + e
elif dist == "uniform":
noise = torch.FloatTensor(n_samples, noise_dim).uniform_(-1.0, 1.0).to(device)
e = perturb*torch.FloatTensor(n_samples, noise_dim).uniform_(-1.0, 1.0).to(device)
noise_perturb = noise + e
elif dist == "hyper_sphere":
noise, noise_perturb = random_ball(n_samples, noise_dim, perturb=perturb)
noise, noise_perturb = torch.FloatTensor(noise).to(device), torch.FloatTensor(noise_perturb).to(device)
return noise, y_fake, noise_perturb
else:
if dist == "gaussian":
noise = torch.randn(n_samples, noise_dim, device=device)/truncated_factor
elif dist == "uniform":
noise = torch.FloatTensor(n_samples, noise_dim).uniform_(-1.0, 1.0).to(device)
elif dist == "hyper_sphere":
            noise = torch.FloatTensor(random_ball(n_samples, noise_dim, perturb=perturb)).to(device)
return noise, y_fake
def random_ball(batch_size, z_dim, perturb=False):
if perturb:
normal = np.random.normal(size=(z_dim, batch_size))
random_directions = normal/linalg.norm(normal, axis=0)
random_radii = random.random(batch_size) ** (1/z_dim)
z = 1.0 * (random_directions * random_radii).T
normal_perturb = normal + 0.05*np.random.normal(size=(z_dim, batch_size))
perturb_random_directions = normal_perturb/linalg.norm(normal_perturb, axis=0)
perturb_random_radii = random.random(batch_size) ** (1/z_dim)
z_perturb = 1.0 * (perturb_random_directions * perturb_random_radii).T
return z, z_perturb
else:
normal = np.random.normal(size=(z_dim, batch_size))
random_directions = normal/linalg.norm(normal, axis=0)
random_radii = random.random(batch_size) ** (1/z_dim)
z = 1.0 * (random_directions * random_radii).T
return z
def gaussian_mixture(batch_size, n_labels, n_dim, x_var=0.5, y_var=0.1):
label_indices = np.random.randint(0, n_labels, size=[batch_size])
if n_dim != 2:
raise Exception("n_dim must be 2.")
def sample(x, y, label, n_labels):
shift = 1.4
r = 2.0 * np.pi / float(n_labels) * float(label)
new_x = x * cos(r) - y * sin(r)
new_y = x * sin(r) + y * cos(r)
new_x += shift * cos(r)
new_y += shift * sin(r)
return np.array([new_x, new_y]).reshape((2,))
x = np.random.normal(0, x_var, (batch_size, (int)(n_dim/2)))
y = np.random.normal(0, y_var, (batch_size, (int)(n_dim/2)))
z = np.empty((batch_size, n_dim), dtype=np.float32)
for batch in range(batch_size):
for zi in range((int)(n_dim/2)):
if label_indices is not None:
z[batch, zi*2:zi*2+2] = sample(x[batch, zi], y[batch, zi], label_indices[batch], n_labels)
else:
z[batch, zi*2:zi*2+2] = sample(x[batch, zi], y[batch, zi], np.random.randint(0, n_labels), n_labels)
return z, label_indices
def make_mask(labels, n_cls, device):
labels = labels.detach().cpu().numpy()
n_samples = labels.shape[0]
mask_multi = np.zeros([n_cls, n_samples])
for c in range(n_cls):
c_indices = np.where(labels==c)
mask_multi[c, c_indices] = +1
mask_multi = torch.tensor(mask_multi).type(torch.long)
return mask_multi.to(device)
```
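A brief usage sketch for `sample_latents`. The Gaussian and uniform branches return torch tensors directly; the hyper-sphere branch builds its samples with NumPy via `random_ball` before they are converted to tensors. The sizes and class count below are arbitrary.

```python
import torch

# Draw 8 Gaussian latents of dimension 128 with random labels for a 10-class GAN.
noise, y_fake = sample_latents("gaussian", 8, 128, truncated_factor=1,
                               n_categories=10, perturb=None,
                               device=torch.device("cpu"))
print(noise.shape, y_fake.shape)   # torch.Size([8, 128]) torch.Size([8])

# A positive float `perturb` additionally returns the perturbed latents.
noise, y_fake, noise_eps = sample_latents("gaussian", 8, 128, perturb=0.1)
```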
#### File: PyTorch-StudioGAN/utils/utils.py
```python
import numpy as np
import random
import torch
import torch.nn.functional as F
from torch.nn import DataParallel
from datetime import datetime
# fix python, numpy, torch seed
def fix_all_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def count_parameters(module):
return 'Number of parameters: {}'.format(sum([p.data.nelement() for p in module.parameters()]))
def elapsed_time(start_time):
now = datetime.now()
elapsed = now - start_time
return str(elapsed).split('.')[0] # remove milliseconds
def reshape_weight_to_matrix(weight):
weight_mat = weight
    dim = 0
if dim != 0:
# permute dim to front
weight_mat = weight_mat.permute(dim, *[d for d in range(weight_mat.dim()) if d != dim])
height = weight_mat.size(0)
return weight_mat.reshape(height, -1)
def find_string(list_, string):
for i, s in enumerate(list_):
if string == s:
return i
def calculate_all_sn(model):
sigmas = {}
with torch.no_grad():
for name, param in model.named_parameters():
if "weight" in name and "bn" not in name and "shared" not in name and "deconv" not in name:
if "blocks" in name:
splited_name = name.split('.')
idx = find_string(splited_name, 'blocks')
block_idx = int(splited_name[int(idx+1)])
module_idx = int(splited_name[int(idx+2)])
operation_name = splited_name[idx+3]
if isinstance(model, DataParallel):
operations = model.module.blocks[block_idx][module_idx]
else:
operations = model.blocks[block_idx][module_idx]
operation = getattr(operations, operation_name)
else:
splited_name = name.split('.')
idx = find_string(splited_name, 'module') if isinstance(model, DataParallel) else -1
operation_name = splited_name[idx+1]
if isinstance(model, DataParallel):
operation = getattr(model.module, operation_name)
else:
operation = getattr(model, operation_name)
weight_orig = reshape_weight_to_matrix(operation.weight_orig)
weight_u = operation.weight_u
weight_v = operation.weight_v
sigmas[name] = torch.dot(weight_u, torch.mv(weight_orig, weight_v))
return sigmas
``` |
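`calculate_all_sn` reads the `weight_orig`, `weight_u` and `weight_v` attributes that `torch.nn.utils.spectral_norm` attaches to a wrapped layer and recomputes the spectral-norm estimate sigma = dot(u, W v). A minimal sketch on a single spectrally normalized linear layer (assuming a PyTorch version that registers `weight_v`):

```python
import torch
import torch.nn as nn

layer = nn.utils.spectral_norm(nn.Linear(8, 4))
_ = layer(torch.randn(2, 8))   # a forward pass runs one power iteration, updating u and v

weight_mat = reshape_weight_to_matrix(layer.weight_orig)
sigma = torch.dot(layer.weight_u, torch.mv(weight_mat, layer.weight_v))
print(sigma.item())            # estimate of the largest singular value of the unnormalized weight
```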
{
"source": "Johnson-yue/TensorMONK",
"score": 3
} |
#### File: tensormonk/activations/activations.py
```python
__all__ = ["Activations", "maxout", "mish", "squash", "swish"]
import torch
import torch.nn as nn
import torch.nn.functional as F
def squash(tensor: torch.Tensor) -> torch.Tensor:
if not tensor.dim() == 3:
raise ValueError("Squash requires 3D tensors: {}".format(
tensor.dim()))
sum_squares = (tensor**2).sum(2, True)
return (sum_squares/(1+sum_squares)) * tensor / sum_squares.pow(0.5)
def swish(tensor: torch.Tensor) -> torch.Tensor:
return tensor * torch.sigmoid(tensor)
def maxout(tensor: torch.Tensor) -> torch.Tensor:
if not tensor.size(1) % 2 == 0:
raise ValueError("MaxOut: tensor.size(1) must be divisible by n_splits"
": {}".format(tensor.size(1)))
return torch.max(*tensor.split(tensor.size(1)//2, 1))
def mish(tensor: torch.Tensor) -> torch.Tensor:
return tensor * tensor.exp().add(1).log().tanh()
class Activations(nn.Module):
r""" All the usual activations along with maxout, relu + maxout and swish.
MaxOut (maxo) - https://arxiv.org/pdf/1302.4389.pdf
Swish - https://arxiv.org/pdf/1710.05941v1.pdf
Mish - https://arxiv.org/pdf/1908.08681v1.pdf
Args:
tensor_size: shape of tensor in BCHW
(None/any integer >0, channels, height, width)
activation: relu/relu6/lklu/elu/prelu/tanh/sigm/maxo/rmxo/swish/mish,
default=relu
"""
def __init__(self, tensor_size: tuple, activation: str = "relu", **kwargs):
super(Activations, self).__init__()
if activation is not None:
activation = activation.lower()
self.t_size = tensor_size
self.activation = activation
self.function = None
if activation not in self.available():
raise ValueError("activation: Invalid activation " +
"/".join(self.available()) +
": {}".format(activation))
self.function = getattr(self, "_" + activation)
if activation == "prelu":
            self.weight = nn.Parameter(torch.ones(1) * 0.1)
if activation == "lklu":
self.negslope = kwargs["lklu_negslope"] if "lklu_negslope" in \
kwargs.keys() else 0.01
if activation == "elu":
self.alpha = kwargs["elu_alpha"] if "elu_alpha" in \
kwargs.keys() else 1.0
self.tensor_size = tensor_size if activation not in ["maxo", "rmxo"] \
else (None, tensor_size[1]//2, tensor_size[2], tensor_size[3])
def forward(self, tensor: torch.Tensor):
if self.function is None:
return tensor
return self.function(tensor)
def _relu(self, tensor):
return F.relu(tensor)
def _relu6(self, tensor):
return F.relu6(tensor)
def _lklu(self, tensor):
return F.leaky_relu(tensor, self.negslope)
def _elu(self, tensor):
return F.elu(tensor, self.alpha)
def _prelu(self, tensor):
return F.prelu(tensor, self.weight)
def _tanh(self, tensor):
return torch.tanh(tensor)
def _sigm(self, tensor):
return torch.sigmoid(tensor)
def _maxo(self, tensor):
return maxout(tensor)
def _rmxo(self, tensor):
return maxout(F.relu(tensor))
def _swish(self, tensor):
return swish(tensor)
def _mish(self, tensor):
return mish(tensor)
def _squash(self, tensor):
return squash(tensor)
def __repr__(self):
return self.activation
@staticmethod
def available():
return ["elu", "lklu", "maxo", "mish", "prelu", "relu", "relu6",
"rmxo", "sigm", "squash", "swish", "tanh"]
def flops(self):
import numpy as np
flops = 0
numel = np.prod(self.t_size[1:])
if self.activation == "elu":
# max(0, x) + min(0, alpha*(exp(x)-1))
flops = numel * 5
elif self.activation in ("lklu", "prelu", "sigm"):
flops = numel * 3
elif self.activation == "maxo":
# torch.max(*x.split(x.size(1)//2, 1))
flops = numel / 2
elif self.activation == "mish":
# x * tanh(ln(1 + e^x))
flops = numel * 5
elif self.activation == "relu":
# max(0, x)
flops = numel
elif self.activation == "relu6":
# min(6, max(0, x))
flops = numel * 2
elif self.activation == "rmxo":
# maxo(relu(x))
flops = int(numel * 1.5)
elif self.activation == "squash":
# sum_squares = (tensor**2).sum(2, True)
# (sum_squares/(1+sum_squares)) * tensor / sum_squares.pow(0.5)
flops = numel*4 + self.t_size[1]*2
elif self.activation == "swish":
# x * sigm(x)
flops = numel * 4
elif self.activation == "tanh":
# (exp(x) - exp(-x)) / (exp(x) + exp(-x))
flops = numel * 7
return flops
# Activations.available()
# x = torch.rand(3, 4, 10, 10).mul(2).add(-1)
# test = Activations((None, 4, 10, 10), "prelu")
# test(x).min()
```
#### File: tensormonk/loss/categorical.py
```python
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .center_function import CenterFunction
from .utils import compute_n_embedding, compute_top15, one_hot_idx
from ..utils import Measures
import warnings
class Categorical(nn.Module):
r""" Categorical with weight's to convert embedding to n_labels
categorical responses.
Args:
tensor_size (int/list/tuple, required)
Shape of tensor in (None/any integer >0, channels, height, width)
or (None/any integer >0, in_features) or in_features
n_labels (int, required)
Number of labels
loss_type (str, default="entr")
"entr" / "smax"
log_softmax + negative log likelihood
"tsmax" / "taylor_smax"
taylor series + log_softmax + negative log likelihood
(https://arxiv.org/pdf/1511.05042.pdf)
"aaml" / "angular_margin"
additive angular margin loss (ArcFace)
(https://arxiv.org/pdf/1801.07698.pdf eq-3)
"lmcl" / "large_margin"
large margin cosine loss (CosFace)
(https://arxiv.org/pdf/1801.09414.pdf eq-4)
"lmgm" / "gaussian_mixture"
large margin gaussian mixture loss
https://arxiv.org/pdf/1803.02988.pdf eq-17
"snnl"
soft nearest neighbor loss
https://arxiv.org/pdf/1902.01889.pdf eq-1
measure (str, default="dot")
Options = "cosine" / "dot" / "euclidean". Large angular margin/
large margin cosine loss only use cosine. Gaussian mixture loss
only use "cosine" / "euclidean".
add_center (bool, default=False)
Adds center loss to final loss -
https://ydwen.github.io/papers/WenECCV16.pdf
center_alpha (float, default = 0.01)
Alpha for center loss.
center_scale (float, default=0.5)
Scale for center loss.
add_focal (bool, default=False)
Enables focal loss - https://arxiv.org/pdf/1708.02002.pdf
focal_alpha (float/Tensor, default=0.5)
Alpha for focal loss. Actual focal loss implementation requires
alpha as a tensor of length n_labels that contains class imbalance.
focal_gamma (float, default=2)
Gamma for focal loss, default = 2.
add_hard_negative (bool, default=False)
Enables hard negative mining
hard_negative_p (float, default=0.2)
Probability of hard negatives retained.
lmgm_alpha (float, default=0.01)
Alpha in eq-17.
lmgm_coefficient (float, default=0.1)
lambda in eq-17
snnl_measure (str, default="euclidean")
Squared euclidean or cosine, when cosine the score are subtracted
by 1
snnl_alpha (float, default=0.01)
            Alpha in eq-2, hyper-parameter multiplied to soft nearest neighbor
loss before adding to cross entropy
snnl_temperature (float, default=100)
Temperature in eq-1. When None, it is a trainable parameter with a
            default temperature of 10.
scale (float, default=10)
scale, s, for large angular margin/large margin cosine loss
margin (float, default=0.3)
margin, m, for large angular margin/large margin cosine loss
Return:
loss, (top1, top5)
"""
def __init__(self,
tensor_size: tuple,
n_labels: int,
loss_type: str = "entr",
measure: str = "dot",
add_center: bool = False,
center_alpha: float = 0.01,
center_scale: float = 0.5,
add_focal: bool = False,
focal_alpha: (float, Tensor) = 0.5,
focal_gamma: float = 2.,
add_hard_negative: bool = False,
hard_negative_p: float = 0.2,
lmgm_alpha: float = 0.01,
lmgm_coefficient: float = 0.1,
snnl_measure: str = "cosine",
snnl_alpha: float = 0.01,
snnl_temperature: float = 100.,
scale: float = 10.,
margin: float = 0.3,
**kwargs):
super(Categorical, self).__init__()
METHODS = ("entr", "smax",
"tsmax", "taylor_smax",
"aaml", "angular_margin",
"lmcl", "large_margin",
"lmgm", "gaussian_mixture",
"snnl", "soft_nn")
MEASURES = ("cosine", "dot", "euclidean")
# Checks
n_embedding = compute_n_embedding(tensor_size)
if not isinstance(n_labels, int):
raise TypeError("Categorical: n_labels must be int: "
"{}".format(type(n_labels).__name__))
self.n_labels = n_labels
if "type" in kwargs.keys():
loss_type = kwargs["type"]
warnings.warn("Categorical: 'type' is deprecated, use 'loss_type' "
"instead", DeprecationWarning)
if not isinstance(loss_type, str):
raise TypeError("Categorical: loss_type must be str: "
"{}".format(type(loss_type).__name__))
self.loss_type = loss_type.lower()
if self.loss_type not in METHODS:
raise ValueError("Categorical :: loss_type != " +
"/".join(METHODS) +
" : {}".format(self.loss_type))
if not isinstance(measure, str):
raise TypeError("Categorical: measure must be str: "
"{}".format(type(measure).__name__))
self.measure = measure.lower()
if self.measure not in MEASURES:
raise ValueError("Categorical: measure != " +
"/".join(MEASURES) +
"{}".format(self.measure))
# loss function
if self.loss_type in ("entr", "smax", "tsmax", "taylor_smax"):
self.loss_function = self._cross_entropy
elif self.loss_type in ("aaml", "angular_margin"):
if not isinstance(scale, (int, float)):
raise TypeError("Categorical: scale for aaml/angular_margin "
"must be int/float")
if not isinstance(margin, float):
raise TypeError("Categorical: margin for aaml/angular_margin "
"must be float")
self.scale = scale
self.margin = margin
self.loss_function = self._angular_margin
elif self.loss_type in ("lmgm", "gaussian_mixture"):
if not (isinstance(lmgm_alpha, float) and
isinstance(lmgm_coefficient, float)):
raise TypeError("Categorical: lmgm_alpha/lmgm_coefficient"
"/both is not float")
if self.loss_type in ("lmgm", "gaussian_mixture"):
if self.measure == "dot":
raise ValueError("Categorical: measure must be "
"cosine/euclidean for loss_type=lmgm")
self.lmgm_alpha = lmgm_alpha
self.lmgm_coefficient = lmgm_coefficient
self.loss_function = self._gaussian_mixture
elif self.loss_type in ("lmcl", "large_margin"):
if not isinstance(scale, (int, float)):
raise TypeError("Categorical: scale for lmcl/large_margin "
"must be int/float")
if not isinstance(margin, float):
raise TypeError("Categorical: margin for lmcl/large_margin "
"must be float")
self.scale = scale
self.margin = margin
self.loss_function = self._large_margin
elif self.loss_type in ("snnl", "soft_nn"):
self.snnl_measure = snnl_measure.lower()
if not isinstance(self.snnl_measure, str):
raise TypeError("Categorical: snnl_measure must be str")
if self.snnl_measure not in ("cosine", "euclidean"):
raise ValueError("Categorical: snnl_measure must be "
"'cosine'/'euclidean'")
if not isinstance(snnl_alpha, float):
raise TypeError("Categorical: snnl_alpha must be float")
if snnl_temperature is None:
self.temperature = nn.Parameter(torch.zeros(1).add(10))
else:
if not isinstance(snnl_temperature, (int, float)):
raise TypeError("Categorical: snnl_temperature must be "
"int/float")
self.temperature = snnl_temperature
self.snnl_measure = snnl_measure
self.snnl_alpha = snnl_alpha
self.loss_function = self._soft_nn
self.weight = nn.Parameter(
F.normalize(torch.randn(n_labels, n_embedding), 2, 1))
# Center loss
if "center" in kwargs.keys():
add_center = kwargs["center"]
warnings.warn("Categorical: 'center' is deprecated, use "
"'add_center' instead", DeprecationWarning)
if not isinstance(add_center, bool):
raise TypeError("Categorical: add_center must be bool: "
"{}".format(type(add_center).__name__))
self.add_center = add_center
if self.add_center:
if not (isinstance(center_alpha, float) and
isinstance(center_scale, float)):
raise TypeError("Categorical: center_alpha/center_scale/both "
"is not float")
self.register_buffer("center_alpha",
torch.Tensor([center_alpha]).sum())
self.register_buffer("center_scale",
torch.Tensor([center_scale]).sum())
self.centers = nn.Parameter(
F.normalize(torch.randn(n_labels, n_embedding), p=2, dim=1))
self.center_function = CenterFunction.apply
# Focal loss
if not isinstance(add_focal, bool):
raise TypeError("Categorical: add_focal must be bool: "
"{}".format(type(add_focal).__name__))
self.add_focal = add_focal
if self.add_focal:
if not isinstance(focal_alpha, (float, Tensor)):
raise TypeError("Categorical: focal_alpha must be float/"
"torch.Tensor")
if isinstance(focal_alpha, Tensor):
if focal_alpha.numel() != n_labels:
raise ValueError("Categorical: focal_alpha.numel() "
"!= n_labels")
if not (isinstance(focal_alpha, (float, Tensor)) and
isinstance(focal_gamma, float)):
raise TypeError("Categorical: focal_alpha/focal_gamma/both "
"is not float")
if isinstance(focal_alpha, Tensor):
self.register_buffer("focal_alpha", focal_alpha)
else:
self.focal_alpha = focal_alpha
self.focal_gamma = focal_gamma
# Hard negative mining
if not isinstance(add_hard_negative, bool):
raise TypeError("Categorical: add_hard_negative must be bool: "
"{}".format(type(add_hard_negative).__name__))
self.add_hard_negative = add_hard_negative
if self.add_focal and self.add_hard_negative:
warnings.warn("Categorical: Both focal and hard negative mining "
"can not be True, add_hard_negative is set to "
"False")
self.add_hard_negative = False
if self.add_hard_negative:
if not isinstance(hard_negative_p, float):
raise TypeError("Categorical: hard_negative_p is not float")
if not (0 < hard_negative_p < 1):
raise ValueError("Categorical: hard_negative_p must be "
"> 0 & < 1: {}".format(hard_negative_p))
self.hard_negative_p = hard_negative_p
self.tensor_size = (1, )
def forward(self, tensor: Tensor, targets: Tensor):
loss, (top1, top5) = self.loss_function(tensor, targets)
if self.add_center:
center_loss = self.center_function(
tensor, targets.long(), self.centers, self.center_alpha,
self.center_scale)
loss = loss + center_loss
return loss, (top1, top5)
def _predictions(self, tensor: Tensor, measure: str = "dot") -> Tensor:
if measure == "euclidean":
# TODO: euclidean computation is not scalable to larger n_labels
# euclidean is squared euclidean for stability
return Measures.sqr_euclidean_pairwise(tensor, self.weight)
elif measure == "cosine":
return Measures.cosine_pairwise(tensor, self.weight)
# default is "dot" product
return tensor.mm(self.weight.t())
def _cross_entropy(self, tensor: Tensor, targets: Tensor,
is_reponses: bool = False):
r""" Taylor softmax, and softmax/cross entropy """
if is_reponses:
# used by other loss functions (angular_margin/gaussian_mixture/
# large_margin)
responses = tensor
else:
responses = self._predictions(tensor, self.measure)
if self.measure == "euclidean":
responses.neg_()
(top1, top5) = compute_top15(responses.data, targets.data)
if self.loss_type == "tsmax": # Taylor series
responses = 1 + responses + 0.5*(responses**2)
if self.add_hard_negative:
responses, targets = self._hard_negative_mining(responses, targets)
if self.add_focal:
""" The loss function is a dynamically scaled cross entropy loss,
where the scaling factor decays to zero as confidence in the
correct class increases. """
loss = self._focal_loss(responses, targets)
else:
loss = F.nll_loss(responses.log_softmax(1), targets)
if is_reponses:
return loss
return loss, (top1, top5)
def _angular_margin(self, tensor: Tensor, targets: Tensor):
r""" Additive angular margin loss or ArcFace """
cos_theta = self._predictions(tensor, "cosine")
(top1, top5) = compute_top15(cos_theta.data, targets.data)
m, s = min(0.5, self.margin), max(self.scale, 2.)
true_idx = one_hot_idx(targets, self.n_labels)
cos_theta = cos_theta.view(-1)
if torch.__version__.startswith("1.2."): # pytorch 1.2 inplace issue
with torch.no_grad():
true_tensor = torch.zeros_like(cos_theta)
true_tensor[true_idx] = 1
cos_theta = cos_theta * (1 - true_tensor) + \
((cos_theta).mul(math.cos(m)) -
(cos_theta).pow(2).neg().add(1).pow(0.5).mul(math.sin(m))) * \
true_tensor
else:
cos_theta[true_idx] = cos_theta[true_idx].mul(math.cos(m)) - \
cos_theta[true_idx].pow(2).neg().add(1).pow(0.5).mul(
math.sin(m))
cos_theta = (cos_theta * s).view(tensor.size(0), -1)
return self._cross_entropy(cos_theta, targets, True), (top1, top5)
def _gaussian_mixture(self, tensor: Tensor, targets: Tensor):
""" Large margin gaussian mixture or lmgm """
# TODO euclidean computation is not scalable to larger n_labels
# mahalanobis with identity covariance per paper = squared
# euclidean -- does euclidean for stability
# Switch to measure="cosine" if you have out of memory issues
responses = self._predictions(tensor, self.measure)
if self.measure != "euclidean": # invert when not euclidean
responses.neg_().add_(1)
(top1, top5) = compute_top15(responses.data.neg(), targets.data)
true_idx = one_hot_idx(targets, self.n_labels)
responses = responses.view(-1)
loss = self.lmgm_coefficient * (responses[true_idx]).mean()
if torch.__version__.startswith("1.2."): # pytorch 1.2 inplace issue
with torch.no_grad():
true_tensor = torch.zeros_like(responses)
true_tensor[true_idx] = 1
responses = responses * (1 - true_tensor) + \
responses * true_tensor * (1 + self.lmgm_alpha)
else:
responses[true_idx] = responses[true_idx] * (1 + self.lmgm_alpha)
loss = loss + self._cross_entropy(-responses.view(tensor.size(0), -1),
targets, True)
return loss, (top1, top5)
def _large_margin(self, tensor: Tensor, targets: Tensor):
r""" Large margin cosine loss or CosFace """
cos_theta = self._predictions(tensor, "cosine")
(top1, top5) = compute_top15(cos_theta.data, targets.data)
m, s = min(0.5, self.margin), max(self.scale, 2.)
true_idx = one_hot_idx(targets, self.n_labels)
cos_theta = cos_theta.view(-1)
if torch.__version__.startswith("1.2."): # pytorch 1.2 inplace issue
with torch.no_grad():
ms = torch.zeros_like(cos_theta)
ms[true_idx] = m
cos_theta = cos_theta - ms
else:
cos_theta[true_idx] = cos_theta[true_idx] - m
cos_theta = (cos_theta * s).view(tensor.size(0), -1)
return self._cross_entropy(cos_theta, targets, True), (top1, top5)
def _soft_nn(self, tensor: Tensor, targets: Tensor):
r""" Soft nearest neighbor loss """
loss, (top1, top5) = self._cross_entropy(tensor, targets)
# soft nearest -- requires multiple samples per label in a batch
same_label = targets.data.view(-1, 1).eq(targets.data.view(1, -1))
valid = torch.eye(targets.numel()).to(targets.device).eq(0)
if any((same_label * valid).sum(1)):
# soft nearest neighbor loss is valid
if self.snnl_measure == "cosine":
distance = 1 - Measures.cosine_pairwise(tensor, tensor)
else:
distance = Measures.sqr_euclidean_pairwise(tensor, tensor)
num = distance * (same_label * valid).to(distance.dtype).detach()
num = (num).div(self.temperature).neg().exp().sum(1)
den = distance * valid.to(distance.dtype).detach()
den = (den).div(self.temperature).neg().exp().sum(1)
snnl = (num / den.add(1e-6)).log().mean() # eq - 1
loss = loss + self.snnl_alpha * snnl # eq - 2
return loss, (top1, top5)
def _hard_negative_mining(self, responses: Tensor, targets: Tensor):
# get class probabilities and find n hard negatives
p = responses.softmax(1)
# zero out the genuine to find hard negatives
genuine_idx = one_hot_idx(targets, self.n_labels)
p = p.view(-1).contiguous()
p[genuine_idx] = 0
p = p.view(-1, self.n_labels)
n = max(1, int(self.n_labels * self.hard_negative_p))
hard_negatives = torch.argsort(p.detach(), dim=1)[:, -n:]
# actual class prediction and n hard_negatives are computed
new_responses = torch.cat(
(responses.gather(1, targets.view(-1, 1)),
responses.gather(1, hard_negatives.view(-1, n))), 1)
# the new target is always zero given the above concatenate
new_targets = targets.mul(0)
return new_responses, new_targets
def _focal_loss(self, responses: Tensor, targets: Tensor) -> Tensor:
# https://arxiv.org/pdf/1708.02002.pdf :: eq-5
n, n_labels = responses.shape
p = responses.softmax(1)
# hot_targets = one_hot(targets, responses.shape[1])
# pt = p * hot_targets + (1 - p) * (1 - hot_targets)
# zero out the genuine to find hard negatives
genuine_idx = one_hot_idx(targets, n_labels)
p = p.view(-1)
pt_1st_term = p.mul(0)
pt_1st_term[genuine_idx] = p[genuine_idx]
pt_2nd_term = 1 - p
pt_2nd_term[genuine_idx] = 0
pt = pt_1st_term.view(n, -1) + pt_2nd_term.view(n, -1)
if isinstance(self.focal_alpha, Tensor):
# alpha is Tensor with per label balance
return (- self.focal_alpha.view(1, -1) *
(1-pt).pow(self.focal_gamma) * pt.log()).sum(1).mean()
return (- self.focal_alpha * (1-pt).pow(self.focal_gamma) *
pt.log()).sum(1).mean()
# from tensormonk.loss.utils import (compute_n_embedding, compute_top15,
# one_hot, one_hot_idx)
# from tensormonk.utils import Measures
# from tensormonk.loss.center_function import CenterFunction
# tensor = torch.rand(3, 256)
# targets = torch.tensor([1, 3, 6])
# test = Categorical(256, 10, "aaml", measure="cosine")
# test(tensor, targets)[0].backward()
```
#### File: tensormonk/loss/utils.py
```python
__all__ = ["compute_n_embedding", "compute_top15", "one_hot", "one_hot_idx"]
import torch
import numpy as np
def compute_n_embedding(tensor_size):
if isinstance(tensor_size, list) or isinstance(tensor_size, tuple):
if len(tensor_size) > 1: # batch size is not required
tensor_size = np.prod(tensor_size[1:])
else:
tensor_size = tensor_size[0]
return int(tensor_size)
def compute_top15(responses, targets):
predicted = responses.topk(5, 1, True, True)[1]
predicted = predicted.t()
correct = predicted.eq(targets.view(1, -1).expand_as(predicted))
top1 = correct[:1].view(-1).float().sum().mul_(100.0 / responses.size(0))
top5 = correct[:5].view(-1).float().sum().mul_(100.0 / responses.size(0))
return top1, top5
def one_hot(targets, n_labels):
identity = torch.eye(n_labels, dtype=torch.int8).to(targets.device)
onehot_targets = identity.index_select(dim=0,
index=targets.long().view(-1))
return onehot_targets.requires_grad_(False)
def one_hot_idx(targets, n_labels):
targets = targets.view(-1)
return targets + \
torch.arange(0, targets.size(0)).to(targets.device) * n_labels
```
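`one_hot_idx` turns per-sample targets into indices into a flattened `(batch, n_labels)` response matrix, which is how `Categorical` above picks out the genuine-class entries. A tiny example (the values are arbitrary):

```python
import torch

targets = torch.tensor([1, 3, 6])
idx = one_hot_idx(targets, 10)
print(idx)                           # tensor([ 1, 13, 26])

responses = torch.randn(3, 10)
genuine = responses.view(-1)[idx]    # equivalent to responses[torch.arange(3), targets]
```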
#### File: tensormonk/utils/pillow_utils.py
```python
import torch
import numpy as np
import PIL.Image as ImPIL
import random
from PIL import ImageDraw, ImageOps
from torchvision import transforms
class PillowUtils:
tensor_to_pil = transforms.ToPILImage()
@staticmethod
def to_pil(image, t_size: tuple = None, ltrb_boxes: np.ndarray = None):
r"""Converts file_name or ndarray or 3D torch.Tensor to pillow image.
Adjusts the ltrb_boxes when ltrb_boxes are provided along with t_size.
Args:
image (str/np.ndarray/torch.Tensor):
input
t_size (tuple, optional)
BCHW (Ex: (None, 3, 60, 60)) used to convert to grey scale or
resize
ltrb_boxes (np.ndarray, optional)
Must be pixel locations in (left, top, right, bottom).
"""
if ImPIL.isImageType(image):
o = image
elif isinstance(image, str):
o = ImPIL.open(image).convert("RGB")
elif isinstance(image, np.ndarray):
o = ImPIL.fromarray(image)
elif isinstance(image, torch.Tensor):
o = PillowUtils.tensor_to_pil(image)
else:
raise TypeError("to_pil: image must be str/np.ndarray/torch.Tensor"
": {}".format(type(image).__name__))
if t_size is not None and len(t_size) == 4:
w, h = o.size
if t_size[1] == 1:
o = o.convert("L")
if not (t_size[2] == w and t_size[3] == h):
o = o.resize((t_size[3], t_size[2]), ImPIL.BILINEAR)
if ltrb_boxes is not None:
ltrb_boxes[:, 0::2] *= t_size[3] / w
ltrb_boxes[:, 1::2] *= t_size[2] / h
return o, ltrb_boxes
return o
@staticmethod
def random_pad(image: ImPIL.Image,
ltrb_boxes: np.ndarray = None,
points: np.ndarray = None,
pad: float = 0.36, fill: int = 0):
r"""Does random padding with fill value. When ltrb_boxes and/or points
are not None, the boxes and/or points are adjusted to new image.
Args:
image ({str/np.ndarray/torch.Tensor}): input
ltrb_boxes (np.ndarray, optional): Must be pixel locations in
(left, top, right, bottom).
points (np.ndarray, optional): (x, y) locations of landmarks within
a box. Expects a 3D-array with points.shape[0] = ltrb.shape[0]
and points.shape[2] = 2
pad (float, optional): (w + h) / 2 * pad is the max padding, must
                be in the range of 0-1, default=0.36
fill (int, optional): used to fill the extra region, default=0
Return:
(image, ltrb_boxes) or image
"""
# random padding
w, h = image.size
in_pixels = int((w + h)*0.5 * pad * random.random())
image = ImageOps.expand(image, border=in_pixels, fill=fill)
if ltrb_boxes is None:
return image
ltrb_boxes += in_pixels
if points is None:
return image, ltrb_boxes
points += in_pixels
return image, ltrb_boxes, points
@staticmethod
def random_crop(image: ImPIL.Image, ltrb_boxes: np.ndarray = None,
retain: float = 0.5, maintain_aspect_ratio: bool = False):
"""
Does random crop and adjusts the boxes when not None (crops are
limited such that the objects are within the cropped image)!
Args:
image (pil-image): pillow input image
ltrb_boxes (np.ndarray, optional): object locations in ltrb format.
Requires 2D-array, with rows of (left, top, right, bottom) in
pixels.
            retain (float, optional): retain percentage (0.2-0.8) - ignored when
boxes is not None. default=0.5
maintain_aspect_ratio (bool, optional): Retains aspect ratio when
True
Return:
image, boxes
"""
w, h = image.size
x_, y_, _x, _y = np.random.rand(4).tolist()
if ltrb_boxes is None:
retain = max(0.2, retain)
retain = min(1.0, retain)
x_, _x = int(w * x_*(1-retain)*.5), int(w * (1 - _x*(1-retain)*.5))
y_, _y = int(h * y_*(1-retain)*.5), int(h * (1 - _y*(1-retain)*.5))
else:
x_ = int(x_*ltrb_boxes[:, 0].min())
y_ = int(y_*ltrb_boxes[:, 1].min())
_x = w - int(_x * (w - ltrb_boxes[:, 2].max()))
_y = h - int(_y * (h - ltrb_boxes[:, 3].max()))
if maintain_aspect_ratio and (0.9 < (_x - x_)/(_y - y_) < 1.1):
if (_x - x_)/(_y - y_) > 1:
extra = (_x - x_) - (_y - y_)
y_ -= extra//2
_y += extra//2
else:
extra = (_y - y_) - (_x - x_)
x_ -= extra//2
_x += extra//2
image = image.crop((x_, y_, _x, _y))
if ltrb_boxes is None:
return image
ltrb_boxes[:, 0::2] -= x_
ltrb_boxes[:, 1::2] -= y_
return image, ltrb_boxes
@staticmethod
def extend_random_crop(image, labels, ltrb, points=None,
osize: tuple = (320, 320),
min_box_side: int = 30,
ignore_intersection: tuple = (0.5, 0.9),
aspect_ratio_bounds: tuple = (0.5, 2)):
r"""Does random crop and adjusts the boxes while maintaining minimum
box size. When not None, the points (xy locations within a bounding
box) are adjusted.
Args:
image (pil-image): pillow input image
labels (np.ndarray): labels of each box
ltrb (np.ndarray): object locations in ltrb format.
Requires 2D-array, with rows of (left, top, right, bottom) in
pixels.
points (np.ndarray, optional): (x, y) locations of landmarks within
a box. Expects a 3D-array with points.shape[0] = ltrb.shape[0]
and points.shape[2] = 2
osize (tuple/list): Output image size (width, height)
min_box_side (int): Minimum size of the box predicted by the model.
Default = 30 -- SSD minimum size
ignore_intersection (tuple/list of floats): avoids objects within
the intersection range in the final crop.
aspect_ratio_bounds (tuple/list of floats): allowed crop ratios
given an image
Return:
image, labels, ltrb, points
** requires some speed-up
"""
valid_points = None
# filter boxes with negative width -- not usual but a safe check
_valid = np.stack((ltrb[:, 2] - ltrb[:, 0], ltrb[:, 3] - ltrb[:, 1]))
_valid = _valid.min(0) > 2
labels, ltrb, points = labels[_valid], ltrb[_valid], points[_valid]
w, h = image.size
# minimum ltrb side on actual image
mbox = min((ltrb[:, 3] - ltrb[:, 1]).min(),
(ltrb[:, 2] - ltrb[:, 0]).min())
# min & max possible crop size to maintain min_box_side
mincw = int(mbox*1.1)
maxcw = int(min(mincw * min(osize) / min_box_side, min(w, h)))
if mincw > maxcw:
mincw = maxcw - 1
# random width and height given all the above conditions
nw = random.randint(mincw, maxcw)
nh = random.randint(int(nw*aspect_ratio_bounds[0]),
int(nw*aspect_ratio_bounds[1]))
nh = min(max(nh, int(mbox*1.1)), h)
# find all possible boxes, given nw and nh
all_ls, all_ts = np.arange(0, w-nw, 10), np.arange(0, h-nh, 10)
all_ls = all_ls.repeat(len(all_ts))
all_ts = np.tile(all_ts[None, :],
(len(np.arange(0, w-nw, 10)), 1)).reshape(-1)
possible = np.concatenate((all_ls[None, ], all_ts[None, ])).T
possible = np.concatenate([possible[:, [0]],
possible[:, [1]],
possible[:, [0]]+nw,
possible[:, [1]]+nh], 1)
# intersection in percentage to validate all possible boxes
lt = np.maximum(ltrb[:, :2][:, np.newaxis],
possible[:, :2][np.newaxis, :])
rb = np.minimum(ltrb[:, 2:][:, np.newaxis],
possible[:, 2:][np.newaxis, :])
intersection = np.multiply(*np.split(np.clip(rb - lt, 0, None), 2, 2))
intersection = intersection.squeeze(2)
area = ((ltrb[:, 2] - ltrb[:, 0]) * (ltrb[:, 3] - ltrb[:, 1]))
intersection = intersection / area[:, None]
idx = np.where((intersection > ignore_intersection[1]).sum(0))[0]
idx = [x for x in idx
if not ((intersection[:, x] > ignore_intersection[0]) *
(intersection[:, x] < ignore_intersection[1])).any()]
if len(idx) > 0:
# randomly pick one valid possible box
pick = random.randint(0, len(idx)-1)
crop = possible[idx[pick]]
valid = intersection[:, idx[pick]] > ignore_intersection[1]
valid_ltrb = ltrb[valid].copy()
if points is not None:
valid_points = points[valid].copy()
valid_labels = labels[valid].copy()
else:
# if the above fails -- fall back to a single object
pick = random.randint(0, len(ltrb)-1)
crop = ltrb[pick].copy()
# adjust crop - add some width and some height
rw_ = (crop[2] - crop[0]) * (random.random() * 0.2) + 0.05
_rw = (crop[2] - crop[0]) * (random.random() * 0.2) + 0.05
rh_ = (crop[3] - crop[1]) * (random.random() * 0.2) + 0.05
_rh = (crop[3] - crop[1]) * (random.random() * 0.2) + 0.05
crop[0] -= rw_
crop[1] -= rh_
crop[2] += _rw
crop[3] += _rh
valid_ltrb = ltrb[[pick]].copy()
if points is not None:
valid_points = points[[pick]].copy()
valid_labels = labels[[pick]].copy()
# adjust xy's
valid_ltrb[:, 0::2] -= crop[0]
valid_ltrb[:, 1::2] -= crop[1]
if points is not None:
valid_points[:, :, 0] -= crop[0]
valid_points[:, :, 1] -= crop[1]
image = image.crop(list(map(int, crop)))
w, h = image.size
image = image.resize(osize)
valid_ltrb[:, 0::2] *= osize[0] / w
valid_ltrb[:, 1::2] *= osize[1] / h
if points is not None:
valid_points[:, :, 0] *= osize[0] / w
valid_points[:, :, 1] *= osize[1] / h
valid_ltrb[:, 0::2] = np.clip(valid_ltrb[:, 0::2], 0, osize[0]-1)
valid_ltrb[:, 1::2] = np.clip(valid_ltrb[:, 1::2], 0, osize[1]-1)
if points is not None:
valid_points[:, :, 0] = np.clip(valid_points[:, :, 0], 0, osize[0])
valid_points[:, :, 1] = np.clip(valid_points[:, :, 1], 0, osize[1])
return image, valid_labels, valid_ltrb, valid_points
@staticmethod
def random_flip(image: ImPIL.Image,
ltrb_boxes: np.ndarray = None,
points: np.ndarray = None,
probability: float = 0.75,
vertical_flip: bool = True):
r"""Does random flip and adjusts the boxes & points when not None.
Args:
image (pil-image): pillow input image
ltrb_boxes (np.ndarray, optional): object locations in ltrb format.
Requires 2D-array, with rows of (left, top, right, bottom) in
pixels.
points (np.ndarray, optional): (x, y) locations of landmarks within
a box. Expects a 3D-array with points.shape[0] = ltrb.shape[0]
and points.shape[2] = 2
            probability (float, optional): probability of flip, default=0.75
vertical_flip (bool, optional): When True does, vertical flip,
default=True
Return:
image, boxes
"""
ph, pv = (0.66, 0.33) if vertical_flip else (1., 1.)
if random.random() < probability:
w, h = image.size
prob = random.random()
if prob <= ph: # horizontal
image = image.transpose(ImPIL.FLIP_LEFT_RIGHT)
if ltrb_boxes is not None:
ltrb_boxes[:, 0::2] = w - ltrb_boxes[:, [2, 0]]
if points is not None:
points[:, :, 0] = w - points[:, :, 0]
if prob >= pv: # vertical
image = image.transpose(ImPIL.FLIP_TOP_BOTTOM)
if ltrb_boxes is not None:
ltrb_boxes[:, 1::2] = h - ltrb_boxes[:, [3, 1]]
if points is not None:
points[:, :, 1] = h - points[:, :, 1]
if ltrb_boxes is None:
return image
if points is None:
return image, ltrb_boxes
return image, ltrb_boxes, points
@staticmethod
def random_rotate(image: ImPIL.Image,
ltrb_boxes: np.ndarray = None,
points: np.ndarray = None,
probability: float = 0.5,
vertical_flip: bool = True):
r"""Does random 90/-90 rotation and adjusts the boxes & points when not
None!
Args:
image (pil-image): pillow input image
ltrb_boxes (np.ndarray, optional): object locations in ltrb format.
Requires 2D-array, with rows of (left, top, right, bottom) in
pixels.
points (np.ndarray, optional): (x, y) locations of landmarks within
a box. Expects a 3D-array with points.shape[0] = ltrb.shape[0]
and points.shape[2] = 2
            probability (float, optional): probability of flip, default=0.5
Return:
image, boxes
"""
if random.random() < probability:
w, h = image.size
if random.random() > 0.5: # rotate left
image = image.rotate(90)
if ltrb_boxes is not None:
ltrb_boxes = np.concatenate([ltrb_boxes[:, [1]],
w - ltrb_boxes[:, [2]],
ltrb_boxes[:, [3]],
w - ltrb_boxes[:, [0]]], 1)
if points is not None:
points = np.concatenate((points[:, :, [1]],
w - points[:, :, [0]]), 2)
else:
image = image.rotate(-90)
if ltrb_boxes is not None:
ltrb_boxes = np.concatenate([h - ltrb_boxes[:, [3]],
ltrb_boxes[:, [0]],
h - ltrb_boxes[:, [1]],
ltrb_boxes[:, [2]]], 1)
if points is not None:
points = np.concatenate((h - points[:, :, [1]],
points[:, :, [0]]), 2)
if ltrb_boxes is None:
return image
if points is None:
return image, ltrb_boxes
return image, ltrb_boxes, points
@staticmethod
def annotate_boxes(image, ltrb_boxes, points: list = None,
text: list = None,
box_color: str = "#F1C40F",
point_color: str = "#00FFBB"):
r"""Annotates the boxes and points for visualization.
Args:
image ({pillow image, 3D torch.Tensor}): input image to annotate
ltrb_boxes ({2D torch.Tensor/np.ndarray}): annotation boxes
points ({2D torch.Tensor/np.ndarray}): annotation points
text (list): a list of strings to label
Return:
annotated pillow image
"""
if isinstance(image, torch.Tensor):
image = PillowUtils.tensor_to_pil(image)
if isinstance(ltrb_boxes, torch.Tensor):
ltrb_boxes = ltrb_boxes.data.cpu().numpy()
if isinstance(points, torch.Tensor):
points = points.data.cpu().numpy()
_show = image.copy()
boxes = ltrb_boxes.copy()
if points is not None:
points = points.copy()
if boxes.max() <= 2:
# convert normalized ltrb_boxes to pixel locations
boxes[:, 0::2] *= image.size[0]
boxes[:, 1::2] *= image.size[1]
if points is not None:
points[:, 0] *= image.size[0]
points[:, 1] *= image.size[1]
w, h = _show.size
boxes[:, 0::2] = np.clip(boxes[:, 0::2], 0, w)
boxes[:, 1::2] = np.clip(boxes[:, 1::2], 0, h)
if points is not None:
points[:, 0] = np.clip(points[:, 0], 0, w)
points[:, 1] = np.clip(points[:, 1], 0, h)
draw = ImageDraw.Draw(_show)
for i, x in enumerate(boxes.astype(np.int64)):
draw.rectangle((tuple(x[:2].tolist()), tuple(x[2:].tolist())),
outline=box_color)
if text is not None:
if isinstance(text[i], str):
draw.text(tuple((x[:2]).tolist()), text[i],
fill="#E74C3C")
if points is not None:
r = 2
for pt in points.reshape(-1, 2).astype(np.int64):
x, y = pt[0], pt[1]
draw.ellipse((int(x)-r, int(y)-r, int(x)+r, int(y)+r),
fill=point_color)
del draw
return _show
``` |
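A short, illustrative way to chain a few of these helpers on a synthetic image; the image size, box coordinates and output file name below are arbitrary:

```python
import numpy as np
import PIL.Image as ImPIL

image = ImPIL.new("RGB", (100, 100), color=(40, 40, 40))
boxes = np.array([[20., 30., 70., 80.]])        # one box in pixel ltrb coordinates

image, boxes = PillowUtils.random_pad(image, ltrb_boxes=boxes, pad=0.2, fill=0)
image, boxes = PillowUtils.random_flip(image, ltrb_boxes=boxes, probability=1.0)
annotated = PillowUtils.annotate_boxes(image, boxes, text=["object"])
annotated.save("annotated.png")
```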
{
"source": "john-sonz/federated-learning-iot",
"score": 2
} |
#### File: server/server/main.py
```python
import flwr as fl
import os
from flwr.server.strategy import FedAvg
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
ENV = os.environ
FRACTION_FIT = float(ENV.get('FRACTION_FIT', "0.5"))
MIN_AVAILABLE_CLIENTS = int(ENV.get('MIN_AVAILABLE_CLIENTS', "5"))
NUM_ROUNDS = int(ENV.get('NUM_ROUNDS', "10"))
SERVER_ADDRESS = ENV.get('SERVER_ADDRESS', "localhost:8080")
def start_server(num_rounds: int, min_available_clients: int, fraction_fit: float):
strategy = FedAvg(min_available_clients=min_available_clients, fraction_fit=fraction_fit)
fl.server.start_server(SERVER_ADDRESS, strategy=strategy, config={"num_rounds": num_rounds})
if __name__ == "__main__":
start_server(NUM_ROUNDS, MIN_AVAILABLE_CLIENTS, FRACTION_FIT)
``` |
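The server above only coordinates FedAvg; each device runs a Flower client exposing `get_parameters`, `fit` and `evaluate`. A bare-bones client sketch compatible with the pre-1.0 `flwr` API used by the server (the model, data and metrics are placeholders, and the exact client API may differ between `flwr` versions):

```python
import flwr as fl
import numpy as np

class DummyClient(fl.client.NumPyClient):
    def __init__(self):
        self.weights = [np.zeros((10, 10)), np.zeros(10)]   # placeholder parameters

    def get_parameters(self):
        return self.weights

    def fit(self, parameters, config):
        self.weights = parameters           # local training would happen here
        return self.weights, 100, {}        # parameters, number of examples, metrics

    def evaluate(self, parameters, config):
        return 0.0, 100, {"accuracy": 0.0}  # loss, number of examples, metrics

if __name__ == "__main__":
    fl.client.start_numpy_client("localhost:8080", client=DummyClient())
```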
{
"source": "johnsonZhaoxin/ProjectEuler",
"score": 4
} |
#### File: johnsonZhaoxin/ProjectEuler/007-10001prime.py
```python
def isprime(n):
    for i in range(2, n - 1):
        if n % i == 0:
            #print("%d can be divided by %d" % (n, i))
            return False
    #print("%d is a prime number" % n)
    return True
n = 1
num = 2
while n <= 10001:
    if isprime(num):
        print("Prime #%d is %d" % (n, num))
        n += 1
        num += 1
    else:
        num += 1
```
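Trial division up to n - 1 works for this problem but is slow for the 10001st prime; a common refinement, shown here as an optional alternative, stops at the square root:

```python
def isprime_fast(n):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    i = 3
    while i * i <= n:        # a composite n must have a factor no larger than sqrt(n)
        if n % i == 0:
            return False
        i += 2
    return True
```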
#### File: johnsonZhaoxin/ProjectEuler/009-Pythagoreantriplet.py
```python
import numpy as np
def get_hypo(a,b):
err = 1e-7
hypo = np.sqrt(a**2+b**2)
if hypo - int(hypo) < err:
#print("斜边为整数,为%d"%(int(hypo)))
return hypo
else:
#print("斜边不是整数,为%f" % (hypo))
return hypo
for i in range(1,998):
for j in range(1,998-i):
hypo = get_hypo(i,j)
if int(hypo)+i+j == 1000:
print(i,j,hypo)
```
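Because a + b + c = 1000, the hypotenuse can also be eliminated algebraically and the check done entirely in integers, which avoids the floating-point tolerance used above:

```python
# Integer-only search for the special Pythagorean triplet with a + b + c = 1000.
for a in range(1, 1000):
    for b in range(a + 1, 1000 - a):
        c = 1000 - a - b
        if a * a + b * b == c * c:
            print(a, b, c, a * b * c)   # the problem asks for the product a*b*c
```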
#### File: johnsonZhaoxin/ProjectEuler/033-digitcancelling.py
```python
def speicalcancel(n, m):
    # n and m are both two-digit numbers
numer1 = str(n)[0]
numer2 = str(n)[1]
deno1 = str(m)[0]
deno2 = str(m)[1]
# print(numer1,numer2,deno1,deno2)
    # TODO: find the digit shared by numerator and denominator and handle each case; exclude denominators ending in 0, digit-reversed pairs, and pairs that only match because both end in 0
if int(deno1)*int(deno2)==0:
return 0
else:
if numer1 == deno1:
return int(numer2)/int(deno2)
elif numer1 == deno2:
return int(numer2)/int(deno1)
elif numer2 == deno1:
return int(numer1)/int(deno2)
elif numer2 == deno2:
return int(numer1)/int(deno1)
for num in range(10, 99):
for deno in range(num + 1, 100):
#print("%d/%d" % (num, deno))
if num / deno == speicalcancel(num, deno):
print("%d/%d" % (num, deno))
```
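The problem then asks for the denominator of the product of the four curious fractions in lowest terms. A small follow-up step, assuming the pairs printed by the loop above are collected into a list:

```python
from fractions import Fraction

found = [(16, 64), (19, 95), (26, 65), (49, 98)]   # the pairs printed by the loop above
product = Fraction(1, 1)
for num, deno in found:
    product *= Fraction(num, deno)
print(product.denominator)   # 100
```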
#### File: johnsonZhaoxin/ProjectEuler/041-selfpower.py
```python
def selfpower(x):
    return pow(x, x)
total = 0
for i in range(1, 1001):
    total += selfpower(i)  # why is this kind of numeric operation so fast?
print(total)
``` |
{
"source": "johnsoong216/PokerOddsCalc",
"score": 3
} |
#### File: johnsoong216/PokerOddsCalc/ranker.py
```python
import multiprocessing
from joblib import Parallel, delayed
import timeit
import logging
import numpy as np
from collections import Counter
class Ranker:
@staticmethod
def rank_all_hands(hand_combos, return_all=False):
# start = timeit.default_timer()
rank_res_arr = np.zeros(shape=(hand_combos.shape[1], hand_combos.shape[0]))
# if hand_combos.shape[0] >= 100000 and hand_combos.shape[1] > 1:
# Parallel(n_jobs=multiprocessing.cpu_count(), backend="threading")\
# (delayed(Ranker.parallel_rank_hand)(sce, hand_combos, rank_res_arr) for sce in range(hand_combos.shape[1]))
# else:
for sce in range(hand_combos.shape[1]):
Ranker.parallel_rank_hand(sce, hand_combos, rank_res_arr)
# end = timeit.default_timer()
# logging.info(f"Ranking all hands time cost: {end - start}")
if return_all:
return rank_res_arr
else:
return np.max(rank_res_arr, axis=0)
@staticmethod
def parallel_rank_hand(scenario, hand_combos, rank_res_arr):
rank_res_arr[scenario, :] = Ranker.rank_one_hand(hand_combos[:, scenario, :, :])
@staticmethod
def rank_one_hand(hand_combos):
num_combos = hand_combos[:, :, 0]
num_combos.sort(axis=1)
suit_combos = hand_combos[:, :, 1]
suit_arr = gen_suit_arr(suit_combos)
straight_arr = gen_straight_arr(num_combos)
        rank_arr = np.zeros(num_combos.shape[0], dtype=int)
straight_flush_check(num_combos, rank_arr, straight_arr, suit_arr)
four_of_a_kind_check(num_combos, rank_arr)
full_house_check(num_combos, rank_arr)
flush_check(rank_arr, suit_arr)
straight_check(num_combos, rank_arr, straight_arr)
three_of_a_kind_check(num_combos, rank_arr)
two_pairs_check(num_combos, rank_arr)
one_pair_check(num_combos, rank_arr)
return rank_arr * (16 ** 5) + np.sum(num_combos * np.power(16, np.arange(0, 5)), axis=1)
### Helper Functions
def gen_straight_arr(num_combos):
    straight_check = np.zeros(len(num_combos), dtype=int)
for i in range(4):
if i <= 2:
straight_check += (num_combos[:, i] == (num_combos[:, i + 1] - 1)).astype(int)
else:
straight_check += (num_combos[:, i] == (num_combos[:, i + 1] - 1)).astype(int)
straight_check += ((num_combos[:, i] == 5) & (num_combos[:, i + 1] == 14)).astype(int)
return (straight_check == 4)
def gen_suit_arr(suit_combos):
return np.max(suit_combos, axis=1) == np.min(suit_combos, axis=1)
def straight_flush_check(num_combos, rank_arr, straight_arr, suit_arr):
rank_arr[(rank_arr == 0) & (straight_arr & suit_arr)] = 8
# Rearrange order of 2345A to A2345
reorder_idx = (rank_arr == 8) & (num_combos[:, 0] == 2) & (num_combos[:, 4] == 14)
num_combos[reorder_idx, :] = np.concatenate([num_combos[reorder_idx, 4:], num_combos[reorder_idx, :4]], axis=1)
def four_of_a_kind_check(num_combos, rank_arr):
small = np.all(num_combos[:, 0:4] == num_combos[:, :1], axis=1) # 22223
large = np.all(num_combos[:, 1:] == num_combos[:, 4:], axis=1) # 24444
rank_arr[(rank_arr == 0) & (small | large)] = 7
reorder_idx = (rank_arr == 7) & small
num_combos[reorder_idx, :] = np.concatenate([num_combos[reorder_idx, 4:], num_combos[reorder_idx, :4]], axis=1)
def full_house_check(num_combos, rank_arr):
small = np.all(
(num_combos[:, 0:3] == num_combos[:, :1])
& (num_combos[:, 3:4] == num_combos[:, 4:5]), axis=1) # 22233
large = np.all(
(num_combos[:, 0:1] == num_combos[:, 1:2])
& (num_combos[:, 2:5] == num_combos[:, 4:]), axis=1) # 22444
rank_arr[(rank_arr == 0) & (small | large)] = 6
reorder_idx = (rank_arr == 6) & small
num_combos[reorder_idx, :] = np.concatenate([num_combos[reorder_idx, 3:], num_combos[reorder_idx, :3]], axis=1)
def flush_check(rank_arr, suit_arr):
rank_arr[(rank_arr == 0) & suit_arr] = 5
def straight_check(num_combos, rank_arr, straight_arr):
rank_arr[(rank_arr == 0) & straight_arr] = 4
# Rearrange order of 2345A to A2345
reorder_idx = (rank_arr == 4) & (num_combos[:, 0] == 2) & (num_combos[:, 4] == 14)
num_combos[reorder_idx, :] = np.concatenate([num_combos[reorder_idx, 4:], num_combos[reorder_idx, :4]], axis=1)
def three_of_a_kind_check(num_combos, rank_arr):
small = np.all(
(num_combos[:, 0:3] == num_combos[:, :1]), axis=1) # 22235
middle = np.all(
(num_combos[:, 1:4] == num_combos[:, 1:2]), axis=1) # 23335
large = np.all(
(num_combos[:, 2:] == num_combos[:, 2:3]), axis=1) # 36AAA
rank_arr[(rank_arr == 0) & (small | middle | large)] = 3
reorder_small = (rank_arr == 3) & small
    reorder_middle = (rank_arr == 3) & middle
num_combos[reorder_small, :] = np.concatenate([num_combos[reorder_small, 3:], num_combos[reorder_small, :3]],
axis=1)
num_combos[reorder_middle, :] = np.concatenate([
num_combos[reorder_middle, :1],
num_combos[reorder_middle, 4:],
num_combos[reorder_middle, 1:4]], axis=1)
def two_pairs_check(num_combos, rank_arr):
small = np.all(
(num_combos[:, 0:2] == num_combos[:, :1])
& (num_combos[:, 2:4] == num_combos[:, 2:3]), axis=1) # 2233A
middle = np.all(
(num_combos[:, 0:2] == num_combos[:, :1])
& (num_combos[:, 3:] == num_combos[:, 4:]), axis=1) # 223AA
large = np.all(
(num_combos[:, 1:3] == num_combos[:, 1:2])
& (num_combos[:, 3:] == num_combos[:, 4:]), axis=1) # 233AA
rank_arr[(rank_arr == 0) & (small | middle | large)] = 2
reorder_small = (rank_arr == 2) & small
    reorder_middle = (rank_arr == 2) & middle
num_combos[reorder_small, :] = np.concatenate([num_combos[reorder_small, 4:], num_combos[reorder_small, :4]],
axis=1)
num_combos[reorder_middle, :] = np.concatenate([
num_combos[reorder_middle, 2:3],
num_combos[reorder_middle, 0:2],
num_combos[reorder_middle, 3:]], axis=1)
def one_pair_check(num_combos, rank_arr):
small = np.all(
(num_combos[:, 0:2] == num_combos[:, :1]), axis=1) # 22345
mid_small = np.all(
(num_combos[:, 1:3] == num_combos[:, 1:2]), axis=1) # 23345
mid_large = np.all(
(num_combos[:, 2:4] == num_combos[:, 2:3]), axis=1) # 23445
large = np.all(
(num_combos[:, 3:] == num_combos[:, 3:4]), axis=1) # 23455
rank_arr[(rank_arr == 0) & (small | mid_small | mid_large | large)] = 1
reorder_small = (rank_arr == 1) & small
reorder_mid_small = (rank_arr == 1) & mid_small
reorder_mid_large = (rank_arr == 1) & mid_large
num_combos[reorder_small, :] = np.concatenate([num_combos[reorder_small, 2:], num_combos[reorder_small, :2]],
axis=1)
num_combos[reorder_mid_small, :] = np.concatenate([
num_combos[reorder_mid_small, :1],
num_combos[reorder_mid_small, 3:],
num_combos[reorder_mid_small, 1:3]], axis=1)
num_combos[reorder_mid_large, :] = np.concatenate([
num_combos[reorder_mid_large, :2],
num_combos[reorder_mid_large, 4:],
num_combos[reorder_mid_large, 2:4]], axis=1)
``` |
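The ranking code above encodes each hand as `rank_category * 16**5` plus the five (re-ordered) card values in base 16, so plain integer comparison orders hands correctly. A minimal sketch of calling the ranker directly follows; the module name for this file is not shown here, so the import of `Ranker` is assumed to be in scope already.

```python
import numpy as np

# each hand is five [card_value, suit] pairs; values run 2-14, suits 0-3
flush = [[2, 1], [6, 1], [9, 1], [11, 1], [13, 1]]
pair = [[5, 0], [5, 2], [8, 1], [10, 3], [14, 0]]
hands = np.array([flush, pair])

scores = Ranker.rank_one_hand(hands)
# the flush scores 5 * 16**5 plus kickers, the pair only 1 * 16**5 plus kickers,
# so the flush comes out as the stronger hand
print(int(np.argmax(scores)))  # 0
```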
{
"source": "JohnSounder/AP-API",
"score": 3
} |
#### File: kuas_api/kuas/job.py
```python
import requests
import re
import time
import lxml
import json
from bs4 import BeautifulSoup
import ap
from pprint import pprint
#Configuration
session = requests.session()
username = ""
password = ""
page_Num = 1
show_Full = True
url = "http://140.127.113.136/StuPartTime/Announce/stu_announce_view.aspx?VCHASID="
if not ap.login(session, username, password):
print "登入失敗"
# Parse the page with lxml
def viewPage_lxml(page_Num, show_Full, username):
response = session.get(url+username)
tree = lxml.etree.HTML(response.content)
    # If this is not the first page, grab the hidden form inputs needed for the postback
if page_Num != 1:
X = tree.xpath(u"//input[@name='__VIEWSTATE']")[0].values()[3]
Y = tree.xpath(u"//input[@name='__EVENTVALIDATION']")[0].values()[3]
form = {
"__EVENTTARGET":"GridView1",
"__EVENTARGUMENT":"Page$%s"%page_Num,
"__VIEWSTATE":X,
"__EVENTVALIDATION":Y
}
response = session.post(url, data=form)
tree = lxml.etree.HTML(response.content)
#tree.xpath(u"//table[@id='GridView1']//tr//td//span[contains(concat(' ', @id, ' '), 'Label1')]")
for x in table[0]:
print x.text
return 0
if not show_Full:
if x.text != "Y":
id_list.append(x['id'])
else:
id_list.append(x['id'])
for x in id_list:
index = str(x).replace("lblFull", "")
data = []
        # Announcement number
#data.append(bs.find('span', id=index+"Label1").text)
        # Posting date
#data.append(bs.find('span', id=index+"Label2").text)
        # Number of people needed -- taken from the posting date's parent.next
print bs.find('span', id=index+"Label2").next_sibling
        # Working hours
#data.append(bs.find('span', id=index+"Label3").text)
        # Requirements
#data.append(bs.find('span', id=index+"Label4").text)
        # Requesting unit -- taken from the requirements' parent.next
for x in data:
print x.encode("utf8")
print "=========================="
#print tree.xpath(u"//span[@id=re.compile(r'Label2$')]")
#".//div[starts-with(@id,'comment-')"
# Main flow starts here
# viewPage_lxml(1, show_Full, username)
response = session.get(url+username)
tree = lxml.etree.HTML(response.content)
result = []
# Grab the announcement IDs
ID = []
for x in tree.xpath(u"//table[@id='GridView1']//tr//td//span[contains(concat(' ', @id, ' '), 'Label1')]"):
ID.append(x.text)
# Grab the posting dates and the number of people needed
post_date = []
person = []
for x in tree.xpath(u"//table[@id='GridView1']//tr//td//span[contains(concat(' ', @id, ' '), 'Label2')]"):
post_date.append(x.text)
person.append(x.getparent().getparent().getnext().getchildren()[0].text)
# Grab the working hours
work_time = []
for x in tree.xpath(u"//table[@id='GridView1']//tr//td//span[contains(concat(' ', @id, ' '), 'Label3')]"):
work_time.append(x.text)
# Grab the requirements, contact name, phone number and requesting unit
work_required = []
contact_name = []
contact_number = []
contact_org = []
for x in tree.xpath(u"//table[@id='GridView1']//tr//td//span[contains(concat(' ', @id, ' '), 'Label4')]"):
    # This is the job requirement, but the Chinese text is not decoded properly yet
work_required.append(x.text)
    # The contact name, phone number and requesting unit have no attribute to match on directly, so walk the element tree instead
contact_name_tag = x.getparent().getparent().getnext()
    # Contact name, but the Chinese text is not decoded properly yet
contact_name.append(contact_name_tag.getchildren()[0].text)
    # Get the phone number
contact_number_tag = contact_name_tag.getnext()
contact_number.append(contact_number_tag.getchildren()[0].text)
    # Get the requesting unit, but the Chinese text is not decoded properly yet
contact_org_tag = contact_number_tag.getnext()
contact_org.append(contact_org_tag.getchildren()[0].text)
total = [ID, post_date, person, work_time, work_required, contact_name, contact_number, contact_org]
for i, v in enumerate(total):
total[i] = eval(str(v).replace("u\'", "\'"))
total = json.dumps(total)
```
#### File: kuas_api/kuas/news.py
```python
import random
ENABLE = 1
NEWS_ID = 31
NEWS_DEBUG = False
DEFAULT_WEIGHT = 10
def random_by_weight(p):
choice_id = []
for i in range(len(p)):
choice_id += [i for _ in range(DEFAULT_WEIGHT + p[i]["news_weight"])]
return p[random.choice(choice_id)]
def random_news():
news_list = [
{
"news_title": "第八屆泰北團-夢想,「泰」不一樣",
"news_image": "http://i.imgur.com/iNbbd4B.jpg",
"news_url": "https://docs.google.com/forms/d/11Awcel_MfPeiEkl7zQ0MldvnAw59gXKLecbIODPOaMs/viewform?edit_requested=true",
"news_content": "",
"news_weight": 3
},
{
"news_title": "體委幹部體驗營",
"news_image": "http://i.imgur.com/aJyQlJp.jpg",
"news_url": "https://www.facebook.com/Kuas%E9%AB%94%E5%A7%94-440439566106678/?fref=ts",
"news_content": "",
"news_weight": 4
},
{
"news_title": "遊戲外掛 原理實戰",
"news_image": "http://i.imgur.com/WkI23R2.jpg",
"news_url": "https://www.facebook.com/profile.php?id=735951703168873",
"news_content": "",
"news_weight": 6
},
{
"news_title": "好日子育樂營",
"news_image": "https://scontent-hkg3-1.xx.fbcdn.net/hphotos-xft1/v/t34.0-0/p206x206/12834566_977348362345349_121675822_n.jpg?oh=e04f6830fdfe5d3a77e05a8b3c32fefc&oe=56E663E6",
"news_url": "https://m.facebook.com/kuasYGR/",
"news_content": "",
"news_weight": 6
}
]
if NEWS_DEBUG:
return news_list[0]
else:
return random_by_weight(news_list)
def news_status():
return [ENABLE, NEWS_ID]
def news():
"""
News for kuas.
return [enable, news_id, news_title, news_template, news_url]
enable: bool
news_id: int
news_title: string
    news_template: string
news_url: string
"""
# Get news from random news
news = random_news()
news_title = news["news_title"]
news_template = (
"<div style='text-align:center;'>"
"<div><img style='display:block;margin-left:auto;margin-right:auto;max-width:80%;min-height:150px;height:auto;' src='"
+ news["news_image"] + "'></img>" + news["news_content"] + "</div>" +
"</div>"
)
news_url = news["news_url"]
return [ENABLE, NEWS_ID, news_title, news_template, news_url]
```
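A quick sketch of how the weighted pick above behaves, assuming the module is importable as `kuas_api.kuas.news` (matching the file path): every entry gets `DEFAULT_WEIGHT + news_weight` tickets in the draw, so higher-weight items surface proportionally more often.

```python
from collections import Counter

import kuas_api.kuas.news as news

# sample the weighted choice many times and look at the distribution
picks = Counter(news.random_news()["news_title"] for _ in range(1000))
print(picks.most_common())
```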
#### File: kuas_api/kuas/parse.py
```python
from lxml import etree
sections_time = []
weekdays_abbr = []
def parse(fncid, content):
if fncid in parse_function:
return parse_function[fncid](content)
else:
return content
def course(cont):
"""Parse raw kuas ap course data
Return:
parse data: json
have_saturday: bool
have_sunday: bool
except_text: string
"""
root = etree.HTML(cont)
try:
center = root.xpath("//center")[0]
center_text = list(center.itertext())[0]
except:
center = ""
center_text = ""
# Return if no course data
if center_text.startswith(u'學生目前無選課資料!'):
return {}
tbody = root.xpath("//table")[-1]
course_table = []
for sections, r in enumerate(tbody[1:]):
section = ""
start_time = ""
end_time = ""
for weekends, c in enumerate(r.xpath("td")):
classes = {"title": "", "date": {},
"location": {}, "instructors": []}
r = list(
filter(
lambda x: x,
map(lambda x: x.replace("\xa0", ""), c.itertext())
)
)
if not weekends:
section = r[0]
start_time = ""
end_time = ""
if len(r) > 1:
start_time, end_time = r[1].split("-")
start_time = "%s:%s" % (start_time[: 2], start_time[2:])
end_time = "%s:%s" % (end_time[: 2], end_time[2:])
continue
if not r:
continue
classes["title"] = r[0]
classes["date"]["start_time"] = start_time
classes["date"]["end_time"] = end_time
classes["date"]["weekday"] = " MTWRFSH"[weekends]
classes["date"]["section"] = section
if len(r) > 1:
classes["instructors"].append(r[1])
classes["location"]["building"] = ""
classes["location"]["room"] = r[2] if len(r) > 2 else ""
course_table.append(classes)
timecode = []
for r in tbody[1:]:
timecode.append(list(r.itertext())[1])
course_table.append({'timecode': timecode})
return course_table
def score(cont):
root = etree.HTML(cont)
try:
tbody = root.xpath("//table")[-1]
center = root.xpath("//center")
center_text = list(center[-1].itertext())[0]
except:
tbody = ""
center = ""
center_text = ""
if center_text.startswith(u'目前無學生個人成績資料'):
return {}
score_table = []
for r_index, r in enumerate(tbody[1:-1]):
r = list(map(lambda x: x.replace(u"\xa0", ""), r.itertext()))
row = {}
row["title"] = r[1]
row["units"] = r[2]
row["hours"] = r[3]
row["required"] = r[4]
row["at"] = r[5]
row["middle_score"] = r[6]
row["final_score"] = r[7]
row["remark"] = r[8]
score_table.append(row)
total_score = root.xpath("//div")[-1].text.replace(u" ", " ").split(" ")
detail = {
"conduct": float(total_score[0].split(":")[-1]) if not total_score[0].startswith("操行成績:0") else 0.0,
"average": float(total_score[1].split(":")[-1]) if total_score[1] != "總平均:" else 0.0,
"class_rank": total_score[2].split(":")[-1] if not total_score[2].startswith("班名次/班人數:/") else "",
"class_percentage": float(total_score[3].split(":")[-1][:-1]) if not total_score[3].startswith("班名次百分比:%") else 0.0
}
return {"scores": score_table, "detail": detail}
parse_function = {"ag222": course, "ag008": score}
if __name__ == "__main__":
# print(course(open("c.html").read()))
pass
```
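The `parse_function` table at the bottom dispatches on the AP function id, so callers only ever go through `parse()`. A small sketch of that, where `course_page.html` is a hypothetical saved copy of an `ag222` (course list) response rather than a file shipped with the project:

```python
from kuas_api.kuas import parse

# "course_page.html" is a placeholder for a saved ag222 response
html = open("course_page.html", encoding="utf-8").read()

print(parse.parse("ag222", html))          # parsed course table
print(parse.parse("zz999", html) == html)  # unknown fncid is passed through untouched
```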
#### File: kuas_api/kuas/user.py
```python
from flask import g
import kuas_api.kuas.ap as ap
import kuas_api.kuas.cache as cache
from lxml import etree
AP_QUERY_USER_EXPIRE = 300
def _get_user_info(session):
"""Get user info
return: `lxml.etree._Element`
"""
content = cache.ap_query(
session, "ag003", {}, g.username, expire=AP_QUERY_USER_EXPIRE)
root = etree.HTML(content)
return root
def get_user_info(session):
root = _get_user_info(session)
td = root.xpath("//td")
result = {
"education_system": "",
"department": "",
"class": "",
"student_id": g.username,
"student_name_cht": "",
"student_name_eng": "",
"status": 200,
"message": ""
}
    if len(td) > 3:
result["education_system"] = td[3].text[5:]
result["department"] = td[4].text[5:]
result["class"] = td[8].text[5:]
result["student_id"] = td[9].text[5:]
result["student_name_cht"] = td[10].text[5:]
result["student_name_eng"] = td[11].text[5:]
    else:
result["status"] = 204
result["message"] = td[0].text
return result
def get_user_picture(session):
root = _get_user_info(session)
try:
image = ap.AP_BASE_URL + "/nkust" + \
root.xpath("//img")[0].values()[0][2:]
except:
image = ""
return image
```
#### File: kuas_api/modules/error.py
```python
import json
def error_handle(status,
developer_message, user_message,
error_code=-1, more_info=""):
"""Return error handler json
:param status: HTTP status code
:type status: int
:param developer_message: message for developer
:type developer_message: str
:param user_message: message for user
:type user_message: str
:param error_code: internal error code
:type error_code: int
:param more_info: links for more information
:type more_info: str
:return: json error handle
:rtype: json
"""
error_handle = {
"status": status,
"developer_message": developer_message,
"user_message": user_message,
"error_code": error_code,
"more_info": more_info
}
return json.dumps(error_handle)
```
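Since `error_handle` just serializes its arguments, a typical call site looks like the sketch below; the status and error-code values are illustrative only.

```python
import json

from kuas_api.modules.error import error_handle

payload = error_handle(
    status=401,
    developer_message="token expired",
    user_message="Please sign in again.",
    error_code=4011,
)
print(json.loads(payload)["user_message"])  # Please sign in again.
```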
#### File: views/v2/bus.py
```python
import time
import json
from flask import request, g
from flask_cors import *
import kuas_api.kuas.cache as cache
from kuas_api.modules.stateless_auth import auth
import kuas_api.modules.stateless_auth as stateless_auth
import kuas_api.modules.error as error
from kuas_api.modules.json import jsonify
from .doc import auto
# Nestable blueprints problem
# not sure isn't this a best practice now.
# https://github.com/mitsuhiko/flask/issues/593
#from kuas_api.views.v2 import api_v2
routes = []
def route(rule, **options):
def decorator(f):
url_rule = {
"rule": rule,
"view_func": f,
"options": options if options else {}
}
routes.append(url_rule)
return f
return decorator
@route('/bus/timetables')
@auto.doc(groups=["public"])
#@cross_origin(supports_credentials=True)
@auth.login_required
def timetables():
"""Get KUAS school bus time table.
:reqheader Authorization: Using Basic Auth
:query string date: Specific date to query timetable. format: yyyy-mm-dd
:query string from: The start station you want to query. (not impl yet)
:statuscode 200: no error
**Request**
without date (default the date on server)
.. sourcecode:: http
GET /latest/bus/timetables HTTP/1.1
Host: kuas.grd.idv.tw:14769
Authorization: Basic xxxxxxxxxxxxx=
.. sourcecode:: shell
curl -u username:password -X GET https://kuas.grd.idv.tw:14769/v2/bus/timetables
with date
.. sourcecode:: http
GET /latest/bus/timetables?date=2015-9-1 HTTP/1.1
Host: kuas.grd.idv.tw:14769
Authorization: Basic xxxxxxxxxxxxx=
.. sourcecode:: shell
curl -u username:password -X GET https://kuas.grd.idv.tw:14769/v2/bus/timetables?date=2017-08-09
**Response**
.. sourcecode:: http
HTTP/1.0 200 OK
Content-Type: application/json
{
"timetable":[
{
"endStation":"燕巢",
"EndEnrollDateTime":"2015-08-31 17:20",
"isReserve":-1,
"Time":"08:20",
"busId":"27034",
"limitCount":"999",
"reserveCount":"27",
"runDateTime":"2015-09-01 08:20"
},
{
"endStation":"燕巢",
"EndEnrollDateTime":"2015-09-01 08:00",
"isReserve":-1,
"Time":"13:00",
"busId":"27062",
"limitCount":"999",
"reserveCount":"1",
"runDateTime":"2015-09-01 13:00"
},
{
"endStation":"建工",
"EndEnrollDateTime":"2015-09-01 07:15",
"isReserve":-1,
"Time":"12:15",
"busId":"27090",
"limitCount":"999",
"reserveCount":"5",
"runDateTime":"2015-09-01 12:15"
},
{
"endStation":"建工",
"EndEnrollDateTime":"2015-09-01 11:45",
"isReserve":-1,
"Time":"16:45",
"busId":"27118",
"limitCount":"999",
"reserveCount":"24",
"runDateTime":"2015-09-01 16:45"
}
],
"date":"2015-9-1"
}
"""
date = time.strftime("%Y-%m-%d", time.gmtime())
if request.args.get("date"):
date = request.args.get("date")
# Restore cookies
s = stateless_auth.get_requests_session_with_cookies()
return jsonify(date=date, timetable=cache.bus_query(s, date))
@route("/bus/reservations", methods=["GET"])
@route("/bus/reservations/<int:bus_id>", methods=["PUT"])
@route("/bus/reservations/<int:cancel_key>", methods=["DELETE"])
@auth.login_required
def bus_reservations(bus_id=None, cancel_key=None):
# Restore cookies
s = stateless_auth.get_requests_session_with_cookies()
# Debugging
user_agent = request.user_agent.string
user_id = g.username
if request.method == "GET":
return jsonify(reservation=cache.bus_reserve_query(s))
elif request.method == "PUT":
result = cache.bus_booking(s, bus_id, "")
try:
print("PUT,%s,%s,%s" % (user_agent, user_id, result))
except:
print("PUT ERROR, %s, %s" % (user_agent, user_id))
return jsonify(result)
elif request.method == "DELETE":
result = cache.bus_booking(s, cancel_key, "un")
print("DELETE,%s,%s,%s" % (user_agent, user_id, result))
return jsonify(result)
return request.method
``` |
{
"source": "johnspencer98/aa_228_catan",
"score": 3
} |
#### File: johnspencer98/aa_228_catan/main.py
```python
from tkinter import *
import math
import numpy as np
import player
import vertex
import Edge
import tile
# ok and we add the vertices!
vertices_coordinates = ['top_left', 'top_center', 'top_right', 'mid_left', 'mid_center', 'mid_right', 'bot_left',
'bot_center', 'bot_right']
# now we map vertices to edges, we will use this when building roads because we need to know
# which edges we can build our roads on
vert2edge = {'top_left': [(1, 1), (2, 1)], 'top_center': [(1, 1), (1, 2), (2, 2)],
'top_right': [(1, 2), (2, 3)], 'mid_left': [(2, 1), (3, 1)],
'mid_center': [(2, 2), (3, 2), (3, 1), (4, 2)],
'mid_right': [(3, 2), (2, 3)], 'bot_left': [(4, 1), (5, 1)],
'bot_center': [(5, 1), (5, 2)], 'bot_right': [(4, 3), (5, 2)],
}
# now we map vertices to edges, we will use this when building roads because we need to know
# which edges we can build our roads on
edge2vert = {(1, 1): ['top_left', 'top_center'], (1, 2): ['top_center', 'top_right'],
(2, 1): ['top_left', 'mid_left'], (2, 2): ['top_center', 'mid_center'],
(2, 3): ['top_right', 'mid_right'], (3, 1): ['mid_left', 'mid_center'],
(3, 2): ['mid_center', 'mid_right'], (4, 1): ['mid_left', 'bot_left'],
(4, 2): ['mid_center', 'bot_center'], (4, 3): ['mid_right', 'bot_right'],
(5, 1): ['bot_left', 'bot_center'], (5, 2): ['bot_center', 'bot_right'],
(5, 3): ['bot_right', 'bot_center']}
# now we map vertices to tiles, where each key is a vertex and the value is the adjacent tiles
# we will use this when collecting resources on the tiles that are adjacent to our settlements
vert2tiles = {'top_left': ['tile_2'], 'top_center': ['tile_1', 'tile_2'],
'top_right': ['tile_1'], 'mid_left': ['tile_2', 'tile_3'],
'mid_center': ['tile_1', 'tile_2', 'tile_3', 'tile_4'],
'mid_right': ['tile_1', 'tile_4'], 'bot_left': ['tile_3'],
'bot_center': ['tile_3', 'tile_4'], 'bot_right': ['tile_4']
}
def createBoard(tiles_list, vertices_list, edges_list):
# now we initiate vertices as objects
for coord in vertices_coordinates:
new_vertex = vertex.Vertex()
new_vertex.coordinate = coord
new_vertex.edges = vert2edge[coord]
new_vertex.tiles = vert2tiles[coord]
vertices_list.append(new_vertex)
# now we define edges! lets do some numerical indexing here, so this is what it looks like visually
edge_coords = [(1, 1), (1, 2), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (4, 1), (4, 2), (4, 3), (5, 1), (5, 2)]
for coord in edge_coords:
new_edge = Edge.Edge()
new_edge.coordinate = coord
new_edge.vertices = edge2vert[coord]
edges_list.append(new_edge)
# let's create a super simple board
tile_coord = ['tile_1', 'tile_2', 'tile_3', 'tile_4']
resources = ['wood', 'brick', 'wood', 'brick']
roll_numbers = [1, 2, 3, 4]
resource_assignment = np.random.choice(4, 4, replace=False)
roll_assignment = np.random.choice(4, 4, replace=False)
for z, t_coord in enumerate(tile_coord):
new_tile = tile.Tile()
new_tile.coordinate = t_coord
        new_tile.dice_roll = roll_numbers[roll_assignment[z]]
        new_tile.resource = resources[resource_assignment[z]]
tiles_list.append(new_tile)
# let's print what our random board looks like
# printBoard()
def give_resources(roll):
for cur_player in players:
for cur_vertex in cur_player.settlements:
for cur_tile_coord in cur_vertex.tiles:
cur_tile = next((x for x in tiles if x.coordinate == cur_tile_coord), None)
if cur_tile.dice_roll == roll:
cur_player.hand[cur_tile.resource] += 1
def printBoard():
# top_left <- (1,1) -> top_center <- (1,2) -> top_right
# ^ ^ ^
# | | |
# (2,2) *tile_2* (2,2) *tile_1* (2,3)
# | *roll #* | *roll #* |
# v v v
# mid_left <- (3,1) -> mid_center <- (3,2) -> mid_right
# ^ ^ ^
# | | |
# (4,1) *tile_3* (4,2) *tile_4* (4,3)
# | *roll #* | *roll #* |
# v v v
# bot_left <- (5,1) -> bot_center <- (5,2) -> bot_right
print('GAME BOARD')
print()
print(next((x for x in vertices if x.coordinate == 'top_left'), None).to_string() + ' <- '
+ next((x for x in edges if x.coordinate == (1, 1)), None).to_string() + ' -> '
+ next((x for x in vertices if x.coordinate == 'top_center'), None).to_string() + ' <- '
+ next((x for x in edges if x.coordinate == (1, 2)), None).to_string() + ' -> '
+ next((x for x in vertices if x.coordinate == 'top_right'), None).to_string())
print(' ^ ^ ^')
print(' | | |')
print(next((x for x in edges if x.coordinate == (2, 1)), None).to_string() + ' '
+ next((x for x in tiles if x.coordinate == 'tile_2'), None).resource + ' '
+ next((x for x in edges if x.coordinate == (2, 2)), None).to_string() + ' '
+ next((x for x in tiles if x.coordinate == 'tile_1'), None).resource + ' '
+ next((x for x in edges if x.coordinate == (2, 3)), None).to_string())
print(' | ' + str(next((x for x in tiles if x.coordinate == 'tile_2'), None).dice_roll) + ' | '
+ ' ' + str(next((x for x in tiles if x.coordinate == 'tile_1'), None).dice_roll) + ' | ')
print(' v v v')
print(next((x for x in vertices if x.coordinate == 'mid_left'), None).to_string() + ' <- '
+ next((x for x in edges if x.coordinate == (3, 1)), None).to_string() + ' -> '
+ next((x for x in vertices if x.coordinate == 'mid_center'), None).to_string() + ' <- '
+ next((x for x in edges if x.coordinate == (3, 2)), None).to_string() + ' -> '
+ next((x for x in vertices if x.coordinate == 'mid_right'), None).to_string())
print(' ^ ^ ^')
print(' | | |')
print(next((x for x in edges if x.coordinate == (4, 1)), None).to_string() + ' '
+ next((x for x in tiles if x.coordinate == 'tile_3'), None).resource + ' '
+ next((x for x in edges if x.coordinate == (4, 2)), None).to_string() + ' '
+ next((x for x in tiles if x.coordinate == 'tile_4'), None).resource + ' '
+ next((x for x in edges if x.coordinate == (4, 3)), None).to_string())
print(' | ' + str(next((x for x in tiles if x.coordinate == 'tile_3'), None).dice_roll) + ' | '
+ ' ' + str(next((x for x in tiles if x.coordinate == 'tile_4'), None).dice_roll) + ' | ')
print(' v v v')
print(next((x for x in vertices if x.coordinate == 'bot_left'), None).to_string() + ' <- '
+ next((x for x in edges if x.coordinate == (5, 1)), None).to_string() + ' -> '
+ next((x for x in vertices if x.coordinate == 'bot_center'), None).to_string() + ' <- '
+ next((x for x in edges if x.coordinate == (5, 2)), None).to_string() + ' -> '
+ next((x for x in vertices if x.coordinate == 'bot_right'), None).to_string())
if __name__ == '__main__':
# Welcome to the simulator
no_of_games = 1000
pl1_count = 0
pl2_count = 0
for i in range(no_of_games):
#print(i)
tiles = []
edges = []
vertices = []
#print('Tiles', tiles)
createBoard(tiles, vertices, edges)
players = []
# let's create player 1
pl1 = player.RandomPlayer('pl1')
# lets create player 2
pl2 = player.GreedyPlayer('pl2')
players.append(pl1)
players.append(pl2)
# now let's have both select their first settlements
pl1.choose_first_settlement(vertices)
pl2.choose_first_settlement(vertices)
pl1.choose_first_settlement(vertices)
pl2.choose_first_settlement(vertices)
# now lets simulate who wins!!!
turns = 1
finished = False
while (not finished) and turns < 50:
pl1_dice_roll = np.random.randint(1, 5)
# print('pl1_dice_roll: ', pl1_dice_roll)
give_resources(pl1_dice_roll)
pl1.take_turn(vertices, edges)
if pl1.player_score() > 4:
# printBoard()
pl1_count += 1
finished = True
else:
pl2_dice_roll = np.random.randint(1, 5)
# print('pl2_dice_roll: ', pl2_dice_roll)
                give_resources(pl2_dice_roll)
                pl2.take_turn(vertices, edges)
if pl2.player_score() > 4:
# printBoard()
pl2_count += 1
finished = True
turns += 1
#print(turns)
del tiles
del edges
del vertices
del players
print(pl1_count, pl2_count)
print('pl1 Winning %:', float(pl1_count) / (pl1_count + pl2_count) * 100, 'pl2 Winning %:',
float(pl2_count) / (pl1_count + pl2_count) * 100)
# print(pl1.hand)
# print(pl2.hand)
``` |
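The three dictionaries near the top of `main.py` are meant to be mutually consistent: every edge listed for a vertex should list that vertex back. A small sanity-check sketch, assuming `main.py` and its sibling modules (`player`, `vertex`, `Edge`, `tile`) are importable; the `__main__` guard keeps the simulation from running on import.

```python
from main import vert2edge, edge2vert

# every (vertex, edge) pairing should be mirrored in the reverse mapping
for vert, edge_list in vert2edge.items():
    for edge in edge_list:
        assert vert in edge2vert[edge], (vert, edge)
print("vertex/edge maps are consistent")
```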
{
"source": "JohnSpeno/python-secure-zone",
"score": 2
} |
#### File: python-secure-zone/psz/models.py
```python
from django.db import models
from django.db.models import Manager
from datetime import datetime
import os
import sys
import config
import keygen
from errors import PszError
KEY_TYPES = (
('ZSK', 'Zone signing key'),
('KSK', 'Key signing key')
)
KEY_ALGOS = [(k, v) for k, v in config.KEY_ALGORITHMS.iteritems()]
KEY_STATUSES = (
('new', 'a newly created key pair. key not in DNS'),
('pre-active', 'key pair in place to sign, but not in DNS'),
('published', 'key in DNS but not signing'),
('active', 'key in DNS and signing records'),
('rolled-stage1', 'key in DNS, ZSK not signing. KSK signing'),
('expired', 'key not in DNS. not signing.'),
('deleted', 'key pair files have been deleted'),
)
class DnskeyManager(Manager):
"""
Provides convenience methods for Dnskey objects.
"""
def get_zone_keys(self, zone=None):
"""
Returns the non expired keys for a zone or for all zones if
no zone is specified.
"""
qs = self.get_query_set().exclude(status__in=['expired', 'deleted'])
if zone:
qs = qs.filter(zone=zone)
return qs
class BaseDnskey(models.Model):
"""
A BaseDnskey object. It just handles the ORM bits of a Dnskey.
It's abstract, so no tables will be created for this model.
"""
algorithm = models.CharField(max_length=128, choices=KEY_ALGOS)
keytag = models.CharField(max_length=128, db_index=True)
zone = models.TextField(db_index=True)
type = models.CharField(max_length=32, choices=KEY_TYPES)
size = models.IntegerField()
status = models.CharField(
max_length=128, choices=KEY_STATUSES,
default='new'
)
updated = models.DateTimeField(default=datetime.now)
class Meta:
abstract = True
objects = DnskeyManager()
def __cmp__(self, other):
return cmp(self.type, other.type)
def __unicode__(self):
return "%s %s %s (%s %s bits)" % (
self.zone, self.type, self.keytag, self.algorithm, self.size
)
_KEY_LOCATIONS = {
'new': config.DEFAULTS['path_newkeydir'],
'published': config.DEFAULTS['path_newkeydir'],
'pre-active': '',
'active': '',
'ksk+rolled-stage1': '',
'zsk+rolled-stage1': config.DEFAULTS['path_oldkeydir'],
'expired': config.DEFAULTS['path_oldkeydir'],
'deleted': None,
}
def _key_file_path(zone, keytype, keystatus):
"""
Returns the directory where a key's files should be located
or None if it can't be determined.
"""
try:
subdir = _KEY_LOCATIONS[keystatus]
except KeyError:
k = '%s+%s' % (keytype.lower(), keystatus)
subdir = _KEY_LOCATIONS.get(k, None)
if subdir is None:
return None
return str(os.path.join(config.DEFAULTS['path_zonedir'], zone, subdir))
class Dnskey(BaseDnskey):
"""
A Dnskey that exists on the filesystem as a keypair.
"""
def __init__(self, *args, **kwargs):
super(Dnskey, self).__init__(*args, **kwargs)
self._dnsdata = None
self._directory = None
self._keyname = None
self._path_public = None
self._path_private = None
@property
def dnsdata(self):
"""Returns the public key portion of the DNSKEY's rdata."""
if self._dnsdata is not None:
return self._dnsdata
if self.path_public is None:
return None
try:
self._dnsdata = open(self.path_public).read()[:-1]
except IOError:
pass
return self._dnsdata
@dnsdata.setter
def dnsdata(self, value):
self._dnsdata = value
@property
def keyname(self):
if self._keyname is None:
if self.algorithm and self.zone and self.keytag:
algonum = config.KEY_ALGORITHMS[self.algorithm]
keyname = 'K%s.+%s+%s' % (self.zone, algonum, self.keytag)
self._keyname = str(keyname)
return self._keyname
@keyname.setter
def keyname(self, value):
self._keyname = value
@property
def directory(self):
if not self.zone:
return None
if self._directory is None:
self._directory = _key_file_path(self.zone, self.type, self.status)
return self._directory
@directory.setter
def directory(self, value):
self._directory = value
@property
def path_private(self):
if self.directory is None:
return None
if self._path_private is None:
if self.keyname:
filename = '%s.private' % self.keyname
self._path_private = os.path.join(self.directory, filename)
return self._path_private
@property
def path_public(self):
if self.directory is None:
return None
if self._path_public is None:
if self.keyname:
filename = '%s.key' % self.keyname
self._path_public = os.path.join(self.directory, filename)
return self._path_public
def move(self, destination):
"""
Move key's files to destination.
"""
public_file = '%s.key' % self.keyname
new_path_public = os.path.join(destination, public_file)
try:
os.rename(self.path_public, new_path_public)
except OSError, e:
raise PszError('%s' % e)
self._path_public = new_path_public
private_file = '%s.private' % self.keyname
new_path_private = os.path.join(destination, private_file)
try:
os.rename(self.path_private, new_path_private)
except OSError, e:
raise PszError('%s' % e)
self._path_private = new_path_private
self.directory = destination
def unlink(self):
"""
Unlinks a key's public and private files.
"""
if self.path_public and self.path_private:
try:
os.unlink(self.path_public)
except OSError, e:
print >>sys.stderr, "%s" % e
try:
os.unlink(self.path_private)
except OSError, e:
print >>sys.stderr, "%s" % e
self.update('deleted')
def update(self, status):
"""
Saves Dnskey with new status.
"""
if status == self.status:
return
self.status = status
self.updated = datetime.now()
self.save()
@classmethod
    def from_dnssec_keygen(cls, zone, keytype='ZSK', algname=None, size=None):
        """Create a key pair on disk and return a Dnskey instance.
The instance isn't saved in the ORM by default.
XXX move this to keygen directory?
"""
if algname is None:
algname = config.DEFAULTS[keytype.lower() + '_algorithm']
if size is None:
size = config.DEFAULTS[keytype.lower() + '_keysize']
keyname, dnsdata = keygen.create_key(zone, algname, size, keytype)
nameparts = keyname.split('+')
keytag = nameparts[2]
inst = cls(
algorithm=algname, keytag=keytag,
zone=zone, type=keytype, size=size,
)
inst.dnsdata = dnsdata
inst.keyname = keyname
inst.directory = os.getcwd()
return inst
class LogMessage(models.Model):
zone = models.TextField(db_index=True)
user = models.CharField(max_length=32, default=config.USER)
timestamp = models.DateTimeField(default=datetime.now)
message = models.TextField()
```
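The `_KEY_LOCATIONS` table drives where a key pair's files live at each point in its lifecycle. A sketch of how `_key_file_path` resolves that, assuming Django settings for the psz project are configured and the config defaults shown above are in place:

```python
from psz.models import _key_file_path

# a fresh ZSK sits in the zone's "new keys" directory, an active one in the
# zone directory itself, and a deleted key has no files at all
print(_key_file_path("example.com", "ZSK", "new"))
print(_key_file_path("example.com", "ZSK", "active"))
print(_key_file_path("example.com", "ZSK", "deleted"))  # None
```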
#### File: python-secure-zone/tests/test_dns.py
```python
from psz import named
def setup_module():
pass
def teardown():
pass
def test_dns_defaults():
dns = named.Dns()
assert dns.server == '127.0.0.1'
def test_dns_resolver_server():
dns = named.Dns(server='ns1')
assert 'ns1' in dns._resolver.nameservers
def test_dns_update():
dns = named.Dns()
assert dns.update is not None
``` |
{
"source": "john-src/grpc",
"score": 2
} |
#### File: tests/unit/_logging_test.py
```python
import unittest
import six
from six.moves import reload_module
import logging
import grpc
import functools
import sys
def patch_stderr(f):
@functools.wraps(f)
def _impl(*args, **kwargs):
old_stderr = sys.stderr
sys.stderr = six.StringIO()
try:
f(*args, **kwargs)
finally:
sys.stderr = old_stderr
return _impl
def isolated_logging(f):
@functools.wraps(f)
def _impl(*args, **kwargs):
reload_module(logging)
reload_module(grpc)
try:
f(*args, **kwargs)
finally:
reload_module(logging)
return _impl
class LoggingTest(unittest.TestCase):
@isolated_logging
def test_logger_not_occupied(self):
self.assertEqual(0, len(logging.getLogger().handlers))
@patch_stderr
@isolated_logging
def test_handler_found(self):
self.assertEqual(0, len(sys.stderr.getvalue()))
@isolated_logging
def test_can_configure_logger(self):
intended_stream = six.StringIO()
logging.basicConfig(stream=intended_stream)
self.assertEqual(1, len(logging.getLogger().handlers))
self.assertIs(logging.getLogger().handlers[0].stream, intended_stream)
@isolated_logging
def test_grpc_logger(self):
self.assertIn("grpc", logging.Logger.manager.loggerDict)
root_logger = logging.getLogger("grpc")
self.assertEqual(1, len(root_logger.handlers))
self.assertIsInstance(root_logger.handlers[0], logging.NullHandler)
if __name__ == '__main__':
unittest.main(verbosity=2)
``` |
{
"source": "johnsriot/twilio-text-to-call",
"score": 3
} |
#### File: johnsriot/twilio-text-to-call/response.py
```python
import os
import phonenumbers
from twilio.twiml.messaging_response import MessagingResponse
from twilio.rest import Client
class Caller:
""" Automated call helper operations """
def __init__(self, phonenumber, message_payload):
self.phonenumber = phonenumber
self.message_payload = int(message_payload) #Ensure we are passing an int
self.dispatch = {
1: 'https://handler.twilio.com/twiml/EHdf6d3bfcb64fa9121736d3ea5d0b2b6a', # Ice cream truck
2: 'https://handler.twilio.com/twiml/EH50d84562bb51478300e08a017371c3e4', # Boom shaka lakka
3: 'https://handler.twilio.com/twiml/EHfa775c7c156e3fb95d642bae052d6e3f', # Rick Rolled
4: 'https://handler.twilio.com/twiml/EH0d97af2131d29ad0ca35a5768452392b' # Brother
}
def return_message(self):
return self.dispatch[self.message_payload]
def format_number(self):
call_to = str("+1" + self.phonenumber)
if self.validate_number(call_to):
return call_to
def validate_number(self, phonenumber):
parse = phonenumbers.parse(phonenumber)
return phonenumbers.is_valid_number(parse)
def lambda_handler(event, context):
# Take raw string and turn into usable variables
raw_data = event['Body'].split("+")
caller = Caller(raw_data[1], raw_data[2])
url_bin = caller.return_message() #TEST
call_to = caller.format_number()
# Initialize Twilio Client
client = Client(os.environ['ACCOUNT_SID'], os.environ['AUTH_TOKEN'])
call = client.calls.create(
url=url_bin,
to=call_to,
from_=os.environ['SOURCE']
)
# Create success message for user
resp = MessagingResponse()
resp.message('Awesomesauce')
return str(resp)
``` |
{
"source": "JohnStarich/dotfiles",
"score": 4
} |
#### File: python/johnstarich/interval.py
```python
import time
class Interval(object):
def __init__(self, delay_time: int):
self.delay_time = delay_time
self.current_time = 0
@staticmethod
def now():
return time.gmtime().tm_sec
def should_run(self) -> bool:
if self.current_time == 0:
self.current_time = Interval.now()
return True
return self.is_done()
def is_done(self) -> bool:
timestamp = Interval.now()
return self.current_time + self.delay_time < timestamp or \
self.current_time > timestamp
def start(self) -> int:
self.current_time = Interval.now()
return self.current_time
```
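A minimal usage sketch of the `Interval` helper above, assuming it is imported from `johnstarich.interval`. Note that `Interval.now()` only looks at the seconds field of the current minute, so delays are intended to stay well under 60 seconds.

```python
import time

from johnstarich.interval import Interval

refresh = Interval(5)          # rate-limit a task to roughly once every 5 seconds
if refresh.should_run():       # the first call always runs and records the time
    refresh.start()
    print("doing the expensive thing")

time.sleep(6)
print(refresh.is_done())       # True once the delay has elapsed (or the minute wrapped)
```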
#### File: johnstarich/weather/__init__.py
```python
from johnstarich.weather.parse import raw_weather_info
from johnstarich.interval import Interval
from johnstarich.segment import segment, segment_default
weather_status_icons = {
'clear': '☀️',
'clear night': '🌙',
'few clouds': '⛅️',
'few clouds night': '☁️',
'clouds': '☁️',
'rain': '🌧',
'shower rain': '🌦',
'shower rain night': '🌧',
'thunderstorm': '⛈',
'snow': '🌨',
'mist': '💨',
'disaster': '🌪',
'invalid': '🚫',
}
# Yahoo! Weather codes:
# https://developer.yahoo.com/weather/documentation.html#codes
weather_status_mappings = {
0: ['tornado', 'disaster'],
1: ['tropical storm', 'disaster'],
2: ['hurricane', 'disaster'],
3: ['severe thunderstorms', 'thunderstorm'],
4: ['thunderstorms', 'thunderstorm'],
5: ['mixed rain and snow', 'snow'],
6: ['mixed rain and sleet', 'snow'],
7: ['mixed snow and sleet', 'snow'],
8: ['freezing drizzle', 'rain'],
9: ['drizzle', 'shower rain'],
10: ['freezing rain', 'rain'],
11: ['showers', 'shower rain'],
12: ['showers', 'shower rain'],
13: ['snow flurries', 'snow'],
14: ['light snow showers', 'snow'],
15: ['blowing snow', 'snow'],
16: ['snow', 'snow'],
17: ['hail', 'snow'],
18: ['sleet', 'snow'],
19: ['dust', 'mist'],
20: ['foggy', 'mist'],
21: ['haze', 'mist'],
22: ['smoky', 'mist'],
23: ['blustery', 'mist'],
24: ['windy', 'mist'],
25: ['cold', 'clear'],
26: ['cloudy', 'clouds'],
27: ['mostly cloudy (night)', 'clouds'],
28: ['mostly cloudy (day)', 'clouds'],
29: ['partly cloudy (night)', 'few clouds'],
30: ['partly cloudy (day)', 'few clouds'],
31: ['clear (night)', 'clear night'],
32: ['sunny', 'clear'],
33: ['fair (night)', 'clear night'],
34: ['fair (day)', 'clear'],
35: ['mixed rain and hail', 'snow'],
36: ['hot', 'clear'],
37: ['isolated thunderstorms', 'thunderstorm'],
38: ['scattered thunderstorms', 'thunderstorm'],
39: ['scattered thunderstorms', 'thunderstorm'],
40: ['scattered showers', 'shower rain'],
41: ['heavy snow', 'snow'],
42: ['scattered snow showers', 'snow'],
43: ['heavy snow', 'snow'],
44: ['partly cloudy', 'few clouds'],
45: ['thundershowers', 'thunderstorm'],
46: ['snow showers', 'snow'],
47: ['isolated thundershowers', 'thunderstorm'],
3200: ['not available', 'invalid'],
}
segment_kwargs = {
'highlight_groups': [
'weather_temp_gradient',
'weather_temp',
'weather'
],
}
update_interval = Interval(5 * 60)
last_status = ''
last_gradient = 0
def weather(pl, unit: str='C', temp_low: float=0, temp_high: float=100,
**kwargs) -> list:
global last_status, last_gradient
if not update_interval.should_run():
return segment(last_status, gradient_level=last_gradient,
**segment_kwargs)
if temp_low >= temp_high:
        raise ValueError('temp_low cannot be higher than or '
'the same as temp_high')
weather = raw_weather_info()
warning_str = None
if 'error' in weather and 'location' in weather['error']:
warning_str = ' ⚠️ 🌎 '
if len(weather.keys()) == 1:
update_interval.start()
if not last_status.endswith(warning_str):
last_status += warning_str
return segment(last_status, gradient_level=last_gradient,
**segment_kwargs)
elif 'error' in weather:
update_interval.start()
warning_str = ' ⚠️ '
if '⚠️' not in last_status:
last_status += warning_str
return segment(last_status, gradient_level=last_gradient,
**segment_kwargs)
print(weather)
temperature = weather['temperature']
input_unit = weather['units']['temperature']
humidity = weather['humidity']
additional_variance = 0
temp_in_fahrenheit = convert_temperature(temperature, input_unit, 'F')
if temp_in_fahrenheit >= 80 and humidity >= 40:
display_temperature = heat_index(temperature, humidity,
input_unit, unit)
if display_temperature != temp_in_fahrenheit:
additional_variance = convert_temperature(1.3, 'F', unit) - \
convert_temperature(0, 'F', unit)
elif temp_in_fahrenheit <= 50:
display_temperature = convert_temperature(weather['wind']['chill'],
input_unit, unit)
else:
display_temperature = convert_temperature(temperature,
input_unit, unit)
gradient = 100 * (display_temperature - temp_low) / (temp_high - temp_low)
if display_temperature > temp_high:
gradient = 100
elif display_temperature < temp_low:
gradient = 0
variance = ''
if additional_variance != 0:
display_temperature = round(display_temperature)
variance = '±' + str(round(abs(additional_variance), 1))
else:
display_temperature = round(display_temperature, 1)
contents = '{icon} {temp}{var}°{unit}{warning}'.format(
icon=extract_icon(weather),
temp=display_temperature,
unit=unit,
var=variance,
warning=warning_str if warning_str is not None else '',
)
update_interval.start()
last_status = contents
last_gradient = gradient
return segment(contents, gradient_level=gradient, **segment_kwargs)
def extract_icon(weather: dict) -> str:
weather_code = weather['code']
if weather_code not in weather_status_mappings:
return weather_status_icons['invalid']
return weather_status_icons[weather_status_mappings[weather_code][1]]
heat_index_constants = [
-42.379, 2.04901523, 10.14333127, -0.22475541, -6.83783e-3,
-5.481717e-2, 1.22874e-3, 8.5282e-4, -1.99e-6
]
def heat_index(temperature: float, relative_humidity: float,
input_unit: str, output_unit: str) -> float:
if input_unit not in 'KCF':
raise ValueError('Invalid input unit: ' + input_unit)
if output_unit not in 'KCF':
raise ValueError('Invalid output unit: ' + output_unit)
global heat_index_constants
HI_C = heat_index_constants
T = temperature_conversions[input_unit + 'F'](temperature)
R = relative_humidity
HI = HI_C[0] + HI_C[1]*T + HI_C[2]*R + HI_C[3]*T*R + HI_C[4]*T*T + \
HI_C[5]*R*R + HI_C[6]*T*T*R + HI_C[7]*T*R*R + HI_C[8]*T*T*R*R
return temperature_conversions['F' + output_unit](HI)
temperature_conversions = {
'CC': lambda t: t,
'CF': lambda t: t * 9/5 + 32,
'CK': lambda t: t + 273.15,
'FC': lambda t: (t - 32) * 5/9,
'FF': lambda t: t,
'FK': lambda t: (t + 459.67) * 5/9,
'KC': lambda t: t - 273.15,
'KF': lambda t: t * 9/5 - 459.67,
'KK': lambda t: t,
}
def convert_temperature(temperature: float, input_unit: str, output_unit: str):
if input_unit not in 'KCF':
raise ValueError('Input unit is not valid: ' + input_unit)
if output_unit not in 'KCF':
raise ValueError('Output unit is not valid: ' + output_unit)
return temperature_conversions[input_unit + output_unit](temperature)
``` |
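The temperature helpers at the bottom of the file are pure functions, so they are easy to exercise on their own. A sketch, assuming the package above and its siblings (`johnstarich.weather.parse`, `johnstarich.interval`, `johnstarich.segment`) are importable:

```python
from johnstarich.weather import convert_temperature, heat_index

print(convert_temperature(100, 'C', 'F'))             # 212.0
print(round(convert_temperature(300, 'K', 'C'), 2))   # 26.85

# hot and humid: 90 degF at 70% relative humidity feels like roughly 106 degF
print(round(heat_index(90, 70, 'F', 'F'), 1))
```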
{
"source": "JohnStarich/java-skip-list",
"score": 3
} |
#### File: JohnStarich/java-skip-list/markdown_filter.py
```python
from panflute import *
removed_top_header = False
def markdown_header(elem, doc):
"""
Removes the first top level header (h1),
sets the document title to that header's contents,
and lowers the level of the other headers.
"""
global removed_top_header
if type(elem) != Header:
return None
if elem.level > 1:
elem.level -= 1
return None
if removed_top_header is True:
return None
removed_top_header = True
doc.metadata['title'] = MetaInlines(*elem.content)
return []
if __name__ == '__main__':
run_filter(markdown_header)
``` |
{
"source": "JohnStarich/python-pool-performance",
"score": 2
} |
#### File: python-pool-performance/pools/pool.py
```python
from requests.adapters import HTTPAdapter
from collections.abc import Mapping, Sequence
from types import FunctionType
from tqdm import tqdm
import time
import sys
import gc
class PoolTest(object):
def __init__(self, worker_count: int):
self.worker_count = worker_count
self.pool = self.init_pool(worker_count)
self.compute_resource = self.init_compute_resource()
self.network_resource = self.init_network_resource()
def init_pool(self, worker_count: int) -> object:
raise NotImplementedError("{} does not implement init_pool"
.format(self.__class__.__name__))
def destroy_pool(self):
pass
def map(self, work_func: FunctionType, inputs: Sequence) -> Sequence:
raise NotImplementedError("{} does not implement map"
.format(self.__class__.__name__))
def init_compute_resource(self) -> object:
from cmath import sqrt
return sqrt
def init_network_resource(self) -> object:
import requests
return requests.Session
@staticmethod
def do_compute_work(args) -> None:
compute_resource, num, *_ = args
sqrt = compute_resource
sqrt(sqrt(sqrt(num)))
@staticmethod
def do_network_work(args) -> None:
network_resource, *_ = args
Session = network_resource
with Session() as s:
adapter = HTTPAdapter(max_retries=3)
s.mount('http://', adapter)
s.get('http://localhost:8080/')
def run_compute_test(self, jobs: int, trials: int,
show_progress: bool=False) -> Mapping:
return self._run_test(self.do_compute_work, self.compute_resource,
jobs, trials, show_progress=show_progress)
def run_network_test(self, jobs: int, trials: int,
show_progress: bool=False) -> Mapping:
return self._run_test(self.do_network_work, self.network_resource,
jobs, trials, show_progress=show_progress)
def _run_test(self, work_func: FunctionType, work_resource: object,
jobs: int, trials: int,
show_progress: bool=False) -> Mapping:
results = {
'jobs': jobs,
'trials': trials,
'time': [],
'blocks': [],
}
        # Forcibly evaluate the inputs so they don't consume time/resources during the timed trials
inputs = list(zip(
[work_resource] * jobs,
range(jobs)
))
trial_iter = range(trials)
if show_progress is True and trials > 2:
trial_iter = tqdm(trial_iter, desc='trials')
gc.collect()
for _ in trial_iter:
# Run trial of pool map function and measure it
gc.collect()
blocks_start = sys.getallocatedblocks()
time_start = time.time()
list(self.map(work_func, inputs))
time_end = time.time()
results['time'].append(time_end - time_start)
# Get allocated blocks before garbage collection to show peak usage
blocks_end = sys.getallocatedblocks()
results['blocks'].append(blocks_end - blocks_start)
return results
```
#### File: python-pool-performance/pools/standard_library.py
```python
from pools import PoolTest
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
class StandardProcessPool(PoolTest):
def init_pool(self, worker_count):
return ProcessPoolExecutor(worker_count)
def map(self, work_func, inputs):
return self.pool.map(work_func, inputs)
class StandardThreadPool(PoolTest):
def init_pool(self, worker_count):
return ThreadPoolExecutor(worker_count)
def map(self, work_func, inputs):
return self.pool.map(work_func, inputs)
``` |
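Putting the two files together, a benchmark run looks roughly like the sketch below, assuming the `pools` package above is on the path; the localhost:8080 endpoint is only needed if you call `run_network_test` instead.

```python
from pools.standard_library import StandardThreadPool

pool = StandardThreadPool(worker_count=4)
results = pool.run_compute_test(jobs=1000, trials=3, show_progress=True)

# mean wall-clock seconds per trial
print(sum(results['time']) / results['trials'])
pool.destroy_pool()
```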
{
"source": "johnstat101/my-blogsite",
"score": 3
} |
#### File: my-blogsite/tests/test_comments.py
```python
import unittest
from app.models import Comment, Blog, User
class CommentModelTest(unittest.TestCase):
def setUp(self):
self.new_comment = Comment(id = 1, comment = 'Test comment', user = self.user_emma, blog_id = self.new_blog)
def tearDown(self):
Blog.query.delete()
User.query.delete()
def test_check_instance_variables(self):
self.assertEquals(self.new_comment.comment,'Test comment')
self.assertEquals(self.new_comment.user,self.user_emma)
self.assertEquals(self.new_comment.blog_id,self.new_blog)
``` |
{
"source": "johnstat101/password-locker",
"score": 4
} |
#### File: johnstat101/password-locker/user_test.py
```python
import unittest # Importing the unittest module
from user import UserAccounts # Importing the contact class
class TestUserAccounts(unittest.TestCase):
"""
class to define test cases for the userAccounts class behavior
Args:
unittest.TestCase: TestCase class that helps in creating test cases
"""
def setUp(self):
"""
Set up method to run before each test cases.
"""
self.new_userAccount = UserAccounts("johnKim", "1234") #create an object of type UserAccounts
def test_init(self):
"""
Test whether the object is initialized correctly
"""
self.assertEqual(self.new_userAccount.username,"johnKim")
        self.assertEqual(self.new_userAccount.password,"1234")
def test_save_userAccount(self):
"""
A test case to test if userAccount is saved in userAccountsList
"""
self.new_userAccount.save_userAccount()
self.assertEqual(len(UserAccounts.userAccounts_list),1)
def tearDown(self):
'''
tearDown method that does clean up after each test case has run.
'''
UserAccounts.userAccounts_list = []
    def test_save_multiple_userAccounts(self):
"""
test saving of multiple userAccounts
"""
self.new_userAccount.save_userAccount()
test_userAccount = UserAccounts("kimJohn","4321")
test_userAccount.save_userAccount()
self.assertEqual(len(UserAccounts.userAccounts_list),2)
def test_delete_userAccounts(self):
"""
test case for userAccounts deletion
"""
self.new_userAccount.save_userAccount()
test_userAccount = UserAccounts("kimJohn", "4321")
test_userAccount.save_userAccount()
self.new_userAccount.delete_userAccount()
self.assertEqual(len(UserAccounts.userAccounts_list),1)
def test_find_userAccounts_by_username(self):
"""
test case to search for user userAccounts by username
"""
self.new_userAccount.save_userAccount()
test_userAccount = UserAccounts("kimJohn", "4321")
test_userAccount.save_userAccount()
found_userAccount = UserAccounts.find_userAccounts_by_username("kimJohn")
self.assertEqual(found_userAccount.password, test_userAccount.password)
def test_userAccount_exist(self):
"""
return true if a contact exists
"""
self.new_userAccount.save_userAccount()
test_userAccount = UserAccounts("kimJohn", "4321")
test_userAccount.save_userAccount()
userAccount_exists = UserAccounts.userAccount_exists("kimJohn")
self.assertTrue(userAccount_exists)
def test_display_userAccounts(self):
'''
method that returns a list of all userAccounts saved
'''
self.assertEqual(UserAccounts.display_userAccounts(),UserAccounts.userAccounts_list)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johnstcn/cs7ns6groupF",
"score": 3
} |
#### File: cs7ns6groupF/booking/raft_rpc_client.py
```python
import logging
import socket
from typing import Tuple, Optional
from raft_peer import Peer
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
class RpcClient(object):
def send(self, peer: Peer, msg) -> Tuple[Optional[int], Optional[bool]]:
LOG.debug("RpcClient send peer %s msg:%s", peer, msg)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
sock.connect(peer.hostport())
sock.sendall(bytes(msg))
resp = sock.recv(1024)
LOG.debug("RpcClient response from peer %s: %s", peer, resp)
term_str, success_str = resp.strip().split(b' ')
term = int(term_str)
success = success_str == b'1'
return term, success
except Exception as e:
LOG.warning("Got RpcClient Exception: %s", e)
raise
``` |
{
"source": "johnstcn/whatsnew",
"score": 2
} |
#### File: whatsnew/templatetags/app_filters.py
```python
from django import template
from django.contrib.humanize.templatetags.humanize import naturalday, naturaltime
from datetime import datetime
from pytz import utc
register = template.Library()
@register.filter(name="custom_natural_date")
def custom_natural_date(value):
delta = datetime.now(utc) - value
if delta.days == 0:
return naturaltime(value)
else:
return naturalday(value)
@register.filter(name="get_int_key")
def get_int_key(d, key):
return d.get(unicode(key))
```
#### File: whatsnew/whatsnew/views.py
```python
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login
from django.db.models.aggregates import Max
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.views import generic
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from .models import *
from .forms import *
@login_required
def watch(request, site_id):
return_url = request.META.get('HTTP_REFERER', '/')
if not request.user.is_anonymous():
site = Site.objects.get(pk=site_id)
us = UserSeen.objects.get(user=request.user)
us.seen[site_id] = site.latest_update.id
us.save()
messages.info(request, 'You are now watching %s' % site.name)
else:
messages.warning(request, 'You need to sign in to do that!')
return redirect(return_url)
@login_required
def unwatch(request, site_id):
return_url = request.META.get('HTTP_REFERER', '/')
if not request.user.is_anonymous():
site = Site.objects.get(pk=site_id)
us = UserSeen.objects.get(user=request.user)
del(us.seen[site_id])
us.save()
messages.info(request, 'You are no longer watching %s' % site.name)
else:
messages.warning(request, 'You need to sign in to do that!')
return redirect(return_url)
@never_cache
def redirect_to_update(request, site_update_id):
return_url = request.META.get('HTTP_REFERER', '/')
update = SiteUpdate.objects.filter(pk=site_update_id).get()
if not request.user.is_anonymous():
try:
us = UserSeen.objects.get(user=request.user)
if str(update.site_id) in us.seen:
us.seen[update.site_id] = site_update_id
us.save()
except ObjectDoesNotExist:
messages.error(request, 'Sorry, something went wrong!')
return redirect(return_url)
else:
messages.info(request, 'Sign up to keep track of the comics you like!')
return redirect(update.url)
@method_decorator(never_cache, name='dispatch')
class SitesView(generic.ListView):
template_name = 'all_sites.html'
context_object_name = 'sites'
model = Site
paginate_by = 10
def get_queryset(self):
sites = Site.objects.annotate(Max('siteupdate__date')).order_by('-siteupdate__date__max')
tag = self.kwargs.get("tag", "all")
nsfw = self.kwargs.get("nsfw", "0")
watched = self.kwargs.get("watched", "0")
if tag != "all":
sites = sites.filter(site_tags__name=tag)
if nsfw == "0":
sites = sites.exclude(site_tags__name="nsfw")
if watched == "1":
if self.request.user.is_anonymous():
messages.info(self.request, "You need to sign in to do that!")
else:
seen = self.request.user.userseen.seen
sites = sites.filter(pk__in=seen)
return sites
def get_context_data(self, **kwargs):
context = super(SitesView, self).get_context_data(**kwargs)
tags = [t.name for t in Tag.objects.all() if t.name != 'nsfw']
context["tags"] = tags
context["selected_tag"] = self.kwargs.get("tag", "all")
context["show_nsfw"] = self.kwargs.get("nsfw", "0")
context["only_watched"] = self.kwargs.get("watched", "0")
next_updates = {}
if not self.request.user.is_anonymous():
seen = UserSeen.objects.get(user=self.request.user).seen
for site_id, update_id in seen.iteritems():
try:
update = SiteUpdate.objects.get(pk=update_id)
next_update = SiteUpdate.objects.filter(site_id=site_id, date__gt=update.date).order_by('date').first()
except ObjectDoesNotExist:
next_update = None
if next_update is not None:
next_updates[site_id] = next_update.id
else:
next_updates[site_id] = None
else:
seen = {}
context['next_updates'] = next_updates
context['seen'] = seen
return context
@method_decorator(never_cache, name='dispatch')
class SignUpView(generic.FormView):
template_name = 'sign_in.html'
form_class = SignUpForm
success_url = '/'
def dispatch(self, request):
if self.request.user.is_anonymous():
return super(SignUpView, self).dispatch(request)
else:
messages.warning(self.request, 'You are already signed in!')
return HttpResponseRedirect('/')
def form_valid(self, form):
form.send_signin_email()
messages.info(self.request, 'Check your email for a link to sign in!')
return super(SignUpView, self).form_valid(form)
@method_decorator(never_cache, name='dispatch')
class AuthenticateView(generic.RedirectView):
permanent = False
query_string = False
def get_redirect_url(self, *args, **kwargs):
auth_code = self.kwargs.get('auth_code', '')
try:
user = authenticate(code=auth_code)
login(self.request, user)
messages.success(self.request, 'Welcome %s!' %(user.username))
except ObjectDoesNotExist:
messages.error(self.request, "Sorry, we couldn't figure out who you are.")
finally:
return '/'
``` |
{
"source": "johnstef99/goal-dsl",
"score": 2
} |
#### File: goal-dsl/goal_dsl/__init__.py
```python
import os
from textx import language, metamodel_from_file
from goal_dsl.utils import get_mm
__version__ = "0.1.0.dev"
@language('goal_dsl', '*.goal')
def goal_dsl_language():
"goal_dsl language"
return get_mm()
```
#### File: goal-dsl/goal_dsl/utils.py
```python
from os.path import dirname, join
from textx import metamodel_from_file
import textx.scoping.providers as scoping_providers
this_dir = dirname(__file__)
def get_mm(debug=False, global_scope=True):
"""
"""
    mm = metamodel_from_file(
join(this_dir, 'goal_dsl.tx'),
global_repository=global_scope,
debug=debug
)
mm.register_scope_providers(
{
"*.*": scoping_providers.FQNImportURI(
importAs=True,
# importURI_to_scope_name=importURI_to_scope_name
)
}
)
return mm
def build_model(model_fpath):
mm = get_mm(global_scope=True)
model = mm.model_from_file(model_fpath)
# print(model._tx_loaded_models)
reg_models = mm._tx_model_repository.all_models.filename_to_model
models = [val for key, val in reg_models.items() if val != model]
return (model, models)
def get_grammar(debug=False):
    with open(join(this_dir, 'goal_dsl.tx')) as f:
return f.read()
``` |
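A short sketch of loading a model through the helper above; `mission.goal` is a placeholder filename, not a file that ships with the project.

```python
from goal_dsl.utils import build_model

# parses the model plus any imported models it references
model, imported_models = build_model("mission.goal")
print(type(model).__name__, len(imported_models))
```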
{
"source": "johnstile/job_controller",
"score": 3
} |
#### File: src/web/myapp.py
```python
import os
import logging
from flask import Flask, jsonify, Response, request, abort
from flask_cors import CORS # To allow Swagger and other things to work
import datetime
from flask_jwt_extended import (
JWTManager, jwt_required, create_access_token, create_refresh_token,
get_jwt_identity, jwt_optional, get_jwt_claims
)
# My API Versions
from .api_v1_blueprint import jwt, api_v1_blueprint
here = os.path.dirname(__file__)
app = Flask(__name__, static_url_path='')
jwt = JWTManager()
jwt.init_app(app)
# Register root logger, so blueprint can send logs
# Log Level: basicConfig: my python, werkzeug: all requests
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('werkzeug').setLevel(logging.WARNING)
app.config.from_pyfile(os.path.join(here, 'flask.cfg'))
# we have some cross domain stuff behind nginx
CORS(app)
# Expose API under /V1/
# e.g. /V1/stations
app.register_blueprint(api_v1_blueprint, url_prefix='/V1')
@app.route('/')
def index():
return "Hello Job Controller!!"
@app.route('/echo_request')
def echo_request():
"""API independent route to view request header info"""
return jsonify(dict(request.headers))
if __name__ == "__main__":
app.run(debug=True, threaded=True, host='0.0.0.0')
``` |
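With the app running locally (Flask defaults to port 5000 when `app.run` is given no port), the two plain routes can be poked like this; the port is an assumption if the deployment overrides it.

```python
import requests

print(requests.get("http://localhost:5000/").text)                 # greeting string
print(requests.get("http://localhost:5000/echo_request").json())   # your request headers, echoed back
```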
{
"source": "johnstill/vae_experiments",
"score": 2
} |
#### File: johnstill/vae_experiments/misc_util.py
```python
import math
import os
from itertools import count, groupby
from operator import itemgetter
from types import SimpleNamespace
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn, optim
from torch.distributions import Bernoulli, Normal
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from pytorch_lightning import LightningModule, Trainer
@torch.no_grad()
def get_digit_samples():
by_digit = itemgetter(1)
mnist = MNIST(os.getcwd(), transform=ToTensor())
mnist = sorted(mnist, key=by_digit)
mnist = groupby(mnist, key=by_digit)
samples = []
for digit, grp in mnist:
x, y = next(grp)
samples.append(x.view(-1))
return torch.stack(samples)
@torch.no_grad()
def sweep_variable_across_samples(vae, samples, i, sweep):
"""Sweeps a single latent variable
Arguments
---------
vae : torch.Module
A VAE module; must have a decode method
samples : n-by-z array-like
Contains n samples of z latent variables
i : int < z
The latent variable to sweep
sweep : array
The values to use in sweeping z
"""
# XXX dumb, unvectorized version
recons = []
for sample in samples:
recons.append([])
for val in sweep:
sample[i] = val
# Use just means as image
img, _ = vae.decode(sample)
recons[-1].append(img.detach().numpy())
return np.array(recons)
@torch.no_grad()
def plot_sweep_grid(origs, recons, sweepvals):
idx = count(1)
fig = plt.figure(figsize=(15, 13))
fig.subplots_adjust(hspace=0, wspace=0)
for i in range(10):
plt.subplot(10, 11, next(idx))
plt.imshow(origs[i].reshape(28, 28))
plt.xticks([])
plt.yticks([])
if i == 0:
plt.title('Orig')
for j in range(10):
plt.subplot(10, 11, next(idx))
plt.imshow(recons[i][j].reshape(28, 28))
plt.xticks([])
plt.yticks([])
if i == 0:
plt.title(f'{sweepvals[j]:.2f}')
plt.show()
@torch.no_grad()
def plot_all_sweeps(model):
digits = get_digit_samples()
digit_encodings, *_ = model(digits)
sweep_range = torch.linspace(-4, 4, steps=10)
# for i in range(20):
for i in range(1):
print(f'Sweeping reconstructions over latent variable no. {i}')
recons_by_var = sweep_variable_across_samples(model,
digit_encodings.clone(),
i,
sweep_range)
plot_sweep_grid(digits.detach().numpy(), recons_by_var, sweep_range)
return digit_encodings, sweep_range
@torch.no_grad()
def zeroth_mu_sigma(enc, model):
m, s = model.decode(enc)
s = F.softplus(s)
m0, s0 = m[0], s[0]
plt.subplot(221)
plt.imshow(m0.reshape(28, 28), norm=None, cmap='gray', vmin=0.0, vmax=1.0)
plt.xticks([])
plt.yticks([])
plt.subplot(222)
plt.imshow(s0.reshape(28, 28), norm=None, cmap='gray', vmin=0.0, vmax=1.0)
plt.xticks([])
plt.yticks([])
plt.subplot(223)
plt.imshow(m0.reshape(28, 28))
plt.xticks([])
plt.yticks([])
plt.subplot(224)
plt.imshow(s0.reshape(28, 28))
plt.xticks([])
plt.yticks([])
plt.show()
return m, s
``` |
{
"source": "johnstonematt/pythpy",
"score": 2
} |
#### File: pythpy/calls/async.py
```python
import asyncio
import base64
from solana.rpc.async_api import AsyncClient
from solana.publickey import PublicKey
from pythpy.state.oracle import OracleAccount
async def load_account_bytes(client: AsyncClient, address: PublicKey) -> bytes:
resp = await client.get_account_info(pubkey=address)
if ('result' not in resp) or ('value' not in resp['result']):
raise Exception('Cannot load bytes.')
data = resp['result']['value']['data'][0]
bytes_data = base64.decodebytes(data.encode('ascii'))
return bytes_data
async def call_oracle_account(client: AsyncClient, address: PublicKey) -> OracleAccount:
bytes_data = await load_account_bytes(
client=client,
address=address
)
oracle_account = OracleAccount.parse(
bytes_data=bytes_data
)
return oracle_account
```
#### File: pythpy/calls/sync.py
```python
import base64
from solana.rpc.api import Client
from solana.publickey import PublicKey
from pythpy.state.oracle import OracleAccount
def load_account_bytes(client: Client, address: PublicKey) -> bytes:
resp = client.get_account_info(pubkey=address)
if ('result' not in resp) or ('value' not in resp['result']):
raise Exception('Cannot load bytes.')
data = resp['result']['value']['data'][0]
bytes_data = base64.decodebytes(data.encode('ascii'))
return bytes_data
def call_oracle_account(client: Client, address: PublicKey) -> OracleAccount:
bytes_data = load_account_bytes(
client=client,
address=address
)
oracle_account = OracleAccount.parse(
bytes_data=bytes_data
)
return oracle_account
```
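A hedged usage sketch for the synchronous helpers above: the RPC endpoint and account address are placeholders to be replaced with a real endpoint and a real Pyth oracle account.

```python
# Fetch one oracle account with the synchronous client and print it.
from solana.rpc.api import Client
from solana.publickey import PublicKey
from pythpy.calls.sync import call_oracle_account

client = Client("https://api.mainnet-beta.solana.com")    # placeholder endpoint
address = PublicKey("11111111111111111111111111111111")   # placeholder -- substitute a real oracle address
oracle = call_oracle_account(client=client, address=address)
print(oracle)   # OracleAccount is expected to render itself as indented JSON
```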
#### File: pythpy/state/core.py
```python
import json
from abc import ABC, abstractmethod
from construct import Struct, Container
class StateCore(ABC):
layout: Struct = None
@classmethod
@abstractmethod
def from_container(cls, container: Container):
pass
@classmethod
def parse(cls, bytes_data: bytes, factor: int):
container = cls.layout.parse(bytes_data)
obj = cls.from_container(container=container)
obj = obj.parse_precision(factor=factor)
return obj
@abstractmethod
def parse_precision(self, factor: int):
pass
@abstractmethod
def to_dict(self) -> dict:
pass
def __str__(self):
my_dict = self.to_dict()
return json.dumps(my_dict, sort_keys=False, indent=4)
``` |
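Since `StateCore` is abstract, every concrete account type supplies a `layout` plus the three hooks. The class below is purely illustrative (not one of the project's real account types) and shows how `parse` chains them together:

```python
# Hypothetical concrete subclass demonstrating the StateCore contract.
from construct import Struct, Int64ul, Container
from pythpy.state.core import StateCore

class PriceInfo(StateCore):                                  # illustrative only
    layout: Struct = Struct("price" / Int64ul, "slot" / Int64ul)

    @classmethod
    def from_container(cls, container: Container):
        obj = cls()
        obj.price = container.price
        obj.slot = container.slot
        return obj

    def parse_precision(self, factor: int):
        self.price = self.price / (10 ** factor)             # scale the raw integer price
        return self

    def to_dict(self) -> dict:
        return {"price": self.price, "slot": self.slot}

raw = PriceInfo.layout.build(dict(price=123450000, slot=987))
print(PriceInfo.parse(raw, factor=8))                        # price comes out as 1.2345
```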
{
"source": "johnstonskj/guernsey",
"score": 3
} |
#### File: guernsey/test/chaining.py
```python
import unittest
from guernsey import Client
class TestMethodChaining(unittest.TestCase):
def testSimpleRequest(self):
c = Client.create()
r = c.resource('http://example.com/base/').path('1').accept('text/json').accept('text/xml', 0.5)
self.assertEquals('http://example.com/base/1', r.url)
self.assertEquals('text/json, text/xml; q=0.5', r.headers['Accept'])
def testServiceCall(self):
client = Client.create()
namespaces = client.resource('http://www.thomas-bayer.com/sqlrest/').accept('*/xml').get()
self.assertEquals('http://www.thomas-bayer.com/sqlrest/', namespaces.url)
self.assertEquals('*/xml', namespaces.resource.headers['Accept'])
self.assertEquals('application/xml', namespaces.headers['content-type'])
customers = namespaces.resource.path('CUSTOMERS/').accept('*/xml').get()
self.assertEquals('http://www.thomas-bayer.com/sqlrest/CUSTOMERS/', customers.url)
self.assertEquals('*/xml', customers.resource.headers['Accept'])
self.assertEquals('application/xml', customers.headers['content-type'])
customer = customers.resource.path('../CUSTOMER/22022010').accept('*/xml').get()
self.assertEquals('http://www.thomas-bayer.com/sqlrest/CUSTOMER/22022010', customer.url)
self.assertEquals('*/xml', customer.resource.headers['Accept'])
self.assertEquals('application/xml', customer.headers['content-type'])
``` |
{
"source": "johnstonskj/PyDL7",
"score": 2
} |
#### File: johnstonskj/PyDL7/setup.py
```python
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(
name='PyDL7',
version='0.0.3',
description='Python API for parsing DAN Dive Log files.',
long_description=readme(),
author='<NAME>',
author_email='<EMAIL>',
download_url='https://pypi.python.org/pypi/PyDL7',
url='https://github.com/johnstonskj/PyDL7',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
],
packages=['divelog'],
setup_requires=['pytest-runner'],
tests_require=[
'pytest',
'pytest-cov',
'pytest-catchlog',
'pytest-pep8'
],
entry_points={
'console_scripts': [
'dl7dump=divelog.command_line:main',
],
}
)
```
#### File: PyDL7/tests/test_html.py
```python
import io
import pytest
import divelog.html
import examples
def test_html_parser():
with pytest.raises(NotImplementedError):
divelog.html.parse(None)
def test_html_writer():
def wrap(s):
return '<div class="col-md-10">%s</div>' % s
log = examples.create_log()
file = io.StringIO()
divelog.html.dump(log, file)
content = file.getvalue()
assert wrap('Saturday, 05. March 2016 11:10AM') in content
assert wrap('Every 1 minute') in content
``` |
{
"source": "johnstonskj/rdftools",
"score": 2
} |
#### File: rdftools/rdftools/__init__.py
```python
import argparse
import i18n
import logging
import os
import rdflib
import sys
from termcolor import colored
from timeit import default_timer as timer
__VERSION__ = '0.2.0'
__LOG__ = None
FORMATS = ['nt', 'n3', 'turtle', 'rdfa', 'xml', 'pretty-xml']
HEADER_SEP = '='
COLUMN_SEP = '|'
EMPTY_LINE = ''
COLUMN_SPEC = '{:%d}'
USE_COLOR = False
def startup(description_key, add_args, read_files=True, argv=None):
global __LOG__, USE_COLOR
configure_translation()
description = i18n.t(description_key)
parser = configure_argparse(description, read_files)
if callable(add_args):
parser = add_args(parser)
if argv is None:
command = parser.parse_args()
else:
command = parser.parse_args(argv)
USE_COLOR = command.use_color
process = parser.prog
__LOG__ = configure_logging(process, command.verbose)
__LOG__.info(i18n.t('rdftools.started', tool=process, name=description))
__LOG__.info(argv)
return (__LOG__, command)
def configure_translation(force_locale=None):
i18n.load_path.append(os.path.join(os.path.dirname(__file__), 'messages'))
if force_locale is not None:
i18n.set('locale', force_locale)
i18n.set('fallback', 'en')
def configure_argparse(description, read_files=True):
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-v', '--verbose', default=0, action='count')
parser.add_argument('-b', '--base', action='store')
if read_files:
parser.add_argument('-i', '--input',
type=argparse.FileType('r'), nargs='*')
parser.add_argument('-r', '--read', action='store', choices=FORMATS)
parser.add_argument('-c', '--use-color', action='store_true')
return parser
def configure_logging(name, level):
global __LOG__
logging.basicConfig(format='%(asctime)-15s %(module)s.%(funcName)s:' +
'%(lineno)d [%(levelname)s] %(message)s')
logger = logging.getLogger(name)
    if level > 2:
        logger.setLevel(logging.DEBUG)
    elif level > 1:
        logger.setLevel(logging.INFO)
elif level > 0:
logger.setLevel(logging.WARN)
else:
logger.setLevel(logging.ERROR)
logger.info(i18n.t('rdftools.logging', level=logger.getEffectiveLevel()))
__LOG__ = logger
return logger
def read_into(input, format, graph, base=None):
start = end = 0
if format is None:
if input is None:
format = FORMATS[0]
else:
format = rdflib.util.guess_format(input.name)
if input is None:
__LOG__.info(i18n.t('rdftools.read_stdin', format=format))
start = timer()
graph.parse(source=sys.stdin.buffer, format=format, publicID=base)
end = timer()
else:
__LOG__.info(i18n.t('rdftools.read_file',
name=input.name, format=format))
start = timer()
graph.parse(source=input.name, format=format, publicID=base)
end = timer()
__LOG__.info(i18n.t('rdftools.read_complete',
len=len(graph), time=end - start))
return graph
def read(input, format, base=None):
graph = rdflib.Graph()
return read_into(input, format, graph, base)
def read_all(inputs, format, base=None):
graph = rdflib.Graph()
for input in inputs:
graph = read_into(input, format, graph, base)
return graph
def write(graph, output, format, base=None):
__LOG__.debug(i18n.t('rdftools.write', graph=graph, len=len(graph)))
start = end = 0
if format is None:
if output is None:
format = FORMATS[0]
else:
format = rdflib.util.guess_format(output.name)
if output is None:
__LOG__.info(i18n.t('rdftools.write_stdout', format=format))
start = timer()
data = graph.serialize(format=format, base=base)
end = timer()
try:
# This fails on Travis ONLY for Python 3.4
sys.stdout.buffer.write(data)
except AttributeError:
sys.stdout.write(data.decode('utf-8'))
else:
__LOG__.info(i18n.t('rdftools.write_file',
name=output.name, format=format))
start = timer()
graph.serialize(destination=output.name, format=format, base=base)
end = timer()
__LOG__.debug(i18n.t('rdftools.write_complete', time=(end - start)))
def get_terminal_width(default=80):
import shutil
return shutil.get_terminal_size((default, 20))[0]
def header(str):
return colored(str, attrs=['reverse']) if USE_COLOR else str
def line(str):
return colored(str, attrs=['dark']) if USE_COLOR else str
def comment(str):
return colored(str, attrs=['dark']) if USE_COLOR else str
def report(columns, rows, timer=0):
# TODO: Should also take this as a parameter? so "rdf query -c 80 -q ..."
width = get_terminal_width()
col_width = int((width - len(columns)) / len(columns))
col_string = COLUMN_SPEC % col_width
for column in columns:
print(header(col_string.format(column)), end=line(COLUMN_SEP))
print(EMPTY_LINE)
for column in columns:
print(line(HEADER_SEP * col_width), end=line(COLUMN_SEP))
print(EMPTY_LINE)
for row in rows:
for col in columns:
print(col_string.format(row[col]), end=line(COLUMN_SEP))
print(EMPTY_LINE)
if timer != 0:
print(comment(i18n.t('rdftools.report_timed',
len=len(rows), time=timer)))
else:
print(comment(i18n.t('rdftools.report_timed',
len=len(rows))))
```
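`report` expects `columns` as a list of header names and `rows` as dicts keyed by those names; a small sketch with made-up data (the message catalogue has to be loaded first via `configure_translation`):

```python
# Illustrative call to report(); subjects and counts are invented.
from rdftools import configure_translation, report

configure_translation()                     # point python-i18n at the bundled messages
columns = ['subject', 'count']
rows = [
    {'subject': 'http://example.org/a', 'count': 3},
    {'subject': 'http://example.org/b', 'count': 1},
]
report(columns, rows, timer=0.12)           # prints a fixed-width table plus a timing line
```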
#### File: rdftools/test/test_query.py
```python
import pytest
from unittest.mock import patch
from rdftools.scripts import query
from test.sample_data import input_file
expected_out = sorted([
'http://example.org/social/profile/1.0/Person',
'http://example.org/social/topics/1.0/Topic',
'http://example.org/social/profile/1.0/Family',
])
def test_query_script(capsys):
with patch('sys.argv',
['test_query', '-i', input_file, '-r', 'n3', '-q',
'SELECT DISTINCT ?type WHERE { ?s a ?type }']):
query.main()
(out, err) = capsys.readouterr()
out_lines = sorted([line for line in out.split('\n')
if line.startswith('http://')])
assert len(out_lines) == len(expected_out)
for (index, line) in enumerate(expected_out):
assert out_lines[index].startswith(line)
def test_query_script_empty(capsys):
expected = "query returned no results."
with patch('sys.argv',
['test_convert', '-i', input_file, '-r', 'n3', '-q',
'SELECT DISTINCT ?type WHERE { ?s a <http://example.org/people/me> }']): # noqa: 501
query.main()
(out, err) = capsys.readouterr()
assert out.index(expected) >= 0
def test_query_script_bad_sparql(capsys):
import pyparsing
expected_err = "Expected {SelectQuery | ConstructQuery | DescribeQuery | AskQuery}" # noqa: 501
with patch('sys.argv',
['test_convert', '-i', input_file, '-r', 'n3', '-q',
'WHAT IS SPARQL?']):
try:
query.main()
pytest.fail('expecting: %s' % expected_err)
except pyparsing.ParseException as ex:
assert str(ex).index(expected_err) >= 0
``` |
{
"source": "JohnStov/buildmon",
"score": 3
} |
#### File: JohnStov/buildmon/Missile.py
```python
import Platform
device = None
if Platform.is_raspberrypi():
import usb.core
device = usb.core.find(idVendor=0x2123, idProduct=0x1010)
try:
device.detach_kernel_driver(0)
except Exception:
pass
import time
Down = 0x01
Up = 0x02
Left = 0x04
Right = 0x08
Fire = 0x10
Stop = 0x20
def send_cmd(cmd):
global device
if device != None:
device.ctrl_transfer(0x21, 0x09, 0, 0, [0x02, cmd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
else:
print("sending usb command {0}".format(cmd))
def led(cmd):
global device
if device != None:
device.ctrl_transfer(0x21, 0x09, 0, 0, [0x03, cmd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
else:
print("sending missile led state {0}".format(cmd))
led(0)
def send_move(cmd, duration_ms):
global device
if device != None:
send_cmd(cmd)
time.sleep(duration_ms / 1000.0)
send_cmd(Stop)
def run_command(command, value):
command = command.lower()
if command == "right":
send_move(Right, value)
elif command == "left":
send_move(Left, value)
elif command == "up":
send_move(Up, value)
elif command == "down":
send_move(Down, value)
elif command == "zero" or command == "park" or command == "reset":
# Move to bottom-left
send_move(Down, 2000)
send_move(Left, 8000)
elif command == "pause" or command == "sleep":
time.sleep(value / 1000.0)
elif command == "led":
if value == 0:
led(0x00)
else:
led(0x01)
elif command == "fire" or command == "shoot":
if value < 1 or value > 4:
value = 1
# Stabilize prior to the shot, then allow for reload time after.
time.sleep(0.5)
for i in range(value):
send_cmd(Fire)
time.sleep(4.5)
else:
print "Error: Unknown command: '%s'" % command
def run_command_set(commands):
for cmd, value in commands:
run_command(cmd, value)
if __name__ == "__main__":
led(0)
time.sleep(1)
led(1)
time.sleep(1)
led(0)
send_move(Left, 1000)
send_cmd(Fire)
```
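`run_command`/`run_command_set` form a small string-and-milliseconds command DSL; a sketch of a demo sequence (durations are arbitrary, and `Platform.py` from the same project must be importable):

```python
# Park the launcher, aim up-right, blink the LED, fire a single shot.
import Missile

demo = [
    ("reset", 0),       # park at bottom-left (value is ignored for reset)
    ("right", 1500),    # pan right for 1.5 s
    ("up", 500),        # tilt up for 0.5 s
    ("led", 1),         # LED on
    ("fire", 1),        # one shot
    ("led", 0),         # LED off
]
Missile.run_command_set(demo)
```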
#### File: JohnStov/buildmon/Test.py
```python
from Speech import say
import time
import Display
import Lights
def test_lights():
say('Testing lights')
Display.set_cursor_position(0,2)
Display.write('Lights ')
say('Lamp 0 on')
Display.set_cursor_position(0,2)
Display.write('Lamp 0 On ')
Lights.set_lamp(0, 1)
time.sleep(3)
say('Lamp 1 on')
Display.set_cursor_position(0,2)
Display.write('Lamp 1 On ')
Lights.set_lamp(1, 1)
time.sleep(3)
say('Lamp 2 on')
Display.set_cursor_position(0,2)
Display.write('Lamp 2 On ')
Lights.set_lamp(2, 1)
time.sleep(3)
say('Lamp 3 on')
Display.set_cursor_position(0,2)
Display.write('Lamp 3 On ')
Lights.set_lamp(3, 1)
time.sleep(3)
say('Lamp 0 off')
Display.set_cursor_position(0,2)
Display.write('Lamp 0 Off ')
Lights.set_lamp(0, 0)
say('Lamp 1 off')
Display.set_cursor_position(0,2)
Display.write('Lamp 1 Off ')
Lights.set_lamp(1, 0)
say('Lamp 2 off')
Display.set_cursor_position(0,2)
Display.write('Lamp 2 Off ')
Lights.set_lamp(2, 0)
say('Lamp 3 off')
Display.set_cursor_position(0,2)
Display.write('Lamp 3 Off')
Lights.set_lamp(3, 0)
Display.set_cursor_position(0,2)
Display.write(' ')
def test_bargraph():
say('testing bargraph')
Display.set_cursor_position(0,2)
Display.write('Bargraph ')
for intensity in range (0,100):
Display.set_graph(intensity/100.0)
time.sleep(.01)
for intensity in range (100,0,-1):
Display.set_graph(intensity/100.0)
time.sleep(.01)
Display.set_cursor_position(0,2)
Display.write(' ')
def test_backlight():
say('testing backlight')
Display.set_cursor_position(0,2)
Display.write('Backlight ')
Display.rgb(255,0,0)
time.sleep(1)
Display.rgb(0,255,0)
time.sleep(1)
Display.rgb(0,0,255)
time.sleep(1)
for i in range(0, 360):
Display.hue(i/360.0)
time.sleep(0.01)
for i in range(0, 360):
Display.sweep(i/360.0)
time.sleep(0.01)
Display.rgb(255,255,255)
Display.set_cursor_position(0,2)
Display.write(' ')
def test_all():
test_backlight()
test_bargraph()
test_lights()
if __name__ == "__main__":
test_all()
``` |
{
"source": "johnsturgeon/apex-legends-api",
"score": 3
} |
#### File: apex-legends-api/apex_legends_api/al_base.py
```python
from enum import Enum
import requests
def print_description(___class, indent=0, hide_values=False):
""" prints the schema for the current object """
print(' ' * indent + type(___class).__name__ + ':')
indent += 4
for k, value in ___class.__dict__.items():
if not isinstance(value, list):
v_list = [value]
else:
v_list = value
for val in v_list:
if '__dict__' in dir(val):
print_description(val, indent)
else:
if hide_values:
print(' ' * indent + k)
else:
print(' ' * indent + k + ': ' + str(val))
class ALPlatform(Enum):
"""
Three platforms available
- XBOX
- PSN
- PC
"""
XBOX = "X1"
""" Xbox """
PSN = "PS4"
""" Playstation (any) """
PC = "PC"
""" PC """
class ALAction(Enum):
"""
    Four actions available
- INFO Return the players you're currently tracking
- GET Return ALL tracked events for the player
- ADD Adds the player for history collection
- DELETE Removes the given user from the tracked users list
"""
INFO = "info"
""" Return the players you're currently tracking """
GET = "get"
""" Return ALL tracked events for the player """
ADD = "add"
""" Adds the player for history collection """
DELETE = "delete"
""" Removes the given user from the tracked users list """
class ALEventType(Enum):
"""
The four different event types
- SESSION
- GAME
- LEVEL
- RANK
"""
SESSION = 'Session'
""" Session event (leave, join) """
GAME = 'Game'
""" Game event """
LEVEL = 'Level'
""" Level Up event """
RANK = 'Rank'
""" Rank change event """
class ALHTTPExceptionFromResponse(Exception):
""" Exception raised for errors in the http request. """
def __init__(self, response: requests.Response):
self.message = f'Return Code: {response.status_code} - {response.text}'
super().__init__(self.message)
``` |
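`print_description` recursively walks `__dict__`, so any small result object will do; the `PlayerStats`/`Legend` classes below are hypothetical and only illustrate the indentation it produces:

```python
# Hypothetical nested objects to show the indented schema output.
from apex_legends_api.al_base import print_description

class Legend:
    def __init__(self):
        self.name = "Wraith"
        self.kills = 12

class PlayerStats:
    def __init__(self):
        self.level = 110
        self.legends = [Legend()]

print_description(PlayerStats())
# PlayerStats:
#     level: 110
#     Legend:
#         name: Wraith
#         kills: 12
print_description(PlayerStats(), hide_values=True)   # prints attribute names only
```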
{
"source": "johnsturgeon/minecraft_led_lights",
"score": 2
} |
#### File: johnsturgeon/minecraft_led_lights/main.py
```python
import json
import time
from typing import List, Tuple
from pymemcache.client.base import Client
import numpy
import requests
from PIL import Image
import mss.tools
from light_scene import LightScene
client = Client('localhost')
sct = mss.mss()
def get_dominant_color(im) -> Tuple:
import numpy as np
import scipy.cluster
num_clusters = 5
ar = np.asarray(im)
shape = ar.shape
ar = ar.reshape(numpy.product(shape[:2]), shape[2]).astype(float)
codes, dist = scipy.cluster.vq.kmeans(ar, num_clusters)
vectors, dist = scipy.cluster.vq.vq(ar, codes) # assign codes
counts, bins = numpy.histogram(vectors, len(codes)) # count occurrences
index_max = numpy.argmax(counts) # find most frequent
peak = codes[index_max]
r, g, b = list(peak)
return int(r), int(g), int(b)
def get_quadrant_scenes() -> List[LightScene]:
global sct
left, top, width, height = client.get('front_window_frame').decode().split(',')
left = int(left)
top = int(top)
width = int(width)
height = int(height)
monitor = {"top": top, "left": left, "width": width, "height": height}
sct_img = sct.grab(monitor)
# Convert to PIL/Pillow Image
img = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')
size = (75, 75)
light_scenes: List[LightScene] = []
# top left
top_left_scene = LightScene()
top_left_scene.rgb = get_dominant_color(img.resize(size=size, box=(0, 0, width, height)))
top_left_scene.pixels = list(range(42, 70))
light_scenes.append(top_left_scene)
# bottom left
bottom_left_scene = LightScene()
bottom_left_scene.rgb = get_dominant_color(
img.resize(size=size, box=(0, height, width, height * 2))
)
bottom_left_scene.pixels = list(range(70, 100))
light_scenes.append(bottom_left_scene)
# top right
top_right_scene = LightScene()
top_right_scene.rgb = get_dominant_color(
img.resize(size=size, box=(width, 0, width * 2, height))
)
top_right_scene.pixels = list(range(12, 41))
light_scenes.append(top_right_scene)
# bottom right
bottom_right_scene = LightScene()
bottom_right_scene.rgb = get_dominant_color(
img.resize(size=size, box=(width, height, width * 2, height * 2))
)
bottom_right_scene.pixels = list(range(0, 11)) + list(range(101, 118))
light_scenes.append(bottom_right_scene)
return light_scenes
if __name__ == '__main__':
while True:
time.sleep(.2)
scenes: List[LightScene] = get_quadrant_scenes()
for scene in scenes:
requests.post('http://raspberry-pi:5000/set_scene',
data=scene.to_json())
``` |
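`get_dominant_color` is k-means over the flattened pixels, so it can be sanity-checked with a synthetic image. Note that importing `main` also constructs the memcache and screen-capture clients at module level; treat this as a sketch:

```python
# A mostly-red image should come back as roughly (200, 10, 10).
from PIL import Image
from main import get_dominant_color   # side effect: creates the memcache/mss clients

img = Image.new('RGB', (60, 60), (200, 10, 10))   # solid dark red
img.paste((10, 10, 200), (0, 0, 60, 10))          # thin blue stripe for contrast
print(get_dominant_color(img))
```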
{
"source": "JohnStyleZ/botty",
"score": 2
} |
#### File: src/char/basic.py
```python
import keyboard
from utils.custom_mouse import mouse
from char import IChar,CharacterCapabilities
from template_finder import TemplateFinder
from ui import UiManager
from pather import Pather
from logger import Logger
from screen import Screen
from utils.misc import wait, cut_roi
import time
from pather import Pather, Location
class Basic(IChar):
def __init__(self, skill_hotkeys: dict, screen: Screen, template_finder: TemplateFinder, ui_manager: UiManager, pather: Pather):
Logger.info("Setting up Basic Character")
super().__init__(skill_hotkeys, screen, template_finder, ui_manager)
self._pather = pather
self._do_pre_move = True
def on_capabilities_discovered(self, capabilities: CharacterCapabilities):
# offset shenk final position further to the right and bottom
if capabilities.can_teleport_natively:
self._pather.offset_node(149, [120, 70])
def _cast_attack_pattern(self, time_in_s: float):
keyboard.send(self._char_config["stand_still"], do_release=False)
wait(0.05, 0.1)
keyboard.send(self._skill_hotkeys["left_attack"])
wait(0.05, 0.1)
keyboard.send(self._skill_hotkeys["right_attack"])
wait(0.05, 0.1)
start = time.time()
while (time.time() - start) < time_in_s:
if self._ui_manager.is_right_skill_active():
wait(0.05, 0.1)
mouse.click(button="right")
else:
wait(0.05, 0.1)
mouse.click(button="left")
wait(0.01, 0.05)
keyboard.send(self._char_config["stand_still"], do_press=False)
def pre_buff(self):
if self._skill_hotkeys["buff_1"]:
keyboard.send(self._skill_hotkeys["buff_1"])
wait(0.5, 0.15)
mouse.click(button="right")
wait(0.5, 0.15)
if self._skill_hotkeys["buff_2"]:
keyboard.send(self._skill_hotkeys["buff_2"])
wait(0.5, 0.15)
mouse.click(button="right")
wait(0.5, 0.15)
def pre_move(self):
# select teleport if available
super().pre_move()
def _move_and_attack(self, abs_move: tuple[int, int], atk_len: float):
pos_m = self._screen.convert_abs_to_monitor(abs_move)
self.pre_move()
self.move(pos_m, force_move=True)
self._cast_attack_pattern(atk_len)
#this is where we kill bosses
def kill_pindle(self) -> bool:
wait(0.1, 0.15)
if self.capabilities.can_teleport_natively:
self._pather.traverse_nodes_fixed("pindle_end", self)
else:
if not self._do_pre_move:
# keyboard.send(self._skill_hotkeys["concentration"])
# wait(0.05, 0.15)
self._pather.traverse_nodes((Location.A5_PINDLE_SAFE_DIST, Location.A5_PINDLE_END), self, time_out=1.0, do_pre_move=self._do_pre_move)
self._pather.traverse_nodes((Location.A5_PINDLE_SAFE_DIST, Location.A5_PINDLE_END), self, time_out=0.1)
self._cast_attack_pattern(self._char_config["atk_len_pindle"])
wait(0.1, 0.15)
return True
def kill_eldritch(self) -> bool:
if self.capabilities.can_teleport_natively:
self._pather.traverse_nodes_fixed("eldritch_end", self)
else:
if not self._do_pre_move:
# keyboard.send(self._skill_hotkeys["concentration"])
# wait(0.05, 0.15)
self._pather.traverse_nodes((Location.A5_ELDRITCH_SAFE_DIST, Location.A5_ELDRITCH_END), self, time_out=1.0, do_pre_move=self._do_pre_move)
wait(0.05, 0.1)
self._cast_attack_pattern(self._char_config["atk_len_eldritch"])
return True
def kill_shenk(self):
# if not self._do_pre_move:
# keyboard.send(self._skill_hotkeys["concentration"])
# wait(0.05, 0.15)
self._pather.traverse_nodes((Location.A5_SHENK_SAFE_DIST, Location.A5_SHENK_END), self, time_out=1.0, do_pre_move=self._do_pre_move)
wait(0.05, 0.1)
self._cast_attack_pattern(self._char_config["atk_len_shenk"])
wait(0.1, 0.15)
return True
def kill_council(self) -> bool:
# Check out the node screenshot in assets/templates/trav/nodes to see where each node is at
atk_len = self._char_config["atk_len_trav"]
# Go inside and war cry a bit
self._pather.traverse_nodes([228, 229], self, time_out=2.5, force_tp=True)
self._cast_attack_pattern(atk_len)
# Move a bit back and another round
self._move_and_attack((40, 20), atk_len)
# Here we have two different attack sequences depending if tele is available or not
if self.capabilities.can_teleport_natively:
# Back to center stairs and more war cry
self._pather.traverse_nodes([226], self, time_out=2.5, force_tp=True)
self._cast_attack_pattern(atk_len)
# move a bit to the top
self._move_and_attack((65, -30), atk_len)
else:
# Stay inside and cast war cry again moving forward
self._move_and_attack((40, 10), atk_len)
self._move_and_attack((-40, -20), atk_len)
return True
def kill_nihlathak(self, end_nodes: list[int]) -> bool:
# Move close to nihlathak
self._pather.traverse_nodes(end_nodes, self, time_out=0.8, do_pre_move=False)
# move mouse to center (leftover from hammerdin)
pos_m = self._screen.convert_abs_to_monitor((0, 0))
mouse.move(*pos_m, randomize=80, delay_factor=[0.5, 0.7])
self._cast_attack_pattern(self._char_config["atk_len_nihlathak"] * 0.4)
self._cast_attack_pattern(0.8)
self._move_and_attack((30, 15), self._char_config["atk_len_nihlathak"] * 0.3)
self._cast_attack_pattern(0.8)
self._move_and_attack((-30, -15), self._char_config["atk_len_nihlathak"] * 0.4)
wait(0.1, 0.15)
self._cast_attack_pattern(1.2)
return True
if __name__ == "__main__":
import os
import keyboard
keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1))
keyboard.wait("f11")
from config import Config
from ui.ui_manager import UiManager
config = Config()
screen = Screen()
t_finder = TemplateFinder(screen)
pather = Pather(screen, t_finder)
ui_manager = UiManager(screen, t_finder)
    char = Basic(config.basic, screen, t_finder, ui_manager, pather)
```
#### File: src/transmute/transmute.py
```python
import itertools
from random import randint, random
import threading
from config import Config
from .inventory_collection import InventoryCollection
from .stash import Stash
from .gem_picking import SimpleGemPicking
from item.item_finder import ItemFinder
from screen import Screen
from ui.ui_manager import UiManager
from utils.custom_mouse import mouse
from utils.misc import wait
from version import __version__
from logger import Logger
from game_stats import GameStats
from template_finder import TemplateFinder
import numpy as np
import keyboard
import os
import cv2
FLAWLESS_GEMS = [
"INVENTORY_TOPAZ_FLAWLESS",
"INVENTORY_AMETHYST_FLAWLESS",
"INVENTORY_SAPPHIRE_FLAWLESS",
"INVENTORY_DIAMOND_FLAWLESS",
"INVENTORY_RUBY_FLAWLESS",
"INVENTORY_EMERALD_FLAWLESS",
"INVENTORY_SKULL_FLAWLESS"
]
PERFECT_GEMS = [
"INVENTORY_TOPAZ_PERFECT",
"INVENTORY_AMETHYST_PERFECT",
"INVENTORY_SAPPHIRE_PERFECT",
"INVENTORY_DIAMOND_PERFECT",
"INVENTORY_RUBY_PERFECT",
"INVENTORY_EMERALD_PERFECT",
"INVENTORY_SKULL_PERFECT"
]
class Transmute:
@staticmethod
def _wait():
wait(0.2, 0.3)
def __init__(self, screen: Screen, template_finder: TemplateFinder, game_stats: GameStats, ui_manager: UiManager) -> None:
self._screen = screen
self._game_stats = game_stats
self._template_finder = template_finder
self._ui_manager = ui_manager
self._last_game = 0
def pick_from_area(self, column, row, roi):
slot_w = Config.ui_pos["slot_width"]
slot_h = Config.ui_pos["slot_height"]
offset_y = (row+0.5)*slot_h
offset_x = (column+0.5)*slot_w
x, y, _, _ = roi
x, y = self._screen.convert_screen_to_monitor(
(x + offset_x, y + offset_y))
mouse.move(x, y)
self._wait()
keyboard.send('ctrl', do_release=False)
self._wait()
mouse.click("left")
self._wait()
keyboard.release('ctrl')
self._wait()
def open_cube(self):
self._ui_manager._move_to_stash_tab(0)
screen = self._screen.grab()
match = self._template_finder.search(
["HORADRIC_CUBE"], screen, threshold=0.9, roi=Config.ui_roi["left_inventory"])
if match.valid:
x, y = self._screen.convert_screen_to_monitor(match.center)
mouse.move(x, y)
self._wait()
mouse.click("right")
self._wait()
else:
Logger.error(f"Can't find cube: {match.score}")
def transmute(self):
screen = self._screen.grab()
match = self._template_finder.search(
["CUBE_TRANSMUTE_BTN"], screen, roi=Config.ui_roi["cube_btn_roi"])
if match.valid:
x, y = self._screen.convert_screen_to_monitor(match.center)
mouse.move(x, y)
self._wait()
mouse.click("left")
self._wait()
def close_cube(self):
self._wait()
keyboard.send("esc")
def stash_all_items(self):
self._ui_manager.stash_all_items(
Config.char["num_loot_columns"], ItemFinder())
def pick_from_cube_at(self, column, row):
return self.pick_from_area(column, row, Config.ui_roi["cube_area_roi"])
def pick_from_inventory_at(self, column, row):
return self.pick_from_area(column, row, Config.ui_roi["right_inventory"])
def pick_from_stash_at(self, index, column, row):
self._ui_manager._move_to_stash_tab(index)
return self.pick_from_area(column, row, Config.ui_roi["left_inventory"])
def inspect_area(self, total_rows, total_columns, roi, known_items) -> InventoryCollection:
result = InventoryCollection()
x, y, w, h = roi
img = self._screen.grab()[y:y+h, x:x+w]
slot_w = Config.ui_pos["slot_width"]
slot_h = Config.ui_pos["slot_height"]
for column, row in itertools.product(range(total_columns), range(total_rows)):
y_start, y_end = row*slot_h, slot_h*(row+1)
x_start, x_end = column*slot_w, slot_w*(column+1)
slot_img = img[y_start:y_end, x_start:x_end]
if not self._is_slot_empty(slot_img[+4:-4, +4:-4], treshold=36):
result.set_empty((column, row))
match = self._template_finder.search(
known_items, slot_img, threshold=0.91, best_match=True)
if match.valid:
result.append(match.name, (column, row))
return result
def _is_slot_empty(self, img, treshold=16.0):
slot_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
avg_brightness = np.average(slot_img[:, :, 2])
return avg_brightness > treshold
def inspect_inventory_area(self, known_items) -> InventoryCollection:
return self.inspect_area(4, Config.char["num_loot_columns"], Config.ui_roi["right_inventory"], known_items)
def inspect_stash(self) -> Stash:
stash = Stash()
for i in range(4):
self._ui_manager._move_to_stash_tab(i)
wait(0.4, 0.5)
tab = self.inspect_area(
10, 10, Config.ui_roi["left_inventory"], FLAWLESS_GEMS)
stash.add_tab(i, tab)
return stash
def put_back_to_stash_randomly(self) -> None:
flawless_gems = self.inspect_inventory_area(FLAWLESS_GEMS)
pick = []
for gem in flawless_gems.all_items():
while flawless_gems.count_by(gem) > 0:
pick.append((randint(0, 3), *flawless_gems.pop(gem)))
for tab, x, y in sorted(pick, key=lambda x: x[0]):
self._ui_manager._move_to_stash_tab(tab)
self.pick_from_inventory_at(x, y)
def select_tab_with_enough_space(self, s: Stash) -> None:
tabs_priority = Config._transmute_config["stash_destination"]
for tab in tabs_priority:
if s.get_empty_on_tab(tab) > 0:
self._ui_manager._move_to_stash_tab(tab)
break
def put_back_all_gems(self, s: Stash) -> None:
Logger.info(
f'Putting back gems in the following stash tabs (by priority): {Config._transmute_config["stash_destination"]}')
perfect_gems = self.inspect_inventory_area(
PERFECT_GEMS + FLAWLESS_GEMS)
for gem in perfect_gems.all_items():
while perfect_gems.count_by(gem) > 0:
self.select_tab_with_enough_space(s)
self.pick_from_inventory_at(*perfect_gems.pop(gem))
def should_transmute(self) -> bool:
every_x_game = Config._transmute_config["transmute_every_x_game"]
if every_x_game is None or every_x_game == "" or int(every_x_game) <= 0:
return False
return self._game_stats._game_counter - self._last_game >= int(every_x_game)
def run_transmutes(self, force=False) -> None:
gold_btn = self._template_finder.search_and_wait("INVENTORY_GOLD_BTN", roi=Config.ui_roi["gold_btn"], time_out=20)
if not gold_btn.valid:
Logger.error("Could not determine to be in stash menu. Continue...")
return
if not force and not self.should_transmute():
Logger.info(f"Skipping transmutes. Force: {force}, Game#: {self._game_stats._game_counter}")
return None
self._run_gem_transmutes()
def check_cube_empty(self) -> bool:
self.open_cube()
area = self.inspect_cube()
self.close_cube()
return area.count_empty() == 12
def inspect_cube(self)-> InventoryCollection:
return self.inspect_area(4, 3, roi=Config.ui_roi["cube_area_roi"], known_items=FLAWLESS_GEMS)
def _run_gem_transmutes(self) -> None:
Logger.info("Starting gem transmute")
self._last_game = self._game_stats._game_counter
s = self.inspect_stash()
algorithm = SimpleGemPicking(s)
inventory = self.inspect_inventory_area(FLAWLESS_GEMS)
is_cube_empty = None
while True:
while inventory.count_empty() >= 3:
next_batch = algorithm.next_batch()
is_cube_empty = self.check_cube_empty() if is_cube_empty is None else is_cube_empty
if not is_cube_empty:
Logger.warning("Some items detected in the cube. Skipping transmute")
break
if next_batch is None:
Logger.info("No more gems to cube")
break
for tab, gem, x, y in next_batch:
self.pick_from_stash_at(tab, x, y)
inventory = self.inspect_inventory_area(FLAWLESS_GEMS)
if inventory.count() >= 3:
self.open_cube()
for gem in inventory.all_items():
while inventory.count_by(gem) > 0:
for _ in range(3):
next = inventory.pop(gem)
self.pick_from_inventory_at(*next)
self.transmute()
self.pick_from_cube_at(2, 3)
self.close_cube()
self.put_back_all_gems(s)
else:
self.put_back_all_gems(s)
break
Logger.info("Finished gem transmute")
``` |
{
"source": "johnsudhir/GUI-tic-tac-toe",
"score": 4
} |
#### File: johnsudhir/GUI-tic-tac-toe/nonGUI.py
```python
data = {'1': "",
'2': "",
'3': "",
'4': "",
'5': "",
'6': "",
'7': "",
'8': "",
'9': ""}
userFirst = 'X'
userSecond = 'O'
currentUser = ""
def table(data):
print(data.get('1'), "|", data.get("2"), "|", data.get("3"))
print("------")
print(data.get("4"), "|", data.get("5"), "|", data.get("6"))
print("------")
print(data.get("7"), "|", data.get("8"), "|", data.get("9"))
def winner(data, currentUser):
if(data.get("1") == data.get("2") == data.get("3") == currentUser or data.get("4") == data.get("5") == data.get("6")
== currentUser or data.get("7") == data.get("8") == data.get("9") == currentUser):
return True
elif(data.get("1") == data.get("4") == data.get("7") == currentUser or data.get("2") == data.get("5") == data.get("8")
== currentUser or data.get("3") == data.get("6") == data.get("9") == currentUser ):
return True
elif(data.get("1") == data.get("5") == data.get("9") == currentUser or data.get("3") == data.get("5") ==
data.get("7") == currentUser ):
return True
def tictactoe(data, currentUser = "X"):
for i in range(9):
position = input(f"{currentUser} Please, enter the position: ")
data[position] = currentUser
table(data)
if (winner(data, currentUser)):
print(f'{currentUser} won the game')
break;
if currentUser == 'X':
            currentUser = 'O'
else:
currentUser = 'X'
print("Game Over")
table(data)
tictactoe(data)
``` |
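A quick check of the win detection, assuming `winner` is in scope (for example, pasted below the function definitions with the interactive `tictactoe(data)` call commented out):

```python
# Top row of X should be detected; O has no line, so winner() falls through to None.
board = {str(i): "" for i in range(1, 10)}
board['1'] = board['2'] = board['3'] = 'X'
print(winner(board, 'X'))   # True
print(winner(board, 'O'))   # None
```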
{
"source": "johnswon/l2ldjango_interview",
"score": 3
} |
#### File: l2l/templatetags/l2l_extras.py
```python
from django import template
from datetime import datetime
register = template.Library()
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
ISO_FORMAT = '%Y-%m-%dT%H:%M:%S'
@register.filter()
def l2l_dt(value):
if type(value) == datetime:
return value.strftime(DATE_FORMAT)
else:
return (datetime.strptime(value, ISO_FORMAT)).strftime(DATE_FORMAT)
# register.filter('l2l_dt', l2l_dt)
``` |
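The filter accepts either a `datetime` or an ISO-8601 string; calling it directly (outside a template, where it would be `{{ value|l2l_dt }}`) shows both branches:

```python
# Direct calls to the filter function; both branches produce the same format.
from datetime import datetime
from l2l.templatetags.l2l_extras import l2l_dt

print(l2l_dt(datetime(2021, 6, 1, 13, 5, 0)))   # 2021-06-01 13:05:00
print(l2l_dt('2021-06-01T13:05:00'))            # 2021-06-01 13:05:00
```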
{
"source": "johnsyweb/python_sparse_list",
"score": 3
} |
#### File: johnsyweb/python_sparse_list/test_sparse_list.py
```python
import sparse_list
import pytest
class TestSparseList:
def test_init_zero(self):
sl = sparse_list.SparseList(0)
assert 0 == len(sl)
assert 0 == sl.population()
def test_init_non_zero(self):
sl = sparse_list.SparseList(10)
assert 10 == len(sl)
assert 0 == sl.population()
def test_init_no_default(self):
sl = sparse_list.SparseList(1)
assert sl.default is None
assert 0 == sl.population()
def test_init_default(self):
sl = sparse_list.SparseList(1, 'test')
assert 'test' == sl.default
assert 0 == sl.population()
def test_random_access_write(self):
sl = sparse_list.SparseList(1)
sl[0] = 'alice'
assert {0: 'alice'} == sl.elements
assert 1 == sl.population()
def test_random_access_read_present(self):
sl = sparse_list.SparseList(2)
sl[0] = 'brent'
assert 'brent' == sl[0]
assert 1 == sl.population()
def test_random_access_read_absent(self):
sl = sparse_list.SparseList(2, 'absent')
sl[1] = 'clint'
assert 'absent' == sl[0]
assert 1 == sl.population()
def test_iteration_empty(self):
sl = sparse_list.SparseList(3)
assert [None, None, None] == list(sl)
def test_iteration_populated(self):
sl = sparse_list.SparseList(5)
sl[1], sl[3] = 'a', 'b'
assert [None, 'a', None, 'b', None] == list(sl)
def test_membership_absent(self):
sl = sparse_list.SparseList(5)
sl[2], sl[3], = 1, 2
        assert 3 not in sl
def test_membership_present(self):
sl = sparse_list.SparseList(5)
sl[2], sl[3], = 1, 2
        assert 2 in sl
def test_string_representations(self):
sl = sparse_list.SparseList(5, 0)
sl[3], sl[4] = 5, 6
assert '[0, 0, 0, 5, 6]' == repr(sl)
assert '[0, 0, 0, 5, 6]' == str(sl)
def test_initialisation_by_dict(self):
sl = sparse_list.SparseList({
4: 6,
3: 5,
}, 0)
assert [0, 0, 0, 5, 6] == sl
assert 2 == sl.population()
def test_initialisation_by_dict_does_not_add_defaults(self):
sl = sparse_list.SparseList({
3: 0,
4: 6,
}, 0)
assert [0, 0, 0, 0, 6] == sl
assert 1 == sl.population()
def test_initialisation_by_dict_with_non_numeric_key(self):
with pytest.raises(ValueError):
sparse_list.SparseList({'a': 5})
def test_initialisation_by_list(self):
sl = sparse_list.SparseList([0, 1, 2, 4])
assert [0, 1, 2, 4] == sl
assert 4 == sl.population()
def test_initialisation_by_list_does_not_add_defaults(self):
sl = sparse_list.SparseList([0, 1, 2, 4], 0)
assert [0, 1, 2, 4] == sl
assert 3 == sl.population()
def test_initialisation_by_generator(self):
gen = (x for x in (1, 2, 3))
sl = sparse_list.SparseList(gen)
assert [1, 2, 3] == sl
assert 3 == sl.population()
def test_access_with_negative_index(self):
sl = sparse_list.SparseList([0, 1, 2, 4])
assert 4 == sl[-1]
def test_access_with_negative_index_with_no_value(self):
sl = sparse_list.SparseList(5, 0)
assert 0 == sl[-1]
def test_slice(self):
sl = sparse_list.SparseList([0, 1, 2, 4], 10)
assert [1, 2] == sl[1:3]
def test_slice_is_sparse_list(self):
sl = sparse_list.SparseList([0, 1, 2, 4], 10)
assert isinstance(sl[1:3], sparse_list.SparseList)
def test_extended_slice(self):
sl = sparse_list.SparseList([0, 1, 2, 3, 4, 5, 6])
assert [1, 3, 5] == sl[1:6:2]
def test_extended_slice_is_sparse_list(self):
sl = sparse_list.SparseList([0, 1, 2, 3, 4, 5, 6])
assert isinstance(sl[1:6:2], sparse_list.SparseList)
def test_extended_slice_with_negative_stop(self):
sl = sparse_list.SparseList([0, 1, 2, 3, 4, 5, 6])
assert [1, 3, 5] == sl[1:-1:2]
def test_slice_reversal_full(self):
sl = sparse_list.SparseList([1, 2, 3])
assert [3, 2, 1] == sl[::-1]
def test_slice_reversal_empty(self):
sl = sparse_list.SparseList(4)
assert [None, None, None, None] == sl[::-1]
def test_default_slice(self):
sl = sparse_list.SparseList(23)
sl[0:2] = (1, 2)
assert [None, None] == sl[2:4]
def test_slice_list_size(self):
initial_size = 20
sl = sparse_list.SparseList(initial_size)
sample_tuple = (1, 2, 3, 4)
sl[2:2+len(sample_tuple)] = sample_tuple
assert len(sl) == initial_size
def test_reversed(self):
sl = sparse_list.SparseList([1, 2, 3])
assert [3, 2, 1] == list(reversed(sl))
def test_sorted(self):
sl = sparse_list.SparseList({0: 1, 4: 1}, 0)
assert [0, 0, 0, 1, 1] == list(sorted(sl))
def test_get_out_of_bounds(self):
sl = sparse_list.SparseList(1)
assert sl[1] is None
def test_set_out_of_bounds(self):
sl = sparse_list.SparseList(1)
sl[100] = 1
assert 101 == len(sl)
def test_present_item_removal(self):
sl = sparse_list.SparseList({0: 1, 4: 1}, 0)
del sl[0]
assert [0, 0, 0, 1] == sl
assert 1 == sl.population()
def test_missing_item_removal(self):
sl = sparse_list.SparseList({0: 1, 4: 1}, 0)
del sl[1]
assert [1, 0, 0, 1] == sl
assert 2 == sl.population()
def test_slice_removal(self):
sl = sparse_list.SparseList(range(10))
del sl[3:5]
assert [0, 1, 2, 5, 6, 7, 8, 9] == sl
assert 8 == sl.population()
def test_slice_removal_with_default_present(self):
sl = sparse_list.SparseList(range(10), 0)
del sl[3:5]
assert [0, 1, 2, 5, 6, 7, 8, 9] == sl
assert 7 == sl.population()
def test_unbounded_head_slice_removal(self):
sl = sparse_list.SparseList(range(10))
del sl[:3]
assert [3, 4, 5, 6, 7, 8, 9] == sl
assert 7 == sl.population()
def test_unbounded_head_slice_removal_with_default_present(self):
sl = sparse_list.SparseList(range(10), 0)
del sl[:3]
assert [3, 4, 5, 6, 7, 8, 9] == sl
assert 7 == sl.population()
def test_unbounded_tail_slice_removal(self):
sl = sparse_list.SparseList(range(10), None)
del sl[5:]
assert [0, 1, 2, 3, 4] == sl
assert 5 == sl.population()
def test_stepped_slice_removal(self):
sl = sparse_list.SparseList(range(6), None)
del sl[::2]
assert [1, 3, 5] == sl
assert 3 == sl.population()
def test_empty_removal(self):
sl = sparse_list.SparseList(range(5), None)
del sl[3:3]
assert [0, 1, 2, 3, 4] == sl
assert 5 == sl.population()
def test_append(self):
sl = sparse_list.SparseList(1, 0)
sl.append(1)
assert [0, 1] == sl
assert 1 == sl.population()
def test_clone(self):
a = sparse_list.SparseList([1, 2, 3])
b = a[:]
b.append(4)
assert [1, 2, 3] == a
assert [1, 2, 3, 4] == b
assert a.population() + 1 == b.population()
def test_concatenation(self):
a = sparse_list.SparseList([1, 2, 3])
b = sparse_list.SparseList([4, 5, 6])
c = a + b
assert [1, 2, 3] == a
assert [4, 5, 6] == b
assert [1, 2, 3, 4, 5, 6] == c
assert a.population() + b.population() == c.population()
def test_in_place_concatenation(self):
a = sparse_list.SparseList([1, 2, 3])
b = sparse_list.SparseList([4, 5, 6])
a += b
assert [1, 2, 3, 4, 5, 6] == a
assert [4, 5, 6] == b
assert 6 == a.population()
def test_equality(self):
a = sparse_list.SparseList([1, 2, 3])
b = sparse_list.SparseList([1, 2, 3])
assert a == b
assert not a != b
assert a == b
assert b == a
assert not b != a
assert b == a
def test_inequality_same_length(self):
a = sparse_list.SparseList([1, 2, 3])
b = sparse_list.SparseList([1, 0, 3])
assert a != b
assert not a == b
assert a != b
assert b != a
assert not b == a
assert b != a
def test_inequality_left_longer(self):
a = sparse_list.SparseList([1, 2, 3, 4])
b = sparse_list.SparseList([1, 2, 3])
assert a != b
assert not (a == b)
assert a != b
assert b != a
assert not (b == a)
assert b != a
def test_inequality_length(self):
a = sparse_list.SparseList(2)
b = sparse_list.SparseList(4)
assert a != b
assert not (a == b)
assert a != b
assert b != a
assert not (b == a)
assert b != a
def test_less_than(self):
a = sparse_list.SparseList([1, 2, 3, 0])
b = sparse_list.SparseList([1, 2, 4, 5])
assert a < b
assert not (a == b)
assert not (a >= b)
assert not (a > b)
def test_greater_than(self):
a = sparse_list.SparseList([1, 2, 3, 0])
b = sparse_list.SparseList([1, 2, 4, 5])
assert b > a
assert not (b == a)
assert not (b <= a)
assert not (b < a)
def test_less_than_with_a_pair_that_is_greater(self):
a = sparse_list.SparseList([1, 2, 3])
b = sparse_list.SparseList([1, 0, 4])
assert not (a < b)
assert not (a == b)
assert b <= a
assert b < a
def test_less_than_prefix(self):
a = sparse_list.SparseList([1, 2, 3])
b = sparse_list.SparseList([1, 2, 3, 4])
assert a < b
assert not (a == b)
assert not (b <= a)
assert not (b < a)
def test_less_than_different_lengths(self):
a = sparse_list.SparseList([1, 2, 3, 4])
b = sparse_list.SparseList([2, 1, 3])
assert a < b
assert not (a == b)
assert not (b <= a)
assert not (b < a)
def test_multiply(self):
sl = sparse_list.SparseList({0: 1, 4: 1}, 0)
sl4 = sl * 4
assert [1, 0, 0, 0, 1] == sl
assert [1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1] == sl4
assert len(sl) * 4 == len(sl4)
assert sl.population() * 4 == sl4.population()
def test_multiply_in_place(self):
sl = sparse_list.SparseList({0: 1, 4: 1}, 0)
sl *= 4
assert [1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1] == sl
assert 8 == sl.population()
def test_count_value(self):
sl = sparse_list.SparseList({0: 1, 4: 1}, 0)
assert 2 == sl.count(1)
def test_count_default_value(self):
sl = sparse_list.SparseList(100, 1)
sl[5] = 1
assert 100 == sl.count(1)
def test_extend(self):
sl = sparse_list.SparseList([1, 2, 3])
sl.extend((4, 5, 6))
assert [1, 2, 3, 4, 5, 6] == sl
def test_index_value(self):
sl = sparse_list.SparseList({0: 1, 4: 1}, 0)
assert 0 == sl.index(1)
def test_index_default_value(self):
sl = sparse_list.SparseList({0: 1, 4: 1}, 0)
assert 1 == sl.index(0)
def test_index_absent_default_value(self):
sl = sparse_list.SparseList([1, 2, 3], 0)
with pytest.raises(ValueError):
sl.index(0)
def test_index_absent_value(self):
sl = sparse_list.SparseList(1, 0)
with pytest.raises(ValueError):
sl.index(2)
def test_pop_no_value(self):
sl = sparse_list.SparseList(4)
assert sl.pop() is None
def test_pop_empty(self):
sl = sparse_list.SparseList(0)
with pytest.raises(IndexError):
sl.pop()
def test_pop_value(self):
sl = sparse_list.SparseList([1, 2, 3])
popped = sl.pop()
assert 3 == popped
assert 2 == len(sl)
assert [1, 2] == sl
assert 2 == sl.population()
def test_push_value(self):
sl = sparse_list.SparseList([1, 2, 3])
sl.push(4)
assert 4 == len(sl)
assert [1, 2, 3, 4] == sl
assert 4 == sl.population()
def test_remove_value(self):
sl = sparse_list.SparseList([1, 2, 3])
sl.remove(2)
assert 3 == len(sl)
assert [1, None, 3] == sl
assert 2 == sl.population()
def test_remove_only_first_value(self):
sl = sparse_list.SparseList([2, 2, 3])
sl.remove(2)
assert 3 == len(sl)
assert [None, 2, 3] == sl
assert 2 == sl.population()
def test_remove_non_value(self):
sl = sparse_list.SparseList([1, 2, 3])
with pytest.raises(ValueError):
sl.remove(4)
def test_remove_default_value_does_nothing(self):
sl = sparse_list.SparseList(4, None)
sl.remove(None)
assert [None, None, None, None] == sl
assert 0 == sl.population()
def test_set_slice_observes_stop(self):
sl = sparse_list.SparseList(4, None)
sl[0:2] = [1, 2, 3]
assert [1, 2, None, None] == sl
assert 2 == sl.population()
def test_set_slice_resizes(self):
sl = sparse_list.SparseList(0, None)
sl[4:] = [4, 5]
assert [None, None, None, None, 4, 5] == sl
assert len(sl) == 6
assert 2 == sl.population()
def test_set_slice_extends_past_end(self):
sl = sparse_list.SparseList(5, None)
sl[3:] = [6, 7, 8]
assert [None, None, None, 6, 7, 8] == sl
assert 3 == sl.population()
def test_set_slice_with_step(self):
sl = sparse_list.SparseList(6, None)
sl[::2] = [1, 2, 3]
assert [1, None, 2, None, 3, None] == sl
assert 3 == sl.population()
def test_setting_an_item_with_default_does_not_increase_population(self):
sl = sparse_list.SparseList(6, None)
sl[2] = None
assert 6 == len(sl)
assert 0 == sl.population()
``` |
{
"source": "JohnTargaryen/EnlightenGAN",
"score": 2
} |
#### File: EnlightenGAN/data/unaligned_dataset.py
```python
import torch
from torch import nn
import os.path
import torchvision.transforms as transforms
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset, store_dataset
import random
from PIL import Image
import PIL
from pdb import set_trace as st
def pad_tensor(input):
height_org, width_org = input.shape[2], input.shape[3]
divide = 16
if width_org % divide != 0 or height_org % divide != 0:
width_res = width_org % divide
height_res = height_org % divide
if width_res != 0:
width_div = divide - width_res
pad_left = int(width_div / 2)
pad_right = int(width_div - pad_left)
else:
pad_left = 0
pad_right = 0
if height_res != 0:
height_div = divide - height_res
pad_top = int(height_div / 2)
pad_bottom = int(height_div - pad_top)
else:
pad_top = 0
pad_bottom = 0
padding = nn.ReflectionPad2d((pad_left, pad_right, pad_top, pad_bottom))
input = padding(input).data
else:
pad_left = 0
pad_right = 0
pad_top = 0
pad_bottom = 0
height, width = input.shape[2], input.shape[3]
    assert width % divide == 0, 'width is not divisible by the stride'
    assert height % divide == 0, 'height is not divisible by the stride'
return input, pad_left, pad_right, pad_top, pad_bottom
def pad_tensor_back(input, pad_left, pad_right, pad_top, pad_bottom):
height, width = input.shape[2], input.shape[3]
return input[:,:, pad_top: height - pad_bottom, pad_left: width - pad_right]
class UnalignedDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')
self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')
# self.A_paths = make_dataset(self.dir_A)
# self.B_paths = make_dataset(self.dir_B)
self.A_imgs, self.A_paths = store_dataset(self.dir_A)
self.B_imgs, self.B_paths = store_dataset(self.dir_B)
# self.A_paths = sorted(self.A_paths)
# self.B_paths = sorted(self.B_paths)
self.A_size = len(self.A_paths)
self.B_size = len(self.B_paths)
self.transform = get_transform(opt)
def __getitem__(self, index):
# A_path = self.A_paths[index % self.A_size]
# B_path = self.B_paths[index % self.B_size]
# A_img = Image.open(A_path).convert('RGB')
# B_img = Image.open(B_path).convert('RGB')
A_img = self.A_imgs[index % self.A_size]
B_img = self.B_imgs[index % self.B_size]
A_path = self.A_paths[index % self.A_size]
B_path = self.B_paths[index % self.B_size]
# A_size = A_img.size
# B_size = B_img.size
# A_size = A_size = (A_size[0]//16*16, A_size[1]//16*16)
# B_size = B_size = (B_size[0]//16*16, B_size[1]//16*16)
# A_img = A_img.resize(A_size, Image.BICUBIC)
# B_img = B_img.resize(B_size, Image.BICUBIC)
# A_gray = A_img.convert('LA')
# A_gray = 255.0-A_gray
A_img = self.transform(A_img)
B_img = self.transform(B_img)
if self.opt.resize_or_crop == 'no':
r,g,b = A_img[0]+1, A_img[1]+1, A_img[2]+1
A_gray = 1. - (0.299*r+0.587*g+0.114*b)/2.
A_gray = torch.unsqueeze(A_gray, 0)
input_img = A_img
# A_gray = (1./A_gray)/255.
else:
w = A_img.size(2)
h = A_img.size(1)
# A_gray = (1./A_gray)/255.
if (not self.opt.no_flip) and random.random() < 0.5:
idx = [i for i in range(A_img.size(2) - 1, -1, -1)]
idx = torch.LongTensor(idx)
A_img = A_img.index_select(2, idx)
B_img = B_img.index_select(2, idx)
if (not self.opt.no_flip) and random.random() < 0.5:
idx = [i for i in range(A_img.size(1) - 1, -1, -1)]
idx = torch.LongTensor(idx)
A_img = A_img.index_select(1, idx)
B_img = B_img.index_select(1, idx)
if self.opt.vary == 1 and (not self.opt.no_flip) and random.random() < 0.5: # data agumentation
times = random.randint(self.opt.low_times,self.opt.high_times)/100.
                input_img = (A_img+1)/2./times  # input_img is the augmented version of A_img
input_img = input_img*2-1
else:
input_img = A_img
if self.opt.lighten:
B_img = (B_img + 1)/2.
B_img = (B_img - torch.min(B_img))/(torch.max(B_img) - torch.min(B_img))
B_img = B_img*2. -1
r,g,b = input_img[0]+1, input_img[1]+1, input_img[2]+1
A_gray = 1. - (0.299*r+0.587*g+0.114*b)/2.
A_gray = torch.unsqueeze(A_gray, 0)
return {'A': A_img, 'B': B_img, 'A_gray': A_gray, 'input_img': input_img,
'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
return max(self.A_size, self.B_size)
def name(self):
return 'UnalignedDataset'
``` |
{
"source": "johntconklin/monitor-core",
"score": 2
} |
#### File: python_modules/network/traffic1.py
```python
import sys
import os
import threading
import time
descriptors = list()
Desc_Skel = {}
_Worker_Thread = None
_Lock = threading.Lock() # synchronization lock
Debug = False
def dprint(f, *v):
if Debug:
print >> sys.stderr, "DEBUG: "+f % v
class UpdateTrafficThread(threading.Thread):
__slots__ = ( 'proc_file' )
def __init__(self, params):
threading.Thread.__init__(self)
self.running = False
self.shuttingdown = False
self.refresh_rate = 10
if "refresh_rate" in params:
self.refresh_rate = int(params["refresh_rate"])
self.target_device = params["target_device"]
self.metric = {}
self.proc_file = "/proc/net/dev"
self.stats_tab = {
"recv_bytes" : 0,
"recv_pkts" : 1,
"recv_errs" : 2,
"recv_drops" : 3,
"trans_bytes" : 8,
"trans_pkts" : 9,
"trans_errs" : 10,
"trans_drops" : 11,
}
self.stats = {}
self.stats_prev = {}
def shutdown(self):
self.shuttingdown = True
if not self.running:
return
self.join()
def run(self):
self.running = True
while not self.shuttingdown:
_Lock.acquire()
self.update_metric()
_Lock.release()
time.sleep(self.refresh_rate)
self.running = False
def update_metric(self):
f = open(self.proc_file, "r")
for l in f:
a = l.split(":")
dev = a[0].lstrip()
if dev != self.target_device: continue
dprint("%s", ">>update_metric")
self.stats = {}
_stats = a[1].split()
for name, index in self.stats_tab.iteritems():
self.stats[name+'_'+self.target_device] = int(_stats[index])
self.stats["time"] = time.time()
dprint("%s", self.stats)
if "time" in self.stats_prev:
dprint("%s: %d = %d - %d", "DO DIFF", self.stats["time"]-self.stats_prev["time"], self.stats["time"], self.stats_prev["time"])
d = self.stats["time"] - self.stats_prev["time"]
for name, cur in self.stats.iteritems():
self.metric[name] = float(cur - self.stats_prev[name])/d
self.stats_prev = self.stats.copy()
break
return
def metric_of(self, name):
val = 0
if name in self.metric:
_Lock.acquire()
val = self.metric[name]
_Lock.release()
return val
def metric_init(params):
global Desc_Skel, _Worker_Thread, Debug
print '[traffic1] Received the following parameters'
print params
Desc_Skel = {
'name' : 'XXX',
'call_back' : metric_of,
'time_max' : 60,
'value_type' : 'float',
'format' : '%.3f',
'units' : 'XXX',
'slope' : 'both',
'description' : 'XXX',
'groups' : 'network',
}
if "refresh_rate" not in params:
params["refresh_rate"] = 10
if "debug" in params:
Debug = params["debug"]
dprint("%s", "Debug mode on")
if "target_device" not in params:
params["target_device"] = "lo"
target_device = params["target_device"]
_Worker_Thread = UpdateTrafficThread(params)
_Worker_Thread.start()
# IP:HOSTNAME
if "spoof_host" in params:
Desc_Skel["spoof_host"] = params["spoof_host"]
descriptors.append(create_desc(Desc_Skel, {
"name" : "recv_bytes_" + target_device,
"units" : "bytes/sec",
"description" : "received bytes per sec",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : "recv_pkts_" + target_device,
"units" : "pkts/sec",
"description" : "received packets per sec",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : "recv_errs_" + target_device,
"units" : "pkts/sec",
"description" : "received error packets per sec",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : "trans_bytes_" + target_device,
"units" : "bytes/sec",
"description" : "transmitted bytes per sec",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : "trans_pkts_" + target_device,
"units" : "pkts/sec",
"description" : "transmitted packets per sec",
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : "trans_errs_" + target_device,
"units" : "pkts/sec",
"description" : "transmitted error packets per sec",
}))
return descriptors
def create_desc(skel, prop):
d = skel.copy()
for k, v in prop.iteritems():
d[k] = v
return d
def metric_of(name):
return _Worker_Thread.metric_of(name)
def metric_cleanup():
_Worker_Thread.shutdown()
if __name__ == '__main__':
try:
params = {
"target_device": "eth0",
"debug" : True,
}
metric_init(params)
while True:
for d in descriptors:
v = d['call_back'](d['name'])
print ('value for %s is '+d['format']) % (d['name'], v)
time.sleep(5)
except KeyboardInterrupt:
time.sleep(0.2)
os._exit(1)
except StandardError:
print sys.exc_info()[0]
os._exit(1)
``` |
{
"source": "johntdyer/tropo-samples",
"score": 4
} |
#### File: tropo-samples/python/orderpizza.py
```python
def sayToppings(top):
for topping in top:
say(topping)
return
#def main():
wait(500)
say("Welcome at Alfredos's Pizza Network.")
wait(500)
say("Please choose your size first, then the toppings.")
result = ask("We offer small, medium and family size pizzas. Which size would you like?",
{'choices':"small,1,medium,2,family,3,large", 'attempts':3})
if (result.name == 'choice'):
if (result.value == "small")or (result.value == "1"):
say("OK, a small pizza.")
elif (result.value == "medium")or (result.value == "2"):
say("I'll fix a Medium pizza for you")
elif (result.value == "family") or (result.value == "large")or (result.value == "3"):
say("A family sized pizza, no problem.")
# else:
# say("Please choose between small, medium and family size")
say("Which toppings would you like?")
say("Please add one topping at a time. You will return to the topping selection menu unless you say done")
say("Let's start with the vegetarian choices")
veggietoppings = []
exit = False
cnt = 0
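# Veggie topping loop: one topping per pass, at most 3 passes; saying "done" (or pressing 9) reads back the list so far, then the caller is asked whether to keep adding.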
while(exit == False):
cnt += 1
result = ask("Choose between peppers, onions, jalapenos, mushrooms, corn. To finish say done or press 9",
{'choices':"peppers, 1, onions, 2, jalapenos, 3, mushrooms, 4, corn, 4, done, 9", 'attempts':1})
if (result.name == 'choice'):
if (result.value == "peppers")or (result.value == "1"):
veggietoppings.append("peppers")
say("Adding peppers")
elif (result.value == "onions")or (result.value == "2"):
veggietoppings.append("onions")
say("Adding onions")
elif (result.value == "jalapenos")or (result.value == "3"):
veggietoppings.append("jalapenos")
say("Adding jalapenos")
elif (result.value == "mushrooms")or (result.value == "4"):
veggietoppings.append("mushrooms")
say("Adding mushrooms")
elif (result.value == "corn")or (result.value == "5"):
veggietoppings.append("corn")
say("Adding corn")
elif (result.value == "done")or (result.value == "9"):
say("So we have the following toppings so far:")
sayToppings(veggietoppings)
exit = True
if (cnt == 3):
exit = True
if exit:
result2 = ask("Do you want to add more toppings?", {'choices':"yes(1,yes), no(2,no)", 'attempts':2})
if (result2.name == 'choice'):
if (result2.value == "yes") or (result2.value == "1"):
exit = False
elif (result2.value == "onions") or (result2.value == "2"):
exit = True
say("And now the other ones")
meattoppings = []
exit = False
cnt = 0
while(exit == False):
cnt += 1
result = ask("Choose between pepperoni, ham, bacon. To finish say done or press 9",
{'choices':"pepperoni, 1, ham, 2, bacon, 3, done, 9", 'attempts':3})
if (result.name == 'choice'):
if (result.value == "pepperoni")or (result.value == "1"):
meattoppings.append("pepperoni")
say("Adding pepperoni")
elif (result.value == "ham")or (result.value == "2"):
meattoppings.append("ham")
say("Adding ham")
elif (result.value == "bacon")or (result.value == "3"):
meattoppings.append("bacon")
say("Adding bacon")
# elif (result.value == "mushrooms")or (result.value == "4"):
# veggietoppings.append("mushrooms")
# elif (result.value == "corn")or (result.value == "5"):
# veggietoppings.append("corn")
elif (result.value == "done")or (result.value == "9"):
say("So we have the following toppings so far:")
sayToppings(meattoppings)
exit = True
if (cnt == 3):
exit = True
if exit:
result2 = ask("Do you want to add more toppings?", {'choices':"yes(1,yes), no(2,no)", 'attempts':2})
if (result2.name == 'choice'):
if (result2.value == "yes") or (result2.value == "1"):
exit = False
elif (result2.value == "onions") or (result2.value == "2"):
exit = True
say("I have the following toppings for your pizza")
sayToppings(veggietoppings)
say("and")
sayToppings(meattoppings)
say("Please provide your address now")
#add delivery address recording here
#tell the caller when to expect the pizza
deliverytime = str((len(veggietoppings) + len(meattoppings))*2)
say("We will deliver your pizza in " + deliverytime + " Minutes")
say("Thanks for ordering at Alfredos's Pizza Network.")
wait(500)
``` |
{
"source": "johntelforduk/advent-of-code-2015",
"score": 4
} |
#### File: advent-of-code-2015/06-probably-a-fire-hazard/solution6_2.py
```python
import pygame
def str_pair_to_ints(str_pair: str) -> (int, int):
"""For parm string like '61,44' return pair of ints like (61, 44)."""
s1, s2 = str_pair.split(',')
return int(s1), int(s2)
def parse_instruction(instruction: str) -> (str, int, int, int, int):
"""Parse the parm instruction. Return a tuple, (command, x1, y1, x2, y2)."""
if 'turn on' in instruction:
command = 'on'
instruction = instruction.replace('turn on ', '')
elif 'turn off' in instruction:
command = 'off'
instruction = instruction.replace('turn off ', '')
else:
command = 'toggle'
instruction = instruction.replace('toggle ', '')
[one, _, two] = instruction.split(' ')
x1, y1 = str_pair_to_ints(one)
x2, y2 = str_pair_to_ints(two)
return command, x1, y1, x2, y2
def normalised_colour(brightness: int, maximum: int) -> (int, int, int):
"""For the parm brightness, return a tuple that is the normalised colour for that brightness."""
intensity = brightness * 255 // maximum
return intensity, intensity, intensity
def main():
screen_size = [1000, 1000] # [width, height]
# Each member is a k: v. Where,
# k = pair (x, y), the coordinates of the light.
# v = brightness of the light.
light = {}
# Initialise all of the lights to zero brightness.
for x in range(screen_size[0]):
for y in range(screen_size[1]):
light[(x, y)] = 0
max_bright = 0
f = open('input.txt')
whole_text = f.read()
f.close()
instructions = whole_text.split('\n')
while len(instructions) != 0:
instruction = instructions.pop(0)
command, x1, y1, x2, y2 = parse_instruction(instruction)
print(instruction, command)
for x in range(x1, x2 + 1):
for y in range(y1, y2 + 1):
# "The phrase turn on actually means that you should increase the brightness of those lights by 1."
if command == 'on':
light[(x, y)] += 1
if light[(x, y)] > max_bright:
max_bright = light[(x, y)]
# "The phrase turn off actually means that you should decrease the brightness of those lights by 1, to
# a minimum of zero."
elif command == 'off':
if light[(x, y)] > 0:
light[(x, y)] -= 1
# Must be toggle.
# "The phrase toggle actually means that you should increase the brightness of those lights by 2."
else:
light[(x, y)] += 2
if light[(x, y)] > max_bright:
max_bright = light[(x, y)]
if len(instructions) == 0:
pygame.init() # Initialize the game engine.
screen = pygame.display.set_mode(screen_size)
total_brightness = 0
for x in range(screen_size[0]):
for y in range(screen_size[1]):
total_brightness += light[(x, y)]
screen.set_at((x, y), normalised_colour(light[(x, y)], max_bright))
pygame.display.flip()
pygame.image.save(screen, 'screenshots/day6_2_final.jpg')
print('Part 2:', total_brightness)
pygame.quit()
if __name__ == "__main__":
main()
```
#### File: advent-of-code-2015/07-some-assembly-required/solution7.py
```python
def is_integer(candidate: str) -> bool:
"""Returns True if parm is a an integer."""
if candidate[0] == '-':
return candidate[1:].isnumeric()
return candidate.isnumeric()
f = open('input.txt')
whole_text = f.read()
f.close()
lines = whole_text.split('\n')
# print('lines:', lines)
circuit = {}
for connection in lines:
gate, wire = connection.split(' -> ')
circuit[wire] = gate
# For Part 2.
circuit['b'] = '956'
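# Repeatedly sweep the circuit, substituting known integer values into each gate expression, until every wire resolves to a number.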
to_do = -1
while to_do != 0:
to_do = 0
for wire in circuit:
if 'NOT' in circuit[wire]:
_, op1 = circuit[wire].split('NOT ')
if op1.isnumeric():
circuit[wire] = str(~ int(op1))
elif is_integer(circuit[op1]):
circuit[wire] = 'NOT ' + circuit[op1]
elif 'LSHIFT' in circuit[wire]:
op1, op2 = circuit[wire].split(' LSHIFT ')
if op1.isnumeric():
circuit[wire] = str(int(op1) << int(op2))
elif is_integer(circuit[op1]):
circuit[wire] = circuit[op1] + ' LSHIFT ' + op2
elif 'RSHIFT' in circuit[wire]:
op1, op2 = circuit[wire].split(' RSHIFT ')
if is_integer(op1):
circuit[wire] = str(int(op1) >> int(op2))
            elif is_integer(circuit[op1]):  # is_integer() also accepts negative values produced by NOT.
circuit[wire] = circuit[op1] + ' RSHIFT ' + op2
elif 'OR' in circuit[wire]:
op1, op2 = circuit[wire].split(' OR ')
if is_integer(op1) and is_integer(op2):
circuit[wire] = str(int(op1) | int(op2))
            elif not is_integer(op1):  # is_integer() also handles negative values produced by NOT.
if is_integer(circuit[op1]):
circuit[wire] = circuit[op1] + ' OR ' + op2
elif not is_integer(op2):
if is_integer(circuit[op2]):
circuit[wire] = op1 + ' OR ' + circuit[op2]
elif 'AND' in circuit[wire]:
op1, op2 = circuit[wire].split(' AND ')
if is_integer(op1) and is_integer(op2):
circuit[wire] = str(int(op1) & int(op2))
elif not is_integer(op1):
if is_integer(circuit[op1]):
circuit[wire] = circuit[op1] + ' AND ' + op2
elif not is_integer(op2):
if is_integer(circuit[op2]):
circuit[wire] = op1 + ' AND ' + circuit[op2]
elif not is_integer(circuit[wire]):
if is_integer(circuit[circuit[wire]]):
circuit[wire] = circuit[circuit[wire]]
if not is_integer(circuit[wire]):
to_do += 1
# print(wire, ':', circuit[wire])
# print(circuit)
print('to_do:', to_do)
print(circuit['a'])
```
#### File: advent-of-code-2015/12-JSAbacusFramework-dot-io/solution12_2.py
```python
import json
def sum_nums(d) -> int:
"""Return the sum of all integers in the parm dictionary."""
if type(d) == int:
return d
elif type(d) == list:
total = 0
for i in d:
total += sum_nums(i)
return total
elif type(d) == dict:
total = 0
for k in d:
if type(d[k]) == str and d[k] == 'red': # Special rule for Part 2.
return 0
total += sum_nums(d[k])
return total
else:
return 0
assert sum_nums([1, 2, 3]) == 6
assert sum_nums([1,{"c":"red","b":2},3]) == 4
assert sum_nums({"d":"red","e":[1,2,3,4],"f":5}) == 0
assert sum_nums([1,"red",5]) == 6
f = open('input.txt')
whole_text = f.read()
f.close()
j = json.loads(whole_text)
print(sum_nums(j))
``` |
{
"source": "johntelforduk/advent-of-code-2019",
"score": 4
} |
#### File: advent-of-code-2019/01-the-tyranny-of-the-rocket-equation/fuel_calc.py
```python
def mass_to_fuel(mass):
fuel = int(mass / 3) - 2
if fuel < 0: # -ve mass requires zero fuel.
return 0
else:
return fuel
# Calculate total fuel needed to lift a mass, taking into account fuel needed to lift the mass of the fuel.
def total_fuel(mass):
this_fuel = mass_to_fuel(mass)
if this_fuel == 0: # Zero mass requires zero fuel.
return 0
else:
return this_fuel + total_fuel(this_fuel)
f = open('input.txt')
whole_text = (f.read())
string_list = whole_text.split() # Split string by any whitespace.
number_list = [int(x) for x in string_list] # Convert list of strings to list of integers.
simple_total = 0 # Answer to Part 1.
complex_total = 0 # Answer to Part 2.
for each_mass in number_list:
simple_total += mass_to_fuel(each_mass)
complex_total += total_fuel(each_mass)
print('Part 1:', simple_total)
print('Part 2:', complex_total)
```
#### File: advent-of-code-2019/03-crossed-wires/crossed.py
```python
def dist_from_origin(vertex) -> int:
"""Return the Manhattan distance that the parm vertex is from (0, 0)."""
(x, y) = vertex
return abs(x) + abs(y)
def path_to_vertices(route: str) -> (set, dict):
"""Returns a pair of items. First item is a set of vertices visited for a parm route string.
Each vertex is a tuple (x, y). Origin vertex is omitted.
Second item is a dictionary of number of steps to first reach each vertex."""
vertices = set() # The set of vertices that will be returned.
    # Dictionary of distances that will be returned. Key is (x, y), value is steps taken to first get there.
vertex_distances = {}
x, y = 0, 0 # Origin of the journey the wire will take.
total_steps = 0 # Steps taken to get to the vertex.
for instruction in route.split(','): # Instructions are comma delimited.
# Example of a step, 'U414'...
direction = instruction[0] # direction = 'U'
distance = int(instruction[1:]) # distance = 414
assert direction in {'U', 'R', 'D', 'L'} # Must be 'U'p, 'R'ight, 'D'own or 'L'eft.
if direction == 'U':
dx, dy = 0, -1 # Set deltas according to the directions.
elif direction == 'R':
dx, dy = 1, 0
elif direction == 'D':
dx, dy = 0, 1
else:
dx, dy = -1, 0
for step in range(distance): # Take the 'distance' number of steps.
x += dx
y += dy
vertex = (x, y)
vertices.add(vertex)
total_steps += 1
if vertex not in vertex_distances: # Only want the shortest distance to the vertex in the dictionary.
vertex_distances[vertex] = total_steps
return vertices, vertex_distances
f = open('input.txt')
whole_text = (f.read())
string_list = whole_text.split() # Split string by whitespace.
wire_1 = string_list[0]
wire_2 = string_list[1]
(vertices_1, vertex_distances_1) = path_to_vertices(wire_1)
(vertices_2, vertex_distances_2) = path_to_vertices(wire_2)
crossed_wires = vertices_1.intersection(vertices_2) # Wires cross where vertices intersect.
distances = list(map(dist_from_origin, crossed_wires)) # Calculate Manhattan distance for each crossed wire vertex.
distances.sort() # Put the shortest distance in position [0] of the list.
print('Part 1:', distances[0])
fewest_steps_found = None
for crossed in crossed_wires:
combined_steps = vertex_distances_1[crossed] + vertex_distances_2[crossed]
if fewest_steps_found is None or combined_steps < fewest_steps_found:
fewest_steps_found = combined_steps
print('Part 2:', fewest_steps_found)
```
#### File: advent-of-code-2019/04-secure-container/tests.py
```python
import fact_checks as fc
import unittest # These tests based on, https://docs.python.org/3/library/unittest.html
class TestIs6Digits(unittest.TestCase):
def test_is_6_digit(self):
self.assertFalse(fc.is_6_digits(0))
self.assertFalse(fc.is_6_digits(12345))
self.assertFalse(fc.is_6_digits(1234567))
self.assertTrue(fc.is_6_digits(123456))
self.assertTrue(fc.is_6_digits(100000))
self.assertTrue(fc.is_6_digits(999999))
def test_value_in_range(self):
self.assertFalse(fc.value_in_range(number=5, low_bound=6, up_bound=20))
self.assertFalse(fc.value_in_range(number=5, low_bound=1, up_bound=4))
self.assertTrue(fc.value_in_range(number=5, low_bound=4, up_bound=10))
self.assertTrue(fc.value_in_range(number=5, low_bound=5, up_bound=10))
self.assertTrue(fc.value_in_range(number=5, low_bound=1, up_bound=6))
self.assertTrue(fc.value_in_range(number=5, low_bound=1, up_bound=5))
def test_adjacent_digits_same(self):
self.assertFalse(fc.two_adjacent_digits_same(123789))
self.assertTrue(fc.two_adjacent_digits_same(122345))
self.assertTrue(fc.two_adjacent_digits_same(111111))
def test_just_two_adjacent_digits_same(self):
self.assertFalse(fc.just_two_adjacent_digits_same(123444))
self.assertFalse(fc.just_two_adjacent_digits_same(222346))
self.assertFalse(fc.just_two_adjacent_digits_same(233346))
self.assertTrue(fc.just_two_adjacent_digits_same(112233))
self.assertTrue(fc.just_two_adjacent_digits_same(111122))
self.assertTrue(fc.just_two_adjacent_digits_same(112345))
self.assertTrue(fc.just_two_adjacent_digits_same(123455))
self.assertTrue(fc.just_two_adjacent_digits_same(122456))
def test_never_decrease(self):
self.assertFalse(fc.never_decrease(223450))
self.assertTrue(fc.never_decrease(122345))
self.assertTrue(fc.never_decrease(111111))
if __name__ == '__main__':
unittest.main()
```
#### File: advent-of-code-2019/11-space-police/robot.py
```python
import dict_computer as comp
class Robot:
def __init__(self):
self.computer = comp.Computer(interactive_mode=False)
self.computer.load_file(filename='input.txt')
self.location = (0, 0) # Current location of the robot.
self.bearing = 'N' # Direction that robot is currently pointing in. "The robot starts facing up."
def turn_left(self):
left_turns = {'N': 'W', 'W': 'S', 'S': 'E', 'E': 'N'}
self.bearing = left_turns[self.bearing]
def turn_right(self):
        right_turns = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'}
        self.bearing = right_turns[self.bearing]
def forward(self):
(x, y) = self.location
dx = {'W': -1, 'E': 1, 'N': 0, 'S': 0}
dy = {'N': -1, 'S': 1, 'E': 0, 'W': 0}
self.location = (x + dx[self.bearing], y + dy[self.bearing])
this_robot = Robot()
# Key = tuple (x, y) of location of panel, value is colour of panel.
hull = dict()
hull[this_robot.location] = 1 # Part 2: "After starting the robot on a single white panel instead..."
while this_robot.computer.halted is False:
# "The program uses input instructions to access the robot's camera: provide 0 if the robot is over a black panel
# or 1 if the robot is over a white panel."
this_robot.computer.input = hull.get(this_robot.location, 0) # By default, panels are black ('0').
# "First, it will output a value indicating the color to paint the panel the robot is over: 0 means to paint the
# panel black, and 1 means to paint the panel white."
this_robot.computer.run_until_output()
assert this_robot.computer.output in {0, 1} # '0' is paint it black, '1' is paint it white.
hull[this_robot.location] = this_robot.computer.output
# Second, it will output a value indicating the direction the robot should turn: 0 means it should turn left 90
# degrees, and 1 means it should turn right 90 degrees.
this_robot.computer.run_until_output()
assert this_robot.computer.output in {0, 1} # '0 is turn left, '1' is turn right.
if this_robot.computer.output == 0:
this_robot.turn_left()
else:
this_robot.turn_right()
# After the robot turns, it should always move forward exactly one panel.
this_robot.forward()
print('Part 1:', len(hull))
# Print the image.
for y in range(10):
for x in range(45):
panel = hull.get((x, y), 0)
if panel == 1:
print('#', end='')
else:
print(' ', end='')
print()
```
#### File: advent-of-code-2019/13-care-package/screen.py
```python
class Screen:
def __init__(self):
self.grid = {} # Key = (x, y), value = tile number.
self.min_x = None
self.min_y = None
self.width = None
self.height = None
@staticmethod
def tile_code_to_char(code: int) -> str:
"""For parm tile code, return a single char that the code can be rendered into when printing the grid.
0 is an empty tile. No game object appears in this tile.
1 is a wall tile. Walls are indestructible barriers.
2 is a block tile. Blocks can be broken by the ball.
3 is a horizontal paddle tile. The paddle is indestructible.
4 is a ball tile. The ball moves diagonally and bounces off objects."""
conversion_map = {0: ' ', 1: '#', 2: '%', 3: '-', 4: 'O'}
return conversion_map[code]
def grid_to_char(self, vertex: (int, int)) -> str:
"""For parm vertex coordinates, return char that should be rendered at that vertex."""
return self.tile_code_to_char(self.grid.get(vertex, 0)) # Default to 0, as dict is sparse.
def render(self, score: int):
"""Render the contents of the grid dictionary to screen. Print current game score at bottom of screen."""
# TODO Rewrite to use Pygame to do proper graphical rendering of the game screen.
for y in range(self.min_y, self.height + 1):
for x in range(self.min_x, self.width + 1):
print(self.grid_to_char(vertex=(x, y)), end='')
print()
print('Score:', score) # Start a new line.
```
#### File: advent-of-code-2019/20-donut-maze/part_1.py
```python
from copy import deepcopy
class Donut:
def __init__(self, filename: str):
self.grid = {} # Key=(x, y), Value=character at that coordinate.
self.width, self.height = 0, 0 # Dimensions of the grid.
self.portals = {} # Key is entrance to portal (x, y), value is exit from portal (x, y).
self.start = (0, 0) # Coordinates of 'AA'.
self.end = (0, 0) # Coordinates of 'ZZ'.
self.shortest_to_square = {} # Least steps found to this square. Key=(x, y), Value=steps.
        self.best = 0  # Shortest route from AA to ZZ found so far.
f = open(filename)
whole_text = (f.read())
x, y = 0, 0
for each_char in whole_text:
if each_char == '\n': # New-line.
x = 0
y += 1
else:
self.grid[x, y] = each_char
x += 1
if x > self.width:
self.width = x
self.height = y + 1
f.close()
@staticmethod
def is_cap_letter(test_char: str):
"""Return true if the parm character is a capital letter."""
if 'A' <= test_char <= 'Z':
return True
return False
def is_special(self, cap_1: str, cap_2: str, dot: str) -> bool:
"""If the first two parms are capital letters, and the third is a dot, then return True.
Example sequence is 'BC.'"""
if self.is_cap_letter(cap_1) and self.is_cap_letter(cap_2) and dot == '.':
return True
return False
def find_special_squares(self) -> {}:
"""Return a dictionary of special square information. Key=(x, y), Value=Name of special square."""
found = {}
for x in range(self.width):
for y in range(self.height):
if x < self.width - 2:
one = self.grid[x, y]
two = self.grid[x + 1, y]
three = self.grid[x + 2, y]
# "BC."
if self.is_special(cap_1=one, cap_2=two, dot=three):
found[x + 2, y] = one + two
# ".BC"
if self.is_special(cap_1=two, cap_2=three, dot=one):
found[x, y] = two + three
if y < self.height - 2:
one = self.grid[x, y]
two = self.grid[x, y + 1]
three = self.grid[x, y + 2]
# B
# C
# .
if self.is_special(cap_1=one, cap_2=two, dot=three):
found[x, y + 2] = one + two
# .
# B
# C
if self.is_special(cap_1=two, cap_2=three, dot=one):
found[x, y] = two + three
return found
def set_specials(self):
"""Set the attributes of Start, end End
Also set attributes of Portals."""
specials = self.find_special_squares()
for vertex in specials:
name = specials[vertex]
if name == 'AA': # "Every maze on Pluto has a start (the open tile next to AA)..."
self.start = vertex
elif name == 'ZZ': # "... and an end (the open tile next to ZZ)"
self.end = vertex
else: # The start and end specials are not real portals.
for pair in specials:
if specials[pair] == name and vertex != pair:
self.portals[vertex] = pair
def search(self,
current_square: (int, int), # Start / continue search from here.
squares_visited: []): # List of squares previously visited.
"""Do a recursive search of squares, until end (ZZ) is found."""
one_way_only = True
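        # Follow single-choice corridors iteratively; recurse only when more than one next square is possible.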
while one_way_only:
steps_taken = len(squares_visited)
# Check if we've made it to the end of the maze.
if current_square == self.end:
# No previous best, so this must be the best found so far...
# ... or better than previous best.
if self.best == 0 or steps_taken < self.best:
self.best = steps_taken
print('Best so far:', self.best)
return
# Is this a good route to whatever the current square is?
if current_square not in self.shortest_to_square: # If never been to this square before, then this is best.
self.shortest_to_square[current_square] = steps_taken
else:
# If improvement on previous best route to this then record this.
if steps_taken < self.shortest_to_square[current_square]:
self.shortest_to_square[current_square] = steps_taken
else: # No improvement on previous route to this square, so give up.
return
possible_next_squares = []
# If standing next to portal, we could go through it...
if current_square in self.portals:
if self.portals[current_square] not in squares_visited: # ... unless it is a backtrack.
possible_next_squares.append(self.portals[current_square])
(curr_x, curr_y) = current_square
# Look for a '.' adjacent to the current square.
for (dx, dy) in [(0, 1), (0, -1), (1, 0), (-1, 0)]:
possible = (curr_x + dx, curr_y + dy)
# If the possible is a '.', and we haven't been to it before in this search, then OK.
if self.grid[possible] == '.' and possible not in squares_visited:
possible_next_squares.append(possible)
if len(possible_next_squares) == 0: # Dead end, so give up.
return
elif len(possible_next_squares) == 1: # Only one possible square to go to next, so iterate.
current_square = possible_next_squares[0]
squares_visited.append(current_square)
else: # Several possible next squares, so recurse.
for possible in possible_next_squares:
new_squares_visited = deepcopy(squares_visited)
new_squares_visited.append(possible)
self.search(current_square=possible, squares_visited=new_squares_visited)
one_way_only = False # Stop iterative loop.
this_donut = Donut('input.txt')
this_donut.set_specials()
this_donut.search(current_square=this_donut.start, squares_visited=[])
print('Part 1:', this_donut.best)
```
#### File: advent-of-code-2019/22-slam-shuffle/space_cards.py
```python
class SpaceCards:
def __init__(self, deck_size: int):
self.deck_size = deck_size
# "Just like any deck of space cards, there are 10007 cards in the deck numbered 0 through 10006. The deck must
# be new - they're still in factory order, with 0 on the top, then 1, then 2, and so on, all the way through to
# 10006 on the bottom."
self.pack = []
for i in range(deck_size):
self.pack.append(i)
def deal_into_new_stack(self):
# "... create a new stack of cards by dealing the top card of the deck onto the top of the new stack repeatedly
# until you run out of cards".
self.pack.reverse()
def cut_n_cards(self, n: int):
# "... take the top N cards off the top of the deck and move them as a single unit to the bottom of the deck,
# retaining their order."
# "You've also been getting pretty good at a version of this technique where N is negative! In that case, cut
# (the absolute value of) N cards from the bottom of the deck onto the top."
if n >= 0:
cut_point = n
else:
cut_point = self.deck_size + n
new_pack = []
for i in range(cut_point, self.deck_size): # Cards below cut.
new_pack.append(self.pack[i])
for i in range(cut_point):
new_pack.append(self.pack[i]) # Cards above cut.
self.pack = new_pack
def deal_with_increment_n(self, n: int):
# "... start by clearing enough space on your table to lay out all of the cards individually in a long line.
# Deal the top card into the leftmost position. Then, move N positions to the right and deal the next card
# there. If you would move into a position past the end of the space on your table, wrap around and keep
# counting from the leftmost card again. Continue this process until you run out of cards."
# Start by making space for new pack.
new_pack = []
for i in range(self.deck_size):
new_pack.append(-1) # Doesn't matter what each element is initialised to.
curr_pos = 0
for i in self.pack:
            assert new_pack[curr_pos] == -1  # Should not be a card in this position already.
new_pack[curr_pos] = i
curr_pos = (curr_pos + n) % self.deck_size # Move to next position.
self.pack = new_pack.copy()
def shuffle(self, filename: str):
f = open(filename)
whole_text = f.read()
f.close()
for each_line in whole_text.split('\n'):
word_list = list(each_line.split(' '))
# print(each_line, word_list)
if word_list[0] == 'cut':
self.cut_n_cards(n=int(word_list[1]))
elif word_list[1] == 'into':
self.deal_into_new_stack()
else:
self.deal_with_increment_n(n=int(word_list[3]))
```
#### File: advent-of-code-2019/22-slam-shuffle/unit_tests.py
```python
from space_cards import SpaceCards
import unittest
class TestSpaceCards(unittest.TestCase):
def test_new_pack(self):
test_pack = SpaceCards(deck_size=10)
self.assertEqual(test_pack.deck_size, 10) # Should be 10 cards in the pack.
self.assertEqual(test_pack.pack, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_deal_into_new_stack(self):
test_pack = SpaceCards(deck_size=10)
test_pack.deal_into_new_stack()
self.assertEqual(test_pack.pack, [9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
def test_cut_n_cards(self):
test_pack_1 = SpaceCards(deck_size=10)
test_pack_1.cut_n_cards(n=3)
self.assertEqual(test_pack_1.pack, [3, 4, 5, 6, 7, 8, 9, 0, 1, 2])
test_pack_2 = SpaceCards(deck_size=10)
test_pack_2.cut_n_cards(n=-4)
self.assertEqual(test_pack_2.pack, [6, 7, 8, 9, 0, 1, 2, 3, 4, 5])
def test_deal_with_increment_n(self):
test_pack = SpaceCards(deck_size=10)
test_pack.deal_with_increment_n(n=3)
self.assertEqual(test_pack.pack, [0, 7, 4, 1, 8, 5, 2, 9, 6, 3])
# These tests use the example sequences of shuffles.
def test_1(self):
test_pack = SpaceCards(deck_size=10)
test_pack.shuffle(filename='test_1.txt')
self.assertEqual(test_pack.pack, [0, 3, 6, 9, 2, 5, 8, 1, 4, 7])
def test_2(self):
test_pack = SpaceCards(deck_size=10)
test_pack.shuffle(filename='test_2.txt')
self.assertEqual(test_pack.pack, [3, 0, 7, 4, 1, 8, 5, 2, 9, 6])
def test_3(self):
test_pack = SpaceCards(deck_size=10)
test_pack.shuffle(filename='test_3.txt')
self.assertEqual(test_pack.pack, [6, 3, 0, 7, 4, 1, 8, 5, 2, 9])
def test_4(self):
test_pack = SpaceCards(deck_size=10)
test_pack.shuffle(filename='test_4.txt')
self.assertEqual(test_pack.pack, [9, 2, 5, 8, 1, 4, 7, 0, 3, 6])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johntelforduk/advent-of-code-2020",
"score": 4
} |
#### File: advent-of-code-2020/13-shuttle-search/part2.py
```python
import sys
from math import gcd
from functools import reduce
def lcm(a: int , b: int) -> int:
"""Returns the least common multiple of the parm pair of numbers."""
return a*b // gcd(a, b)
def get_lcm_for(your_list: list) -> int:
"""Returns the least common multiple for parm list of numbers."""
return reduce(lambda x, y: lcm(x, y), your_list)
VERBOSE = ('-v' in sys.argv)
filename = sys.argv[1]
f = open(filename)
whole_text = (f.read())
two_lines = whole_text.split('\n')  # Split the input into its two lines.
def departure(bus: int, time: int) -> bool:
"""For parm bus ID and time, return True iff the bus departs at that time."""
if time % bus == 0:
return True
return False
buses = [] # Each item in the list is (bus, stagger).
stagger = 0 # How many minutes after first bus does this one depart.
for bus in two_lines[1].split(','):
if bus != 'x':
buses.append((int(bus), stagger))
stagger += 1
if VERBOSE:
print('buses', buses)
# Find the three highest bus IDs and remove them from the list of buses.
b1, s1 = max(buses)
buses.remove((b1, s1))
b2, s2 = max(buses)
buses.remove((b2, s2))
b3, s3 = max(buses)
buses.remove((b3, s3))
if VERBOSE:
print('b1, s1, b2, s2, b3, s3, buses:', b1, s1, b2, s2, b3, s3, buses)
# Find the first time that the 3 biggest bus IDs have their staggers the right distance apart from each other.
candidate_time = 0
while True:
if (departure(bus=b1, time=candidate_time + s1)
and departure(bus=b2, time=candidate_time + s2)
and departure(bus=b3, time=candidate_time + s3)):
break
candidate_time += 1
big_lcm = get_lcm_for([b1, b2, b3])
if VERBOSE:
print('candidate_time, big_lcm:', candidate_time, big_lcm)
# So now we know the solution time is somewhere in the space,
# time = candidate_time + x * big_lcm
# where x is an integer.
solution_found = False
while not solution_found:
solution_found = True # Begin optimistically with each candidate time.
# Test each of the remaining buses in turn.
for bus, stagger in buses:
if departure(bus, candidate_time + stagger) is False:
solution_found = False
if solution_found:
break
else:
candidate_time += big_lcm # Try the next possible time in the solution space.
print('Part 2:', candidate_time)
```
#### File: advent-of-code-2020/15-rambunctious-recitation/test_solution15.py
```python
from solution15 import game
import unittest
class TestFunctions(unittest.TestCase):
def test_functions(self):
self.assertEqual(game(spoken=[0, 3, 6], rounds=10), 0)
self.assertEqual(game(spoken=[1, 3, 2], rounds=2020), 1)
self.assertEqual(game(spoken=[2, 1, 3], rounds=2020), 10)
self.assertEqual(game(spoken=[1, 2, 3], rounds=2020), 27)
self.assertEqual(game(spoken=[2, 3, 1], rounds=2020), 78)
self.assertEqual(game(spoken=[3, 2, 1], rounds=2020), 438)
self.assertEqual(game(spoken=[3, 2, 1], rounds=2020), 438)
self.assertEqual(game(spoken=[3, 1, 2], rounds=2020), 1836)
if __name__ == '__main__':
unittest.main()
```
#### File: advent-of-code-2020/18-operation-order/solution18.py
```python
import sys
VERBOSE = ('-v' in sys.argv)
def evaluate_left_to_right(exp: str) -> int:
"""Do left-to-right expression evaluation on the parm flat expression."""
# A flat expression contains no brackets.
assert '(' not in exp
assert ')' not in exp
terms = exp.split(' ')
result = int(terms.pop(0)) # First term in the expression is always an operand.
while len(terms) != 0:
operator = terms.pop(0)
operand = int(terms.pop(0))
assert operator in ['+', '*']
if operator == '+':
result += operand
else:
result *= operand
return result
def evaluate_one_operation(exp: str, operator: str) -> str:
"""For the first matching operation in the parm flat expression, replace it - and the 2 operands around it - with
the results of the operation."""
# A flat expression contains no brackets.
assert '(' not in exp
assert ')' not in exp
terms = exp.split(' ')
# Find the first occurrence of required operation in the list of terms.
operation_pos = terms.index(operator)
# For an infixed operation, the operands are either side of the operator.
operand1 = int(terms[operation_pos - 1])
operand2 = int(terms[operation_pos + 1])
assert operator in ['+', '*']
if operator == '+':
calculation = operand1 + operand2
else:
calculation = operand1 * operand2
    # Reconstruct the string for the whole expression, with the one operation found replaced with its result.
result = ''
pos = 0
for term in terms:
if operation_pos - 1 <= pos <= operation_pos + 1:
if operation_pos == pos:
result += str(calculation) + ' '
else:
result += term + ' '
pos += 1
return result.strip() # Each concatenation also adds a space, so last space needs to be removed.
def inner_brackets(exp: str) -> (int, int):
"""For parm expressions, return the start and end position of the first pair of inner brackets.
Inner brackets are brackets around a bit of string that contains no other brackets."""
# Looking for string "(" + some other chars + ")",
# ... where some other chars doesn't contain any brackets.
pos, start = 0, 0
for i in exp:
if i == "(":
start = pos
if i == ')':
return start, pos
pos += 1
return None, None
def inner_brackets_to_value(exp: str, method: str) -> str:
"""For the parm expression, return expression with first inner brackets evaluated."""
bf, bt = inner_brackets(exp)
# front + '(' + inner + ')' + end
front = exp[0:bf]
inner = exp[bf+1:bt]
end = exp[bt+1:]
assert method in ['l_to_r', '+_then_*']
if method == 'l_to_r':
return front + str(evaluate_part1(inner)) + end
else:
return front + str(evaluate_part2(inner)) + end
def evaluate_part1(exp: str) -> int:
"""For parm expression, evaluate it using the left to right method."""
# First, evaluate all of the things in brackets.
while '(' in exp:
exp = inner_brackets_to_value(exp, 'l_to_r')
return evaluate_left_to_right(exp)
def evaluate_part2(exp: str) -> int:
"""For parm expression, evaluate it using the addition before multiplication method.."""
# First, evaluate all of the things in brackets.
while '(' in exp:
exp = inner_brackets_to_value(exp, '+_then_*')
# Then evaluate all the addition operations.
while '+' in exp:
exp = evaluate_one_operation(exp, '+')
# Finally evaluate all the multiplications.
while '*' in exp:
exp = evaluate_one_operation(exp, '*')
return int(exp)
def main():
filename = sys.argv[1]
f = open(filename)
whole_text = f.read()
f.close()
total1 = 0
for exp in whole_text.split('\n'):
total1 += evaluate_part1(exp)
print('Part 1:', total1)
total2 = 0
for exp in whole_text.split('\n'):
total2 += evaluate_part2(exp)
print('Part 2:', total2)
if __name__ == "__main__":
main()
```
#### File: advent-of-code-2020/21-allergen-assessment/solution21.py
```python
import sys
VERBOSE = ('-v' in sys.argv)
def allergens_from_text(text: str) -> dict:
"""For parm string containing a list of foods, return a dictionary. Each item in dictionary is an allergen and
the set of ingredients that might contain that allergens.
Returned dictionary has items as follows,
k = Name of the allergen.
v = A list. Each item in the list is a set. Each set is the ingredients in each food that might contain
the allergen."""
possible = {}
for food in text.split('\n'): # Foods start on separate lines.
food_no_brackets = food.replace('(', '').replace(')', '')
ingredients, allergens = food_no_brackets.split(' contains ')
ingredient_set = set() # The set of ingredients that might include this allergen.
for ingredient in ingredients.split(' '): # Ingredients are delimited with spaces.
ingredient_set.add(ingredient)
for allergen in allergens.split(', '): # Allergens are delimited with comma + space.
if allergen not in possible:
possible[allergen] = [ingredient_set]
else:
possible[allergen].append(ingredient_set)
return possible
def intersect_list(list_of_sets: list) -> str:
"""Parm is a list of sets. Each item in each set is a string, for example,
    [{'a', 'b', 'c', 'd'}, {'b'}]
If the intersection of the list of sets is a single item, then that item is returned,
otherwise 'None' is returned.
So in the example above, 'b' is returned."""
intersection_so_far = set()
first = True
for each_set in list_of_sets:
if first:
intersection_so_far = each_set
first = False # No longer the first set in the list.
else:
intersection_so_far = intersection_so_far.intersection(each_set)
if len(intersection_so_far) == 1:
for i in intersection_so_far:
return i
return 'None'
def reduce_ingredients(allergens: dict) -> (dict, set):
"""For a parm dictionary of allergens (k = Allergens, v = list of sets, where each set is the list of
    ingredients in each food that might contain the allergen).
Reduce the number of ingredients that might contain an allergen, by looking for ingredients that are in all
of the foods in the list for each allergen.
Returns a reduced dictionary, plus a set of allergens that are now matched to a single ingredient."""
solved = {}
solved_ingredients = set()
for allergen in allergens:
ingredient_sets = allergens[allergen]
intersection = intersect_list(ingredient_sets)
if VERBOSE:
print('allergen, ingredient_sets, intersection:', allergen, ingredient_sets, intersection)
# This allergen must be in 1 specific ingredient.
# So, make a note of that.
if intersection != 'None':
solved[allergen] = intersection
solved_ingredients.add(intersection)
if VERBOSE:
print('solved, solved_ingredients:', solved, solved_ingredients)
    # For allergens that we've just solved (we know which ingredient they are in), remove that ingredient from
    # the sets of potential ingredients for the other allergens.
result = {}
for allergen in allergens:
if allergen in solved:
ingredient = solved[allergen]
result[allergen] = [{ingredient}]
else:
ingredient_list = allergens[allergen]
if VERBOSE:
print('ingredient_list:', ingredient_list)
new_list = []
for ingredient_set in ingredient_list:
new_set = set()
for ingredient in ingredient_set:
if ingredient not in solved_ingredients:
new_set.add(ingredient)
new_list.append(new_set)
result[allergen] = new_list
return result, solved_ingredients
def count_non_allergen_ingredients(text: str, allergen_ingredients: set) -> int:
"""For parm text which is puzzle input, and parm set of ingredients which are known to be allergens. Return a
count of the number of non-allergen ingredients listed in recipes."""
count = 0
for food in text.split('\n'):
ingredients, _ = food.split(' (')
for ingredient in ingredients.split(' '):
if ingredient not in allergen_ingredients:
count += 1
return count
def main():
filename = sys.argv[1]
f = open(filename)
whole_text = f.read()
f.close()
allergens = allergens_from_text(whole_text)
if VERBOSE:
print('allergens:', allergens)
done = False
previous_length = 0
allergen_ingredients = set()
while not done: # Keep repeating until reduce_ingredients is not reducing anymore.
allergens, allergen_ingredients = reduce_ingredients(allergens)
if VERBOSE:
print('allergens, solved:', allergens, allergen_ingredients)
done = (previous_length == len(allergen_ingredients))
previous_length = len(allergen_ingredients)
print('Part 1:', count_non_allergen_ingredients(whole_text, allergen_ingredients))
# ----------------
# Turn allergens dictionary into list of pairs.
allergens_list = []
for allergen in allergens:
ingredient_list_set = allergens[allergen]
ingredient = list(ingredient_list_set[0])[0]
if VERBOSE:
print('allergen, ingredient:', allergen, ingredient)
allergens_list.append((allergen, ingredient))
# Print the ingredients out in allergen order.
first = True
print('Part 2: ', end='')
for _, ingredient in sorted(allergens_list):
if not first:
print(',', end='')
print(ingredient, end='')
first = False
if __name__ == "__main__":
main()
```
#### File: advent-of-code-2020/22-crab-combat/solution22_2.py
```python
import sys
VERBOSE = ('-v' in sys.argv)
class Deck:
def __init__(self, player: int, cards: list):
self.player = player
self.cards = cards
def take_top_card(self) -> int:
"""Remove the top card from the deck. Return the value of that card."""
card = self.cards.pop(0)
if VERBOSE:
print(self.player, 'plays:', card)
return card
def top_cards(self, top: int) -> list:
"""Return a list of the top cards in the deck. The number of cards is the parm of this method."""
return self.cards[:top].copy()
def card_on_bottom(self, card: int):
"""Put the parm card on the bottom of the deck."""
self.cards.append(card)
def display(self):
"""Print out info about the deck to stdout."""
print('Player', str(self.player) + "'s deck: ", end='')
first = True
for card in self.cards:
if not first:
print(', ', end='')
first = False
print(card, end='')
print()
class Combat:
def __init__(self, game: int, p1_cards: list, p2_cards: list):
self.p1_deck = Deck(player=1, cards=p1_cards) # Player 1's deck of cards.
self.p2_deck = Deck(player=2, cards=p2_cards) # Player 2's card deck.
self.previous_rounds = [] # List of decks that each player had in previous rounds.
self.game = game
if VERBOSE:
print('=== Game', self.game, '===')
print()
self.round = 1
self.winner = 0 # 0 indicates no winner yet.
while self.winner == 0:
self.winner = self.play_a_round()
def play_a_round(self) -> int:
"""Play a round of the game.
If one of the players wins the game in this round, return their player number.
Otherwise, return 0, to indicate no winner after this round."""
if VERBOSE:
print('-- Round', self.round, '(Game ' + str(self.game) + ')--')
self.p1_deck.display()
self.p2_deck.display()
# "Before either player deals a card, if there was a previous round in this game that had exactly the same
# cards in the same order in the same players' decks, the game instantly ends in a win for player 1."
if (self.p1_deck.cards, self.p2_deck.cards) in self.previous_rounds:
if VERBOSE:
print('Stalemate, hence Player 1 wins')
return 1
self.previous_rounds.append((self.p1_deck.cards.copy(), self.p2_deck.cards.copy()))
# "... both players draw their top card..."
p1_card = self.p1_deck.take_top_card()
p2_card = self.p2_deck.take_top_card()
# "If both players have at least as many cards remaining in their deck as the value of the card they just drew,
# the winner of the round is determined by playing a new game of Recursive Combat."
if p1_card <= len(self.p1_deck.cards) and p2_card <= len(self.p2_deck.cards):
if VERBOSE:
print('Playing a sub-game to determine the winner...')
p1_new_game_cards = self.p1_deck.top_cards(top=p1_card)
p2_new_game_cards = self.p2_deck.top_cards(top=p2_card)
new_game = Combat(game=self.game + 1, p1_cards=p1_new_game_cards, p2_cards=p2_new_game_cards)
round_winner = new_game.winner
if VERBOSE:
print('...anyway, back to game', self.game)
elif p1_card > p2_card: # "The player with the higher-valued card wins the round."
round_winner = 1
else:
round_winner = 2
if round_winner == 1:
if VERBOSE:
print('Player 1 wins round', self.round, 'of game', self.game)
# "The winner keeps both cards, placing them on the bottom of their own deck so that the winner's card
# is above the other card."
self.p1_deck.card_on_bottom(p1_card)
self.p1_deck.card_on_bottom(p2_card)
else:
if VERBOSE:
print('Player 2 wins round', self.round, 'of game', self.game)
self.p2_deck.card_on_bottom(p2_card)
self.p2_deck.card_on_bottom(p1_card)
if VERBOSE:
print()
self.round += 1
# "If this causes a player to have all of the cards, they win, and the game ends."
if len(self.p1_deck.cards) == 0: # p1 has no cards left, so p2 wins.
if VERBOSE:
print('The winner of game', self.game, 'is player 2')
return 2
elif len(self.p2_deck.cards) == 0: # p2 has no cards left, so p1 wins.
if VERBOSE:
print('The winner of game', self.game, 'is player 1')
return 1
return 0 # 0 indicates no winner of the game during this round.
def calculate_winning_score(self) -> int:
"""Return score of winning deck."""
# "The bottom card in their deck is worth the value of the card multiplied by 1, the second-from-the-bottom
# card is worth the value of the card multiplied by 2, and so on."
if self.winner == 1:
cards = self.p1_deck.cards
else:
cards = self.p2_deck.cards
score = 0
multiplier = 1
for card in cards[::-1]: # Go through the winner's cards backwards.
score += card * multiplier
multiplier += 1
return score
def text_to_cards(text: str) -> list:
"""For parm text file, return a list of integers which are the cards in that text file."""
cards = []
# Each card starts on a new line. Ignore the first line, as it is the player number.
for card in text.split('\n')[1:]:
cards.append(int(card))
return cards
def main():
filename = sys.argv[1]
f = open(filename)
whole_text = f.read()
f.close()
p1_text, p2_text = whole_text.split('\n\n') # There is a blank line between the 2 players.
p1_cards_list = text_to_cards(p1_text)
p2_cards_list = text_to_cards(p2_text)
game = Combat(game=1, p1_cards=p1_cards_list, p2_cards=p2_cards_list)
print('== Post-game results ==')
game.p1_deck.display()
game.p2_deck.display()
print('Part 2:', game.calculate_winning_score())
if __name__ == "__main__":
main()
```
#### File: advent-of-code-2020/23-crab-cups/test_solution23_1.py
```python
from solution23_1 import Game
import unittest
class TestFunctions(unittest.TestCase):
def test_game_class(self):
g1 = Game('389125467')
self.assertEqual(g1.current, 3) # Current is first cup.
self.assertEqual(g1.pick_up_1_cup(), 8)
self.assertEqual(g1.cups, [3, 9, 1, 2, 5, 4, 6, 7])
g1.update_destination_cup()
self.assertEqual(g1.destination, 2)
g2 = Game('389125467')
g2.current = 7 # Force current to be last cup in the list.
self.assertEqual(g2.current, 7)
self.assertEqual(g2.pick_up_1_cup(), 3)
self.assertEqual(g2.cups, [8, 9, 1, 2, 5, 4, 6, 7])
g2.update_destination_cup()
self.assertEqual(g2.destination, 6)
g3 = Game('389125467')
g3.current = 2 # Force current to be a mid-cup.
self.assertEqual(g3.current, 2)
self.assertEqual(g3.pick_up_1_cup(), 5)
self.assertEqual(g3.cups, [3, 8, 9, 1, 2, 4, 6, 7])
g4 = Game('389125467')
self.assertEqual(g4.current, 3) # Current is first cup.
picked_up = g4.pick_up_cups(3)
self.assertEqual(picked_up, [8, 9, 1])
self.assertEqual(g4.cups, [3, 2, 5, 4, 6, 7])
g4.update_destination_cup()
self.assertEqual(g4.destination, 2)
g4.insert_cups(inserts=picked_up, insert_after=g4.destination)
self.assertEqual(g4.cups, [3, 2, 8, 9, 1, 5, 4, 6, 7])
g4.select_new_current_cup()
self.assertEqual(g4.current, 2)
picked_up = g4.pick_up_cups(3)
g4.update_destination_cup()
g4.insert_cups(inserts=picked_up, insert_after=g4.destination)
self.assertEqual(g4.cups, [3, 2, 5, 4, 6, 7, 8, 9, 1])
g4.select_new_current_cup()
self.assertEqual(g4.current, 5)
g5 = Game('389125467')
g5.current = 6 # Force current to be the penultimate cup.
self.assertEqual(g5.current, 6)
self.assertEqual(g5.pick_up_cups(3), [7, 3, 8])
self.assertEqual(g5.cups, [9, 1, 2, 5, 4, 6])
g6 = Game('689125437')
g6.current = 7
self.assertEqual(g6.current, 7)
picked_up = g6.pick_up_cups(3)
self.assertEqual(picked_up, [6, 8, 9])
self.assertEqual(g6.cups, [1, 2, 5, 4, 3, 7])
g6.update_destination_cup()
self.assertEqual(g6.destination, 5)
g6.insert_cups(inserts=picked_up, insert_after=g6.destination)
self.assertEqual(g6.cups, [1, 2, 5, 6, 8, 9, 4, 3, 7])
g6.select_new_current_cup()
self.assertEqual(g6.current, 1)
g7 = Game('685219437')
g7.current = 2
self.assertEqual(g7.current, 2)
picked_up = g7.pick_up_cups(3)
self.assertEqual(picked_up, [1, 9, 4])
self.assertEqual(g7.cups, [6, 8, 5, 2, 3, 7])
g7.update_destination_cup()
self.assertEqual(g7.destination, 8)
g7.insert_cups(inserts=picked_up, insert_after=g7.destination)
self.assertEqual(g7.cups, [6, 8, 1, 9, 4, 5, 2, 3, 7])
g7.select_new_current_cup()
self.assertEqual(g7.current, 3)
if __name__ == '__main__':
unittest.main()
```
#### File: advent-of-code-2020/24-lobby-layout/solution24.py
```python
import sys
import pygame
import imageio # For making animated GIFs.
VERBOSE = ('-v' in sys.argv)
class Floor:
def __init__(self, zoom: int):
# Define the colors we will use in RGB format.
self.white_tile_colour = (240, 240, 240)
self.black_tile_colour = (40, 40, 40)
self.grout_colour = (100, 100, 100)
self.text_background = (255, 255, 255)
self.text_colour = (0, 0, 0)
self.hex_to_cartesian = {'e': (2, 0),
'w': (-2, 0),
'se': (1, 2),
'sw': (-1, 2),
'ne': (1, -2),
'nw': (-1, -2)}
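        # Hex tiles are addressed with "doubled" cartesian coordinates: east/west steps change x by 2, diagonal steps change x by 1 and y by 2.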
self.zoom = zoom # Lower zoom, means more tiles in the room.
self.screen_size = [1100, 800] # [width, height]
pygame.init() # Initialize the game engine.
self.screen = pygame.display.set_mode(self.screen_size)
pygame.font.init() # Start the Pygame text rendering system.
self.myfont = pygame.font.SysFont('Courier New', 30)
pygame.display.set_caption('Lobby Layout') # The window title.
self.black_tiles = set() # Each member is pair (x, y).
self.day = 0
def art_exhibit(self, days: int) -> bool:
"""Render image of the floor to screen. Then follow art exhibit flipping rules for parm number of days.
Returns a bool to indicate if the process was quit early."""
filenames = []
quiting = False
while not quiting and self.day < days:
for event in pygame.event.get(): # User did something.
if event.type == pygame.QUIT: # If user clicked close.
quiting = True # Flag that we are done so we exit this loop, and quit the game
self.draw_background() # Cover screen with white tiles.
for (x, y) in self.black_tiles:
self.draw_tile(x, y, 'B')
pygame.draw.rect(self.screen, self.text_background, (0, 0, 450, 40))
caption = 'Day ' + str(self.day) + ': ' + str(len(self.black_tiles)) + ' black tiles'
text_surface = self.myfont.render(caption, False, self.text_colour)
self.screen.blit(text_surface, (5, 5))
pygame.display.flip()
if self.day in [0, 100]:
pygame.image.save(self.screen, 'screenshots/day_' + str(self.day) + '.jpg')
if self.day <= 10:
screenshot_name = 'screenshots/screen' + format(self.day, '02') + '.png'
pygame.image.save(self.screen, screenshot_name)
filenames.append(screenshot_name)
if self.day == 10:
images = []
for filename in filenames:
images.append(imageio.imread(filename))
imageio.mimsave('solution_part2.gif', images, fps=1)
self.iterate()
pygame.quit()
return not quiting
def draw_background(self):
"""Tile the floor with default white tiles."""
for y in range(- self.screen_size[1] // self.zoom, self.screen_size[1] // self.zoom):
for x in range(- self.screen_size[0] // self.zoom, self.screen_size[0] // self.zoom):
self.draw_tile(x * 2, y * 4, 'W')
self.draw_tile(1 + x * 2, 2 + y * 4, 'W')
def draw_tile(self, x, y, colour: str):
"""Draw a tile on the screen."""
if colour == 'B':
tile_colour = self.black_tile_colour
else:
tile_colour = self.white_tile_colour
x_screen = self.screen_size[0] // 2 + x * self.zoom * 2
y_screen = self.screen_size[1] // 2 + y * self.zoom * 1.5
vertices = ((x_screen, y_screen),
(x_screen + 2 * self.zoom, y_screen + 1 * self.zoom),
(x_screen + 2 * self.zoom, y_screen + 3 * self.zoom),
(x_screen, y_screen + 4 * self.zoom),
(x_screen - 2 * self.zoom, y_screen + 3 * self.zoom),
(x_screen - 2 * self.zoom, y_screen + 1 * self.zoom),
(x_screen, y_screen))
# Draw the tile.
pygame.draw.polygon(surface=self.screen, color=tile_colour, points=vertices)
# Draw a grout border around the tile.
pygame.draw.polygon(surface=self.screen, color=self.grout_colour, points=vertices, width=1 + self.zoom // 5)
def flip(self, directions: list):
"""Follows the parm directions to identify a tile. Then flips that tile, from white to black (or vice versa)."""
x, y = 0, 0 # Start at the origin tile position.
for each_step in directions:
(dx, dy) = self.hex_to_cartesian[each_step]
x += dx
y += dy
if (x, y) in self.black_tiles:
self.black_tiles.remove((x, y))
else:
self.black_tiles.add((x, y))
def iterate(self):
"""Do an iteration of the art exhibit rules."""
# "The rules are applied simultaneously to every tile; put another way, it is first determined which tiles need
# to be flipped, then they are all flipped at the same time."
prev_black_tiles = self.black_tiles.copy()
self.black_tiles = set()
        # Find the set of tiles that need to be checked. It is every tile that is either already black, or
# a neighbor of a black tile. That is to say, white tiles that have no black neighbors don't need to be checked.
check = prev_black_tiles.copy()
for (x, y) in prev_black_tiles:
for dx, dy in self.hex_to_cartesian.values():
check.add((x + dx, y + dy))
# for range_y in range(- self.screen_size[1] // self.zoom, self.screen_size[1] // self.zoom):
# for range_x in range(- self.screen_size[0] // self.zoom, self.screen_size[0] // self.zoom):
# self.iteration_rule(prev_black_tiles, range_x * 2, range_y * 4)
# self.iteration_rule(prev_black_tiles, 1 + range_x * 2, 2 + range_y * 4)
for (x, y) in check:
self.iteration_rule(prev_black_tiles, x, y)
self.day += 1
def iteration_rule(self, previous_black_tiles: set, x, y):
"""Apply the iteration rule to parm tile position."""
adjacent = self.count_adjacent_blacks(previous_black_tiles, x, y)
# "Any black tile with zero or more than 2 black tiles immediately adjacent to it is flipped to white."
if (x, y) in previous_black_tiles and not (adjacent == 0 or adjacent > 2):
self.black_tiles.add((x, y))
# "Any white tile with exactly 2 black tiles immediately adjacent to it is flipped to black."
if (x, y) not in previous_black_tiles and adjacent == 2:
self.black_tiles.add((x, y))
def count_adjacent_blacks(self, previous_black_tiles: set, x, y) -> int:
"""Return the number of black tiles that are adjacent to the parm (x, y) tile."""
# "Here, tiles immediately adjacent means the six tiles directly touching the tile in question."
count = 0
for (dx, dy) in self.hex_to_cartesian.values():
if (x + dx, y + dy) in previous_black_tiles:
count += 1
return count
def text_to_directions(text: str) -> list:
"""Return a list of directions for parm string of codes."""
# "Because the tiles are hexagonal, every tile has six neighbors: east, southeast, southwest, west, northwest,
# and northeast. These directions are given in your list, respectively, as e, se, sw, w, nw, and ne.
# A tile is identified by a series of these directions with no delimiters; for example, esenee identifies the tile
# you land on if you start at the reference tile and then move one tile east, one tile southeast,
# one tile northeast, and one tile east."
directions = []
pos = 0
while pos < len(text):
if text[pos] in ['e', 'w']: # Single character directions.
directions.append(text[pos])
pos += 1
else: # Two character directions.
directions.append(text[pos:pos + 2])
pos += 2
return directions
def main():
filename = sys.argv[1]
zoom = int(sys.argv[2])
f = open(filename)
whole_text = f.read()
f.close()
if VERBOSE:
print('filename:', filename)
the_floor = Floor(zoom)
for each_instruction in whole_text.split('\n'):
directions = text_to_directions(each_instruction)
the_floor.flip(directions)
# "After all of the instructions have been followed, how many tiles are left with the black side up?"
print('Part 1:', len(the_floor.black_tiles))
if the_floor.art_exhibit(days=100):
print('Part 2:', len(the_floor.black_tiles))
if __name__ == "__main__":
main()
``` |
{
"source": "johntelforduk/advent-of-code-2021",
"score": 4
} |
#### File: advent-of-code-2021/04-giant-squid/solution4.py
```python
class Board:
def __init__(self, numbers: []):
"""Each board is made from parm list containing 5 strings of space separated numbers."""
self.completed = False
# def cartesian_to_grid(x, y): return y * 5 + x
# so...
# grid[0] is top left corner.
# grid[4] is top right corner.
# grid[12] is centre.
# grid[24] is bottom right.
self.grid = {}
g = 0
for row in numbers:
num_list = row.split(' ')
while len(num_list) > 0:
possible_num = num_list.pop(0)
if possible_num != '': # Sometimes there are double spaces between numbers.
self.grid[g] = possible_num
g += 1
def is_winner(self) -> bool:
"""Returns True if the board has either a completed row or a completed column."""
for i in range(5):
row_win, col_win = True, True # Assume both row & column about to be checked are winners.
for j in range(5):
                if self.grid[i * 5 + j] != '*':     # An unmarked cell in row i means row i has not won.
                    row_win = False
                if self.grid[i + j * 5] != '*':     # An unmarked cell in column i means column i has not won.
                    col_win = False
if row_win or col_win:
self.completed = True
return True
return False
def mark(self, drawn_number: str):
"""Mark off the parm drawn number on the board by setting it to a star."""
for g in self.grid:
if self.grid[g] == drawn_number:
self.grid[g] = '*'
def sum_unmarked(self) -> int:
"""Return the sum of the non-star numbers on the grid."""
total = 0
for g in self.grid:
if self.grid[g] != '*':
total += int(self.grid[g])
return total
f = open('input.txt')
t = f.read()
f.close()
r = t.split('\n')
selections = r.pop(0).split(',') # List of number selections, as list if strings.
boards = []
while len(r) > 0:
r.pop(0) # Discard the blank line between each board.
five_strings = [] # Each board is created from a list of 5 strings.
while len(five_strings) < 5:
five_strings.append(r.pop(0))
boards.append(Board(five_strings))
winner_count = 0 # No winners at the start.
while winner_count < len(boards): # Loop until all boards completed.
drawn = selections.pop(0)
for b in boards:
if b.completed is False:
b.mark(drawn)
if b.is_winner():
winner_count += 1
if winner_count == 1 or winner_count == len(boards):
print(b.sum_unmarked() * int(drawn))
```
#### File: advent-of-code-2021/12-passage-pathing/solution12_1.py
```python
def search(current_cave: str, visited: list):
global all_routes
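    # Depth-first search of the cave graph; small (lowercase) caves may be visited at most once per route.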
visited = visited.copy()
visited.append(current_cave)
if current_cave == 'end':
all_routes.append(visited)
return
for next_vertex in caves[current_cave]:
if not next_vertex.islower() or next_vertex not in visited:
search(current_cave=next_vertex, visited=visited)
f = open('input.txt')
t = f.read()
f.close()
caves = {}
for line in t.split('\n'):
a, b = line.split('-')
# Store from a -> b.
if a in caves:
caves[a].append(b)
else:
caves[a] = [b]
# Also store from b -> a.
if b in caves:
caves[b].append(a)
else:
caves[b] = [a]
print(caves)
all_routes = []
search(current_cave='start', visited=[])
print(all_routes)
print(len(all_routes))
```
#### File: advent-of-code-2021/13-transparent-origami/solution13.py
```python
f = open('input.txt')
t = f.read()
f.close()
raw_dots, raw_folds = t.split('\n\n')
dots = set()
def print_dots():
for dy in range(max_y + 1):
for dx in range(max_x + 1):
if (dx, dy) in dots:
print('#', end='')
else:
print('.', end='')
print()
print()
for raw_dot in raw_dots.split('\n'):
raw_x, raw_y = raw_dot.split(',')
dots.add((int(raw_x), int(raw_y)))
max_x, max_y = 0, 0
for px, py in dots:
max_x, max_y = max(px, max_x), max(py, max_y)
print(len(dots))
for instruction in raw_folds.split('\n'):
raw_axis, raw_position = instruction.split('=')
axis = raw_axis[-1]
position = int(raw_position)
new_dots = set()
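    # Reflection across the fold line: a dot beyond the line at coordinate c maps to
    # position - (c - position) == 2 * position - c; dots before the line keep their place.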
for x, y in dots:
if axis == 'x':
if x < position:
new_dots.add((x, y))
elif x > position:
new_dots.add((2 * position - x, y))
max_x = position - 1
else:
if y < position:
new_dots.add((x, y))
elif y > position:
new_dots.add((x, 2 * position - y))
max_y = position - 1
dots = new_dots.copy()
print(len(dots))
print_dots()
```
#### File: advent-of-code-2021/14-extended-polymerization/solution14.py
```python
from collections import Counter
def str_to_pairs(poly: str) -> dict:
"""For the parm string, return a dictionary of counts of adjacent paris of characters.
For example 'AAAB, returns {'AA': 2, AB: 1}."""
pairs = {}
for i in range(len(poly) - 1):
pair = poly[i:i+2]
if pair in pairs:
pairs[pair] += 1
else:
pairs[pair] = 1
return pairs
def base_to_pair_rules(base_rules: dict) -> dict:
"""For dictionary of base rules, like {'CH': 'B', 'HH': 'N'} return a dictionary of pair rules. These have same
keys as base rules, but the value is list of pairs.
For example, {'CH': 'B''} parm returns {'CH': ['CB', 'BH']}"""
pair_rules = {}
for pair in base_rules:
new_element = base_rules[pair]
pair_rules[pair] = [pair[0] + new_element, new_element + pair[1]]
return pair_rules
def next_step(poly_pairs: dict, pair_rules: dict) -> dict:
"""For parm dictionary of polymer pair counts, and transformation rules. Return the output of this transformation
step.
For example:
poly_pairs={'NN': 1, 'NC': 2, 'CB': 3}
    pair_rules={'NN': ['NC', 'CN'], 'NC': ['NB', 'BC'], 'CB': ['CH', 'HB']}
Returns: {'NC': 1, 'CN': 1, 'NB': 2, 'BC': 2, 'CH': 3, 'HB': 3}"""
pairs = {}
for pair in poly_pairs:
for new_pair in pair_rules[pair]:
if new_pair in pairs:
pairs[new_pair] += poly_pairs[pair]
else:
pairs[new_pair] = poly_pairs[pair]
return pairs
def pairs_to_counts(original_poly: str, poly_pairs: dict) -> dict:
"""For parm dictionary of polymer pairs, return dictionary of counts of each elements letter.
Most letters are 'double counted'. For example, consider the polymer 'ABC'. 'B' is in the pairs 'AB' and 'BC'.
Except the first and last letters of the original polymer. In this example, 'A' is in 'AB' only. So count all the
letters, add 1 extra to count for first and last letters of original polymer, then divide all counts by 2."""
counts = {}
for pair in poly_pairs:
for element in pair:
if element in counts:
counts[element] += poly_pairs[pair]
else:
counts[element] = poly_pairs[pair]
# Add 1 extra to the count for the first and last elements in the original polymer, as these are the only ones
# that are not double counted.
for element in [original_poly[0], original_poly[-1]]:
if element in counts:
counts[element] += 1
else:
            counts[element] = 1
# Finally, divide everything by 2 - as every element is double counted.
adjusted_counts = {}
for element in counts:
adjusted_counts[element] = counts[element] // 2
return adjusted_counts
f = open('input.txt')
t = f.read()
f.close()
raw_polymer, raw_rules = t.split('\n\n')
base_rules = {}
for r in raw_rules.split('\n'):
pair, result = r.split(' -> ')
base_rules[pair] = result
assert str_to_pairs('NNCB') == {'NN': 1, 'NC': 1, 'CB': 1}
assert str_to_pairs('AAAB') == {'AA': 2, 'AB': 1}
print(str_to_pairs('ABCDEF'))
print(Counter('ABCDEF').most_common())
pairs_dict = str_to_pairs('ABCDEF')
print(pairs_to_counts(original_poly='AXF', poly_pairs=pairs_dict))
assert base_to_pair_rules({'CH': 'B'}) == {'CH': ['CB', 'BH']}
assert base_to_pair_rules({'CH': 'B', 'HH': 'N'}) == {'CH': ['CB', 'BH'], 'HH': ['HN', 'NH']}
polymer = str_to_pairs(raw_polymer)
print('raw_polymer, polymer:', raw_polymer, polymer)
print('base_rules:', base_rules)
pair_rules = base_to_pair_rules(base_rules)
print('pair_rules:', pair_rules)
# assert (next_step(poly_pairs={'NN': 1, 'NC': 2, 'CB': 3}, pair_rules=pair_rules)) == {
# 'NC': 1, 'CN': 1, 'NB': 2, 'BC': 2, 'CH': 3, 'HB': 3}
print()
print(0, polymer)
print(pairs_to_counts(original_poly=raw_polymer, poly_pairs=polymer))
for step in range(1, 40 + 1):
new_polymer = next_step(polymer, pair_rules)
polymer = new_polymer.copy()
print()
print(step, polymer)
print(pairs_to_counts(original_poly=raw_polymer, poly_pairs=polymer))
# Find the smallest and largest counts.
counts = pairs_to_counts(original_poly=raw_polymer, poly_pairs=polymer)
min_count, max_count = None, None
for element in counts:
if min_count is None:
min_count = counts[element]
max_count = counts[element]
else:
min_count = min(counts[element], min_count)
max_count = max(counts[element], max_count)
print(max_count - min_count)
```
#### File: advent-of-code-2021/15-chiton/solution15_1.py
```python
def find_least_risk(current: tuple, goal: tuple, visited: list, total_risk: int):
global best
# This search is getting too circuitous, so best to give up.
    # Why 220? It's greater than cavern_x + cavern_y, which is the length of the shortest routes to the goal (no backtracking).
# The higher this number is, the longer the search takes. 220 is big enough to produce correct answer.
# Greater than about 950, and Python recursion stack is likely to overflow.
if len(visited) > 220:
return
# We've gone outside of the boundaries of the cavern.
if current not in cavern:
return
# Abandon paths that visit a coordinate that we've been to before in this search.
if current in visited:
return
total_risk += cavern[current]
# Abandon paths that have greater total risk than previous best found.
if current in best and best[current] <= total_risk:
return
# Exciting times... this is the best path to this coordinate so far!
best[current] = total_risk
# Great! We've reached the end.
if current == goal:
print(best[current])
return
# Let's continue the search.
new_visited = visited.copy()
new_visited.append(current)
x, y = current
for dx, dy in [(1, 0), (0, 1), (-1, 0), (0, -1)]: # Search West, East, North and South.
find_least_risk((x + dx, y + dy), goal, new_visited, total_risk)
cavern, best = {}, {}
f = open('input.txt')
t = f.read()
f.close()
cavern_x, cavern_y = 0, 0
for row in t.split('\n'):
cavern_x = 0
for risk in row:
cavern[cavern_x, cavern_y] = int(risk)
cavern_x += 1
cavern_y += 1
# Fix a gatepost error.
cavern_x -= 1
cavern_y -= 1
cavern[(0, 0)] = 0
find_least_risk(current=(0, 0), goal=(cavern_x, cavern_y), visited=[], total_risk=0)
```
#### File: advent-of-code-2021/16-packet-decoder/solution16.py
```python
def slicer(s: str, slice_pos: int) -> tuple:
"""Return a tuple of strings which are the parm string cleaved at its parm slice position."""
return s[0:slice_pos], s[slice_pos:]
def decode(bits: str) -> (str, int):
global VERSION_SUM
print()
print('parsing new packets')
print('bits:', bits)
# Every packet begins with a standard header: the first three bits encode the packet version.
packet_version_raw, bits = slicer(bits, 3)
packet_version = int(packet_version_raw, 2)
print('packet_version:', packet_version)
VERSION_SUM += packet_version
# ... and the next three bits encode the packet type ID.
packet_type_id_raw, bits = slicer(bits, 3)
packet_type_id = int(packet_type_id_raw, 2)
print('packet_type_id:', packet_type_id)
# Packets with type ID 4 represent a literal value.
if packet_type_id == 4:
print('parsing a literal')
literal_value_raw = ''
continue_ind = '1'
while continue_ind == '1':
five_bits, bits = slicer(bits, 5)
continue_ind, num_bits_raw = slicer(five_bits, 1)
literal_value_raw += num_bits_raw
print(' literal_value_raw:', literal_value_raw)
literal_value = int(literal_value_raw, 2)
print(' literal_value:', literal_value)
return bits, literal_value
# # The three unlabeled 0 bits at the end are extra due to the hexadecimal representation and should be ignored.
# _, bits = slicer(bits, 3)
# Every other type of packet (any packet with a type ID other than 4) represent an operator...
else:
length_type_id, bits = slicer(bits, 1)
operands = []
# If the length type ID is 0, then the next 15 bits are a number that represents the total length in bits of the
# sub-packets contained by this packet.
if length_type_id == '0':
sub_packet_length_raw, bits = slicer(bits, 15)
sub_packet_length = int(sub_packet_length_raw, 2)
print('sub_packet_length:', sub_packet_length)
sub_packet, bits = slicer(bits, sub_packet_length)
print('sub_packet:', sub_packet)
while len(sub_packet) > 0:
sub_packet, number = decode(sub_packet)
operands.append(number)
# If the length type ID is 1, then the next 11 bits are a number that represents the number of sub-packets
# immediately contained by this packet.
else:
assert length_type_id == '1'
num_of_sub_packets_raw, bits = slicer(bits, 11)
num_of_sub_packets = int(num_of_sub_packets_raw, 2)
print('num_of_sub_packets:', num_of_sub_packets)
for sub_packet_iterations in range(num_of_sub_packets):
bits, number = decode(bits)
operands.append(number)
print('operands:', operands)
# Packets with type ID 0 are sum packets - their value is the sum of the values of their sub-packets. If they
# only have a single sub-packet, their value is the value of the sub-packet.
if packet_type_id == 0:
return bits, sum(operands)
# Packets with type ID 1 are product packets - their value is the result of multiplying together the values of
# their sub-packets. If they only have a single sub-packet, their value is the value of the sub-packet.
elif packet_type_id == 1:
product = 1
for term in operands:
product *= term
return bits, product
# Packets with type ID 2 are minimum packets - their value is the minimum of the values of their sub-packets.
elif packet_type_id == 2:
return bits, min(operands)
# Packets with type ID 3 are maximum packets - their value is the maximum of the values of their sub-packets.
if packet_type_id == 3:
return bits, max(operands)
# Packets with type ID 5 are greater than packets - their value is 1 if the value of the first sub-packet is
# greater than the value of the second sub-packet; otherwise, their value is 0. These packets always have
# exactly two sub-packets.
if packet_type_id == 5:
if operands[0] > operands[1]:
return bits, 1
else:
return bits, 0
# Packets with type ID 6 are less than packets - their value is 1 if the value of the first sub-packet is less
# than the value of the second sub-packet; otherwise, their value is 0. These packets always have exactly two
# sub-packets.
if packet_type_id == 6:
if operands[0] < operands[1]:
return bits, 1
else:
return bits, 0
# Packets with type ID 7 are equal to packets - their value is 1 if the value of the first sub-packet is equal
# to the value of the second sub-packet; otherwise, their value is 0. These packets always have exactly two
# sub-packets.
if packet_type_id == 7:
if operands[0] == operands[1]:
return bits, 1
else:
return bits, 0
return bits, 0
def decode_hex(h: str) -> int:
h_to_b = {'0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100', '5': '0101', '6': '0110', '7': '0111',
'8': '1000', '9': '1001', 'A': '1010', 'B': '1011', 'C': '1100', 'D': '1101', 'E': '1110', 'F': '1111'}
start_bits = ''
for digit in h:
start_bits += h_to_b[digit]
# Discard the leftover, un-parsed string.
_, result = decode(start_bits)
return result
assert slicer(s='123456789', slice_pos=2) == ('12', '3456789')
VERSION_SUM = 0
decode_hex('8A004A801A8002F478')
assert VERSION_SUM == 16
VERSION_SUM = 0
decode_hex('620080001611562C8802118E34')
assert VERSION_SUM == 12
VERSION_SUM = 0
decode_hex('C0015000016115A2E0802F182340')
assert VERSION_SUM == 23
VERSION_SUM = 0
decode_hex('A0016C880162017C3686B18A3D4780')
assert VERSION_SUM == 31
assert decode_hex('D2FE28') == 2021
assert decode_hex('C200B40A82') == 3
assert decode_hex('04005AC33890') == 54
assert decode_hex('880086C3E88112') == 7
assert decode_hex('CE00C43D881120') == 9
assert decode_hex('D8005AC2A8F0') == 1
assert decode_hex('F600BC2D8F') == 0
assert decode_hex('9C005AC2F8F0') == 0
assert decode_hex('9C0141080250320F1802104A08') == 1
f = open('input.txt')
t = f.read()
f.close()
VERSION_SUM = 0
part2 = decode_hex(t)
print('Part 1:', VERSION_SUM)
print('Part 2:', part2)
```
#### File: advent-of-code-2021/18-snailfish/solution18.py
```python
import json
from itertools import permutations
def str_to_list(s: str) -> list:
"""For the parm list, return the equivalent string."""
if s.count('[') <= 1:
# print(s)
return json.loads(s)
# Strip off outer pair of square brackets.
inner = s[1:-1]
elements = []
depth = 0
this_element = ''
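    # Walk the inner string, tracking square-bracket depth so that only commas at
    # depth 0 split top-level elements; each element is then parsed recursively.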
for c in inner:
if c == ',' and depth == 0:
elements.append(str_to_list(this_element))
this_element = ''
else:
this_element += c
if c == '[':
depth += 1
if c == ']':
depth -= 1
elements.append(str_to_list(this_element))
return elements
def list_to_str(parm_list: list) -> str:
return (str(parm_list)).replace(' ', '')
def first_num(s: str) -> (int, int):
"""For parm string, return the start and end positions of the first number in the string."""
start_num, end_num = None, None
for i in range(len(s)):
if s[i].isdigit():
if start_num is None:
start_num = i
else: # Not a digit.
if start_num is not None and end_num is None:
end_num = i - 1 # Gatepost!
# Deal with all digits string case.
if start_num is not None and end_num is None:
end_num = len(s) - 1
return start_num, end_num
def last_num(s: str) -> (int, int):
"""For parm string, return the start and end positions of the last number in the string."""
start, end = first_num(s[::-1]) # Obtain position of first number in reversed string.
if start is None:
return None, None
return len(s) - end - 1, len(s) - start - 1
def first_10_or_more(s: str) -> (int, int):
start_num, end_num = None, None
for i in range(len(s)):
if s[i].isdigit():
if start_num is None:
start_num = i
else: # Not a digit.
if start_num is not None and end_num is None:
end_num = i - 1 # Gatepost!
if start_num == end_num: # 1 digit number, so restart the search.
start_num, end_num = None, None
# Deal with all digits string case.
if start_num is not None and end_num is None:
end_num = len(s) - 1
if start_num == end_num: # 1 digit number.
return None, None
return start_num, end_num
def begin_explode(num, depth: int) -> (list, int, int):
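    # Recursively find the leftmost pair nested at depth 4, replace it with the
    # placeholder 'X', and pass its two values back up to be added to the neighbours.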
if type(num) == int:
return num, None, None
if depth == 4:
left = num[0]
right = num[1]
return 'X', left, right
left = num[0]
right = num[1]
stop_exploding = False
inside_left, add_left, add_right = begin_explode(left, depth + 1)
if add_left is not None or add_right is not None:
stop_exploding = True
inside_right = right
if not stop_exploding:
inside_right, add_left, add_right = begin_explode(right, depth + 1)
return [inside_left, inside_right], add_left, add_right
def add_to_first_num(s: str, add_on: int) -> str:
start, end = first_num(s)
if start is None:
return s
before = s[0:start]
num = str(int(s[start:end + 1]) + add_on)
after = s[end + 1:]
return before + num + after
def add_to_last_num(s: str, add_on: int) -> str:
start, end = last_num(s)
if start is None:
return s
before = s[0:start]
num = str(int(s[start:end + 1]) + add_on)
after = s[end + 1:]
return before + num + after
def explode(s: str) -> str:
result, add_left, add_right = begin_explode(str_to_list(s), 0)
# print(result, add_left, add_right)
result_str = list_to_str(result)
if 'X' not in result_str:
return s
left, right = result_str.split("'X'")
new_left = add_to_last_num(left, add_left)
new_right = add_to_first_num(right, add_right)
return new_left + '0' + new_right
def split(s: str) -> str:
start, end = first_10_or_more(s)
if start is None:
return s
before = s[0:start]
num = int(s[start:end + 1])
after = s[end + 1:]
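    # Split rule: the left half rounds down (num // 2) and the right half rounds up.
    # The +0.1 nudge forces exact halves upwards, since Python's round(6.5) would give 6.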
return before + '[' + str(num // 2) + ',' + str(round((num + 0.1) / 2)) + ']' + after
def reduce(s: str) -> str:
new_str = explode(s)
if new_str != s:
return reduce(new_str)
else: # Explode had no effect, so lets try a split.
new_str = split(s)
if new_str != s:
return reduce(new_str)
return s
def add(s1: str, s2: str) -> str:
return reduce('[' + s1 + ',' + s2 + ']')
def magnitude(num) -> int:
if type(num) == int:
return num
left = num[0]
right = num[1]
return 3 * magnitude(left) + 2 * magnitude(right)
assert str_to_list('[1,2]') == [1, 2]
assert str_to_list('[[1,2],3]') == [[1, 2], 3]
assert str_to_list('[9,[8,7]]') == [9, [8, 7]]
assert str_to_list('[[1,9],[8,5]]') == [[1, 9], [8, 5]]
assert str_to_list('[[[[1,2],[3,4]],[[5,6],[7,8]]],9]') == [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 9]
assert str_to_list('[[[9,[3,8]],[[0,9],6]],[[[3,7],[4,9]],3]]') == [[[9, [3, 8]], [[0, 9], 6]], [[[3, 7], [4, 9]], 3]]
assert str_to_list('[[[[1,3],[5,3]],[[1,3],[8,7]]],[[[4,9],[6,9]],[[8,2],[7,3]]]]') == [[[[1, 3], [5, 3]], [[1, 3], [8, 7]]], [[[4, 9], [6, 9]], [[8, 2], [7, 3]]]]
assert list_to_str([1, 2]) == '[1,2]'
assert list_to_str([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 9]) == '[[[[1,2],[3,4]],[[5,6],[7,8]]],9]'
assert first_num(',4],4],[7,[[8,4],9]]],[1,1]]') == (1, 1)
assert first_num('abcde123zse345fg') == (5, 7)
assert first_num('1234') == (0, 3)
assert first_num('abcdefg') == (None, None)
assert first_num('') == (None, None)
assert last_num('1234') == (0, 3)
assert last_num('abcde123ss10a') == (10, 11)
assert last_num('dddffes') == (None, None)
assert last_num('') == (None, None)
assert add_to_first_num('abcde123zse345fg', 10) == 'abcde133zse345fg'
assert add_to_first_num('abcdezsefg', 10) == 'abcdezsefg'
assert add_to_first_num('abcd0eert', 12) == 'abcd12eert'
assert add_to_last_num('abcde123zse345fg', 10) == 'abcde123zse355fg'
assert add_to_last_num('abcdezsefg', 10) == 'abcdezsefg'
assert add_to_last_num('abcd0eert', 12) == 'abcd12eert'
assert explode('[[[[[9,8],1],2],3],4]') == '[[[[0,9],2],3],4]'
assert explode('[[[[[4,3],4],4],[7,[[8,4],9]]],[1,1]]') == '[[[[0,7],4],[7,[[8,4],9]]],[1,1]]'
assert explode('[[[[0,7],4],[7,[[8,4],9]]],[1,1]]') == '[[[[0,7],4],[15,[0,13]]],[1,1]]'
assert first_10_or_more('12345efg') == (0, 4)
assert first_10_or_more('a3bc100efg') == (4, 6)
assert first_10_or_more('123') == (0, 2)
assert first_10_or_more('1') == (None, None)
assert first_10_or_more('dfgsdsgsdf') == (None, None)
assert first_10_or_more('4dfdf3dfdfk9dffff0') == (None, None)
assert split('10') == '[5,5]'
assert split('11') == '[5,6]'
assert split('12') == '[6,6]'
assert split('[[[[0,7],4],[15,[0,13]]],[1,1]]') == '[[[[0,7],4],[[7,8],[0,13]]],[1,1]]'
assert split('[[[[0,7],4],[[7,8],[0,13]]],[1,1]]') == '[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]'
assert split('[[[[0,7],4],[[7,8],[0,13]]],[1,21]]') == '[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,21]]'
assert reduce('[[[[[4,3],4],4],[7,[[8,4],9]]],[1,1]]') == '[[[[0,7],4],[[7,8],[6,0]]],[8,1]]'
assert add('[1,2]', '[[3,4],5]') == '[[1,2],[[3,4],5]]'
assert add('[[[[4,3],4],4],[7,[[8,4],9]]]', '[1,1]') == reduce('[[[[[4,3],4],4],[7,[[8,4],9]]],[1,1]]')
assert add('[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]', '[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]') == '[[[[4,0],[5,4]],[[7,7],[6,0]]],[[8,[7,7]],[[7,9],[5,0]]]]'
assert add('[[[[4,0],[5,4]],[[7,7],[6,0]]],[[8,[7,7]],[[7,9],[5,0]]]]', '[[2,[[0,8],[3,4]]],[[[6,7],1],[7,[1,6]]]]') == '[[[[6,7],[6,7]],[[7,7],[0,7]]],[[[8,7],[7,7]],[[8,8],[8,0]]]]'
assert magnitude([9, 1]) == 29
assert magnitude([[9, 1], [1, 9]]) == 129
assert magnitude([[1, 2], [[3, 4], 5]]) == 143
assert magnitude([[[[8, 7], [7, 7]], [[8, 6], [7, 7]]], [[[0, 7], [6, 6]], [8, 7]]]) == 3488
f = open('input.txt')
t = f.read()
f.close()
numbers = []
so_far = None
for line in t.split('\n'):
numbers.append(line)
# print(so_far, line)
if so_far is None:
so_far = line
else:
so_far = add(so_far, line)
print('Part 1:', magnitude(str_to_list(so_far)))
largest = 0
for n1, n2 in permutations(numbers, 2):
m = magnitude(str_to_list(add(n1, n2)))
largest = max(largest, m)
print('Part 2:', largest)
```
#### File: advent-of-code-2021/21-dirac-dice/solution21_1.py
```python
class Die:
def __init__(self):
self.rolls = 0 # Number of times the dies has been rolled.
self.value = None # Value the die returned last time it was rolled.
def roll(self):
if self.value is None: # This die always rolls 1 first.
self.value = 1
else:
self.value += 1
if self.value >= 101:
self.value = 1 # ... and so on up to 100, after which it starts over at 1 again.
self.rolls += 1
class Player:
def __init__(self, start: int):
self.space = start # Current space the player is on.
self.score = 0
self.winner = False
class Game:
def __init__(self, p1_start: int, p2_start: int):
self.players = [Player(p1_start), Player(p2_start)]
self.die = Die()
def one_roll(self, player_num: int):
curr_space = self.players[player_num].space
self.die.roll()
new_space = (curr_space - 1 + self.die.value) % 10 + 1
self.players[player_num].space = new_space
def turn(self, player_num: int):
for rolls in range(3):
self.one_roll(player_num)
self.players[player_num].score += self.players[player_num].space
if self.players[player_num].score >= 1000:
self.players[player_num].winner = True
test_die = Die()
assert test_die.rolls == 0
test_die.roll()
assert test_die.value == 1
assert test_die.rolls == 1
for i in range(100):
test_die.roll()
assert test_die.value == 1
assert test_die.rolls == 101
test_game = Game(4, 8)
assert test_game.die.rolls == 0
assert test_game.die.value is None
assert test_game.players[0].space == 4
assert test_game.players[1].space == 8
test_game.one_roll(player_num=0)
assert test_game.players[0].space == 5
test_game.one_roll(player_num=0)
test_game.one_roll(player_num=0)
assert test_game.players[0].space == 10
test_game.turn(1)
assert test_game.players[1].space == 3
assert test_game.players[1].score == 3
test_game.one_roll(player_num=0)
test_game.one_roll(player_num=0)
test_game.one_roll(player_num=0)
assert test_game.players[0].space == 4
# ----
my_game = Game(10, 6)
curr_player = 0
game_over = False
while not game_over:
my_game.turn(curr_player)
game_over = my_game.players[curr_player].winner
curr_player = (curr_player + 1) % 2
print(my_game.die.rolls)
print(my_game.players[curr_player].score)
print(my_game.die.rolls * my_game.players[curr_player].score)
```
#### File: advent-of-code-2021/23-amphipod/solution23_1.py
```python
import queue
def manhattan(t1b: tuple, t2b: tuple) -> int:
"""For parm pair of coordinate tuples, each (x, y). Return the Manhattan distance between them."""
t1x, t1y = t1b
t2x, t2y = t2b
return abs(t1x - t2x) + abs(t1y - t2y)
def distance(origin: (int, int), destination: (int, int), positions: dict):
species = positions[origin]
energy_per_step = {'A': 1, 'B': 10, 'C': 100, 'D': 1000}[species]
return energy_per_step * manhattan(origin, destination)
def dict_hash(di: dict):
# Based on one of the answers here, https://stackoverflow.com/questions/5884066/hashing-a-dictionary
return hash(frozenset(di.items()))
def home_column(species: str) -> int:
return {'A': 3, 'B': 5, 'C': 7, 'D': 9}[species]
def in_hallway(position: (int, int)) -> bool:
_, row = position
return row == 1
def other_species(species: str) -> list:
return {'A': ['B', 'C', 'D'],
'B': ['A', 'C', 'D'],
'C': ['A', 'B', 'D'],
'D': ['A', 'B', 'C']}[species]
def print_burrow(creatures: dict):
graphic = """#############
#...........#
###.#.#.#.###
#.#.#.#.#
#########"""
x, y = 0, 0
for g in graphic:
if (x, y) in creatures:
print(creatures[x, y], end='')
else:
print(g, end='')
x += 1
if g == '\n':
x = 0
y += 1
print()
def str_to_creatures(s: str) -> dict:
creatures = {}
x, y = 0, 0
for c in s:
if c in ['A', 'B', 'C', 'D']:
creatures[(x, y)] = c
x += 1
if c == '\n':
x = 0
y += 1
return creatures
def currently_reachable(position: (int, int), creature_positions: dict, found_so_far: list) -> list:
"""For the creature at parm position. Return list of all possible places it can reach right now."""
# No move possible.
if len(edges[position]) == 0:
return []
for candidate_pos in edges[position]:
# Check no creature in the candidate position.
# Also check it is not a position that we've found already.
if candidate_pos not in creature_positions and candidate_pos not in found_so_far:
found_so_far.append(candidate_pos)
currently_reachable(candidate_pos, creature_positions, found_so_far)
return found_so_far
def possible_moves(position: (int, int), all_positions: dict) -> list:
"""For the creature at parm position. Return a list of its possible next moves."""
candidates_moves = currently_reachable(position, all_positions, [])
valid_moves = []
if in_hallway(position):
species = all_positions[position]
home_col = home_column(species)
for this_candidate in candidates_moves:
(x, y) = this_candidate
if x == home_col: # Will only move from hallway to its own home column.
# Check there no creatures of other species in the home column.
all_my_species = True
for check_y in range(2, 3 + 1):
if (x, check_y) in all_positions:
if all_positions[x, check_y] in other_species(species):
all_my_species = False
if all_my_species:
# Check row is first unoccupied one.
first_unoccupied = None
for check_y in range(3, 2 - 1, -1):
if first_unoccupied is None and (x, check_y) not in all_positions:
first_unoccupied = check_y
if y == first_unoccupied:
valid_moves.append(this_candidate)
else: # In a home. So only places to go are hallways.
for this_candidate in candidates_moves:
if in_hallway(this_candidate):
valid_moves.append(this_candidate)
return valid_moves
# (1, 1) (2, 1) (4, 1) (6, 1) (8, 1) (10, 1) (11, 1)
# (3, 2) (5, 2) (7, 2) (9, 2)
# (3, 3) (5, 3) (7, 3) (9, 3)
# (3, 4) (5, 4) (7, 4) (9, 4)
# (3, 5) (5, 5) (7, 5) (9, 5)
all_home_hash = dict_hash({(3, 2): 'A', (3,3): 'A', (5,2): 'B', (5,3): 'B', (7, 2): 'C', (7, 3): 'C', (9, 2): 'D', (9, 3): 'D'})
# hallway = [(1, 1), (2, 1), (4, 1), (6, 1), (8, 1), (10, 1), (11, 1)]
edges_1_way = [(1, 1, 2, 1), (2, 1, 4, 1), (4, 1, 6, 1), (6, 1, 8, 1), (8, 1, 10, 1), (10, 1, 11, 1),
(2, 1, 3, 2), (4, 1, 3, 2), (4, 1, 5, 2), (6, 1, 5, 2), (6, 1, 7, 2), (8, 1, 7, 2),
(8, 1, 9, 2), (10, 1, 9, 2),
(3, 2, 3, 3), (5, 2, 5, 3), (7, 2, 7, 3), (9, 2, 9, 3)]
# All edges are bi-directional. So work out the opposite directions.
edges_2_ways = edges_1_way.copy()
for xo, yo, xd, yd in edges_1_way:
edges_2_ways.append((xd, yd, xo, yo))
# k = Origin coordinates (x, y).
# v = List of possible destinations from the origin, each (x, y).
edges = {}
for (xo, yo, xd, yd) in edges_2_ways:
if (xo, yo) in edges:
destinations = edges[(xo, yo)]
destinations.append((xd, yd))
else:
edges[(xo, yo)] = [(xd, yd)]
home_diagram = """#############
#...........#
###B#C#B#D###
#A#D#C#A#
#########"""
home_creatures = str_to_creatures(home_diagram)
print_burrow(home_creatures)
print((3, 2), home_creatures[(3, 2)], possible_moves((3,2), home_creatures))
print((3, 3), home_creatures[(3, 3)], possible_moves((3, 3), home_creatures))
print()
assert manhattan((3, 2), (2, 1)) == 2
assert manhattan((2, 1), (3, 2)) == 2
assert manhattan((11, 1), (10, 1)) == 1
assert manhattan((3, 2), (3, 3)) == 1
assert distance((3, 2), (1, 1), home_creatures) == 30 # 'B' moves 3 squares.
assert distance((9, 2), (1, 1), home_creatures) == 9000 # 'D' moves 9 squares.
assert dict_hash({(3, 2): 'A', (3, 3): 'B'}) == dict_hash({(3, 2): 'A', (3, 3): 'B'}) # Not random.
assert dict_hash({(3, 2): 'A', (3, 3): 'B'}) == dict_hash({(3, 3): 'B', (3, 2): 'A'}) # Not random.
assert dict_hash({(3, 2): 'A', (3, 3): 'B'}) != dict_hash({(3, 2): 'A', (3, 4): 'B'}) # Change a key.
assert dict_hash({(3, 2): 'A', (3, 3): 'B'}) != dict_hash({(3, 2): 'A', (3, 3): 'C'}) # Change a value.
assert home_column('A') == 3
assert home_column('D') == 9
assert in_hallway((1, 1)) is True
assert in_hallway((3, 2)) is False
assert other_species('A') == ['B', 'C', 'D']
assert other_species('D') == ['A', 'B', 'C']
diagram2 = """#############
#.....B.....#
###B#C#.#D###
#A#D#C#A#
#########"""
creatures2 = str_to_creatures(diagram2)
print_burrow(creatures2)
print((3, 2), creatures2[(3, 2)], possible_moves((3,2), creatures2))
print((6, 1), creatures2[(6, 1)], possible_moves((6, 1), creatures2))
print()
diagram3 = """#############
#.....B...C.#
###B#C#.#D###
#A#D#.#A#
#########"""
creatures3 = str_to_creatures(diagram3)
print_burrow(creatures3)
print((9, 2), creatures3[(9, 2)], possible_moves((9, 2), creatures3))
print((10, 1), creatures3[(10, 1)], possible_moves((10, 1), creatures3))
print((6, 1), creatures3[(6, 1)], possible_moves((6, 1), creatures3))
print()
diagram4 = """#############
#...B.B...C.#
###.#C#.#D###
#A#D#.#A#
#########"""
creatures4 = str_to_creatures(diagram4)
print_burrow(creatures4)
print((5, 2), creatures4[(5, 2)], possible_moves((5, 2), creatures4))
print()
diagram5 = """#############
#C.........D#
###B#B#.#.###
#A#D#C#A#
#########"""
creatures5 = str_to_creatures(diagram5)
print_burrow(creatures5)
print((1, 1), creatures5[(1, 1)], possible_moves((1, 1), creatures5))
print((11, 1), creatures5[(11, 1)], possible_moves((11, 1), creatures5))
print()
f = open('input.txt')
t = f.read()
f.close()
amphipods = str_to_creatures(t)
source_hash = dict_hash(amphipods)
# self.graph[source_hash] = self.amphipods
pq = queue.PriorityQueue()
unfinished = set()
discovered = {}
dist = {}
# for v_hash in self.graph:
# dist[v_hash] = sys.maxsize
#
# if v_hash != source_hash:
# pq.put((dist[v_hash], v_hash))
# unfinished.add(v_hash)
# dist[source] ← 0
dist[source_hash] = 0
unfinished.add(source_hash)
discovered[source_hash] = amphipods
pq.put((dist[source_hash], source_hash))
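# Dijkstra's algorithm over burrow states: each state (a dict of creature positions) is
# keyed by dict_hash, and the edges are single-creature moves weighted by their energy cost.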
while not pq.empty():
print('pq.qsize(), len(unfinished):', pq.qsize(), len(unfinished))
_, u = pq.get()
if u in unfinished:
unfinished.remove(u)
u_positions = discovered[u]
for u_creature in u_positions:
species = u_positions[u_creature]
for pick_move in possible_moves(u_creature, u_positions):
v_positions = u_positions.copy()
del v_positions[u_creature]
v_positions[pick_move] = species
v_hash = dict_hash(v_positions)
# print('u_creature', u_creature)
# print('pick_move', pick_move)
# print('species', species)
# print('u_positions', u_positions)
# print('v_positions', v_positions)
alt = dist[u] + distance(u_creature, pick_move, u_positions)
if v_hash in unfinished:
alt = dist[u] + distance(u_creature, pick_move, u_positions)
if alt < dist[v_hash]:
dist[v_hash] = alt
#
# if v_hash in unfinished:
pq.put((alt, v_hash))
elif v_hash not in discovered:
discovered[v_hash] = v_positions
pq.put((alt, v_hash))
unfinished.add(v_hash)
dist[v_hash] = alt
# return dist[], prev[]
print(dist[all_home_hash])
# print(len(unfinished), len(dist), len(discovered))
``` |
{
"source": "johntelforduk/aoc-py-2018",
"score": 4
} |
#### File: aoc-py-2018/aoc-d14/aoc-d14.py
```python
def int_list_to_str(list): # Convert a list of integers to a string.
result = ""
for i in list:
result = result + str(i)
return (result)
class Kitchen:
def __init__(self, target_pattern):
self.recipes = [] # List of recipes created so far.
        self.num_recipes = 0                # Number of recipes produced so far.
self.elves = [] # List of Elves working in the kitchen.
self.num_elves = 0 # Number of elves working in the kitchen.
self.target_pattern = target_pattern # The pattern of recipes that the kitchen is looking for.
self.target_found = False # Has the target recipe pattern been found yet?
self.target_found_recipes = 0 # Target pattern found after this many recipes.
def __str__(self):
recs = ""
for r in range(0, self.num_recipes):
recipe_str = str(self.recipes[r])
if r == self.elves[0]: # Round brackets around 1st elf's current recipe.
recs = recs + "(" + recipe_str + ")"
elif r == self.elves[1]:
recs = recs + "[" + recipe_str + "]" # Square brackets around 2ns elf's current recipe.
else:
recs = recs + " " + recipe_str + " "
return recs
def add_recipe(self, new_recipe):
self.num_recipes = self.num_recipes + 1
self.recipes.append(new_recipe)
def add_elf(self, start_recipe):
self.add_recipe(start_recipe)
self.elves.append(self.num_recipes - 1)
self.num_elves = self.num_elves + 1
def check_target_found(self):
if self.num_recipes >= len(self.target_pattern) and self.target_found is False:
last_few_recipes = \
self.recipes[(self.num_recipes - len(self.target_pattern)) \
:(self.num_recipes + len(self.target_pattern))]
if self.target_pattern == last_few_recipes:
self.target_found = True
self.target_found_recipes = self.num_recipes
def new_recipe(self):
rec1 = self.recipes[self.elves[0]] # 1st elf's current recipe.
rec2 = self.recipes[self.elves[1]] # 2nd elf's current recipe.
        new_rec = rec1 + rec2                       # Calculate the new recipe.
new_rec_div10 = new_rec // 10 # New recipe DIV 10
new_rec_mod10 = new_rec % 10 # New recipe MOD 10
if new_rec_div10 > 0:
self.add_recipe(new_rec_div10)
self.check_target_found()
self.add_recipe(new_rec_mod10)
self.check_target_found()
def move_elves(self):
for e in range (0, self.num_elves):
steps = self.recipes[self.elves[e]] + 1 # How many steps forwards for this elf.
self.elves[e] = (self.elves[e] + steps) % self.num_recipes # Move him that far, looping back if necessary.
practices = 793031
choc_kitchen = Kitchen([7,9,3,0,3,1]) # Create a hot chocolate kitchen, that will look for the parm pattern.
choc_kitchen.add_elf(3) # Elf #1 makes recipe 3.
choc_kitchen.add_elf(7) # Elf #2 makes recipe 7.
if practices < 50:
print(choc_kitchen)
p1_done = False
p2_done = False
while p1_done is False or p2_done is False:
choc_kitchen.new_recipe()
choc_kitchen.move_elves()
if practices < 50:
print (choc_kitchen)
else:
if (choc_kitchen.num_recipes % 100000) == 0:
print ("Num recipes so far : %d" % choc_kitchen.num_recipes)
# Part 1 of the puzzle.
if choc_kitchen.num_recipes > (practices + 10) and p1_done is False:
print ("After %d recipes, the scores of the next ten would be %s." \
% (practices, int_list_to_str(choc_kitchen.recipes[(practices):(practices + 10)])))
p1_done = True
# Part 2 of the puzzle.
if choc_kitchen.target_found and p2_done is False:
print ("%s first appears after %d recipes." \
% (int_list_to_str(choc_kitchen.target_pattern), \
choc_kitchen.target_found_recipes - len(choc_kitchen.target_pattern)))
p2_done = True
``` |
{
"source": "johntelforduk/deep-racer",
"score": 4
} |
#### File: johntelforduk/deep-racer/cartesian_coordinates.py
```python
import math
def translation(vertex, delta):
"""Move, or slide, a coordinate in 2d space."""
[vertex_x, vertex_y] = vertex
[delta_x, delta_y] = delta
return [vertex_x + delta_x, vertex_y + delta_y]
def scale(vertex, scale_factor):
"""Move a coordinate closer / further from origin.
If done for all vertices in a 2d shape, it has the effect of changing the size of the whole shape."""
[vertex_x, vertex_y] = vertex
return [vertex_x * scale_factor, vertex_y * scale_factor]
def rotate_around_origin(vertex, rotation_degrees):
"""For parm coordinate and rotation in degrees, return its new coordinate after rotation."""
[vertex_x, vertex_y] = vertex
rotation_radians = math.radians(rotation_degrees)
# Method is described here,
# https://en.wikipedia.org/wiki/Rotation_of_axes#Derivation
return[vertex_x * math.cos(rotation_radians) + vertex_y * math.sin(rotation_radians),
- vertex_x * math.sin(rotation_radians) + vertex_y * math.cos(rotation_radians)
]
def rotate_around_a_point(vertex, pivot, rotation_degrees):
"""Rotate a parm coordinate around a parm pivot point by parm number of degrees."""
[pivot_x, pivot_y] = pivot
# Method has 3 steps,
# 1. Move the vertex so that centre of rotations is now the origin.
# 2. Rotate around origins.
# 3. Do the opposite of move in 1.
moved_vertex = translation(vertex, [-pivot_x, -pivot_y]) # Step 1.
rotated_vertex = rotate_around_origin(moved_vertex, rotation_degrees) # Step 2.
re_moved_vertex = translation(rotated_vertex, pivot) # Step 3.
return re_moved_vertex
```
#### File: johntelforduk/deep-racer/parse_logs.py
```python
def filename_to_list_of_strings(filename):
output = []
with open(filename) as fileobj:
for line in fileobj:
output.append((line.replace('\n', '')).split(' '))
return output
# For example,
# input_list = [['foo', 'bar', 'harry'], ['foo', 'miss', 'david']]
# target = 'bar'
# output = [['foo', 'bar', 'harry']]
def filter_by_2nd_item(input_list, target):
output = []
for item in input_list:
if item[1] == target:
output.append(item)
return output
# Make a list of waypoints out of filtered list of waypoint strings.
def make_list_of_waypoints(input_list):
output = []
pos = -1
for item in input_list[0]: # Every item in the list is same, so arbitrarily pick 1st one.
pos += 1
        if pos >= 2:                        # Ignore the first two items; the rest come in (waypoint, x, y) triplets.
tuple_pos = (pos - 2) % 3
if tuple_pos == 0:
i1 = item
elif tuple_pos == 1:
i2 = item
else:
waypoint = {}
waypoint['waypoint'] = int(i1)
waypoint['x'] = float(i2)
waypoint['y'] = float(item)
output.append(waypoint) # Append the tuple.
return output
# 'true' -> True, 'false' -> False
def string_to_bool(s):
return s == 'true'
def make_list_of_statuses(input_list):
output = []
for s in input_list:
status = {}
status['timestamp'] = float(s[2])
status['all_wheels_on_track'] = string_to_bool(s[3])
status['x'] = float(s[4])
status['y'] = float(s[5])
status['distance_from_center'] = float(s[6])
status['is_left_of_center'] = string_to_bool(s[7])
status['heading'] = float(s[8])
status['progress'] = float(s[9])
status['steps'] = int(s[10])
status['speed'] = float(s[11])
status['steering_angle'] = float(s[12])
status['track_width'] = float(s[13])
status['max_speed'] = float(s[14])
status['max_steer'] = float(s[15])
status['near_centre_of_track'] = string_to_bool(s[16])
status['quite_near_centre_of_track'] = string_to_bool(s[17])
status['heading_in_right_direction'] = string_to_bool(s[18])
status['turning_hard'] = string_to_bool(s[19])
status['going_straight'] = string_to_bool(s[20])
status['going_fast'] = string_to_bool(s[21])
status['going_slowly'] = string_to_bool(s[22])
status['correcting_course'] = string_to_bool(s[23])
status['rule_number'] = int(s[24])
status['rule_description'] = s[25].replace('_', ' ')
status['reward_level'] = s[26].replace('_', ' ')
status['score'] = float(s[27])
output.append(status)
return output
```
#### File: johntelforduk/deep-racer/test_cartesian_coordinates.py
```python
import cartesian_coordinates as cc
import unittest
from math import sqrt
class TestCartesianCoordinates(unittest.TestCase):
def test_translation(self):
self.assertEqual(cc.translation([2.0, 3.0], [10.0, 11.0]), [12.0, 14.0])
def test_scale(self):
self.assertEqual(cc.scale([2.0, 3.0], 5), [10.0, 15.0])
def test_rotate_around_origin(self):
new_position = cc.rotate_around_origin([1.0, 0.0], 90)
self.assertAlmostEqual(new_position[0], 0.0) # X
self.assertAlmostEqual(new_position[1], -1.0) # Y
new_position = cc.rotate_around_origin([1.0, 0.0], -45)
self.assertAlmostEqual(new_position[0], sqrt(2) / 2) # X
self.assertAlmostEqual(new_position[1], sqrt(2) / 2) # Y
def test_rotate_around_a_point(self):
self.assertEqual(cc.rotate_around_a_point([2.0, 3.0], [2.0, 2.0], 90.0), [3.0, 2.0])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johntelforduk/grammar-bot",
"score": 3
} |
#### File: johntelforduk/grammar-bot/server.py
```python
from flask import Flask, render_template
from datetime import datetime
import tweepy
from sys import argv
from grammar_bot import GrammarBot
from recent_tweets import RecentTweets
from dotenv import load_dotenv
from os import getenv
DEBUG_ENABLED = '-d' in argv
def debug(msg):
"""If in debug mode, send a debug message to stdout."""
if DEBUG_ENABLED:
print("Debug: {}".format(msg))
def now_str() -> str:
"""Return the time right now as a nicely formatted string."""
time_list = str(datetime.now()).split('.')
return time_list[0]
def obtain_twitter_api():
"""Do Twitter authentication. Return a Tweepy API object."""
consumer_key = getenv('API_KEY')
consumer_secret = getenv('API_SECRET_KEY')
access_token = getenv('ACCESS_TOKEN')
access_token_secret = getenv('ACCESS_TOKEN_SECRET')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
return tweepy.API(auth)
def check_tweet(status, mode: str):
"""Check whether the parm tweet needs to be 'corrected'. If it does, then send the user a tweet that
corrects it!"""
user_id = str(status.user.id) # Most of the Twitter APIs use string user IDs.
tweet_id = status.id
user_screen_name = '@' + status.user.screen_name # Put an @ on front of user name.
text = status.text
debug('user_id={} tweet_id={} user_screen_name={} text={}'.format(user_id, tweet_id, user_screen_name, text))
# Skip tweets which are retweets. The key word might be in the quoted tweet.
if text[0:3] != 'RT ':
suggestion = bot.grammar_check(text)
debug('suggestion={}'.format(suggestion))
# Skip tweets that the bot didn't find grammatical errors in.
if suggestion is not '':
# Is this tweet more recent than last tweet we corrected for this user?
if tweet_id > recent_tweets.recent_tweet_id(user_id):
reply_text = 'Hi ' + user_screen_name + ", I think you meant '" + suggestion + "'."
debug('tweet_id={} user_id={}'.format(tweet_id, user_id))
print()
print('{}: {}'.format(user_screen_name, text))
print('@HelperGrammar: {}'.format(reply_text))
api.update_status(status=reply_text,
in_reply_to_status_id=tweet_id,
)
# Put row into the DynamoDB table.
status = {'user_id': user_id,
'tweet_id': tweet_id,
'timestamp': now_str(),
'user_screen_name': user_screen_name,
'text': text,
'reply_text': reply_text}
recent_tweets.put_recent_tweet(parm_user_id=user_id, parm_status=status, parm_mode=mode)
def check_recent_tweets(user_id: str):
"""Look at the most recent 20 tweets in this user's timeline, since last tweet that the bot corrected.
Check each of these tweets to see whether it needs to have its grammar corrected."""
debug('user_id={}'.format(user_id))
most_recent_tweet_id = recent_tweets.recent_tweet_id(user_id)
tweets = api.user_timeline(user_id=user_id)
for this_tweet in tweets:
if this_tweet.id > most_recent_tweet_id:
check_tweet(status=this_tweet, mode='user_timeline')
# Override tweepy.StreamListener to add logic to on_status.
class MyStreamListener(tweepy.StreamListener):
def on_status(self, status):
check_tweet(status, mode='stream')
bot = GrammarBot()
load_dotenv(verbose=True) # Set operating system environment variables based on contents of .env file.
api = obtain_twitter_api()
recent_tweets = RecentTweets(table_name=getenv('TWITTER_TABLE'))
# Get a list of our followers. For each follower, check whether they have any recent tweets that need correcting.
follower_ids, follower_names = [], []
for this_follower in tweepy.Cursor(api.followers).items():
follower_ids.append(str(this_follower.id))
follower_names.append('@' + this_follower.screen_name)
check_recent_tweets(user_id=str(this_follower.id))
myStreamListener = MyStreamListener()
myStream = tweepy.Stream(auth=api.auth, listener=myStreamListener)
# is_async=True will make the Twitter stream processor run on its own thread.
myStream.filter(follow=follower_ids, is_async=True)
# ... back on the main thread.
app = Flask(__name__)
@app.route("/")
def dashboard():
last_tweets = []
for each_follower in follower_ids:
most_recent_tweet = recent_tweets.get_recent_tweet(parm_user_id=each_follower)
if 'user_id' in most_recent_tweet:
last_tweets.append(most_recent_tweet)
return render_template('dashboard.html',
parm_time=now_str(),
parm_followers=follower_names,
parm_last_tweets=last_tweets)
app.run()
``` |
{
"source": "johntelforduk/simple-table",
"score": 4
} |
#### File: johntelforduk/simple-table/simple_table.py
```python
from os import path
import json
class SimpleTable:
def __init__(self, filename: str):
"""Create a table that will be made durable by writing to parm filename."""
self.filename = filename # JSON file that the table will be persisted to.
self.rows = [] # List of rows in the table.
# Read any previous contents of the file into memory.
if path.exists(self.filename):
f = open(self.filename, 'r')
whole_text = (f.read())
self.rows = json.loads(whole_text)
f.close()
def insert(self, row: list):
"""Insert the parm row into the table."""
self.rows.append(row)
def commit(self):
"""Write the table to durable storage."""
f = open(self.filename, 'w')
f.writelines(json.dumps(self.rows))
f.close()
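# Usage sketch (illustration only, names are made up): rows survive across runs because
# commit() rewrites the JSON file.
#
#     table = SimpleTable('people.json')
#     table.insert(['Ada', 1815])
#     table.commit()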
``` |
{
"source": "johntellsall/cdktf-ex",
"score": 2
} |
#### File: src/handlers/test_app.py
```python
"test Lambda handler"
import logging
import app
def test_lambda_handler(caplog):
"""
test Lambda handler, including logging
"""
caplog.set_level(logging.INFO)
response = app.lambda_handler({"beer": 1}, None)
assert response == {"body": "Hello world!", "statusCode": 200}
assert 'Received : {"beer": 1}' in caplog.text
``` |
{
"source": "johntellsall/minibatch",
"score": 3
} |
#### File: minibatch/tests/test_minibatch.py
```python
from multiprocessing import Process
from time import sleep
from unittest import TestCase
from minibatch import Stream, Buffer, setup
from minibatch.tests.util import delete_database
class MiniBatchTests(TestCase):
def setUp(self):
self.url = 'mongodb://localhost/test'
delete_database(url=self.url)
self.db = setup(url=self.url)
def test_stream(self):
"""
Test a stream writes to a buffer
"""
stream = Stream.get_or_create('test')
stream.append({'foo': 'bar1'})
stream.append({'foo': 'bar2'})
count = len(list(doc for doc in Buffer.objects.all()))
self.assertEqual(count, 2)
def test_fixed_size(self):
"""
Test batch windows of fixed sizes work ok
"""
from minibatch import streaming
def consumer():
# note the stream decorator blocks the consumer and runs the decorated
# function asynchronously upon the window criteria is satisfied
@streaming('test', size=2, keep=True)
def myprocess(window):
try:
db = setup(self.url)
db.processed.insert_one({'data': window.data or {}})
except Exception as e:
print(e)
return window
# start stream consumer
proc = Process(target=consumer)
proc.start()
# fill stream
stream = Stream.get_or_create('test')
for i in range(10):
stream.append({'index': i})
# give it some time to process
sleep(5)
proc.terminate()
# expect 5 entries, each of length 2
data = list(doc for doc in self.db.processed.find())
count = len(data)
self.assertEqual(count, 5)
self.assertTrue(all(len(w) == 2 for w in data))
def test_timed_window(self):
"""
        Test timed batch windows work ok
"""
from minibatch import streaming
def consumer():
# note the stream decorator blocks the consumer and runs the decorated
# function asynchronously upon the window criteria is satisfied
@streaming('test', interval=1, keep=True)
def myprocess(window):
try:
db = setup(url=self.url)
db.processed.insert_one({'data': window.data or {}})
except Exception as e:
print(e)
return window
# start stream consumer
proc = Process(target=consumer)
proc.start()
# fill stream
stream = Stream.get_or_create('test')
for i in range(10):
stream.append({'index': i})
sleep(.5)
# give it some time to process
sleep(5)
proc.terminate()
# expect at least 5 entries (10 x .5 = 5 seconds), each of length 1-2
data = list(doc for doc in self.db.processed.find())
count = len(data)
self.assertGreater(count, 5)
self.assertTrue(all(len(w) >= 2 for w in data))
def test_timed_window_relaxed(self):
"""
        Test relaxed timed batch windows work ok
"""
from minibatch import streaming
def consumer():
# note the stream decorator blocks the consumer and runs the decorated
# function asynchronously upon the window criteria is satisfied
@streaming('test', interval=1, relaxed=True, keep=True)
def myprocess(window):
try:
db = setup(self.url)
db.processed.insert_one({'data': window.data or {}})
except Exception as e:
print(e)
return window
# start stream consumer
proc = Process(target=consumer)
proc.start()
# fill stream
stream = Stream.get_or_create('test')
for i in range(10):
stream.append({'index': i})
sleep(.5)
# give it some time to process
sleep(5)
proc.terminate()
# expect at least 5 entries (10 x .5 = 5 seconds), each of length 1-2
data = list(doc for doc in self.db.processed.find())
count = len(data)
self.assertGreater(count, 5)
self.assertTrue(all(len(w) >= 2 for w in data))
```
#### File: minibatch/minibatch/window.py
```python
import datetime
from minibatch import Buffer, Stream
from minibatch.models import Window
class WindowEmitter(object):
"""
a window into a stream of buffered objects
WindowEmitter.run() implements the generic emitter protocol as follows:
1. determine if a window is ready to be processed
2. retrieve the data from the buffer to create a Window
3. process the data (i.e. mark the buffered data processed)
4. run the emit function on the window
Note that run() is blocking. Between running the protocol,
it will sleep to conserve resources.
Each time run() wakes up, it will call the following methods in turn:
window_ready() - called to determine if the buffer contains enough
data for a window.
query() - return the Buffer objects to process
process() - process the data
timestamp() - timestamp the stream for the next processing
commit() - commit processed data back to the buffer. by
default this means removing the objects from the
buffer and deleting the window.
sleep() - sleep until the next round
Use timestamp() to mark the stream (or the buffer data) for the next
round. Use sleep() to set the amount of time to sleep. Depending on
the emitter's semantics this may be a e.g. a fixed interval or some function
of the data.
WindowEmitter implements several defaults:
process() - mark all data returned by query() as processed
sleep() - sleep self.interval / 2 seconds
undo() - called if the emit function raises an exception. marks
the data returned by query() as not processed and deletes
the window
For examples of how to implement a custom emitter see TimeWindow, CountWindow
and SampleFunctionWindow.
    Note there should only be one WindowEmitter per stream. This is a
    limitation of the Buffer's way of marking documents as processed (a boolean
flag). This decision was made in favor of performance and simplicity. Supporting
concurrent emitters would mean each Buffer object needs to keep track of which
emitter has processed its data and make sure Window objects are processed by
exactly one emitter.
"""
def __init__(self, stream, interval=None, processfn=None, emitfn=None,
emit_empty=False):
self.stream_name = stream
self.interval = interval
self.emit_empty = emit_empty
self.emitfn = emitfn
self.processfn = processfn
self._stream = None
self._window = None # current window if any
self._delete_on_commit = True
def query(self, *args):
        raise NotImplementedError()
def window_ready(self):
""" return a tuple of (ready, qargs) """
        raise NotImplementedError()
def timestamp(self, query_args):
self.stream.modify(query={}, last_read=datetime.datetime.now())
@property
def stream(self):
if self._stream:
return self._stream
self._stream = Stream.get_or_create(self.stream_name)
return self._stream
def process(self, qs):
if self.processfn:
return self.processfn(qs)
data = []
for obj in qs:
obj.modify(processed=True)
data.append(obj)
return data
def undo(self, qs):
for obj in qs:
obj.modify(processed=False)
if self._window:
self._window.delete()
return qs
def persist(self, flag=True):
self._delete_on_commit = not flag
def commit(self, qs, window):
if not self._delete_on_commit:
window.modify(processed=True)
return
for obj in qs:
obj.delete()
window.delete()
def emit(self, qs):
self._window = Window(stream=self.stream.name,
data=[obj.data for obj in qs]).save()
if self.emitfn:
self._window = self.emitfn(self._window) or self._window
return self._window
def sleep(self):
import time
time.sleep((self.interval or self.stream.interval) / 2.0)
def run(self):
while True:
ready, query_args = self.window_ready()
if ready:
qs = self.query(*query_args)
qs = self.process(qs)
if qs or self.emit_empty:
try:
window = self.emit(qs)
except Exception as e:
self.undo(qs)
print(str(e))
else:
self.commit(qs, window)
finally:
self.timestamp(*query_args)
self.sleep()
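# Minimal usage sketch (illustration only, not part of the original module): with a stream
# named 'test' already receiving data, a CountWindow (defined below) could be run as a
# blocking consumer like this:
#
#     emitter = CountWindow('test', interval=2, emitfn=lambda w: print(w.data))
#     emitter.run()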
class FixedTimeWindow(WindowEmitter):
"""
a fixed time-interval window
Yields windows of all data retrieved in fixed intervals of n
seconds. Note that windows are created in fixed-block sequences,
i.e. in steps of n_seconds since the start of the stream. Empty
windows are also emitted. This guarantees that any window
contains only those documents received in that particular window.
This is useful if you want to count e.g. the number of events
per time-period.
Usage:
@stream(name, interval=n_seconds)
def myproc(window):
# ...
"""
def __init__(self, *args, **kwargs):
super(FixedTimeWindow, self).__init__(*args, **kwargs)
self.emit_empty = True
def window_ready(self):
stream = self.stream
last_read = stream.last_read
now = datetime.datetime.now()
max_read = last_read + datetime.timedelta(seconds=self.interval)
return now > max_read, (last_read, max_read)
def query(self, *args):
last_read, max_read = args
fltkwargs = dict(created__gte=last_read, created__lte=max_read)
return Buffer.objects.no_cache().filter(**fltkwargs)
def timestamp(self, *args):
last_read, max_read = args
self.stream.modify(query=dict(last_read__gte=last_read), last_read=max_read)
self.stream.reload()
def sleep(self):
import time
# we have strict time windows, only sleep if we are up to date
if self.stream.last_read > datetime.datetime.now() - datetime.timedelta(seconds=self.interval):
# sleep slightly longer to make sure the interval is complete
# and all data had a chance to accumulate. if we don't do
# this we might get empty windows on accident, resulting in
# lost data
time.sleep(self.interval + 0.25)
class RelaxedTimeWindow(WindowEmitter):
"""
a relaxed time-interval window
Every interval n_seconds, yields windows of all data in the buffer
since the last successful retrieval of data. This does _not_
guarantee the data retrieved is in a specific time range. This is
useful if you want to retrieve data every n_seconds but do not
care when the data was inserted into the buffer.
Usage:
@stream(name, interval=n_seconds)
def myproc(window):
# ...
"""
def window_ready(self):
stream = self.stream
last_read = stream.last_read
max_read = datetime.datetime.now()
return True, (last_read, max_read)
def query(self, *args):
last_read, max_read = args
fltkwargs = dict(created__gt=last_read, created__lte=max_read,
processed=False)
return Buffer.objects.no_cache().filter(**fltkwargs)
def timestamp(self, *args):
last_read, max_read = args
self.stream.modify(query=dict(last_read=last_read), last_read=max_read)
self.stream.reload()
class CountWindow(WindowEmitter):
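    # Emits a window as soon as at least `interval` unprocessed Buffer documents are
    # available; unlike the timed windows above, it ignores wall-clock time entirely.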
def window_ready(self):
qs = Buffer.objects.no_cache().filter(processed=False).limit(self.interval)
self._data = list(qs)
return len(self._data) >= self.interval, ()
def query(self, *args):
return self._data
def timestamp(self, *args):
self.stream.modify(query={}, last_read=datetime.datetime.now())
def sleep(self):
import time
time.sleep(0.1)
``` |
{
"source": "johntellsall/shotglass",
"score": 3
} |
#### File: shotglass/ex-treemap/make_squarify.py
```python
import argparse
import os
import sys
from collections import Counter
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import pandas as pd
import squarify
PREFIX = "/Users/johnmitchell/jsrc/shotglass/SOURCE/"
def zap_prefix(path):
if not path.startswith(PREFIX):
return path
rest = path[len(PREFIX) :]
# TODO strip next dir name
return rest
def make_squarify_files(project_data_path):
df = pd.read_pickle(project_data_path)
df = df[df.linecount > 0]
squarify.plot(sizes=df.linecount, label=df.name, alpha=0.8)
plt.axis("off")
name = os.path.splitext(os.path.basename(project_data_path))[0]
out_path = f"{name}.png"
print(out_path)
plt.savefig(out_path)
def make_squarify(project_data_path):
df = pd.read_pickle(project_data_path)
dir_size = Counter()
for item in df.itertuples():
path = zap_prefix(item.path)
dir_ = os.path.dirname(path)
dir_size[dir_] += item.linecount
items = dir_size.most_common() # sort
dir_items = pd.DataFrame(items, columns=("dir", "dir_linecount"))
dir_items = dir_items[dir_items.dir_linecount > 0]
squarify.plot(sizes=dir_items.dir_linecount, label=dir_items.dir, alpha=0.8)
plt.axis("off")
name = os.path.splitext(os.path.basename(project_data_path))[0]
plt.title(name.title())
out_path = f"{name}.png"
print(out_path)
plt.savefig(out_path)
def main():
parser = argparse.ArgumentParser(description="TODO")
parser.add_argument(
"data_paths", metavar="N", type=argparse.FileType("r"), nargs="+"
)
parser.add_argument("--dir", action="store_true")
args = parser.parse_args()
render = make_squarify if args.dir else make_squarify_files
for path in args.data_paths:
render(path)
if __name__ == "__main__":
main()
```
#### File: shotglass/ex-treemap/simple.py
```python
config = {'global': 1}
class Klass:
def printme(self):
print('me')
def double(num):
return 2*num
def func():
assert double(3) == 6
if __name__ == '__main__':
func()
```
#### File: shotglass/ex-treemap/test_ctags.py
```python
import make_ctags
def test_compile():
df = make_ctags.compile(None, paths=['simple.py'])
assert len(df) == 1
assert df.columns.tolist() == ['path', 'name', 'ctags_raw', 'linecount']
```
#### File: shotglass/app/hilbert.py
```python
from math import log, ceil
from functools import lru_cache, reduce
@lru_cache(maxsize=None)
def int_to_Hilbert(i, nD=2): # Default is the 2D Hilbert walk.
index_chunks = unpack_index(i, nD)
nChunks = len(index_chunks)
mask = 2 ** nD - 1
start, end = initial_start_end(nChunks, nD)
coord_chunks = [0] * nChunks
for j, i in enumerate(index_chunks):
coord_chunks[j] = gray_encode_travel(start, end, mask, i)
start, end = child_start_end(start, end, mask, i)
return pack_coords(coord_chunks, nD)
def Hilbert_to_int(coords):
nD = len(coords)
coord_chunks = unpack_coords(coords)
nChunks = len(coord_chunks)
mask = 2 ** nD - 1
start, end = initial_start_end(nChunks, nD)
index_chunks = [0] * nChunks
for j in range(nChunks):
i = gray_decode_travel(start, end, mask, coord_chunks[j])
index_chunks[j] = i
start, end = child_start_end(start, end, mask, i)
return pack_index(index_chunks, nD)
def initial_start_end(nChunks, nD):
# This orients the largest cube so that
# its start is the origin (0 corner), and
# the first step is along the x axis, regardless of nD and nChunks:
return 0, 2 ** ((-nChunks - 1) % nD) # in Python 0 <= a % b < b.
# Unpacking arguments and packing results of int <-> Hilbert functions.
# nD == # of dimensions.
# A "chunk" is an nD-bit int (or Python long, aka bignum).
# Lists of chunks are highest-order first.
# Bits within "coord chunks" are x highest-order, y next, etc.,
# i.e., the same order as coordinates input to Hilbert_to_int()
# and output from int_to_Hilbert().
# unpack_index( int index, nD ) --> list of index chunks.
#
def unpack_index(i, nD):
p = 2 ** nD # Chunks are like digits in base 2**nD.
nChunks = max(1, int(ceil(log(i + 1, p)))) # num of digits
chunks = [0] * nChunks
for j in range(nChunks - 1, -1, -1):
chunks[j] = i % p
i //= p  # integer division; chunks are base-2**nD digits
return chunks
def pack_index(chunks, nD):
p = 2 ** nD # Turn digits mod 2**nD back into a single number:
return reduce(lambda n, chunk: n * p + chunk, chunks)
# unpack_coords( list of nD coords ) --> list of coord chunks each nD bits.
def unpack_coords(coords):
biggest = reduce(max, coords) # the max of all coords
nChunks = max(1, int(ceil(log(biggest + 1, 2)))) # max # of bits
return transpose_bits(coords, nChunks)
def pack_coords(chunks, nD):
return transpose_bits(chunks, nD)
# transpose_bits --
# Given nSrcs source ints each nDests bits long,
# return nDests ints each nSrcs bits long.
# Like a matrix transpose where ints are rows and bits are columns.
# Earlier srcs become higher bits in dests;
# earlier dests come from higher bits of srcs.
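# Worked example (added for illustration): transpose_bits([0b10, 0b11], 2)
# returns [0b11, 0b01], because the source rows "10" and "11" have a
# first-bit column of "11" and a second-bit column of "01".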
def transpose_bits(srcs, nDests):
srcs = list(srcs) # Make a copy we can modify safely.
dests = [0] * nDests
# Break srcs down least-significant bit first, shifting down:
for j in range(nDests - 1, -1, -1):
# Put dests together most-significant first, shifting up:
dest = 0
for ksrc in srcs:
dest = dest * 2 + ksrc % 2
srcs = [int(val / 2) for val in srcs]
dests[j] = dest
return dests
# Gray encoder and decoder from http://en.wikipedia.org/wiki/Gray_code :
#
def gray_encode(bn):
assert bn >= 0
assert type(bn) is int
return bn ^ int(bn / 2)
def gray_decode(n):
assert type(n) is int
sh = 1
while True:
div = n >> sh
n ^= div
if div <= 1:
return n
sh <<= 1
# gray_encode_travel -- gray_encode given start and end using bit rotation.
# Modified Gray code. mask is 2**nbits - 1, the highest i value, so
# gray_encode_travel( start, end, mask, 0 ) == start
# gray_encode_travel( start, end, mask, mask ) == end
# with a Gray-code-like walk in between.
# This method takes the canonical Gray code, rotates the output word bits,
# then xors ("^" in Python) with the start value.
#
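# Example (added for illustration): with start=0, end=2, mask=3 this reduces
# to the canonical 2-bit Gray code,
#   [gray_encode_travel(0, 2, 3, i) for i in range(4)] == [0, 1, 3, 2]
# so the walk begins at `start` (i=0) and finishes at `end` (i=mask).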
def gray_encode_travel(start, end, mask, i):
i = int(i)
travel_bit = start ^ end
modulus = mask + 1 # == 2**nBits
# travel_bit = 2**p, the bit we want to travel.
# Canonical Gray code travels the top bit, 2**(nBits-1).
# So we need to rotate by ( p - (nBits-1) ) == (p + 1) mod nBits.
# We rotate by multiplying and dividing by powers of two:
gray_i = i ^ int(i / 2) # gray encode(i)
g = gray_i * (travel_bit * 2)
return ((g | int(g / modulus)) & mask) ^ start
def gray_decode_travel(start, end, mask, g):
travel_bit = start ^ end
modulus = mask + 1 # == 2**nBits
rg = (g ^ start) * int(modulus / (travel_bit * 2))
return gray_decode((rg | int(rg / modulus)) & mask)
# child_start_end( parent_start, parent_end, mask, i )
# e -- Get start & end for child.
# i is the parent's step number, between 0 and mask.
# Say that parent( i ) =
# gray_encode_travel( parent_start, parent_end, mask, i ).
# And child_start(i) and child_end(i) are what child_start_end()
# should return -- the corners the child should travel between
# while the parent is in this quadrant or child cube.
# o child_start( 0 ) == parent( 0 ) (start in a corner)
# o child_end( mask ) == parent( mask ) (end in a corner)
# o child_end(i) - child_start(i+1) == parent(i+1) - parent(i)
# (when parent bit flips, same bit of child flips the opposite way)
# Those constraints still leave choices when nD (# of bits in mask) > 2.
# Here is how we resolve them when nD == 3 (mask == 111 binary),
# for parent_start = 000 and parent_end = 100 (canonical Gray code):
# i parent(i) child_
# 0 000 000 start(0) = parent(0)
# 001 end(0) = parent(1)
# ^ (flip) v
# 1 001 000 start(1) = parent(0)
# 010 end(1) = parent(3)
# ^ v
# 2 011 000 start(2) = parent(0)
# 010 end(2) = parent(3)
# v ^
# 3 010 011 start(3) = parent(2)
# 111 end(3) = parent(5)
# ^ v
# 4 110 011 start(4) = parent(2)
# 111 end(4) = parent(5)
# ^ v
# 5 111 110 start(5) = parent(4)
# 100 end(5) = parent(7)
# v ^
# 6 101 110 start(6) = parent(4)
# 100 end(6) = parent(7)
# v ^
# 7 100 101 start(7) = parent(6)
# 100 end(7) = parent(7)
# This pattern relies on the fact that gray_encode_travel()
# always flips the same bit on the first, third, fifth, ... and last flip.
# The pattern works for any nD >= 1.
#
@lru_cache(maxsize=None)
def child_start_end(parent_start, parent_end, mask, i):
i = int(i)
start_i = max(0, (i - 1) & ~1) # next lower even number, or 0
end_i = min(mask, (i + 1) | 1) # next higher odd number, or mask
child_start = gray_encode_travel(parent_start, parent_end, mask, start_i)
child_end = gray_encode_travel(parent_start, parent_end, mask, end_i)
return child_start, child_end
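# Minimal usage sketch (added for illustration; not part of the original
# module): walk the first few cells of the 2-D Hilbert curve and check that
# Hilbert_to_int inverts int_to_Hilbert.
if __name__ == "__main__":
    for i in range(16):
        x, y = int_to_Hilbert(i, nD=2)
        assert Hilbert_to_int([x, y]) == i
        print(i, (x, y))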
```
#### File: management/commands/adjust_index.py
```python
import sys
# import ctags
from django.core.management.base import BaseCommand, CommandError
from app.models import SourceLine
class Command(BaseCommand):
help = "beer"
def add_arguments(self, parser):
parser.add_argument("--project")
# parser.add_argument('--prefix', default='')
# parser.add_argument('--verbose', action='store_true')
def handle(self, *args, **options):
project = options["project"]
lines = SourceLine.objects.filter(project=project).order_by("path")
print(f"LINES: {project=}")
for line in lines:
print(vars(line))
# prefix = options['prefix']
# rows = []
# while True:
# if entry['kind']:
# path = entry['file']
# if prefix and path.startswith(prefix):
# path = path[len(prefix):].lstrip('/')
# length = 123
# rows.append(SourceLine(name=entry['name'],
# project=options['project'],
# path=path,
# line_number=entry['lineNumber'],
# length=length,
# kind=entry['kind']))
# if options['verbose']:
# print rows[-1].__dict__
# status = tagFile.findNext(entry)
# if not status:
# break
# SourceLine.objects.bulk_create(rows)
```
#### File: management/commands/draw.py
```python
from django.core.management.base import BaseCommand
from app import draw
from app.models import SourceLine
class Command(BaseCommand):
help = "beer"
def add_arguments(self, parser):
parser.add_argument("projects", nargs="+")
def get_projects(self, projects):
if projects != ["all"]:
return projects
return SourceLine.projects()
def handle(self, *args, **options):
themeClass = draw.ThemeRainbow
for project in self.get_projects(options["projects"]):
print("***", project)
grid = draw.SimpleDraw().draw(project, theme=themeClass())
depth = None
argname = "path"
detail = "_{}".format(depth) if depth else ""
argname2 = argname.split("_")[0]
path = "{}_{}{}.png".format(project, argname2, detail)
grid.render(path)
print(path)
```
#### File: management/commands/funcsize.py
```python
import itertools
import operator
import numpy as np
import matplotlib.pyplot as plt
from django.core.management.base import BaseCommand
from app import models
class Command(BaseCommand):
help = "beer"
def add_arguments(self, parser):
parser.add_argument("projects", nargs="+")
def handle(self, *args, **options):
fs = 10 # fontsize
versions = (
models.SourceLine.objects.filter(project__startswith="django-")
.order_by("project")
.values_list("project", "progradon__complexity")
)
for vers, complexity_iter in itertools.groupby(
versions, key=operator.itemgetter(1)
):
print vers, ":"
print "-", ", ".join(str(x) for x in complexity_iter)
data = models.SourceLine.objects.filter(project="django-1.0.1").values_list(
"progradon__complexity", flat=True
)
plt.boxplot(data) # , labels=labels)
plt.show()
# xs, ys, areas = zip(*data)
# ys = areas
# colors = np.random.rand(len(xs))
# plt.scatter(xs, ys, c=colors) # s=areas)
# plt.xlabel('file index')
# plt.ylabel('version index')
plt.savefig("z.png")
# plt.savefig('z.svg')
```
#### File: management/commands/pygdex.py
```python
import sys
import pygments.lexers
from pygments.token import Name, Punctuation
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "beer"
def add_arguments(self, parser):
parser.add_argument("paths", nargs="+")
# parser.add_argument('--index', action="store_true")
def handle(self, *args, **options):
for path in options["paths"]:
lex = pygments.lexers.get_lexer_for_filename(path)
name = None
for tokentype, value in lex.get_tokens(open(path).read()):
if tokentype is Name:
name = value
elif tokentype is Punctuation and value == "(":
print "NAME", name
name = None
# import ipdb ; ipdb.set_trace()
```
#### File: management/commands/spider.py
```python
import itertools
import os
import re
import subprocess
from django.core.management.base import BaseCommand
from palettable import colorbrewer
from PIL import Image, ImageDraw, ImageFont
IMAGE_WIDTH = 1000
IMAGE_HEIGHT = 1000
COL_WIDTH, COL_HEIGHT = 100, 1000
COL_GAP = 10
def serpentine_iter(width):
y = 0
while True:
for x in range(width):
yield x, y
for x in range(width):
yield width - x - 1, y + 1
y += 2
def render_highlight(path):
cmd = ["source-highlight", "-i", path]
output = subprocess.check_output(cmd, text=True)
output = re.compile("^.+<pre><tt>", re.DOTALL).sub("", output)
return output.split("\n")
def get_colormap():
cmap_obj = colorbrewer.qualitative.Set3_12
cmap_colors = list(map(tuple, cmap_obj.colors))
return itertools.cycle(cmap_colors)
def get_count(paths):
output = subprocess.check_output(["wc", "--lines"] + paths)
wordcount_re = re.compile(
r"^\s* ([0-9]+)" r"\s+ (.+) $", re.MULTILINE | re.VERBOSE
)
matches = wordcount_re.finditer(output)
return {path: int(count) for count, path in (m.groups() for m in matches)}
class Render(object):
def __init__(self, draw, x, y):
self.draw = draw
self.x, self.y = x, y
self.symbol_re = re.compile("<(.+?)>([^<]*)")
self.colors = ["black"]
def add_text(self, text):
raise NotImplementedError
def add_line(self, line):
line = "<x>" + line # process text before HTML
mgroups = (match.groups() for match in self.symbol_re.finditer(line))
for sym, text in mgroups:
if sym.startswith("font "):
self.colors.append(sym.split('"')[1])
elif sym == "/font":
self.colors.pop()
if text:
self.add_text(text)
self.y += 1
# TODO: trim lines outside column (~80)
class RenderSource(Render):
"""
draw individual source code lines with colors + indent
"""
def __init__(self, *args, **kwargs):
self.relx = None
super(RenderSource, self).__init__(*args, **kwargs)
def add_line(self, line):
self.relx = 0
super(RenderSource, self).add_line(line)
def add_text(self, text):
if text.startswith(" "):
orig_len = len(text)
text = text.lstrip(" ")
self.relx += orig_len - len(text)
self.draw.line(
(self.x + self.relx, self.y, self.x + self.relx + len(text), self.y),
fill=self.colors[-1],
)
self.relx += len(text) + 1
class RenderFile(Render):
def add_line(self, line):
self.draw.line(
(self.x, self.y, self.x + COL_WIDTH - COL_GAP, self.y), fill=self.colors[-1]
)
self.y += 1
def render_file(path, renderObj):
hlines = render_highlight(path)
for line in hlines:
renderObj.add_line(line)
if renderObj.y >= IMAGE_HEIGHT:
renderObj.y = 0
renderObj.x += COL_WIDTH
def render_blocks(image, paths):
"""
draw each file as a colored block, annotated with filename
"""
CMAP_OBJ = colorbrewer.qualitative.Set3_12
CMAP_COLORS = list(map(tuple, CMAP_OBJ.colors))
colormap = itertools.cycle(CMAP_COLORS)
renderClass = RenderFile
draw = ImageDraw.Draw(image)
rend = renderClass(draw=draw, x=0, y=0)
# X: size in points, not pixels
fnt = ImageFont.truetype("Umpush-Light.ttf", size=14)
text_color = (0, 0, 0, 128)
for path in paths:
text_args = dict(
xy=(rend.x, rend.y), text=os.path.basename(path), font=fnt, fill=text_color
)
rend.colors = [next(colormap)]
render_file(path, rend)
draw.text(**text_args)
return image
# XX merge render_* functions
def render_source(image, paths):
"""
draw each line of source one pixel high, syntaxed colored, like a compressed minimap
"""
renderClass = RenderSource
draw = ImageDraw.Draw(image)
rend = renderClass(draw=draw, x=0, y=0)
for path in paths:
render_file(path, rend)
def render_diff(image, paths):
"""
draw each file as a colored slice, in prep to showing differences between versions.
XXX not useful atm
"""
count_dict = get_count(paths)
draw = ImageDraw.Draw(image)
scale = IMAGE_HEIGHT / float(count_dict["total"])
colormap_iter = get_colormap()
y = 0
for path in sorted(count_dict):
next_y = y + count_dict[path] * scale
color = next(colormap_iter)
draw.rectangle((0, y, COL_WIDTH - COL_GAP, next_y), fill=color, outline="black")
y = next_y
class Command(BaseCommand):
help = __doc__
def add_arguments(self, parser):
parser.add_argument("--output", default="z.png")
parser.add_argument("--style", default="source")
parser.add_argument("paths", nargs="+")
def handle(self, *args, **options):
im = Image.new("RGB", (IMAGE_WIDTH, IMAGE_HEIGHT), color="white")
render = render_source
if options["style"] == "blocks":
render = render_blocks
elif options["style"] == "diff":
render = render_diff
render(image=im, paths=options["paths"])
im.save(options["output"])
```
#### File: management/commands/words.py
```python
import collections
import logging
import re
import sys
from django.core.management.base import BaseCommand
from app.models import SourceFile
logging.basicConfig(
format="%(asctime)-15s %(levelname)-8s %(message)s",
stream=sys.stderr,
level=logging.DEBUG,
)
# disable db query logs
logging.getLogger("django.db.backends").propagate = False
logger = logging.getLogger(__name__)
def words(project):
camelcase_pat = re.compile("[A-Z][a-z]*")
print("*", project.upper())
namepaths = SourceFile.objects.filter(project=project).values_list("name", "path")
# import ipdb ; ipdb.set_trace()
path_words = collections.defaultdict(collections.Counter)
for num, (name, path) in enumerate(namepaths):
names = [name.lower()]
if "_" in name:
names = name.lower().split("_")
elif camelcase_pat.match(name):
names = [part.lower() for part in camelcase_pat.findall(name)]  # split CamelCase before lowercasing
path_words[path].update([_f for _f in names if _f])
for path, words in sorted(path_words.items()):
relpath = re.sub("^.+?/", "", path)
common = [(word, count) for word, count in words.most_common(3) if count > 1]
if common:
print("{:30} {}".format(relpath, common if common else ""))
class Command(BaseCommand):
help = "beer"
def add_arguments(self, parser):
parser.add_argument("projects", nargs="+")
def handle(self, *args, **options):
projects = options["projects"]
if projects == ["all"]:
projects = SourceFile.projects()
for project in options["projects"]:
words(project)
```
#### File: app/tests/test_perf.py
```python
import cProfile
import pstats
import pytest
from django.test import TestCase
from app import grid, models, render
# PERFORMANCE TEST:
# py.test -s app/tests/test_perf::ProfileDraw
#
# TODO: disable this except when explicitly called
@pytest.mark.skip(reason="performance test only")
class ProfileDraw(TestCase):
fixtures = ["diagram-django"] # slow + useful
fixtures = ["diagram-min"] # minimal
def setUp(self):
stub = models.SourceLine.objects.create(
kind="k", length=3, line_number=2, name="name", path="path"
)
models.DiagramSymbol.objects.update(sourceline=stub)
def test_rawdraw(self):
def rawdraw():
diagram = render.Diagram.FromDB()
mygrid = grid.Grid(None, None)
diagram.draw(mygrid)
prof_name = "rawdraw-{}.prof".format(self.fixtures[0])
cProfile.runctx(
rawdraw.__code__,  # pylint: disable=no-member
globals=globals(),
locals={},
filename=prof_name,
)
p = pstats.Stats(prof_name)
p.strip_dirs().sort_stats("cumtime").print_stats(20)
```
#### File: app/tests/test_views.py
```python
from pytest import mark
from app import views
class AttrDict(dict):
__getattr__ = dict.__getitem__
# @mark.django_db
# def test_render():
# # just make sure it doesn't crash
# # X: depends on "flask" in database; add fixture
# views.render(None, project='flask')
# @mark.django_db
# def test_draw():
# # just make sure it doesn't crash
# req = AttrDict(GET={})
# views.draw(req, project='flask')
``` |
{
"source": "johnterickson/Mesh",
"score": 3
} |
#### File: Mesh/theory/compute_exp_Y.py
```python
from __future__ import division
from choose import compute_q, nCr, compute_p3
import cPickle as pickle
def prob(a,b,c, numStrings, numOnes, p1, p2, p3):
p4 = 1-p1-p2-p3
d = numStrings-2-a-b-c
return (nCr(numStrings-2,a)*(p1**a)*nCr(numStrings-2-a, b)*(p2**b)*nCr(numStrings-2-a-b, c)*(p3**c)*(p4**d))
def lookup(bound_id, length, numOnes, numStrings):
path = "bounds/{}".format(bound_id)
with open(path, "rb") as file:
bound_dict = pickle.load(file)
try:
bound = bound_dict[(length, numOnes, numStrings)]
except KeyError:
return None, bound_dict
return bound, None
def store(bound_id, bound_dict):
path = "bounds/{}".format(bound_id)
with open(path, "wb") as file:
pickle.dump(bound_dict, file)
def compute_exp_Y(length, numOnes, numStrings):
q = compute_q(length, numOnes)
p3 = compute_p3(length, numOnes)
p1 = q-p3
p2 = p1
p4 = 1-p1-p2-p3
sum = 0
for a in range(numStrings-2+1):
for b in range(numStrings-2-a+1):
for c in range(numStrings-2-a-b+1):
add = min(1/(a+c+1), 1/(b+c+1))*prob(a,b,c,numStrings, numOnes,p1,p2,p3)
# add = min(1/(a+c), 1/(b+c))*prob(a,b,c,numStrings, numOnes,p1,p2,p3)
sum += add
# sum += min(1/(a+c+1), 1/(b+c+1))*prob(a,b,c,numStrings,p1,p2,p3)
sum *= q
return sum*nCr(numStrings,2)
def compute_degree_bound(length, numOnes, numStrings):
print length
print numOnes
q = compute_q(length, numOnes)
exp_degree = (numStrings-1)*q
a = exp_degree
b = exp_degree
return numStrings/2*(a/(b+1))
def compute_isolated_edge_bound(length, numOnes, numStrings):
q = compute_q(length, numOnes)
p3 = compute_p3(length, numOnes)
m = numStrings
bound1 = (m-1)*q*(1-(2*q)+p3)**(m-2)
bound2 = 2- 2*(1-q)**(m-1) - (m-1)*q
return (m/2)*max(bound1,bound2)
# return (m/2)*bound2
def compute_degreeplusone_bound(length, numOnes, numStrings):
q = compute_q(length, numOnes)
p3 = compute_p3(length, numOnes)
p1 = q-p3
p2 = p1
p4 = 1-p1-p2-p3
sum = 0
for a in range(numStrings-2+1):
for b in range(numStrings-2-a+1):
for c in range(numStrings-2-a-b+1):
add = min(1/(a+c+2), 1/(b+c+2))*prob(a,b,c,numStrings, numOnes,p1,p2,p3)
# add = .5*(1/(a+c+2) + 1/(b+c+2))*prob(a,b,c,numStrings, numOnes,p1,p2,p3)
sum += add
sum *= q
return sum*nCr(numStrings,2)
def compute_improved_degreeplusone_bound(length, numOnes, numStrings):
bound, bound_dict = lookup("impdeg+1", length, numOnes, numStrings)
if bound:
print 'value already exists, retrieving from database'
return bound
q = compute_q(length, numOnes)
p3 = compute_p3(length, numOnes)
p1 = q-p3
p2 = p1
p4 = 1-p1-p2-p3
sum = 0
for a in range(numStrings-2+1):
for b in range(numStrings-2-a+1):
for c in range(numStrings-2-a-b+1):
if a+c+1==1 and b+c+1==1:
add = 1*prob(a,b,c,numStrings, numOnes,p1,p2,p3)
elif a+c+1==1:
add = (1/(b+c+1))*prob(a,b,c,numStrings, numOnes,p1,p2,p3)
elif b+c+1==1:
add = (1/(a+c+1))*prob(a,b,c,numStrings, numOnes,p1,p2,p3)
# elif a+c+1==2 and b+c+1==2:
## add = .5*prob(a,b,c,numStrings, numOnes,p1,p2,p3)
elif (a+c+1==2 and b+c+1==3) or (a+c+1==3 and b+c+1==2):
add = prob(a,b,c,numStrings, numOnes,p1,p2,p3)/3.0
# elif a+c+1 != b+c+1:
# add = min(1/(a+c+1), 1/(b+c+1))*prob(a,b,c,numStrings, numOnes,p1,p2,p3)
else:
add = min(1/(a+c+2), 1/(b+c+2))*prob(a,b,c,numStrings, numOnes,p1,p2,p3)
# add = .5*(1/(a+c+2) + 1/(b+c+2))*prob(a,b,c,numStrings, numOnes,p1,p2,p3)
sum += add
sum *= q
result = sum*nCr(numStrings,2)
bound_dict[(length, numOnes, numStrings)] = result
store("impdeg+1", bound_dict)
print 'value did not already exist, writing to database'
return result
def boundRetrieve(identifier):
fetcher = { "impdeg+1": (compute_improved_degreeplusone_bound)
}
return fetcher[identifier]
if __name__ == '__main__':
#print prob(18,2,12,80,0.03125,0.03125,0.9375)/31
# print compute_exp_Y(32, 13, 80)
print compute_improved_degreeplusone_bound(16, 4, 100)
```
#### File: Mesh/theory/maxmatch_vs_E[Y].py
```python
from __future__ import division
from createRandomString import *
from makeGraph import *
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import networkx as nx
import numpy as np
import time
from compute_exp_Y import compute_exp_Y, compute_degree_bound, compute_isolated_edge_bound, compute_degreeplusone_bound, compute_improved_degreeplusone_bound
from choose import compute_q
def experiment(length, ones_range_min, ones_range_max, reps, numStrings):
strings = []
ones = []
maxmatch_avg = []
maxmatch_std_dev = []
# greedymatch_avg = []
# greedymatch_std_dev = []
y_estimate = []
justy = []
raw = []
qs = []
for numOnes in range(ones_range_min, ones_range_max+1):
ones.append(numOnes)
q = compute_q(length, numOnes)
qs.append(q*100)
# qs.append(compute_q(length, numOnes)*100)
freed_pages_maxmatching = []
freed_pages_greedymatching = []
for iterations in range (reps):
for i in range(numStrings):
strings.append(createRandomString(length, numOnes))
# strings = createIndependentRandomStrings(length = length, numStrings = numStrings, q = q)
graph = makeGraph(strings)
frdpgs_maxmatching = len(nx.max_weight_matching(graph))/2
perc = (frdpgs_maxmatching/numStrings)*100
freed_pages_maxmatching.append(perc)
strings = []
m = np.asarray(freed_pages_maxmatching)
raw.append(freed_pages_maxmatching)
m_a = np.mean(m)
maxmatch_avg.append(m_a)
m_s = np.std(m)
maxmatch_std_dev.append(m_s)
# y = compute_exp_Y(length, numOnes, numStrings)
# y = compute_degreeplusone_bound(length, numOnes, numStrings)
y = compute_improved_degreeplusone_bound(length, numOnes, numStrings)
# y_est_raw = max(y,compute_degree_bound(length, numOnes, numStrings),compute_isolated_edge_bound(length, numOnes, numStrings))
# y_est_raw = compute_isolated_edge_bound(length, numOnes, numStrings)
# y_est_raw = compute_degree_bound(length, numOnes, numStrings)
# y_est_raw = y
yperc = (math.floor(y)/numStrings)*100
y_estimate.append(yperc)
# mistakes = {}
# for i in range(len(raw)):
# oops = []
# for entry in raw[i]:
# if entry < y_estimate[i]:
# oops.append(entry)
# mistakes[i+1] = oops
# print 'mistakes:'
# print mistakes
# use this version of mistakes
mistakes = {}
for i in range(len(raw)):
oops = 0
for entry in raw[i]:
if entry < y_estimate[i]:
oops += 1
mistakes[i+1] = oops
print 'mistakes:'
print mistakes
print 'E[Y]:'
ey = {}
for i in range(len(y_estimate)):
ey[i+1] = y_estimate[i]
print ey
#
# yperc = (y/numStrings)*100
# justy.append(yperc)
# c = np.asarray(freed_pages_greedymatching)
# c_a = np.mean(c)
# greedymatch_avg.append(c_a)
# c_s = np.std(c)
# greedymatch_std_dev.append(c_s)
return ones, maxmatch_avg, maxmatch_std_dev, y_estimate, justy, qs
def plot_it(length, ones_range_min, ones_range_max, reps, numStrings):
ones, match_avg, match_std_dev, y_estimate, justy, qs = experiment(length, ones_range_min, ones_range_max, reps, numStrings)
# print y_estimate
plt.errorbar(np.asarray(ones), np.asarray(match_avg), np.asarray(match_std_dev), markersize=3, lw=1, fmt='-o')
# plt.errorbar(np.asarray(ones), np.asarray(color_avg), np.asarray(color_std_dev), markersize=3, lw=1, fmt='-o')
plt.errorbar(np.asarray(ones), np.asarray(y_estimate), np.zeros(len(ones)), markersize=3, lw=1, fmt='-o')
# plt.errorbar(np.asarray(ones), np.asarray(justy), np.zeros(len(ones)), markersize=3, lw=1, fmt='-o')
# plt.plot(np.asarray(ones), y_estimate, markersize = 3, lw=1, fmt='o')
# plt.errorbar(np.asarray(ones), np.asarray(qs), np.zeros(len(ones)), markersize=3, lw=1, fmt='-o')
plt.ylim([0,60])
# plt.xlim([10, 14])
plt.ylabel('Percentage of pages freed')
plt.xlabel('Number of objects per page')
blue_patch = mpatches.Patch(color='blue', label='max matching')
green_patch = mpatches.Patch(color = 'green', label = 'lower bound')
red_patch = mpatches.Patch(color = 'red', label = 'q')
plt.legend(handles=[blue_patch, green_patch])
# plt.legend(handles=[blue_patch, green_patch, red_patch])
plt.title('MAX MATCHING VS LOWER BOUND \n{}-object pages, {} pages'.format(length, numStrings))
# plt.show()
# plt.savefig('maxvE[Y]{},{}'.format(length, numStrings) + '.png', dpi = 1000)
plt.savefig('maxvdeg+1imp++{},{}'.format(length, numStrings) + '.png', dpi = 1000)
# plt.savefig('manystrings.png', dpi = 1000)
plt.close()
if __name__ == '__main__':
#length = [32,64]
length = [32]
ones_range_min = 1
ones_range_max = 32
reps = 10
# numStrings = [80,100,150,200]
numStrings= [80]
start = time.time()
for l in length:
for n in numStrings:
plot_it(l, ones_range_min, int(l/2), reps, n)
# plot_it(l, 10, 13, 10, n)
print 'max match vs E[Y] plot {},{} done'.format(l,n)
end = time.time()
print('making this took {} seconds'.format(end-start) )
```
#### File: Mesh/theory/meshers.py
```python
import random
import numpy as np
from makeGraph import makeGraph
import networkx as nx
from mesh_util import Splitter, hamming, traverse, occupancySort, formatStrings, simpleGreedyTraverse, greedyTraverse
import time
def simpleMesher(strings, stdOutput=False):
"""
Attempts to mesh the first string in the list with the second, etc.
Returns number of successful meshes.
"""
meshes = 0
# try to mesh each string
for i in range(0, len(strings), 2):
str1 = strings[i]
str2 = strings[i + 1]
num = [int(x) for x in list(str1)]
num2 = [int(x) for x in list(str2)]
if np.dot(num, num2) == 0:
meshes += 1
if stdOutput:
return 100 * float(meshes) / len(strings)
return meshes
# def randomMesher(strings, attempts):
# """DEPRECATED"""
# s = [x for x in strings]
# matched_strings = []
# for i in range(attempts):
# pair = random.sample(s,2)
# str1 = pair[0]
# str2 = pair[1]
# num = [int(x) for x in list(str1)]
# num2 = [int(x) for x in list(str2)]
# if np.dot(num, num2) == 0:
# #print('removing {} and {}'.format(str1, str2))
# matched_strings.append(str1)
# matched_strings.append(str2)
# s.remove(str1)
# s.remove(str2)
# if len(s) < 2:
# return matched_strings
# return matched_strings
def randomMesher(strings, attempts, display=False, stdOutput=False):
length = len(strings[0])
totalStrings = len(strings)
strings = [long(string, base=2) for string in strings]
meshes = []
for k in range(attempts):
matched = []
random.shuffle(strings)
dim = len(strings)
for i in range(dim - 2, -2, -2):
num1 = strings[i]
num2 = strings[i + 1]
if num1 & num2 == 0:
matched.append(i)
for x in matched:
meshes.append((strings[x], strings[x + 1]))
for x in matched:
del strings[x + 1]
del strings[x]
formatstring = "{0:0" + str(length) + "b}"
meshes = [(formatstring.format(num), formatstring.format(num2))
for (num, num2) in meshes]
if display:
print "meshes:"
print meshes
if stdOutput:
return 100 * float(len(meshes)) / totalStrings
return len(meshes)
def _greedyMesher(strings, stdOutput=False):
"""DEPRECATED
Meshes a list of strings using a greedy first-match technique. Returns
the number of matched pairs after available matches are exhausted."""
s = strings
matched_strings = []
unmatched_strings = []
matched = []
for i in range(len(s)):
for j in range(i + 1, len(s)):
if i not in matched and j not in matched:
num = [int(x) for x in list(s[i])]
num2 = [int(x) for x in list(s[j])]
if np.dot(num, num2) == 0:
matched.append(i)
matched.append(j)
matched_strings += [s[x] for x in matched]
unmatched_strings += [s[x] for x in range(len(s)) if x not in matched]
if stdOutput == True:
return 100 * len(matched_strings) / (2 * len(strings))
else:
return matched_strings, unmatched_strings
def greedyMesher(strings, stdOutput=False, cutoff=None):
length = len(strings)
new_strings = formatStrings(strings)
meshes = []
occupancySort(new_strings)
simpleGreedyTraverse(meshes, new_strings, cutoff)
if stdOutput:
return 100 * len(meshes) / length
else:
return meshes
# def splitter(strings, length, splitting_string = 0):
# splitting_strings = []
# num_splitters = int(math.log(length,2))+1
# for i in range(1,num_splitters):
# split_string = ""
# for j in range(2**(i-1)):
# split_string = split_string + (("1" * int((length/(2**i)))) + ("0" * (int(length/(2**i)))))
# splitting_strings.append(split_string)
# if splitting_string >= num_splitters-1:
# return bucket1, bucket2
# split = splitting_strings[splitting_string]
# bucket1 = []
# bucket2 = []
# for s in strings:
# diff = hamming(s[0], split)
# if diff < int(length * 0.5):
# bucket1.append(s)
# elif diff == int(length * 0.5):
# if random.randint(0,1):
# bucket1.append(s)
# else:
# bucket2.append(s)
# else:
# bucket2.append(s)
# return bucket1, bucket2
#
# def splitAgain(bucket1, bucket2, length, method):
# try:
# new_bucket1, new_bucket2 = splitter(bucket1+bucket2, length, method)
# except IndexError:
# return bucket1, bucket2
# return new_bucket1, new_bucket2
def splittingMesher(strings, attempts, splittingMethod=0, display=False, stdOutput=False, extra=True):
if display:
print "using Splitting Mesher"
length = len(strings[0])
new_strings = formatStrings(strings)
splt = Splitter(length)
bucket1, bucket2 = splt.split(strings=new_strings)
meshes = []
for k in range(attempts):
# if k == attempts/2:
# print "rebucketing at halfway point"
# print bucket1, bucket2
# bucket1, bucket2 = splt.split(bucket1 = bucket1, bucket2 = bucket2)
random.shuffle(bucket1)
random.shuffle(bucket2)
try:
# print bucket1, bucket2, meshes
done = traverse(meshes, bucket1=bucket1,
bucket2=bucket2, extra=extra)
# print bucket1, bucket2, meshes
# print 'that was round {}'.format(k)
except AssertionError:
# print "rebucketing because one bucket is empty"
bucket1, bucket2 = splt.split(bucket1=bucket1, bucket2=bucket2)
continue
if done:
# print "all done, ending early at attempt {}".format(k)
break
if display:
print "meshes:"
print meshes
if stdOutput:
return 100 * float(len(meshes)) / len(strings)
return len(meshes)
def randomSplittingMesher(strings, attempts, display=False, stdOutput=False):
"""randomly splits string list into two lists, and then tries to mesh pairs
between the lists. for comparison purposes only, not an actual useful meshing
method."""
if display:
print "using random Splitting Mesher"
bucket1, bucket2 = [], []
length = len(strings[0])
# if splittingMethod == "left":
# splittingString = ("1" * (length/2)) + ("0" * (length/2))
# elif splittingMethod == "checkers":
# splittingString = ("10" * (length/2))
for string in strings:
s = long(string, base=2)
if random.randint(0, 1):
bucket1.append(s)
else:
bucket2.append(s)
formatstring = "{0:0" + str(length) + "b}"
# print "bucket1:"
# print [formatstring.format(item) for item in bucket1]
# print "bucket2:"
# print [formatstring.format(item) for item in bucket2]
# print "\n"
# print "bucket2: {0:08b}\n".format(bucket2)
meshes = []
for k in range(attempts):
random.shuffle(bucket1)
random.shuffle(bucket2)
# print "shuffles: {},\n{}".format(bucket1, bucket2)
dim = min(len(bucket1), len(bucket2))
if dim == 0:
break
matched = []
if dim == 1:
# print "checking {} and {}".format(bucket1[0], bucket2[0])
num1 = bucket1[0]
num2 = bucket2[0]
if num1 & num2 == 0:
matched.append(0)
for i in range(dim - 1, 0, -1):
# print "checking {} and {}".format(bucket1[i], bucket2[i])
num1 = bucket1[i]
num2 = bucket2[i]
if num1 & num2 == 0:
matched.append(i)
for x in matched:
meshes.append((bucket1[x], bucket2[x]))
for x in matched:
del bucket1[x]
del bucket2[x]
# meshes = [(num.toBinaryString(), num2.toBinaryString()) for (num, num2) in meshes]
meshes = [(formatstring.format(num), formatstring.format(num2))
for (num, num2) in meshes]
if display:
print "meshes:"
print meshes
if stdOutput:
return 100 * float(len(meshes)) / len(strings)
return len(meshes)
def greedySplittingMesher(strings, display=False, std_output=True, cutoff=None):
"""
Given a list of strings, splits that list into two lists based off
of a distance measure and then exhaustively checks pairs between
the two lists for meshes, greedily taking any it finds. Sorts the
lists in increasing order of occupancy so sparse/sparse meshes are
likely to be discovered. Can specify a cutoff probability below
which potential meshes will not be considered - this saves a lot
of time without affecting performance too much.
"""
if display:
print "using greedy splitting mesher"
length = len(strings[0]) # length of each string, e.g. 4 for '0100'
start = time.time()
new_strings = formatStrings(strings)
splt = Splitter(length)
bucket1, bucket2 = splt.split(strings=new_strings)
# print "preliminaries took {}".format(time.time()-start)
start = time.time()
meshes = []
# sorts buckets into low -> high occupancy
occupancySort(bucket1)
occupancySort(bucket2)
# print "sorting took {}".format(time.time()-start)
start = time.time()
done = greedyTraverse(meshes, bucket1=bucket1,
bucket2=bucket2, cutoff=cutoff)
# print "traversal took {}".format(time.time()-start)
if display:
print "meshes:"
print meshes
if std_output:
return 100 * float(len(meshes)) / len(strings)
else:
return len(meshes)
def doubleSplittingMesher(strings, attempts, display=False, stdOutput=False):
"""This function is temporary. I will soon merge it with splittingMesher to allow for arbitrary levels of splitting
in the same function."""
if display:
print "using double Splitting Mesher"
buckets = [[], []], [[], []]
length = len(strings[0])
numStrings = len(strings)
splittingString1 = ("1" * (length / 2)) + ("0" * (length / 2))
splittingString2 = ("10" * (length / 2))
for string in strings:
s = long(string, base=2)
diff = hamming(string, splittingString1)
diff2 = hamming(string, splittingString2)
if diff < int(length * 0.5):
id1 = 0
elif diff == int(length * 0.5):
if random.randint(0, 1):
id1 = 0
else:
id1 = 1
else:
id1 = 1
if diff2 < int(length * 0.5):
id2 = 0
elif diff == int(length * 0.5):
if random.randint(0, 1):
id2 = 0
else:
id2 = 1
else:
id2 = 1
buckets[id1][id2].append(s)
formatstring = "{0:0" + str(length) + "b}"
for layer in buckets:
for thing in layer:
print len(thing)
# print buckets
meshes = []
check1 = True
check2 = True
for k in range(attempts):
dim1 = min(len(buckets[0][0]), len(buckets[1][1]))
dim2 = min(len(buckets[0][1]), len(buckets[1][0]))
# print dim1, dim2
if dim1 == 0:
if check1:
print 'found meshes for everything in set 1, so stopped after {} attempts'.format(k)
check1 = False
else:
matched1 = []
if dim1 == 1:
num1 = buckets[0][0][0]
num2 = buckets[1][1][0]
if num1 & num2 == 0:
matched1.append(0)
for i in range(dim1 - 1, 0, -1):
num1 = buckets[0][0][i]
num2 = buckets[1][1][i]
if num1 & num2 == 0:
matched1.append(i)
for x in matched1:
meshes.append((buckets[0][0][x], buckets[1][1][x]))
for x in matched1:
del buckets[0][0][x]
del buckets[1][1][x]
if dim2 == 0:
if check2:
print 'found meshes for everything in set 2, so stopped after {} attempts'.format(k)
check2 = False
else:
matched2 = []
if dim2 == 1:
num1 = buckets[0][1][0]
num2 = buckets[1][0][0]
if num1 & num2 == 0:
matched2.append(0)
for i in range(dim2 - 1, 0, -1):
num1 = buckets[0][1][i]
num2 = buckets[1][0][i]
if num1 & num2 == 0:
matched2.append(i)
for x in matched2:
meshes.append((buckets[0][1][x], buckets[1][0][x]))
for x in matched2:
del buckets[0][1][x]
del buckets[1][0][x]
meshes = [(formatstring.format(num), formatstring.format(num2))
for (num, num2) in meshes]
if display:
print "meshes:"
print meshes
if stdOutput:
return 100 * float(len(meshes)) / len(strings)
return len(meshes)
def maxMatchingMesher(strings, stdOutput=False):
"""Converts the string set into a meshing graph and finds the maximum matching on said graph."""
graph = makeGraph(strings)
meshes = len(nx.max_weight_matching(graph)) / 2
if stdOutput:
return 100 * float(meshes) / len(strings)
return meshes
def color_counter(graph):
"""interprets a coloring on a graph as a meshing."""
color = nx.greedy_color(graph)
i = 0
for key, value in color.iteritems():
i = max(i, value)
return i + 1
def optimalMesher(strings, stdOutput=False):
"""Converts the string set into a meshing graph and finds a greedy coloring on the complement of said graph."""
graph = makeGraph(strings)
graph_c = nx.complement(graph)
meshes = len(strings) - color_counter(graph_c)
if stdOutput:
return 100 * float(meshes) / len(strings)
return meshes
def mesherRetrieve(identifier):
fetcher = {"simple": (simpleMesher),
"dumb": (randomMesher),
"greedy": (greedyMesher),
"split": (splittingMesher),
"greedysplit": (greedySplittingMesher),
"doubsplit": (doubleSplittingMesher),
"randsplit": (randomSplittingMesher),
"maxmatch": (maxMatchingMesher),
"color": (optimalMesher)
}
return fetcher[identifier]
if __name__ == '__main__':
# print splittingMesher(["00000001", "11111110", "11100000", "00000111"], 10, display = True)
# print splitter([(("1" * 16),0)], 16)
# print greedySplittingMesher(["00000001", "11111110", "11100000", "00000111"], display = True, stdOutput = False)
# meshes = []
# strings = formatStrings(["00000001", "11111110", "11100000", "00000111"])
# simpleGreedyTraverse(meshes, strings)
# print meshes, strings
print greedyMesher(["00000001", "11111110", "11100000", "00000111"])
```
#### File: Mesh/theory/mesh_util.py
```python
import math
import operator
from itertools import izip, imap
import random
from scipy.misc import comb as fast_nCr
from scipy.special import gamma
def formatStrings(strings):
"""Adds extra data to a list of strings for ease of meshing. Replaces each
string in the list with a tuple (A,B,C,D). A = original string. B = binary
representation for fast arithmetic. C = occupancy. D = flag that indicates
whether the string has been meshed(initially set to False)."""
new_strings = []
for string in strings:
#new_strings.append((string, long(string, base=2)))
new_strings.append(
(string, long(string, base=2), string.count("1"), False))
return new_strings
def hamming(str1, str2):
"""Calculates the Hamming distance between two strings of equal length."""
# if type(str1) == long:
# str1 = bin(str1)[2:].rjust(len(str2),"0")
assert len(str1) == len(str2)
ne = operator.ne
return sum(imap(ne, str1, str2))
def fast_q(length, occ1, occ2):
"""computes the probability that two strings with given occupancies will
mesh."""
result = float((fast_nCr(length - occ2, occ1))) / (fast_nCr(length, occ1))
return result
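# Worked example (added for illustration): fast_q(8, 1, 1) == C(7,1)/C(8,1)
# == 7/8 -- two strings of length 8 with one occupied slot each mesh unless
# they collide on that single slot.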
def faster_q(length, occ1, occ2):
numerator = 1
for i in range(length - occ1, length - occ1 - occ2, -1):
numerator *= i
denominator = 1
for i in range(length, length - occ2, -1):
denominator *= i
return float(numerator) / float(denominator)
def generate_cutoffs(bkt1, length, cutoff):
"""returns a dict indexed by string occupancy, value is the cutoff occupancy
for potential meshes (if you encounter a higher occupancy during a greedy
search for a mesh, stop)."""
cutoffs = {}
for s in bkt1:
occ1 = s[2]
if occ1 not in cutoffs.keys():
cutoffs[occ1] = float('inf')
# only calculate cutoffs for every 5th occupancy, to save time
for occ2 in range(0, int(length / 2), 5):
if faster_q(length, occ1, occ2) < cutoff:
cutoffs[occ1] = occ2
break
return cutoffs
class Splitter(object):
"""
Encapsulates splitting behavior for a trial.
Keeps track of multiple different splitting strings and can
automatically cycle through them if required.
"""
def __init__(self, length):
self.length = length
self.splitting_strings = []
self.num_splitters = int(math.log(length, 2))
# print self.num_splitters
for i in range(1, self.num_splitters + 1):
split_string = ""
for j in range(2**(i - 1)):
split_string = split_string + \
(("1" * int((length / (2**i)))) +
("0" * (int(length / (2**i)))))
self.splitting_strings.append(split_string)
# print self.splitting_strings
print 'Splitter(%d): %d splitters with strings: %s' % \
(length, self.num_splitters, self.splitting_strings)
self.current_method = 0
def _splitter(self, strings, advance):
"""splits the given string set based on the current splitting string.
optionally advances to the next splitting string for future splittings."""
split = self.splitting_strings[self.current_method]
if advance:
self.current_method = self.current_method + 1
bucket1 = []
bucket2 = []
for s in strings:
diff = hamming(s[0], split)
if diff < int(self.length * 0.5):
bucket1.append(s)
elif diff == int(self.length * 0.5):
if random.randint(0, 1):
bucket1.append(s)
else:
bucket2.append(s)
else:
bucket2.append(s)
return bucket1, bucket2
def split(self, strings=[], bucket1=[], bucket2=[], advance=True):
"""the outward-facing method for splitting. gracefully handles both
a single string set and a """
# print 'trying to split. current method is {}'.format(self.current_method)
if strings == [] and bucket1 == [] and bucket2 == []:
raise Exception('must provide split method with nonempty input')
if strings != []:
return self._splitter(strings, advance)
else:
if self.current_method >= self.num_splitters:
return bucket1, bucket2
else:
return self._splitter(bucket1 + bucket2, advance)
def advance(self):
self.current_method = self.current_method + 1
def occupancySort(strings):
"""Modifies given list of strings in place, sorting them in order of
increasing occupancy."""
# strings.sort(key = lambda x: x[0].count("1"))
strings.sort(key=lambda x: x[2])
def simple_traverse(meshes, strings, dim=0):
"""probes a list of strings for meshable pairs. the first string is checked
against the second, third/fourth, etc. mesh and unmeshed string lists are
modified in place. returns True if all strings have been meshed; else returns
False."""
# print 'here are the strings passed to simple_traverse', strings
# print 'and dim is', dim
matched = []
for i in range(len(strings) - 2, -1 + dim, -2):
num1 = strings[i][1]
num2 = strings[i + 1][1]
# print num1, num2
if num1 & num2 == 0:
matched.append(i)
meshes.append((strings[i], strings[i + 1]))
# meshes.append(strings[i+1])
# print "adding mesh {}, {}".format(strings[i], strings[i+1])
for x in matched:
del strings[x + 1]
del strings[x]
if len(strings) == 0:
return True
return False
def traverse(meshes, bucket1=None, bucket2=None, strings=None, extra=False):
"""looks for meshable pairs between the buckets. modifies the buckets and
the list of found meshes in place. returns whether or not meshing is done.
throws an assertion error if only one bucket has anything in it, so the
caller can resplit the buckets or whatever."""
if strings != None:
# print 'found strings'
return simple_traverse(meshes, strings)
if bucket1 == None or bucket2 == None:
raise Exception(
'must pass either buckets or string set to traverse function')
dim = min(len(bucket1), len(bucket2))
if len(bucket1) == len(bucket2) == 0:
return True
assert dim != 0
matched = []
if dim == 1:
num1 = bucket1[0][1]
num2 = bucket2[0][1]
if num1 & num2 == 0:
matched.append(0)
for i in range(dim - 1, 0, -1):
num1 = bucket1[i][1]
num2 = bucket2[i][1]
if num1 & num2 == 0:
matched.append(i)
for x in matched:
meshes.append((bucket1[x], bucket2[x]))
# if one bucket is larger than the other, mesh remaining strings among themselves
if extra:
# print 'extra'
if len(bucket1) != len(bucket2):
# print bucket1, bucket2
# print 'chosing one'
bucket = max([bucket1, bucket2], key=lambda x: len(x))
# print '{} chosen'.format(bucket)
simple_traverse(meshes, bucket, dim)
# print bucket
for x in matched:
del bucket1[x]
del bucket2[x]
return False
def simpleGreedyTraverse(meshes, strings, cutoff=None):
"""given a list of strings, exhaustively checks the first string for meshes,
then the second, etc. found meshes are removed from the list. ends when all
pairs of remaining strings have been checked. returns whether or not all
strings have been meshed."""
length = len(strings)
strlength = len(strings[0][0])
# matched = []
if cutoff:
cutoffs = generate_cutoffs(strings, strlength, cutoff)
for i in range(length):
# if the current string has already been meshed, skip it
if strings[i][3]:
continue
if cutoff:
current_cutoff = cutoffs[strings[i][2]]
for j in range(i + 1, length):
# if current string has already been meshed, skip it
if strings[j][3]:
continue
if cutoff and strings[j][2] >= current_cutoff:
break
# if i not in matched and j not in matched: (should be unnecessary now, test soon)
if not strings[i][3] and not strings[j][3]:
num1 = strings[i][1]
num2 = strings[j][1]
if num1 & num2 == 0:
# matched.append(i)
# matched.append(j)
strings[i] = (strings[i][0], strings[i]
[1], strings[i][2], True)
strings[j] = (strings[j][0], strings[j]
[1], strings[j][2], True)
meshes.append((strings[i], strings[j]))
break
for string1, string2 in meshes:
strings.remove(string1)
strings.remove(string2)
if len(strings) == 0:
return True
return False
def greedyTraverse(meshes, bucket1=None, bucket2=None, strings=None, cutoff=None):
"""
Looks for meshable pairs between the buckets greedily (looks
first at all potential meshes with the first string in bucket1 and
anything in bucket 2, then the second string in bucket 2 with
everything in bucket 2, etc. adds found pairs to meshes in
place. returns whether or not all strings have been meshed.
"""
# if only one string list is supplied, search it exhaustively for
# pairs using a simpler function
if strings != None:
return simpleGreedyTraverse(meshes, strings, cutoff)
if bucket1 == None or bucket2 == None:
raise Exception(
'must pass either buckets or string set to traverse function')
strlength = len(bucket1[0][0])
len1, len2 = len(bucket1), len(bucket2)
assert len1 != 0 and len2 != 0
if cutoff:
cutoffs = generate_cutoffs(bucket1, strlength, cutoff)
for i in range(len1):
if cutoff:
bkt1cutoff = cutoffs[bucket1[i][2]]
for j in range(len2):
# notice when (due to occupancy ordering) there is little hope of finding more meshes
# for the ith string in bucket 1
if cutoff and bucket2[j][2] >= bkt1cutoff:
# print "doing a break!"
break
if not bucket1[i][3] and not bucket2[j][3]:
num1 = bucket1[i][1]
num2 = bucket2[j][1]
if num1 & num2 == 0:
bucket1[i] = (bucket1[i][0], bucket1[i]
[1], bucket1[i][2], True)
bucket2[j] = (bucket2[j][0], bucket2[j]
[1], bucket2[j][2], True)
meshes.append((bucket1[i], bucket2[j]))
for string1, string2 in meshes:
# print "removing {} from bucket1 and {} from bucket2".format(string1, string2)
bucket1.remove(string1)
bucket2.remove(string2)
if len(bucket1) == len(bucket2) == 0:
return True
return False
if __name__ == '__main__':
bkt1 = formatStrings([("11100000"), ("11111000")])
bkt2 = formatStrings([("00011111"), ("00000111")])
meshes = []
greedyTraverse(meshes, bucket1=bkt1, bucket2=bkt2, cutoff=None)
# occupancySort(bkt1)
print bkt1, bkt2, meshes
# print fast_q(64, 25,13)
# print generate_cutoffs(bkt1, 8)
# print generate_cutoffs(bkt2, 8)
```
#### File: Mesh/theory/test.py
```python
from __future__ import division
import logging
import math
from choose import nCr
import numpy as np
from scipy.misc import comb
import createRandomString as c
import meshers
import time
import random
import functools
import json
import pickle
import os
from mesh_util import occupancySort, formatStrings, fast_q
from createRandomString import createIndependentRandomStrings
#logging.getLogger('').handlers = []
#logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
#logging.debug('This is a log message.')
#logging.info('test')
#logging.warning('double test')
#
#strings = createIndependentRandomStrings(4,10,numOnes = 2)
#new_strings = []
#for string in strings:
# new_strings.append((string, long(string, base=2)))
#print new_strings
#print "\n \n \n"
##occupancySort(strings)
#new_strings.sort(key = lambda x: x[0].count("1"))
#print new_strings
strings = createIndependentRandomStrings(256, 10000, numOnes = 5)
strings = formatStrings(strings)
occs = [x[2] for x in strings]
print np.mean(occs)
print np.std(occs)
def faster_q(length, occ1, occ2):
numerator = 1
for i in range(length-occ1, length-occ1-occ2, -1):
numerator *= i
denominator = 1
for i in range(length, length-occ2, -1):
denominator *= i
return float(numerator)/float(denominator)
length = 128
start = time.time()
for occ1 in range(0,50):
for occ2 in range(0,50):
result1 = fast_q(length, occ1, occ2)
t1 = time.time() - start
start = time.time()
for occ1 in range(0,50):
for occ2 in range(0,50):
result2 = faster_q(length, occ1, occ2)
t2 = time.time()-start
print 'fast_q got {} in {} ms'.format(result1, t1)
print 'faster_q got {} in {} ms'.format(result2, t2)
``` |
{
"source": "johnteslade/azulejo",
"score": 3
} |
#### File: azulejo/azulejo/arrange_base.py
```python
import logging
from .geometry import Geometry
class ArrangeBase(object):
""" A base class for defining an action to arrange window(s) """
def __init__(self, screen_in):
""" Initialiser """
self._screen = screen_in # Main screen object
#variable to hold the amount of windows since the last arrangement
self.arrangement_size = 0
def do(self, params):
""" Main function that performs the arrangement """
raise NotImplementedError
@staticmethod
def parse_simple_math_expressions(expression, subst_vars):
""" Parses the string expression and evaluates it """
expression = str(expression)
for subst in subst_vars.keys():
expression = expression.replace(subst, str(subst_vars[subst]))
return int(eval(expression))
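# Worked example (added for illustration):
#   parse_simple_math_expressions('w/2', {'w': 1920, 'h': 1080}) -> 960
# i.e. geometry entries may be simple expressions in the monitor width 'w'
# and height 'h'.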
def create_geometry(self, geometry_in, monitor):
""" Creates the geometry for the config input """
monitor_geometry = self._screen.get_monitor_geometry(monitor)
# Parse the string values coming in
geometry_out_list = [
self.parse_simple_math_expressions(
coord,
{'w': monitor_geometry.width, 'h': monitor_geometry.height}
)
for coord in geometry_in]
# Create final geometry (account for the x and y of the monitor)
geometry_out = Geometry(
x=geometry_out_list[0] + monitor_geometry.x,
y=geometry_out_list[1] + monitor_geometry.y,
width=geometry_out_list[2],
height=geometry_out_list[3]
)
logging.debug("Possible geometry = {}".format(geometry_out))
return geometry_out
def get_possible_positions(self, positions, monitor=None):
""" Function to create all possible window positions """
return [self.create_geometry(pos, monitor) for pos in positions]
```
#### File: azulejo/azulejo/arrange_multiple_windows.py
```python
from .arrange_base import ArrangeBase
class ArrangeMultipleWindows(ArrangeBase):
""" Class to arrange multiple windows """
def do(self, arrangement):
""" Main function that performs the arrangement """
self._screen.move_windows(
self.get_possible_positions(arrangement)
)
```
#### File: azulejo/azulejo/arrange_rotate.py
```python
from collections import deque
from .arrange_base import ArrangeBase
class ArrangeRotate(ArrangeBase):
""" Class to rotate through windows """
def do(self, dummy):
""" Main function that performs the arrangement """
windows = self._screen.get_all_windows()
amount_of_windows = len(windows)
if amount_of_windows > self.arrangement_size:
windows = windows[:self.arrangement_size]
geos = []
for window in windows:
window_geo = window.get_geometry()
window_geo = window_geo[:4]
geos.append(window_geo)
# do the actual rotations, lets use deque as it's dramatically more
# efficient than a trivial shift implementation
windows_deq = deque(windows)
windows_deq.rotate(1)
rotation_len = len(windows_deq)
i = 0
while i < rotation_len:
geometry_list_args = [0, 255]
index = rotation_len - (i + 1) #again, start by the tail
geometry_list_args.extend([int(x) for x in geos[index]])
windows_deq[index].unmaximize()
windows_deq[index].set_geometry(*geometry_list_args)
i += 1
#(windows_deq[0]).activate(int(time.time()))
#not sure why it doesn't work. if uncommented causes other windows
# beyond the rotated ones to hide behind current ones even after
# pressing ctrl+tab
```
#### File: azulejo/test/key_binder.py
```python
class KeyBinderDummy(object):
"""Class used to allow keybindings to be caught and to be actioned."""
def __init__(self):
self.bindings = []
self.saved_obj = None
def bind(self, action, dispatcher, dispatcher_params):
""" Bind a key press """
self.bindings.append({
'action': action,
'dispatcher': dispatcher,
'dispatcher_params': dispatcher_params,
})
def action_key(self, action):
""" Actions a key press by calling the relavent dispatcher """
key_found = [x for x in self.bindings if x['action'] == action]
assert len(key_found) == 1
func = key_found[0]['dispatcher']
func(key_found[0]['dispatcher_params'])
```
#### File: azulejo/test/screen_mock_base.py
```python
class ScreenMockBase(object):
""" Base mock object for the screen """
monitor_geometry = []
windows = []
def get_all_windows(self):
""" Gets all windows in the screen """
return self.windows
def get_monitor_geometry(self, monitor=None):
"""Returns a rectangle with geometry of the specified monitor.
If no monitor uses one with active window.
"""
if monitor == None:
monitor = self.get_active_window_monitor()
return self.monitor_geometry[monitor]
def get_active_window(self):
""" Returns the active window """
active_windows = [x for x in self.windows if x['active'] == True]
assert len(active_windows) == 1
return active_windows[0]
def get_active_window_monitor(self):
""" Returns the monitor of the currently active window """
for x in range(len(self.monitor_geometry)):
monitor = self.monitor_geometry[x]
window = self.get_active_window()['geometry']
if (window.x >= monitor.x) \
and (window.x < monitor.x + monitor.width) \
and (window.y >= monitor.y) \
and (window.y < monitor.y + monitor.height):
return x
# If we get here then we have a mismatch between windows and monitors
raise RuntimeError
def get_active_window_geometry(self):
""" Returns the geometry of the current active window """
return self.get_active_window()['geometry']
def move_active_window(self, new_geometry):
""" Moves the active window the specified geometry """
for x in range(len(self.windows)):
if self.windows[x]['active']:
self.windows[x]['geometry'] = new_geometry
break
def move_windows(self, new_geometry_list):
""" Moves the active window the specified geometry """
for x in range(len(new_geometry_list)):
self.windows[x]['geometry'] = new_geometry_list[x]
def maximise_active_window(self):
""" Maximises the active window """
monitor_size = self.get_monitor_geometry(
self.get_active_window_monitor())
self.move_active_window(monitor_size)
def get_number_monitors(self):
""" Returns the number of monitors in use """
return len(self.monitor_geometry)
def update(self):
""" Forces and update """
# Nothing to do in the mock
pass
``` |
{
"source": "johntfoster/IGA",
"score": 3
} |
#### File: johntfoster/IGA/IGA_PD_example.py
```python
import IGA
import numpy as np
import matplotlib.pyplot as plt
#get_ipython().magic(u'matplotlib inline')
# In[2]:
def run_case_1(num_knots, order, delta, norm, quad_degree=10):
h = 1.0 / num_knots
if delta > h:
num_boundary_elements = int(np.ceil(delta / h))  # np.linspace needs an integer count below
else:
num_boundary_elements = 1
omega_p1 = np.linspace(-delta, 0, num=(num_boundary_elements + 1))
omega = np.linspace(0, 1, num=(num_knots+1))
omega_p2 = np.linspace(1, 1 + delta, num=(num_boundary_elements + 1))
knot_vector = np.r_[-delta * np.ones(order), omega_p1[:-1], omega[:-1], omega_p2, np.ones(order) * (1 + delta)]
iga = IGA.PD1D(knot_vector, order, delta)
iga.degree = quad_degree
u = lambda x: x * (1 - x)
b = lambda x: np.ones(x.shape[0])
iga.compute_solutions(u, b, num_boundary_elements)
return iga.compute_error(norm=norm)
# In[ ]:
dofs = np.array([100,700])
errs = [ run_case_1(num_knots, order=1, delta=0.25, norm=2, quad_degree=4) for num_knots in dofs ]
# In[ ]:
# In[ ]:
#Fit a straight line
coefs = np.polyfit(np.log10(1.0 / dofs), np.log10(errs), 1)
y = 10 ** (coefs[0] * np.log10(1.0 / dofs) + coefs[1])
#Plot
plt.loglog(1.0 / dofs, y, 'b-')
plt.loglog(1.0 / dofs, errs, 'b^')
plt.xlabel("$\log_{10} h$")
plt.ylabel("$\log_{10} \Vert Error \Vert_{L_2}$");
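# The slope of the fitted line in log-log space is the observed convergence
# rate (note added for illustration):
print("observed convergence rate: {:.2f}".format(coefs[0]))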
```
#### File: johntfoster/IGA/IGA.py
```python
import sys
import numpy as np
from scipy.special import legendre
import scipy.sparse
from scipy.sparse.linalg import spsolve
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
class Bspline(object):
"""
Numpy implementation of Cox - de Boor algorithm in 1D
inputs:
knot_vector: Python list or Numpy array containing knot vector
entries
order: Order of interpolation, e.g. 0 -> piecewise constant between
knots, 1 -> piecewise linear between knots, etc.
outputs:
basis object that is callable to evaluate basis functions at given
values of knot span
"""
def __init__(self, knot_vector, order):
"""Initialize attributes"""
self.knot_vector = np.array(knot_vector)
self.p = order
def __basis0(self, xi):
"""Order zero basis"""
cond1 = np.array(self.knot_vector[:-1]) <= xi[:, None]
cond2 = xi[:, None] < np.array(self.knot_vector[1:])
return np.where(cond1 & cond2, 1.0, 0.0)
def __basis(self, xi, p, compute_derivatives=False):
"""
Recursive Cox - de Boor function to compute basis functions and
optionally their derivatives.
"""
if p == 0:
return self.__basis0(xi)
else:
basis_p_minus_1 = self.__basis(xi, p - 1)
first_term_numerator = xi[:, np.newaxis] - self.knot_vector[:-p]
first_term_denominator = self.knot_vector[p:] - self.knot_vector[:-p]
second_term_numerator = self.knot_vector[(p + 1):] - xi[:, np.newaxis]
second_term_denominator = (self.knot_vector[(p + 1):] -
self.knot_vector[1:-p])
#Change numerator in last recursion if derivatives are desired
if compute_derivatives and p == self.p:
first_term_numerator = np.ones((len(xi),
len(first_term_denominator))) * p
second_term_numerator = np.ones((len(xi),
len(second_term_denominator))) * -p
#Disable divide by zero error because we check for it
with np.errstate(divide='ignore', invalid='ignore'):
first_term = np.where(first_term_denominator != 0.0,
(first_term_numerator /
first_term_denominator), 0.0)
second_term = np.where(second_term_denominator != 0.0,
(second_term_numerator /
second_term_denominator), 0.0)
return (first_term[:,:-1] * basis_p_minus_1[:,:-1] +
second_term * basis_p_minus_1[:,1:])
def __call__(self, xi):
"""
Convenience function to make the object callable.
"""
return self.__basis(xi, self.p, compute_derivatives=False)
def d(self, xi):
"""
Convenience function to compute derivate of basis functions.
"""
return self.__basis(xi, self.p, compute_derivatives=True)
def plot(self):
"""
Convenience function to plot basis functions over full
range of knots.
"""
x_min = np.min(self.knot_vector)
x_max = np.max(self.knot_vector)
x = np.linspace(x_min, x_max, num=1000, endpoint=False)
N = self(x).T
for n in N:
plt.plot(x,n)
return plt.show()
def dplot(self):
"""
Convenience function to plot derivatives of basis functions over
full range of knots.
"""
x_min = np.min(self.knot_vector)
x_max = np.max(self.knot_vector)
x = np.linspace(x_min, x_max, num=1000, endpoint=False)
N = self.d(x).T
for n in N:
plt.plot(x,n)
return plt.show()
class NURBS_2D_Shape_Functions(Bspline):
def __init__(self, knot_vector_1, p_1, knot_vector_2, p_2, weights):
self.N = Bspline(knot_vector_1, p_1)
self.M = Bspline(knot_vector_2, p_2)
self.weights = weights
def __call__(self, xi, eta, derivative=None):
numerator = (np.einsum('...i,...j', self.M(eta), self.N(xi)) *
self.weights)
W = np.einsum('...i,...j,ij', self.M(eta), self.N(xi), self.weights)
R = numerator / W[:, None, None]
if derivative == 'xi':
dW = np.einsum('...i,...j,ij', self.M(eta), self.N.d(xi), self.weights)
R = (np.einsum('...i,...j', self.M(eta), self.N.d(xi)) * self.weights
+ dW[:, None, None] * R) / W[:, None, None]
if derivative == 'eta':
dW = np.einsum('...i,...j,ij', self.M.d(eta), self.N(xi), self.weights)
R = (np.einsum('...i,...j', self.M.d(eta), self.N(xi)) * self.weights
+ dW[:, None, None] * R) / W[:, None, None]
return R
def d_xi(self, xi, eta):
return self.__call__(xi, eta, derivative='xi')
def d_eta(self, xi, eta):
return self.__call__(xi, eta, derivative='eta')
def plot(self, shape_function_number=0, derivative=None):
xi_min = np.min(self.N.knot_vector)
xi_max = np.max(self.N.knot_vector)
eta_min = np.min(self.M.knot_vector)
eta_max = np.max(self.M.knot_vector)
xi = np.linspace(xi_min, xi_max, num=50, endpoint=False)
eta = np.linspace(eta_min, eta_max, num=50, endpoint=False)
x, y = np.meshgrid(xi, eta)
basis = self(x.flatten(), y.flatten(), derivative)
z = [basis[:,i,j].reshape(x.shape) for i in range(basis.shape[1]) for j in range(basis.shape[2])]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(x, y, z[shape_function_number], rstride=1,
cstride=1, cmap=cm.coolwarm, linewidth=0,
antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
class IGA2D(NURBS_2D_Shape_Functions):
def __init__(self, knot_vector_1, p_1, knot_vector_2, p_2,
control_points, weights):
self.R = NURBS_2D_Shape_Functions(knot_vector_1, p_1,
knot_vector_2, p_2,
weights)
self.x = control_points[:,:,0].flatten()
self.y = control_points[:,:,1].flatten()
self.num_of_basis_functions_1 = (self.R.N.knot_vector.shape[0] -
self.R.N.p - 1)
self.num_of_basis_functions_2 = (self.R.M.knot_vector.shape[0] -
self.R.M.p - 1)
self.num_of_global_basis_functions = (self.num_of_basis_functions_1 *
self.num_of_basis_functions_2)
self.num_of_elements = ((self.num_of_basis_functions_1 - self.R.N.p) *
(self.num_of_basis_functions_2 - self.R.M.p))
self.K = np.zeros((self.num_of_global_basis_functions,
self.num_of_global_basis_functions))
self.F = np.zeros(self.num_of_global_basis_functions)
self.nurbs_coords = self.__build_nurbs_coord_array()
self.connectivity_array = self.__build_connectivity_array()
def __build_nurbs_coord_array(self):
"""
Builds an array of coordinates in index space where each global basis
function begins, the position in the array is the global basis id
"""
#Index arrays in each basis direction
i_arr = np.arange(self.num_of_basis_functions_1, dtype=int)
j_arr = np.arange(self.num_of_basis_functions_2, dtype=int)
#Construct the coordinate array
return np.array([ (i, j) for j in j_arr for i in i_arr ], dtype=int)
def __build_connectivity_array(self):
"""
Builds an array that relates the local basis function #'s for each
element to the global basis function #'s. Ordering starts with the
lower-left hand corner of an element in index space and moves
backwards starting in the xi direction, followed by the eta
direction row-by-row
"""
#The total # of basis functions
number_of_basis_functions = self.num_of_global_basis_functions
#The global basis function id's
global_basis_ids = np.arange(number_of_basis_functions, dtype=int)
#Here we reshape the array to mimic the layout in basis index space,
#this makes finding the "lower left-hand" corner easier
global_basis_ids.shape = (self.num_of_basis_functions_2,
self.num_of_basis_functions_1)
#i_arr and j_arr are convenience indices for iterating through the
#basis functions to determine the "lower left-hand" corner of the
#elements. This procedure accounts for elements of zero measure due
#to possibly open knot vectors.
i_arr = np.arange(self.R.N.p, self.num_of_basis_functions_1, dtype=int)
j_arr = np.arange(self.R.M.p, self.num_of_basis_functions_2, dtype=int)
#Array of element corner indices pairs, i.e. (i,j)
elem_corner = [(i,j) for j in j_arr for i in i_arr]
#Constructs the connectivity array. This does a slice from the element
#corner location (i,j), backwards by p_1 in the \xi direction and p_2
#in the \eta direction to get all basis functions that have support on
#each element. Then it flattens the matrix into an array and reverses
#the order with the [::-1] to be consistent with the convention that
#the arrays start with the corner basis id and move backwards in \xi
#and \eta.
return np.array([(global_basis_ids[(j-self.R.M.p):(j+1),(i-self.R.N.p):(i+1)].flatten())[::-1]
for i,j in elem_corner])
def __compute_element_stiffness(self):
"""
Computes the element stiffness matrix
"""
con = self.connectivity_array
number_of_basis_functions = self.num_of_global_basis_functions
number_of_elements = self.num_of_elements
#The knot indices corresponding to the nurbs coordinates
#where elements begin
ni = self.nurbs_coords[con[:,0],0]
nj = self.nurbs_coords[con[:,0],1]
#Compute the Gauss quadrature points to integrate each shape function
#to full order
xi_, wt_xi_ = np.polynomial.legendre.leggauss(self.R.N.p + 1)
eta_, wt_eta_ = np.polynomial.legendre.leggauss(self.R.M.p + 1)
#Create all the quadrature point tuples
xi, eta = np.meshgrid(xi_, eta_)
wt_xi, wt_eta = np.meshgrid(wt_xi_, wt_eta_)
#Flatten arrays containing quadrature points and weights
xi = xi.flatten()
eta = eta.flatten()
wt_xi = wt_xi.flatten()
wt_eta = wt_eta.flatten()
#Takes Gauss integration points into parameter space, has structure
#xi_1 -> xi_1_el1, xi_1_el2, xi_1_el3, ...
#xi_2 -> xi_2_el1, xi_2_el2, xi_2_el3, ...
#flattened into one long array
xi = (((self.R.N.knot_vector[ni+1] - self.R.N.knot_vector[ni]) * xi[:, np.newaxis] +
(self.R.N.knot_vector[ni+1] + self.R.N.knot_vector[ni])) / 2.0).flatten()
eta = (((self.R.M.knot_vector[nj+1] - self.R.M.knot_vector[nj]) * eta[:, np.newaxis] +
(self.R.M.knot_vector[nj+1] + self.R.M.knot_vector[nj])) / 2.0).flatten()
#Evaluate basis functions. 1st axis is the # of Gauss integration points, 2nd
#axis is # of elements, 3rd is values of shape functions
dRdxi = self.R.d_xi(xi, eta).reshape(-1, number_of_elements, number_of_basis_functions)
dRdeta = self.R.d_eta(xi, eta).reshape(-1, number_of_elements, number_of_basis_functions)
#Store only the shape function values with support on an element
#shape=(# Gauss points, # of elements, # of nonzero values of shape functions)
dRdxi = dRdxi[:, np.arange(con.shape[0])[:, np.newaxis], con]
dRdeta = dRdeta[:, np.arange(con.shape[0])[:, np.newaxis], con]
#These are dot products, x = x_i . R_i, broadcast to every integration point
#shape = (# Gauss points, # of elements)
J11 = np.sum(self.x[con] * dRdxi, axis=2)
J12 = np.sum(self.y[con] * dRdxi, axis=2)
J21 = np.sum(self.x[con] * dRdeta, axis=2)
J22 = np.sum(self.y[con] * dRdeta, axis=2)
#Compute the determinant of J and its inverse
detJ = J11 * J22 - J12 * J21
Jinv11 = J22 / detJ
Jinv12 = -J12 / detJ
Jinv21 = -J21 / detJ
Jinv22 = J11 / detJ
#Gradient of mapping between Gauss coords and parametric coords
dxidxi = (self.R.N.knot_vector[ni+1] - self.R.N.knot_vector[ni]) / 2.0
detadeta = (self.R.M.knot_vector[nj+1] - self.R.M.knot_vector[nj]) / 2.0
#Jacobian determinant of mapping from physical to Gauss coords.
#Uses the fact that det(A*B) = det(A) * det(B) and
#det(B) is product along diagonal for a diagonal matrix
#
#Also multiply the quadrature weights in at this point
detJ = detJ * dxidxi * detadeta * wt_xi[:, None] * wt_eta[:, None]
#The shape functions in physical coordinates
self.dRdx = (dRdxi * Jinv11[:, None, np.arange(Jinv11.shape[0])] +
dRdeta * Jinv12[:, None, np.arange(Jinv12.shape[0])])
self.dRdy = (dRdxi * Jinv21[:, None, np.arange(Jinv21.shape[0])] +
dRdeta * Jinv22[:, None, np.arange(Jinv22.shape[0])])
#The element stiffness matrices.
return np.sum((np.einsum('...i,...j', self.dRdx, self.dRdx) +
np.einsum('...i,...j', self.dRdy, self.dRdy)) *
detJ[:,:,None,None], axis=0)
def assemble(self):
ke = self.__compute_element_stiffness()
for i in range(self.num_of_elements):
idx_grid = np.ix_(self.connectivity_array[i],
self.connectivity_array[i])
self.K[idx_grid] += ke[i]
def apply_bcs(self, basis_ids, values):
row_replace = np.zeros(self.num_of_global_basis_functions)
for value_idx, basis_id in enumerate(basis_ids):
self.K[basis_id] = row_replace
self.K[basis_id, basis_id] = 1
self.F[basis_id] = values[value_idx]
def solve(self):
self.K = scipy.sparse.csr_matrix(self.K)
self.solution = spsolve(self.K, self.F)
def get_solution(self):
return self.solution
def plot_solution(self):
xi_min = np.min(self.R.N.knot_vector)
xi_max = np.max(self.R.N.knot_vector)
eta_min = np.min(self.R.M.knot_vector)
eta_max = np.max(self.R.M.knot_vector)
xi = np.linspace(xi_min, xi_max, num=50, endpoint=False)
eta = np.linspace(eta_min, eta_max, num=50, endpoint=False)
x, y = np.meshgrid(xi, eta)
basis = self.R(x.flatten(), y.flatten())
z = np.einsum('...ij,ij', basis,
self.solution.reshape(basis.shape[1:])).reshape(x.shape)
plot = plt.contourf(x, y, z, cmap="coolwarm")
plt.colorbar(plot, orientation='horizontal', shrink=0.6);
plt.clim(0,100)
plt.axes().set_aspect('equal')
def plot_knot_mesh(self):
xi_min = np.min(self.R.N.knot_vector)
xi_max = np.max(self.R.N.knot_vector)
eta_min = np.min(self.R.M.knot_vector)
eta_max = np.max(self.R.M.knot_vector)
xi = np.linspace(xi_min, xi_max, num=50, endpoint=False)
eta = np.linspace(eta_min, eta_max, num=50, endpoint=False)
for knot in self.R.M.knot_vector:
knot_array = np.ones_like(eta) * knot
basis = self.R(knot_array, eta)
line_x = np.einsum('...ij,ij', basis, self.x.reshape(basis.shape[1:])).flatten()
line_y = np.einsum('...ij,ij', basis, self.y.reshape(basis.shape[1:])).flatten()
class PD1D(Bspline):
def __init__(self, knot_vector, p, delta):
"""
Initializes 1D isogeometric peridynamics problem
"""
self.degree = 10
self.delta = delta
self.N = Bspline(knot_vector, p)
self.num_of_basis_functions = (self.N.knot_vector.shape[0] -
self.N.p - 1)
def build_connectivity_array(self):
i_arr = (np.unique(self.N.knot_vector,
return_counts=True)[1]).cumsum()[:-1] - 1
index_arr = np.arange(self.N(np.array([0.0])).shape[0], dtype=int)
return np.array([ index_arr[i:(i + self.N.p + 1)] for i in i_arr])
def __compute_stiffness(self):
"""
Computes the full stiffness matrix with `degree` integration points
"""
#Ensure even number of quadrature points are used
try:
if self.degree % 2 != 0:
raise ValueError("'degree' must be even to avoid singular kernel evaluation during quadrature.")
except ValueError as msg:
print(msg)
return
#Generate quadrature points
xi, wts = np.polynomial.legendre.leggauss(self.degree)
#Determine upper and lower bounds for quadrature on each element
b = self.N.knot_vector[(self.N.p + 2):-(self.N.p + 1), None]
a = self.N.knot_vector[(self.N.p + 1):-(self.N.p + 2), None]
#The integration points in parameter space
x = (((b - a) * xi + b + a) / 2.0).ravel()
#The total number of integration points over the `elements`, i.e. not
#over the horizons
num_elem_quad_points = x.shape[0]
#Evaluate the shape functions at x
Nx = self.N(x).reshape(num_elem_quad_points, -1)
#The upper and lower bounds of integration over each family
d = x[:,None] + self.delta
c = x[:,None] - self.delta
#The integration points for each horizon in parameter space
y = (((d - c) * xi + d + c) / 2.0)
#Evaluation shape functions at each y
Ny = self.N(y.ravel()).reshape(num_elem_quad_points, xi.shape[0], -1)
#The total number of global shape functions
num_global_sf = Nx.shape[1]
#Evaluate the "inner" integral over y
inner = ((d - c) / 2 * np.sum((Nx[:,None,:] - Ny) /
np.abs(x[:,None] - y)[:,:,None] * wts[None,:,None], axis=1))
#The shape of the element stiffness matrix
ke_shape = (-1, self.degree, num_global_sf, num_global_sf)
#Evaluate the outer integral and assemble stiffness matrix
self.K = (np.sum((b[:,None] - a[:,None]) / 2 *
np.sum(np.einsum('...i,...j', Nx, inner).reshape(*ke_shape) *
wts[None, :, None, None], axis=1), axis=0) /
self.delta / self.delta)
return
def __compute_body_force_term(self, bfun):
"""
Performs quadrature on the RHS of the peridynamic equation with a
given body force function, b(x). Quadrature is performed at the same
order of quadrature as the stiffness matrix.
"""
#Generate quadrature points
xi, wts = np.polynomial.legendre.leggauss(self.degree)
#Determine upper and lower bounds for quadrature on each element
b = self.N.knot_vector[(self.N.p + 2):-(self.N.p + 1), None]
a = self.N.knot_vector[(self.N.p + 1):-(self.N.p + 2), None]
#The integration points in parameter space
x = (((b - a) * xi + b + a) / 2.0).ravel()
#The total number of integration points over the `elements`, i.e. not
#over the horizons
num_elem_quad_points = x.shape[0]
#Evaluate shape functions
Nx = self.N(x)
#Total # of shape functions
num_global_sf = Nx.shape[1]
#Evaluate body force function at quadrature points
bx = bfun(x)
#Multiply quadrature weights in
Nx = (((b[:,None] - a[:,None]) / 2 *
(Nx.reshape(-1,xi.shape[0], num_global_sf) *
wts[None, :, None])).reshape(-1,num_global_sf))
#Integrate rhs
self.b = np.dot(Nx.T, bx)
return
def manufacture_solution(self, ufun, num_boundary_elements):
"""
Manufactures a solution on the domain (0,1) from the stiffness
matrix and ufun. Quadrature performed with `degree` points.
"""
#The total number of basis functions in the boundary region
nbb = num_boundary_elements * (self.N.p)
self.__compute_stiffness()
#The stiffness matrix excluding boundary terms
print(self.K)
A = self.K[nbb:-nbb,nbb:-nbb]
print(A)
#Discrete domain
x = np.linspace(0.0, 1.0, num=(A.shape[0] + 1), endpoint=False)[1:]
#Evaluate shape functions at discrete points
NN = self.N(x)[:,nbb:-nbb]
#Manufacture control variables
d = np.dot(np.linalg.inv(NN), ufun(x))
#Manufacture solution
self.sol = np.dot(A, d)
return
def compute_rhs(self, ufun, bfun, num_boundary_elements):
try:
if self.K is None:
raise ValueError("You must generate the stiffness matrix first")
except ValueError as msg:
print(msg)
return
#The total number of basis functions in the boundary region
nbb = num_boundary_elements * (self.N.p)
self.__compute_body_force_term(bfun)
self.rhs = (self.b[nbb:-nbb] -
np.einsum('...i,i', self.K[nbb:-nbb,0:nbb],
ufun(np.linspace(-self.delta, 0, num=nbb))) -
np.einsum('...i,i', self.K[nbb:-nbb,-nbb:],
ufun(np.linspace(1.0, 1.0 + self.delta, num=nbb))))
def compute_solutions(self, u, b, num_boundary_elements):
self.manufacture_solution(u, num_boundary_elements)
self.compute_rhs(u, b, num_boundary_elements)
def compute_error(self, norm=2):
return np.linalg.norm(self.sol - self.rhs, ord=norm)
``` |
{
"source": "johntfoster/PyTriFD",
"score": 3
} |
#### File: examples/one_dim_nonlinear_diffusion/1dfd.py
```python
from PyTriFD import FD
import matplotlib.pyplot as plt
class OneDimNonlinearDiffusion(FD):
def parse_additional_inputs(self):
self.k = self.inputs['material parameters']['k']
return
def residual_operator(self, my_field_overlap_sorted,
my_field_overlap_sorted_old=None):
u = my_field_overlap_sorted[0]
residual = ((u[:-2] - 2*u[1:-1] + u[2:]) /
(self.deltas[0] ** 2.0) - self.k * u[1:-1] * u[1:-1])
return residual
def plot_solution(self):
nodes = self.get_nodes_on_rank0()
u = self.get_solution_on_rank0()
if self.rank == 0:
fig, ax = plt.subplots()
ax.plot(nodes[0], u)
plt.show()
if __name__ == "__main__":
problem = OneDimNonlinearDiffusion('inputs.yml')
problem.solve()
u = problem.plot_solution()
``` |
{
"source": "johnthagen/cpplint-junit",
"score": 3
} |
#### File: johnthagen/cpplint-junit/cpplint_junit.py
```python
import argparse
import collections
import os
import re
import sys
from typing import Dict, List
from xml.etree import ElementTree
from exitstatus import ExitStatus
class CpplintError(object):
def __init__(self, file: str, line: int, message: str) -> None:
"""Constructor.
Args:
file: File error originated on.
line: Line error originated on.
message: Error message.
"""
self.file = file
self.line = line
self.message = message
def parse_arguments() -> argparse.Namespace:
parser = argparse.ArgumentParser(description='Converts cpplint output to JUnit XML format.')
parser.add_argument('input_file', type=str, help='cpplint stdout text file.')
parser.add_argument('output_file', type=str, help='JUnit XML output file.')
return parser.parse_args()
def parse_cpplint(file_name: str) -> Dict[str, List[CpplintError]]:
"""Parses a cpplint output file.
Args:
file_name: cpplint output file.
Returns:
Parsed errors grouped by file name.
Raises:
IOError: File does not exist (More specifically FileNotFoundError on Python 3).
"""
with open(file_name, 'rt') as file:
lines = file.readlines()
errors = collections.defaultdict(list)
for line in lines:
line = line.rstrip()
match = re.search(r'(\S+):(\d+):\s+(.+)', line)
if match is not None:
error = CpplintError(file=match.group(1),
line=int(match.group(2)),
message=match.group(3))
errors[error.file].append(error)
return errors
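# Example of the expected input format (a hypothetical cpplint line, shown here only to
# illustrate how the regex above splits it):
#   "src/widget.cc:42:  Missing space before {  [whitespace/braces] [5]"
# parses into CpplintError(file='src/widget.cc', line=42,
#                          message='Missing space before {  [whitespace/braces] [5]')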
def generate_test_suite(errors: Dict[str, List[CpplintError]]) -> ElementTree.ElementTree:
"""Creates a JUnit XML tree from parsed cpplint errors.
Args:
errors: Parsed cpplint errors.
Returns:
XML test suite.
"""
test_suite = ElementTree.Element('testsuite')
test_suite.attrib['errors'] = str(len(errors))
test_suite.attrib['failures'] = str(0)
test_suite.attrib['name'] = 'cpplint errors'
test_suite.attrib['tests'] = str(len(errors))
test_suite.attrib['time'] = str(1)
for file_name, file_errors in errors.items():
test_case = ElementTree.SubElement(test_suite,
'testcase',
name=os.path.relpath(file_name))
for error in file_errors:
ElementTree.SubElement(test_case,
'error',
file=os.path.relpath(error.file),
line=str(error.line),
message='{}: {}'.format(error.line, error.message))
return ElementTree.ElementTree(test_suite)
def main() -> ExitStatus: # pragma: no cover
"""Main function.
Returns:
Exit code.
"""
args = parse_arguments()
try:
errors = parse_cpplint(args.input_file)
except IOError as e:
print(str(e))
return ExitStatus.failure
if len(errors) > 0:
tree = generate_test_suite(errors)
tree.write(args.output_file, encoding='utf-8', xml_declaration=True)
return ExitStatus.success
if __name__ == '__main__': # pragma: no cover
sys.exit(main())
``` |
{
"source": "johnthagen/exitstatus",
"score": 3
} |
#### File: johnthagen/exitstatus/tests.py
```python
import sys
import unittest
from exitstatus import ExitStatus
class ExitStatusTestCase(unittest.TestCase):
def test_equality(self) -> None:
self.assertEqual(ExitStatus.success, 0)
self.assertEqual(ExitStatus.failure, 1)
def test_value(self) -> None:
self.assertEqual(ExitStatus.success.value, 0)
self.assertEqual(ExitStatus.failure.value, 1)
def test_identity(self) -> None:
self.assertIs(ExitStatus.success, ExitStatus.success)
self.assertIsNot(ExitStatus.success, ExitStatus.failure)
def test_sys_exit(self) -> None:
with self.assertRaises(SystemExit):
sys.exit(ExitStatus.success)
def test_exit(self) -> None:
with self.assertRaises(SystemExit):
exit(ExitStatus.success)
``` |
{
"source": "johnthagen/pytap2",
"score": 3
} |
#### File: src/pytap2/__init__.py
```python
import atexit
import enum
import fcntl
import os
import struct
from typing import Optional
TUNSETIFF = 0x400454CA
IFF_NO_PI = 0x1000
"""Mask to disable packet information from being prepended to packets sent through TUN/TAP."""
PACKET_INFO_SIZE = 4
"""Size of packet information, in bytes, prepended to packets sent in TUN/TAP when packet
information is enabled."""
@enum.unique
class TapMode(enum.Enum):
"""The device mode.
Values correlate to corresponding values needed for ioctl calls.
"""
Tun = 0x0001
"""IFF_TUN."""
Tap = 0x0002
"""IFF_TAP"""
class TapDevice:
"""Tun/Tap device object."""
def __init__(
self,
mode: TapMode = TapMode.Tun,
name: Optional[str] = None,
dev: str = "/dev/net/tun",
mtu: int = 1500,
enable_packet_info: bool = False,
) -> None:
"""Initialize TUN/TAP device object.
Args:
mode: Select tun or tap device mode.
name: The name of the new device. If not supplied, a default
will be provided. An integer will be added to build the real device name.
dev: The device node name the control channel is connected to.
mtu: The MTU size, in bytes, to be applied to the new device.
enable_packet_info: Whether or not to enable packet information header to be
prepended to each packet.
"""
self._mode = mode
# Create interface name to request from tuntap module.
if name is None:
if self._mode is TapMode.Tun:
self._name = "tun%d"
elif self._mode is TapMode.Tap:
self._name = "tap%d"
else:
self._name = name + "%d"
# Open control device and request interface.
self._fd = os.open(dev, os.O_RDWR)
self._enable_packet_info = enable_packet_info
mode_value = self._mode.value
if not self._enable_packet_info:
mode_value |= IFF_NO_PI
ifs = fcntl.ioctl(
self._fd,
TUNSETIFF, # type: ignore
struct.pack("16sH", self._name.encode(), mode_value),
)
# Retrieve real interface name from control device.
self._name = ifs[:16].strip(b"\x00").decode()
self._mtu = mtu
# Properly close device on exit.
atexit.register(self.close)
def __enter__(self) -> "TapDevice":
self.up()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
@property
def name(self) -> str:
"""The device name."""
return self._name
@property
def mode(self) -> TapMode:
"""The device mode (tap or tun)."""
return self._mode
@property
def mtu(self) -> int:
"""The device MTU."""
return self._mtu
@property
def is_packet_information_enabled(self) -> bool:
"""Whether packet information header is enabled for this device."""
return self._enable_packet_info
@property
def fd(self) -> int:
"""The device file descriptor."""
return self._fd
def fileno(self) -> int:
"""The device file descriptor.
This method is named specifically so that the object can be
passed to select.select() calls.
"""
return self._fd
def read(self, num_bytes: Optional[int] = None) -> bytes:
"""Read data from the device.
Args:
num_bytes: The number of bytes to read. If not specified, the MTU size is used,
including the optional packet information header if enabled on the device.
"""
if num_bytes is None:
num_bytes = self._mtu
# If packet information is enabled, 4 extra bytes will be appended to a packet
# that is potentially already the maximum MTU size, so ensure that by
# default we can read one entire MTU-sized packet and this header.
if self._enable_packet_info:
num_bytes += PACKET_INFO_SIZE
return os.read(self._fd, num_bytes)
def write(self, data: bytes) -> None:
"""Write data to the device. No care is taken for MTU limitations or similar."""
os.write(self._fd, data)
def ifconfig(self, **args):
"""Issue ifconfig command on the device.
Keyword Args:
address: IP address of the device, can be in CIDR notation (see man ifconfig).
netmask: Network mask.
network: Network base address, normally set automatically.
broadcast: Broadcast address, normally set automatically.
mtu: Link MTU, this will also affect the read() method.
hwclass: Hardware class, normally ether for ethernet.
hwaddress: Hardware (MAC) address, in conjunction with hwclass.
"""
# TODO: New systems like Ubuntu 17.04 do not come with ifconfig pre-installed.
ifconfig_cmd = "ifconfig {} ".format(self._name)
try:
ifconfig_cmd = "{} {} ".format(ifconfig_cmd, args["address"])
except KeyError:
pass
try:
ifconfig_cmd = "{} {} {} ".format(ifconfig_cmd, "netmask", args["netmask"])
except KeyError:
pass
try:
ifconfig_cmd = "{} {} {} ".format(ifconfig_cmd, "network", args["network"])
except KeyError:
pass
try:
ifconfig_cmd = "{} {} {} ".format(ifconfig_cmd, "broadcast", args["broadcast"])
except KeyError:
pass
try:
ifconfig_cmd = "{} {} {} ".format(ifconfig_cmd, "mtu", args["mtu"])
except KeyError:
pass
try:
ifconfig_cmd = "{} {} {} {} ".format(
ifconfig_cmd, "hw", args["hwclass"], args["hwaddress"]
)
except KeyError:
pass
ret = os.system(ifconfig_cmd)
if ret != 0:
raise IfconfigError("ifconfig command failed.")
# Save MTU if ifconfig was successful so buffer sizes can be adjusted.
try:
self._mtu = args["mtu"]
except KeyError:
pass
def up(self) -> None:
"""Bring up device. This will effectively run "ifconfig up" on the device."""
ret = os.system("ifconfig {} up".format(self._name))
if ret != 0:
raise IfconfigError()
def down(self) -> None:
"""Bring down device. This will effectively call "ifconfig down" on the device."""
ret = os.system("ifconfig {} down".format(self._name))
if ret != 0:
raise IfconfigError()
def close(self) -> None:
"""Close the control channel.
This will effectively drop all locks and remove the TUN/TAP device.
You must manually take care that your code does not try to operate on the interface
after closing the control channel.
"""
try:
os.close(self._fd)
except OSError:
pass
class IfconfigError(Exception):
"""Exception thrown if an ifconfig command returns with a non-zero exit status."""
``` |
{
"source": "JohnTheCoolingFan/Motorchik",
"score": 3
} |
#### File: JohnTheCoolingFan/Motorchik/motorchik_setup.py
```python
import argparse
import getpass
import json
import os.path as p
def get_argparser():
argparser = argparse.ArgumentParser(description='Motorchik, discord bot with extensive per-guild configuration directly in discord chat.')
argparser.add_argument('-s', '--setup', action='store_true')
return argparser
def interactive_setup():
print('Welcome to Motorchik interactive setup. It will ask for some parameters and create a new config file with entered parameters')
config_data = dict()
bot_token = getpass.getpass('Please enter bot\'s token (will not be echoed): ')
config_data['token'] = bot_token
log_channel_id = input('(Optional) Log channel id. Leave empty for none: ')
if log_channel_id:
try:
config_data['log_channel'] = int(log_channel_id)
except ValueError:
config_data['log_channel'] = 0
else:
config_data['log_channel'] = 0
attempt_input = True
while attempt_input:
print('''
Config storage method is how guild and user settings are going to be stored. Enter the name or number.
1. mongo (default)''')
config_storage_method = input('Choice: ')
if config_storage_method in ['1', 'mongo']:
config_data['mongo'] = setup_mongo()
attempt_input = False
elif config_storage_method == '':
print('Choosing default method: mongo')
attempt_input = False
config_data['mongo'] = setup_mongo()
else:
print('Invalid input')
config_file_path = 'config.json'
if p.exists(config_file_path):
print('config.json already exists! New config will be written to config.json.new')
config_file_path = 'config.json.new'
with open(config_file_path, 'w') as config_file:
json.dump(config_data, config_file)
print('Finished setup. Motorchik is ready to start.')
def setup_mongo():
print('You have chosen to store guild settings in MongoDB.')
mongo_host = input('MongoDB host address: ')
mongo_port = input('MongoDB server port (optional if specified in the host address): ')
if mongo_port:
try:
mongo_port = int(mongo_port)
except ValueError:
mongo_port = ''
print('For remote access, MongoDB requires to have a user and a password. If you plan to host MongoDB on the same machine as Motorchik, leave the next two empty.')
mongo_username = input('MongoDB username: ')
mongo_password = getpass.getpass('MongoDB user password (will not be echoed): ')
print('Finished setting up MongoDB credentials')
return {'host': mongo_host, 'port': mongo_port, 'username': mongo_username, 'password': mongo_password}
if __name__ == '__main__':
interactive_setup()
``` |
{
"source": "john-the-dev/leetcode",
"score": 4
} |
#### File: leetcode/Array/1480RunningSumArray.py
```python
from common import *
class Solution:
'''
Use previous sum to calculate current sum.
O(n) runtime, O(1) storage.
Beat 64% runtime, 99.9% storage of all Leetcode submissions.
'''
def runningSum(self, nums: List[int]) -> List[int]:
out,n = [],len(nums)
if n == 0: return out
out.append(nums[0])
for i in range(1,n):
out.append(out[i-1]+nums[i])
return out
# Tests.
assert(Solution().runningSum([1,2,3,4]) == [1,3,6,10])
assert(Solution().runningSum([1,1,1,1,1]) == [1,2,3,4,5])
assert(Solution().runningSum([3,1,2,10,1]) == [3,4,6,16,17])
```
#### File: leetcode/Array/14LongestCommonPrefix.py
```python
class Solution:
'''
Vertical scan of strings: compare the character at each index across all strings until a mismatch.
O(N) runtime, O(1) storage, in which N is total # of characters.
Beat 93% runtime, 5% storage of all Leetcode submissions. The low storage percentile is surprising, since the algorithm uses only constant extra storage.
'''
def longestCommonPrefix(self, strs):
if len(strs) == 0: return ''
i = 0
while True:
c,match = None,True
for s in strs:
if i >= len(s):
match = False
break
if c == None:
c = s[i]
elif c != s[i]:
match = False
break
if not match: break
i += 1
return strs[0][:i]
# Tests.
assert(Solution().longestCommonPrefix(["flower","flow","flight"]) == "fl")
assert(Solution().longestCommonPrefix(["dog","racecar","car"]) == "")
assert(Solution().longestCommonPrefix(["","flow","flight"]) == "")
assert(Solution().longestCommonPrefix(["flower","flow",""]) == "")
assert(Solution().longestCommonPrefix([]) == "")
```
#### File: leetcode/Array/209MinimumSizeSubarraySum.py
```python
from common import *
import sys
class Solution:
'''
Sliding window from left, slide j to reach target, slide i to shrink.
O(n) runtime, O(1) storage.
Beat 54% runtime, 79% storage of all Leetcode submissions.
'''
def minSubArrayLen(self, s: int, nums: List[int]) -> int:
i,j,n,out = 0,0,len(nums),[sys.maxsize, 0]
while i < n:
while j < n and out[1] < s:
out[1] += nums[j]
j += 1
if out[1] >= s: out[0] = min(out[0], j-i)
out[1] -= nums[i]
i += 1
return out[0] if out[0] != sys.maxsize else 0
# Tests.
assert(Solution().minSubArrayLen(s = 7, nums = [2,3,1,2,4,3]) == 2)
assert(Solution().minSubArrayLen(s = 4, nums = [1,4,4]) == 1)
assert(Solution().minSubArrayLen(s = 11, nums = [1,1,1,1,1,1,1,1]) == 0)
```
#### File: leetcode/Array/271EncodeDecodeStrings.py
```python
from common import *
'''
Encode to numbers and decode from numbers.
O(N) runtime for both encode and decode, in which N is total # of characters in strs. O(N) storage.
Beat 5% runtime, 29% storage of all Leetcode submissions.
'''
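# Worked example of the scheme (computed by hand, for illustration): the string 'Hi'
# has no leading chr(0) characters, so zero = 0 and
# num = ord('H') * 256 + ord('i') = 72 * 256 + 105 = 18537, so ['Hi'] encodes to "0:18537".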
class Codec:
def toNum(self, s):
num,zero,i,n = 0,0,0,len(s)
while i < n and ord(s[i]) == 0:
zero += 1
i += 1
while i < n:
num = num << 8
num += ord(s[i])
i += 1
return [zero,num]
def toStr(self, zero, num):
s = []
while num > 0:
s.append(chr(num % 256))
num = num >> 8
s.extend([chr(0)]*zero)
return ''.join(s[::-1])
def encode(self, strs: [str]) -> str:
"""Encodes a list of strings to a single string.
"""
out = []
for s in strs:
zero,num = self.toNum(s)
out.append('{}:{}'.format(zero,num))
return ','.join(out)
def decode(self, s: str) -> [str]:
"""Decodes a single string to a list of strings.
"""
out = []
strs = s.split(',') if len(s) > 0 else []
for s in strs:
zero,num = s.split(':')
out.append(self.toStr(int(zero),int(num)))
return out
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(strs))
# Tests.
codec = Codec()
strs = ['Great','Nice']
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
strs = ['{}leading'.format(chr(0)),'Nice']
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
strs = ['{}l:eadi.ng'.format(chr(0)),'{}leading,{}'.format(chr(0),chr(1))]
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
strs = []
encoded = codec.encode(strs)
assert(codec.decode(encoded) == strs)
```
#### File: leetcode/Array/280WiggleSort.py
```python
from common import *
class Solution:
'''
Loop through and exchange if nums[i] and nums[i+1] not satisfy rule.
O(n) runtime, O(1) storage.
Beat 84% runtime, 99% storage of all Leetcode submissions.
'''
def wiggleSort(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
for i in range(len(nums)-1):
if i % 2 == 0:
if nums[i] > nums[i+1]: nums[i],nums[i+1] = nums[i+1],nums[i]
else:
if nums[i] < nums[i+1]: nums[i],nums[i+1] = nums[i+1],nums[i]
# Tests.
nums = [3,5,2,1,6,4]
Solution().wiggleSort(nums)
assert(nums == [3,5,1,6,2,4])
nums = [1,2,3]
Solution().wiggleSort(nums)
assert(nums == [1,3,2])
nums = [2,1]
Solution().wiggleSort(nums)
assert(nums == [1,2])
```
#### File: leetcode/Array/301RemoveInvalidParentheses.py
```python
from common import *
class Solution:
'''
DFS + backtracking of prefix, stack, removals.
O(2^n) runtime, O(n) storage.
Beat 5% runtime, 56% storage of all Leetcode submissions.
'''
def removeInvalidParentheses(self, s: str) -> List[str]:
minimum,n,out = float('inf'),len(s),set()
def dfs(prefix, stack, i, removals, find_minimum):
nonlocal minimum, out
if i == n:
if not stack:
if find_minimum:
minimum = min(removals, minimum)
elif removals == minimum:
out.add(''.join(prefix))
return
if s[i] == '(':
prefix.append(s[i])
stack.append(s[i])
dfs(prefix, stack, i+1, removals, find_minimum)
stack.pop()
prefix.pop()
elif s[i] == ')':
if stack and stack[-1] == '(':
prefix.append(s[i])
stack.pop()
dfs(prefix, stack, i+1, removals, find_minimum)
stack.append('(')
prefix.pop()
else:
prefix.append(s[i])
dfs(prefix, stack, i+1, removals, find_minimum)
prefix.pop()
dfs(prefix, stack, i+1, removals+1, find_minimum)
dfs([],[],0,0,True)
dfs([],[],0,0,False)
return list(out)
# Tests.
assert_list_noorder(Solution().removeInvalidParentheses("()())()"), ["(())()","()()()"])
assert_list_noorder(Solution().removeInvalidParentheses("(a)())()"), ["(a())()","(a)()()"])
assert_list_noorder(Solution().removeInvalidParentheses(")("), [""])
```
#### File: leetcode/Array/362DesignHitCounter.py
```python
from common import *
'''
list, binary search on timestamp.
O(1) runtime for init, hit, O(log(n)) runtime for getHits. O(n) storage in which n is # of seconds with hits.
Beat 85% runtime, 50% storage of all Leetcode submissions.
'''
class HitCounter:
def __init__(self):
"""
Initialize your data structure here.
"""
self.arr = []
def hit(self, timestamp: int) -> None:
"""
Record a hit.
@param timestamp - The current timestamp (in seconds granularity).
"""
if self.arr and self.arr[-1][0] == timestamp:
self.arr[-1][1] += 1
else:
self.arr.append([timestamp, 1])
def getHits(self, timestamp: int) -> int:
"""
Return the number of hits in the past 5 minutes.
@param timestamp - The current timestamp (in seconds granularity).
"""
def binarySearch(timestamp, leftOrRight = True):
i,j = 0,len(self.arr)
while i < j:
k = (i+j) // 2
if leftOrRight:
if self.arr[k][0] < timestamp:
i = k+1
else:
j = k
else:
if self.arr[k][0] <= timestamp:
i = k+1
else:
j = k
return i
j = binarySearch(timestamp, False)
i = binarySearch(timestamp-299, True)
out = 0
for k in range(i, j):
out += self.arr[k][1]
return out
# Your HitCounter object will be instantiated and called as such:
# obj = HitCounter()
# obj.hit(timestamp)
# param_2 = obj.getHits(timestamp)
assert_call_sequence(globals(),["HitCounter","hit","hit","hit","getHits","hit","getHits","getHits"],[[],[1],[2],[2],[4],[300],[300],[301]],[[None,None,None,None,3,None,4,3]])
```
#### File: leetcode/Array/468ValidateIPAddress.py
```python
from common import *
class Solution:
'''
Parsing rules.
O(n) runtime, O(n) storage, in which n is length of the IP.
Beat 45% runtime, 29% storage of all Leetcode submissions.
'''
def validIPAddress(self, IP: str) -> str:
if IP.find('.') > 0:
parts = IP.split('.')
if len(parts) != 4: return 'Neither'
for part in parts:
if (not part.isdigit()) or (len(part) > 3) or (len(part) > 1 and part[0] == '0') or (int(part) >= 256): return 'Neither'
return 'IPv4'
elif IP.find(':') > 0:
parts = IP.split(':')
if len(parts) != 8: return 'Neither'
for part in parts:
if len(part) == 0 or len(part) > 4: return 'Neither'
for c in part:
if c >= 'a' and c <= 'f': continue
if c >= 'A' and c <= 'F': continue
if c >= '0' and c <= '9': continue
return 'Neither'
return 'IPv6'
else:
return 'Neither'
'''
Improve storage complexity by counting occurence of the delimiter before spliting.
O(n) runtime, O(1) storage.
Beat 77% runtime, 9% storage of all Leetcode submissions.
Note by counting the delimiter occurence, we make sure there are constant # of parts for IPv4 and IPv6 check. So storage is O(1).
'''
def validIPAddress2(self, IP: str) -> str:
if IP.count('.') == 3:
parts = IP.split('.')
if len(parts) != 4: return 'Neither'
for part in parts:
if (not part.isdigit()) or (len(part) > 3) or (len(part) > 1 and part[0] == '0') or (int(part) >= 256): return 'Neither'
return 'IPv4'
elif IP.count(':') == 7:
parts = IP.split(':')
if len(parts) != 8: return 'Neither'
for part in parts:
if len(part) == 0 or len(part) > 4: return 'Neither'
for c in part:
if c >= 'a' and c <= 'f': continue
if c >= 'A' and c <= 'F': continue
if c >= '0' and c <= '9': continue
return 'Neither'
return 'IPv6'
else:
return 'Neither'
# Tests.
assert(Solution().validIPAddress("172.16.254.1") == 'IPv4')
assert(Solution().validIPAddress("2001:0db8:85a3:0:0:8A2E:0370:7334") == 'IPv6')
assert(Solution().validIPAddress("256.256.256.256") == 'Neither')
assert(Solution().validIPAddress("2001:0db8:85a3:0:0:8A2E:0370:7334:") == 'Neither')
assert(Solution().validIPAddress("1e1.4.5.6") == 'Neither')
assert(Solution().validIPAddress2("172.16.254.1") == 'IPv4')
assert(Solution().validIPAddress2("2001:0db8:85a3:0:0:8A2E:0370:7334") == 'IPv6')
assert(Solution().validIPAddress2("256.256.256.256") == 'Neither')
assert(Solution().validIPAddress2("2001:0db8:85a3:0:0:8A2E:0370:7334:") == 'Neither')
assert(Solution().validIPAddress2("1e1.4.5.6") == 'Neither')
```
#### File: leetcode/Array/523ContinuousSubarraySum.py
```python
class Solution:
'''
Hash map to memorize sum[:i] % k so that when we see the same remainder again at j we know the subarray [i+1:j+1] sums to a multiple of k.
O(n) runtime, O(min(k,n)) storage.
Beat 99.7% runtime, 6.7% storage of all Leetcode submissions. The reason for bad storage is that brute forcing has O(1).
'''
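# Worked example (for illustration): nums = [23, 2, 4, 6, 7], k = 6. Prefix sums are
# 23, 25, 29, ... with remainders 5, 1, 5, ...; remainder 5 reappears at index 2 after
# first being seen at index 0, and 2 - 0 >= 2, so the subarray [2, 4] sums to a multiple of 6.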
def checkSubarraySum(self, nums, k):
h,n,v = {0:-1},len(nums),0
if k == 0:
for i in range(n-1):
if nums[i] == 0 and nums[i+1] == 0: return True
else:
for i in range(n):
v += nums[i]
m = v % k
if m in h:
if i-h[m] >= 2: return True
else:
h[m] = i
return False
# Tests.
assert(Solution().checkSubarraySum([23, 2, 4, 6, 7], 6) == True)
assert(Solution().checkSubarraySum([23, 2, 6, 4, 7], 6) == True)
assert(Solution().checkSubarraySum([23, 2, 7], 6) == False)
assert(Solution().checkSubarraySum([], 6) == False)
assert(Solution().checkSubarraySum([23,2,6,4,7], 0) == False)
assert(Solution().checkSubarraySum([6,4,7], 6) == False)
```
#### File: leetcode/Array/528RandomPick.py
```python
from common import *
import random
'''
Prefix sum with binary search.
O(n) runtime for __init__ and O(log(n)) runtime for pickIndex, O(1) storage as we use in-place storage.
Beat 63% runtime, 50% storage of all Leetcode submissions.
'''
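# Worked example (for illustration): w = [1, 3] becomes the prefix array [1, 4].
# A uniform pick in 1..4 maps to index 0 when pick == 1 (probability 1/4) and to
# index 1 when pick is in 2..4 (probability 3/4), matching the weights.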
class Solution:
def __init__(self, w):
for i in range(1,len(w)):
w[i] += w[i-1]
self.w = w
def pickIndex(self) -> int:
pick = random.randint(1,self.w[-1])
i,j = 0,len(self.w)
while i < j:
k = (i+j) // 2
if self.w[k] >= pick:
j = k
else:
i = k+1
return i
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex()
# Tests.
assert_call_sequence(globals(),["Solution","pickIndex"],[[[1]],[]],[[None,0]])
assert_call_sequence(globals(),["Solution","pickIndex","pickIndex"],[[[1,3]],[],[]],[[None,1,1],[None,1,0],[None,0,1],[None,0,0]])
```
#### File: leetcode/Array/759EmployeeFreeTime.py
```python
class Interval:
def __init__(self, start: int = None, end: int = None):
self.start = start
self.end = end
from common import *
import sys
import heapq
class Solution:
'''
Calculate free time for each employee, then merge the free time.
O(N^2) runtime, O(N) storage, in which N is # of Intervals.
Beat 5% runtime, 86% storage of all Leetcode submissions.
'''
def employeeFreeTime(self, schedule: '[[Interval]]') -> '[Interval]':
freetime,low,high = [],sys.maxsize,-sys.maxsize-1
for employee in schedule:
for worktime in employee:
low = min(low, worktime.start)
high = max(high, worktime.end)
for employee in schedule:
freetime.append([])
if employee[0].start > low: freetime[-1].append(Interval(low,employee[0].start))
for i in range(1,len(employee)):
if employee[i].start > employee[i-1].end: freetime[-1].append(Interval(employee[i-1].end,employee[i].start))
if employee[-1].end < high: freetime[-1].append(Interval(employee[-1].end,high))
def merge(free1, free2):
free,i1,i2,n1,n2 = [],0,0,len(free1),len(free2)
while i1 < n1 and i2 < n2:
if free1[i1].start > free2[i2].start: free1,free2,i1,i2,n1,n2 = free2,free1,i2,i1,n2,n1
if free1[i1].end > free2[i2].start:
if free1[i1].end > free2[i2].end:
free.append(Interval(free2[i2].start,free2[i2].end))
i2 += 1
else:
free.append(Interval(free2[i2].start,free1[i1].end))
i1 += 1
else:
i1 += 1
return free
out = freetime[0]
for i in range(1,len(freetime)):
out = merge(out,freetime[i])
return out
'''
Priority queue to find all busy intervals first, then compute free time.
O(Nlog(k)) runtime, O(k) storage, in which N is # of intervals, k is # of employees.
Beat 25% runtime, 76% storage of all Leetcode submissions.
'''
def employeeFreeTime2(self, schedule: '[[Interval]]') -> '[Interval]':
k = len(schedule)
q,out = [],[]
for i in range(k):
if len(schedule[i]) > 0: q.append([schedule[i][0].start,schedule[i][0].end,i,0])
heapq.heapify(q)
while q:
start,end,i,j = heapq.heappop(q)
if not out or out[-1][1] < start:
out.append([start,end])
else:
out[-1][1] = max(out[-1][1],end)
if j < len(schedule[i])-1: heapq.heappush(q,[schedule[i][j+1].start,schedule[i][j+1].end,i,j+1])
for i in range(1,len(out)):
out[i-1] = Interval(out[i-1][1],out[i][0])
if out: out.pop()
return out
# Tests.
def assert_out(expected_out, actual_out):
assert(len(expected_out) == len(actual_out))
for i in range(len(expected_out)):
assert(expected_out[i].start == actual_out[i].start)
assert(expected_out[i].end == actual_out[i].end)
expected_out = [Interval(3,4)]
actual_out = Solution().employeeFreeTime([[Interval(1,2),Interval(5,6)],[Interval(1,3)],[Interval(4,10)]])
assert_out(expected_out, actual_out)
expected_out = [Interval(5,6),Interval(7,9)]
actual_out = Solution().employeeFreeTime([[Interval(1,3),Interval(6,7)],[Interval(2,4)],[Interval(2,5),Interval(9,12)]])
assert_out(expected_out, actual_out)
expected_out = [Interval(3,4)]
actual_out = Solution().employeeFreeTime2([[Interval(1,2),Interval(5,6)],[Interval(1,3)],[Interval(4,10)]])
assert_out(expected_out, actual_out)
expected_out = [Interval(5,6),Interval(7,9)]
actual_out = Solution().employeeFreeTime2([[Interval(1,3),Interval(6,7)],[Interval(2,4)],[Interval(2,5),Interval(9,12)]])
assert_out(expected_out, actual_out)
```
#### File: leetcode/Array/767ReorganizeString.py
```python
from common import *
from collections import defaultdict
import heapq
class Solution:
'''
Priority queue to use high frequency characters first.
O(nlog(a)) runtime, O(a) storage, where n is length of S and a is # of unique characters.
Beat 73% runtime, 35% storage of all Leetcode submissions.
'''
def reorganizeString(self, S: str) -> str:
h = defaultdict(int)
for c in S:
h[c] += 1
q = []
for c in h:
q.append([-h[c],c])
heapq.heapify(q)
out = []
while q:
f,c = heapq.heappop(q)
if not out or out[-1] != c:
out.append(c)
if f < -1: heapq.heappush(q, [f+1,c])
else:
if not q: return ""
f2,c2 = heapq.heappop(q)
out.append(c2)
if f2 < -1: heapq.heappush(q, [f2+1,c2])
heapq.heappush(q, [f,c])
return ''.join(out)
# Tests.
assert(Solution().reorganizeString("aab") == "aba")
assert(Solution().reorganizeString("aaab") == "")
```
#### File: leetcode/Array/76MinimumWindowSubstring.py
```python
from common import *
import sys
class Solution:
'''
letter count table as signature, special compare, sliding window.
O(m+n) runtime, O(1) storage.
Beat 5% runtime, 16% storage of all Leetcode submissions.
'''
def minWindow(self, s: str, t: str) -> str:
sigT,sigS,b = [0]*58,[0]*58,ord('A')
for c in t:
sigT[ord(c)-b] += 1
def compareSig(sigS, sigT):
for i in range(len(sigT)):
if sigS[i] < sigT[i]: return -1
for i in range(len(sigT)):
if sigT[i] > 0 and sigS[i] > sigT[i]: return 1
return 0
i,j,n,out = 0,0,len(s),[0,sys.maxsize]
while j < n:
while j < n:
sigS[ord(s[j])-b] += 1
j += 1
if compareSig(sigS,sigT) >= 0: break
while i < j and compareSig(sigS,sigT) >= 0:
if j-i < out[1]-out[0]: out = [i,j]
sigS[ord(s[i])-b] -= 1
i += 1
return s[out[0]:out[1]] if out[1] != sys.maxsize else ""
# Tests.
assert(Solution().minWindow("ADOBECODEBANC", "ABC") == "BANC")
assert(Solution().minWindow("a", "a") == "a")
assert(Solution().minWindow("a", "aa") == "")
```
#### File: leetcode/Array/937ReorderDataLogFiles.py
```python
from common import *
class Solution:
'''
Sort based on 2 keys.
O(MNlog(N)) runtime, O(MN) storage, in which M is maximum size of a single log and N is the size of logs.
Beat 98% runtime, 11% storage of all Leetcode submissions.
'''
def reorderLogFiles(self, logs: List[str]) -> List[str]:
letter_logs = []
digit_logs = []
for i,log in enumerate(logs):
k = log.find(' ')
if log[k+1].isdigit():
digit_logs.append(log)
else:
letter_logs.append([log[k+1:],log[:k]])
letter_logs.sort()
out = []
for log,id in letter_logs:
out.append('{} {}'.format(id,log))
for log in digit_logs:
out.append(log)
return out
'''
Use the sort() method with custom defined key.
O(MNlog(N)) runtime, O(MN) storage.
Beat 74% runtime, 17% storage of all Leetcode submissions.
'''
def reorderLogFiles2(self, logs: List[str]) -> List[str]:
def getkey(log):
id,rest = log.split(' ',maxsplit=1)
return [0,rest,id] if rest[0].isalpha() else [1,'','']
logs.sort(key=getkey)
return logs
# Tests.
assert(Solution().reorderLogFiles(["dig1 8 1 5 1","let1 art can","dig2 3 6","let2 own kit dig","let3 art zero"]) == ["let1 art can","let3 art zero","let2 own kit dig","dig1 8 1 5 1","dig2 3 6"])
assert(Solution().reorderLogFiles(["dig1 8 1 5 1"]) == ["dig1 8 1 5 1"])
assert(Solution().reorderLogFiles([]) == [])
assert(Solution().reorderLogFiles(["a1 9 2 3 1","g1 act car","zo4 4 7","ab1 off key dog","a8 act zoo","a2 act car"]) == ["a2 act car","g1 act car","a8 act zoo","ab1 off key dog","a1 9 2 3 1","zo4 4 7"]) # Test when letter logs tie we need to use id to sort.
assert(Solution().reorderLogFiles2(["dig1 8 1 5 1","let1 art can","dig2 3 6","let2 own kit dig","let3 art zero"]) == ["let1 art can","let3 art zero","let2 own kit dig","dig1 8 1 5 1","dig2 3 6"])
assert(Solution().reorderLogFiles2(["dig1 8 1 5 1"]) == ["dig1 8 1 5 1"])
assert(Solution().reorderLogFiles2([]) == [])
assert(Solution().reorderLogFiles2(["a1 9 2 3 1","g1 act car","zo4 4 7","ab1 off key dog","a8 act zoo","a2 act car"]) == ["a2 act car","g1 act car","a8 act zoo","ab1 off key dog","a1 9 2 3 1","zo4 4 7"]) # Test when letter logs tie we need to use id to sort.
```
#### File: leetcode/Array/973KClosestPointsOrigin.py
```python
from common import *
import heapq
class Solution:
'''
Priority queue to store top K items. Key of the queue is suqare of Euclidean distance.
O(nlog(k)) runtime, O(K) storage.
Beat 86% runtime, 5% storage of all Leetcode submissions.
'''
def kClosest(self, points, K):
q = []
for i,j in points:
d = i**2+j**2
if len(q) < K:
heapq.heappush(q,[-d,[i,j]])
elif -q[0][0] > d:
heapq.heappop(q)
heapq.heappush(q,[-d,[i,j]])
out = []
for d,point in q:
out.append(point)
return out
assert_list_noorder(Solution().kClosest([[1,3],[-2,2]], 1),[[-2,2]])
assert_list_noorder(Solution().kClosest([[3,3],[5,-1],[-2,4]], 2),[[3,3],[-2,4]])
assert_list_noorder(Solution().kClosest([[-2,2]], 1),[[-2,2]])
```
#### File: leetcode/Array/ReorderedPower2.py
```python
class Solution:
'''
Frequency table based signature.
O(n) runtime, O(1) storage.
33 min.
'''
def reorderedPowerOf2(self, N: int) -> bool:
def sig(n):
h = [0]*10
while n > 0:
d = n % 10
n = n // 10
h[d] += 1
return h
h = sig(N)
high = 0
for i in range(len(h)-1,-1,-1):
if h[i] > 0:
j = h[i]
while j > 0:
high = high*10+i
j -= 1
c = 1
while c <= high:
if sig(c) == h: return True
c = 2*c
return False
# Tests.
assert(Solution().reorderedPowerOf2(1) == True)
assert(Solution().reorderedPowerOf2(10) == False)
assert(Solution().reorderedPowerOf2(16) == True)
assert(Solution().reorderedPowerOf2(24) == False)
assert(Solution().reorderedPowerOf2(46) == True)
```
#### File: leetcode/Graph/785IsGraphBipartite.py
```python
from common import *
class Solution:
'''
Coloring with depth first search: color first node as red, then any nodes connecting to it is black, then any nodes connecting to nodes of nodes is red again.
O(m) runtime, O(n) storage, in which m is # of edges and n is # of nodes.
Beat 97% runtime, 19% storage of all Leetcode submissions.
Note we must do it through dfs. Otherwise, we don't know how to color the initial node.
'''
def isBipartite(self, graph: List[List[int]]) -> bool:
n = len(graph)
colors = [None]*n
def dfs(i,color):
if colors[i] == color: return True
if colors[i] != None and colors[i] != color: return False
colors[i] = color
for j in graph[i]:
if not dfs(j,-color): return False
return True
for i in range(n):
if colors[i] == None:
if not dfs(i,1): return False
return True
# Tests.
assert(Solution().isBipartite([[1,3], [0,2], [1,3], [0,2]]) == True)
assert(Solution().isBipartite([[1,2,3], [0,2], [0,1,3], [0,2]]) == False)
assert(Solution().isBipartite([[3],[2,4],[1],[0,4],[1,3]]) == True)
```
#### File: leetcode/Tree/545BoundaryBinaryTree.py
```python
from common import *
class Solution:
'''
Iterative and recursive traversal of the tree to find the left boundary, right boundary, and leaves of the tree.
O(n) runtime, O(n) storage.
Beat 44% runtime, 42% storage of all Leetcode submissions.
'''
def boundaryOfBinaryTree(self, root: TreeNode) -> List[int]:
left,right,leaves = [],[],[]
curr = root.left
while curr != None:
left.append(curr.val)
curr = curr.left if curr.left != None else curr.right
if left: left.pop()
curr = root.right
while curr != None:
right.append(curr.val)
curr = curr.right if curr.right != None else curr.left
if right: right.pop()
def getLeaves(node):
nonlocal leaves
if node.left == None and node.right == None:
if node != root: leaves.append(node.val)
return
if node.left != None: getLeaves(node.left)
if node.right != None: getLeaves(node.right)
getLeaves(root)
return [root.val] + left + leaves + right[::-1]
# Tests.
assert(Solution().boundaryOfBinaryTree(Codec().deserialize('[1,null,2,3,4]')) == [1,3,4,2])
assert(Solution().boundaryOfBinaryTree(Codec().deserialize('[1,2,3,4,5,6,null,null,null,7,8,9,10]')) == [1,2,4,7,8,9,10,6,3])
assert(Solution().boundaryOfBinaryTree(Codec().deserialize('[1]')) == [1])
```
#### File: leetcode/Tree/common.py
```python
from typing import List # This ensures the compile can accept Leetcode Python 3 syntax: https://leetcode.com/discuss/general-discussion/270755/in-python-3-the-parameter-type-define-list-always-reports-an-error-in-ide
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Codec:
'''
Breadth first search to locate nodes in order and put them in result string. Put 'null' for empty left or right unless no value after.
O(n) runtime both for serialize and deserialize. O(n) storage for both as well.
Beat 89% runtime, 63% storage of all Leetcode submissions.
'''
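# Example of the format (for illustration): a root 1 with only a right child 2
# serializes to '[1,null,2]', while a full tree 1 with children 2 and 3 serializes
# to '[1,2,3]'; trailing 'null' entries are dropped.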
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
layer,out = [root],[]
while layer:
new_layer = []
is_all_none = True
for node in layer:
if node != None:
is_all_none = False
break
for node in layer:
if node == None:
if not is_all_none: out.append('null')
else:
out.append(str(node.val))
new_layer.append(node.left)
new_layer.append(node.right)
layer = new_layer
while out and out[-1] == 'null':
out.pop()
return '[{}]'.format(','.join(out))
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
i,n = 1,len(data)-1
root,layer = None,[]
while i < n:
if root == None:
j = data.find(',',i)
if j == -1: j = n
root = TreeNode(int(data[i:j]))
layer.append(root)
i = j+1
else:
new_layer = []
for node in layer:
if i >= n: break
j = data.find(',',i)
if j == -1: j = n
k = data.find(',',j+1)
if k == -1: k = n
if j > i and data[i:j] != 'null':
node.left = TreeNode(int(data[i:j]))
new_layer.append(node.left)
if k > j+1 and data[j+1:k] != 'null':
node.right = TreeNode(int(data[j+1:k]))
new_layer.append(node.right)
i = k+1
layer = new_layer
return root
# Check if two trees are the same.
def assert_tree(root1, root2):
if root1 == None:
assert(root2 == None)
else:
assert(root1 != None and root2 != None and root1.val == root2.val)
if root1.left == None:
assert(root2.left == None)
else:
assert_tree(root1.left, root2.left)
if root1.right == None:
assert(root2.right == None)
else:
assert_tree(root1.right, root2.right)
# Assert a sequence of calls return expected results.
def assert_call_sequence(context, calls, args, expected_outputs):
assert(len(calls) == len(args) == len(expected_outputs[0]))
assert(len(calls) >= 1)
output = []
# Default first item in calls is to create the class object of the solution and the remaining is to call methods of the object.
solution = context[calls[0]](*args[0])
output.append(None)
for i in range(1,len(calls)):
call = getattr(solution, calls[i])
output.append(call(*args[i]))
assert(output in expected_outputs)
``` |
{
"source": "JohnTheNerd/WOLRelay",
"score": 2
} |
#### File: JohnTheNerd/WOLRelay/main.py
```python
import datetime
import multiprocessing
import multiprocessing.dummy
import os
import json
import re
import traceback
import functools
import logging
import time
import scapy
from scapy.all import sniff
from wakeonlan import send_magic_packet
from flask import Flask, request, abort, send_from_directory
from werkzeug.exceptions import NotFound
app = Flask(__name__)
scriptPath = os.path.dirname(os.path.realpath(__file__))
configPath = os.path.join(scriptPath, 'config.json')
config = open(configPath).read()
config = json.loads(config)
logging.basicConfig()
logger = logging.getLogger("WOLRelay")
logger.setLevel(config['logLevel'])
multiprocessingManager = multiprocessing.Manager()
ARPTable = multiprocessingManager.dict()
@app.before_request
def beforeRequest():
# optionally mitigate against DNS rebinding
if 'hosts' in config.keys():
splitHost = request.host
if ':' in splitHost:
splitHost = request.host.split(':')[0]
if splitHost != "localhost" and splitHost != "127.0.0.1": # whitelist localhost because of Docker health checks
if splitHost not in config['hosts']:
abort(403)
def processARP(packets):
for packet in packets:
if packet.type == 2054: # only process ARP packets
if packet.op == 2: # only process ARP *reply* packets
if packet.hwsrc.upper() in ARPTable.keys(): # only process packets from MAC addresses we care about
mac = packet.hwsrc
ip = packet.psrc
logging.debug('IP ' + ip + ' is assigned to ' + mac + ' as of ' + datetime.datetime.now().isoformat() + "Z")
name = ARPTable[mac.upper()]['name']
ARPTable[mac.upper()] = {
"name": name,
"mac": mac.upper(),
"ip": ip,
"lastSeen": datetime.datetime.now().isoformat() + "Z"
}
def sniffARPPackets(interface = None):
if interface:
try:
sniff(prn=processARP, iface=interface, filter="(arp[6:2] = 2)") # run scapy with BPF for ARP packets with opcode 2
except Exception:
logger.warning("Running scapy in filtered mode failed, filtering without the help of Berkeley Packet Filtering. This is going to be VERY slow and unreliable. You should try installing tcpdump if you're on Linux, and Npcap if you're on Windows.")
traceback.print_exc()
sniff(prn=processARP) # filtering failed, fall back to inspecting every packet
else:
try:
sniff(prn=processARP, filter="(arp[6:2] = 2)") # run scapy with BPF for ARP packets with opcode 2
except Exception:
logger.warning("Running scapy in filtered mode failed, filtering without the help of Berkeley Packet Filtering. This is going to be VERY slow and unreliable. You should try installing tcpdump if you're on Linux, and Npcap if you're on Windows.")
traceback.print_exc()
sniff(prn=processARP) # filtering failed, fall back to inspecting every packet
def sendARPRequest(interface, destination):
logger.debug('sending ARP request to ' + destination)
scapy.layers.l2.arping(destination, iface=interface, timeout=0, cache=True, verbose=False)
def scanNetwork(scanInterface = None):
while True:
try:
pool = multiprocessing.dummy.Pool(processes=10)
processes = []
for network, netmask, _, interface, address, _ in scapy.config.conf.route.routes:
        if scanInterface:
if interface != scanInterface:
continue
else:
# skip loopback network and default gw
if network == 0 or interface == 'lo' or address == '127.0.0.1' or address == '0.0.0.0':
continue
if netmask <= 0 or netmask == 0xFFFFFFFF:
continue
# skip docker interface
if interface.startswith('docker') or interface.startswith('br-'):
continue
subnet = '.'.join(address.split('.')[:-1])
IPRange = [subnet + '.' + str(i) for i in range(1, 254)]
boundARPRequest = functools.partial(sendARPRequest, interface)
processes.append(pool.map_async(boundARPRequest, IPRange))
for process in processes:
process.get()
pool.close()
pool.join()
except:
logger.warning('scanning the network failed! exception details: ' + traceback.format_exc())
finally:
time.sleep(config['arp']['scanInterval'])
"""
For a given MAC address, returns the IP address and the timestamp for when we recorded it.
Returns HTTP501 if ARP is disabled from the configuration file.
Returns HTTP400 if the MAC address is invalid or does not exist in our ARP table.
Returns HTTP204 if the MAC address does not have a corresponding IP address yet.
@mac MAC address to scan ARP table for. If undefined, data for all MAC addresses will be returned.
"""
@app.route('/status')
def status():
mac = None
if 'mac' in request.args:
mac = request.args.get('mac')
mac = mac.upper()
if 'arp' not in config.keys():
return (json.dumps({"error": "ARP is disabled in the configuration file!"}), 501)
if mac:
if mac not in ARPTable.keys():
return (json.dumps({"error": "The given MAC address is not defined in the configuration file!"}), 400)
if not ARPTable[mac]:
return (json.dumps({"error": "We don't have any information about this MAC address yet!"}), 204)
return json.dumps(ARPTable[mac])
else:
result = []
for mac in ARPTable.keys():
result.append(ARPTable[mac])
return json.dumps(result)
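# Editor's note: an illustrative response for GET /status with no ?mac filter,
# mirroring the ARPTable entries built in processARP above (values are placeholders):
# [{"name": "desktop", "mac": "AA:BB:CC:DD:EE:FF",
#   "ip": "192.168.1.23", "lastSeen": "2021-01-01T12:00:00.000000Z"}]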
"""
Sends a Wake-on-LAN "magic packet" to the specified MAC address.
Returns HTTP400 if the MAC address appears to be invalid.
@mac MAC address to send packet to.
"""
@app.route('/wake', methods=['POST'])
def wakeDevice():
mac = request.json['mac']
mac = mac.upper()
try:
send_magic_packet(mac, ip_address=config['broadcastAddress'], port=config['broadcastPort'])
return json.dumps({"error": None})
except Exception:
return (json.dumps({"error": traceback.format_exc()}), 500)
# hackity hack
# serve static files from the static directory
# this is so that the user doesn't need to configure a webserver to run and/or debug
# but it's encouraged to do so anyway for performance reasons
@app.route('/<path:path>')
def staticHost(path):
try:
return send_from_directory(os.path.join(scriptPath, 'static'), path)
except NotFound as e:
if path.endswith("/"):
return send_from_directory(os.path.join(scriptPath, 'static'), path + "index.html")
raise e
@app.route('/')
def staticIndex():
return send_from_directory(os.path.join(scriptPath, 'static'), "index.html")
if __name__ == '__main__':
if 'arp' in config.keys():
if 'scanInterfaces' in config['arp'].keys():
for interface in config['arp']['scanInterfaces']:
sniffingProcess = multiprocessing.Process(target=sniffARPPackets, args=[interface])
sniffingProcess.start()
else:
sniffingProcess = multiprocessing.Process(target=sniffARPPackets)
sniffingProcess.start()
for device in config['arp']['devices']:
name = device['name']
mac = device['mac']
ARPTable[mac.upper()] = {
"name": name,
"mac": mac.upper(),
"ip": None,
"lastSeen": None
}
if 'scanInterval' in config['arp'].keys():
if 'scanInterfaces' in config['arp'].keys():
for interface in config['arp']['scanInterfaces']:
scanningProcess = multiprocessing.Process(target=scanNetwork, args=[interface])
scanningProcess.start()
else:
scanningProcess = multiprocessing.Process(target=scanNetwork)
scanningProcess.start()
app.run(config['localIP'], port=config['APIPort'], threaded=True)
``` |
{
"source": "johntheprime/apiflask",
"score": 3
} |
#### File: apiflask/tests/test_openapi_info.py
```python
from openapi_spec_validator import validate_spec
from apiflask import APIFlask
def test_info_title_and_version(app):
assert app.title == 'APIFlask'
assert app.version == '0.1.0'
app = APIFlask(__name__, title='Foo', version='1.0')
assert app.spec['info']['title'] == 'Foo'
assert app.spec['info']['version'] == '1.0'
def test_other_info_fields(app, client):
assert app.description is None
assert app.terms_of_service is None
assert app.contact is None
assert app.license is None
app.description = 'My API'
app.terms_of_service = 'http://example.com/terms/'
app.contact = {
'name': 'API Support',
'url': 'http://www.example.com/support',
'email': '<EMAIL>'
}
app.license = {
'name': 'Apache 2.0',
'url': 'http://www.apache.org/licenses/LICENSE-2.0.html'
}
rv = client.get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
assert rv.json['info']['description'] == 'My API'
assert rv.json['info']['termsOfService'] == 'http://example.com/terms/'
assert rv.json['info']['contact'] == {
'name': 'API Support',
'url': 'http://www.example.com/support',
'email': '<EMAIL>'
}
assert rv.json['info']['license'] == {
'name': 'Apache 2.0',
'url': 'http://www.apache.org/licenses/LICENSE-2.0.html'
}
def test_auto_info_description(test_apps):
from auto_description import app
assert app.description is None
rv = app.test_client().get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
assert rv.json['info']['description'] == 'Some description for my API.'
# reset the app status
app._spec = None
def test_auto_info_description_precedence(test_apps):
from auto_description import app
    app.description = 'new description'
rv = app.test_client().get('/openapi.json')
    assert rv.json['info']['description'] == 'new description'
# reset the app status
app._spec = None
app.description = None
``` |
{
"source": "JohntheProgrammer92/pyConverter",
"score": 3
} |
#### File: pyConverter/pythonImageConverter/pic.py
```python
import sys
import os
from PIL import Image
from pythonImageConverter.pyImgConvertGUI.gui import execute
"""
USAGE:
pic [input filename] [output filename]
example: pic test.png test.jpg
pic [input directory] [output file type]
example: pic ./res/ jpg
"""
def get_dirList(path):
"""
Return a sorted list of contents from the directory
"""
dirList = os.listdir(path)
dirList.sort()
return dirList
def main(args=None):
if args is None:
args = sys.argv
if args[1] != "GUI":
if len(args) != 3:
print("""
USAGE:
pic [input filename] [output filename]
example: pic test.png test.jpg
pic [input directory] [output file type]
example: pic ./res/ jpg
""")
else:
if os.path.isdir(args[1]) == False:
try:
im = Image.open(args[1])
rgb_im = im.convert('RGB')
rgb_im.save(args[2], quality = 95)
except Exception as e:
if hasattr(e,"msg"):
print(e.msg)
else:
print(e)
else:
files = get_dirList(args[1])
try:
for i in files:
i = args[1] + i
im = Image.open(i)
rgb_im = im.convert('RGB')
name = i.split('.')
name[2] = args[2]
newName = '.'.join(name)
rgb_im.save(newName, quality = 95)
except Exception as e:
if hasattr(e,"msg"):
print(e.msg)
else:
print(e)
else:
execute()
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[0:]))
except KeyboardInterrupt:
pass
``` |
{
"source": "johnTheSloth/TwitterWall",
"score": 3
} |
#### File: TwitterWall/twitterwall/session.py
```python
import requests
import base64
def twitter_session(api_key, api_secret):
session = requests.Session()
secret = '{}:{}'.format(api_key, api_secret)
secret64 = base64.b64encode(secret.encode('ascii')).decode('ascii')
headers = {
'Authorization': 'Basic {}'.format(secret64),
'Host': 'api.twitter.com',
}
r = session.post('https://api.twitter.com/oauth2/token',
headers=headers,
data={'grant_type': 'client_credentials'})
bearer_token = r.json()['access_token']
def bearer_auth(req):
req.headers['Authorization'] = 'Bearer ' + bearer_token
return req
session.auth = bearer_auth
return session
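# Editor's sketch (not part of the original module): how the bearer-authenticated
# session is typically used. The credentials are placeholders and the v1.1 search
# endpoint is an assumption about the intended use.
def example_search(api_key, api_secret, query='#python'):
    session = twitter_session(api_key, api_secret)
    r = session.get('https://api.twitter.com/1.1/search/tweets.json',
                    params={'q': query, 'count': 5})
    return [tweet['text'] for tweet in r.json().get('statuses', [])]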
``` |
{
"source": "john-tho/script.module.pycaption",
"score": 3
} |
#### File: lib/pycaption/__init__.py
```python
from .base import (
CaptionConverter, CaptionNode, Caption, CaptionList, CaptionSet)
from .srt import SRTReader, SRTWriter
from .webvtt import WebVTTReader, WebVTTWriter
from .exceptions import (
CaptionReadError, CaptionReadNoCaptions, CaptionReadSyntaxError)
__all__ = [
'CaptionConverter',
'SRTReader', 'SRTWriter',
'WebVTTReader', 'WebVTTWriter',
'CaptionReadError', 'CaptionReadNoCaptions', 'CaptionReadSyntaxError',
'detect_format', 'CaptionNode', 'Caption', 'CaptionList', 'CaptionSet'
]
SUPPORTED_READERS = (
WebVTTReader, SRTReader)
def detect_format(caps):
"""
Detect the format of the provided caption string.
:returns: the reader class for the detected format.
"""
for reader in SUPPORTED_READERS:
if reader().detect(caps):
return reader
return None
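# Editor's sketch (not part of the original module): a typical conversion built on
# detect_format. The Reader.read()/Writer.write() call pattern follows the usual
# pycaption convention and is assumed here.
def convert_to_webvtt(caps):
    reader_cls = detect_format(caps)
    if reader_cls is None:
        raise CaptionReadNoCaptions("No supported caption format detected")
    caption_set = reader_cls().read(caps)
    return WebVTTWriter().write(caption_set)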
``` |
{
"source": "johntiger1/blog-posts",
"score": 3
} |
#### File: blog-posts/scripts/plot_roc.py
```python
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from sklearn.metrics import (confusion_matrix, f1_score, precision_recall_curve,
                             roc_auc_score, roc_curve)
def plot_conf_matrix_and_roc(estimator, X, y, figure_size=(16, 6)):
"""
    Plot both confusion matrix and ROC curve on the same figure.
Parameters:
-----------
estimator : sklearn.estimator
model to use for predicting class probabilities.
X : array_like
data to predict class probabilities.
y : array_like
true label vector.
figure_size : tuple (optional)
size of the figure.
Returns:
--------
plot : matplotlib.pyplot
plot confusion matrix and ROC curve.
"""
# Compute tpr, fpr, auc and confusion matrix
fpr, tpr, thresholds = roc_curve(y, estimator.predict_proba(X)[:, 1])
auc = roc_auc_score(y, estimator.predict_proba(X)[:, 1])
conf_mat_rf = confusion_matrix(y, estimator.predict(X))
# Define figure size and figure ratios
plt.figure(figsize=figure_size)
gs = GridSpec(1, 2, width_ratios=(1, 2))
# Plot confusion matrix
ax0 = plt.subplot(gs[0])
ax0.matshow(conf_mat_rf, cmap=plt.cm.Reds, alpha=0.2)
for i in range(2):
for j in range(2):
ax0.text(x=j, y=i, s=conf_mat_rf[i, j], ha="center", va="center")
plt.title("Confusion matrix", y=1.1, fontdict={"fontsize": 20})
plt.xlabel("Predicted", fontdict={"fontsize": 14})
plt.ylabel("Actual", fontdict={"fontsize": 14})
    # Plot ROC curve
ax1 = plt.subplot(gs[1])
ax1.plot(fpr, tpr, label="auc = {:.3f}".format(auc))
plt.title("ROC curve", y=1, fontdict={"fontsize": 20})
ax1.plot([0, 1], [0, 1], "r--")
plt.xlabel("False positive rate", fontdict={"fontsize": 16})
plt.ylabel("True positive rate", fontdict={"fontsize": 16})
plt.legend(loc="lower right", fontsize="medium");
def plot_roc(estimators, X, y, figure_size=(16, 6)):
"""
    Plot ROC curves for multiple estimators on the same figure.
Parameters:
-----------
estimators : dict
key, value for model name and sklearn.estimator to use for predicting
class probabilities.
X : array_like
data to predict class probabilities.
y : array_like
true label vector.
figure_size : tuple (optional)
size of the figure.
Returns:
--------
plot : matplotlib.pyplot
        plot ROC curves for all estimators.
"""
plt.figure(figsize=figure_size)
for estimator in estimators.keys():
# Compute tpr, fpr, auc and confusion matrix
fpr, tpr, thresholds = roc_curve(y, estimators[estimator].predict_proba(X)[:, 1])
auc = roc_auc_score(y, estimators[estimator].predict_proba(X)[:, 1])
        # Plot ROC curve
plt.plot(fpr, tpr, label="{}: auc = {:.3f}".format(estimator, auc))
plt.title("ROC curve", y=1, fontdict={"fontsize": 20})
plt.legend(loc="lower right", fontsize="medium")
plt.plot([0, 1], [0, 1], "--")
plt.xlabel("False positive rate", fontdict={"fontsize": 16})
plt.ylabel("True positive rate", fontdict={"fontsize": 16});
def plot_roc_and_pr_curves(models, X_train, y_train, X_valid, y_valid, roc_title, pr_title, labels):
"""
    Plot ROC and PR curves for all models.
Arguments
---------
models : list
list of all models.
X_train : list or 2d-array
2d-array or list of training data.
y_train : list
1d-array or list of training labels.
X_valid : list or 2d-array
2d-array or list of validation data.
y_valid : list
1d-array or list of validation labels.
roc_title : str
title of ROC curve.
pr_title : str
title of PR curve.
labels : list
label of each model to be displayed on the legend.
"""
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
if not isinstance(X_train, list):
for i, model in enumerate(models):
model_fit = model.fit(X_train, y_train)
model_probs = model.predict_proba(X_valid)[:, 1:]
model_preds = model.predict(X_valid)
model_auc_score = roc_auc_score(y_valid, model_probs)
# model_f1_score = f1_score(y_valid, model_preds)
fpr, tpr, _ = roc_curve(y_valid, model_probs)
precision, recall, _ = precision_recall_curve(y_valid, model_probs)
axes[0].plot(fpr, tpr, label=f"{labels[i]}, auc = {model_auc_score:.3f}")
axes[1].plot(recall, precision, label=f"{labels[i]}")
else:
for i, model in enumerate(models):
model_fit = model.fit(X_train[i], y_train[i])
model_probs = model.predict_proba(X_valid[i])[:, 1:]
model_preds = model.predict(X_valid[i])
model_auc_score = roc_auc_score(y_valid[i], model_probs)
# model_f1_score = f1_score(y_valid[i], model_preds)
fpr, tpr, _ = roc_curve(y_valid[i], model_probs)
precision, recall, _ = precision_recall_curve(y_valid[i], model_probs)
axes[0].plot(fpr, tpr, label=f"{labels[i]}, auc = {model_auc_score:.3f}")
axes[1].plot(recall, precision, label=f"{labels[i]}")
axes[0].legend(loc="lower right")
axes[0].set_xlabel("FPR")
axes[0].set_ylabel("TPR")
axes[0].set_title(roc_title)
axes[1].legend()
axes[1].set_xlabel("recall")
axes[1].set_ylabel("precision")
axes[1].set_title(pr_title)
plt.tight_layout()
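# Editor's sketch (not part of the original script): one way to call the helpers
# above on synthetic data. The dataset, models, and titles are illustrative; any
# sklearn classifiers with predict_proba will do.
def _example_usage():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split

    X, y = make_classification(n_samples=500, weights=[0.9, 0.1], random_state=0)
    X_train, X_valid, y_train, y_valid = train_test_split(
        X, y, stratify=y, random_state=0)
    plot_roc_and_pr_curves(
        models=[LogisticRegression(max_iter=1000), RandomForestClassifier(random_state=0)],
        X_train=X_train, y_train=y_train, X_valid=X_valid, y_valid=y_valid,
        roc_title="ROC curves", pr_title="PR curves",
        labels=["logreg", "random forest"])
    plt.show()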
```
#### File: blog-posts/scripts/utils.py
```python
import matplotlib.pyplot as plt
import numpy as np
from bandit_algorithms.epsilon_greedy.epsilon_greedy_algorithm import (
EpsilonGreedy,
AnnealingEpsilonGreedy
)
from bandit_algorithms.softmax.softmax_algorithm import (Softmax,
AnnealingSoftmax)
from bandit_algorithms.upper_confidence_bound.ucb import UCB
from testing.test_bandit_algorithms import BernoulliArm, test_algorithm
ALGORITHMS = {
"epsilon-Greedy": EpsilonGreedy,
"Softmax": Softmax,
"Annealing epsilon-Greedy": AnnealingEpsilonGreedy,
"Annealing Softmax": AnnealingSoftmax,
"UCB": UCB
}
def plot_algorithm(
alg_name="epsilon-Greedy", arms=None, best_arm_index=None,
hyper_params=None, num_simulations=1000, horizon=100, label=None,
fig_size=(18, 6)):
# Check if the algorithm doesn't have hyperparameter
if hyper_params is None:
# Run the algorithm
algo = ALGORITHMS[alg_name]()
chosen_arms, average_rewards, cum_rewards = test_algorithm(
algo, arms, num_simulations, horizon)
average_probs = np.where(chosen_arms == best_arm_index, 1, 0).sum(
axis=0) / num_simulations
# Plot the 3 metrics of the algorithm
fig, axes = plt.subplots(1, 3, figsize=fig_size)
axes[0].plot(average_probs)
axes[0].set_xlabel("Time", fontsize=14)
axes[0].set_ylabel("Probability of Selecting Best Arm", fontsize=14)
axes[0].set_title(
f"Accuray of {alg_name} alg.", y=1.05, fontsize=16)
axes[0].set_ylim([0, 1.05])
axes[1].plot(average_rewards)
axes[1].set_xlabel("Time", fontsize=14)
axes[1].set_ylabel("Average Reward", fontsize=14)
axes[1].set_title(
f"Avg. Rewards of {alg_name} alg.", y=1.05, fontsize=16)
axes[1].set_ylim([0, 1.0])
axes[2].plot(cum_rewards)
axes[2].set_xlabel("Time", fontsize=14)
axes[2].set_ylabel("Cumulative Rewards of Chosen Arm", fontsize=14)
axes[2].set_title(
f"Cumulative Rewards of {alg_name} alg.", y=1.05, fontsize=16)
plt.tight_layout()
else:
fig, axes = plt.subplots(1, 3, figsize=fig_size)
for hyper_param in hyper_params:
# Run the algorithm
algo = ALGORITHMS[alg_name](hyper_param)
chosen_arms, average_rewards, cum_rewards = test_algorithm(
algo, arms, num_simulations, horizon)
average_probs = np.where(chosen_arms == best_arm_index, 1, 0).sum(
axis=0) / num_simulations
# Plot the 3 metrics of the algorithm
axes[0].plot(average_probs, label=f"{label} = {hyper_param}")
axes[0].set_xlabel("Time", fontsize=14)
axes[0].set_ylabel(
"Probability of Selecting Best Arm", fontsize=14)
axes[0].set_title(
f"Accuray of {alg_name} alg.", y=1.05, fontsize=16)
axes[0].legend()
axes[0].set_ylim([0, 1.05])
axes[1].plot(average_rewards, label=f"{label} = {hyper_param}")
axes[1].set_xlabel("Time", fontsize=14)
axes[1].set_ylabel("Average Reward", fontsize=14)
axes[1].set_title(
f"Avg. Rewards of {alg_name} alg.", y=1.05, fontsize=16)
axes[1].legend()
axes[1].set_ylim([0, 1.0])
axes[2].plot(cum_rewards, label=f"{label} = {hyper_param}")
axes[2].set_xlabel("Time", fontsize=14)
axes[2].set_ylabel("Cumulative Rewards of Chosen Arm", fontsize=14)
axes[2].set_title(
f"Cumulative Rewards of {alg_name} alg.", y=1.05, fontsize=16)
axes[2].legend(loc="lower right")
plt.tight_layout()
def compare_algorithms(
algorithms=None, arms=None, best_arm_index=None, num_simulations=1000,
horizon=100, fig_size=(18, 6)):
    fig, axes = plt.subplots(1, 3, figsize=fig_size)
# Loop over all algorithms
for algorithm in algorithms:
# Run the algorithm
algo = ALGORITHMS[algorithm]
chosen_arms, average_rewards, cum_rewards = test_algorithm(
algo(), arms, num_simulations, horizon)
average_probs = np.where(chosen_arms == best_arm_index, 1, 0).sum(
axis=0) / num_simulations
# Plot the 3 metrics
axes[0].plot(average_probs, label=algo.__name__)
axes[0].set_xlabel("Time", fontsize=12)
axes[0].set_ylabel("Probability of Selecting Best Arm", fontsize=12)
axes[0].set_title(
f"Accuray of Different Algorithms", y=1.05, fontsize=14)
axes[0].set_ylim([0, 1.05])
axes[0].legend(loc="lower right")
axes[1].plot(average_rewards, label=algo.__name__)
axes[1].set_xlabel("Time", fontsize=12)
axes[1].set_ylabel("Average Reward", fontsize=12)
axes[1].set_title(
f"Average Rewards of Different Algorithms", y=1.05, fontsize=14)
axes[1].set_ylim([0, 1.0])
axes[1].legend(loc="lower right")
axes[2].plot(cum_rewards, label=algo.__name__)
axes[2].set_xlabel("Time", fontsize=12)
axes[2].set_ylabel("Cumulative Rewards of Chosen Arm", fontsize=12)
axes[2].set_title(
f"Cumulative Rewards of Different Algorithms", y=1.05, fontsize=14)
axes[2].legend(loc="lower right")
plt.tight_layout()
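# Editor's sketch (not part of the original script): comparing every registered
# algorithm on a small Bernoulli testbed. BernoulliArm taking a single success
# probability is an assumption about the local testing module.
def _example_compare():
    means = [0.1, 0.1, 0.1, 0.1, 0.9]
    arms = [BernoulliArm(mu) for mu in means]
    compare_algorithms(algorithms=list(ALGORITHMS.keys()), arms=arms,
                       best_arm_index=int(np.argmax(means)),
                       num_simulations=500, horizon=250)
    plt.show()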
``` |
{
"source": "johntiger1/multimodal_fairness",
"score": 2
} |
#### File: multimodal_fairness/IanFairnessHackery/evaluate_phenotype_preds.py
```python
from mimic3models import metrics
import sys
import os
import numpy as np
PRED_TASKS = {
"Acute and unspecified renal failure" : False,
"Acute cerebrovascular disease" : False,
"Acute myocardial infarction" : False,
"Cardiac dysrhythmias" : False,
"Chronic kidney disease" : False,
"Chronic obstructive pulmonary disease and bronchiectasis" : False,
"Complications of surgical procedures or medical care" : False,
"Conduction disorders" : False,
"Congestive heart failure" : False,
"nonhypertensive" : False,
"Coronary atherosclerosis and other heart disease" : False,
"Diabetes mellitus with complications" : False,
"Diabetes mellitus without complication" : False,
"Disorders of lipid metabolism" : False,
"Essential hypertension" : False,
"Fluid and electrolyte disorders" : False,
"Gastrointestinal hemorrhage" : False,
"Hypertension with complications and secondary hypertension" : False,
"Other liver diseases" : False,
"Other lower respiratory disease" : False,
"Other upper respiratory disease" : False,
"Pleurisy" : False,
"pneumothorax" : False,
"pulmonary collapse" : False,
"Pneumonia (except that caused by tuberculosis or sexually transmitted disease)" : False
}
def read_file(path):
predictions = []
labels = []
with open(path, 'r') as fr:
fr.readline()
for line in fr:
line = line.strip()
vals = line.split(",")
predictions.append(float(vals[2]))
labels.append(int(vals[3]))
return np.array(predictions), np.array(labels)
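# Editor's note: read_file expects a header row followed by four comma-separated
# columns per line -- id, episode, predicted probability, 0/1 label -- e.g.
# (header names and values are illustrative):
#   stay,episode,prediction,y_true
#   10011,1,0.83,1
#   10012,2,0.07,0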
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Must provide path to folder containing the prediction csv's in id/episode/pred/label format, and with" +
" filenames that are prefixed by the condition")
exit(-1)
merged_pred = None
merged_Y = None
indir = sys.argv[1]
for filename in os.listdir(indir):
prefixes = PRED_TASKS.keys()
        matches = list(filter(lambda x: filename.startswith(x), prefixes))
        # skip files that do not match any known prediction task
if len(matches) == 0:
continue
# Make sure only one file for this task
assert(not PRED_TASKS[matches[0]])
PRED_TASKS[matches[0]] = True
print("Evaluating {}".format(matches[0]))
match_pred, match_Y = read_file(os.path.join(indir, filename))
if merged_pred is None:
merged_pred = np.expand_dims(match_pred.copy(), axis=0)
merged_Y = np.expand_dims(match_Y.copy(), axis=0)
else:
merged_pred =np.concatenate((merged_pred, np.expand_dims(match_pred, axis=0)), axis=0)
merged_Y =np.concatenate((merged_Y, np.expand_dims(match_Y ,axis=0)), axis=0)
#print(merged_X.shape)
#print(merged_Y.shape)
metrics.print_metrics_binary(match_Y, match_pred)
print("----------------------------------------")
print("\n==========================================")
print("Evaluating all together:")
metrics.print_metrics_multilabel(merged_Y.T, merged_pred.T)
for key in PRED_TASKS:
if PRED_TASKS[key] != True:
print("WARNING: Data for task {} missing?".format(key))
```
#### File: multimodal_fairness/IanFairnessHackery/generate_sensitive.py
```python
import pandas as pd
PATH_TO_BENCHMARK_STAYS = "../data/root/all_stays.csv"
PATH_TO_MIMIC_ADMISSIONS = "/home/administrator/00Projects/Fairness/MIMIC_III/MIMIC_III/ADMISSIONS.csv"
#PATH_TO_MIMIC_ADMISSIONS = "/h/shossain/multimodal_fairness/data/physionet.org/files/mimiciii/1.4/ADMISSIONS.csv"
PATH_TO_MIMIC_ADMISSIONS = "/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/physionet.org/files/mimiciii/1.4/ADMISSIONS.csv"
# ../data/physionet.org/files/mimiciii/1.4/ADMISSIONS.csv
# Define data processing helpers:
# Aggregator: if any value in the group differs from the others, return UNKNOWN; otherwise return the shared value
def unk_if_diff(x):
default = None
for i, val in enumerate(x):
if i == 0:
default = val
else:
if val != default:
return "UNKNOWN"
return default
# Shorten things to first - or / if applicable
def clean(x):
if pd.isna(x):
return "UNKNOWN"
elif x in ["NOT SPECIFIED", "UNOBTAINABLE", "UNABLE TO OBTAIN", "PATIENT DECLINED TO ANSWER", "UNKNOWN (DEFAULT)"]:
return "UNKNOWN"
elif x == "HISPANIC OR LATINO":
return "HISPANIC"
def truncate(x, pattern):
ind = x.find(pattern)
if ind != -1:
return x[:ind]
return x
x = truncate(x,'-')
x = truncate(x,'/')
return x.strip()
#count = benchmark_df["SUBJECT_ID"].value_counts()
#print(count[count > 2])
#print(benchmark_df.loc[benchmark_df["SUBJECT_ID"] == 27374])
#benchmark_df.loc[benchmark_df["SUBJECT_ID"] == 27374,"ETHNICITY"] = "UNKNOWN/NOT SPECIFIED"
#benchmark_df.drop_duplicates(inplace=True)
#count = benchmark_df["SUBJECT_ID"].value_counts()
#print(count[count > 1].index)
def print_db(sensitive_dataframe):
# My sanity checking
print("Ethnicities in data:")
print(sensitive_dataframe['ETHNICITY'].value_counts())
print("Sex in data:")
print(sensitive_dataframe['GENDER'].value_counts())
print("Insurance in data:")
print(sensitive_dataframe['INSURANCE'].value_counts())
print("Religion in data:")
print(sensitive_dataframe['RELIGION'].value_counts())
print("Marital status in data:")
print(sensitive_dataframe['MARITAL_STATUS'].value_counts())
# Load sensitive features from benchmark
benchmark_df = pd.read_csv(PATH_TO_BENCHMARK_STAYS)
benchmark_df = benchmark_df[["SUBJECT_ID","ETHNICITY","GENDER"]]
# Process to ensure SUBJECT_ID unique, and truncate descriptors
benchmark_df = benchmark_df.groupby("SUBJECT_ID").agg(unk_if_diff)
benchmark_df= benchmark_df.applymap(clean)
#print(benchmark_df)
# Load sensitive features from mimic, repeat processing
mimic_df = pd.read_csv(PATH_TO_MIMIC_ADMISSIONS, engine="python")
mimic_df = mimic_df[["SUBJECT_ID","INSURANCE","RELIGION", "MARITAL_STATUS"]]
mimic_df = mimic_df.groupby("SUBJECT_ID").agg(unk_if_diff)
mimic_df = mimic_df.applymap(clean)
#print(mimic_df)
# Do a join to get all of the sensitive attributes in a single dataframe
joined = benchmark_df.merge(mimic_df, on="SUBJECT_ID", how="inner", validate="one_to_one")
#print(joined)
joined.to_csv("full_detail_sensitive.csv")
print("ORIGINAL CLASSES")
print_db(joined)
original = joined.copy()
# Create first version of sensitive attributes - merge ethnicities into WHITE & NON_WHITE & save
joined["ETHNICITY"] = joined["ETHNICITY"].apply(lambda x: x if x == "WHITE" else "NON_WHITE")
joined.to_csv("sensitive_bin_eth.csv")
# Create first version of sensitive attributes - merge ethnicities into WHITE & NON_WHITE & save
joined = original.copy()
joined["INSURANCE"] = joined["INSURANCE"].apply(lambda x: x if x == "Medicare" else "Non-Medicare")
joined.to_csv("sensitive_bin_ins.csv")
# Create second version of sensitive attributes - bin ethnicities into 5 groups & save
joined = original.copy()
joined["ETHNICITY"] = joined["ETHNICITY"].apply(lambda x: x if x in ["WHITE", "BLACK","HISPANIC","ASIAN"] else "OTHER")
# Save results
joined.to_csv("sensitive_5_eth.csv")
# Create third version of sensitive attributes - LOSES DATA! Removes all with UNK ethnicity
joined = joined[joined.ETHNICITY != "OTHER"]
joined.to_csv("partial_sensitive_4_eth.csv")
print("4 ethnicity variant")
print_db(joined)
# Create fourth version of sensitive attributes - LOSES DATA! Removes all with UNK Insurance
# and merged medicaid with government
joined = joined[joined.INSURANCE != "UNKNOWN"]
joined["INSURANCE"] = joined["INSURANCE"].apply(lambda x: x if x in ["Private", "Medicare","Self Pay"] else "Gov/Medicaid")
joined.to_csv("partial_sensitive_all_4.csv")
print("4 ethnicity/insurance variant")
print_db(joined)
```
#### File: multimodal_fairness/mimic3models/fair_classifier.py
```python
import numpy as np
from fairlearn.postprocessing import ThresholdOptimizer
from sklearn.metrics import roc_auc_score
class pseudo_classifier:
""" Note that the actual classifier is already trained (unaware as it ought to be) and
post-processing does not require access to the underlying features. So this class is
basically a wrapper around that prediction so we can pass it nicely onto the
fairlearn framework
"""
def __init__(self, train_X, train_Y, train_score_Y, sensitive_train, \
test_X=None, test_Y=None, test_score_Y=None, sensitive_test=None, \
sensitive_features_dict=None):
self.train_X = train_X
self.train_Y = train_Y
self.train_score_Y = train_score_Y
self.sensitive_train = sensitive_train
self.test_X = test_X
self.test_Y = test_Y
self.test_score_Y = test_score_Y
self.sensitive_test = sensitive_test
self.sensitive_features_dict = sensitive_features_dict
self.train_size = len(self.train_X)
self.trained = False
self.groups = None
def fit(self, X, y):
""" No need to implement this as it is already taken care of. We simply need
to map
"""
self.answers_map = {}
for i, sample in enumerate(self.train_X):
self.answers_map[(sample[0], sample[1])] = (self.sensitive_train[i], self.train_score_Y[i], self.train_Y[i])
if self.test_X is not None:
for i, sample in enumerate(self.test_X):
self.answers_map[(sample[0], sample[1])] = (self.sensitive_test[i], self.test_score_Y[i], self.test_Y[i])
pass
def predict(self, samples, sensitive_features=None):
# predict the outcome of the model on the given samples. If samples
# is none, then the self.test_data will be used
# return the predictions scores
out = np.ones(len(samples))
for i, sample in enumerate(samples):
out[i] = self.answers_map[(sample[0], sample[1])][1]
return out
def predict_hard(self, samples, sensitive_features=None):
# predict the outcome of the model on the given samples. If samples
# is none, then the self.test_data will be used
# return the loss as well as the predictions
scores = np.round(self.predict(samples, sensitive_features))
return scores
def get_group_confusion_matrix(self, sensitive_features, X, true_Y):
# For a trained classifier, get the true positive and true negative rates based on
        # group identity, computed per group (currently only works for binary labels).
        # sensitive_features gives each sample's group membership.
#
# Two returned dictionaries
groups = np.unique(sensitive_features)
y_pred_probs = self.predict(X, sensitive_features)
y_pred = np.round(y_pred_probs)
micro_acc = 1 - np.sum(np.power(true_Y - y_pred, 2))/len(true_Y)
print("Overall Accuracy: ", micro_acc)
micro_auc = roc_auc_score(true_Y, y_pred_probs)
print("Overall AUC: ", micro_auc)
macro_acc = 0
macro_auc = 0
out_dict = {} # The format is: {group:[tp, fp, tn, fn]}
for index, group in enumerate(groups):
indicies = np.where(sensitive_features==group)[0]
true_class = true_Y[indicies]
pred_class = y_pred[indicies]
true_pos_index = np.where(true_class==1)[0]
true_neg_index = np.where(true_class==0)[0]
if len(true_pos_index) == 0 or len(true_neg_index) == 0:
print("No True positives of true negatives in this group")
continue
tp = len(np.where(pred_class[true_pos_index]==1)[0])/len(true_pos_index)
tn = len(np.where(pred_class[true_neg_index]==0)[0])/len(true_neg_index)
fp = len(np.where(pred_class[true_neg_index]==1)[0])/len(true_neg_index)
fn = len(np.where(pred_class[true_pos_index]==0)[0])/len(true_pos_index)
auc = roc_auc_score(true_class, y_pred_probs[indicies])
macro_auc += auc
accuracy = 1 - np.sum(np.power(true_class - pred_class, 2))/len(true_class)
macro_acc += accuracy
out_dict[group] = [tp, tn, fp, fn, accuracy, auc]
print(group, "confusion matrix")
if tp == 0 and fp == 0:
print("None classified as Positive in group", group)
print("\t Group Accuracy: ", accuracy)
else:
precision = tp / (tp + fp)
recall = tp / (tp + fn)
if (precision+recall) != 0:
f1 = 2*precision*recall/(precision+recall)
print("\t F1 score: ", f1)
else:
print("\t F1 score: Undefined, precision=recall=0")
print("\t AUC: ", auc)
print("\t Group Accuracy: ", accuracy)
print("\t True positive rate:", tp)
print("\t True negative rate:", tn)
print("\t False positive rate:", fp)
print("\t False negative rate:", fn)
macro_acc /= len(groups)
macro_auc /= len(groups)
return out_dict, {"Accuracy": (micro_acc, macro_acc), "AUC": (micro_auc, macro_auc)}
class fair_classifier(pseudo_classifier):
def __init__(self, train_X, train_y, train_score_y, sensitive_train, \
test_X, test_y, test_score_y, sensitive_test, metric, sensitive_features_dict=None, HARD=False):
self.train_X = train_X
self.train_Y = train_y
if HARD:
self.train_score_Y = np.round(train_score_y)
else:
self.train_score_Y = train_score_y
self.sensitive_train = sensitive_train
self.test_X = test_X
self.test_Y = test_y
if HARD:
self.test_score_Y = np.round(test_score_y)
else:
self.test_score_Y = test_score_y
self.sensitive_test = sensitive_test
self.sensitive_features_dict = sensitive_features_dict
self.erm_classifier = pseudo_classifier(self.train_X, self.train_Y, self.train_score_Y, \
self.sensitive_train, self.test_X, self.test_Y, self.test_score_Y, self.sensitive_test)
assert(metric in ["equalized_odds", "demographic_parity"])
self.metric = metric
def fit(self):
self.erm_classifier.fit(self.train_X, self.train_Y)
self.model = ThresholdOptimizer(estimator=self.erm_classifier, constraints=self.metric, prefit=True)
self.model.fit(self.train_X, self.train_Y, sensitive_features=self.sensitive_train)
def predict(self, x_samples, sensitive_features):
y_samples = self.model.predict(x_samples, sensitive_features=sensitive_features)
return y_samples
def get_accuracy(self, X, y_true, sensitive_features):
y_pred = self.predict(X, sensitive_features)
return 1 - np.sum(np.power(y_pred - y_true, 2))/len(y_true)
def predict_prob(self, x_samples, sensitive_features):
y_samples = self.model._pmf_predict(x_samples, sensitive_features=sensitive_features)
return y_samples
def get_avg_group_confusion_matrix(self, sensitive_features, X, true_Y):
# produces average tp/fp/tn/fn/acc per group
# Basically get_group_confusion_matrix but modified to return average values where possible
# For a trained classifier, get the true positive and true negative rates based on
        # group identity, computed per group (currently only works for binary labels).
        # sensitive_features gives each sample's group membership.
groups = np.unique(sensitive_features)
tp_rate = {}
fp_rate = {}
tn_rate = {}
fn_rate = {}
true_pos_index = np.where(true_Y == 1)
true_neg_index = np.where(true_Y == 0)
# Calculate probability of classification for each input
y_pred_prob = self.predict_prob(X, sensitive_features)
# Calculate average probability of correct classification (i.e. expected accuracy)
avg_micro_acc = (np.sum(y_pred_prob[true_pos_index][:,1]) + np.sum(y_pred_prob[true_neg_index][:,0])) / len(true_Y)
print("Average Overall Accuracy: ", avg_micro_acc)
micro_auc = roc_auc_score(true_Y, y_pred_prob[:,1])
print("Overall AUC: ", micro_auc)
out_dict = {} # The format is: {group:[tp, fp, tn, fn]}
avg_macro_acc = 0
macro_auc = 0
for index, group in enumerate(groups):
indicies = np.where(sensitive_features == group)[0]
true_class = true_Y[indicies]
pred_prob = y_pred_prob[indicies]
true_pos_index = np.where(true_class == 1)[0]
true_neg_index = np.where(true_class == 0)[0]
if len(true_pos_index) == 0 or len(true_neg_index) == 0:
print("No True positives or no true negatives in this group")
continue
# Find avg rates (i.e. avg probability of tp/tn/fp/fn)
tp = np.sum(pred_prob[true_pos_index][:,1]) / len(true_pos_index)
tn = np.sum(pred_prob[true_neg_index][:,0]) / len(true_neg_index)
fp = np.sum(pred_prob[true_neg_index][:,1]) / len(true_neg_index)
fn = np.sum(pred_prob[true_pos_index][:,0]) / len(true_pos_index)
tp_rate[group] = tp
tn_rate[group] = tn
fp_rate[group] = fp
fn_rate[group] = fn
# Expected accuracy
accuracy = (np.sum(pred_prob[true_pos_index][:,1]) + np.sum(pred_prob[true_neg_index][:,0])) / len(true_class)
avg_macro_acc += accuracy
auc = roc_auc_score(true_class, pred_prob[:,1])
macro_auc += auc
out_dict[group] = [tp, tn, fp, fn, accuracy, auc]
print(group, "average confusion matrix")
if tp == 0 and fp == 0:
print("None classified as Positive in group", group)
print("\t Average Group Accuracy: ", accuracy)
else:
# Can't compute F1 out of these since dealing with average values
#precision = tp / (tp + fp)
#recall = tp / (tp + fn)
#f1 = 2 * precision * recall / (precision + recall)
#print("\t F1 score: ", f1)
print("\t Average Group Accuracy: ", accuracy)
print("\t Group AUC: ", auc)
print("\t Average True positive rate:", tp)
print("\t Average True negative rate:", tn)
print("\t Average False positive rate:", fp)
print("\t Average False negative rate:", fn)
avg_macro_acc /= len(groups)
macro_auc /= len(groups)
return out_dict, {"Accuracy": (avg_micro_acc, avg_macro_acc), "AUC": (micro_auc, macro_auc)}
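# Editor's sketch (not part of the original module): wiring pre-computed scores
# into the post-processing wrapper above. Rows of X only act as (id, episode)-style
# lookup keys, since pseudo_classifier returns stored scores instead of recomputing
# them. The arrays are tiny placeholders; real use needs far more samples per group.
def _example_threshold_postprocessing():
    train_X = np.array([[0, 1], [1, 1], [2, 1], [3, 1]])
    train_y = np.array([0, 1, 0, 1])
    train_scores = np.array([0.2, 0.9, 0.4, 0.7])
    sens_train = np.array(["A", "A", "B", "B"])
    test_X = np.array([[4, 1], [5, 1]])
    test_y = np.array([1, 0])
    test_scores = np.array([0.8, 0.3])
    sens_test = np.array(["A", "B"])
    clf = fair_classifier(train_X, train_y, train_scores, sens_train,
                          test_X, test_y, test_scores, sens_test,
                          metric="equalized_odds")
    clf.fit()
    return clf.predict(test_X, sensitive_features=sens_test)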
```
#### File: new_allen_nlp/Mortality/MortalityReader.py
```python
import tempfile
from typing import Dict, Iterable, List, Tuple
from overrides import overrides
import torch
import allennlp
from allennlp.data import DataLoader, DatasetReader, Instance, Vocabulary
from allennlp.data.fields import LabelField, TextField, MetadataField, MultiLabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder, CnnEncoder
'''transformer stuff'''
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import PretrainedTransformerIndexer
# from allennlp.modules.text_field_embedders import
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
from allennlp.modules.seq2vec_encoders import BertPooler
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy, Auc
from allennlp.training.optimizers import AdamOptimizer
from allennlp.training.trainer import Trainer, GradientDescentTrainer
from allennlp.training.util import evaluate
# import the regularization
from allennlp.nn.regularizers import L2Regularizer, RegularizerApplicator
import pandas as pd
import os
import gc
from tqdm.auto import tqdm
import sys
sys.path.append("/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/")
from src.preprocessing.text_preprocessing import preprocess_mimic
import torch
import matplotlib.pyplot as plt
from CONST import LOGGER_NAME
'''
get the logger, if it is available
'''
import logging
import numpy as np
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(LOGGER_NAME)
logger.debug("hello")
@DatasetReader.register("MortalityReader")
class MortalityReader(DatasetReader):
def __init__(self,
lazy: bool = True,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = 768*4,
train_listfile: str = "/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/in-hospital-mortality/train/listfile.csv",
test_listfile: str = "/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/in-hospital-mortality/test/listfile.csv",
notes_dir: str = "/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/extracted_notes",
skip_patients_file: str ="/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/extracted_notes/null_patients.txt",
stats_write_dir: str="/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/extracted_notes/",
all_stays: str = "/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/root/all_stays.csv",
limit_examples: int = None,
use_preprocessing: bool = False,
num_classes: int=2,
mode: str='train',
data_type: str="MORTALITY",
args=None,
hadm2eps_path: str="/scratch/gobi1/johnchen/new_git_stuff/multimodal_fairness/data/extracted_notes/hadm2episode.dict"
):
super().__init__(lazy)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self.max_tokens = max_tokens
self.train_listfile = train_listfile
self.test_listfile = test_listfile
self.notes_dir = notes_dir
self.use_preprocessing = use_preprocessing
logger.critical(f"we are getting the max tokens {self.max_tokens} "
f"and use_preproc is {self.use_preprocessing}")
self.null_patients = []
with open(skip_patients_file, "r") as file:
for line in file:
self.null_patients.append(line.strip())
self.stats_write_dir = stats_write_dir
self.all_stays_path = all_stays
self.all_stays_df = self.get_all_stays()
self.limit_examples = limit_examples
self.cur_examples = 0
self.lengths = []
self.num_classes = num_classes
self.mode = mode
self.sampled_idx = {}
self.data_type = data_type
self.args = args
self.get_idx() #realistically, only the train_idx will be set, and we simply need to compare against
# self.null_patients
self.vocab = None
self.hadm2eps_path = hadm2eps_path
self.listfile_df = pd.read_csv(train_listfile)
if self.data_type == "PHENOTYPING" or self.data_type == "DECOMPENSATION":
self.labels_start_idx = 2
elif self.data_type == "MORTALITY":
self.labels_start_idx = 1
self.labels = list(self.listfile_df.columns[self.labels_start_idx:])
# def set_mode(self, mode: str):
# if mode == "train":
# self.limit_examples = None
# else:
#
# pass
def get_idx(self):
train_sampler = self.get_sampler(self.train_listfile)
self.sampled_idx["train"] = list(train_sampler)
self.train_sampler = train_sampler
test_sampler = self.get_sampler(self.test_listfile)
self.sampled_idx["valid"] = list(test_sampler)
self.test_sampler = test_sampler
if self.limit_examples:
self.sampled_idx["train"] = self.sampled_idx["train"][:self.limit_examples]
self.sampled_idx["valid"] = self.sampled_idx["valid"][:self.limit_examples]
def get_label_stats(self, file_path: str):
'''
Gets label (mortality) stats
'''
# get stats on the dataset listed at _path_
from collections import defaultdict
self.stats = defaultdict(int)
with open(file_path, "r") as file:
file.readline() # could also pandas readcsv and ignore first line
for line in file:
info_filename, label = line.split(",")
self.stats[int(label)] +=1
return self.stats
'''
Parses the line, according to the mode. Returns a dict with the proper keys set
Could also have implemented this with a DF instead
'''
def parse_line(self, line):
info_dict = {}
mapping_dict = {}
if self.data_type == "MORTALITY":
headers = ["filename", "label"]
elif self.data_type == "DECOMPENSATION":
headers = ["filename", "time", "label"]
elif self.data_type == "PHENOTYPING":
headers = ["filename", "time", "label"]
else:
headers = ["filename", "time", "label"]
for i,header in enumerate(headers):
mapping_dict[header] = i #can also use a dict comprehension here
info_array = line.split(",")
for key in mapping_dict:
if key == "label":
info_dict[key] = int(info_array[mapping_dict[key]])
elif key == "time":
info_dict[key] = float(info_array[mapping_dict[key]])
else:
info_dict[key] = info_array[mapping_dict[key]]
return info_dict
'''
Reads in all the labels, and forms a sampler, according to a balanced approach.
'''
def get_sampler(self, listfile: str = ""):
self.labels = []
# sampling_num_classes
sampling_num_classes = None
if self.data_type == "DECOMPENSATION" or self.data_type == "MORTALITY":
sampling_num_classes = 2
else:
sampling_num_classes = 25
self.class_counts = np.zeros(sampling_num_classes) # fix sampling for phenotypes
with open(listfile, "r") as file:
file.readline()
for line in file:
info_dict = self.parse_line(line)
self.labels.append([info_dict["label"]])
self.class_counts[int(info_dict["label"])] += 1
# now, we assign the weights to ALL the class labels
self.class_weights = 1/self.class_counts
# essentially, assign the weights as the ratios, from the self.stats stuff
all_label_weights = self.class_weights[self.labels].squeeze() #produce an array of size labels, but looking up the value in class weights each time
num_samples = self.limit_examples if self.limit_examples else len(all_label_weights)
num_samples = min(num_samples, len(all_label_weights))
if self.args.sampler_type == "balanced":
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights=all_label_weights,
num_samples=num_samples,
replacement = False)
elif self.args.sampler_type == "random":
sampler = torch.utils.data.sampler.SubsetRandomSampler(indices=[i for i in range(len(all_label_weights))])
# sampler = list(sampler)[:num_samples]
else:
logger.critical("Weird sampler specified \n")
sampler = None
return sampler
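    # Editor's note (illustrative numbers): with class_counts = [900, 100] the
    # weights become [1/900, 1/100], so each minority example is nine times as
    # likely to be drawn as a majority example, which is what balances the batches.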
def get_sampler_from_dataset(self, dataset):
self.labels = []
self.class_counts = np.zeros(2)
for data in dataset: # could replace this with an arbitrary data source, and we just yield from it
info_dict = data.fields
label = int(info_dict["label"].label)
self.labels.append(label)
self.class_counts[label] += 1
# now, we assign the weights to ALL the class labels
self.class_weights = 1/self.class_counts
# essentially, assign the weights as the ratios, from the self.stats stuff
all_label_weights = self.class_weights[self.labels] #produce an array of size labels, but looking up the value in class weights each time
num_samples = self.limit_examples if self.limit_examples else len(all_label_weights)
num_samples = min(num_samples, len(all_label_weights))
if self.args.sampler_type == "balanced":
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights=all_label_weights,
num_samples=num_samples,
replacement = False)
else:
sampler = torch.utils.data.sampler.SubsetRandomSampler(indices=[i for i in range(len(all_label_weights))])
# sampler = list(sampler)[:num_samples]
return sampler #now that we have a sampler, we can do things: pass it into the dataloader
'''
Creates and saves a histogram of the note lengths
'''
def make_lengths_histogram(self):
pass
'''
Gets stats for the data listed at the datapath
'''
def get_note_stats(self, file_path, name="train"):
print(f"in note stats, the logger is {logger} and we have {__name__}")
print(logger.getEffectiveLevel())
from collections import defaultdict
self.note_stats = defaultdict(list)
exclusions = 0
num_examples = 0
with open(file_path, "r") as file:
for line in file:
num_examples+=1
with open(file_path, "r") as file, \
open(os.path.join(self.stats_write_dir, "note_lengths.txt") , "a") as note_length_file:
file.readline() # could also pandas readcsv and ignore first line
for example_number,line in enumerate(tqdm(file, total=num_examples)):
if self.mode != "test" and self.limit_examples and example_number > self.limit_examples:
break
info_filename, label = line.split(",")
info = info_filename.split("_")
patient_id = info[0]
# verify string inside a list of string
if patient_id not in self.null_patients: # could also just do try except here
eps = int("".join([c for c in info[1] if c.isdigit()]))
notes = pd.read_pickle(os.path.join(self.notes_dir, patient_id, "notes.pkl"))
notes[["CHARTTIME", "STORETIME", "CHARTDATE"]] = notes[["CHARTTIME", "STORETIME", "CHARTDATE"]].apply(pd.to_datetime)
# fill in the time, do two passes. Any not caught in the first pass will get helped by second
notes["CHARTTIME"] = notes["CHARTTIME"].fillna(notes["STORETIME"])
notes["CHARTTIME"] = notes["CHARTTIME"].fillna(value=notes["CHARTDATE"].map(lambda x: pd.Timestamp(x) + pd.Timedelta(days=1) - pd.Timedelta(seconds=1)))
assert len(notes[notes["CHARTTIME"].isnull()]) == 0 # all of them should have been filled in.
# now, let's sort the notes
episode_specific_notes = notes[notes["EPISODES"] == eps].copy(deep=True)
# hadm_id = episode_specific_notes.groupby(["HADM_ID"]).agg({""}) # hadm ids seem to 1-to1 correspond to episodes
hadm_id = episode_specific_notes["HADM_ID"]
one_hadm_id = hadm_id.unique()
logger.info(type(one_hadm_id))
assert (one_hadm_id.shape[0]) == 1
assert len(one_hadm_id) == 1
icu_intime = self.all_stays_df[ self.all_stays_df["HADM_ID"] == one_hadm_id[0]]
# we are assuming that the intime is not null
intime_date = pd.Timestamp(icu_intime["INTIME"].iloc[0]) # iloc will automatically extract once you get to the base element
intime_date_plus_time = pd.Timestamp(intime_date) + pd.Timedelta(days=2)
# all notes up to two days. Including potentially previous events.
mask = ( episode_specific_notes["CHARTTIME"] > intime_date) & (episode_specific_notes["CHARTTIME"] <= intime_date_plus_time)
all_mask = (episode_specific_notes["CHARTTIME"] <= intime_date_plus_time)
time_episode_specific_notes = episode_specific_notes[mask].copy(deep=True)
logger.debug("Went from {} to {} notes\n".format(len(episode_specific_notes), len(time_episode_specific_notes)))
if len(time_episode_specific_notes) > 0:
text_df = time_episode_specific_notes
text_df.sort_values("CHARTTIME", ascending=True, inplace=True) # we want them sorted by increasing time
                        # join all notes in the window in chronological order (not just the first note)
text = " ".join(text_df["TEXT"].tolist()) #assuming sorted order
tokens = self.tokenizer.tokenize(text)
if patient_id in self.note_stats:
logger.info("Encountering the patient another time, for another episode {} {}".format(patient_id, eps))
self.note_stats[patient_id].append(len(tokens) )# the same patient id can be encoutnered for multiple episodes
if int(patient_id)%1000==0:
logger.info("text for patient {} \n: {}".format(patient_id,text))
logger.info("end of text for patient {} \n".format(patient_id))
else:
logger.warning("No text found for patient {}. This is with the time hour {} window\n. ".format(patient_id, 48))
exclusions +=1
'''below code is functionally useless; much better to visualize with plot'''
fig, ax = plt.subplots()
# let's flatten a dictionary of lists.
note_lengths = []
for lst in self.note_stats.values():
note_lengths.extend(lst)
ax.hist(note_lengths, range=(0, max(note_lengths)), bins=100, rwidth=0.9 )
ax.set_title("Histogram of total note lengths")
fig.savefig(os.path.join(self.stats_write_dir, f"{name}_decomp_note_length_hist.png"))
logger.critical("For {} With decompensation windowing, removed {}\n".format(name, exclusions))
return self.note_stats
def get_all_stays(self):
my_stays_df = pd.read_csv(self.all_stays_path)
return my_stays_df
@overrides
def _read(self, listfile_path: str) -> Iterable[Instance]:
'''NOTE: because this is an overrides, it CANNOT accept another arg!'''
'''This function is only expected to be called with lazy set to FALSE. '''
'''Expect: one instance per line'''
logger.critical("read method is called")
if self.mode != "test":
sampled_idx = self.sampled_idx[self.mode]
listfile_df = pd.read_csv(listfile_path)
def str_build(row):
labels = []
for col in row.index:
if row[col] == 1:
labels.append(col)
# labels.join()
return labels
for idx,row in listfile_df.iterrows():
if self.mode == "test" or idx in sampled_idx : #when test, use everything
list_labels = str_build(row.iloc[self.labels_start_idx:])
multi_labels = MultiLabelField(list_labels)
time = row.get("period_length", default=48) # float(48) #hardcode to 48
info = row.get("stay").split("_")
label = row.get("y_true", -1)
patient_id = info[0]
# verify string inside a list of string
# null patients are thrown out. But only on a task specific basis.
if patient_id not in self.null_patients: # could also just do try except here
eps = int("".join([c for c in info[1] if c.isdigit()]))
notes = pd.read_pickle(os.path.join(self.notes_dir, patient_id, "notes.pkl"))
notes[["CHARTTIME", "STORETIME", "CHARTDATE"]] = notes[["CHARTTIME", "STORETIME", "CHARTDATE"]].apply(pd.to_datetime)
# fill in the time, do two passes. Any not caught in the first pass will get helped by second
notes["CHARTTIME"] = notes["CHARTTIME"].fillna(notes["STORETIME"])
notes["CHARTTIME"] = notes["CHARTTIME"].fillna(value=notes["CHARTDATE"].map(lambda x: pd.Timestamp(x) + pd.Timedelta(days=1) - pd.Timedelta(seconds=1)))
assert len(notes[notes["CHARTTIME"].isnull()]) == 0 # all of them should have been filled in.
# now, let's sort the notes
episode_specific_notes = notes[notes["EPISODES"] == eps]
hadm_id = episode_specific_notes["HADM_ID"]
one_hadm_id = hadm_id.unique()
if len(one_hadm_id) <= 0:
logger.critical("MISSING DATA FOR PATIENT EPS TIME {} {} {}. Skipping\n".format(patient_id, eps, time ))
continue
if self.data_type != "PHENOTYPING":
icu_intime = self.all_stays_df[self.all_stays_df["HADM_ID"] == one_hadm_id[0]]
# we are assuming that the intime is not null
intime_date = pd.Timestamp(icu_intime["INTIME"].iloc[
0]) # iloc will automatically extract once you get to the base element
intime_date_plus_time = pd.Timestamp(intime_date) + pd.Timedelta(hours=int(time))
# all notes up to two days. Including potentially previous events.
mask = (episode_specific_notes["CHARTTIME"] > intime_date) & (
episode_specific_notes["CHARTTIME"] <= intime_date_plus_time)
time_episode_specific_notes = episode_specific_notes[mask].copy(deep=True)
else:
time_episode_specific_notes = episode_specific_notes.copy(deep=True)
if len(time_episode_specific_notes) > 0:
text_df = time_episode_specific_notes
text_df.sort_values("CHARTTIME", ascending=False, inplace=True) # we want them sorted by increasing time
                        # join all notes in the window; note the sort above puts the most recent note first
text = " ".join(text_df["TEXT"].tolist())
if self.use_preprocessing:
token_sent_stream = preprocess_mimic(text)
tokens = []
cur_tokens = 0
for i,token_sent in enumerate(token_sent_stream):
if cur_tokens > self.max_tokens: break
cur_tokens += len(token_sent.split())
tokens.append(token_sent)
text = " ".join(tokens) #overwrite the text!
tokens = self.tokenizer.tokenize(text)[:self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
label_field = LabelField(str(label).strip())
meta_data_field = MetadataField({"patient_id": patient_id,
"episode": eps,
"hadm_id": one_hadm_id[0], # just the specific value
"time" : time #yield the time too
})
fields = {'text': text_field, 'label': multi_labels, "metadata": meta_data_field}
yield Instance(fields)
# after the generator yields, code will return here. (think of yield as a pause)
# self.cur_examples += 1
else:
logger.warning("No text found for patient {}".format(patient_id))
# in this case, we ignore the patient
else:
logger.debug("we are skipping some indices {}".format(idx))
```
#### File: old_allen_mort_pred/tests/model_test.py
```python
from allennlp.common.testing import ModelTestCase
class AcademicPaperClassifierTest(ModelTestCase):
def setUp(self):
super(AcademicPaperClassifierTest, self).setUp()
self.set_up_model('tests/fixtures/academic_paper_classifier.json',
'tests/fixtures/s2_papers.jsonl')
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
if __name__ == "__main__":
test = AcademicPaperClassifierTest()
test.setUp()
test.test_model_can_train_save_and_load()
# print()
```
#### File: text_mortality/dataset_readers/SemanticScholarDatasetReader.py
```python
from typing import Dict
import json
import logging
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("s2_papers")
class SemanticScholarDatasetReader(DatasetReader):
"""
Reads a JSON-lines file containing papers from the Semantic Scholar database, and creates a
dataset suitable for document classification using these papers.
Expected format for each input line: {"paperAbstract": "text", "title": "text", "venue": "text"}
The JSON could have other fields, too, but they are ignored.
The output of ``read`` is a list of ``Instance`` s with the fields:
title: ``TextField``
abstract: ``TextField``
label: ``LabelField``
where the ``label`` is derived from the venue of the paper.
Parameters
----------
lazy : ``bool`` (optional, default=False)
Passed to ``DatasetReader``. If this is ``True``, training will start sooner, but will
take longer per batch. This also allows training with datasets that are too large to fit
in memory.
tokenizer : ``Tokenizer``, optional
Tokenizer to use to split the title and abstrct into words or other kinds of tokens.
Defaults to ``WordTokenizer()``.
token_indexers : ``Dict[str, TokenIndexer]``, optional
Indexers used to define input token representations. Defaults to ``{"tokens":
SingleIdTokenIndexer()}``.
"""
def __init__(self,
lazy: bool = False,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None) -> None:
super().__init__(lazy)
self._tokenizer = tokenizer or SpacyTokenizer()
# this is the token 2 index, mapping!
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
@overrides
def _read(self, file_path):
'''
You could also read a csv, etc.
The purpose of this is to provide examples to consume.
'''
with open(cached_path(file_path), "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for line in data_file:
line = line.strip("\n")
if not line:
continue
paper_json = json.loads(line)
title = paper_json['title']
abstract = paper_json['paperAbstract']
venue = paper_json['venue']
yield self.text_to_instance(title, abstract, venue)
@overrides
def text_to_instance(self, title: str, abstract: str, venue: str = None) -> Instance: # type: ignore
'''
Converts text to instances. Specifies what an Instance looks like.
An instance is created from a set of fields.
Note that our tokenizer is left unspecified
(allows us to use character level tokens).
And our token_indexer is also left unspecified. Allows us to use GloVe word embeddings.
'''
# pylint: disable=arguments-differ
tokenized_title = self._tokenizer.tokenize(title)
tokenized_abstract = self._tokenizer.tokenize(abstract)
title_field = TextField(tokenized_title, self._token_indexers)
abstract_field = TextField(tokenized_abstract, self._token_indexers)
fields = {'title': title_field, 'abstract': abstract_field}
if venue is not None:
fields['label'] = LabelField(venue)
return Instance(fields)
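# A minimal usage sketch (illustrative only; the file name below is a placeholder):
#   reader = SemanticScholarDatasetReader()
#   instances = reader.read("s2_papers.jsonl")
#   # each Instance carries 'title', 'abstract' and, when a venue is present, 'label'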
``` |
{
"source": "johntiger1/vaal_querying",
"score": 3
} |
#### File: johntiger1/vaal_querying/plotting_ci.py
```python
import numpy as np
import scipy as sp
import scipy.stats as stats
import matplotlib.pyplot as plt
'''
t is number of standard deviations
'''
def plot_ci_manual(t, s_err, n, x, x2, y2, ax=None):
"""Return an axes of confidence bands using a simple approach.
Notes
-----
.. math:: \left| \: \hat{\mu}_{y|x0} - \mu_{y|x0} \: \right| \; \leq \; T_{n-2}^{.975} \; \hat{\sigma} \; \sqrt{\frac{1}{n}+\frac{(x_0-\bar{x})^2}{\sum_{i=1}^n{(x_i-\bar{x})^2}}}
.. math:: \hat{\sigma} = \sqrt{\sum_{i=1}^n{\frac{(y_i-\hat{y})^2}{n-2}}}
References
----------
.. [1] <NAME>. "Curve fitting," Jupyter Notebook.
http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/CurveFitting.ipynb
"""
if ax is None:
ax = plt.gca()
ci = t * s_err * np.sqrt(1/n + (x2 - np.mean(x))**2 / np.sum((x - np.mean(x))**2))
ax.fill_between(x2, y2 + ci, y2 - ci, color="#b9cfe7", edgecolor="")
return ax
'''
x2, y2 are the modelling response.
x2: linspace from min(x) to max(x)
y2: conditional mean response
X: the actual data (needed to compute the standard deviation)
t: width of the CI, in std. devs.
'''
def plot_ci_normal_dist(t, x2, y2, means, num_samples, ax=None, color="#b9cfe7"):
import matplotlib
from matplotlib import colors
new_colour = colors.to_rgba(color, alpha=0.23)
"""Return an axes of confidence bands using a simple approach.
Notes
-----
.. math:: \left| \: \hat{\mu}_{y|x0} - \mu_{y|x0} \: \right| \; \leq \; T_{n-2}^{.975} \; \hat{\sigma} \; \sqrt{\frac{1}{n}+\frac{(x_0-\bar{x})^2}{\sum_{i=1}^n{(x_i-\bar{x})^2}}}
.. math:: \hat{\sigma} = \sqrt{\sum_{i=1}^n{\frac{(y_i-\hat{y})^2}{n-2}}}
References
----------
.. [1] <NAME>. "Curve fitting," Jupyter Notebook.
http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/CurveFitting.ipynb
"""
# we can compute the stddev via a built in, or explicitly.
# let's try it explicitly
# assert means.shape[1] == X.shape[1] == 25
# assert means.shape[0] == 1
from matplotlib import cm
means = means.reshape((-1, len(means)))
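# Binomial-style spread of a percentage: sqrt(p * (100 - p) / n), treating each mean
# as an accuracy in [0, 100] estimated from num_samples independent runs.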
std_devs = np.sqrt(means * (100-means)/num_samples)
ci = t*std_devs
if ax is None:
ax = plt.gca()
ci = ci.squeeze()
# print(matplotlib.colors.cnames[color])
ax.fill_between(x2, y2 + ci, y2 - ci, color=new_colour, edgecolor="")
return ax
def plot_ci_bootstrap(xs, ys, resid, nboot=500, ax=None):
"""Return an axes of confidence bands using a bootstrap approach.
Notes
-----
The bootstrap approach iteratively resamples the residuals.
It plots `nboot` number of straight lines and outlines the shape of a band.
The density of overlapping lines indicates improved confidence.
Returns
-------
ax : axes
- Cluster of lines
- Upper and Lower bounds (high and low) (optional) Note: sensitive to outliers
References
----------
.. [1] <NAME>. "Visualizing Confidence Intervals", Various Consequences.
http://www.variousconsequences.com/2010/02/visualizing-confidence-intervals.html
"""
if ax is None:
ax = plt.gca()
# Use NumPy directly; the top-level scipy aliases (sp.random, sp.polyfit, sp.polyval)
# are deprecated/removed in recent SciPy releases.
bootindex = np.random.randint
for _ in range(nboot):
resamp_resid = resid[bootindex(0, len(resid) - 1, len(resid))]
# Fit a first-order polynomial to the resampled data
pc = np.polyfit(xs, ys + resamp_resid, 1)
# Plot bootstrap cluster
ax.plot(xs, np.polyval(pc, xs), "b-", linewidth=2, alpha=3.0 / float(nboot))
return ax
'''
x is simply a linspace (1 to T).
Y is the entire batch of accuracies, i.e. N x T (N is the number of samples, T is the number of timesteps)
We use the simplest method for plotting the classifier performance: just compute the standard deviation at each timestep.
To make it a "plot", we also fit a simple regression curve.
The exact methodology:
- compute the mean acc at each timestep
- fit a 1D polynomial (regression) for the mean
- compute the standard deviations around the mean, at each timestep
- fill in the area between the +2/-2 deviations around the mean
'''
'''
loads data, for graphing purposes
'''
def load_data(path, pattern="kl_penalty"):
# we can glob the entire path
#
import os
import numpy as np
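# Assumed layout (inferred from the loop below): <path>/<run-name>_<index>/accs.txt,
# where the run directory name contains `pattern` and ends in an integer index, and
# each relevant line of accs.txt is semicolon-separated with the accuracy first.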
all_accs = np.zeros((25,100))
for root, dirs, files in os.walk(path):
for dir in dirs:
if pattern in dir:
print(dir)
ind = int(dir.split("_")[-1])
print(ind)
with open(os.path.join(root, dir,"accs.txt"), "r") as file:
counter = 0
for line in (file):
if ";" in line:
if counter==100:
print(ind, counter)
# print(counter)
acc = line.split(";")[0]
all_accs[ind,counter] = float(acc)
counter+=1
# break
# print(file.readlines(1))
# print(all_accs)
print(all_accs.shape)
# open with w => overwrite!
return all_accs
pass
def load_data_baselines(path, pattern="kl_penalty", mode="kl_penalty", num_samples=29):
# we can glob the entire path
#
import os
import numpy as np
all_accs = np.zeros((num_samples,100))
for root, dirs, files in os.walk(path):
for dir in dirs:
print(root, dirs)
if pattern in dir and root==path:
# print(dir)
ind = int(dir.split("_")[-1])
# print(ind)
if ind >= num_samples: continue
if mode == "kl_penalty":
with open(os.path.join(root, dir,"accs.txt"), "r") as file:
counter = 0
for line in (file):
if ";" in line:
if counter==100:
print(ind, counter)
# print(counter)
acc = line.split(";")[0]
all_accs[ind,counter] = float(acc)
counter+=1
elif mode == "uncertainty" or mode == "random":
# print(dir)
#
# print(os.path.join(root, dir, mode + "_current_accs.txt"))
with open(os.path.join(root, dir, mode + "_current_accs.txt"), "r") as file:
counter = 0
for line in (file):
if " " in line:
if counter==100:
print(ind, counter)
# print(counter)
acc = line.split(" ")[0]
all_accs[ind,counter] = float(acc)
counter+=1
# break
# print(file.readlines(1))
# print(all_accs)
print(all_accs.shape)
# open with w => overwrite!
return all_accs
pass
def stddev_plot(x,y):
fig,ax = plt.subplots()
ax.plot(x,y)
fig.show()
pass
def gen_ci_plot(accs, fig, ax, color="g"):
num_samples = accs.shape[0]
x = np.arange(0, accs.shape[1])
y = np.mean(accs, axis=0)
t = 2
# Modeling with Numpy
def equation(a, b):
"""Return a 1D polynomial."""
return np.polyval(a, b)
p, cov = np.polyfit(x, y, 1, cov=True)  # parameters and covariance from the fit of a 1-D polynomial
y_model = equation(p, x) # model using the fit parameters; NOTE: parameters here are coefficients
# Plotting --------------------------------------------------------------------
# fig, ax = plt.subplots(figsize=(8, 6))
# Data
ax.plot(
x, y, "o", color=color, markersize=8,
markeredgewidth=1, markeredgecolor=color, markerfacecolor="None",
)
# Fit
ax.plot(x, y_model, "-", color=color, linewidth=1.5, alpha=0.5, label="r={}".format(p))
x2 = np.linspace(np.min(x), np.max(x), len(x))
y2 = equation(p, x2)
# Confidence Interval (select one)
# plot_ci_manual(t, s_err, n, x, x2, y2, ax=ax)
# plot_ci_bootstrap(x, y, resid, ax=ax)
means = y
# means = means.reshape((-1, len(means)))
std_devs = np.sqrt(means * (100 - means) / num_samples)
std_vars = means * (100 - means) / num_samples
std_devs_across = np.std(means)
print(color, std_devs_across) #lower stddev
# ax.plot(x, std_vars, label="std_vars", color=color)
plot_ci_normal_dist(t, x2, y2, y,num_samples, ax=ax, color=color)
# # Prediction Interval
# pi = t * s_err * np.sqrt(1 + 1 / n + (x2 - np.mean(x)) ** 2 / np.sum((x - np.mean(x)) ** 2))
# ax.fill_between(x2, y2 + pi, y2 - pi, color="None", linestyle="--")
# ax.plot(x2, y2 - pi, "--", color="0.5", label="95% Prediction Limits")
# ax.plot(x2, y2 + pi, "--", color="0.5")
# Figure Modifications --------------------------------------------------------
# Borders
ax.spines["top"].set_color("0.5")
ax.spines["bottom"].set_color("0.5")
ax.spines["left"].set_color("0.5")
ax.spines["right"].set_color("0.5")
ax.get_xaxis().set_tick_params(direction="out")
ax.get_yaxis().set_tick_params(direction="out")
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
# Labels
plt.title("Fit Plot for Query Methods", fontsize="14", fontweight="bold")
plt.xlabel("Queries")
plt.ylabel("Accuracy")
plt.xlim(np.min(x) - 1, np.max(x) + 1)
# Custom legend
handles, labels = ax.get_legend_handles_labels()
display = (0, 1)
anyArtist = plt.Line2D((0, 1), (0, 0), color=color) # create custom artists
if ax.get_legend():
ax.get_legend().remove()
ax.legend(loc="center right")
legend = plt.legend(
[handle for i, handle in enumerate(handles) if i in display] + [anyArtist],
[label for i, label in enumerate(labels) if i in display] + ["95% Confidence Limits"],
loc=9, bbox_to_anchor=(0, -0.21, 1., 0.102), ncol=3, mode="expand"
)
frame = legend.get_frame().set_edgecolor("0.5")
# Save Figure
plt.tight_layout()
plt.savefig("filename.png", bbox_extra_artists=(legend,), bbox_inches="tight")
fig.show()
return fig, ax
if __name__ == "__main__":
accs = load_data_baselines("/scratch/gobi1/johnchen/vaal_results")
random_accs = load_data_baselines("/scratch/gobi1/johnchen/vaal_results", mode="random")
uncertainty_accs = load_data_baselines("/scratch/gobi1/johnchen/vaal_results", mode="uncertainty")
# accs = accs[:,:30]
# random_accs = random_accs[:,:30]
# uncertainty_accs = uncertainty_accs[:,:30]
# Computations ----------------------------------------------------------------
# Raw Data
'''trying the normal equation line fit'''
'''
x = np.arange(0,all_accs.shape[1])
x = np.reshape(x,(1,100))
x = np.repeat(x, 25, axis=0)
y = all_accs
'''
'''
Couple approaches: either normal equation line fit. Or, we can do just on the mean
'''
'''trying the regular mean fit'''
fig, ax = plt.subplots(figsize=(8, 6))
#
# ax.set_color_cycle(['red', 'black', 'yellow'])
# fig, ax = gen_ci_plot(accs, fig, ax, color="g")
# fig, ax = gen_ci_plot(random_accs, fig, ax, color="r")
# fig, ax = gen_ci_plot(uncertainty_accs, fig, ax, color="b")
fig, ax = gen_ci_plot(accs, fig, ax, color="g")
fig, ax = gen_ci_plot(random_accs, fig, ax, color="r")
fig, ax = gen_ci_plot(uncertainty_accs, fig, ax, color="b")
# fig.legend(loc="center right")
pass
```
#### File: vaal_querying/rl/sample_cartpole.py
```python
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch
import gym
from torch.autograd import Variable
import random
HIDDEN_LAYER = 24 # NN hidden layer size
LR = 0.01
GAMMA = 0.99
INPUT_SIZE = 4
OUTPUT_SIZE = 2
ENV = gym.make('CartPole-v0').unwrapped
HISTORY = []
class Network(nn.Module):
def __init__(self):
super(Network, self).__init__()
self.l1 = nn.Linear(INPUT_SIZE, HIDDEN_LAYER)
# nn.init.xavier_uniform(self.l1.weight)
self.l2 = nn.Linear(HIDDEN_LAYER, OUTPUT_SIZE)
# nn.init.xavier_uniform(self.l2.weight)
def forward(self, x):
x = F.relu(self.l1(x))
# print(x.shape)
new_x = self.l2(x)
# print(new_x.shape)
x = F.softmax(new_x, dim=1)  # softmax over the action dimension (explicit dim avoids the PyTorch warning)
return x
model = Network()
use_cuda = torch.cuda.is_available()
if use_cuda:
model.cuda()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
optim = torch.optim.Adam(model.parameters(), lr=LR)
def discount_rewards(r):
discounted_r = torch.zeros(r.size())
running_add = 0
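# Walk the rewards backwards so running_add accumulates the discounted return:
# G_t = r_t + GAMMA * G_{t+1}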
for t in reversed(range(len(r))):
running_add = running_add * GAMMA + r[t]
discounted_r[t] = running_add
print(discounted_r)
return discounted_r
'''
We run however many of these episodes, as necessary!
'''
def run_episode(net, e, env):
state = env.reset()
reward_sum = 0
xs = FloatTensor([])
ys = FloatTensor([])
rewards = FloatTensor([])
steps = 0
action_predictions = FloatTensor([])
while True:
# env.render()
x = FloatTensor([state])
xs = torch.cat([xs, x]) #queue: FIFO; append to end
action_prob = net(Variable(x))
action_predictions = torch.cat([action_predictions, action_prob])
# select an action depends on probability
action = 0 if random.random() < action_prob.data[0][0] else 1
y = FloatTensor([[1, 0]] if action == 0 else [[0, 1]])
ys = torch.cat([ys, y])
state, reward, done, _ = env.step(action)
rewards = torch.cat([rewards, FloatTensor([[reward]])])
reward_sum += reward
steps += 1
if done or steps >= 500:
adv = discount_rewards(rewards)
# adv = (adv - adv.mean())
adv = (adv - adv.mean())/(adv.std() + 1e-7)
# print(adv)
loss = learn(xs, ys, adv, action_predictions)
HISTORY.append(reward_sum)
print("[Episode {:>5}] steps: {:>5} loss: {:>5}".format(e, steps, loss))
if sum(HISTORY[-5:])/5 > 490:
return True
else:
return False
def learn(x, y, adv, action_predictions):
# Policy-gradient loss: minimize -sum_t A_t * log p(a_t | s_t); the one-hot "fake label" y picks out the log-probability of the action actually taken
# action_pred = model(Variable(x))
# y = Variable(y, requires_grad=True)
print("y shape and AP shape")
print(y.shape)
print(action_predictions.shape)
adv = Variable(adv).cuda() if use_cuda else Variable(adv)  # keep the advantage on the same device as the model
# print(action_pred)
log_lik = -y * torch.log(action_predictions)
# print(y)
log_lik_adv = log_lik * adv
# print(torch.sum(log_lik_adv, 1))
loss = torch.sum(log_lik_adv, 1).mean()
optim.zero_grad()
loss.backward()
optim.step()
return loss.item()
for e in range(10000):
complete = run_episode(model, e, ENV)
if complete:
print('complete...!')
break
# import matplotlib.pyplot as plt
# from moviepy.editor import ImageSequenceClip
#
# def botPlay(env):
# state = env.reset()
# steps = 0
# frames = []
# while True:
# frame = env.render(mode='rgb_array')
# frames.append(frame)
# action = torch.max(model(Variable(FloatTensor([state]))), 1)[1].data[0]
# next_state, reward, done, _ = env.step(action)
#
# state = next_state
# steps += 1
#
# if done or steps >= 1000:
# break
#
# clip = ImageSequenceClip(frames, fps=20)
# clip.write_gif('4_policy_gradient_play.gif', fps=20)
#
# def plot_durations(d):
# plt.figure(2)
# plt.clf()
# plt.title('Training...')
# plt.xlabel('Episode')
# plt.ylabel('Duration')
# plt.plot(d)
#
# plt.savefig('4_policy_gradient_score.png')
#
# botPlay(ENV)
# plot_durations(HISTORY)
``` |
{
"source": "JohnTocher/descrobbler",
"score": 3
} |
#### File: JohnTocher/descrobbler/mylast.py
```python
import os
import sys
import pylast
try:
API_KEY = os.environ["LASTFM_API_KEY"]
API_SECRET = os.environ["LASTFM_API_SECRET"]
except KeyError:
API_KEY = "my_api_key"
API_SECRET = "my_apy_secret"
try:
lastfm_username = os.environ["LASTFM_USERNAME"]
lastfm_password_hash = os.environ["LASTFM_PASSWORD_HASH"]
print("Environment variables for user OK")
except KeyError:
# In order to perform a write operation you need to authenticate yourself
lastfm_username = "my_username"
# You can use either use the password, or find the hash once and use that
lastfm_password_hash = <PASSWORD>("<PASSWORD>")
print(lastfm_password_hash)
# lastfm_password_hash = "<PASSWORD>"
print("Environment variables for user missing! So far:")
print(f"API_KEY: {API_KEY}")
print(f"API_SECRET: {API_SECRET}")
print(f"LFM USER: {lastfm_username}")
print(f"LPW HASH: {lastfm_password_hash}")
lastfm_network = pylast.LastFMNetwork(
api_key=API_KEY,
api_secret=API_SECRET,
username=lastfm_username,
password_hash=<PASSWORD>,
)
def track_and_timestamp(track):
return f"{track.playback_date}\t{track.track}"
def print_track(track):
print(track_and_timestamp(track))
TRACK_SEPARATOR = " - "
def split_artist_track(artist_track):
artist_track = artist_track.replace(" – ", " - ")
artist_track = artist_track.replace("“", '"')
artist_track = artist_track.replace("”", '"')
(artist, track) = artist_track.split(TRACK_SEPARATOR)
artist = artist.strip()
track = track.strip()
print("Artist:\t\t'" + artist + "'")
print("Track:\t\t'" + track + "'")
# Validate
if len(artist) == 0 and len(track) == 0:
sys.exit("Error: Artist and track are blank")
if len(artist) == 0:
sys.exit("Error: Artist is blank")
if len(track) == 0:
sys.exit("Error: Track is blank")
return (artist, track)
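# Illustrative example (not part of the original module):
#   split_artist_track("Daft Punk - Get Lucky") -> ("Daft Punk", "Get Lucky")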
``` |
{
"source": "johntoms/aliyun-sdk",
"score": 2
} |
#### File: aliyun-sdk/aliyun_sdk/client.py
```python
from aliyun_sdk.common import AliyunCommon
from aliyun_sdk.oss import AliyunOSS
from . import retry_for_requests
def get_config(c):
return {
'access_key_id': c.get('AccessKeyId'),
'access_key_secret': c.get('AccessKeySecret'),
'role_name': c.get('RoleName'),
}
class AliyunClient(object):
def __init__(self, config=None):
self.config = config
self.common_client = AliyunCommon(**get_config(config))
self.oss_client = AliyunOSS(**get_config(config))
def verify(self):
return self.common_client.verify()
@retry_for_requests
def common(self, product, timeout=10, **biz_params):
return self.common_client.__getattr__(product)(timeout=timeout, **biz_params)
@retry_for_requests
def oss(self, method, timeout=10, **biz_params):
return self.oss_client.__getattr__(method)(timeout=timeout, **biz_params)
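# Hypothetical usage sketch -- the credential values and the ECS action below are
# placeholders, not taken from this package:
#   client = AliyunClient({'AccessKeyId': '...', 'AccessKeySecret': '...'})
#   client.common('Ecs', Action='DescribeRegions')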
```
#### File: aliyun-sdk/aliyun_sdk/__init__.py
```python
import json
# Part3 Packages
import xmltodict
import requests
from retry import retry
retry_for_requests = retry((requests.ConnectionError, requests.Timeout), tries=3, delay=1, backoff=2, jitter=(1, 2))
def parse_response(response):
resp_content_type = response.headers.get('content-type') or ''
resp_content_type = resp_content_type.lower().split(';')[0].strip()
if resp_content_type == 'application/json':
return json.loads(response.text)
elif resp_content_type == 'text/xml':
return xmltodict.parse(response.text)
else:
try:
return json.loads(response.text)
except ValueError:
try:
return xmltodict.parse(response.text)
except xmltodict.expat.ExpatError:
return response.content
except:
raise
except:
raise
``` |
{
"source": "John-Tonny/Electrum-vircle",
"score": 2
} |
#### File: packages/aiohttp_socks/connector.py
```python
import socket
from aiohttp import TCPConnector
from aiohttp.abc import AbstractResolver
from .proto import SocksVer
from .helpers import create_socket_wrapper, parse_socks_url
class NoResolver(AbstractResolver):
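# Resolver that returns hostnames unresolved so that, with rdns=True, DNS lookups
# are delegated to the SOCKS proxy instead of being performed locally.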
async def resolve(self, host, port=0, family=socket.AF_INET):
return [{'hostname': host,
'host': host, 'port': port,
'family': family, 'proto': 0,
'flags': 0}]
async def close(self):
pass # pragma: no cover
class SocksConnector(TCPConnector):
def __init__(self, socks_ver=SocksVer.SOCKS5,
host=None, port=None,
username=None, password=<PASSWORD>,
rdns=False, family=socket.AF_INET, **kwargs):
if rdns:
kwargs['resolver'] = NoResolver()
super().__init__(**kwargs)
self._socks_ver = socks_ver
self._socks_host = host
self._socks_port = port
self._socks_username = username
self._socks_password = password
self._rdns = rdns
self._socks_family = family
# noinspection PyMethodOverriding
async def _wrap_create_connection(self, protocol_factory,
host, port, **kwargs):
sock = create_socket_wrapper(
loop=self._loop,
socks_ver=self._socks_ver,
host=self._socks_host, port=self._socks_port,
username=self._socks_username, password=self._socks_password,
rdns=self._rdns, family=self._socks_family)
await sock.connect((host, port))
return await super()._wrap_create_connection(
protocol_factory, None, None, sock=sock.socket, **kwargs)
@classmethod
def from_url(cls, url, **kwargs):
socks_ver, host, port, username, password = parse_socks_url(url)
return cls(socks_ver=socks_ver, host=host, port=port,
username=username, password=password, **kwargs)
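# Hypothetical usage sketch (the proxy address is a placeholder):
#   connector = SocksConnector.from_url('socks5://127.0.0.1:1080', rdns=True)
#   async with aiohttp.ClientSession(connector=connector) as session:
#       async with session.get('http://example.com') as resp:
#           ...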
```
#### File: packages/aiohttp_socks/helpers.py
```python
import asyncio
import socket
from urllib.parse import urlparse, unquote
from .proto import SocksVer, Socks4SocketWrapper, Socks5SocketWrapper
def create_socket_wrapper(loop, socks_ver, host=None, port=None,
username=None, password=<PASSWORD>,
rdns=True, family=socket.AF_INET):
if socks_ver == SocksVer.SOCKS4:
return Socks4SocketWrapper(
loop=loop, host=host, port=port,
user_id=username, rdns=rdns)
if socks_ver == SocksVer.SOCKS5:
return Socks5SocketWrapper(
loop=loop, host=host, port=port,
username=username, password=password, rdns=rdns, family=family)
raise ValueError('Invalid socks ver: %s' % socks_ver) # pragma: no cover
def parse_socks_url(url):
parsed = urlparse(url)
scheme = parsed.scheme
if scheme == 'socks5':
socks_ver = SocksVer.SOCKS5
elif scheme == 'socks4':
socks_ver = SocksVer.SOCKS4
else:
raise ValueError('Invalid scheme component: %s'
% scheme) # pragma: no cover
host = parsed.hostname
if not host:
raise ValueError('Empty host component') # pragma: no cover
try:
port = parsed.port
except (ValueError, TypeError): # pragma: no cover
raise ValueError('Invalid port component')
try:
username, password = (unquote(parsed.username),
unquote(parsed.password))
except (AttributeError, TypeError):
username, password = '', ''
return socks_ver, host, port, username, password
async def open_connection(socks_url=None, host=None, port=None, *,
socks_ver=SocksVer.SOCKS5,
socks_host='127.0.0.1', socks_port=1080,
username=None, password=<PASSWORD>, rdns=True,
family=socket.AF_INET,
loop=None, **kwargs):
if host is None or port is None:
raise ValueError('host and port must be specified') # pragma: no cover
if loop is None:
loop = asyncio.get_event_loop()
if socks_url is not None:
socks_ver, socks_host, socks_port, username, password \
= parse_socks_url(socks_url)
sock = create_socket_wrapper(
loop=loop,
socks_ver=socks_ver, host=socks_host, port=socks_port,
username=username, password=password, rdns=rdns, family=family)
await sock.connect((host, port))
return await asyncio.open_connection(
loop=loop, host=None, port=None, sock=sock.socket, **kwargs)
async def create_connection(socks_url=None, protocol_factory=None,
host=None, port=None, *,
socks_ver=SocksVer.SOCKS5,
socks_host='127.0.0.1', socks_port=1080,
username=None, password=<PASSWORD>, rdns=True,
family=socket.AF_INET,
loop=None, **kwargs):
if protocol_factory is None:
raise ValueError('protocol_factory '
'must be specified') # pragma: no cover
if host is None or port is None:
raise ValueError('host and port '
'must be specified') # pragma: no cover
if loop is None:
loop = asyncio.get_event_loop()
if socks_url is not None:
socks_ver, socks_host, socks_port, username, password \
= parse_socks_url(socks_url)
sock = create_socket_wrapper(
loop=loop,
socks_ver=socks_ver, host=socks_host, port=socks_port,
username=username, password=password, rdns=rdns, family=family)
await sock.connect((host, port))
return await loop.create_connection(
protocol_factory=protocol_factory,
host=None, port=None, sock=sock.socket, **kwargs)
``` |
{
"source": "John-Tonny/electrumx",
"score": 2
} |
#### File: electrumx/server/env.py
```python
import re
from ipaddress import IPv4Address, IPv6Address
from aiorpcx import Service, ServicePart
from electrumx.lib.coins import Coin
from electrumx.lib.env_base import EnvBase
class ServiceError(Exception):
pass
class Env(EnvBase):
'''Wraps environment configuration. Optionally, accepts a Coin class
as first argument to have ElectrumX serve custom coins not part of
the standard distribution.
'''
# Peer discovery
PD_OFF, PD_SELF, PD_ON = ('OFF', 'SELF', 'ON')
SSL_PROTOCOLS = {'ssl', 'wss'}
KNOWN_PROTOCOLS = {'ssl', 'tcp', 'ws', 'wss', 'rpc'}
def __init__(self, coin=None):
super().__init__()
self.obsolete(["MAX_SUBSCRIPTIONS", "MAX_SUBS", "MAX_SESSION_SUBS", "BANDWIDTH_LIMIT",
"HOST", "TCP_PORT", "SSL_PORT", "RPC_HOST", "RPC_PORT", "REPORT_HOST",
"REPORT_TCP_PORT", "REPORT_SSL_PORT", "REPORT_HOST_TOR",
"REPORT_TCP_PORT_TOR", "REPORT_SSL_PORT_TOR"])
# Core items
self.db_dir = self.required('DB_DIRECTORY')
self.daemon_url = self.required('DAEMON_URL')
if coin is not None:
assert issubclass(coin, Coin)
self.coin = coin
else:
coin_name = self.required('COIN').strip()
network = self.default('NET', 'mainnet').strip()
self.coin = Coin.lookup_coin_class(coin_name, network)
# Peer discovery
self.peer_discovery = self.peer_discovery_enum()
self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
self.force_proxy = self.boolean('FORCE_PROXY', False)
self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
# Misc
self.db_engine = self.default('DB_ENGINE', 'leveldb')
self.banner_file = self.default('BANNER_FILE', None)
self.tor_banner_file = self.default('TOR_BANNER_FILE',
self.banner_file)
self.anon_logs = self.boolean('ANON_LOGS', False)
self.log_sessions = self.integer('LOG_SESSIONS', 3600)
self.log_level = self.default('LOG_LEVEL', 'info').upper()
self.donation_address = self.default('DONATION_ADDRESS', '')
self.drop_client = self.custom("DROP_CLIENT", None, re.compile)
self.blacklist_url = self.default('BLACKLIST_URL', self.coin.BLACKLIST_URL)
self.cache_MB = self.integer('CACHE_MB', 1200)
self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
# Server limits to help prevent DoS
self.max_send = self.integer('MAX_SEND', self.coin.DEFAULT_MAX_SEND)
self.max_sessions = self.sane_max_sessions()
self.cost_soft_limit = self.integer('COST_SOFT_LIMIT', 1000)
self.cost_hard_limit = self.integer('COST_HARD_LIMIT', 10000)
self.bw_unit_cost = self.integer('BANDWIDTH_UNIT_COST', 5000)
self.initial_concurrent = self.integer('INITIAL_CONCURRENT', 10)
self.request_sleep = self.integer('REQUEST_SLEEP', 2500)
self.request_timeout = self.integer('REQUEST_TIMEOUT', 30)
self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
# Services last - uses some env vars above
self.services = self.services_to_run()
if {service.protocol for service in self.services}.intersection(self.SSL_PROTOCOLS):
self.ssl_certfile = '/etc/electrumx/server.crt'#self.required('SSL_CERTFILE')
self.ssl_keyfile = '/etc/electrumx/server.key'#self.required('SSL_KEYFILE')
self.report_services = self.services_to_report()
def sane_max_sessions(self):
'''Return the maximum number of sessions to permit. Normally this
is MAX_SESSIONS. However, to prevent open file exhaustion, adjust
downwards if running with a small open file rlimit.'''
env_value = self.integer('MAX_SESSIONS', 1000)
# No resource module on Windows
try:
import resource
nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
# We give the DB 250 files; allow ElectrumX 100 for itself
value = max(0, min(env_value, nofile_limit - 350))
if value < env_value:
self.logger.warning('lowered maximum sessions from {:,d} to {:,d} '
'because your open file limit is {:,d}'
.format(env_value, value, nofile_limit))
except ImportError:
value = 512  # that is what is returned by stdio's _getmaxstdio()
return value
def _parse_services(self, services_str, default_func):
result = []
for service_str in services_str.split(','):
if not service_str:
continue
try:
service = Service.from_string(service_str, default_func=default_func)
except Exception as e:
raise ServiceError(f'"{service_str}" invalid: {e}') from None
if service.protocol not in self.KNOWN_PROTOCOLS:
raise ServiceError(f'"{service_str}" invalid: unknown protocol')
result.append(service)
# Find duplicate addresses
service_map = {service.address: [] for service in result}
for service in result:
service_map[service.address].append(service)
for address, services in service_map.items():
if len(services) > 1:
raise ServiceError(f'address {address} has multiple services')
return result
def services_to_run(self):
def default_part(protocol, part):
return default_services.get(protocol, {}).get(part)
default_services = {protocol: {ServicePart.HOST: 'all_interfaces'}
for protocol in self.KNOWN_PROTOCOLS}
default_services['rpc'] = {ServicePart.HOST: 'localhost', ServicePart.PORT: 8000}
services = self._parse_services(self.default('SERVICES', 'tcp://:50001,ssl://:50002,ws://:50003,wss://:50004,rpc://:8000'), default_part)
# Find onion hosts
for service in services:
if str(service.host).endswith('.onion'):
raise ServiceError(f'bad host for SERVICES: {service}')
return services
def services_to_report(self):
services = self._parse_services(self.default('REPORT_SERVICES', ''), None)
for service in services:
if service.protocol == 'rpc':
raise ServiceError(f'bad protocol for REPORT_SERVICES: {service.protocol}')
if isinstance(service.host, (IPv4Address, IPv6Address)):
ip_addr = service.host
if (ip_addr.is_multicast or ip_addr.is_unspecified or
(ip_addr.is_private and self.peer_announce)):
raise ServiceError(f'bad IP address for REPORT_SERVICES: {ip_addr}')
elif service.host.lower() == 'localhost':
raise ServiceError(f'bad host for REPORT_SERVICES: {service.host}')
return services
def peer_discovery_enum(self):
pd = self.default('PEER_DISCOVERY', 'on').strip().lower()
if pd in ('off', ''):
return self.PD_OFF
elif pd == 'self':
return self.PD_SELF
else:
return self.PD_ON
``` |
{
"source": "JohnTorian/afterglow-core",
"score": 3
} |
#### File: users/versions/3_add_user_first_last_name.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3'
down_revision = '2'
branch_labels = None
depends_on = None
def upgrade():
with op.batch_alter_table(
'users', recreate='always',
table_args=(
sa.CheckConstraint(
'username is null or length(username) <= 255'),
sa.CheckConstraint('email is null or length(email) <= 255'),
sa.CheckConstraint(
'password is null or length(password) <= 255'),
sa.CheckConstraint(
'first_name is null or length(first_name) <= 255'),
sa.CheckConstraint(
'last_name is null or length(last_name) <= 255'),
sa.CheckConstraint(
'auth_methods is null or length(auth_methods) <= 255'),
sa.CheckConstraint(
'settings is null or length(settings) <= 1048576'),
),
table_kwargs=dict(sqlite_autoincrement=True)) as batch_op:
batch_op.add_column(sa.Column('first_name', sa.String, default=''))
batch_op.add_column(sa.Column('last_name', sa.String, default=''))
def downgrade():
with op.batch_alter_table('users', recreate='always') as batch_op:
batch_op.drop_column('first_name')
batch_op.drop_column('last_name')
```
#### File: afterglow-core/afterglow_core/__init__.py
```python
import datetime
import json
import os
from typing import Any, Dict as TDict, Optional
from flask_cors import CORS
from marshmallow import missing
from werkzeug.datastructures import CombinedMultiDict, MultiDict
from flask import Flask, Response, request
from .schemas import AfterglowSchema
__all__ = ['app', 'json_response']
class PrefixMiddleware(object):
def __init__(self, application, prefix=''):
self.app = application
self.prefix = prefix
def __call__(self, environ, start_response):
if environ['PATH_INFO'].startswith(self.prefix):
environ['PATH_INFO'] = environ['PATH_INFO'][len(self.prefix):]
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
else:
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ["This url does not belong to the app.".encode()]
class AfterglowSchemaEncoder(json.JSONEncoder):
"""
JSON encoder that can serialize AfterglowSchema class instances
"""
def default(self, obj):
if isinstance(obj, type(missing)):
return None
if isinstance(obj, AfterglowSchema):
return obj.dump(obj)
if isinstance(obj, datetime.datetime):
return obj.isoformat(' ')
return super(AfterglowSchemaEncoder, self).default(obj)
def json_response(obj: Any = '', status_code: Optional[int] = None,
headers: Optional[TDict[str, str]] = None) -> Response:
"""
Serialize a Python object to a JSON-type flask.Response
:param obj: object to serialize; can be a Resource instance or a compound
object (list, dict, ...) possibly including Resource instances
:param int status_code: optional HTTP status code; defaults to 200 - OK
:param dict headers: optional extra HTTP headers
:return: Flask response object with mimetype set to application/json
"""
if obj == '' or status_code == 204:
resp = Response('', 204, headers=headers)
del resp.headers['Content-Type']
return resp
if status_code is None:
status_code = 200
return Response(
json.dumps(obj, cls=AfterglowSchemaEncoder), status_code,
mimetype='application/json', headers=headers)
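# For example, json_response({'status': 'ok'}) yields a 200 application/json response,
# while json_response() with no arguments yields an empty 204 response.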
app = Flask(__name__)
cors = CORS(app, resources={'/api/*': {'origins': '*'}})
app.config.from_object('afterglow_core.default_cfg')
app.config.from_envvar('AFTERGLOW_CORE_CONFIG', silent=True)
if app.config.get('APP_PREFIX'):
app.wsgi_app = PrefixMiddleware(
app.wsgi_app, prefix=app.config.get('APP_PREFIX'))
if app.config.get('PROFILE'):
# Enable profiling
from werkzeug.middleware.profiler import ProfilerMiddleware
app.config['DEBUG'] = True
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[10])
if app.config.get('OAUTH2_ALLOW_HTTP') or app.config.get('DEBUG'):
os.environ['AUTHLIB_INSECURE_TRANSPORT'] = '1'
@app.before_request
def resolve_request_body() -> None:
"""
Before every request, combine `request.form` and `request.get_json()` into
`request.args`
"""
ds = [request.args, request.form]
body = request.get_json()
if body:
ds.append(MultiDict(body.items()))
# Replace immutable Request.args with the combined args dict
# noinspection PyPropertyAccess
request.args = CombinedMultiDict(ds)
if app.config.get('AUTH_ENABLED'):
# Initialize user authentication and enable non-versioned /users routes
# and Afterglow OAuth2 server at /oauth2
from . import auth
# Define API resources and endpoints
from .resources import *
from .views import *
```
#### File: afterglow_core/models/source_extraction.py
```python
from __future__ import annotations
from datetime import datetime
from typing import Optional
from marshmallow.fields import Integer, String
from numpy import sqrt, log, rad2deg, void
from astropy.wcs import WCS
from ..schemas import AfterglowSchema, DateTime, Float
__all__ = [
'IAstrometry', 'IFwhm', 'ISourceId', 'ISourceMeta', 'SourceExtractionData',
'sigma_to_fwhm'
]
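# For a Gaussian profile, FWHM = 2 * sqrt(2 * ln 2) * sigma ~= 2.3548 * sigma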
sigma_to_fwhm = 2.0*sqrt(2*log(2))
class ISourceMeta(AfterglowSchema):
"""
Metadata for the source::
file_id: data file ID
time: exposure start time
filter: filter name
telescope: telescope name
exp_length: exposure length in seconds
"""
file_id: int = Integer()
time: datetime = DateTime()
filter: str = String()
telescope: str = String()
exp_length: float = Float()
class IAstrometry(AfterglowSchema):
ra_hours: float = Float()
dec_degs: float = Float()
pm_sky: float = Float()
pm_pos_angle_sky: float = Float()
x: float = Float()
y: float = Float()
pm_pixel: float = Float()
pm_pos_angle_pixel: float = Float()
pm_epoch: datetime = DateTime()
flux: float = Float()
class IFwhm(AfterglowSchema):
fwhm_x: float = Float()
fwhm_y: float = Float()
theta: float = Float()
class ISourceId(AfterglowSchema):
id: str = String()
class SourceExtractionData(ISourceMeta, IAstrometry, IFwhm, ISourceId):
"""
Description of object returned by source extraction
"""
def __init__(self, source: Optional[SourceExtractionData] = None,
row: Optional[void] = None, x0: int = 0, y0: int = 0,
wcs: Optional[WCS] = None, **kwargs):
"""
Create source extraction data class instance from another source
extraction data object or from a NumPy source table row
:param source: create from another source extraction data object ("copy
constructor")
:param row: source table row
:param x0: X offset to convert from source table coordinates to global
image coordinates; used only with `row`
:param y0: Y offset to convert from source table coordinates to global
image coordinates; used only with `row`
:param wcs: optional WCS structure; if present, compute RA/Dec; used
only with `row`
:param kwargs: see :class:`ISourceMeta` and :class:`ISourceId`
"""
super().__init__(source, **kwargs)
if row is not None:
self.x = row['x'] + x0
self.y = row['y'] + y0
self.fwhm_x = row['a']*sigma_to_fwhm
self.fwhm_y = row['b']*sigma_to_fwhm
self.theta = rad2deg(row['theta'])
self.flux = row['flux']
if wcs is not None:
# Apply astrometric calibration
self.ra_hours, self.dec_degs = wcs.all_pix2world(self.x, self.y, 1)
self.ra_hours /= 15
```
#### File: resources/job_plugins/alignment_job.py
```python
from typing import List as TList
from marshmallow.fields import String, Integer, List, Nested
from astropy.wcs import WCS
from skylib.combine.alignment import apply_transform_stars, apply_transform_wcs
from ...models import Job, JobResult, SourceExtractionData
from ...schemas import AfterglowSchema, Boolean
from ...errors import AfterglowError, ValidationError
from ..data_files import (
create_data_file, get_data_file_data, get_data_file_db, get_root,
save_data_file)
from .cropping_job import run_cropping_job
__all__ = ['AlignmentJob']
class AlignmentSettings(AfterglowSchema):
ref_image: str = String(default='central')
wcs_grid_points: int = Integer(default=0)
prefilter: bool = Boolean(default=True)
class AlignmentJobResult(JobResult):
file_ids: TList[int] = List(Integer(), default=[])
class AlignmentJob(Job):
"""
Image alignment job
"""
type = 'alignment'
description = 'Align Images'
result: AlignmentJobResult = Nested(AlignmentJobResult, default={})
file_ids: TList[int] = List(Integer(), default=[])
settings: AlignmentSettings = Nested(AlignmentSettings, default={})
sources: TList[SourceExtractionData] = List(
Nested(SourceExtractionData), default=[])
inplace: bool = Boolean(default=False)
crop: bool = Boolean(default=False)
def run(self):
settings = self.settings
# Load data files
file_ids = list(self.file_ids)
if not file_ids:
return
adb = get_data_file_db(self.user_id)
try:
# Get reference image index and the corresponding data file ID
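# Accepted forms: "first", "last", "central", "#<0-based index into file_ids>",
# or a data file ID (appended to the file list if not already included)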
try:
if settings.ref_image == 'first':
ref_image = 0
elif settings.ref_image == 'last':
ref_image = len(file_ids) - 1
elif settings.ref_image == 'central':
ref_image = len(file_ids)//2
elif settings.ref_image.strip().startswith('#'):
# 0-based index in file_ids
ref_image = int(settings.ref_image.strip()[1:])
if not 0 <= ref_image < len(file_ids):
raise ValidationError(
'settings.ref_image',
'Reference image index out of range', 422)
else:
# Data file ID
ref_image = int(settings.ref_image)
try:
ref_image = file_ids.index(ref_image)
except ValueError:
# Not in file_ids; implicitly add
file_ids.append(ref_image)
ref_image = len(file_ids) - 1
except AfterglowError:
raise
except Exception:
raise ValidationError(
'settings.ref_image',
'Reference image must be "first", "last", "central", or '
'data file ID, or #file_no', 422)
ref_file_id = file_ids[ref_image]
if self.sources:
# Source-based alignment
if any(not hasattr(source, 'file_id')
for source in self.sources):
raise ValueError(
'Missing data file ID for at least one source')
# Extract alignment stars for reference image
ref_sources = [
source for source in self.sources
if getattr(source, 'file_id', None) == ref_file_id]
ref_stars = {getattr(source, 'id', None): (source.x, source.y)
for source in ref_sources}
if not ref_stars:
raise ValueError(
'Missing alignment stars for reference image')
if None in ref_stars and len(ref_sources) > 1:
# Cannot mix sources with and without ID
raise ValueError('Missing reference image source ID')
else:
# WCS-based alignment
ref_stars = {}
# Load data and extract WCS for reference image
ref_data, ref_hdr = get_data_file_data(self.user_id, ref_file_id)
ref_height, ref_width = ref_data.shape
# noinspection PyBroadException
try:
ref_wcs = WCS(ref_hdr)
if not ref_wcs.has_celestial:
ref_wcs = None
except Exception:
ref_wcs = None
if ref_wcs is None and not ref_stars:
raise ValueError('Reference image has no WCS')
for i, file_id in enumerate(file_ids):
try:
if i != ref_image:
# Load and transform the current image based on either
# star coordinates or WCS
data, hdr = get_data_file_data(self.user_id, file_id)
if ref_stars:
# Extract current image sources that are also
# present in the reference image
img_sources = [
source for source in self.sources
if getattr(source, 'file_id', None) == file_id]
img_stars = {getattr(source, 'id', None):
(source.x, source.y)
for source in img_sources}
if None in img_stars and len(img_sources) > 1:
raise ValueError('Missing source ID')
src_stars, dst_stars = [], []
for src_id, src_star in img_stars.items():
try:
dst_star = ref_stars[src_id]
except KeyError:
pass
else:
src_stars.append(src_star)
dst_stars.append(dst_star)
if not src_stars:
raise ValueError('Missing alignment star(s)')
data = apply_transform_stars(
data, src_stars, dst_stars, ref_width,
ref_height, prefilter=settings.prefilter)
nref = len(src_stars)
hist_msg = '{:d} star{}'.format(
nref, 's' if nref > 1 else '')
else:
# Extract current image WCS
# noinspection PyBroadException
try:
wcs = WCS(hdr)
if not wcs.has_celestial:
wcs = None
except Exception:
wcs = None
if wcs is None:
raise ValueError('Missing WCS')
data = apply_transform_wcs(
data, wcs, ref_wcs, ref_width, ref_height,
grid_points=settings.wcs_grid_points,
prefilter=settings.prefilter)
hist_msg = 'WCS'
hdr.add_history(
'Aligned using {} with respect to data file '
'{:d}'.format(hist_msg, ref_file_id))
# Copy WCS from reference image if any
if ref_wcs is not None:
# Preserve epoch of observation
orig_kw = {
name: (hdr[name], hdr.comments[name])
if hdr.comments[name] else hdr[name]
for name in ('DATE-OBS', 'MJD-OBS')
if name in hdr
}
# Remove the possible alternative WCS
# representations to avoid WCS compatibility issues
# and make the WCS consistent
for name in (
'CD1_1', 'CD1_2', 'CD2_1', 'CD2_2',
'PC1_1', 'PC1_2', 'PC2_1', 'PC2_2',
'CDELT1', 'CDELT2', 'CROTA1', 'CROTA2'):
try:
del hdr[name]
except KeyError:
pass
hdr.update(ref_wcs.to_header(relax=True))
for name, val in orig_kw.items():
hdr[name] = val
else:
data, hdr = ref_data, ref_hdr
if not self.inplace:
# Don't create a new data file for reference image that
# was not listed in file_ids but was instead passed in
# settings.ref_image
if i != ref_image or ref_file_id in self.file_ids:
hdr.add_history(
'Original data file ID: {:d}'.format(file_id))
try:
file_id = create_data_file(
adb, None, get_root(self.user_id), data,
hdr, duplicates='append',
session_id=self.session_id).id
adb.commit()
except Exception:
adb.rollback()
raise
elif i != ref_image: # not replacing reference image
try:
save_data_file(
adb, get_root(self.user_id), file_id, data, hdr)
adb.commit()
except Exception:
adb.rollback()
raise
if i != ref_image or ref_file_id in self.file_ids:
self.result.file_ids.append(file_id)
except Exception as e:
self.add_error(
'Data file ID {}: {}'.format(file_ids[i], e))
finally:
self.update_progress((i + 1)/len(file_ids)*100)
finally:
adb.remove()
# Optionally crop aligned files in place
if self.crop:
run_cropping_job(self, None, self.result.file_ids, inplace=True)
```
#### File: resources/job_plugins/field_cal_job.py
```python
from datetime import datetime
from typing import List as TList
from marshmallow.fields import Integer, List, Nested
import numpy
from astropy.wcs import WCS
from ...models import (
Job, JobResult, FieldCal, FieldCalResult, Mag, PhotSettings)
from ..data_files import get_data_file_fits, get_image_time
from ..field_cals import get_field_cal
from ..catalogs import catalogs as known_catalogs
from .catalog_query_job import run_catalog_query_job
from .source_extraction_job import (
SourceExtractionSettings, run_source_extraction_job)
from .photometry_job import get_source_xy, run_photometry_job
__all__ = ['FieldCalJob']
class FieldCalJobResult(JobResult):
data: TList[FieldCalResult] = List(Nested(FieldCalResult), default=[])
class FieldCalJob(Job):
type = 'field_cal'
description = 'Photometric Calibration'
result: FieldCalJobResult = Nested(FieldCalJobResult, default={})
file_ids: TList[int] = List(Integer(), default=[])
field_cal: FieldCal = Nested(FieldCal, default={})
source_extraction_settings: SourceExtractionSettings = Nested(
SourceExtractionSettings, default=None)
photometry_settings: PhotSettings = Nested(PhotSettings, default=None)
def run(self):
if not getattr(self, 'file_ids', None):
return
# If ID or name is supplied for the field cal, this is a reference
# to a stored field cal; get it from the user's field cal table and
# optionally override fields that were explicitly set by the user
field_cal = self.field_cal
id_or_name = getattr(field_cal, 'id', None)
if id_or_name is None:
id_or_name = getattr(field_cal, 'name', None)
if id_or_name is not None:
stored_field_cal = get_field_cal(self.user_id, id_or_name)
for name, val in field_cal.to_dict().items():
if name not in ('id', 'name'):
setattr(stored_field_cal, name, val)
field_cal = stored_field_cal
catalog_sources = getattr(field_cal, 'catalog_sources', None)
if not catalog_sources and not getattr(field_cal, 'catalogs', None):
raise ValueError(
'Missing either catalog sources or catalog list in field '
'cal{}'.format(
' "{}"'.format(field_cal.name)
if getattr(field_cal, 'name', None) else ''))
if catalog_sources:
# Convert catalog magnitudes to Mag instances (not deserialized
# automatically)
for source in catalog_sources:
for name, val in getattr(source, 'mags', {}).items():
if isinstance(val, dict):
source.mags[name] = Mag(**val)
else:
# No input catalog sources, query the specified catalogs
catalog_sources = run_catalog_query_job(
self, field_cal.catalogs, file_ids=self.file_ids)
# Make sure that each input catalog source has a unique ID; it will
# be used later to match photometry results to catalog sources
prefix = '{}_{}_'.format(
datetime.utcnow().strftime('%Y%m%d%H%M%S'), self.id)
source_ids = set()
for i, source in enumerate(catalog_sources):
id = getattr(source, 'id', None)
if id is None:
# Auto-assign source ID
source.id = id = prefix + str(i + 1)
if getattr(source, 'file_id', None) is not None:
id = (id, source.file_id)
if id in source_ids:
if isinstance(id, tuple):
raise ValueError(
'Non-unique source ID "{0[0]}" for file ID '
'{0[1]}'.format(id))
else:
raise ValueError('Non-unique source ID "{}"'.format(id))
source_ids.add(id)
if getattr(self, 'source_extraction_settings', None) is not None:
# Detect sources using settings provided and match them to input
# catalog sources by XY position in each image
tol = getattr(field_cal, 'source_match_tol', None)
if tol is None:
raise ValueError('Missing catalog source match tolerance')
if tol <= 0:
raise ValueError(
'Positive catalog source match tolerance expected')
epochs, wcss = {}, {}
matching_catalog_sources = []
detected_sources = run_source_extraction_job(
self, self.source_extraction_settings, self.file_ids,
update_progress=False)
if not detected_sources:
raise RuntimeError('Could not detect any sources')
for source in detected_sources:
file_id = source.file_id
catalog_source, match_found = None, False
for catalog_source in catalog_sources:
if getattr(catalog_source, 'file_id', None) is None or \
catalog_source.file_id == file_id:
try:
epoch = epochs[file_id]
except KeyError:
# noinspection PyBroadException
try:
with get_data_file_fits(
self.user_id, file_id) as f:
epoch = get_image_time(f[0].header)
except Exception:
epoch = None
epochs[file_id] = epoch
try:
wcs = wcss[file_id]
except KeyError:
# noinspection PyBroadException
try:
with get_data_file_fits(
self.user_id, file_id) as f:
wcs = WCS(f[0].header)
if not wcs.has_celestial:
wcs = None
except Exception:
wcs = None
wcss[file_id] = wcs
x, y = get_source_xy(catalog_source, epoch, wcs)
if numpy.hypot(x - source.x, y - source.y) < tol:
if any(source1.id == catalog_source.id and
(getattr(source1, 'file_id', None) is
None or source1.file_id == file_id)
for source1 in matching_catalog_sources):
self.add_warning(
'Data file ID {}: Multiple matches for '
'catalog source "{}" within {} '
'pixel{}'.format(
file_id, catalog_source.id, tol,
'' if tol == 1 else 's'))
break
match_found = True
break
if match_found:
# Copy catalog source data to extracted source and set
# the latter as a new catalog source
for attr in ('id', 'catalog_name', 'mags', 'label',
'mag', 'mag_error'):
val = getattr(catalog_source, attr, None)
if val is not None:
setattr(source, attr, val)
matching_catalog_sources.append(source)
if not matching_catalog_sources:
raise RuntimeError(
'Could not match any detected sources to the catalog '
'sources provided')
catalog_sources = matching_catalog_sources
if getattr(self, 'photometry_settings', None) is not None:
# Do batch photometry using refstar positions; explicitly disable
# photometric calibration even if present in data file headers
# by setting field_cal_results to False since we need raw
# (uncalibrated) mags here
phot_data = [source for source in run_photometry_job(
self, self.photometry_settings, self.file_ids, catalog_sources)
if source.mag]
if not phot_data:
raise RuntimeError('No catalog sources could be photometered')
else:
# If photometry is disabled, use instrumental magnitudes provided
# by the user
phot_data = catalog_sources
if len(self.file_ids) > 1:
if any(getattr(source, 'file_id', None) is None
for source in phot_data):
raise ValueError(
'"file_id" is required for all sources when photometry '
'is not enabled')
else:
# Assume the same file ID for all sources if processing a single
# file
file_id = self.file_ids[0]
for source in phot_data:
if getattr(source, 'file_id', None) is None:
source.file_id = file_id
if any(getattr(source, 'mag', None) is None
for source in phot_data):
raise ValueError(
'"mag" is required for all sources when photometry is not '
'enabled')
# Get filters from data file headers (will need them to map
# to catalog mags
filters = {}
for source in phot_data:
file_id = source.file_id
try:
source.filter = filters[file_id]
except KeyError:
# noinspection PyBroadException
try:
with get_data_file_fits(self.user_id, file_id) as f:
source.filter = f[0].header.get('FILTER')
except Exception:
source.filter = None
filters[file_id] = source.filter
min_snr = getattr(field_cal, 'min_snr', None)
max_snr = getattr(field_cal, 'max_snr', None)
if min_snr or max_snr:
# Exclude sources based on SNR
if not min_snr:
min_snr = 0
if not max_snr:
max_snr = numpy.inf
new_phot_data = []
for source in phot_data:
mag_error = getattr(source, 'mag_error', 0)
if mag_error and not min_snr <= 1/mag_error <= max_snr:
continue
new_phot_data.append(source)
phot_data = new_phot_data
if not phot_data:
raise RuntimeError('All sources violate SNR constraints')
if getattr(field_cal, 'source_inclusion_percent', None):
# Keep only sources that are present in the given fraction of images
nmin = max(int(field_cal.source_inclusion_percent/100 *
len(self.file_ids) + 0.5), 1)
source_ids_to_keep, source_ids_to_remove = [], []
for source in phot_data:
id = source.id
if id in source_ids_to_keep or id in source_ids_to_remove:
continue
if len([s for s in phot_data if s.id == id]) < nmin:
source_ids_to_remove.append(id)
else:
source_ids_to_keep.append(id)
if source_ids_to_remove:
if source_ids_to_keep:
phot_data = [source for source in phot_data
if source.id in source_ids_to_keep]
else:
raise ValueError(
'No sources found that are present in ' +
('all images' if nmin == len(self.file_ids) else
'at least one image' if nmin == 1 else
'at least {:d} images'.format(nmin)))
# Initialize custom filter mapping
filter_lookup = {
catalog_name: known_catalogs[catalog_name].filter_lookup
for catalog_name in {catalog_source.catalog_name
for catalog_source in catalog_sources
if getattr(catalog_source, 'catalog_name', '')}
if catalog_name in known_catalogs and
getattr(known_catalogs[catalog_name], 'filter_lookup', None)
}
for catalog_name, lookup in getattr(
field_cal, 'custom_filter_lookup', {}).items():
filter_lookup.setdefault(catalog_name, {}).update(lookup)
# For each data file ID, match photometry results to catalog sources
# and use (mag, ref_mag) pairs to obtain zero point
result_data = []
context = dict(numpy.__dict__)
eps = 1e-7
for file_id in self.file_ids:
sources = []
for source in phot_data:
if source.file_id == file_id:
for catalog_source in catalog_sources:
if catalog_source.id == source.id:
# Get reference magnitude for the current filter
flt = getattr(source, 'filter', None)
# noinspection PyBroadException
try:
source.catalog_name = \
catalog_source.catalog_name
expr = filter_lookup[source.catalog_name][flt]
# Evaluate magnitude expression in the
# NumPy-enabled context extended with mags
# available for the current catalog source
ctx = dict(context)
ctx.update(
{f: m.value
for f, m in catalog_source.mags.items()})
try:
mag = Mag(value=eval(expr, ctx, {}))
except Exception:
# Could not compute reference magnitude
# (e.g. missing the given filter); retry
# by getting magnitude directly by filter
# name
raise Exception()
else:
# Calculate the resulting magnitude error
# by coadding contributions from each filter
err = 0
for f, m in catalog_source.mags.items():
e = getattr(m, 'error', None)
if e:
# Partial derivative of final mag
# with resp. to the current filter
ctx[f] += eps
# noinspection PyBroadException
try:
err += ((
eval(expr, ctx, {}) -
mag.value)/eps*e)**2
except Exception:
pass
finally:
ctx[f] = m.value
if err:
mag.error = numpy.sqrt(err)
except Exception:
# No custom filter expression for the current
# filter+catalog combination; try filter name
# as is
try:
mag = catalog_source.mags[flt]
except (AttributeError, KeyError):
# No magnitude available for the current
# filter+catalog; skip this source
continue
m = getattr(mag, 'value', None)
if m is None:
# Missing catalog magnitude value
continue
source.ref_mag = m
e = getattr(mag, 'error', None)
if e:
source.ref_mag_error = e
sources.append(source)
break
if not sources:
self.add_error('Data file ID {}: No calibration sources'.format(
file_id))
continue
mags, mag_errors, ref_mags, ref_mag_errors = numpy.transpose([
(source.mag, getattr(source, 'mag_error', None) or 0,
source.ref_mag, getattr(source, 'ref_mag_error', None) or 0)
for source in sources
])
n = len(sources)
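# Zero point = mean offset between catalog (reference) and instrumental magnitudes;
# its error combines per-source magnitude errors in quadrature and falls back to
# the scatter of the offsets when no individual errors are available.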
d = ref_mags - mags
m0 = d.mean()
m0_error = numpy.sqrt((mag_errors**2 + ref_mag_errors**2).sum())/n
if abs(m0_error) < 1e-7:
if n > 1:
m0_error = d.std()/numpy.sqrt(n)
else:
m0_error = None
result_data.append(FieldCalResult(
file_id=file_id,
phot_results=sources,
zero_point=m0,
zero_point_error=m0_error,
))
# Update photometric calibration info in data file header
try:
with get_data_file_fits(self.user_id, file_id, 'update') as f:
hdr = f[0].header
hdr['PHOT_M0'] = m0, 'Photometric zero point'
if m0_error:
hdr['PHOT_M0E'] = (
m0_error, 'Photometric zero point error')
if getattr(field_cal, 'name', None):
hdr['PHOT_CAL'] = field_cal.name, 'Field cal name'
elif getattr(field_cal, 'id', None):
hdr['PHOT_CAL'] = field_cal.id, 'Field cal ID'
except Exception as e:
self.add_warning(
'Data file ID {}: Error saving photometric calibration '
'info to FITS header: {}'.format(file_id, e))
self.result.data = result_data
```
#### File: resources/job_plugins/stacking_job.py
```python
from typing import List as TList
from marshmallow.fields import Integer, List, Nested, String
from skylib.combine.stacking import combine
from ... import app
from ...models import Job, JobResult
from ...schemas import AfterglowSchema, Float
from ..data_files import (
create_data_file, get_data_file_data, get_data_file_db, get_root)
__all__ = ['StackingJob']
class StackingSettings(AfterglowSchema):
mode: str = String(default='average')
scaling: str = String(default=None)
rejection: str = String(default=None)
percentile: int = Integer(default=50)
lo: float = Float(default=0)
hi: float = Float(default=100)
class StackingJobResult(JobResult):
file_id: int = Integer()
class StackingJob(Job):
type = 'stacking'
description = 'Stack Images'
result: StackingJobResult = Nested(StackingJobResult, default={})
file_ids: TList[int] = List(Integer(), default=[])
# alignment_settings: AlignmentSettings = Nested(
# AlignmentSettings, default={})
stacking_settings: StackingSettings = Nested(StackingSettings, default={})
def run(self):
settings = self.stacking_settings
if settings.mode not in ('average', 'sum', 'percentile', 'mode'):
raise ValueError(
'Stacking mode must be "average", "sum", "percentile", or '
'"mode"')
if settings.scaling is not None and \
settings.scaling.lower() not in ('none', 'average', 'median',
'mode'):
raise ValueError(
'Scaling mode must be "none", "average", "median", or "mode"')
if settings.scaling is not None:
settings.scaling = settings.scaling.lower()
if settings.scaling == 'none':
settings.scaling = None
if settings.rejection is not None and \
settings.rejection.lower() not in ('none', 'chauvenet', 'iraf',
'minmax', 'sigclip'):
raise ValueError(
'Rejection mode must be "none", "chauvenet", "iraf", "minmax", '
'or "sigclip"')
if settings.rejection is not None:
settings.rejection = settings.rejection.lower()
if settings.rejection == 'none':
settings.rejection = None
lo, hi = settings.lo, settings.hi
if settings.rejection == 'iraf':
if lo is not None:
if lo % 1:
raise ValueError(
'Number of lowest values to clip for rejection=iraf '
'must be integer')
lo = int(lo)
if hi is not None:
if hi % 1:
raise ValueError(
'Number of highest values to clip for rejection=iraf '
'must be integer')
hi = int(hi)
# Load data files
if not self.file_ids:
return
data_files = [get_data_file_data(self.user_id, file_id)
for file_id in self.file_ids]
# Check data dimensions
shape = data_files[0][0].shape
for i, data_file in enumerate(list(data_files[1:])):
if data_file[0].shape != shape:
self.add_error(
'Data file {0} shape mismatch: expected {1[1]}x{1[0]}, got '
'{2[1]}x{2[0]}'.format(
self.file_ids[i + 1], shape, data_file[0].shape))
data_files.remove(data_file)
# Combine using the given settings
data, header = combine(
data_files, mode=settings.mode, scaling=settings.scaling,
rejection=settings.rejection, percentile=settings.percentile,
lo=lo, hi=hi, max_mem_mb=app.config.get('JOB_MAX_RAM'),
callback=self.update_progress)[0]
# Create a new data file in the given session and return its ID
adb = get_data_file_db(self.user_id)
try:
self.result.file_id = create_data_file(
adb, None, get_root(self.user_id), data, header,
duplicates='append', session_id=self.session_id).id
adb.commit()
except Exception:
adb.rollback()
raise
finally:
adb.remove()
```
#### File: public_api/v1/field_cals.py
```python
from flask import Response, request
from .... import app, auth, json_response
from ....models import FieldCal
from ....resources.field_cals import *
from ....schemas.api.v1 import FieldCalSchema
from . import url_prefix
resource_prefix = url_prefix + 'field-cals/'
@app.route(resource_prefix[:-1], methods=['GET', 'POST'])
@auth.auth_required('user')
def field_cals() -> Response:
"""
Return or create field cal(s)
GET /field-cals
- return a list of all user's field cals
POST /field-cals?name=...
- create field cal with the given name and parameters
:return:
GET: JSON response containing a list of serialized field cals
POST: JSON-serialized field cal
"""
if request.method == 'GET':
# List all field cals
return json_response(
[FieldCalSchema(cal)
for cal in query_field_cals(auth.current_user.id)])
if request.method == 'POST':
# Create field cal
return json_response(FieldCalSchema(create_field_cal(
auth.current_user.id,
FieldCal(
FieldCalSchema(_set_defaults=True, **request.args.to_dict()),
_set_defaults=True))), 201)
@app.route(resource_prefix + '<id_or_name>', methods=['GET', 'PUT', 'DELETE'])
@auth.auth_required('user')
def field_cal(id_or_name: str) -> Response:
"""
Return, update, or delete a field cal
GET /field-cals/[id or name]
- return a single field cal with the given ID or name
PUT /field-cals/[id or name]?...
- update field cal parameters
DELETE /field-cals/[id or name]
- delete the given field cal
:param id_or_name: field cal ID (integer) or name
:return:
GET, PUT: JSON-serialized field cal
DELETE: empty response
"""
cal = get_field_cal(auth.current_user.id, id_or_name)
if request.method == 'GET':
# Return specific field cal resource
return json_response(FieldCalSchema(cal))
if request.method == 'PUT':
# Update field cal
return json_response(FieldCalSchema(update_field_cal(
auth.current_user.id, cal.id,
FieldCal(FieldCalSchema(**request.args.to_dict()),
only=list(request.args.keys())))))
if request.method == 'DELETE':
# Delete field cal
delete_field_cal(auth.current_user.id, cal.id)
return json_response()
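# --- Hedged usage sketch (not part of the original module) ---
# Assuming url_prefix resolves to "/api/v1/" and a valid user token, the endpoints
# above could be exercised roughly like this (host, prefix, and names are illustrative):
#   POST   /api/v1/field-cals?name=my_cal          -> create a field cal
#   GET    /api/v1/field-cals                      -> list all field cals
#   PUT    /api/v1/field-cals/my_cal?name=renamed  -> update by name or ID
#   DELETE /api/v1/field-cals/my_cal               -> delete by name or ID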
``` |
{
"source": "john-tornblom/mltree_check",
"score": 2
} |
#### File: john-tornblom/mltree_check/mltree_check.py
```python
import z3
class Walker(object):
symtab = None
feature_names = None
def __init__(self, feature_names=None):
self.symtab = dict()
self.feature_names = feature_names
if feature_names is not None:
for name in feature_names:
self.symtab[name] = z3.Real(name)
def accept(self, tree, parent_id, node_id):
left_id = tree.children_left[node_id]
right_id = tree.children_right[node_id]
if left_id < 0 or right_id < 0:
return self.terminal(tree, node_id)
sym = self.symbol(tree, node_id)
cond = sym <= tree.threshold[node_id]
iftrue = self.accept(tree, node_id, left_id)
iffalse = self.accept(tree, node_id, right_id)
return [z3.If(cond, t, f) for t, f in zip(iftrue, iffalse)]
def terminal(self, tree, node_id):
if tree.n_outputs != 1:
raise Exception('Unsupported value type in terminal')
return tree.value[node_id][0, :]
def symbol(self, tree, node_id):
idx = tree.feature[node_id]
if idx < 0:
idx += tree.n_features
if self.feature_names is None:
name = 'x%d' % (idx + 1)
else:
name = self.feature_names[idx]
        if name not in self.symtab:
self.symtab[name] = z3.Real(name)
return self.symtab[name]
def translate(tree, feature_names=None, target_names=None):
w = Walker(feature_names)
symbols = dict()
res = w.accept(tree.tree_, 0, 0)
for idx, y in enumerate(res):
if target_names is None:
name = 'y%d' % (idx+1)
else:
name = target_names[idx]
symbols[name] = y
symbols.update(w.symtab)
return symbols
def check(*args):
s = z3.Solver()
s.add(*args)
res = s.check()
if res.r > 0:
return s.model()
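# --- Hedged usage sketch (not part of the original file) ---
# Assumes a scikit-learn style decision tree; the data and bounds below are illustrative.
if __name__ == "__main__":
    import numpy as np
    from sklearn.tree import DecisionTreeRegressor

    X = np.random.rand(100, 2)
    y_data = (X[:, 0] + X[:, 1]) / 2
    estimator = DecisionTreeRegressor(max_depth=3).fit(X, y_data)

    # Encode the fitted tree as z3 expressions over x1, x2 (inputs) and y1 (output)
    sym = translate(estimator, feature_names=['x1', 'x2'], target_names=['y1'])

    # Is there an input inside the unit square that drives the output above 0.9?
    model = check(sym['x1'] >= 0, sym['x1'] <= 1,
                  sym['x2'] >= 0, sym['x2'] <= 1,
                  sym['y1'] > 0.9)
    print(model if model is not None else 'property holds (no counterexample found)')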
``` |
{
"source": "johntremblay/sb",
"score": 2
} |
#### File: speechbrain/nnet/attention.py
```python
import torch
from torch.nn.parameter import Parameter
from torch.nn import Linear
import logging
import torch.nn as nn
import warnings
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.functional import linear, softmax, dropout
from torch.nn.functional import pad
import numpy as np
import math
import torch.nn.functional as F
from typing import Optional
from speechbrain.dataio.dataio import length_to_mask
from speechbrain.nnet.attention_utilities.longformer_diagonaled_mm_tvm import (
mask_invalid_locations,
)
from speechbrain.nnet.attention_utilities.longformer_sliding_chunks import (
sliding_chunks_matmul_qk,
sliding_chunks_matmul_pv,
)
from speechbrain.nnet.attention_utilities.longformer_sliding_chunks import (
sliding_chunks_no_overlap_matmul_qk,
sliding_chunks_no_overlap_matmul_pv,
)
from speechbrain.nnet.attention_utilities.linformer_utilities import get_EF
logger = logging.getLogger(__name__)
class ContentBasedAttention(nn.Module):
""" This class implements content-based attention module for seq2seq
learning.
Reference: NEURAL MACHINE TRANSLATION BY JOINTLY LEARNING TO ALIGN
AND TRANSLATE, Bahdanau et.al. https://arxiv.org/pdf/1409.0473.pdf
Arguments
---------
attn_dim : int
Size of the attention feature.
output_dim : int
Size of the output context vector.
scaling : float
The factor controls the sharpening degree (default: 1.0).
Example
-------
>>> enc_tensor = torch.rand([4, 10, 20])
>>> enc_len = torch.ones([4]) * 10
>>> dec_tensor = torch.rand([4, 25])
>>> net = ContentBasedAttention(enc_dim=20, dec_dim=25, attn_dim=30, output_dim=5)
>>> out_tensor, out_weight = net(enc_tensor, enc_len, dec_tensor)
>>> out_tensor.shape
torch.Size([4, 5])
"""
def __init__(self, enc_dim, dec_dim, attn_dim, output_dim, scaling=1.0):
super(ContentBasedAttention, self).__init__()
self.mlp_enc = nn.Linear(enc_dim, attn_dim)
self.mlp_dec = nn.Linear(dec_dim, attn_dim)
self.mlp_attn = nn.Linear(attn_dim, 1, bias=False)
self.mlp_out = nn.Linear(enc_dim, output_dim)
self.scaling = scaling
self.softmax = nn.Softmax(dim=-1)
# reset the encoder states, lengths and masks
self.reset()
def reset(self):
"""Reset the memory in the attention module.
"""
self.enc_len = None
self.precomputed_enc_h = None
self.mask = None
def forward(self, enc_states, enc_len, dec_states):
"""Returns the output of the attention module.
Arguments
---------
enc_states : torch.Tensor
The tensor to be attended.
enc_len : torch.Tensor
The real length (without padding) of enc_states for each sentence.
dec_states : torch.Tensor
The query tensor.
"""
if self.precomputed_enc_h is None:
self.precomputed_enc_h = self.mlp_enc(enc_states)
self.mask = length_to_mask(
enc_len, max_len=enc_states.size(1), device=enc_states.device
)
dec_h = self.mlp_dec(dec_states.unsqueeze(1))
attn = self.mlp_attn(
torch.tanh(self.precomputed_enc_h + dec_h)
).squeeze(-1)
# mask the padded frames
attn = attn.masked_fill(self.mask == 0, -np.inf)
attn = self.softmax(attn * self.scaling)
# compute context vectors
# [B, 1, L] X [B, L, F]
context = torch.bmm(attn.unsqueeze(1), enc_states).squeeze(1)
context = self.mlp_out(context)
return context, attn
class LocationAwareAttention(nn.Module):
"""This class implements location-aware attention module for seq2seq learning.
Reference: Attention-Based Models for Speech Recognition, Chorowski et.al.
https://arxiv.org/pdf/1506.07503.pdf
Arguments
---------
attn_dim : int
Size of the attention feature.
output_dim : int
Size of the output context vector.
conv_channels : int
Number of channel for location feature.
kernel_size : int
Kernel size of convolutional layer for location feature.
scaling : float
The factor controls the sharpening degree (default: 1.0).
Example
-------
>>> enc_tensor = torch.rand([4, 10, 20])
>>> enc_len = torch.ones([4]) * 10
>>> dec_tensor = torch.rand([4, 25])
>>> net = LocationAwareAttention(
... enc_dim=20,
... dec_dim=25,
... attn_dim=30,
... output_dim=5,
... conv_channels=10,
... kernel_size=100)
>>> out_tensor, out_weight = net(enc_tensor, enc_len, dec_tensor)
>>> out_tensor.shape
torch.Size([4, 5])
"""
precomputed_enc_h: Optional[torch.Tensor]
def __init__(
self,
enc_dim,
dec_dim,
attn_dim,
output_dim,
conv_channels,
kernel_size,
scaling=1.0,
):
super(LocationAwareAttention, self).__init__()
self.mlp_enc = nn.Linear(enc_dim, attn_dim)
self.mlp_dec = nn.Linear(dec_dim, attn_dim)
self.mlp_attn = nn.Linear(attn_dim, 1, bias=False)
self.conv_loc = nn.Conv1d(
1,
conv_channels,
kernel_size=2 * kernel_size + 1,
padding=kernel_size,
bias=False,
)
self.mlp_loc = nn.Linear(conv_channels, attn_dim)
self.mlp_attn = nn.Linear(attn_dim, 1, bias=False)
self.mlp_out = nn.Linear(enc_dim, output_dim)
self.scaling = scaling
self.softmax = nn.Softmax(dim=-1)
# reset the encoder states, lengths and masks
self.reset()
def reset(self):
"""Reset the memory in attention module.
"""
self.enc_len = None
self.precomputed_enc_h = None
self.mask = None
self.prev_attn = None
def forward(self, enc_states, enc_len, dec_states):
"""Returns the output of the attention module.
Arguments
---------
enc_states : torch.Tensor
The tensor to be attended.
enc_len : torch.Tensor
The real length (without padding) of enc_states for each sentence.
dec_states : torch.Tensor
The query tensor.
"""
if self.precomputed_enc_h is None:
self.precomputed_enc_h = self.mlp_enc(enc_states)
self.mask = length_to_mask(
enc_len, max_len=enc_states.size(1), device=enc_states.device
)
# multiply mask by 1/Ln for each row
self.prev_attn = self.mask * (1 / enc_len.float()).unsqueeze(1)
# compute location-aware features
# [B, 1, L] -> [B, C, L]
attn_conv = self.conv_loc(self.prev_attn.unsqueeze(1))
# [B, C, L] -> [B, L, C] -> [B, L, F]
attn_conv = self.mlp_loc(attn_conv.transpose(1, 2))
dec_h = self.mlp_dec(dec_states.unsqueeze(1))
attn = self.mlp_attn(
torch.tanh(self.precomputed_enc_h + dec_h + attn_conv)
).squeeze(-1)
# mask the padded frames
attn = attn.masked_fill(self.mask == 0, -np.inf)
attn = self.softmax(attn * self.scaling)
# set prev_attn to current attn for the next timestep
self.prev_attn = attn.detach()
# compute context vectors
# [B, 1, L] X [B, L, F]
context = torch.bmm(attn.unsqueeze(1), enc_states).squeeze(1)
context = self.mlp_out(context)
return context, attn
class KeyValueAttention(nn.Module):
""" This class implements a single-headed key-value attention module for seq2seq
learning.
Reference: "Attention Is All You Need" by Vaswani et al., sec. 3.2.1
Arguments
---------
enc_dim : int
Size of the encoder feature vectors from which keys and values are computed.
dec_dim : int
Size of the decoder feature vectors from which queries are computed.
attn_dim : int
Size of the attention feature.
output_dim : int
Size of the output context vector.
Example
-------
>>> enc_tensor = torch.rand([4, 10, 20])
>>> enc_len = torch.ones([4]) * 10
>>> dec_tensor = torch.rand([4, 25])
>>> net = KeyValueAttention(enc_dim=20, dec_dim=25, attn_dim=30, output_dim=5)
>>> out_tensor, out_weight = net(enc_tensor, enc_len, dec_tensor)
>>> out_tensor.shape
torch.Size([4, 5])
"""
def __init__(self, enc_dim, dec_dim, attn_dim, output_dim):
super(KeyValueAttention, self).__init__()
self.key_linear = nn.Linear(enc_dim, attn_dim)
self.query_linear = nn.Linear(dec_dim, attn_dim)
self.value_linear = nn.Linear(enc_dim, output_dim)
self.scaling = torch.sqrt(torch.tensor(attn_dim).float())
# reset the encoder states, lengths and masks
self.reset()
def reset(self):
"""Reset the memory in the attention module.
"""
self.values = None
self.keys = None
self.mask = None
def forward(self, enc_states, enc_len, dec_states):
"""Returns the output of the attention module.
Arguments
---------
enc_states : torch.Tensor
The tensor to be attended.
enc_len : torch.Tensor
The real length (without padding) of enc_states for each sentence.
dec_states : torch.Tensor
The query tensor.
"""
if self.keys is None:
self.keys = self.key_linear(enc_states)
self.values = self.value_linear(enc_states)
self.mask = length_to_mask(
enc_len, max_len=enc_states.size(1), device=enc_states.device
).unsqueeze(2)
query = self.query_linear(dec_states).unsqueeze(2)
scores = torch.matmul(self.keys, query) / self.scaling
scores = scores.masked_fill(self.mask == 0, -np.inf)
normalized_scores = scores.softmax(1).transpose(1, 2)
out = torch.matmul(normalized_scores, self.values).squeeze(1)
return out, normalized_scores
class MultiheadAttention(nn.Module):
""" The class is a wrapper of MultiHead Attention for torch.nn.MultiHeadAttention.
Reference: https://pytorch.org/docs/stable/nn.html
Arguments
----------
num_heads : int
parallel attention heads.
dropout : float
a Dropout layer on attn_output_weights (default: 0.0).
bias : bool
add bias as module parameter (default: True).
add_bias_kv : bool
add bias to the key and value sequences at dim=0.
add_zero_attn : bool
add a new batch of zeros to the key and value sequences at dim=1.
kdim : int
total number of features in key (default: None).
vdim : int
total number of features in value (default: None).
Example
-------
>>> inputs = torch.rand([8, 60, 512])
>>> net = MultiheadAttention(nhead=8, d_model=inputs.shape[-1])
>>> outputs, attn = net(inputs, inputs, inputs)
>>> outputs.shape
torch.Size([8, 60, 512])
"""
def __init__(
self,
nhead,
d_model,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
):
super().__init__()
self.att = nn.MultiheadAttention(
embed_dim=d_model,
num_heads=nhead,
dropout=dropout,
bias=bias,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
kdim=kdim,
vdim=vdim,
)
def forward(
self,
query,
key,
value,
attn_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[torch.Tensor] = None,
):
"""
Arguments
----------
query : tensor
(N, L, E) where L is the target sequence length,
N is the batch size, E is the embedding dimension.
key : tensor
(N, S, E) where S is the source sequence length,
N is the batch size, E is the embedding dimension.
value : tensor
(N, S, E) where S is the source sequence length,
N is the batch size, E is the embedding dimension.
key_padding_mask : tensor
(N, S) where N is the batch size, S is the source sequence
length. If a ByteTensor is provided, the non-zero positions will
be ignored while the position with the zero positions will be
unchanged. If a BoolTensor is provided, the positions with the
value of True will be ignored while the position with the value
of False will be unchanged.
attn_mask : tensor
2D mask (L, S) where L is the target sequence length, S is
the source sequence length.
3D mask (N*num_heads, L, S) where N is the batch
size, L is the target sequence length, S is the source sequence
            length. attn_mask ensures that position i is allowed to attend the
unmasked positions. If a ByteTensor is provided, the non-zero
positions are not allowed to attend while the zero positions will
be unchanged. If a BoolTensor is provided, positions with True is
not allowed to attend while False values will be unchanged. If a
FloatTensor is provided, it will be added to the attention weight.
Outputs
-------
attn_output : tensor
(L, N, E) where L is the target sequence length, N is the
batch size, E is the embedding dimension.
attn_output_weights : tensor
(N, L, S) where N is the batch size, L is the target
sequence length, S is the source sequence length.
"""
# give tensors of shape (time, batch, fea)
query = query.permute(1, 0, 2)
key = key.permute(1, 0, 2)
value = value.permute(1, 0, 2)
output, attention = self.att(
query,
key,
value,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
)
# reshape the output back to (batch, time, fea)
output = output.permute(1, 0, 2)
return output, attention
class PositionalwiseFeedForward(nn.Module):
"""The class implements the positional-wise feed forward module in
“Attention Is All You Need”.
Arguments
----------
d_ffn: int
Dimension of representation space of this positional-wise feed
forward module.
input_shape : tuple
Expected shape of the input. Alternatively use ``input_size``.
input_size : int
Expected size of the input. Alternatively use ``input_shape``.
dropout: float
Fraction of outputs to drop.
activation: torch class
activation functions to be applied (Recommendation: ReLU, GELU).
Example
-------
>>> inputs = torch.rand([8, 60, 512])
>>> net = PositionalwiseFeedForward(256, input_size=inputs.shape[-1])
>>> outputs = net(inputs)
>>> outputs.shape
torch.Size([8, 60, 512])
"""
def __init__(
self,
d_ffn,
input_shape=None,
input_size=None,
dropout=0.1,
activation=nn.ReLU,
):
super().__init__()
if input_shape is None and input_size is None:
raise ValueError("Expected one of input_shape or input_size")
if input_size is None:
input_size = input_shape[-1]
self.ffn = nn.Sequential(
nn.Linear(input_size, d_ffn),
activation(),
nn.Dropout(dropout),
nn.Linear(d_ffn, input_size),
)
def forward(self, x):
        # give a tensor of shape (time, batch, fea)
x = x.permute(1, 0, 2)
x = self.ffn(x)
# reshape the output back to (batch, time, fea)
x = x.permute(1, 0, 2)
return x
class LongformerSelfAttention(nn.Module):
"""
This class comes from: https://github.com/allenai/longformer
Longformer is an open-source project developed by the Allen Institute for Artificial Intelligence (AI2).
AI2 is a non-profit institute with the mission to contribute to humanity through high-impact AI research and
engineering.
The Longformer paper:
@article{
Beltagy2020Longformer,
title={Longformer: The Long-Document Transformer},
author={<NAME> and <NAME> and <NAME>},
journal={arXiv:2004.05150},
year={2020}
}
Parts of the code found herein were modified by: <NAME> (<EMAIL>) in order
to fit SpeechBrain's interface.
"""
def __init__(
self,
layer_id,
num_attention_heads,
hidden_size,
attention_probs_dropout_prob,
attention_window,
attention_mode,
attention_dilation,
):
super(LongformerSelfAttention, self).__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads)
)
self.num_heads = num_attention_heads
self.head_dim = int(hidden_size / num_attention_heads)
self.embed_dim = hidden_size
self.attention_dilation = attention_dilation # Not implemented yet
self.query = nn.Linear(hidden_size, self.embed_dim)
self.key = nn.Linear(hidden_size, self.embed_dim)
self.value = nn.Linear(hidden_size, self.embed_dim)
self.query_global = nn.Linear(hidden_size, self.embed_dim)
self.key_global = nn.Linear(hidden_size, self.embed_dim)
self.value_global = nn.Linear(hidden_size, self.embed_dim)
self.dropout = attention_probs_dropout_prob
self.layer_id = layer_id
self.attention_window = attention_window[self.layer_id]
self.attention_dilation = self.attention_dilation[self.layer_id]
self.attention_mode = attention_mode
assert self.attention_window > 0
assert self.attention_dilation > 0
assert self.attention_mode in [
"sliding_chunks",
"sliding_chunks_no_overlap",
]
if self.attention_mode in [
"sliding_chunks",
"sliding_chunks_no_overlap",
]:
assert (
self.attention_dilation == 1
), "dilatation is not implemented yet"
def forward(
self, hidden_states, output_attentions=False,
):
hidden_states = hidden_states.transpose(0, 1)
seq_len, bsz, embed_dim = hidden_states.size()
assert embed_dim == self.embed_dim
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
q /= math.sqrt(self.head_dim)
q = q.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1)
k = k.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1)
if self.attention_mode == "sliding_chunks":
attn_weights = sliding_chunks_matmul_qk(
q, k, self.attention_window, padding_value=0
)
elif self.attention_mode == "sliding_chunks_no_overlap":
attn_weights = sliding_chunks_no_overlap_matmul_qk(
q, k, self.attention_window, padding_value=0
)
else:
            raise ValueError("Unsupported attention mode: {}".format(self.attention_mode))
mask_invalid_locations(
attn_weights, self.attention_window, self.attention_dilation, False
)
assert list(attn_weights.size())[:3] == [bsz, seq_len, self.num_heads]
assert attn_weights.size(dim=3) in [
self.attention_window * 2 + 1,
self.attention_window * 3,
]
attn_weights_float = F.softmax(
attn_weights, dim=-1, dtype=torch.float32
) # use fp32 for numerical stability
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(
attn_weights_float.type_as(attn_weights),
p=self.dropout,
training=self.training,
)
v = v.view(seq_len, bsz, self.num_heads, self.head_dim).transpose(0, 1)
attn = 0
if self.attention_mode == "sliding_chunks":
attn += sliding_chunks_matmul_pv(
attn_probs, v, self.attention_window
)
elif self.attention_mode == "sliding_chunks_no_overlap":
attn += sliding_chunks_no_overlap_matmul_pv(
attn_probs, v, self.attention_window
)
else:
            raise ValueError("Unsupported attention mode: {}".format(self.attention_mode))
attn = attn.type_as(hidden_states)
assert list(attn.size()) == [
bsz,
seq_len,
self.num_heads,
self.head_dim,
]
attn = (
attn.transpose(0, 1).reshape(seq_len, bsz, embed_dim).contiguous()
)
context_layer = attn.transpose(0, 1)
if output_attentions:
# without global attention, return local attention probabilities
# batch_size x num_heads x sequence_length x window_size
# which is the attention weights of every token attending to its neighbours
attn_weights = attn_weights.permute(0, 2, 1, 3)
outputs = (
(context_layer, (attn_weights.sum(dim=1) / self.num_heads))
if output_attentions
else (context_layer,)
)
return outputs
class LinearMultiheadAttention(nn.Module):
"""
This class comes from (it was adjusted to fit SpeechBrain's design):
https://github.com/kuixu/Linear-Multihead-Attention
"""
__annotations__ = {
"bias_k": torch._jit_internal.Optional[torch.Tensor],
"bias_v": torch._jit_internal.Optional[torch.Tensor],
}
__constants__ = [
"q_proj_weight",
"k_proj_weight",
"v_proj_weight",
"in_proj_weight",
"e_proj_weight",
"f_proj_weight",
]
def __init__(
self,
embed_dim,
num_heads,
dropout=0.1,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
seq_len=512,
proj_k=128,
param_sharing="none",
method="convolution",
layerwise_proj=None,
):
super(LinearMultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = (
self.kdim == embed_dim and self.vdim == embed_dim
)
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter("in_proj_weight", None)
else:
self.in_proj_weight = Parameter(
torch.empty(3 * embed_dim, embed_dim)
)
self.register_parameter("q_proj_weight", None)
self.register_parameter("k_proj_weight", None)
self.register_parameter("v_proj_weight", None)
self.method = method
if param_sharing != "layerwise":
self.e_proj = get_EF(
seq_len,
proj_k,
method=self.method,
head_dim=self.embed_dim,
bias=True,
)
if param_sharing == "key_value":
self.f_proj = self.e_proj
elif param_sharing == "layerwise":
self.layerwise_proj = layerwise_proj
self.f_proj = self.e_proj = self.layerwise_proj
else:
self.f_proj = get_EF(
seq_len,
proj_k,
method=self.method,
head_dim=self.embed_dim,
bias=True,
)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter("in_proj_bias", None)
self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.method == "learnable":
xavier_uniform_(self.e_proj.weight)
xavier_uniform_(self.f_proj.weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.0)
constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
# if self.method == "learnable":
# if self.e_proj.bias is not None:
# xavier_normal_(self.e_proj.bias)
# if self.f_proj.bias is not None:
# xavier_normal_(self.f_proj.bias)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if "_qkv_same_embed_dim" not in state:
state["_qkv_same_embed_dim"] = True
super(LinearMultiheadAttention, self).__setstate__(state)
def forward(
self,
query,
key,
value,
attn_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[torch.Tensor] = None,
need_weights: Optional[bool] = True,
):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
                be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. This is an additive mask
(i.e. the values will be added to the attention layer). A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not self._qkv_same_embed_dim:
# print(self.e_proj_weight.size())
return linear_multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight,
k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight,
e_proj=self.e_proj,
f_proj=self.f_proj,
method=self.method,
)
else:
# print(self.e_proj_weight.size())
return linear_multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
e_proj=self.e_proj,
f_proj=self.f_proj,
method=self.method,
)
def linear_multi_head_attention_forward(
query, # type: Tensor
key, # type: Tensor
value, # type: Tensor
embed_dim_to_check, # type: int
num_heads, # type: int
in_proj_weight, # type: Tensor
in_proj_bias, # type: Tensor
bias_k, # type: Optional[Tensor]
bias_v, # type: Optional[Tensor]
# bias_e, # type: Optional[Tensor]
# bias_f, # type: Optional[Tensor]
add_zero_attn, # type: bool
dropout_p, # type: float
out_proj_weight, # type: Tensor
out_proj_bias, # type: Tensor
training=True, # type: bool
key_padding_mask=None, # type: Optional[Tensor]
need_weights=True, # type: bool
attn_mask=None, # type: Optional[Tensor]
use_separate_proj_weight=False, # type: bool
q_proj_weight=None, # type: Optional[Tensor]
k_proj_weight=None, # type: Optional[Tensor]
v_proj_weight=None, # type: Optional[Tensor]
e_proj=None, # type: Optional[Tensor]
f_proj=None, # type: Optional[Tensor]
method="learnable", # type: str
static_k=None, # type: Optional[Tensor]
static_v=None, # type: Optional[Tensor]
):
# type: (...) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
bias_e, bias_f: bias of the two linear projection to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
e_proj_weight, f_proj_weight: linear projection weight.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
# give tensors of shape (time, batch, feature)
query = query.permute(1, 0, 2)
key = key.permute(1, 0, 2)
value = value.permute(1, 0, 2)
tgt_len, bsz, embed_dim = query.size()
if method == "learnable":
proj_k, seq_len = e_proj.weight.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert (
head_dim * num_heads == embed_dim
), "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(
3, dim=-1
)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(
key,
k_proj_weight_non_opt,
in_proj_bias[embed_dim: (embed_dim * 2)],
)
v = linear(
value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]
)
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert (
attn_mask.dtype == torch.float32
or attn_mask.dtype == torch.float64
or attn_mask.dtype == torch.float16
or attn_mask.dtype == torch.uint8
or attn_mask.dtype == torch.bool
), "Only float, byte, and bool types are supported for attn_mask, not {}".format(
attn_mask.dtype
)
if attn_mask.dtype == torch.uint8:
warnings.warn(
"Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError(
"The size of the 2D attn_mask is not correct."
)
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [
bsz * num_heads,
query.size(0),
key.size(0),
]:
raise RuntimeError(
"The size of the 3D attn_mask is not correct."
)
else:
raise RuntimeError(
"attn_mask's dimension {} is not supported".format(
attn_mask.dim()
)
)
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
##======= linformer =========##
if method == "learnable":
k = k.permute(1, 2, 0)
k = linear(k, e_proj.weight[:, 0:tgt_len], e_proj.bias)
v = v.permute(1, 2, 0)
v = linear(v, f_proj.weight[:, 0:tgt_len], f_proj.bias)
elif method == "convolution":
k = k.permute(1, 2, 0)
v = v.permute(1, 2, 0)
k = e_proj(k)
v = f_proj(v)
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if add_zero_attn:
src_len += 1
k = torch.cat(
[
k,
torch.zeros(
(k.size(0), 1) + k.size()[2:],
dtype=k.dtype,
device=k.device,
),
],
dim=1,
)
v = torch.cat(
[
v,
torch.zeros(
(v.size(0), 1) + v.size()[2:],
dtype=v.dtype,
device=v.device,
),
],
dim=1,
)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [
bsz * num_heads,
tgt_len,
src_len,
]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float("-inf"))
else:
attn_output_weights += attn_mask
attn_output_weights = softmax(attn_output_weights, dim=-1)
attn_output_weights = dropout(
attn_output_weights, p=dropout_p, training=training
)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = (
attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
# reshape the output back to (batch, time, feature)
attn_output = attn_output.permute(1, 0, 2)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(
bsz, num_heads, tgt_len, src_len
)
return attn_output, (attn_output_weights.sum(dim=1) / num_heads)
else:
return attn_output, None
``` |
{
"source": "johntsi/preast_qa",
"score": 3
} |
#### File: preast_qa/general_modules/create_embeddings.py
```python
import torch
import os
from tqdm import tqdm
from my_tokenizer import construct_tokenizer
def create_embeddings(glove_path):
"""
Creates an embedding matrix for all the tokens in the vocabulary and saves them in a .pt file
Load 300d cached glove embeddings and
loop through the vocabulary tokens from the tokenizer to init them with
(1) zeros if the token is the padding token
(2) the glove vector if the token is part of glove
    (3) the glove vector plus some gaussian noise if the sub-token is part of glove
    (4) random normal vector if completely unknown
Args:
glove_path: str
            The path for the 300d glove embeddings
Download from: https://nlp.stanford.edu/projects/glove/
"""
glove_vocab, glove_token2id, glove_vectors, d_emb = torch.load(glove_path)
glove_vocab = set(glove_vocab)
tokenizer = construct_tokenizer()
not_found = []
new_vectors = torch.zeros((len(tokenizer.vocab), d_emb), dtype = torch.float)
for idx in tqdm(tokenizer.vocab.values()):
token = tokenizer.ids_to_tokens[idx]
if (token == tokenizer.pad_token):
vector = torch.zeros((1, d_emb), dtype = torch.float)
elif token in glove_vocab:
vector = glove_vectors[glove_token2id[token]].unsqueeze(0)
elif "##" in token:
reduced_token = token[2:]
if reduced_token in glove_vocab:
# plus some gaussian noise
vector = glove_vectors[glove_token2id[reduced_token]].unsqueeze(0) + torch.normal(0, 0.005, size = (1, d_emb))
else:
not_found.append(token)
vector = torch.normal(0, 0.01, size = (1, d_emb))
else:
not_found.append(token)
vector = torch.normal(0, 0.01, size = (1, d_emb))
new_vectors[int(idx)] = vector
print(f"{len(not_found)} tokens and subtokens were not found in pre-trained glove")
embeddings_path = os.path.join(os.path.dirname(glove_path), "embeddings.pt")
torch.save(new_vectors, embeddings_path)
print(f"Saved embeddings in {embeddings_path}")
```
#### File: preast_qa/general_modules/my_losses.py
```python
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import BCEWithLogitsLoss
class DecoderLoss(nn.Module):
"""
Loss function for answer generation
computes the negative log-likelihood of the ground truth tokens
averaged per number of valid tokens
"""
def __init__(self, pad_idx, unk_idx):
super(DecoderLoss, self).__init__()
self.pad_idx = pad_idx
self.unk_idx = unk_idx
def forward(self, probs, a_trg):
"""
Args:
probs: the probabilities of the model over the extended vocabulary for each token
(3d float tensor) [n_ans x T x d_ext_vocab]
a_trg: the indices of the ground truth tokens in the extended vocabulary
(2d long tensor) [n_ans x T]
        Returns:
dec_loss: negative average log-likelihood for each example
(1d float tensor) [n_ans]
"""
current_device = torch.device(f"cuda:{torch.cuda.current_device()}") if torch.cuda.is_available() else torch.device("cpu")
        # take into account only non-padding and non-unknown tokens
# [n_ans x T]
mask = (a_trg != self.pad_idx) * (a_trg != self.unk_idx)
mask = mask.to(current_device)
# number of valid tokens per example
n_steps = mask.sum(axis = 1) # (1d long) [n_ans]
# probabilities of the ground-truth (valid positions) in the sequences (2d float) [n_ans x T]
target_probs = torch.gather(probs, 2, a_trg.unsqueeze(2)).squeeze(2).masked_fill(mask == 0, 1)
# negative average log-probabilities
dec_loss = - torch.log(target_probs).sum(dim = 1) / n_steps # (1d float) [n_ans]
return dec_loss
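# --- Hedged usage sketch for DecoderLoss (not part of the original file); ids are illustrative ---
# dev = "cuda" if torch.cuda.is_available() else "cpu"
# probs = F.softmax(torch.randn(2, 5, 11), dim=-1).to(dev)    # [n_ans x T x d_ext_vocab]
# a_trg = torch.randint(2, 11, (2, 5)).to(dev)                # ground-truth token ids
# dec_loss = DecoderLoss(pad_idx=0, unk_idx=1)(probs, a_trg)  # -> shape [2]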
class RankerLoss(nn.Module):
"""
    Loss functions for the pointwise and pairwise rankers
"""
def __init__(self, args, label_smoothing = False):
super(RankerLoss, self).__init__()
self.method = args.rnk_method
if self.method == "pointwise":
self.loss_fn = BCEWithLogitsLoss(reduction = "none")
self.label_smoothing = label_smoothing
self.epsilon = args.epsilon_smoothing
else:
self.K = args.max_num_passages
def forward(self, rnk_scores, rel):
"""
Args:
rnk_scores: the scores of the ranker module
Pointwise ranker: (2d float tensor) [bs x K]
Pairwise ranker: (4f float tensor) [bs x K x K x 3]
rel: ground-truth relevance labels for each passage
(2d long tensor) [bs x K]
        Returns:
            rnk_loss:
                Pointwise: negative average log-likelihood of the correct labels for each example
averaged per number of passages in the example
Pairwise: negative average log-likelihood of the correct labels for each example
averaged per number of comparisons K**2
(1d float tensor) [n_ans]
"""
if self.method == "pointwise":
rel = rel.float()
if self.label_smoothing:
rel = rel * (1 - self.epsilon)
# ranking loss per example averaged over the number of available passages
rnk_loss = self.loss_fn(rnk_scores, rel).mean(dim = 1)
else:
current_device = torch.device(f"cuda:{torch.cuda.current_device()}") if torch.cuda.is_available() else torch.device("cpu")
# batch size and number of passages
bs, K = rel.size()
# transform the pointwise relevance labels to pairwise relevance labels
            # (3d long tensor) [bs x K x K]
r = torch.ones([bs, K, K], dtype = torch.long, device = current_device)
r = r.masked_fill(rel.unsqueeze(2) > rel.unsqueeze(1), 2)
r = r.masked_fill(rel.unsqueeze(2) < rel.unsqueeze(1), 0)
# negative average log likelihood of the correct pairwise labels per example
# averaged per number of comparisons
rnk_loss = - torch.log(torch.gather(F.softmax(rnk_scores, dim = -1),
3, r.unsqueeze(3)).squeeze(3)).sum(dim = (1, 2)) / K**2
return rnk_loss
class ClassifierLoss(nn.Module):
def __init__(self, args, label_smoothing=False):
super(ClassifierLoss, self).__init__()
self.loss_fn = BCEWithLogitsLoss(reduction="none")
# positive class label smoothing for regularization
self.epsilon = args.epsilon_smoothing
self.label_smoothing = label_smoothing
def forward(self, scores, ans):
"""
Args:
scores: non-probabilites scores from the classifier module
(1d float tensor) [bs]
ans: ground-truth labels for the answerability of each example
(1d long tensor) [bs]
        Returns:
cls_loss: negative log-likelihood for each example
(1d float tensor) [bs]
"""
ans = ans.float()
if self.label_smoothing:
ans = ans * (1 - self.epsilon)
cls_loss = self.loss_fn(scores, ans)
return cls_loss
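# --- Hedged usage sketch for the pairwise RankerLoss (not part of the original file) ---
# The args namespace below only mimics the fields the class reads; values are illustrative.
# from argparse import Namespace
# args = Namespace(rnk_method="pairwise", max_num_passages=3, epsilon_smoothing=0.1)
# dev = "cuda" if torch.cuda.is_available() else "cpu"
# rnk_scores = torch.randn(2, 3, 3, 3).to(dev)        # [bs x K x K x 3] pairwise logits
# rel = torch.tensor([[1, 0, 0], [0, 1, 1]]).to(dev)  # per-passage relevance labels
# rnk_loss = RankerLoss(args)(rnk_scores, rel)        # -> shape [2]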
```
#### File: preast_qa/general_modules/postprocess_decoded_seq.py
```python
def postprocess_decoded_seq(answers):
"""
Corrects for some extra spaces that are created by the decode method
of the tokenizer like in numerical strings
example: 1, 000, 000 --> 1,000,000
Args:
answers: list[str]
Returns:
new_answers: list[str]
"""
new_answers = []
for answer in answers:
parts = answer.split(", ")
if len(parts) > 1:
try:
new0 = parts[0]
for i in range(1, len(parts)):
if new0[-1].isnumeric() and parts[i][0].isnumeric():
if len(parts[i]) > 3 and parts[i][3].isnumeric():
new0 = ", ".join([new0, parts[i]])
else:
new0 = ",".join([new0, parts[i]])
else:
new0 = ", ".join([new0, parts[i]])
except IndexError:
print("--> IndexError:", answer)
new0 = answer
else:
new0 = answer
parts = new0.split(". ")
if len(parts) > 1:
new1 = parts[0]
for i in range(1, len(parts)):
try:
if new1[-1].isnumeric() and parts[i][0].isnumeric():
new1 = ".".join([new1, parts[i]])
else:
new1 = ". ".join([new1, parts[i]])
except IndexError:
new1 = parts[1]
else:
new1 = new0
parts = new1.split(" : ")
if len(parts) > 1:
new2 = parts[0]
for i in range(1, len(parts)):
if new2[-1].isnumeric() and parts[i][0].isnumeric():
new2 = ":".join([new2, parts[i]])
else:
new2 = " : ".join([new2, parts[i]])
else:
new2 = new1
new_answers.append(new2)
return new_answers
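# --- Hedged usage sketch (not part of the original file) ---
if __name__ == "__main__":
    print(postprocess_decoded_seq(["it costs 1, 000, 000 dollars", "pi is 3. 14"]))
    # -> ['it costs 1,000,000 dollars', 'pi is 3.14']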
```
#### File: preast_qa/models/attention.py
```python
import torch
from torch import nn
import torch.nn.functional as F
from math import sqrt
class MultiHeadAttention(nn.Module):
"""
Multi Head Attention module as described in "Attention is all you need"
After applying a linear transformation splits keys, values and queries in n heads
Then calculates the scaled similarity scores between queries and keys and
normallizes them along the dimension corresponding to the keys
Finally transforms the values according to the normallized scores and applies
another linear transformation
"""
def __init__(self, heads, d_model, dropout_rate):
super(MultiHeadAttention, self).__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model, bias=False)
self.v_linear = nn.Linear(d_model, d_model, bias=False)
self.k_linear = nn.Linear(d_model, d_model, bias=False)
self.dropout = nn.Dropout(dropout_rate)
self.out_linear = nn.Linear(d_model, d_model, bias=False)
self.temperature = sqrt(self.d_k)
def forward(self, k, v, q, mask=None):
"""
k == v always
k: (3d, float tensor), [bs x seq_len_k x d_model]
v: (3d, float tensor), [bs x seq_len_v x d_model]
q: (3d, float tensor), [bs x seq_len_q x d_model]
the mask corresponds to k and is (3d) [bs x seq_len_k x 1] or [bs x seq_len_k x seq_len_k] (decoder)
if k is passages(encoder) dim(0) = bs x K
if k is passages(decoder) dim(1) = K x seq_len_passages
        if k is the concatenated passages and questions then dim(0) = bs x (K + 1)
Args:
k, v, q: (3d float tensors)
mask: (3d long tensor) corresponding to k
Returns: (3d float tensor), same shape as k and v
"""
# get batch size and sequence lengths
bs, seq_len_k, _ = k.size()
seq_len_q = q.size(1)
# perform linear operation
k = self.k_linear(k)
v = self.v_linear(v)
q = self.q_linear(q)
# split into h heads
k = k.view(bs, seq_len_k, self.h, self.d_k).transpose(
1, 2
) # [bs x heads x seq_len_k x d_k]
v = v.view(bs, seq_len_k, self.h, self.d_k).transpose(
1, 2
) # [bs x heads x seq_len_k x d_k]
q = q.view(bs, seq_len_q, self.h, self.d_k).transpose(
1, 2
) # [bs x heads x seq_len_q x d_k]
# calculate the scaled similarity scores between queries and keys
scores = (
torch.matmul(q, k.transpose(-2, -1)) / self.temperature
) # (4d) [bs x heads x seq_len_q x seq_len_k]
# apply the key mask
if mask is not None:
mask = mask.unsqueeze(1).transpose(-2, -1) # (4d) [bs x 1 x 1 x seq_len_k]
scores = scores.masked_fill(
mask == 0, -1e9
) # (4d) [bs x heads x seq_len_q x seq_len_k]
        # normalize scores along the seq_len_k dimension and apply dropout
scores = self.dropout(F.softmax(scores, dim=-1))
        # transform the values by multiplying them with the normalized scores
attn = torch.matmul(scores, v) # (4d) [bs x heads x seq_len_q x d_k]
        # concatenate the heads along the last dimension
attn_concat = attn.transpose(1, 2).reshape(bs, seq_len_q, self.d_model)
# apply final linear transformation
out = self.out_linear(attn_concat) # (3d): [bs x seq_len_q x d_model]
return out
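# --- Hedged usage sketch (not part of the original file); shapes are illustrative ---
# mha = MultiHeadAttention(heads=8, d_model=256, dropout_rate=0.1)
# x = torch.rand(4, 20, 256)                     # [bs x seq_len x d_model]
# mask = torch.ones(4, 20, 1, dtype=torch.long)  # all key positions valid
# out = mha(k=x, v=x, q=x, mask=mask)            # self-attention -> [4 x 20 x 256]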
class DualAttention(nn.Module):
"""
Dual Attention Module as implemented in Xiong et al 2017
First calculates a similarity matrix between Ex and Ey
    Then gets A and B by normalizing along the last dimensions
And finally obtains dual representations Gx and Gy by a series
    of matrix multiplications and concatenations
"""
def __init__(self, d_model, dropout_rate=0):
super(DualAttention, self).__init__()
self.similarity_linear = nn.Linear(3 * d_model, 1, bias=False)
self.dropout = nn.Dropout(dropout_rate)
self.d_model = d_model
def forward(self, Ex, Ey, mask_x, mask_y):
"""
Given 2 sequence representations X and Y and their masks
produces bidirectionally informed representations
Gx (Y-->X) and Gy (X-->Y)
The X-sequence can be 4d, where some extra steps are applied
Args
Ex: 4d float tensor [bs x K x len_x x d_model] or
3d float tensor [bs x len_x x d_model]
Ey: 3d float tensor [bs x len_y x d_model]
mask_x: 4d bool tensor [bs x K x len_x x 1] or
3d bool tensor [bs x len_x x 1]
mask_y: 3d bool tensor [bs x len_y x 1]
Returns
Gx: 4d float tensor [bs x K x len_x x 5 * d_model] or
3d float tensor [bs x len_x x 5 * d_model]
Gy: 3d float tensor [bs x len_y x 5 * d_model]
"""
if len(Ex.size()) == 3:
bs, len_x, _ = Ex.size()
len_y = Ey.size(1)
Ex = Ex.view(bs, len_x, 1, self.d_model).expand(bs, len_x, len_y, self.d_model)
Ey = Ey.view(bs, 1, len_y, self.d_model).expand(bs, len_x, len_y, self.d_model)
mask_x = mask_x.view(bs, len_x, 1)
mask_y = mask_y.view(bs, 1, len_y)
# 4d float tensor [bs x len_x x len_y x 3 * d_model]
E_cat = torch.cat([Ex, Ey, torch.mul(Ex, Ey)], dim = -1)
# 3d float tensor [bs x len_x x len_y]
U = self.similarity_linear(E_cat).squeeze(-1)
U = U.masked_fill(mask_x * mask_y == 0, -1e9)
A = self.dropout(F.softmax(U, dim = 2))
B = self.dropout(F.softmax(U, dim = 1).transpose(-2, -1))
# reduce the extra dimension
Ex = torch.narrow(Ex, 2, 0, 1).squeeze(2)
Ey = torch.narrow(Ey, 1, 0, 1).squeeze(1)
A_1bar = torch.matmul(A, Ey) # [bs x len_x x d_model]
B_1bar = torch.matmul(B, Ex) # [bs x len_y x d_model]
A_2bar = torch.matmul(A, B_1bar) # [bs x len_x x d_model]
B_2bar = torch.matmul(B, A_1bar) # [bs x len_y x d_model]
Gx = torch.cat([Ex, A_1bar, A_2bar, torch.mul(Ex, A_1bar), torch.mul(Ex, A_2bar)], dim = -1)
Gy = torch.cat([Ey, B_1bar, B_2bar, torch.mul(Ey, B_1bar), torch.mul(Ey, B_2bar)], dim = -1)
else:
bs, K, len_x, _ = Ex.size()
len_y = Ey.size(1)
# create an extra dimension in Ey
Ey = Ey.unsqueeze(1).expand(bs, K, len_y, self.d_model) # (4d): [bs x K x len_y x d_model]
            # concatenate Ex, Ey and their element-wise multiplication along the last dimension
# (5d): [bs x K x len_x x len_y x 3 * d_model]
E_cat = torch.cat(
[
Ex.unsqueeze(3).expand(bs, K, len_x, len_y, self.d_model),
Ey.unsqueeze(2).expand(bs, K, len_x, len_y, self.d_model),
torch.mul(
Ex.unsqueeze(3).expand(bs, K, len_x, len_y, self.d_model),
Ey.unsqueeze(2).expand(bs, K, len_x, len_y, self.d_model),
),
],
dim=-1,
)
# similarity between embeddings of the X and Y sequences
# reduces the last dimension by multiplying E_cat with a vector
U = self.similarity_linear(E_cat).squeeze(-1) # (4d): [bs x K x len_x x len_y]
# apply the two masks
U = U.masked_fill(mask_y.unsqueeze(1).transpose(-1, -2) == 0, -1e9).masked_fill(
mask_x == 0, -1e9
)
            # normalize along the len_y to get weights A and along the len_x dimension to get weights B
A = self.dropout(F.softmax(U, dim=-1)) # (4d): [bs x K x len_x x len_y]
B = self.dropout(F.softmax(U, dim=-2)).transpose(-2, -1) # (4d): [bs x K x len_y x len_x]
# get updated representations
A_1bar = torch.matmul(A, Ey) # (4d) [bs x K x len_x x d_model]
B_1bar = torch.matmul(B, Ex) # (4d): [bs x K x len_y x d_model]
A_2bar = torch.matmul(A, B_1bar) # (4d): [bs x K x len_x x d_model]
B_2bar = torch.matmul(B, A_1bar) # (4d): [bs x K x len_y x d_model]
# reduce the extra dimension in the y-sequence
Ey = torch.narrow(Ey, 1, 0, 1).squeeze(1) # [bs x len_y x d_model]
# Get a unique representation for question by taking the max along the K dimension
B_1bar_m, _ = torch.max(B_1bar, dim=1) # [bs x len_y x d_model]
B_2bar_m, _ = torch.max(B_2bar, dim=1) # [bs x len_y x d_model]
# DCN for dual attention representations
# Gp (4d): [bs x K x len_x x 5 * d_model]
# Gq (3d): [bs x len_y x 5 * d_model]
Gx = torch.cat([Ex, A_1bar, A_2bar, torch.mul(Ex, A_1bar), torch.mul(Ex, A_2bar)], dim=-1)
Gy = torch.cat([Ey, B_1bar_m, B_2bar_m, torch.mul(Ey, B_1bar_m), torch.mul(Ey, B_2bar_m)], dim=-1)
return Gx, Gy
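# --- Hedged usage sketch for the 4d branch of DualAttention (not part of the original file) ---
# dual = DualAttention(d_model=128)
# Ex = torch.rand(2, 4, 10, 128); mask_x = torch.ones(2, 4, 10, 1)   # K=4 passages
# Ey = torch.rand(2, 7, 128);     mask_y = torch.ones(2, 7, 1)       # question
# Gx, Gy = dual(Ex, Ey, mask_x, mask_y)   # Gx: [2 x 4 x 10 x 640], Gy: [2 x 7 x 640]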
class TripleAttention(nn.Module):
"""
Extension of the DualAttention but for three sequences
Obtains globally updated representations for the three sequences
"""
def __init__(self, d_model, dropout_rate = 0):
super(TripleAttention, self).__init__()
self.pq_similarity_linear = nn.Linear(3 * d_model, 1, bias = False)
self.pa_similarity_linear = nn.Linear(3 * d_model, 1, bias = False)
self.qa_similarity_linear = nn.Linear(3 * d_model, 1, bias = False)
self.dropout = nn.Dropout(dropout_rate)
self.d_model = d_model
def forward(self, Ep, Eq, Ea, mask_p, mask_q, mask_a):
"""
Args
Ep: 3d float tensor [bs x L x d_model]
Eq: 3d float tensor [bs x J x d_model]
Ea: 3d float tensor [bs x N x d_model]
mask_p: 3d bool tensor [bs x L x 1]
mask_q: 3d bool tensor [bs x J x 1]
            mask_a: 3d bool tensor [bs x N x 1]
Returns
Gp: 3d float tensor [bs x L x 9 * d_model]
            Gq: 3d float tensor [bs x J x 9 * d_model]
Ga: 3d float tensor [bs x N x 9 * d_model]
"""
bs = len(Ep)
L, J, N = Ep.size(1), Eq.size(1), Ea.size(1)
Epq = Ep.view(bs, L, 1, self.d_model).expand(bs, L, J, self.d_model)
Eqp = Eq.view(bs, 1, J, self.d_model).expand(bs, L, J, self.d_model)
Epa = Ep.view(bs, L, 1, self.d_model).expand(bs, L, N, self.d_model)
Eap = Ea.view(bs, 1, N, self.d_model).expand(bs, L, N, self.d_model)
Eqa = Eq.view(bs, J, 1, self.d_model).expand(bs, J, N, self.d_model)
Eaq = Ea.view(bs, 1, N, self.d_model).expand(bs, J, N, self.d_model)
Upq = self.pq_similarity_linear(torch.cat([Epq, Eqp, torch.mul(Epq, Eqp)], dim = -1)).squeeze(-1) # [bs x L x J]
Upa = self.pa_similarity_linear(torch.cat([Epa, Eap, torch.mul(Epa, Eap)], dim = -1)).squeeze(-1) # [bs x L x N]
Uqa = self.qa_similarity_linear(torch.cat([Eqa, Eaq, torch.mul(Eqa, Eaq)], dim = -1)).squeeze(-1) # [bs x J x N]
Upq = Upq.masked_fill(mask_p * mask_q.transpose(1, 2) == 0, -1e9)
Upa = Upa.masked_fill(mask_p * mask_a.transpose(1, 2) == 0, -1e9)
Uqa = Uqa.masked_fill(mask_q * mask_a.transpose(1, 2) == 0, -1e9)
Apq = self.dropout(F.softmax(Upq, dim = 2)) # [bs x L x J]
Bpq = self.dropout(F.softmax(Upq, dim = 1).transpose(1, 2)) # [bs x J x L]
Apa = self.dropout(F.softmax(Upa, dim = 2)) # [bs x L x N]
Bpa = self.dropout(F.softmax(Upa, dim = 1).transpose(1, 2)) # [bs x N x L]
Aqa = self.dropout(F.softmax(Uqa, dim = 2)) # [bs x J x N]
Bqa = self.dropout(F.softmax(Uqa, dim = 1).transpose(1, 2)) # [bs x N x J]
Apq_bar = torch.matmul(Apq, Eq) # [bs x L x d_model]
Bpq_bar = torch.matmul(Bpq, Ep) # [bs x J x d_model]
Apa_bar = torch.matmul(Apa, Ea) # [bs x L x d_model]
Bpa_bar = torch.matmul(Bpa, Ep) # [bs x N x d_model]
Aqa_bar = torch.matmul(Aqa, Ea) # [bs x J x d_model]
Bqa_bar = torch.matmul(Bqa, Eq) # [bs x N x d_model]
Gp = torch.cat([Ep, Apq_bar, Apa_bar, torch.mul(Ep, Apq_bar), torch.mul(Ep, Apa_bar),
torch.matmul(Apq, Bpq_bar), torch.matmul(Apq, Aqa_bar),
torch.matmul(Apa, Bpa_bar), torch.matmul(Apa, Bqa_bar)], dim = -1)
Gq = torch.cat([Eq, Bpq_bar, Aqa_bar, torch.mul(Eq, Bpq_bar), torch.mul(Eq, Aqa_bar),
torch.matmul(Bpq, Apq_bar), torch.matmul(Bpq, Apa_bar),
torch.matmul(Aqa, Bpa_bar), torch.matmul(Aqa, Bqa_bar)], dim = -1)
Ga = torch.cat([Ea, Bpa_bar, Bqa_bar, torch.mul(Ea, Bpa_bar), torch.mul(Ea, Bqa_bar),
torch.matmul(Bpa, Apq_bar), torch.matmul(Bpa, Apa_bar),
torch.matmul(Bqa, Bpq_bar), torch.matmul(Bqa, Aqa_bar)], dim = -1)
return Gp, Gq, Ga
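# Illustrative (commented-out) usage sketch for TripleAttention; the toy sizes below are
# hypothetical and only meant to show the documented input/output shapes:
#
#   triple = TripleAttention(d_model=8, dropout_rate=0.0)
#   Ep, Eq, Ea = torch.randn(2, 4, 8), torch.randn(2, 3, 8), torch.randn(2, 5, 8)
#   mask_p = torch.ones(2, 4, 1, dtype=torch.bool)
#   mask_q = torch.ones(2, 3, 1, dtype=torch.bool)
#   mask_a = torch.ones(2, 5, 1, dtype=torch.bool)
#   Gp, Gq, Ga = triple(Ep, Eq, Ea, mask_p, mask_q, mask_a)
#   # Gp: [2, 4, 72], Gq: [2, 3, 72], Ga: [2, 5, 72], i.e. 9 * d_model in the last dimension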
class AdditiveAttention(nn.Module):
"""
Attention module that computes a distribution over the tokens in the key,
as well as a context vector, given the tokens in the query. The implementation
is very similar to Bahdanau et al 2015.
Here it is used only to compute the additive attention between the questions
and the answers at the last part of the decoder. The context vector is used to
estimate the lambdas, which are the weights of the distributions in the copying
mechanism and the alphas are used to define the distribution over the tokens
in the question.
"""
def __init__(self, d_model):
super(AdditiveAttention, self).__init__()
self.d_model = d_model
self.q_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model, bias=False)
self.energy_layer = nn.Sequential(nn.Tanh(), nn.Linear(d_model, 1, bias=False))
def forward(self, k, v, q, mask):
"""
Args
k: output of the encoder for question representations (Mq)
(3d float tensor) [bs x J x d_model]
v: is the same as k
q: output of the decoder for answer representation (Ma)
(3d float tensor) [bs x T x d_model]
mask: corresponding mask for the keys
(3d long tensor) [bs x J x 1]
Returns
context: a vector representation for the answers informed by the questions
(3d float tensor) [bs x T x d_model]
alphas: a distribution over the tokens in the questions
(3d float tensor) [bs x T x J]
"""
J = k.size(-2) # seq len for questions
        T = q.size(-2)  # seq len for answers
bs = k.size(0) # batch size
# linear transformation of key and query
k = self.k_linear(k)
q = self.q_linear(q)
# create extra dimensions in k and q that corresponds to the seq_len of the other one
k = k.unsqueeze(2).expand(bs, J, T, self.d_model) # (4d) [bs x J x T x d_model]
q = (
q.unsqueeze(2).expand(bs, T, J, self.d_model).transpose(1, 2)
) # (4d) [bs x J x T x d_model]
        # this layer adds the representation of every token in the question (J)
        # to every token in the representation of the answer (T); that's why we expanded the dimensions before
        # it then reduces the last dimension (d_model) by passing the sum through a (d_model x 1) weight
scores = self.energy_layer(k + q).squeeze(-1) # (3d) [bs x J x T]
        # apply the mask by filling all the corresponding (padded) positions with a large negative value
        scores = scores.masked_fill(mask == 0, -1e9)
        # normalization across the question tokens dimension (J), since this is the key,
        # to get a distribution over the question tokens for each example and for each decoding step "t"
alphas = F.softmax(scores.transpose(-2, -1), dim=-1) # (3d) [bs x T x J]
# reduce the seq_len_questions dimension to get a context vector
context = torch.bmm(alphas, v) # [bs x T x d_model]
return context, alphas
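# Illustrative (commented-out) usage sketch for AdditiveAttention; the sizes below are
# hypothetical and only meant to show the expected shapes:
#
#   add_attn = AdditiveAttention(d_model=8)
#   Mq = torch.randn(2, 6, 8)                      # question keys/values [bs x J x d_model]
#   Ma = torch.randn(2, 4, 8)                      # answer queries       [bs x T x d_model]
#   mask_q = torch.ones(2, 6, 1, dtype=torch.long)
#   context, alphas = add_attn(Mq, Mq, Ma, mask_q)
#   # context: [2, 4, 8], alphas: [2, 4, 6] (a distribution over the J question tokens per decoding step)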
class CombinedAttention(nn.Module):
"""
This is very similar to the Additive Attention module, it computes the context and alphas
using the passage representations as keys and values and the answer representations as queries.
The alphas are modified by the betas, which are the passage relevancies, as calculated by the
ranker module, thus performing a sort of sentence level attention, where the model learns to
attend only to the relevant passages.
"""
def __init__(self, d_model):
super(CombinedAttention, self).__init__()
self.d_model = d_model
self.q_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model, bias=False)
self.energy_layer = nn.Sequential(nn.Tanh(), nn.Linear(d_model, 1, bias=False))
def forward(self, k, v, q, mask, betas):
"""
Args
k: output of the encoder for passage representations (Mp)
(4d float tensor) [bs x K x L x d_model]
v: is the same as k
q: output of the decoder for answer representation (Ma)
(3d float tensor) [bs x T x d_model]
mask: corresponding mask for the keys
(4d long tensor) [bs x K x L x 1]
betas: the relevance scores from the ranker
(2d float tensor) [n_ans x K]
Returns
context: a vector representation for the answers informed by the passages
(3d float tensor) [bs x T x d_model]
            modified_alphas: a distribution over the tokens in the passages (renormalized by the betas)
(3d float tensor) [bs x T x K * L]
"""
# dimension sizes
# batch size (bs)
# num passages (K)
# seq_len_passages (L)
# seq_len_answers (T)
        # concatenated length of passages (KL)
bs, K, L, _ = k.size()
T = q.size(1)
KL = int(K * L)
# merge the K and seq_len_passages
        # basically this concatenates all the passages in an example, making one long sequence
# with length K * L
k = k.view(bs, KL, self.d_model)
mask = mask.view(bs, KL, 1)
# linear transformation on keys and queries
k = self.k_linear(k)
q = self.q_linear(q)
# create extra dimensions in k and q that corresponds to the seq_len of the other one
# the same is done for the mask
k = k.unsqueeze(2).expand(
bs, KL, T, self.d_model
) # (4d) [bs x KL x T x d_model]
q = (
q.unsqueeze(2).expand(bs, T, KL, self.d_model).transpose(1, 2)
) # (4d) [bs x KL x T x d_model]
        # this layer adds the representation of every token in the concatenation of the passages (KL)
        # to every token in the representation of the answer (T); that's why we expanded the dimensions before
        # it then reduces the last dimension (d_model) by passing the sum through a (d_model x 1) weight
scores = self.energy_layer(k + q).squeeze(-1) # (3d) [bs x KL x T]
# apply the mask
scores = scores.masked_fill(mask == 0, -1e9)
        # normalization across the passage dimension (KL) since this is the key
alphas = F.softmax(scores, dim=1) # (3d) [bs x KL x T]
alphas = alphas.view(bs, K, L, T)
        # renormalize with the relevance scores
modified_alphas = alphas * betas.view(bs, K, 1, 1)
modified_alphas /= modified_alphas.sum(dim=(1, 2), keepdims=True)
modified_alphas = modified_alphas.view(bs, KL, T).transpose(
-2, -1
) # [bs x T x KL]
v = v.view(bs, KL, self.d_model)
        # reduce the concatenated passage dimension (KL) to get a context vector
        context = torch.bmm(modified_alphas, v)  # (3d) [bs x seq_len_answers x d_model]
return context, modified_alphas
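if __name__ == "__main__":
    # Minimal shape-check sketch for CombinedAttention. The sizes and the fake relevance
    # scores below are made up for illustration and are not the project's real settings.
    _attn = CombinedAttention(d_model=8)
    _bs, _K, _L, _T = 2, 3, 4, 5
    _k = torch.randn(_bs, _K, _L, 8)                        # passage representations (Mp)
    _q = torch.randn(_bs, _T, 8)                            # answer representations (Ma)
    _mask = torch.ones(_bs, _K, _L, 1, dtype=torch.long)    # no padding in this toy example
    _betas = torch.softmax(torch.randn(_bs, _K), dim=-1)    # fake ranker relevance scores
    _context, _alphas = _attn(_k, _k, _q, _mask, _betas)
    print(_context.shape, _alphas.shape)                    # [2, 5, 8] and [2, 5, 12]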
```
#### File: preast_qa/models/classifier.py
```python
import torch
from torch import nn
class Classifier(nn.Module):
"""
Classification module for answer possibility of each example
"linear" corresponds to the classification method of Nishida et al.
"max" corresponds to the max pooling method of this implementation
"""
def __init__(self, args):
super(Classifier, self).__init__()
self.d_model = args.d_model
self.K = args.max_num_passages
self.method = args.cls_method
if self.method == "linear":
self.cls_layer = nn.Linear(self.K * self.d_model, 1, bias=False)
elif self.method == "max":
self.cls_layer = nn.Linear(self.d_model, 1, bias=False)
def forward(self, Mp_cls):
"""
Args
Mp_cls: the first token of every passage (cls token),
(3d float tensor) [bs x K x d_model]
Returns
            cls_scores: the score that the question is answerable given the set of passages
(1d float tensor) [bs]
"""
# input for the classifier
if self.method == "linear":
            # concatenate the cls tokens along the model dimension
x = Mp_cls.reshape(-1, self.K * self.d_model) # (2d) [bs x d_model * K]
elif self.method == "max":
# max pooling along the number of passages
x = torch.max(Mp_cls, dim=1)[0] # (2d) [bs x d_model]
# pass them through the cls layer to get the scores per example
cls_scores = self.cls_layer(x).squeeze(-1) # (1d) [bs]
return cls_scores
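if __name__ == "__main__":
    # Minimal sketch of the classifier with a stand-in argument object; the values below
    # (d_model=8, K=3, "max" pooling) are illustrative, not the project's defaults.
    from types import SimpleNamespace
    _args = SimpleNamespace(d_model=8, max_num_passages=3, cls_method="max")
    _cls = Classifier(_args)
    _Mp_cls = torch.randn(2, 3, 8)   # [bs x K x d_model] cls token of every passage
    print(_cls(_Mp_cls).shape)       # torch.Size([2])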
```
#### File: preast_qa/models/embedder.py
```python
import torch
from torch import nn
import math
class Embedder(nn.Module):
"""
    Projects the indices of a sequence through an embedding layer and adds positional embeddings
"""
def __init__(self, glove_vectors, pad_idx, dropout_rate, max_seq_len = 100):
"""
glove vectors: the pre-trained glove embeddings of all the tokens in the fixed vocabulary
(2d float tensor) [d_fixed x d_emb]
pad_idx: int
dropout_rate: float
max_seq_len: int
"""
super(Embedder, self).__init__()
d_emb = glove_vectors.size(1)
# initialize embeddings matrix
self.embedding_layer = nn.Embedding.from_pretrained(
glove_vectors, padding_idx=pad_idx, freeze=False
)
# initialize positional embeddings matrix
positional_encoder = torch.zeros(max_seq_len, d_emb)
position = torch.arange(0, max_seq_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_emb, 2).float() * (-math.log(10000.0) / d_emb))
positional_encoder[:, 0::2] = torch.sin(position * div_term)
positional_encoder[:, 1::2] = torch.cos(position * div_term)
positional_encoder = positional_encoder.unsqueeze(0)
self.register_buffer("positional_encoder", positional_encoder)
        # initialize dropout layer
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x):
"""
Args:
x The indices of a sequence in the fixed vocabulary
(2d long tensor) [batch_size x sequence_length]
Returns:
Embeddings for sequence x
(3d float tensor) [batch_size x sequence_length x d_emb]
"""
seq_len = x.size(1)
emb = self.embedding_layer(x)
return self.dropout(emb + self.positional_encoder[0, :seq_len, :])
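if __name__ == "__main__":
    # Minimal sketch with random stand-in "GloVe" vectors (the real embeddings are loaded
    # elsewhere in the project); it only checks that the positional encodings broadcast.
    _vocab_size, _d_emb = 50, 16
    _embedder = Embedder(torch.randn(_vocab_size, _d_emb), pad_idx=0, dropout_rate=0.0, max_seq_len=20)
    _x = torch.randint(0, _vocab_size, (2, 10))   # [batch_size x sequence_length]
    print(_embedder(_x).shape)                    # torch.Size([2, 10, 16])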
```
#### File: preast_qa/models/sublayers.py
```python
import torch
from torch import nn
import torch.nn.functional as F
class HighwayNetwork(nn.Module):
"""
Highway network as implemented in Srivastava et al 2015
It is used to fuse the embeddings of GloVe and ELMo
before entering the Reader or Decoder modules
"""
def __init__(self, d_emb, dropout_rate, non_lin = "relu", num_layers = 2):
super(HighwayNetwork, self).__init__()
non_lin = non_lin.lower()
assert non_lin in ["relu", "tanh"]
        self.HighwayLayers = nn.Sequential(*[HighwayLayer(d_emb, dropout_rate, non_lin) for _ in range(num_layers)])
def forward(self, x):
"""
Args
x: (3d float tensor)
Returns
            y: (3d float tensor), same dimensionality as x
"""
y = self.HighwayLayers(x)
return y
class HighwayLayer(nn.Module):
"""
Highway layer module for use in the Highway network module
"""
def __init__(self, d_emb, dropout_rate, non_lin = "relu"):
super(HighwayLayer, self).__init__()
self.H = nn.Sequential(nn.Linear(d_emb, d_emb),
nn.ReLU() if non_lin == "relu" else nn.Tanh())
self.T = nn.Sequential(nn.Linear(d_emb, d_emb),
nn.Sigmoid())
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x):
"""
Args
x: (3d float tensor)
Returns
            y: (3d float tensor), same dimensionality as x
"""
T = self.T(x)
y = self.dropout(torch.mul(self.H(x), T) + torch.mul(x, 1 - T))
return y
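# The layer above implements the usual highway gate y = H(x) * T(x) + x * (1 - T(x)),
# followed by dropout. Illustrative (commented-out) usage with toy sizes that are not
# taken from the real configuration:
#
#   highway = HighwayNetwork(d_emb=16, dropout_rate=0.0)
#   x = torch.randn(2, 10, 16)
#   y = highway(x)    # same shape as x: [2, 10, 16]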
class FeedForwardNetwork(nn.Module):
"""
    2-layer feed-forward network module with d_inner != d_model
    and a GELU activation in between. It is used as the last sublayer of
the decoder module
"""
def __init__(self, d_model, d_inner):
super(FeedForwardNetwork, self).__init__()
self.lin1 = nn.Linear(d_model, d_inner)
self.lin2 = nn.Linear(d_inner, d_model)
def forward(self, x):
"""
Args
x: (3d float tensor)
Returns
            y: (3d float tensor), same dimensionality as x
"""
y = self.lin2(F.gelu(self.lin1(x)))
return y
```
#### File: preast_qa/question_answering/qa_train_helpers.py
```python
import torch
from torch.utils.data import DataLoader, BatchSampler
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import os
from datetime import datetime
import seaborn as sns
sns.set()
from qa_metrics import calculate_ranking_metrics, calculate_auc_score
from qa_dataset import QaDataset
from qa_batch_sampler import QaBatchSampler
from qa_collate_fn import qa_collate_fn
from qa_model import QaModel
from my_losses import DecoderLoss, RankerLoss, ClassifierLoss
def init_dataloader(args, n_gpu):
"""
Initializes dataloader for training
Args:
args: argument parser object
n_gpu: int
Returns:
dataloader: torch.utils.data.DataLoader
"""
if args.custom_batch_sampler:
dataset = QaDataset(args)
my_batch_sampler = BatchSampler(QaBatchSampler(dataset,
args.batch_size,
max(n_gpu, 1),
args.num_answerable_per_batch),
args.batch_size,
drop_last = True)
dataloader = DataLoader(dataset,
batch_sampler = my_batch_sampler,
collate_fn = qa_collate_fn,
num_workers = args.num_workers,
pin_memory = args.pin_memory and n_gpu > 0)
else:
dataloader = DataLoader(QaDataset(args),
args.batch_size,
collate_fn = qa_collate_fn,
shuffle = True,
num_workers = args.num_workers,
pin_memory = args.pin_memory and n_gpu > 0,
drop_last = True)
print(f"Initialized dataset with {len(dataloader.dataset)} examples")
print(f"Initialized dataloader with {dataloader.num_workers} number of workers")
print(f" pin memory option set to {dataloader.pin_memory}")
print(f" custom batch sampler set to {args.custom_batch_sampler}")
return dataloader
def init_model(args, dataloader, main_device, device_list, checkpoint = None):
"""
Initializes model in training mode
Args:
args: argument parser object
dataloader: torch.utils.data.dataloader
main_device: torch.device
device_list: list[torch.device]
checkpoint: dict (see ssave)
Returns:
model: torch.nn.module
"""
model = QaModel(args, dataloader.dataset.fixed_token2id, main_device, only_encoder = not args.include_dec)
if checkpoint:
model.load_state_dict({k: v.data if ("positional" in k)
else checkpoint["model"][k] for k, v in model.state_dict().items()})
print(f"Loaded checkpoint from run: {args.run_name}")
# Optionally parallelize model
if len(device_list) > 1:
model = torch.nn.DataParallel(model, device_ids = device_list, output_device = main_device)
# send to device and start training mode
model.to(main_device)
model.train()
return model
def init_loss_fns(args, dataloader, main_device, device_list):
"""
    Initialize the loss functions for the three tasks
Args:
args: argument parser object
dataloader: torch.data.utils.Dataloader
main_device: torch.device
device_list: list[torch.device]
Returns:
dec_loss_fn: torch.nn.module
rnk_loss_fn: torch.nn.module
cls_loss_fn: torch.nn.module
"""
    # initialize the individual loss functions for the three tasks (they are combined into a total loss later)
dec_loss_fn = DecoderLoss(dataloader.dataset.pad_idx, dataloader.dataset.unk_idx)
rnk_loss_fn = RankerLoss(args, label_smoothing = True)
cls_loss_fn = ClassifierLoss(args, label_smoothing = True)
if len(device_list) > 1:
dec_loss_fn = torch.nn.DataParallel(dec_loss_fn, device_ids = device_list, output_device = main_device)
rnk_loss_fn = torch.nn.DataParallel(rnk_loss_fn, device_ids = device_list, output_device = main_device)
cls_loss_fn = torch.nn.DataParallel(cls_loss_fn, device_ids = device_list, output_device = main_device)
dec_loss_fn = dec_loss_fn.to(main_device)
rnk_loss_fn = rnk_loss_fn.to(main_device)
cls_loss_fn = cls_loss_fn.to(main_device)
return dec_loss_fn, rnk_loss_fn, cls_loss_fn
def plot_grad_flow(named_parameters, global_train_step, writer, gradient_save_path):
"""
Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Taken from https://discuss.pytorch.org/t/check-gradient-flow-in-network/15063/10
Args:
named_parameters: model parameters
global_train_step: int
writer: torch writer object
gradient_save_path: str
"""
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if p.requires_grad and ("bias" not in n) and (p.grad is not None):
writer.add_histogram(n, p.grad, global_train_step)
if torch.max(p.grad).item() != .0:
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
else:
layers.append(n)
ave_grads.append(-0.5)
max_grads.append(-1)
plt.bar(np.arange(len(max_grads)), max_grads, alpha = 0.1, lw = 1, color = "c")
plt.bar(np.arange(len(max_grads)), ave_grads, alpha = 0.1, lw = 1, color = "b")
plt.hlines(0, 0, len(ave_grads) + 1, lw = 0.5, color = "k", alpha = 0.1)
plt.xticks(range(0, len(ave_grads), 1), layers, rotation = 90, fontsize = 2.5)
plt.yticks(fontsize = 6)
plt.xlim(left = -0.75, right = len(ave_grads))
plt.ylim(bottom = -0.001, top = 0.02) # zoom in on the lower gradient regions
plt.xlabel("Layers", fontsize = 6)
plt.ylabel("average gradient", fontsize = 6)
plt.title("Gradient flow", fontsize = 6)
plt.grid(True)
plt.legend([Line2D([0], [0], color = "c", lw = 4),
Line2D([0], [0], color = "b", lw = 4),
Line2D([0], [0], color = "k", lw = 4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
plt.tight_layout()
file_path = os.path.join(gradient_save_path, str(global_train_step) + ".png")
plt.savefig(file_path, dpi = 400)
def get_store_dicts():
"""
To store stats and metrics during training
"""
results_init = {"loss": {"dec": [], "rnk": [], "cls": []},
"ranking": {"trg": [], "prob": []},
"classification": {"trg": [], "prob": []},
"style": [],
"lambdas": {"passage": [], "question": [], "vocabulary": []}}
performance = {"loss": {}, "ranking": {}, "classification": {}, "other": {}, "lambdas": {}}
return results_init, performance
def pprint_and_log(writer, results, performance, global_train_step, lr, args):
"""
calculates performance of the sampled iterations during the last <args.print_and_log_every> iterations
prints performance
logs performance to the summary writer
Args:
writer: torch writer object
results: dict[dict] (from get_store_dicts)
performance: dict[dict] (from get_store_dicts)
global_train_step: int
lr: float
args: argument parser object
"""
performance["loss"]["rnk"] = np.mean(results["loss"]["rnk"])
performance["loss"]["cls"] = np.mean(results["loss"]["cls"])
performance["other"]["learning_rate"] = lr
performance["ranking"]["map"], performance['ranking']["mrr"] = calculate_ranking_metrics(*results["ranking"].values())
performance["classification"]["auc"] = calculate_auc_score(*results["classification"].values())
if args.include_dec:
        mask_qa = np.array([1 if style == "qa" else 0 for style in results["style"]]).astype(bool)
mask_nlg = ~mask_qa
results["loss"]["dec"] = np.array(results["loss"]["dec"])
performance["loss"]["dec"] = results["loss"]["dec"].mean()
performance["loss"]["dec_qa"] = results["loss"]["dec"][mask_qa].mean() if mask_qa.any() else 0
performance["loss"]["dec_nlg"] = results["loss"]["dec"][mask_nlg].mean() if mask_nlg.any() else 0
results["lambdas"]["passage"] = np.array(results["lambdas"]["passage"])
results["lambdas"]["question"] = np.array(results["lambdas"]["question"])
results["lambdas"]["vocabulary"] = np.array(results["lambdas"]["vocabulary"])
performance["lambdas"]["passage_all"] = np.mean(results["lambdas"]["passage"])
performance["lambdas"]["question_all"] = np.mean(results["lambdas"]["question"])
performance["lambdas"]["vocabulary_all"] = np.mean(results["lambdas"]["vocabulary"])
performance["lambdas"]["passage_qa"] = results["lambdas"]["passage"][mask_qa].mean() if mask_qa.any() else 0
performance["lambdas"]["question_qa"] = results["lambdas"]["question"][mask_qa].mean() if mask_qa.any() else 0
performance["lambdas"]["vocabulary_qa"] = results["lambdas"]["vocabulary"][mask_qa].mean() if mask_qa.any() else 0
performance["lambdas"]["passage_nlg"] = results["lambdas"]["passage"][mask_nlg].mean() if mask_nlg.any() else 0
performance["lambdas"]["question_nlg"] = results["lambdas"]["question"][mask_nlg].mean() if mask_nlg.any() else 0
performance["lambdas"]["vocabulary_nlg"] = results["lambdas"]["vocabulary"][mask_nlg].mean() if mask_nlg.any() else 0
performance["loss"]["total"] = performance["loss"]["dec"] + args.gamma_rnk * performance["loss"]["rnk"] + \
args.gamma_cls * performance["loss"]["cls"]
performance["other"]["nlg_percent"] = mask_nlg.mean()
performance["other"]["ans_percent"] = len(results["loss"]["dec"]) / len(results["loss"]["cls"])
else:
performance["loss"]["total"] = args.gamma_rnk * performance["loss"]["rnk"] + \
args.gamma_cls * performance["loss"]["cls"]
performance["loss"]["dec"] = 0
dt = datetime.now().time().replace(microsecond = 0)
print("[{}] Step {}: DEC {:.4f} | RNK {:.4f} | CLS {:.4f} | TTL {:.4f} | MAP {:.4f} | AUC {:.4f} | lr = {:.6f}".format(
dt, str(global_train_step).zfill(5), performance["loss"]["dec"],
performance["loss"]["rnk"], performance["loss"]["cls"],
performance["loss"]["total"], performance["ranking"]["map"],
performance["classification"]["auc"], performance["other"]["learning_rate"]))
if writer is not None:
for field in ["loss", "ranking", "classification", "lambdas", "other"]:
for metric in performance[field].keys():
writer.add_scalar(field + "/" + metric, performance[field][metric], global_train_step)
```
#### File: preast_qa/style_transfer/st_eval.py
```python
from torch.utils.data import DataLoader
import torch
import os
from multiprocessing import cpu_count
import json
from tqdm import tqdm
import sys
os.chdir("style_transfer")
sys.path.append("./../models")
sys.path.append("./../general_modules")
from st_dataset import StDataset
from st_argument_parser_helper import parse_arguments
from st_collate_fn import st_collate_fn
from st_eval_helpers import get_architecture, decode_fn, init_eval_model
from postprocess_decoded_seq import postprocess_decoded_seq
from my_tokenizer import construct_tokenizer
tokenizer = construct_tokenizer()
def eval(args):
assert args.mode in ["eval", "infer"]
# number of cpu and gpu devices
n_gpu = torch.cuda.device_count()
n_cpu = cpu_count()
print(f"Number of cuda devices: {n_gpu} | Number of CPU cores: {n_cpu}")
# specify main device and all devices (if gpu available)
device_list = [torch.device(f"cuda:{i}") for i in range(n_gpu)]
main_device = device_list[0] if n_gpu > 0 else torch.device("cpu")
print(f"Main device: {main_device}")
print(f"Parallel devices = {device_list}")
checkpoint_path = os.path.join(args.checkpoint_path, args.run_name)
specific_checkpoint = os.listdir(checkpoint_path)[-1] if args.run_subname == "" else args.run_subname
specific_checkpoint_path = os.path.join(checkpoint_path, specific_checkpoint)
checkpoint = torch.load(specific_checkpoint_path, map_location = main_device)
print(f"Loaded checkpoint: {specific_checkpoint} of run: {args.run_name}")
args = get_architecture(checkpoint["args"], args)
if args.eval_path == "":
eval_path = os.path.join("evaluation", args.run_name, specific_checkpoint.split("_")[-1].split(".")[0])
else:
eval_path = args.eval_path
os.makedirs(eval_path, exist_ok = True)
dataloader = DataLoader(StDataset(args),
args.batch_size,
collate_fn = st_collate_fn,
shuffle = False,
num_workers = args.num_workers,
drop_last = False)
print(f"Initialized dataset with {len(dataloader.dataset)} examples")
model = init_eval_model(args, dataloader.dataset.fixed_token2id, main_device, device_list, checkpoint["model"])
with torch.no_grad():
for i, batch in tqdm(enumerate(iter(dataloader)), miniters = 1000):
take_eval_step(batch, model, main_device, eval_path, args.mode)
def take_eval_step(batch, model, main_device, eval_path, mode):
# Representations of the sequences in the fixed vocabulary (indices)
passage_fixed_vectors = batch[0].to(main_device) # 2d long tensor [batch_size x seq_len_passage]
    query_fixed_vectors = batch[1].to(main_device) # (2d long tensor) [batch_size x seq_len_question]
qa_answer_fixed_vectors = batch[2].to(main_device) # 2d long tensor [batch_size x seq_len_qa]
    # Representation of the concatenation of passage, question and qa_answer in the extended vocabulary
source_ext_vectors = batch[5].to(main_device) # (2d long tensor) [batch_size x seq_len_passage + seq_len_question + seq_len_answer]
if batch[6] is not None:
batch_ext_token2id, is_extended = batch[6], True
else:
batch_ext_token2id, is_extended = tokenizer.vocab, False
# the target nlg answers for each example (empty if in inference mode)
trg_nlg_answers = batch[7]
query_ids = batch[8]
d_ext_vocab = source_ext_vectors.max().item() + 1
del batch
# forward pass
preds, avg_lambdas, lengths = model(passage_fixed_vectors, query_fixed_vectors, qa_answer_fixed_vectors,
source_ext_vectors, d_ext_vocab, autoregressive = True)
    # optionally add the extra tokens to the tokenizer
if is_extended:
extra_tokens = list(batch_ext_token2id.keys())[len(tokenizer.vocab):]
tokenizer.add_tokens(extra_tokens)
# decode the predictions into strings
pred_nlg_answers = decode_fn(preds, tokenizer)
pred_nlg_answers = postprocess_decoded_seq(pred_nlg_answers)
    # restore the tokenizer to its original vocabulary
if is_extended:
tokenizer.remove_tokens(extra_tokens)
if mode == "infer":
with open(os.path.join(eval_path, "predictions_infer.json"), "a") as f:
for pred_nlg, q_id in zip(pred_nlg_answers, query_ids):
json.dump({"answers": [pred_nlg], "query_id": int(q_id)}, f)
f.write("\n")
else:
with open(os.path.join(eval_path, "predictions_eval.json"), "a") as f:
for pred_nlg, q_id in zip(pred_nlg_answers, query_ids):
json.dump({"answers": [pred_nlg], "query_id": int(q_id)}, f)
f.write("\n")
with open(os.path.join(eval_path, "reference_eval.json"), "a") as f:
for trg_nlg, q_id in zip(trg_nlg_answers, query_ids):
json.dump({"answers": trg_nlg, "query_id": int(q_id)}, f)
f.write("\n")
with open(os.path.join(eval_path, "lambdas.json"), "a") as f:
for l, q_id in zip(avg_lambdas.tolist(), query_ids):
json.dump({"lambdas": {"vocabulary": l[0], "question": l[1], "qa_answer": l[2], "passage": l[3]},
"query_id": int(q_id)}, f)
f.write("\n")
with open(os.path.join(eval_path, "lengths.json"), "a") as f:
for length, q_id in zip(lengths.tolist(), query_ids):
json.dump({"length": length, "query_id": int(q_id)}, f)
f.write("\n")
if __name__ == '__main__':
args = parse_arguments()
eval(args)
```
#### File: preast_qa/style_transfer/st_model.py
```python
import torch
from torch import nn
import torch.nn.functional as F
import os
from create_embeddings import create_embeddings
from embedder import Embedder
from reader import StReader
from transformer_blocks import StTransformerDecoder
from multi_src_pointer_gen import StMultiSrcPointerGen
import my_constants
class StModel(nn.Module):
"""
Transformer-based encoder-decoder that learns a function
f(passage, question, extractive answers) --> abstractive answer
    The encoder is made of shared and sequence-specific transformer encoder blocks
    for the three input sequences, plus several dual attention modules
    to blend cross-sequence information.
    The decoder is a modified transformer decoder, where each layer has two extra encoder-decoder
    attention sublayers to account for the extra input sequences.
    A multi-source pointer-generator is employed to either copy from the three input sequences
    or generate a token from the fixed vocabulary.
"""
def __init__(self, args, fixed_token2id):
"""
Args:
args: argument parser object
fixed_token2id: dict[str: int]
"""
super(StModel, self).__init__()
# load glove embeddings for the fixed vocabulary
data_path = "./../data"
if "embeddings.pt" not in os.listdir(data_path):
glove_path = os.path.join(data_path, args.embeddings_name)
create_embeddings(glove_path)
glove_vectors = torch.load(os.path.join(data_path, "embeddings.pt"))
self.pad_idx = fixed_token2id[my_constants.pad_token]
self.eos_idx = fixed_token2id[my_constants.eos_token]
self.unk_idx = fixed_token2id[my_constants.unk_token]
self.bos_idx = fixed_token2id[my_constants.nlg_token]
self.cls_idx = fixed_token2id[my_constants.cls_token]
self.qa_idx = fixed_token2id[my_constants.qa_token]
self.max_seq_len_passage = args.max_seq_len_passage
self.max_seq_len_question = args.max_seq_len_question
self.max_seq_len_qa_answer = args.max_seq_len_qa_answer
self.max_seq_len_nlg_answer = args.max_seq_len_nlg_answer
max_seq_len = max(self.max_seq_len_passage, self.max_seq_len_question, self.max_seq_len_qa_answer, self.max_seq_len_nlg_answer)
self.d_vocab, self.d_emb = glove_vectors.size()
self.d_model = args.d_model
self.embedder = Embedder(glove_vectors, self.pad_idx, args.emb_dropout_rate, max_seq_len)
self.reader = StReader(args, self.d_emb, self.pad_idx)
self.decoder = StTransformerDecoder(args, self.d_emb)
# special token mask (exclude this from output vocabulary distribution)
special_mask_idx = [self.pad_idx, self.unk_idx, self.bos_idx, self.cls_idx, self.qa_idx]
self.multi_source_pointer_gen = StMultiSrcPointerGen(self.d_model, self.d_vocab, self.d_emb, special_mask_idx)
# whether to share the weights of input and output embeddings
if args.tie_embeddings:
self.multi_source_pointer_gen.vocab_generator[1].weight = nn.Parameter(self.embedder.embedding_layer.weight.data)
def forward(self, p, q, qa, source_ext, d_ext_vocab, nlg = None, autoregressive = False):
"""
Inputs
p: The indices of the tokens in the passages
(2d long tensor) [batch_size x seq_len_passages]
q: The indices of the tokens in the question
(2d long tensor) [batch_size x seq_len_question]
qa: The indices of the tokens in the QA answer
(2d long tensor) [batch_size x seq_len_qa]
            source_ext: The indices of the tokens of the concatenation of passages, question and QA answer
                (2d long tensor) [batch_size x num_passages * seq_len_passages + seq_len_question + seq_len_qa]
d_ext_vocab: the size of the extended vocabulary (int)
nlg: The indices of the tokens in the NLG answer
(2d long tensor) [batch_size x seq_len_nlg]
autoregressive: whether the model is in autoregressive mode (bool)
Returns
(Regular)
            dec_scores: The probabilities in the extended vocabulary of the predictions of the NLG answers
(3d float tensor) [batch_size x seq_len_nlg x d_ext_vocab]
(Autoregressive)
preds: Indices in the extended vocabulary of the NLG predictions
(2d long tensor) [batch_size x seq_len_nlg]
avg_lambdas: the average weight of each distribution for each answer
(2d float tensor) [batch_size x 4]
lengths: the length of each answer
(1d long tensor) [bs]
"""
bs = p.size(0) # batch_size
d_ext_vocab = max(d_ext_vocab, self.d_vocab)
current_device = torch.device(f"cuda:{torch.cuda.current_device()}") if torch.cuda.is_available() else torch.device("cpu")
mask_p, mask_q, mask_qa, mask_nlg = self.create_masks(p, q, qa, nlg, device = current_device)
        # pass the sequences through the embedding layer
# 3d float tensor [bs x len x d_emb]
p_emb = self.embedder(p)
q_emb = self.embedder(q)
qa_emb = self.embedder(qa)
# pass the sequences through the reader
# 3d float tensors [bs x len x d_model]
Mp, Mq, Mqa = self.reader(p_emb, q_emb, qa_emb, mask_p, mask_q, mask_qa)
if not autoregressive:
nlg_emb = self.embedder(nlg)
# pass the outputs of the encoder and the answer input tokens through the decoder
Mnlg = self.decoder(Mp, Mq, Mqa, nlg_emb, mask_p, mask_q, mask_qa, mask_nlg) # (3d): [bs x T x d_model]
            # pass the outputs of the encoder and the decoder through the multi-source pointer generator
            # to get the predictions for the answer sequence, along with the source tokens for estimating the final distribution
            # (3d float): [bs x T x d_ext_vocab]
dec_scores, lambdas = self.multi_source_pointer_gen(Mp, Mq, Mqa, Mnlg, mask_p, mask_q, mask_qa,
source_ext, current_device, d_ext_vocab)
return dec_scores, lambdas
else:
cond1, cond2 = True, True
preds, sum_lambdas, lengths = None, None, None
not_completed = torch.ones(bs, dtype = torch.bool, device = current_device)
decoding_step = 1
bos_init = torch.ones(bs, dtype = torch.long, device = current_device) * self.bos_idx
seq_indices = bos_init.clone()
while cond1 and cond2:
num_not_completed = not_completed.long().sum()
# (3d): [num_not_completed x decoding_step x d_emb]
nlg_emb = self.embedder(seq_indices[not_completed].view(num_not_completed, decoding_step))
# pass the outputs of the encoder and the answer input tokens through the decoder
Mnlg = self.decoder(Mp[not_completed], Mq[not_completed], Mqa[not_completed],
nlg_emb, mask_p[not_completed], mask_q[not_completed], mask_qa[not_completed]) # (3d): [num_not_completed x decoding_step x d_model]
                # pass the outputs of the encoder and the decoder through the multi-source pointer generator
                # to get the predictions for the answer sequence, along with the source tokens for estimating the final distribution
                # dec_scores: (3d): [num_not_completed x decoding_step x d_ext_vocab]
dec_scores, lambdas = self.multi_source_pointer_gen(Mp[not_completed], Mq[not_completed],
Mqa[not_completed], Mnlg, mask_p[not_completed], mask_q[not_completed],
mask_qa[not_completed], source_ext[not_completed],
current_device, d_ext_vocab)
dec_scores_i = torch.zeros((bs, d_ext_vocab), dtype = torch.float, device = current_device)
dec_scores_i[:, self.pad_idx] = 1.
dec_scores_i[not_completed, :] = dec_scores[:, -1, :]
preds_i = torch.argmax(dec_scores_i, dim = -1)
                # append the predictions and lambdas of this decoding step to the parent tensors
if preds is not None:
preds = torch.cat([preds, preds_i.unsqueeze(1)], dim = 1) # [bs x decoding_step]
sum_lambdas[not_completed] += lambdas[:, -1, :] # [bs x 4]
lengths += not_completed.long() # [bs]
                # initialize the parent tensors with the first decoding step predictions and lambdas
else:
preds = preds_i.contiguous().unsqueeze(1) # [bs x 1]
sum_lambdas = lambdas[:, -1, :] # [bs x 4]
lengths = torch.ones(bs, dtype = torch.long, device = current_device) # [bs]
# prepare the extra input for the next decoding step
                # if the prediction was not in the fixed vocab, we replace it with the unk token
seq_indices = torch.cat([bos_init.unsqueeze(1), preds], dim = 1)
seq_indices[(seq_indices > self.d_vocab - 1)] = self.unk_idx
# get the new not_completed mask
not_completed = (seq_indices != self.eos_idx).all(dim = 1)
# update conditions for stopping
cond1 = not_completed.any().item()
cond2 = seq_indices.size(-1) < self.max_seq_len_nlg_answer
# increase the decoding step
decoding_step += 1
padding = self.max_seq_len_nlg_answer - preds.size(1) - 1
if padding > 0:
preds = F.pad(preds, (0, padding), "constant", self.pad_idx)
avg_lambdas = sum_lambdas / lengths.unsqueeze(1)
return preds, avg_lambdas, lengths
def create_masks(self, p, q, qa, nlg = None, device = None):
"""
Args:
p: 2d long tensor [bs x L]
q: 2d long tensor [bs x J]
qa: 2d long tensor [bs x N]
nlg: 2d long tensor [bs x T]
device: torch.device
Returns:
mask_p: 3d bool tensor [bs x L x 1]
mask_q: 3d bool tensor [bs x J x 1]
mask_qa: 3d bool tensor [bs x N x 1]
mask_nlg: 3d bool tensor [bs x T x T]
"""
# create standard masks
mask_p = (p != self.pad_idx).unsqueeze(-1)
mask_q = (q != self.pad_idx).unsqueeze(-1)
        mask_qa = (qa != self.pad_idx).unsqueeze(-1)
if nlg is not None:
# standard mask for answer
mask_nlg_src = (nlg != self.pad_idx).unsqueeze(-1) # (3d): [batch_size x seq_len_answer x 1]
# no-peak into the future mask for answer
T = nlg.size(1)
mask_nlg_trg = torch.tril(torch.ones([1, T, T], dtype = torch.float, device = device)) # (3d) [1 x seq_len_answer x seq_len_answer]
# combined mask for answer
mask_nlg = torch.mul(mask_nlg_src, mask_nlg_trg).transpose(-1, -2).bool()
else:
mask_nlg = None
        return mask_p, mask_q, mask_qa, mask_nlg
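if __name__ == "__main__":
    # Illustrative sketch of how the decoder mask above is built: a padding mask combined
    # with a lower-triangular "no peek into the future" mask (toy sizes, pad index assumed 0).
    _nlg = torch.tensor([[5, 6, 7, 0]])                  # one sequence with a trailing pad token
    _mask_src = (_nlg != 0).unsqueeze(-1)                # [1 x T x 1] padding mask
    _mask_trg = torch.tril(torch.ones(1, 4, 4))          # [1 x T x T] causal mask
    _mask_nlg = torch.mul(_mask_src, _mask_trg).transpose(-1, -2).bool()
    print(_mask_nlg.long())                              # the combined padding + causal mask in the layout the decoder expects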
```
#### File: preast_qa/style_transfer/st_train.py
```python
import torch
from tensorboardX import SummaryWriter
import numpy as np
import os
from datetime import datetime
from multiprocessing import cpu_count
import gc
import random
import sys
os.chdir("style_transfer")
sys.path.append("./../models")
sys.path.append("./../general_modules")
from st_argument_parser_helper import parse_arguments
from st_train_helpers import init_dataloader, init_model, pprint_and_log
from my_losses import DecoderLoss
from my_train_helpers import init_optimizer, ssave
def train(args):
args.mode = "train"
# number of cpu and gpu devices
n_gpu = torch.cuda.device_count()
n_cpu = cpu_count()
print(f"Number of cuda devices: {n_gpu} | Number of CPU cores: {n_cpu}")
# specify main device and all devices (if gpu available)
device_list = [torch.device(f"cuda:{i}") for i in range(n_gpu)]
main_device = device_list[0] if n_gpu > 0 else torch.device("cpu")
print(f"Main device: {main_device}")
print(f"Parallel devices = {device_list}")
if args.deterministic:
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
# initialize cuDNN backend
if args.cudnn_backend and n_gpu > 0:
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = False
    # paths for saving gradient flow and checkpoints
if args.saving:
checkpoint_path = os.path.join(args.checkpoint_path, args.run_name)
else:
checkpoint_path = None
# load checkpoint and fetch its arguments
if args.load_checkpoint:
        # most recent checkpoint if not specified
specific_checkpoint = os.listdir(checkpoint_path)[-1] if args.run_subname == "" else args.run_subname
specific_checkpoint_path = os.path.join(checkpoint_path, specific_checkpoint)
checkpoint = torch.load(specific_checkpoint_path, map_location = main_device)
args = checkpoint["args"]
print(f"Loaded arguments from {specific_checkpoint_path}")
else:
checkpoint = None
# initialize dataloader module
dataloader = init_dataloader(args)
# initialize masque model, optionally load checkpoint and wrap in DataParallel
model = init_model(args, dataloader, main_device, device_list, checkpoint)
    # initialize custom optimizer, optionally load state from checkpoint
optimizer, current_epoch, current_train_step, global_train_step = init_optimizer(args, model, dataloader, checkpoint)
results = {"loss": [],
"lambdas": {"vocab": [], "question": [], "qa_answer": [], "passage": []}}
# initialize summary writer
writer = SummaryWriter(os.path.join("runs", args.run_name)) if args.saving else None
    # initialize the loss function
loss_fn = DecoderLoss(dataloader.dataset.pad_idx, dataloader.dataset.unk_idx)
if n_gpu > 1:
loss_fn = torch.nn.DataParallel(loss_fn, device_ids = device_list, output_device = main_device)
loss_fn = loss_fn.to(main_device)
    # create the checkpoint folder if needed
if not bool(checkpoint) and args.saving:
os.mkdir(checkpoint_path)
gc.collect()
for epoch in range(current_epoch, args.max_epochs):
for train_step, batch in enumerate(dataloader, start = current_train_step):
global_train_step += 1
try:
take_train_step(batch, model, optimizer, loss_fn, main_device, results)
except RuntimeError as e:
# to catch OOM errors
print("[{}]".format(datetime.now().time().replace(microsecond = 0)), global_train_step, e)
del batch
gc.collect()
for device_id in range(n_gpu):
with torch.cuda.device(f"cuda:{device_id}"):
torch.cuda.empty_cache()
# empty cache after the first (optimizing) iteration
if args.cudnn_backend and global_train_step == 1:
gc.collect()
for device_id in range(n_gpu):
with torch.cuda.device(f"cuda:{device_id}"):
torch.cuda.empty_cache()
# print and log to the summary writer
if (not global_train_step % args.print_and_log_every) and global_train_step:
pprint_and_log(writer, results, global_train_step, optimizer.get_learning_rate())
results = {"loss": [],
"lambdas": {"vocab": [], "question": [], "qa_answer": [], "passage": []}}
# save checkpoint
if (not global_train_step % args.save_every) and global_train_step:
ssave(model, optimizer, args, epoch, current_train_step, global_train_step,
checkpoint_path, "ST_model")
current_train_step = 0
gc.collect()
print("[{}] Finished epoch {}".format(datetime.now().time().replace(microsecond = 0), epoch))
if bool(writer):
ssave(model, optimizer, args, epoch + 1, current_train_step, global_train_step,
checkpoint_path, "ST_model")
if writer is not None:
writer.close()
def take_train_step(batch, model, optimizer, loss_fn, device, results):
# Representations of the sequences in the fixed vocabulary (indices)
passage_fixed_vectors = batch[0].to(device) # 2d long tensor [batch_size x seq_len_passage]
    query_fixed_vectors = batch[1].to(device) # (2d long tensor) [batch_size x seq_len_question]
qa_answer_fixed_vectors = batch[2].to(device) # 2d long tensor [batch_size x seq_len_qa]
nlg_answer_src_vectors = batch[3].to(device) # (2d long tensor) [batch_size x seq_len_nlg - 1]
# Representation of the NLG answer in the extended vocabulary (shifted, ends with eos token)
nlg_answer_trg_vectors = batch[4].to(device) # (2d long tensor) [batch_size x seq_len_nlg - 1]
    # Representation of the concatenation of passage, question and qa_answer in the extended vocabulary
source_ext_vectors = batch[5].to(device) # (2d long tensor) [batch_size x seq_len_passage + seq_len_question + seq_len_answer]
d_ext_vocab = source_ext_vectors.max().item() + 1
del batch
# forward pass
dec_scores, lambdas = model(passage_fixed_vectors, query_fixed_vectors, qa_answer_fixed_vectors,
source_ext_vectors, d_ext_vocab, nlg_answer_src_vectors)
# calculate loss per example
loss = loss_fn(dec_scores, nlg_answer_trg_vectors)
    # backpropagate the average loss
loss.mean().backward()
# clip gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
# apply gradients
optimizer.step()
optimizer.zero_grad()
# store losses and lambdas per example
with torch.no_grad():
lambda_vocab, lambda_question, lambda_qa_answer, lambda_passage = torch.split(lambdas.mean(dim = 1), 1, dim = -1)
results["loss"].extend(loss.tolist())
results["lambdas"]["vocab"].extend(lambda_vocab.tolist())
results["lambdas"]["question"].extend(lambda_question.tolist())
results["lambdas"]["qa_answer"].extend(lambda_qa_answer.tolist())
results["lambdas"]["passage"].extend(lambda_passage.tolist())
if __name__ == '__main__':
args = parse_arguments()
train(args)
``` |
{
"source": "johnttan/lolesports_scraper",
"score": 3
} |
#### File: johnttan/lolesports_scraper/playerdatabase.py
```python
import json
import pymongo
from pprint import pprint
from pymongo import MongoClient
from config import configuration as cfg
from playerinfo import playerinfo
client = MongoClient(cfg['deployurl'])
db = client[cfg['deploydatabase']]
cUsers = db.Users
cPlayers = db.Players
cGames = db.Games
cPlayers.create_index("playername", unique=True)
def initializeplayers():
for playername, info in playerinfo.items():
player = {
'playername': playername,
'teamname': info[1],
'role': info[0],
'statistics': {},
'latestgame': {},
'gamesplayed': []
}
cPlayers.update({'playername':playername}, player, upsert=True)
def updateplayerdata():
for playername, info in playerinfo.items():
gamesplayed = []
gamesplayedarray = []
latestgameplayedid = 0
gamesplayedquery = cGames.find({'playerlist': playername},)
for game in gamesplayedquery:
gamesplayed.append(game)
for game in gamesplayed:
gamesplayedarray.append(game['gameID'])
for game in gamesplayed:
if game['gameID'] == max(gamesplayedarray):
latestgame = game
cPlayers.update({'playername':playername}, {'$set':{'gamesplayed':gamesplayed, 'latestgame':latestgame}})
def updateplayerdatabygame(game):
    for playername, player in game['players'].items():
        for player_doc in cPlayers.find({'playername':playername}):
            gamesplayed = player_doc['gamesplayed']
            # only add the game if it has not already been recorded for this player
            if not any(game1['gameID'] == game['gameID'] for game1 in gamesplayed):
                gamesplayed.append(game)
                cPlayers.update({'playername':playername}, {'$set':{'gamesplayed':gamesplayed}})
exports = {
'initializeplayers': initializeplayers,
'updateplayerdata': updateplayerdata,
'updateplayerdatabygame': updateplayerdatabygame
}
```
#### File: johnttan/lolesports_scraper/scoring.py
```python
from scoreformula import scoreformuladict as calculate
from scoreconfig import scoreconfiguration as sconfig
from pprint import pprint
def calctotalscore(score):
return (sum(score.values()))
def calcdeathscore(playername, game, player):
return calculate['deathscore'](player['deaths'])
def calcwinscore(playername, game, player):
return calculate['winscore'](player['win'])
def calccsscore(playername, game, player):
cs = player['minion_kills']
for playername1, player1 in game['players'].items():
if playername1 != playername and player1['role'] == player['role']:
oppcs = player1['minion_kills']
role = player['role']
try:
oppcs
except NameError:
print('{} wrong role'.format(playername))
csscore = calculate['csscore'](cs, oppcs, role)
return csscore
def calcgoldscore(playername, game, player):
gold = player['total_gold']
gamegold = 0
for playername1, player1 in game['players'].items():
gamegold += player1['total_gold']
if player1['teamname'] == player['teamname'] and player1['role'] == player['role']:
oppgold = player1['total_gold']
goldscore = calculate['goldscore'](gold, oppgold, gamegold)
return goldscore
def calcpartscore(playername, game, player):
kills = player['kills']
assists = player['assists']
teamkills = 0
for playername1, player1 in game['players'].items():
if player1['teamname'] == player['teamname']:
teamkills += player1['kills']
partscore = calculate['partscore'](kills, assists, teamkills)
return partscore
def calckdascore(playername, game, player):
kills = player['kills']
deaths = player['deaths']
assists = player['assists']
role = player['role']
kdascore = calculate['kdascore'](kills, deaths, assists, role)
return kdascore
"""
Takes in game dictionary, playername
returns score dictionary for particular player
"""
def calcplayerscore(playername, game, player):
score = {}
for scorefield in sconfig['scorearray']:
score[scorefield + 'score'] = globals()['calc' + scorefield + 'score'](playername, game, player)
score['totalscore'] = calctotalscore(score)
return score
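# The dynamic dispatch above resolves each entry of sconfig['scorearray'] to a module-level
# function, e.g. 'kda' -> calckdascore(playername, game, player). A hypothetical config
# (not the real scoreconfig shipped with the project) could look like:
#
#   scoreconfiguration = {'scorearray': ['death', 'win', 'cs', 'gold', 'part', 'kda']}
#
# which would give every player the keys 'deathscore', 'winscore', ..., plus 'totalscore'.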
"""
Takes in game dictionary,
returns game dictionary with all scores calculated.
This is imported in databasescoring.py
"""
def calcgamescore(game):
for playername, player in game['players'].items():
player['score'] = calcplayerscore(playername, game, player)
return game
exports = {
'calcgamescore': calcgamescore
}
``` |
{
"source": "johnttaylor/foxtail",
"score": 2
} |
#### File: nqbp/nqbplib/my_globals.py
```python
import os
import logging
# Globals
_NQBP_WORK_ROOT = ''
_NQBP_PRJ_DIR = ''
_NQBP_PKG_ROOT = ''
_NQBP_PRE_PROCESS_SCRIPT = None
_NQBP_PRE_PROCESS_SCRIPT_ARGS = ''
_NQBP_INITIAL_LIBDIRS = []
_NQBP_XPKG_MODEL = None
# Initialize globals
OUT = logging.getLogger( 'nqbp' )
#
def NQBP_VERSION():
return "v0.1.1"
#
def NQBP_TEMP_EXT():
return '_temp_nqbp'
#
def NQBP_NAME_LIBDIRS():
return 'libdirs.b'
#
def NQBP_NAME_SOURCES():
return 'sources.b'
def NQBP_XPKG_MODEL_OUTCAST():
return 'outcast'
def NQBP_XPKG_MODEL_MIXED():
return 'mixed'
def NQBP_XPKG_MODEL_LEGACY():
return 'legacy'
def NQBP_WRKPKGS_DIRNAME():
if ( NQBP_XPKG_MODEL() == NQBP_XPKG_MODEL_OUTCAST() ):
return 'xpkgs'
else:
return 'xsrc'
def NQBP_PUBLICAPI_DIRNAME():
if NQBP_XPKG_MODEL() == NQBP_XPKG_MODEL_OUTCAST():
return NQBP_WORK_ROOT() + os.sep + 'xinc' + os.sep + 'src'
else:
return NQBP_PKG_ROOT() + os.sep + 'xsrc'
#
def NQBP_PKG_TOP():
return 'top'
#
def NQBP_PRJ_DIR_MARKER1():
return "projects"
#
def NQBP_PRJ_DIR_MARKER2():
return "tests"
#
def NQBP_PRJ_DIR( newval=None ):
global _NQBP_PRJ_DIR
if ( newval != None ):
if ( newval.endswith(os.sep) ):
            newval = newval[:-1]
_NQBP_PRJ_DIR = newval
return _NQBP_PRJ_DIR
#
def NQBP_WORK_ROOT( newval=None ):
global _NQBP_WORK_ROOT
if ( newval != None ):
if ( newval.endswith(os.sep) ):
            newval = newval[:-1]
_NQBP_WORK_ROOT = newval
return _NQBP_WORK_ROOT
#
def NQBP_PKG_ROOT( newval=None ):
global _NQBP_PKG_ROOT
if ( newval != None ):
if ( newval.endswith(os.sep) ):
            newval = newval[:-1]
_NQBP_PKG_ROOT = newval
return _NQBP_PKG_ROOT
def NQBP_PKG_NAME():
return NQBP_PKG_ROOT().replace( NQBP_WORK_ROOT(), "" )
#
def NQBP_PRE_PROCESS_SCRIPT( newval=None ):
global _NQBP_PRE_PROCESS_SCRIPT
if ( newval != None ):
_NQBP_PRE_PROCESS_SCRIPT = newval
return _NQBP_PRE_PROCESS_SCRIPT
#
def NQBP_PRE_PROCESS_SCRIPT_ARGS( newval=None ):
global _NQBP_PRE_PROCESS_SCRIPT_ARGS
if ( newval != None ):
_NQBP_PRE_PROCESS_SCRIPT_ARGS = newval
return _NQBP_PRE_PROCESS_SCRIPT_ARGS
#
def NQBP_XPKG_MODEL():
global _NQBP_XPKG_MODEL
if ( _NQBP_XPKG_MODEL == None ):
val = os.environ.get('NQBP_XPKG_MODEL')
if ( val == NQBP_XPKG_MODEL_OUTCAST() ):
_NQBP_XPKG_MODEL = NQBP_XPKG_MODEL_OUTCAST()
elif ( val == NQBP_XPKG_MODEL_MIXED() ):
_NQBP_XPKG_MODEL = NQBP_XPKG_MODEL_MIXED();
else:
_NQBP_XPKG_MODEL = NQBP_XPKG_MODEL_LEGACY();
return _NQBP_XPKG_MODEL
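if __name__ == "__main__":
    # Quick illustration of the getter/setter idiom used above: calling with a value caches
    # it, calling with no argument reads it back. The paths below are made up for the demo.
    NQBP_WORK_ROOT( os.path.join( os.sep, 'work' ) )
    NQBP_PKG_ROOT( os.path.join( os.sep, 'work', 'mypkg' ) )
    print( NQBP_PKG_NAME() )    # prints the package path relative to the workspace root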
```
#### File: windows/avr_gcc_arduino/atmega328p_uno.py
```python
import sys, os
from nqbplib import base
from nqbplib import utils
from nqbplib import my_globals
class ToolChain( base.ToolChain ):
#--------------------------------------------------------------------------
def __init__( self, exename, prjdir, build_variants, env_tools, env_support, env_cc_ver, env_bsp_ver, default_variant='release', env_error=None ):
base.ToolChain.__init__( self, exename, prjdir, build_variants, default_variant )
        self._ccname = 'AVR-GCC-ATMega328p Arduino'
self._cc = os.path.join( env_tools, env_cc_ver, 'hardware', 'tools', 'avr', 'bin', 'avr-gcc' )
self._asm = os.path.join( env_tools, env_cc_ver, 'hardware', 'tools', 'avr', 'bin', 'avr-gcc' )
self._ld = os.path.join( env_tools, env_cc_ver, 'hardware', 'tools', 'avr', 'bin', 'avr-gcc' )
self._ar = os.path.join( env_tools, env_cc_ver, 'hardware', 'tools', 'avr', 'bin', 'avr-gcc-ar' )
self._objcpy = os.path.join( env_tools, env_cc_ver, 'hardware', 'tools', 'avr', 'bin', 'avr-objcopy' )
self._asm_ext = 'asm'
self._asm_ext2 = 'S'
# Cache potential error for environment variables not set
self._env_error = env_error;
self._clean_pkg_dirs.extend( ['arduino', '_arduino'] )
# set the name of the linker output (not the final output)
self._link_output = '-o ' + exename + '.elf'
# Define paths
core_path = os.path.join(my_globals.NQBP_WORK_ROOT(), env_support, 'arduino', 'hardware', 'avr', env_bsp_ver, 'cores', 'arduino' )
hardware_path = os.path.join(my_globals.NQBP_WORK_ROOT(), env_support, 'arduino', 'hardware', 'avr', env_bsp_ver, 'variants', 'standard' )
self._base_release.inc = self._base_release.inc + " -I{} -I{} ".format(core_path, hardware_path)
#
common_flags = ' -mmcu=atmega328p -Os'
link_and_compile_flags = ' -flto'
asm_and_compile_flags = ' -MMD -DARDUINO_AVR_UNO -DARDUINO_ARCH_AVR'
self._base_release.cflags = self._base_release.cflags + common_flags + link_and_compile_flags + asm_and_compile_flags + ' -ffunction-sections -fdata-sections'
self._base_release.c_only_flags = self._base_release.c_only_flags + ' -std=gnu11 -fno-fat-lto-objects'
self._base_release.cppflags = self._base_release.cppflags + ' -std=gnu++11 -fpermissive -fno-exceptions -fno-rtti -fno-threadsafe-statics'
self._base_release.asmflags = common_flags + asm_and_compile_flags + ' -c -x assembler-with-cpp'
self._base_release.linklibs = ' -Wl,--start-group -lm -Wl,--end-group'
self._base_release.linkflags = common_flags + link_and_compile_flags + ' -fuse-linker-plugin -Wl,--gc-sections'
self._ar_options = 'rcs ' + self._ar_library_name
self._debug_release.cflags = self._debug_release.cflags + ' -DCFG_DEBUG=2'
self._debug_release.asmflags = self._debug_release.asmflags + ' -DCFG_DEBUG=2'
self._optimized_release.cflags = self._optimized_release.cflags + ' -DCFG_DEBUG=0'
        self._optimized_release.asmflags = self._optimized_release.asmflags + ' -DCFG_DEBUG=0'
#
# Build Config/Variant: "xyz"
#
# Common/base options, flags, etc.
#self._base_xyz = self._base_release.copy()
#self._base_xyz.cflags = '-c -DBUILD_TIME_UTC={:d}'.format(self._build_time_utc)
# Optimized options, flags, etc.
#self._optimized_xyz = self._optimized_release.copy()
# Debug options, flags, etc.
#self._debug_xyz = self._debug_release.copy()
# Create new build variant - but ONLY if it DOES NOT already exist
#if ( not self._bld_variants.has_key('xyz') ):
# self._bld_variants['xyz'] = { 'nop':'none' }
# Add to dictionary of options for the 'xyz' build variant
#self._bld_variants['xyz']['base'] = self._base_xyz
#self._bld_variants['xyz']['optimized'] = self._optimized_xyz
#self._bld_variants['xyz']['debug'] = self._debug_xyz
#--------------------------------------------------------------------------
def link( self, arguments, inf, local_external_setting, variant ):
# Run the linker
base.ToolChain.link(self, arguments, inf, local_external_setting, variant )
# switch to the build variant output directory
vardir = '_' + self._bld
utils.push_dir( vardir )
# Output Banner message
self._printer.output("= Creating EEPROM (eep) file ..." )
# construct objcopy command
options = '-O ihex -j .eeprom --set-section-flags=.eeprom=alloc,load --no-change-warnings --change-section-lma .eeprom=0'
objcpy = '{} {} {} {}'.format( self._objcpy,
options,
self._final_output_name + '.elf',
self._final_output_name + '.eep'
)
        # Generate the .EEP file
if ( arguments['-v'] ):
self._printer.output( objcpy )
if ( utils.run_shell(self._printer, objcpy) ):
self._printer.output("=")
self._printer.output("= Build Failed: Failed to create .EEP file from the .ELF" )
self._printer.output("=")
sys.exit(1)
# Output Banner message
self._printer.output("= Creating HEX file ..." )
# construct objcopy command
options = '-O ihex -R .eeprom'
objcpy = '{} {} {} {}'.format( self._objcpy,
options,
self._final_output_name + '.elf',
self._final_output_name + '.hex'
)
# Generate the .HEX file
if ( arguments['-v'] ):
self._printer.output( objcpy )
if ( utils.run_shell(self._printer, objcpy) ):
self._printer.output("=")
self._printer.output("= Build Failed: Failed to create .HEX file from the .ELF" )
self._printer.output("=")
sys.exit(1)
# Return to project dir
utils.pop_dir()
#--------------------------------------------------------------------------
def get_asm_extensions(self):
extlist = [ self._asm_ext, self._asm_ext2 ]
return extlist
#--------------------------------------------------------------------------
def validate_cc( self ):
if ( self._env_error != None ):
exit( "ERROR: The {} environment variable is not set.".format( self._env_error) )
return base.ToolChain.validate_cc(self)
```
#### File: windows/mingw_w64/console_exe.py
```python
import sys
from nqbplib import base
from nqbplib import utils
class ToolChain( base.ToolChain ):
def __init__( self, exename, prjdir, build_variants, default_variant='release' ):
base.ToolChain.__init__( self, exename, prjdir, build_variants, default_variant )
self._ccname = 'Mingw_W64'
# more stuff to clean
self._clean_list.extend( ['xml'] )
# Disable the following 'string warnings' because they generate false-positives (i.e. issues with GCC itself)
self._base_release.cflags = self._base_release.cflags + '-Wno-stringop-truncation -Wno-stringop-overflow '
# Force ANSI standard printfs/scanf
self._base_release.cflags = self._base_release.cflags + '-D__USE_MINGW_ANSI_STDIO=1'
# Turn off ALL optimization on the debug build
self._debug_release.cflags = self._debug_release.cflags + ' -O0'
# statically link my C/C++ libraries
#self._base_release.linkflags = self._base_release.linkflags + ' -static -static-libgcc -static-libstdc++ '
#
# Build Config/Variant: "xyz"
#
# Common/base options, flags, etc.
#self._base_xyz = self._base_release.copy()
#self._base_xyz.cflags = '-c -DBUILD_TIME_UTC={:d}'.format(self._build_time_utc)
# Optimized options, flags, etc.
#self._optimized_xyz = self._optimized_release.copy()
# Debug options, flags, etc.
#self._debug_xyz = self._debug_release.copy()
# Create new build variant - but ONLY if it DOES NOT already exist
#if ( not self._bld_variants.has_key('xyz') ):
# self._bld_variants['xyz'] = { 'nop':'none' }
# Add to dictionary of options for the 'xyz' build variant
#self._bld_variants['xyz']['base'] = self._base_xyz
#self._bld_variants['xyz']['optimized'] = self._optimized_xyz
#self._bld_variants['xyz']['debug'] = self._debug_xyz
#--------------------------------------------------------------------------
def validate_cc( self ):
t = base.ToolChain.validate_cc(self)
if ( not '64' in t[1].decode() ):
utils.output( "ERROR: Incorrect build of GCC (target does NOT equal 64Bit version)" )
sys.exit(1)
return t
```
#### File: nqbp/other/genfsm_base.py
```python
import sys
import os
import subprocess
#
from nqbplib.docopt.docopt import docopt
from nqbplib import utils
#
usage = """
genfsm - Generates source code from Cadifra FSM Diagrams (.cdd files)
===============================================================================
Usage: genfsm [options] <basename> <namespaces> [<sinelabore>...]
Arguments:
<basename> Base name for the FSM. The Cadifra diagram must have this
same file name. All generated output files will have the
<basename> as part of their file names. <basename> IS
case sensitive!
<namespaces> The encapsulated namespace(s) for the generated files. The
Format is: 'Rte::Db::Record'
<sinelabore> Optional arguments passed directly to the Sinelabore code
generator
Options:
    -d NEVENTS When NEVENTS is greater than 0, code for an event queue is
generated where NEVENTS is the size of the event queue.
[Default: 0]
-h, --help Display command help.
NOTES:
o The environment variable SINELABORE_PATH is required/used to specify
      the location of the Sinelabore code generator JAR files (and its
supporting JAR files).
o The script assumes that Graphviz's dot.exe is in the command path.
      GraphViz is used to generate the FSM diagram for Doxygen. See
http://www.graphviz.org
"""
copyright_header = """* This file is part of the Colony.Core Project. The Colony.Core Project is an
* open source project with a BSD type of licensing agreement. See the license
* agreement (license.txt) in the top/ directory or on the Internet at
* http://integerfox.com/colony.core/license.txt
*
* Copyright (c) 2014-2020 <NAME>
*
* Redistributions of the source code must retain the above copyright notice."""
#
import subprocess
import re
import sys
#------------------------------------------------------------------------------
# Parse command line
def run( argv, copyright=None ):
global copyright_header
# Process command line args...
args = docopt(usage, version="0.0.1", options_first=True )
sargs = ' '.join( args['<sinelabore>'] )
# Check the environment variables
sinpath = os.environ.get( "SINELABORE_PATH" )
if ( sinpath == None ):
exit( "ERROR: The SINELABORE_PATH environment variable is not set." )
# Set copyright header (if specified)
if ( copyright != None ):
copyright_header = copyright
# Convert namespace arg to list
names = args['<namespaces>'].split('::')
# Filenames
fsmdiag = args['<basename>'] + ".cdd"
base = args['<basename>'] + "Context_"
evque = args['<basename>'] + "EventQueue_"
fsm = args['<basename>']
cfg = 'codegen.cfg'
# Generated File names
oldfsm = fsm + '.h'
oldfsmcpp = fsm + '.cpp'
oldevt = fsm + '_ext.h'
oldtrace = fsm + '_trace.h'
oldtrace2 = fsm + '_trace.java'
newfsm = fsm + '_.h'
newfsmcpp = fsm + '_.cpp'
newevt = fsm + '_ext_.h'
newtrace = fsm + '_trace_.h'
# Delete 'optional' old/previous files
utils.delete_file( evque + ".h" )
utils.delete_file( evque + ".cpp" )
# Create the config file for Sinelabore
geneatedCodegenConfig( cfg, base, names )
# Build Sinelabore command
cmd = 'java -jar -Djava.ext.dirs={} {}/codegen.jar {} -p CADIFRA -doxygen -o {} -l cppx -Trace {}'.format( sinpath, sinpath, sargs, fsm, fsmdiag )
cmd = utils.standardize_dir_sep( cmd )
# Invoke Sinelabore command
print(cmd)
p = subprocess.Popen( cmd, shell=True )
r = p.communicate()
if ( p.returncode != 0 ):
exit("ERROR: Sinelabore encounterd an error or failed to run." )
# Clean-up config file (don't want it being checked into version control)
os.remove( cfg )
# Mangle the event names so that I can have many FSMs in the same namespace with the same event names
eventList = get_events_names( oldevt )
mangle_event_names( oldevt, eventList, fsm, ' ' )
mangle_event_names( oldfsmcpp, eventList, fsm, '"', '0', '=' )
mangle_event_names( oldtrace, eventList, fsm, '"' )
cleanup_global_define( oldevt, fsm, names )
# Generate Context/Base class
actions, guards = getContextMethods( fsmdiag )
generatedContextClass( base, names, getHeader(), actions, guards )
    # Generate the event queue class
depth = args['-d'].strip()
if ( depth != '0' ):
generateEventClass( evque, names, fsm, newfsm, depth )
# Post process the generated file(s) to work better with Doxygen
cleanup_for_doxygen( fsm + ".h", args['<namespaces>'] + "::" + fsm )
cleanup_for_doxygen( oldtrace )
cleanup_for_doxygen( oldevt )
# Post process the generated file(s)
cleanup_trace( oldfsmcpp, names, fsm, oldfsm, oldtrace, newtrace )
cleanup_includes( oldfsm, names, oldfsm, newfsm, oldevt, newevt, base + '.h' )
cleanup_includes( oldfsmcpp, names, oldfsm, newfsm, oldevt, newevt, base + '.h' )
# Housekeeping for naming convention
utils.delete_file( newfsm )
utils.delete_file( newfsmcpp )
utils.delete_file( newevt )
utils.delete_file( newtrace )
utils.delete_file( oldtrace2 ) # remove unwanted JAVA file
os.rename( oldfsm, newfsm )
os.rename( oldfsmcpp, newfsmcpp )
os.rename( oldevt, newevt )
os.rename( oldtrace, newtrace )
#------------------------------------------------------------------------------
def get_events_names( ext_header_file ):
found_start = False
events = []
with open( ext_header_file ) as inf:
for line in inf:
line = line.lstrip()
# Capture events
if ( found_start ):
if ( line.find("_NO_MSG") == -1 ):
event = line.strip().split('=')[0]
event = event.strip().split(',')[0]
events.append( event.strip() )
# All events found -->can exit the function
else:
break;
# Find the start of the events
if ( line.startswith( "enum" ) ):
found_start = True
# Return found events
return events
#
def mangle_event_names( file, events, prefix, pre_del1, pre_del2=None, pre_del3=None ):
tmpfile = file + ".tmp"
found_indexes = False
with open( file ) as inf:
with open( tmpfile, "w") as outf:
for line in inf:
# Replace event names
for e in events:
# Brute force approach to replacing whole event names (i.e. properly handle evStop & evStopped)
old_ev = pre_del1 + e
new_ev = pre_del1 + prefix + "_" + e
line = line.replace(old_ev,new_ev,1)
if ( pre_del2 != None ):
old_ev = pre_del2 + e
new_ev = pre_del2 + prefix + "_" + e
line = line.replace(old_ev,new_ev,1)
if ( pre_del3 != None ):
old_ev = pre_del3 + e
new_ev = pre_del3 + prefix + "_" + e
line = line.replace(old_ev,new_ev,1)
# Fix event name indexes
if ( found_indexes ):
line = fix_indexes( line, prefix );
found_indexes = False
elif ( line.find( "const unsigned short evt_idx[]={" ) != -1 ):
found_indexes = True
outf.write( line )
os.remove( file )
os.rename( tmpfile, file )
#
def fix_indexes( line, prefix ):
line = line.replace('};','').strip()
k = len(prefix) + 1
offsets = line.strip().split(",")
idx = 0
newoffsets = []
# Strip off potential trailing ',' (e.g. happens when there are no 'true' events)
if ( offsets[len(offsets)-1] == '' ):
del offsets[len(offsets)-1]
for i in offsets:
n = int(i)
newidx = n + idx * k
idx += 1
newoffsets.append( str(newidx) )
newline = " " + ','.join(newoffsets) + '};\n'
return newline
#
def cleanup_for_doxygen( headerfile, classname='<not-used>' ):
tmpfile = headerfile + ".tmp"
skip_state = 0
with open( headerfile ) as inf:
with open( tmpfile, "w") as outf:
for line in inf:
if ( line.startswith( "namespace") and skip_state == 0 ):
outf.write( "#ifndef DOXYGEN_WILL_SKIP_THIS\n\n");
outf.write( line );
skip_state += 1
continue
if ( line.startswith( "#endif") and skip_state == 1):
outf.write( "#endif // !DOXYGEN_WILL_SKIP_THIS\n\n");
outf.write( line );
skip_state += 1
continue
if ( line.find( 'Here is the graph that shows the state machine' ) == -1 ):
outf.write( line )
else:
outf.write( "/** \class {}\n\nHere is the graph that shows the state machine this class implements\n\n\dot\n".format( classname ) )
os.remove( headerfile )
os.rename( tmpfile, headerfile )
#
def cleanup_global_define( headerfile, fsm_name, namespaces ):
tmpfile = headerfile + ".tmp"
skip_state = 0
with open( headerfile ) as inf:
with open( tmpfile, "w") as outf:
for line in inf:
                if ( not line.startswith( '#define InnermostStates ' ) ):
outf.write( line )
else:
tokens = line.split()
outf.write( '#define {}{}_InnermostStates {};\n'.format(flatten_namespaces(namespaces), fsm_name, tokens[2]))
os.remove( headerfile )
os.rename( tmpfile, headerfile )
#
def cleanup_includes( headerfile, namespaces, oldfsm, newfsm, oldevt, newevt, base ):
tmpfile = headerfile + ".tmp"
path = path_namespaces( namespaces )
with open( headerfile ) as inf:
with open( tmpfile, "w") as outf:
for line in inf:
if ( line.find( '#include "{}"'.format(oldfsm) ) != -1):
outf.write( '#include "{}{}"\n'.format(path, newfsm) )
elif ( line.find( '#include "{}"'.format(oldevt) ) != -1) :
outf.write( '#include "{}{}"\n'.format(path, newevt) )
elif ( line.find( '#include "{}"'.format(base) ) != -1) :
outf.write( '#include "{}{}"\n'.format(path, base) )
else:
outf.write( line )
os.remove( headerfile )
os.rename( tmpfile, headerfile )
#
def cleanup_trace( cppfile, namespaces, base, oldfsm, old_trace_headerfile, new_trace_headerfile ):
# Add xx_trace_.h include to xxx_.cpp
tmpfile = cppfile + ".tmp"
path = path_namespaces( namespaces )
newstate = 'stateVars = stateVarsCopy;'
newcount = 0
# Update the state machine .cpp file with Tracing tweaks
prev_line = ''
with open( cppfile ) as inf:
with open( tmpfile, "w") as outf:
for line in inf:
# Remove Trace call from the initialize method
if ( f"{base}TraceEvent(" in line and f"{base}::initialize()" in prev_line ):
pass
# Keep the current line...
else:
outf.write( line )
if ( line.find( '#include "{}"'.format(oldfsm) ) != -1):
outf.write( '#include "{}{}"\n'.format(path, new_trace_headerfile) )
# Add trace for transitioned TO state (but skip the initialize() method because trace + statically created instance DON'T mix)
elif ( line.find( newstate ) != -1 ):
newcount += 1
if ( newcount > 1 ):
outf.write( ' CPL_SYSTEM_TRACE_MSG( SECT_, ( " New State=%s", getNameByState(getInnermostActiveState()) ));\n' )
prev_line = line
os.remove( cppfile )
os.rename( tmpfile, cppfile )
# add CPL trace hooks
tmpfile = old_trace_headerfile + ".tmp"
path = path_namespaces( namespaces )
trace_fn = 'TraceEvent(int evt);'
enum = 'enum ' + base + 'TraceEvent'
comment = '/* Simulation which'
with open( old_trace_headerfile ) as inf:
with open( tmpfile, "w") as outf:
for line in inf:
if ( line.find( '#define' ) != -1):
outf.write( line )
outf.write( '\n' )
outf.write( '#include "Cpl/System/Trace.h"\n' )
outf.write( '\n' )
outf.write( '/// Trace Section\n' )
outf.write( '#define SECT_ "{}::{}"\n'.format( "::".join(namespaces), base ) )
outf.write( '\n' )
elif ( line.find( trace_fn ) != -1 ):
outf.write( '#define {}TraceEvent(a) CPL_SYSTEM_TRACE_MSG( SECT_, ( " Old State=%s, Event=%s", getNameByState(getInnermostActiveState()), {}TraceEvents[a] ));\n'.format( base, base) )
elif ( line.find( enum ) != -1 ):
pass
elif ( line.find( comment ) != -1 ):
pass
else:
outf.write( line )
os.remove( old_trace_headerfile )
os.rename( tmpfile, old_trace_headerfile )
#------------------------------------------------------------------------------
def getContextMethods( fname ):
actions = []
guards = []
with open(fname) as f:
for line in f:
g = re.search(r'[a-zA-Z0-9]+\(\)(?!\;)',line)
a = re.search(r'[a-zA-Z0-9]+\(\)\;', line)
if ( g != None ):
guards.append( g.group(0) )
if ( a != None ):
actions.append( a.group(0).split(';')[0] )
# Remove any duplicates from the grep'd methods
return sorted(list(set(actions))), sorted(list(set(guards)))
def path_namespaces( namespaces ):
flat = ''
for n in namespaces:
flat += n + "/"
return flat
def flatten_namespaces( namespaces ):
flat = ""
for n in namespaces:
flat += n + "_"
return flat
def nested_namespaces( namespaces ):
nest = ""
for n in namespaces:
nest += "namespace {} {} ".format(n,'{')
return nest
def end_nested_namespaces( namespaces ):
nest = ""
for n in namespaces:
nest += "};"
nest += " /// end namespace(s)"
return nest
def cfg_namespaces( namespaces ):
nest = "*"
for n in namespaces:
if ( nest == "*" ):
nest = n + " "
else:
nest += "{} namespace {} ".format('{',n)
return nest
def end_cfg_namespaces( namespaces ):
nest = ""
count = len(namespaces)
if ( count > 1 ):
for n in range(1,count):
nest += "};"
return nest
def generatedContextClass( class_name, namespaces, header, actions, guards ):
fname = class_name + '.h'
flat = flatten_namespaces(namespaces)
with open(fname,"w") as f:
f.write( "#ifndef {}{}x_h_\n".format( flat, class_name ) )
f.write( "#define {}{}x_h_\n".format( flat, class_name ) )
f.write( header )
f.write( "\n\n/* This file is auto-generated DO NOT MANUALLY EDIT this file! */\n\n" )
f.write( "\n" )
f.write( "/// Namespace(s)\n" )
f.write( "{}\n".format( nested_namespaces(namespaces) ) )
f.write( "\n\n" )
f.write( "/// Context (aka actions/guards) for my Finite State Machine\n" )
f.write( "class {}\n".format( class_name ) )
f.write( "{\n" )
f.write( "public:\n" )
for a in actions:
f.write( " /// Action\n" )
f.write( " virtual void {} noexcept = 0;\n".format( a ) )
f.write( "\n" )
f.write( "\n" )
f.write( "public:\n" )
for g in guards:
f.write( " /// Guard\n" )
f.write( " virtual bool {} noexcept = 0;\n".format( g ) )
f.write( "\n" )
f.write( "\n" )
f.write( "public:\n" )
f.write( " /// Virtual Destructor\n" )
f.write( " virtual ~{}(){}{}\n".format( class_name, "{","}" ) )
f.write( "\n" )
f.write( "};\n" )
f.write( "\n" )
f.write( "{}\n".format( end_nested_namespaces(namespaces) ) )
f.write( "#endif /// end header latch\n" )
def generateEventClass( class_name, namespaces, parent_class, parent_header, depth ):
fname = class_name + '.h'
flat = flatten_namespaces(namespaces)
path = path_namespaces( namespaces )
macroname = parent_class.upper()
with open(fname,"w") as f:
f.write( "#ifndef {}{}x_h_\n".format( flat, class_name ) )
f.write( "#define {}{}x_h_\n".format( flat, class_name ) )
f.write( getHeader() )
f.write( "\n\n/* This file is auto-generated DO NOT MANUALLY EDIT this file! */\n\n" )
f.write( "\n" )
f.write( '#include "{}{}"\n'.format(path, parent_header) )
f.write( '#include "Cpl/Container/RingBuffer.h"\n' )
f.write( "\n\n" )
f.write( "/// Namespace(s)\n" )
f.write( "{}\n".format( nested_namespaces(namespaces) ) )
f.write( "\n\n" )
f.write( "/// Event Queue for FSM events.\n" )
f.write( "class {}: public {}, public Cpl::Container::RingBuffer<{}_EVENT_T>\n".format( class_name, parent_class, macroname ) )
f.write( "{\n" )
f.write( "protected:\n" )
f.write( " /// Memory for Event queue\n" )
f.write( " {}_EVENT_T m_eventQueMemory[{}];\n".format( macroname, depth) )
f.write( "\n")
f.write( " /// Flag for tracking re-entrant events\n" )
f.write( " bool m_processingFsmEvent;\n" )
f.write( "\n")
f.write( "public:\n" )
f.write( " /// Constructor\n" )
f.write( " {}();\n".format( class_name) )
f.write( "\n")
f.write( "public:\n" )
f.write( " /// This method properly queues and process event messages\n" )
f.write( " virtual void generateEvent( {}_EVENT_T msg );\n".format( macroname ) )
f.write( "};\n" )
f.write( "\n" )
f.write( "{}\n".format( end_nested_namespaces(namespaces) ) )
f.write( "#endif /// end header latch\n" )
fname = class_name + '.cpp'
flat = flatten_namespaces(namespaces)
with open(fname,"w") as f:
f.write( getHeader() )
f.write( "\n\n/* This file is auto-generated DO NOT MANUALLY EDIT this file! */\n\n" )
f.write( "\n" )
f.write( '#include "{}.h"\n'.format( class_name ) )
f.write( '#include "Cpl/System/FatalError.h"\n' )
f.write( '#include "Cpl/System/Trace.h"\n' )
f.write( "\n" )
f.write( '#define SECT_ "{}::{}"\n'.format( "::".join(namespaces), parent_class ) )
f.write( "\n" )
f.write( "/// Namespace(s)\n" )
f.write( "{}\n".format( nested_namespaces(namespaces) ) )
f.write( "\n\n" )
f.write( "{}::{}()\n".format( class_name, class_name ) )
f.write( ":Cpl::Container::RingBuffer<{}_EVENT_T>( {}, m_eventQueMemory )\n".format( macroname, depth ) )
f.write( ",m_processingFsmEvent(false)\n" )
f.write( " {\n" )
f.write( " }\n" )
f.write( "\n\n" )
f.write( "void {}::generateEvent( {}_EVENT_T msg )\n".format( class_name, macroname ) )
f.write( " {\n" )
f.write( " // Queue my event\n" )
f.write( " if ( !add( msg ) )\n" )
f.write( " {\n" )
f.write( ' Cpl::System::FatalError::logf( "{}::{}: - Buffer Overflow!" );\n'.format( "::".join(namespaces), class_name ) )
f.write( " }\n" )
f.write( "\n" )
f.write( " // Protect against in-thread 'feedback loops' that can potentially generate events\n" )
f.write( " if ( !m_processingFsmEvent )\n" )
f.write( " {\n" )
f.write( " m_processingFsmEvent = true;\n" )
f.write( " while( remove( msg ) )\n" )
f.write( " {\n" )
f.write( ' CPL_SYSTEM_TRACE_MSG( SECT_, ("Processing event:= %s, current state=%s ...", getNameByEvent(msg), getNameByState(getInnermostActiveState())) );\n' )
f.write( " if ( processEvent(msg) == 0 )\n" )
f.write( " {\n" )
f.write( ' CPL_SYSTEM_TRACE_MSG( SECT_, (" Event IGNORED:= %s", getNameByEvent(msg)) );\n' )
f.write( " }\n" )
f.write( ' CPL_SYSTEM_TRACE_MSG( SECT_, (" Event Completed:= %s, end state=%s", getNameByEvent(msg), getNameByState(getInnermostActiveState())) );\n' )
f.write( " }\n" )
f.write( "\n" )
f.write( " m_processingFsmEvent = false;\n" )
f.write( " }\n" )
f.write( " }\n" )
f.write( "\n" )
f.write( "{}\n".format( end_nested_namespaces(namespaces) ) )
#------------------------------------------------------------------------------
def geneatedCodegenConfig( fname, base, names ):
cfg = '''# Output configuration options for the given language. Pipe them into a file for further use!
#
#Allows to define naming conventions for events
PrefixEvents=
#
#Allows to define naming conventions for simple states
PrefixSimpleStates=
#
#Allows to define naming conventions for composite states
PrefixCompositeStates=
#
#Allows to define naming conventions for choice states
PrefixChoice=
#
#Path to 'dot.exe'.
#DotPath="C:\\Program Files\\Graphviz2.22\\bin\\dot.exe"
#DotPath=/usr/local/bin/dot
DotPath=
#
#Port the graphical statediagram.simulator listens for event strings.
UdpPort=4445
#
#Options 'yes' and 'no' are possible. If set to 'yes' only hot transitions are shown
ShowOnlyHotTransitions=no
#
#It is possible to limit the length of the event text. This keeps the image compact.
NumberOfTransitionChars=32
#
#If set to 'yes' only correct models can be saved.
SaveCheckedOnly=yes
#
#If set to 'yes' action code is displayed in the state diagram of the integrated statediagram.editor.
DisplayEntryExitDoCode=yes
#
#Limit action code in the integrated statediagram.editor to the given number of chars.
NumberOfEntryExitDoCodeChars=32
#
#
#Defines the text each generated file starts with.
Copyright=$$HEADER$$
#
#Defines if real tabs or spaces are used for indentation.
Realtab=no
#
#If realtab is 'no' this key defines how many spaces to use per tab
Tabsize=4
#
#Some systems can use special compiler keywords to place the debug strings in program memory or a specific segment
TypeOfDbgString=const char
#
#If set to 'no' the data and time info is supressed in each file header
IncludeDateFileHeaders=no
#
#Optional namespace used in the generated C#, Java and C++ file.
Namespace=$$NAMESPACE_START$$
NamespaceEnd=$$NAMESPACE_END$$
#
#Define a base class for the generated machine class.
BaseClassMachine=$$BASE$$
#
#Define an optional base class for the generated state classes.
BaseClassStates=
#
#If set to yes virtual create methods are generated in the factory class.
CreateFactoryMethodsVirtual=No
#
#If set to yes all state classes are generated into a single cpp/h file.
CreateOneCppStateHeaderFileOnly=Yes
#
#If set to 'yes' a destructor for the state machine class is generated. If set to 'virtual' a virtual destructor is generated. If set to 'no' no destructor is generated.
StateMachineClassHasDestructor=no
#
#If set to 'yes' separate state classes are used. Otherwise action code is completely inlined into the state machine code
SeparateStateClasses=no
#
'''
# Replace tokens
cfg = cfg.replace( "$$BASE$$", base )
cfg = cfg.replace( "$$HEADER$$", getHeaderCfg() )
cfg = cfg.replace( "$$NAMESPACE_START$$", cfg_namespaces(names) )
cfg = cfg.replace( "$$NAMESPACE_END$$", end_cfg_namespaces(names) )
# create file
with open(fname,"w") as f:
f.write( cfg )
#------------------------------------------------------------------------------
def getHeader():
return '/*-----------------------------------------------------------------------------\n' + copyright_header + '\n*----------------------------------------------------------------------------*/\n/** @file */\n\n'
def getHeaderCfg():
text = copyright_header.replace('\n', r'\n')
return r'/*-----------------------------------------------------------------------------\n' + text + r'\n*----------------------------------------------------------------------------*/\n/** @file */\n\n'
```
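Most of the generated names above come from the small namespace helpers. A hedged, self-contained sketch of the conventions they produce for a namespace list like `['Rte', 'Db', 'Record']` (the helpers are re-implemented here, not imported):
```python
namespaces = ["Rte", "Db", "Record"]

# flatten_namespaces(): prefix for header latches and #define names
flat = "".join(n + "_" for n in namespaces)       # 'Rte_Db_Record_'

# path_namespaces(): prefix for generated #include paths
path = "".join(n + "/" for n in namespaces)       # 'Rte/Db/Record/'

# nested_namespaces(): opens the C++ namespace blocks in generated headers
nested = " ".join("namespace {} {{".format(n) for n in namespaces)

print(flat)
print(path)
print(nested)   # namespace Rte { namespace Db { namespace Record {
```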
#### File: nqbp/other/nrf_util_base.py
```python
import sys
import os
from nqbplib.docopt.docopt import docopt
from nqbplib import utils
#------------------------------------------------------------------------------
# Create a dumbed-down printer class to be compatible with the utils.run_shell() method
class Printer:
def __init__(self):
pass
def output(self,line):
print(line)
#------------------------------------------------------------------------------
def get_default_zipfile_name( default_dir='_arduino'):
zipfile = None
# Make sure the default directory exists...
if ( os.path.isdir(default_dir) ):
files = utils.dir_list_filter_by_ext(default_dir, ['zip'])
        # set the default ZIP file if ONE and ONLY ONE zip file exists in the default directory
if ( len(files) == 1 ):
zipfile = os.path.join(default_dir, files[0] )
return zipfile
#------------------------------------------------------------------------------
def run( argv ):
# Parse command line
args = docopt(__doc__, version="0.1")
# Get environment variable for where the arduino tools are located
ARDUINO_TOOLS = os.environ.get( 'ARDUINO_TOOLS' )
if ( ARDUINO_TOOLS == None ):
print("The environment variable - ARDUINO_TOOLS - is NOT set.")
sys.exit(1)
ARDUINO_BSP_VER = os.environ.get( 'ARDUINO_BSP_VER' )
if ( ARDUINO_BSP_VER == None ):
print("The environment variable - ARDUINO_BSP_VER - is NOT set.")
sys.exit(1)
ARDUINO_NRF_UTIL_VER = os.environ.get( 'ARDUINO_NRF_UTIL_VER' )
if ( ARDUINO_NRF_UTIL_VER == None ):
print("The environment variable - ARDUINO_NRF_UTIL_VER - is NOT set.")
sys.exit(1)
# Default tool stuff
nrfutil = os.path.join(ARDUINO_TOOLS, 'hardware', 'nrf52', ARDUINO_BSP_VER, 'tools', ARDUINO_NRF_UTIL_VER, 'binaries', 'win32', 'nrfutil' )
# Get hex file to program
zipfile = get_default_zipfile_name();
if ( args['<zipfile>'] ):
zipfile = args['<zipfile>']
if ( zipfile == None ):
print("No ZIP file was specified OR multiple ZIP files exist in the default directory")
sys.exit(1)
# build options....
verbose = ' --verbose ' if args['-v'] else ''
comport = ' --port '+args['-p']
baud = ' -b '+args['-b']
command = ''
if ( not args['--nozip'] ):
target = ' dfu serial -pkg {}'.format(zipfile)
command = target + comport + baud
extra = '' if not args['--extra'] else ' ' + args['--extra']
options = verbose + command + extra
# Run NRFUTIL
printer = Printer()
cmd = nrfutil + ' ' + options
if ( args['-v'] ):
print(cmd)
if (utils.run_shell(printer, cmd, False) ):
print()
print('** A FAILURE occurred while attempting to run nrfutil')
print()
sys.exit(1)
``` |
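The run() above reduces to assembling one `nrfutil dfu serial` command line. A hedged sketch of that assembly with made-up values:
```python
def build_nrfutil_cmd(nrfutil, zipfile, port, baud, verbose=False):
    # Mirrors the option assembly above: [--verbose] dfu serial -pkg <zip> --port <com> -b <baud>
    parts = [nrfutil]
    if verbose:
        parts.append("--verbose")
    parts += ["dfu", "serial", "-pkg", zipfile, "--port", port, "-b", str(baud)]
    return " ".join(parts)

print(build_nrfutil_cmd("nrfutil", "_arduino/firmware.zip", "COM4", "115200", verbose=True))
```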
{
"source": "johnttaylor/Outcast",
"score": 2
} |
#### File: Outcast/bin/evie.py
```python
import sys
import os
from subprocess import call
from docopt.docopt import docopt
import utils
from my_globals import EVIE_VERSION
from my_globals import OUTCAST_SCM_ADOPTED_TOOL
#------------------------------------------------------------------------------
def load_command( scm, name ):
try:
command_module = __import__("scm.{}.{}".format(scm, name), fromlist=[scm])
except ImportError:
exit("%r is not a Evie command. Use 'evie help' for list of commands." % name)
return command_module
#------------------------------------------------------------------------------
def display_command_list(scm):
import pkgutil
p = __import__("scm.{}".format(scm),fromlist=['git'] )
print( ' ' )
print( "Type 'evie help <command>' for details. Type 'evie --help' for base usage." )
print( "-------------------------------------------------------------------------------" )
for importer, modname, ispkg in pkgutil.iter_modules(p.__path__):
if ( not ispkg ):
cmd = load_command( scm, modname )
cmd.display_summary()
print( ' ' )
def display_scm_types_list():
print( ' ' )
print( "Type 'evie --help' for additional help." )
print( "-------------------------------------------------------------------------------" )
bpath = os.path.join( os.path.dirname(__file__), 'scm' )
if ( os.path.exists( bpath ) ):
files = os.listdir(bpath)
for f in files:
if ( os.path.isdir(os.path.join(bpath,f)) ):
print(f)
print( ' ' )
#------------------------------------------------------------------------------
# Parse command line
args = docopt(__doc__, version=EVIE_VERSION(), options_first=True )
# Display list of build engines supported
if ( args['--qry'] ):
display_scm_types_list()
else:
# Determine which SCM tool to use
scm = os.environ.get( OUTCAST_SCM_ADOPTED_TOOL() )
if ( scm == None ):
scm = 'git'
if ( args['--scm'] ):
scm = args['--scm']
args['--scm'] = scm
# Trap help on a specific command
if ( args['<command>'] == 'help' ):
# Display list of commands if none specified
if ( args['<args>'] == [] ):
display_command_list(scm)
# Display command specific help
else:
load_command( scm, args['<args>'][0] ).run( args, ['--help'] )
# Trap no command specified
elif ( args['<command>'] == None ):
docopt(__doc__,argv=['--help'])
# Run the command (if it exists)
else:
        # Set quiet & verbose modes
utils.set_quite_mode( args['-q'] )
utils.set_verbose_mode( args['-v'] )
# run the command
load_command( scm, args['<command>'] ).run( args, [args['<command>']] + args['<args>'] )
```
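evie resolves each sub-command by importing `scm.<scm-type>.<command>` at run time. A minimal hedged sketch of the same plugin-style dispatch using `importlib` (module names are illustrative):
```python
import importlib

def load_command(scm, name):
    # e.g. scm='git', name='copy'  ->  imports scm.git.copy
    try:
        return importlib.import_module("scm.{}.{}".format(scm, name))
    except ImportError:
        raise SystemExit("%r is not a known command." % name)

# cmd = load_command("git", "copy")
# cmd.run(common_args, cmd_argv)
```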
#### File: Outcast/bin/orc.py
```python
import sys
import os
from subprocess import call
from docopt.docopt import docopt
from my_globals import ORC_VERSION
from my_globals import OUTCAST_SCM_ADOPTED_TOOL
from my_globals import OUTCAST_SCM_PRIMARY_TOOL
from my_globals import PACKAGE_ROOT
import utils
#------------------------------------------------------------------------------
def load_command( name ):
try:
command_module = __import__("commands.{}".format(name), fromlist=["commands"])
except ImportError:
exit("{} is not a Orc command. Use 'orc help' for list of commands.".format(name) )
return command_module
#------------------------------------------------------------------------------
def display_command_list():
import pkgutil
import commands
p = commands
print( ' ' )
print( "Type 'orc help <command>' for more details. Type 'orc --help' for base usage." )
print( "-------------------------------------------------------------------------------" )
# Display the list of commands
for importer, modname, ispkg in pkgutil.iter_modules(p.__path__):
if ( not ispkg ):
cmd = load_command( modname )
cmd.display_summary()
print( ' ' )
#------------------------------------------------------------------------------
# MAIN
if __name__ == '__main__':
# Parse command line
args = docopt(__doc__, version=ORC_VERSION(), options_first=True )
# Determine which 'adopted' SCM tool to use
scm = os.environ.get( OUTCAST_SCM_ADOPTED_TOOL() )
if ( scm == None ):
scm = 'git'
if ( args['--scm'] ):
scm = args['--scm']
args['--scm'] = scm
# Determine which 'primary' SCM tool to use
scm = os.environ.get( OUTCAST_SCM_PRIMARY_TOOL() )
if ( scm == None ):
scm = 'git'
if ( args['--primary-scm'] ):
scm = args['--primary-scm']
args['--primary-scm'] = scm
# Trap the special where option
if ( args['--where'] ):
print(__file__)
exit(0)
# Trap help on a specific command
if ( args['<command>'] == 'help' ):
# Display list of commands if none specified
if ( args['<args>'] == [] ):
display_command_list()
# Display command specific help
else:
load_command( args['<args>'][0] ).run( args, ['--help'] )
# Run the command (if it exists)
else:
        # Set quiet & verbose modes
utils.set_quite_mode( args['-q'] )
utils.set_verbose_mode( args['-v'] )
# Set the current working directory to the root directory
repo_root = utils.find_root( args['--primary-scm'], args['-v'] )
utils.push_dir( repo_root )
PACKAGE_ROOT( repo_root )
# run the command
load_command( args['<command>'] ).run( args, [args['<command>']] + args['<args>'] )
```
#### File: bin/scm/copy.py
```python
def display_summary():
print("{:<13}{}".format( 'copy', "Creates a non-tracked/local copy of a SCM Repository" ))
# DOCOPT command line definition
USAGE = """
Creates a non-tracked/local copy of the specified repository/branch/reference
===============================================================================
usage: evie [common-opts] copy [options] <dst> <repo> <origin> <id>
evie [common-opts] copy get-success-msg
       evie [common-opts] copy get-error-msg
Arguments:
<dst> Parent directory for where the copy is placed. The
directory is specified as a relative path to the root
of primary repository.
<repo> Name of the repository to copy.
<origin> Path/URL to the repository.
<id> Label/Tag/Hash/Version of code to be copied.
get-success-msg Returns a SCM specific message that informs the end user
of additional action(s) that may be required when
the command is successful
get-error-msg Returns a SCM specific message that informs the end user
of additional action(s) that may be required when
the command fails
Options:
-p PKGNAME Specifies the Package name if different from the <repo>
name
-b BRANCH Specifies the source branch in <repo>. The use/need
                    of this option is dependent on the <repo> SCM type.
--force Forces a true copy/clone to be created. The default
behavior is SCM type specific.
Options:
-h, --help Display help for this command.
Notes:
    o The command MUST be run in the root of the primary repository.
"""
```
#### File: scm/git/copy.py
```python
""
import os, sys
import utils
from docopt.docopt import docopt
import scm.copy
import scm.git.mount
#---------------------------------------------------------------------------------------------------------
def display_summary():
scm.copy.display_summary()
#------------------------------------------------------------------------------
def run( common_args, cmd_argv ):
args = docopt(scm.copy.USAGE, argv=cmd_argv)
# Use the mount command so as to have consistent pre/post GIT behavior with adopting non-integrated packages
if ( not args['--force'] ):
cmd_argv[0] = 'mount'
cmd_argv.insert(1, '--noro')
scm.git.mount.run( common_args, cmd_argv )
# Do a brute force copy
else:
# -b option is not supported/needed
if ( args['-b'] != None ):
sys.exit( "The '-b' option is not supported/needed. Use a 'remote-ref' as the <id> argument" )
# Default Package name
pkg = args['<repo>']
if ( args['-p'] ):
pkg = args['-p']
# Make sure the destination directory exists
dst = os.path.join( os.getcwd(), args['<dst>'] )
utils.print_verbose( f"Destination for the copy: {dst}" )
utils.mkdirs( dst )
# Create a clone of the repo
# NOTE: I hate cloning the entire repo - but I have not found a way to get JUST a snapshot by a remote-ref
cmd = f'git clone --branch {args["<id>"]} --depth=1 {args["<origin>"]}/{args["<repo>"]}.git {pkg}'
utils.push_dir( dst )
t = utils.run_shell( cmd, common_args['-v'] )
utils.pop_dir()
if ( utils.is_error(t) ): # Clean-up dst dir if there was failure
utils.remove_tree( dst )
        utils.check_results( t, f"ERROR: Failed to retrieve/clone the specified package/repository. Note: the <id> ({args['<id>']}) MUST be a git TAG." )
# Remove the .git directoy since this is a non-tracked copy
gitdir = os.path.join( dst, pkg, ".git" )
utils.remove_tree( gitdir, warn_msg="Not able to remove the .git directory for local copy" )
```
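The `--force` branch above is essentially a shallow clone at a tag followed by removing `.git`. A standalone hedged sketch of that sequence (URL, tag, and paths are placeholders):
```python
import os
import shutil
import subprocess

def snapshot_at_tag(origin, repo, tag, dst):
    # Shallow-clone only the tagged commit...
    url = "{}/{}.git".format(origin, repo)
    subprocess.run(["git", "clone", "--branch", tag, "--depth=1", url,
                    os.path.join(dst, repo)], check=True)
    # ...then drop .git so the copy is no longer tracked
    shutil.rmtree(os.path.join(dst, repo, ".git"), ignore_errors=True)

# snapshot_at_tag("https://github.com/example", "colony.core", "v1.2.3", "xsrc")
```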
#### File: scm/git/umount.py
```python
import os, sys
import utils
from docopt.docopt import docopt
import scm.umount
#---------------------------------------------------------------------------------------------------------
def display_summary():
scm.umount.display_summary()
#------------------------------------------------------------------------------
def run( common_args, cmd_argv ):
args = docopt(scm.umount.USAGE, argv=cmd_argv)
# Success Msg
if ( args['get-success-msg'] ):
print( "Repo unmount. You will need to perform a 'git add/rm' to remove the deleted files" )
return
# Error Msg
if ( args['get-error-msg'] ):
print( "" ) # No addition info
return
# -b option is not supported/needed
if ( args['-b'] != None ):
sys.exit( "The '-b' option is not supported/needed. Use a 'remote-ref' as the <id> argument" )
# Default Package name
pkg = args['<repo>']
if ( args['-p'] ):
pkg = args['-p']
# Set the foreign package directory to be deleted
dst = os.path.join( args['<dst>'] , pkg )
if ( not os.path.isdir(dst) ):
sys.exit( f"ERROR: The Package/Directory - {dst} - does not exist." )
utils.print_verbose( f"Package/directory being removed: {dst}" )
    # There is no 'git subtree rm' command --> we simply delete the package directory
utils.set_tree_readonly( dst, False )
utils.remove_tree( dst )
```
#### File: scm/git/update.py
```python
import os, sys
import utils
from docopt.docopt import docopt
import scm.update
#------------------------------------------------------------------------------
def display_summary():
scm.update.display_summary()
#------------------------------------------------------------------------------
def run( common_args, cmd_argv ):
    args = docopt(scm.update.USAGE, argv=cmd_argv)
# -b option is not supported/needed
if ( args['-b'] != None ):
sys.exit( "The '-b' option is not supported/needed. Use a 'remote-ref' as the <id> argument" )
# Default Package name
pkg = args['<repo>']
if ( args['-p'] ):
pkg = args['-p']
# Set directory for the subtree directory
dst = os.path.join( args['<dst>'], pkg )
dst = utils.force_unix_dir_sep(dst)
utils.print_verbose( f"Location of the copy being updated: {dst}" )
# Update the 'subtree'
cmd = f'git subtree pull --prefix {dst} {args["<origin>"]}/{args["<repo>"]}.git {args["<id>"]} --squash'
t = utils.run_shell( cmd, common_args['-v'] )
utils.check_results( t, "ERROR: Failed the update a subtree for the specified package/repository." )
```
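Outside of evie, the same update is a single `git subtree pull`. A hedged sketch (prefix, URL, and ref are placeholders):
```python
import subprocess

def update_subtree(prefix, url, ref):
    # Pull a newer tagged snapshot into an existing subtree prefix, squashing its history
    subprocess.run(["git", "subtree", "pull", "--prefix", prefix, url, ref, "--squash"],
                   check=True)

# update_subtree("xsrc/colony.core", "https://github.com/example/colony.core.git", "v1.3.0")
```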
#### File: bin/scm/mount.py
```python
def display_summary():
print("{:<13}{}".format( 'mount', "Creates a semi-tracked/local copy of a SCM Repository" ))
# DOCOPT command line definition
USAGE = """
Creates a semi-tracked/local copy of the specified repository/branch/reference
===============================================================================
usage: evie [common-opts] mount [options] <dst> <repo> <origin> <id>
evie [common-opts] mount [options] get-success-msg
evie [common-opts] mount [options] get-error-msg
Arguments:
<dst> PARENT directory for where the copy is placed. The
directory is specified as a relative path to the root
of primary repository.
<repo> Name of the repository to mount
<origin> Path/URL to the repository
<id> Label/Tag/Hash/Version of code to be mounted
get-success-msg Returns a SCM specific message that informs the end user
of additional action(s) that may be required when
the command is successful
get-error-msg Returns a SCM specific message that informs the end user
of additional action(s) that may be required when
the command fails
Options:
-p PKGNAME Specifies the Package name if different from the <repo>
name
-b BRANCH Specifies the source branch in <repo>. The use/need
                    of this option is dependent on the <repo> SCM type.
--noro Do NOT mark the package as read-only in the file system
-h, --help Display help for this command
Notes:
    o The command MUST be run in the root of the primary repository.
o The 'mount' command is different from the 'copy' in that creates
semi-tracked clone of the repository which can be updated directly from
the source <repo> at a later date (think git subtrees)
"""
```
#### File: scm/none/umount.py
```python
import os
import utils
from docopt.docopt import docopt
import scm.umount
#---------------------------------------------------------------------------------------------------------
def display_summary():
scm.umount.display_summary()
#------------------------------------------------------------------------------
def run( common_args, cmd_argv ):
args = docopt(scm.umount.USAGE, argv=cmd_argv)
# Return 'error' since this is just a stub
exit(1)
```
#### File: bin/scm/rm.py
```python
def display_summary():
print("{:<13}{}".format( 'rm', "Removes a previously copied SCM Repository" ))
# DOCOPT command line definition
USAGE="""
Removes a previously 'copied' repository
===============================================================================
usage: evie [common-opts] rm [options] <dst> <repo> <origin> <id>
evie [common-opts] rm [options] get-success-msg
evie [common-opts] rm [options] get-error-msg
Arguments:
<dst> PARENT directory for where the package was copied. The
directory is specified as a relative path to the root
of primary repository.
<repo> Name of the repository to remove
<origin> Path/URL to the repository
<id> Label/Tag/Hash/Version of code to be remove
get-success-msg Returns a SCM specific message that informs the end user
of additional action(s) that may be required when
the command is successful
get-error-msg Returns a SCM specific message that informs the end user
of additional action(s) that may be required when
the command fails
Options:
-p PKGNAME Specifies the Package name if different from the <repo>
name
-b BRANCH Specifies the source branch in <repo>. The use/need
                    of this option is dependent on the <repo> SCM type.
Options:
-h, --help Display help for this command
Notes:
    o The command MUST be run in the root of the primary repository.
    o This command only applies to repositories previously mounted using
the 'copy' command.
"""
```
#### File: bin/scm/update.py
```python
def display_summary():
print("{:<13}{}".format( 'update', "Updates a previously mounted SCM Repository" ))
# DOCOPT command line definition
USAGE = """
Updates a previously mounted repository
===============================================================================
usage: evie [common-opts] update [options] <dst> <repo> <origin> <id>
evie [common-opts] update [options] get-success-msg
evie [common-opts] update [options] get-error-msg
Arguments:
<dst> PARENT directory for where the copy-to-be-updated is
located. The directory is specified as a relative path to
the root of primary repository.<pkg>
<repo> Name of the repository to update
<origin> Path/URL to the repository
<id> Label/Tag/Hash/Version of code to be updated
get-success-msg Returns a SCM specific message that informs the end user
of additional action(s) that may be required when
the command is successful
get-error-msg Returns a SCM specific message that informs the end user
of additional action(s) that may be required when
the command fails
Options:
-b BRANCH Specifies the source branch in <repo>. The use/need
                    of this option is dependent on the <repo> SCM type.
Options:
-h, --help Display help for this command
Notes:
    o The command MUST be run in the root of the primary repository.
"""
``` |
{
"source": "johnturnerTCC/aws-iamctl",
"score": 2
} |
#### File: aws-iamctl/iamctl/harvester.py
```python
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import boto3
import json
from botocore.exceptions import ClientError
import re
import fnmatch
import logging
import logging.config
import csv
import sys
import os
import argparse
import time
from datetime import datetime
from progress.bar import ChargingBar, Bar
from pyfiglet import Figlet
from colorama import init,Fore, Back, Style
from terminaltables import SingleTable
from os.path import expanduser
from os import path
class Harvester:
def close_file_handler(self):
self.extract_file.close()
def read_iam_file(self):
with open('iam.json') as json_file:
return json.load(json_file)
def return_service_iam_actions(self,service_prefix):
for p in self.iam_reference['serviceMap']:
if (self.iam_reference['serviceMap'][p]['StringPrefix'] == service_prefix):
return self.iam_reference['serviceMap'][p]['Actions']
def return_service_arns(self):
arns=[]
for p in self.iam_reference['serviceMap']:
if ('ARNRegex' in self.iam_reference['serviceMap'][p]):
arns.append({'ARNRegex':self.iam_reference['serviceMap'][p]['ARNRegex'], 'StringPrefix':self.iam_reference['serviceMap'][p]['StringPrefix']})
return arns
def match_action_regex(self, match_action, service_prefix):
matches = []
actions = self.return_service_iam_actions(service_prefix)
for action in actions or []:
if fnmatch.fnmatch(action, match_action):
matches.append(action)
return matches
def match_resource_regex(self, match_resource):
matches = []
arns = self.return_service_arns()
for arn in arns or []:
arn_regex = re.compile(arn['ARNRegex'])
if arn_regex.match(match_resource):
matches.append(arn)
return matches
def get_iam_roles(self):
paginator = self.client.get_paginator('list_roles')
response_iterator = paginator.paginate(
PaginationConfig = {
'PageSize': 1000,
'StartingToken': None})
roles = response_iterator.build_full_result()
self.logger.info("Number of roles: %d",len(roles['Roles']))
return roles['Roles']
def get_role_inline_policies(self, role_name):
return self.client.list_role_policies(
RoleName = role_name
)
def get_role_attached_policies(self, role_name):
return self.client.list_attached_role_policies(
RoleName = role_name
)
def get_policy(self, policy_arn):
return self.client.get_policy(PolicyArn = policy_arn)
def get_policy_version(self,policy_arn, version_id):
return self.client.get_policy_version(PolicyArn = policy_arn, VersionId = version_id)
def get_role_policy(self, rolename, inline_policy_name):
return self.client.get_role_policy(RoleName = rolename, PolicyName = inline_policy_name)
def get_role(self, role_name):
return self.client.get_role(RoleName = role_name)
def parse_statement_action(self,action_tag ,statement_action):
actions = []
if(statement_action == "*"):
self.logger.info("All Actions against all Services")
actions.append({'service':'*' , action_tag:'*'})
else:
self.logger.debug("Statement Action: " + statement_action)
self.logger.debug(action_tag+": " + statement_action.encode("utf-8").decode().split(':')[1])
self.logger.debug("service: " + statement_action.encode("utf-8").decode().split(':')[0])
action_matches = self.match_action_regex(statement_action.encode("utf-8").decode().split(':')[1], statement_action.encode("utf-8").decode().split(':')[0])
for action in action_matches or []:
actions.append({'service' : statement_action.encode("utf-8").decode().split(':')[0], action_tag:action})
self.logger.info("Statement Action: " + statement_action.encode("utf-8").decode().split(':')[0]+" : " + action )
return actions
def parse_statement_resource(self,resource_tag, statement_resource):
resources = []
if(statement_resource == "*"):
self.logger.info("All resources for all Services")
resources.append({'service' : '*' , resource_tag : '*'})
else:
resource_matches = self.match_resource_regex(statement_resource)
for resource in resource_matches:
resources.append({'service' : resource['StringPrefix'], resource_tag : statement_resource})
self.logger.info("Statement Resource: " + resource['StringPrefix'] + " : " + statement_resource)
return resources
def mux(self,action_tag,actions,resource_tag,resources):
#actions structure is: service, action
#resources sturcture is: service, arn
#muxedup structure is: service, action, arn
self.logger.debug("I am muxed up and I received this actions:")
self.logger.debug(str(actions))
self.logger.debug("I am muxed up and I received this resources:")
self.logger.debug(str(resources))
muxedup=[]
for action in actions:
for resource in resources:
if ((action['service'] == resource['service']) or (action['service'] == "*") or (resource['service'] == "*")):
muxedup.append({'service': action['service'], 'action' : action[action_tag], 'arn' : resource[resource_tag]})
return muxedup
def parse_policy(self,policy_document):
# instantiate empty policy array and policy statement array
policy_statement_array = []
parsed_policy = []
# determining if there is a single statement or an array of statements in the policy document
# and appending those statement(s) to policy_statement_array
#
if not isinstance(policy_document['Statement'], list):
policy_statement_array.append(policy_document['Statement'])
else:
policy_statement_array = policy_document['Statement']
# code that parses each policy statement into its components
# and calls parse_statement_action for action/notaction, parse_statement_resource for resource/notresource block
for policy_statement in policy_statement_array:
self.logger.info("Statement Effect: "+policy_statement['Effect'])
actions = []
statement_has_action = 'Action'
# Checking if statement has action or notaction block
if policy_statement.get('Action',False):
statement_has_action = 'Action'
else:
statement_has_action = 'NotAction'
# checking if Action is single item or a list
if not isinstance(policy_statement[statement_has_action], list):
actions=actions + self.parse_statement_action(statement_has_action, policy_statement[statement_has_action])
else:
for statement_action in policy_statement[statement_has_action]:
actions = actions+self.parse_statement_action(statement_has_action, statement_action)
resources=[]
statement_has_resource = 'Resource'
            # Checking if statement has resource or notresource block
if policy_statement.get('Resource',False):
statement_has_resource = 'Resource'
else:
statement_has_resource = 'NotResource'
self.logger.debug("Statement Resource: "+str(policy_statement[statement_has_resource]))
if not isinstance(policy_statement[statement_has_resource], list):
resources=resources+self.parse_statement_resource(statement_has_resource, policy_statement[statement_has_resource])
else:
for statement_resource in policy_statement[statement_has_resource]:
resources = resources + self.parse_statement_resource(statement_has_resource, statement_resource)
muxed_up=self.mux(statement_has_action,actions,statement_has_resource,resources)
self.logger.debug("Going to print Muxed up results for: ")
self.logger.debug(str(muxed_up))
parsed_policy.append({'effect' : policy_statement['Effect'], 'action_resources' : muxed_up })
return parsed_policy
def write_out_exhaust(self, role):
#self.logger.info("here is the exhaust",str(exhaust))
#exhaust: data: List<role>
#role: rolename:string,trust:string,parsed_policies: List<policy>
#policy: policyname: string, parsed_statements: List<statement>
#statement: effect: string, action_resources: List<action_resource>
#action_resource: service, action, arn
#Final write out (vsad, somesuffix, trust, policyname, effect, service, action, resource)
#MVP write out (rolename, trust, policyname, effect, service, action, resource)
csv_out = self.csv_out
for policy in role['policies']:
self.logger.info("here is the policy",str(policy))
if policy['type'] == "trust":
for statement in policy['statements']:
csv_out.writerow((role['name'],role['path'],policy['name'],policy['type'], statement['effect'],statement['service'], statement['action'], None ,statement['principal']))
else:
for statement in policy['statements']:
for action_resource in statement['action_resources']:
csv_out.writerow((role['name'], role['path'], policy['name'], policy['type'], statement['effect'], action_resource['service'], action_resource['action'], action_resource['arn'], None))
def get_role_trust(self, roleresponse):
trustlist = []
for Statement in roleresponse['Statement']:
for principal in Statement['Principal'].values():
if isinstance(principal, list):
for subvalue in principal:
trustlist.append({'effect' : Statement['Effect'], 'service' : 'sts', 'action' : 'AssumeRole', 'principal' : subvalue})
else:
trustlist.append({'effect' : Statement['Effect'], 'service' : 'sts', 'action' : 'AssumeRole', 'principal' : principal})
self.logger.info(trustlist)
return {'name': 'trust', 'type' : 'trust', 'statements' : trustlist}
def process_role_attached_policies(self, attached_policies):
parsed_attached_policies = []
for attached_policy in attached_policies:
policyresponse = self.get_policy(attached_policy['PolicyArn'])['Policy']
self.logger.debug(str(policyresponse))
policyversion = self.get_policy_version(attached_policy['PolicyArn'], policyresponse['DefaultVersionId'])['PolicyVersion']
policy_document = policyversion['Document']
self.logger.info("Attached Policy Name: " + attached_policy['PolicyName'])
self.logger.debug(str(policy_document))
parsed_policy = self.parse_policy(policy_document)
parsed_attached_policies.append({'name': attached_policy['PolicyName'], 'type' : 'managed', 'statements' : parsed_policy})
return parsed_attached_policies
def process_role_inline_policies(self, rolename, inline_policies):
parsed_attached_policies = []
for inline_policy_name in inline_policies:
policyresponse = self.get_role_policy(rolename, inline_policy_name)
policy_document = policyresponse['PolicyDocument']
self.logger.info("Inline Policy Name: " + inline_policy_name)
self.logger.debug(str(policy_document))
parsed_policy = self.parse_policy(policy_document)
parsed_attached_policies.append({'name': inline_policy_name, 'type' : 'inline', 'statements' : parsed_policy})
return parsed_attached_policies
def get_role_trust_policies(self, role_name):
parsed_trust_policies=[]
roleresponse= self.get_role(role_name )
self.logger.info("Going to print the trust policy next")
self.logger.debug(str(roleresponse['Role']['AssumeRolePolicyDocument']))
trustresponse = self.get_role_trust(roleresponse['Role']['AssumeRolePolicyDocument'])
self.logger.info("Going to print the trust policy next again")
self.logger.info(trustresponse)
parsed_trust_policies.append(trustresponse)
return parsed_trust_policies
def process_role(self, role):
parsed_policies = []
self.logger.info("\nRole Name: " + role['RoleName'])
#tbd call trust policies processor
trust_policy = self.get_role_trust_policies(role['RoleName'])
self.logger.debug("Going to print Processed Trust Policy")
self.logger.debug(str(trust_policy))
parsed_policies.extend(trust_policy)
inline_policies = self.get_role_inline_policies(role['RoleName'])['PolicyNames']
self.logger.debug("Going to print Raw Inline Policies")
self.logger.debug(str(inline_policies))
processed_inline_policies=self.process_role_inline_policies(role['RoleName'], inline_policies)
self.logger.debug("Going to print Processed Inline Policies")
self.logger.debug(str(processed_inline_policies))
parsed_policies.extend(processed_inline_policies)
attached_policies = self.get_role_attached_policies(role['RoleName'])['AttachedPolicies']
self.logger.debug("Going to print Raw Attached Policies")
self.logger.debug(str(attached_policies))
processed_attached_policies = self.process_role_attached_policies(attached_policies)
self.logger.debug("Going to print Processed Attached Policies")
self.logger.debug(str(processed_attached_policies))
parsed_policies.extend(processed_attached_policies)
return parsed_policies
def harvest_iam_roles_from_account(self):
roles=self.get_iam_roles()
self.logger.info("Number of roles: %d", len(roles))
#bar = ProgressBar('Something')
bar = ChargingBar('Harvesting IAM Roles from '+self.account_tag, max=len(roles),suffix='%(index)d/%(max)d - %(eta)ds')
for role in roles:
parsed_policies = self.process_role(role)
self.write_out_exhaust({'name': role['RoleName'],'path':role['Path'],'policies':parsed_policies})
bar.next()
self.close_file_handler()
bar.finish()
def __init__(self, cli_profile_name, account_tag, output_directory):
# create self.logger, TBD change this to get logging conf based on class name
self.logger = logging.getLogger(__name__)
self.iam_reference = self.read_iam_file()
self.cli_profile_name = cli_profile_name
self.account_tag = account_tag
self.output_directory = output_directory
# Any clients created from this session will use credentials
# from the [dev] section of ~/.aws/credentials.
self.client = boto3.Session(profile_name=cli_profile_name).client('iam')
self.filename = self.output_directory + '/' + account_tag + '_' + cli_profile_name + '_iam_tuples.csv'
self.extract_file = open(self.filename, "w", newline = '')
self.csv_out = csv.writer(self.extract_file)
self.csv_out.writerow(('rolename', 'path', 'policyname', 'policytype', 'effect', 'service', 'action', 'arn', 'principal'))
``` |
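The core of the harvester is the action x resource "mux" that flattens each IAM statement into (service, action, arn) rows. A self-contained hedged sketch with a toy statement shows the shape of the output:
```python
# Toy inputs mirroring the lists built by parse_statement_action()/parse_statement_resource()
actions = [
    {"service": "s3", "Action": "GetObject"},
    {"service": "s3", "Action": "PutObject"},
]
resources = [{"service": "s3", "Resource": "arn:aws:s3:::my-bucket/*"}]

muxed = []
for a in actions:
    for r in resources:
        # Same pairing rule as Harvester.mux(): services must match, '*' matches anything
        if a["service"] == r["service"] or a["service"] == "*" or r["service"] == "*":
            muxed.append({"service": a["service"], "action": a["Action"], "arn": r["Resource"]})

for row in muxed:
    print(row)
# {'service': 's3', 'action': 'GetObject', 'arn': 'arn:aws:s3:::my-bucket/*'}
# {'service': 's3', 'action': 'PutObject', 'arn': 'arn:aws:s3:::my-bucket/*'}
```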
{
"source": "johntzwei/bleurt",
"score": 2
} |
#### File: bleurt/bleurt/checkpoint.py
```python
import json
import os
import tensorflow.compat.v1 as tf
flags = tf.flags
logging = tf.logging
FLAGS = flags.FLAGS
CONFIG_FILE = "bleurt_config.json"
WEIGHTS_FILE = os.path.join("variables", "variables")
def get_bleurt_params_from_flags_or_ckpt():
"""Reads BLEURT's parameters from either flags or a json config file."""
logging.info("Reading BLEURT parameters")
if FLAGS.init_bleurt_checkpoint:
logging.info("Reading paramter from BLEURT checkpoint: {}".format(
FLAGS.init_bleurt_checkpoint))
config = read_bleurt_config(FLAGS.init_bleurt_checkpoint)
logging.info("Reads parameters from flags.")
vocab_file = config["vocab_file"]
do_lower_case = config["do_lower_case"]
max_seq_length = config["max_seq_length"]
bert_config_file = config["bert_config_file"]
init_checkpoint = config["tf_checkpoint_variables"]
# The following test gives the user the option to override `max_seq_length`.
# This should only be used during fine-tuning.
if FLAGS.max_seq_length:
logging.warning("Overriding `max_seq_length`. This could have unintended"
" consequences.")
max_seq_length = FLAGS.max_seq_length
else:
logging.info("Reads parameters from flags.")
assert FLAGS.vocab_file, "vocab_file missing"
vocab_file = FLAGS.vocab_file
assert FLAGS.do_lower_case, "do_lower_case missing"
do_lower_case = FLAGS.do_lower_case
assert FLAGS.max_seq_length, "max_seq_length missing"
max_seq_length = FLAGS.max_seq_length
assert FLAGS.bert_config_file, "config_file missing"
bert_config_file = FLAGS.bert_config_file
assert FLAGS.init_checkpoint, "init_checkpoint missing"
init_checkpoint = FLAGS.init_checkpoint
return {
"vocab_file": vocab_file,
"do_lower_case": do_lower_case,
"max_seq_length": max_seq_length,
"bert_config_file": bert_config_file,
"init_checkpoint": init_checkpoint
}
def read_bleurt_config(path):
"""Reads and checks config file from a BLEURT checkpoint."""
assert tf.io.gfile.exists(path), \
"Could not find BLEURT checkpoint {}".format(path)
config_path = os.path.join(path, CONFIG_FILE)
assert tf.io.gfile.exists(config_path), \
("Could not find BLEURT config file {}. Are you sure {}"
" is a valid checkpoint?").format(config_path, path)
logging.info("Config file found, reading.")
with tf.io.gfile.GFile(config_path, "r") as f:
raw_config = f.read()
bleurt_config = json.loads(raw_config)
logging.info("Will load checkpoint {}".format(bleurt_config["name"]))
logging.info("Performs basic checks...")
for k in bleurt_config:
v = bleurt_config[k]
logging.info("... {}:{}".format(k, v))
if not isinstance(v, str):
continue
if k.endswith("_file") or k.endswith("_dir"):
fname = os.path.join(path, bleurt_config[k])
assert tf.io.gfile.exists(fname), "File {} missing.".format(fname)
bleurt_config[k] = fname
bleurt_config["chkpt_dir"] = path
bleurt_config["tf_checkpoint_variables"] = os.path.join(path, WEIGHTS_FILE)
return bleurt_config
def finalize_bleurt_checkpoint(tf_export_path):
"""Makes a BLEURT checkpoint from A TF Estimator export."""
logging.info("Finalizing BLEURT checkpoint.")
assert tf.io.gfile.exists(tf_export_path), "SavedModel export not found!"
bleurt_params = get_bleurt_params_from_flags_or_ckpt()
vocab_file = os.path.join(tf_export_path, "vocab.txt")
tf.io.gfile.copy(bleurt_params["vocab_file"], vocab_file, overwrite=True)
bert_config_file = os.path.join(tf_export_path, "bert_config.json")
tf.io.gfile.copy(
bleurt_params["bert_config_file"], bert_config_file, overwrite=True)
bleurt_config = {
"name": FLAGS.bleurt_checkpoint_name,
"vocab_file": "vocab.txt",
"bert_config_file": "bert_config.json",
"do_lower_case": bleurt_params["do_lower_case"],
"max_seq_length": bleurt_params["max_seq_length"]
}
config_string = json.dumps(bleurt_config)
config_file = os.path.join(tf_export_path, "bleurt_config.json")
with tf.io.gfile.GFile(config_file, "w+") as f:
f.write(config_string)
logging.info("BLEURT checkpoint created.")
``` |
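`read_bleurt_config` and `finalize_bleurt_checkpoint` together imply a small JSON file at the checkpoint root. A hedged illustration of the minimal shape it checks for (values are made up; the `*_file` entries are relative to the checkpoint directory):
```python
import json

# Hypothetical bleurt_config.json contents matching the keys written above
example_bleurt_config = {
    "name": "my-bleurt-checkpoint",
    "vocab_file": "vocab.txt",
    "bert_config_file": "bert_config.json",
    "do_lower_case": True,
    "max_seq_length": 128,
}

print(json.dumps(example_bleurt_config, indent=2))
```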
{
"source": "johnuberbacher/Django-Blog",
"score": 2
} |
#### File: Django-Blog/djangoBlogApp/views.py
```python
from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import Post, Category
from .forms import PostForm, UpdatePostForm
from django.urls import reverse_lazy, reverse
from django.http import HttpResponseRedirect
class HomeView(ListView):
model = Post
template_name = 'home.html'
ordering = ['-id']
class ArticleDetailView(DetailView):
model = Post
template_name = 'details.html'
def get_context_data(self, *args, **kwargs):
context = super(ArticleDetailView, self).get_context_data(*args, **kwargs)
getDetailInfo = get_object_or_404(Post, id=self.kwargs['pk'])
total_likes = getDetailInfo.total_likes() #models.py function
context['related_posts'] = Post.objects.all()
context['total_likes'] = total_likes
return context
class AddPostView(CreateView):
model = Post
form_class = PostForm
template_name = 'add_post.html'
#fields = '__all__'
class UpdatePostView(UpdateView):
model = Post
form_class = UpdatePostForm
template_name = 'update_post.html'
class DeletePostView(DeleteView):
model = Post
template_name = 'delete_post.html'
success_url = reverse_lazy('home')
class AddCategoryView(CreateView):
model = Category
template_name = 'add_category.html'
fields = '__all__'
def CategoryView(request, categories):
category_posts = Post.objects.filter(category=categories.replace('-',' '))
return render(request, 'categories.html', {'categories': categories.title().replace('-', ' '), 'category_posts': category_posts})
def LikeView(request, pk):
post = get_object_or_404(Post, id=request.POST.get('post_id'))
post.likes.add(request.user)
return HttpResponseRedirect(reverse('article_detail', args=[str(pk)] ))
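# Hypothetical URL wiring for the views above (illustrative sketch only; the route
# name 'article_detail' is inferred from the reverse() call in LikeView, and the
# other paths and names are assumptions):
#
#   from django.urls import path
#   urlpatterns = [
#       path('', HomeView.as_view(), name='home'),
#       path('article/<int:pk>/', ArticleDetailView.as_view(), name='article_detail'),
#       path('article/<int:pk>/like/', LikeView, name='like_post'),
#   ]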
``` |
{
"source": "johnugeorge/medperf",
"score": 3
} |
#### File: commands/dataset/dataset.py
```python
import typer
from medperf.commands.dataset import (
DatasetsList,
DatasetRegistration,
DataPreparation,
DatasetBenchmarkAssociation,
)
from medperf.decorators import clean_except
import medperf.config as config
app = typer.Typer()
@app.command("ls")
@clean_except
def datasets(
all: bool = typer.Option(False, help="Get all datasets from the platform")
):
"""Lists all datasets from the user by default.
Use all to get all datasets in the platform
"""
ui = config.ui
comms = config.comms
comms.authenticate()
DatasetsList.run(comms, ui, all)
@app.command("create")
@clean_except
def create(
benchmark_uid: int = typer.Option(
..., "--benchmark", "-b", help="UID of the desired benchmark"
),
data_path: str = typer.Option(
..., "--data_path", "-d", help="Location of the data to be prepared"
),
labels_path: str = typer.Option(
..., "--labels_path", "-l", help="Labels file location"
),
):
"""Runs the Data preparation step for a specified benchmark and raw dataset
"""
comms = config.comms
ui = config.ui
comms.authenticate()
data_uid = DataPreparation.run(benchmark_uid, data_path, labels_path, comms, ui)
DatasetRegistration.run(data_uid, comms, ui)
DatasetBenchmarkAssociation.run(data_uid, benchmark_uid, comms, ui)
ui.print("✅ Done!")
@app.command("submit")
@clean_except
def register(
data_uid: str = typer.Option(
..., "--data_uid", "-d", help="Unregistered Dataset UID"
)
):
"""Submits an unregistered Dataset instance to the backend
"""
comms = config.comms
ui = config.ui
comms.authenticate()
DatasetRegistration.run(data_uid, comms, ui)
ui.print("✅ Done!")
@app.command("associate")
@clean_except
def associate(
data_uid: str = typer.Option(
..., "--data_uid", "-d", help="Registered Dataset UID"
),
benchmark_uid: int = typer.Option(
..., "-benchmark_uid", "-b", help="Benchmark UID"
),
):
"""Associate a registered dataset with a specific benchmark.
The dataset and benchmark must share the same data preparation cube.
"""
comms = config.comms
ui = config.ui
comms.authenticate()
DatasetBenchmarkAssociation.run(data_uid, benchmark_uid, comms, ui)
ui.print("✅ Done!")
```
#### File: commands/dataset/list.py
```python
from tabulate import tabulate
from medperf.ui import UI
from medperf.comms import Comms
from medperf.entities import Dataset
class DatasetsList:
@staticmethod
def run(comms: Comms, ui: UI, all: bool = False):
"""List all local and remote users created by user.
Use "all" to list all remote datasets in the platform
Args:
comms (Comms): Communications instance
ui (UI): UI instance
all (bool, optional): List all datasets in the platform. Defaults to False.
"""
# Get local and remote datasets
local_dsets = Dataset.all(ui)
if all:
remote_dsets = comms.get_datasets()
else:
remote_dsets = comms.get_user_datasets()
local_uids = set([dset.generated_uid for dset in local_dsets])
remote_uids = set([dset["generated_uid"] for dset in remote_dsets])
# Build data table
headers = ["UID", "Name", "Data Preparation Cube UID", "Registered", "Local"]
# Get local dsets information
local_dsets_data = [
[
dset.generated_uid,
dset.name,
dset.preparation_cube_uid,
dset.uid is not None,
dset.generated_uid in remote_uids,
]
for dset in local_dsets
]
# Get remote dsets information filtered by local
remote_dsets_data = [
[dset["generated_uid"], dset["name"], "-", True, False]
for dset in remote_dsets
if dset["generated_uid"] not in local_uids
]
# Combine dsets
dsets_data = local_dsets_data + remote_dsets_data
tab = tabulate(dsets_data, headers=headers)
ui.print(tab)
```
#### File: commands/dataset/submit.py
```python
from medperf.ui import UI
from medperf.comms import Comms
from medperf.entities import Dataset
from medperf.utils import pretty_error
class DatasetRegistration:
@staticmethod
def run(data_uid: str, comms: Comms, ui: UI):
"""Registers a database to the backend.
Args:
data_uid (str): UID Hint of the unregistered dataset
"""
dset = Dataset(data_uid, ui)
if dset.uid:
pretty_error(
"This dataset has already been registered.", ui, add_instructions=False
)
if dset.request_registration_approval(ui):
ui.print("Uploading...")
dset.upload(comms)
dset.set_registration()
```
#### File: medperf/commands/login.py
```python
import os
import stat
from medperf.ui import UI
from medperf.comms import Comms
import medperf.config as config
from medperf.utils import storage_path
class Login:
@staticmethod
def run(comms: Comms, ui: UI):
"""Login to the medperf server. Must be done only once.
"""
cred_path = storage_path(config.credentials_path)
comms.login(ui)
token = comms.token
if os.path.exists(cred_path):
os.remove(cred_path)
with open(cred_path, "w") as f:
f.write(token)
os.chmod(cred_path, stat.S_IREAD)
```
#### File: commands/mlcube/associate.py
```python
from medperf.ui import UI
from medperf.comms import Comms
class AssociateCube:
@classmethod
def run(cls, cube_uid: str, benchmark_uid: int, comms: Comms, ui: UI):
"""Associates a cube with a given benchmark
Args:
cube_uid (str): UID of model MLCube
benchmark_uid (int): UID of benchmark
comms (Comms): Communication instance
ui (UI): UI instance
"""
with ui.interactive():
ui.text = "Creating association request"
comms.associate_cube(cube_uid, benchmark_uid)
ui.print("Association request created")
```
#### File: medperf/entities/dataset.py
```python
from typing import List
import yaml
import os
import logging
from medperf.ui import UI
from medperf.comms import Comms
import medperf.config as config
from medperf.utils import (
get_dsets,
approval_prompt,
pretty_error,
storage_path,
dict_pretty_print,
)
class Dataset:
"""
Class representing a Dataset
Datasets are stored locally in the Data Owner's machine. They contain
information regarding the prepared dataset, such as name and description,
general statistics and an UID generated by hashing the contents of the
data preparation output.
"""
def __init__(self, data_uid: int, ui: UI):
"""Creates a new dataset instance
Args:
data_uid (int): The dataset UID as found inside ~/medperf/data/
Raises:
NameError: If the dataset with the given UID can't be found, this is thrown.
"""
data_uid = self.__full_uid(data_uid, ui)
self.data_uid = data_uid
self.dataset_path = os.path.join(
storage_path(config.data_storage), str(data_uid)
)
self.data_path = os.path.join(self.dataset_path, "data")
registration = self.get_registration()
self.uid = registration["uid"]
self.name = registration["name"]
self.description = registration["description"]
self.location = registration["location"]
self.preparation_cube_uid = registration["data_preparation_mlcube"]
self.generated_uid = registration["generated_uid"]
self.input_data_hash = registration["input_data_hash"]
self.split_seed = registration["split_seed"]
self.metadata = registration["metadata"]
self.status = registration["status"]
self.state = registration["state"]
@property
def registration(self):
return {
"uid": self.uid,
"name": self.name,
"description": self.description,
"location": self.location,
"data_preparation_mlcube": self.preparation_cube_uid,
"input_data_hash": self.input_data_hash,
"generated_uid": self.generated_uid,
"split_seed": self.split_seed,
"metadata": self.metadata,
"status": self.status,
"state": self.state,
}
@classmethod
def all(cls, ui: UI) -> List["Dataset"]:
"""Gets and creates instances of all the locally prepared datasets
Returns:
List[Dataset]: a list of Dataset instances.
"""
logging.info("Retrieving all datasets")
data_storage = storage_path(config.data_storage)
try:
uids = next(os.walk(data_storage))[1]
except StopIteration:
logging.warning("Couldn't iterate over the dataset directory")
pretty_error("Couldn't iterate over the dataset directory")
tmp_prefix = config.tmp_reg_prefix
dsets = []
for uid in uids:
not_tmp = not uid.startswith(tmp_prefix)
reg_path = os.path.join(data_storage, uid, config.reg_file)
registered = os.path.exists(reg_path)
if not_tmp and registered:
dsets.append(cls(uid, ui))
return dsets
def __full_uid(self, uid_hint: str, ui: UI) -> str:
"""Returns the found UID that starts with the provided UID hint
Args:
uid_hint (int): a small initial portion of an existing local dataset UID
Raises:
NameError: If no dataset is found starting with the given hint, this is thrown.
NameError: If multiple datasets are found starting with the given hint, this is thrown.
Returns:
str: the complete UID
"""
dsets = get_dsets()
match = [uid for uid in dsets if uid.startswith(str(uid_hint))]
if len(match) == 0:
pretty_error(f"No dataset was found with uid hint {uid_hint}.", ui)
elif len(match) > 1:
pretty_error(f"Multiple datasets were found with uid hint {uid_hint}.", ui)
else:
return match[0]
def get_registration(self) -> dict:
"""Retrieves the registration information.
Returns:
dict: registration information as key-value pairs.
"""
regfile = os.path.join(self.dataset_path, config.reg_file)
with open(regfile, "r") as f:
reg = yaml.safe_load(f)
return reg
def set_registration(self):
regfile = os.path.join(self.dataset_path, config.reg_file)
with open(regfile, "w") as f:
yaml.dump(self.registration, f)
def request_association_approval(self, benchmark: "Benchmark", ui: UI) -> bool:
"""Prompts the user for aproval regarding the association of the dataset
with a given benchmark.
Args:
benchmark (Benchmark): Benchmark to be associated with
Returns:
bool: Whether the user approved the association or not
"""
msg = "Please confirm that you would like to associate"
msg += f" the dataset {self.name} with the benchmark {benchmark.name}."
msg += " [Y/n]"
approved = approval_prompt(msg, ui,)
return approved
def request_registration_approval(self, ui: UI) -> bool:
"""Prompts the user for approval concerning uploading the registration to the backend.
Returns:
bool: Whether the user gave consent or not.
"""
if self.status == "APPROVED":
return True
dict_pretty_print(self.registration, ui)
ui.print(
"Above is the information and statistics that will be registered to the database"
)
approved = approval_prompt(
"Do you approve the registration of the presented data to the MLCommons comms? [Y/n] ",
ui,
)
self.status = "APPROVED"
return approved
def upload(self, comms: Comms):
"""Uploads the registration information to the comms.
Args:
comms (Comms): Instance of the comms interface.
"""
dataset_uid = comms.upload_dataset(self.registration)
self.uid = dataset_uid
return self.uid
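# Minimal usage sketch (illustrative only; the UID hint is hypothetical and
# config.ui / config.comms are assumed to have been initialized by the CLI):
#
#   ui, comms = config.ui, config.comms
#   dset = Dataset("1a2b", ui)                 # resolves the full UID from the hint
#   if dset.request_registration_approval(ui):
#       dset.upload(comms)
#       dset.set_registration()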
```
#### File: medperf/entities/result.py
```python
import os
import yaml
import logging
from typing import List
from medperf.ui import UI
from medperf import config
from medperf.comms import Comms
from medperf.utils import storage_path, approval_prompt, dict_pretty_print, results_ids
class Result:
"""
Class representing a Result entry
Results are obtained after successfully running a benchmark
execution flow. They contain information regarding the
components involved in obtaining metrics results, as well as the
results themselves. This class provides methods for working with
benchmark results and how to upload them to the backend.
"""
def __init__(
self, result_path: str, benchmark_uid: str, dataset_uid: str, model_uid: str,
):
"""Creates a new result instance
Args:
result_path (str): Location of the results.yaml file.
benchmark_uid (str): UID of the executed benchmark.
dataset_uid (str): UID of the dataset used.
model_uid (str): UID of the model used.
"""
self.path = result_path
self.benchmark_uid = benchmark_uid
self.dataset_uid = dataset_uid
self.model_uid = model_uid
self.status = "PENDING"
self.results = {}
self.get_results()
self.uid = self.results.get("uid", None)
@classmethod
def all(cls, ui: UI) -> List["Result"]:
"""Gets and creates instances of all the user's results
"""
logging.info("Retrieving all results")
results_ids_tuple = results_ids(ui)
results_storage = storage_path(config.results_storage)
results = []
for result_ids in results_ids_tuple:
b_id, m_id, d_id = result_ids
results_file = os.path.join(
results_storage, b_id, m_id, d_id, config.results_filename
)
results.append(cls(results_file, b_id, d_id, m_id))
return results
def todict(self):
with open(self.path, "r") as f:
results = yaml.safe_load(f)
result_dict = {
"name": f"{self.benchmark_uid}_{self.model_uid}_{self.dataset_uid}",
"results": results,
"metadata": {},
"approval_status": self.status,
"benchmark": self.benchmark_uid,
"model": self.model_uid,
"dataset": self.dataset_uid,
}
return result_dict
def request_approval(self, ui: UI) -> bool:
"""Prompts the user for approval concerning uploading the results to the comms
Returns:
bool: Whether the user gave consent or not
"""
if self.status == "APPROVED":
return True
dict_pretty_print(self.todict(), ui)
ui.print("Above are the results generated by the model")
approved = approval_prompt(
"Do you approve uploading the presented results to the MLCommons comms? [Y/n]",
ui,
)
return approved
def upload(self, comms: Comms):
"""Uploads the results to the comms
Args:
comms (Comms): Instance of the communications interface.
"""
result_uid = comms.upload_results(self.todict())
self.uid = result_uid
self.results["uid"] = result_uid
self.set_results()
def set_results(self):
write_access = os.access(self.path, os.W_OK)
logging.debug(f"file has write access? {write_access}")
if not write_access:
logging.debug("removing outdated and inaccessible results")
os.remove(self.path)
with open(self.path, "w") as f:
yaml.dump(self.results, f)
def get_results(self):
with open(self.path, "r") as f:
self.results = yaml.safe_load(f)
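# Minimal usage sketch (illustrative only; the UIDs are hypothetical and
# config.ui / config.comms are assumed to be initialized by the CLI):
#
#   path = os.path.join(storage_path(config.results_storage), "1", "2", "3", config.results_filename)
#   result = Result(path, benchmark_uid="1", dataset_uid="3", model_uid="2")
#   if result.request_approval(config.ui):
#       result.upload(config.comms)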
```
#### File: commands/dataset/test_submit.py
```python
import pytest
from medperf.entities import Dataset
from medperf.tests.utils import rand_l
from medperf.commands.dataset import DatasetRegistration
PATCH_REGISTER = "medperf.commands.dataset.submit.{}"
@pytest.fixture
def dataset(mocker):
dset = mocker.create_autospec(spec=Dataset)
mocker.patch(PATCH_REGISTER.format("Dataset"), return_value=dset)
return dset
@pytest.mark.parametrize("data_uid", [str(x) for x in rand_l(1, 5000, 5)])
def test_run_retrieves_specified_dataset(mocker, comms, ui, dataset, data_uid):
# Arrange
dataset.uid = None
spy = mocker.patch(PATCH_REGISTER.format("Dataset"), return_value=dataset)
# Act
DatasetRegistration.run(data_uid, comms, ui)
# Assert
spy.assert_called_once_with(data_uid, ui)
@pytest.mark.parametrize("uid", rand_l(1, 5000, 5))
def test_run_fails_if_dataset_already_registered(mocker, comms, ui, dataset, uid):
# Arrange
dataset.uid = uid
spy = mocker.patch(
PATCH_REGISTER.format("pretty_error"),
side_effect=lambda *args, **kwargs: exit(),
)
# Act
with pytest.raises(SystemExit):
DatasetRegistration.run("1", comms, ui)
# Assert
spy.assert_called_once()
def test_run_passes_if_dataset_has_no_uid(mocker, comms, ui, dataset):
# Arrange
dataset.uid = None
spy = mocker.patch(
PATCH_REGISTER.format("pretty_error"),
side_effect=lambda *args, **kwargs: exit(),
)
# Act
DatasetRegistration.run("1", comms, ui)
# Assert
spy.assert_not_called()
def test_run_requests_approval(mocker, comms, ui, dataset):
# Arrange
dataset.uid = None
spy = mocker.patch.object(
dataset, "request_registration_approval", return_value=True
)
# Act
DatasetRegistration.run("1", comms, ui)
# Assert
spy.assert_called_once()
@pytest.mark.parametrize("approved", [True, False])
class TestWithApproval:
def test_run_uploads_dataset_if_approved(
self, mocker, comms, ui, dataset, approved
):
# Arrange
dataset.uid = None
mocker.patch.object(
dataset, "request_registration_approval", return_value=approved
)
spy = mocker.patch.object(dataset, "upload")
# Act
DatasetRegistration.run("1", comms, ui)
# Assert
if approved:
spy.assert_called_once()
else:
spy.assert_not_called()
def test_run_updates_registration_if_approved(
self, mocker, comms, ui, dataset, approved
):
# Arrange
dataset.uid = None
mocker.patch.object(
dataset, "request_registration_approval", return_value=approved
)
spy = mocker.patch.object(dataset, "set_registration")
# Act
DatasetRegistration.run("1", comms, ui)
# Assert
if approved:
spy.assert_called_once()
else:
spy.assert_not_called()
```
#### File: commands/result/test_create.py
```python
import pytest
from unittest.mock import call
from medperf.tests.utils import rand_l
from medperf.commands.result import BenchmarkExecution
from medperf.entities import Dataset, Benchmark, Cube
PATCH_EXECUTION = "medperf.commands.result.create.{}"
@pytest.fixture
def cube(mocker):
def cube_gen():
cube = mocker.create_autospec(spec=Cube)
cube.uid = 1
return cube
return cube_gen
@pytest.fixture
def execution(mocker, comms, ui, cube):
mock_dset = mocker.create_autospec(spec=Dataset)
mock_bmark = mocker.create_autospec(spec=Benchmark)
mocker.patch(PATCH_EXECUTION.format("init_storage"))
mocker.patch(PATCH_EXECUTION.format("Dataset"), side_effect=mock_dset)
mocker.patch(PATCH_EXECUTION.format("Benchmark"), side_effect=mock_bmark)
exec = BenchmarkExecution(0, 0, 0, comms, ui)
exec.dataset.uid = 1
exec.dataset.data_uid = "data_uid"
exec.dataset.preparation_cube_uid = "prep_cube"
exec.benchmark.data_preparation = "prep_cube"
exec.benchmark.models = [0]
exec.evaluator = cube()
exec.model_cube = cube()
return exec
def test_validate_fails_if_preparation_cube_mismatch(mocker, execution):
# Arrange
execution.dataset.preparation_cube_uid = "dset_prep_cube"
execution.benchmark.data_preparation = "bmark_prep_cube"
spy = mocker.patch(
PATCH_EXECUTION.format("pretty_error"),
side_effect=lambda *args, **kwargs: exit(),
)
# Act
with pytest.raises(SystemExit):
execution.validate()
# Assert
spy.assert_called_once()
@pytest.mark.parametrize("model_uid", rand_l(5, 5000, 5))
def test_validate_fails_if_model_not_in_benchmark(mocker, execution, model_uid):
# Arrange
execution.model_uid = model_uid # model not in benchmark
spy = mocker.patch(
PATCH_EXECUTION.format("pretty_error"),
side_effect=lambda *args, **kwargs: exit(),
)
# Act
with pytest.raises(SystemExit):
execution.validate()
# Assert
spy.assert_called_once()
def test_validate_passes_under_right_conditions(mocker, execution):
# Arrange
spy = mocker.patch(PATCH_EXECUTION.format("pretty_error"))
# Act
execution.validate()
# Assert
spy.assert_not_called()
@pytest.mark.parametrize("evaluator_uid", rand_l(1, 5000, 5))
@pytest.mark.parametrize("model_uid", rand_l(1, 5000, 5))
def test_get_cubes_retrieves_expected_cubes(
mocker, execution, evaluator_uid, model_uid
):
# Arrange
spy = mocker.patch(
PATCH_EXECUTION.format("BenchmarkExecution._BenchmarkExecution__get_cube")
)
execution.benchmark.evaluator = evaluator_uid
execution.model_uid = model_uid
evaluator_call = call(evaluator_uid, "Evaluator")
model_call = call(model_uid, "Model")
calls = [evaluator_call, model_call]
# Act
execution.get_cubes()
# Assert
spy.assert_has_calls(calls)
@pytest.mark.parametrize("cube_uid", rand_l(1, 5000, 5))
@pytest.mark.parametrize("name", [str(x) for x in rand_l(1, 500, 1)])
def test__get_cube_retrieves_cube(mocker, execution, cube_uid, name):
# Arrange
comms = execution.comms
spy = mocker.patch(PATCH_EXECUTION.format("Cube.get"))
mocker.patch(PATCH_EXECUTION.format("check_cube_validity"))
# Act
execution._BenchmarkExecution__get_cube(cube_uid, name)
# Assert
spy.assert_called_once_with(cube_uid, comms)
def test__get_cube_checks_cube_validity(mocker, execution, cube):
# Arrange
ui = execution.ui
mocker.patch(PATCH_EXECUTION.format("Cube.get"), return_value=cube)
spy = mocker.patch(PATCH_EXECUTION.format("check_cube_validity"))
# Act
execution._BenchmarkExecution__get_cube(1, "test")
# Assert
spy.assert_called_once_with(cube, ui)
def test_run_cubes_executes_expected_cube_tasks(mocker, execution):
# Arrange
execution.dataset.data_path = "data_path"
execution.model_cube.cube_path = "cube_path"
model_spy = mocker.patch.object(execution.model_cube, "run")
eval_spy = mocker.patch.object(execution.evaluator, "run")
mocker.patch("os.path.join", return_value="")
mocker.patch(
PATCH_EXECUTION.format("results_path"), return_value="",
)
# Act
execution.run_cubes()
# Assert
assert model_spy.call_count == 1
assert model_spy.call_args_list[0][1]["task"] == "infer"
assert eval_spy.call_count == 1
assert eval_spy.call_args_list[0][1]["task"] == "evaluate"
def test_run_executes_expected_flow(mocker, comms, ui, execution):
# Arrange
val_spy = mocker.patch(PATCH_EXECUTION.format("BenchmarkExecution.validate"))
get_spy = mocker.patch(PATCH_EXECUTION.format("BenchmarkExecution.get_cubes"))
run_spy = mocker.patch(PATCH_EXECUTION.format("BenchmarkExecution.run_cubes"))
mocker.patch(PATCH_EXECUTION.format("cleanup"))
# Act
BenchmarkExecution.run(1, 1, 1, comms, ui)
# Assert
val_spy.assert_called_once()
get_spy.assert_called_once()
run_spy.assert_called_once()
```
#### File: tests/mocks/cube.py
```python
class MockCube:
def __init__(self, is_valid):
self.name = "Test"
self.valid = is_valid
def is_valid(self):
return self.valid
def run(self):
pass
```
#### File: medperf/ui/stdin.py
```python
from contextlib import contextmanager
from .ui import UI
class StdIn(UI):
"""
Class for using sys.stdin/sys.stdout exclusively. Used mainly for automating
execution with class-like objects. Using only basic IO methods ensures that
piping from the command-line. Should not be used in normal execution, as
hidden prompts and interactive prints will not work as expected.
"""
def print(self, msg: str = ""):
return print(msg)
def print_error(self, msg: str):
return self.print(msg)
def start_interactive(self):
pass
def stop_interactive(self):
pass
@contextmanager
def interactive(self):
yield self
@property
def text(self):
return ""
@text.setter
def text(self, msg: str = ""):
return
def prompt(self, msg: str) -> str:
return input(msg)
def hidden_prompt(self, msg: str) -> str:
return self.prompt(msg)
```
#### File: medperf/ui/ui.py
```python
from abc import ABC, abstractmethod
from contextlib import contextmanager
class UI(ABC):
@abstractmethod
def print(self, msg: str = ""):
"""Display a message to the interface. If on interactive session overrides
previous message
"""
@abstractmethod
def print_error(self, msg: str):
"""Display an error message to the interface"""
@abstractmethod
def start_interactive(self):
"""Initialize an interactive session for animations or overriding messages.
If the UI doesn't support this, the function can be left empty.
"""
@abstractmethod
def stop_interactive(self):
"""Terminate an interactive session.
If the UI doesn't support this, the function can be left empty.
"""
@abstractmethod
@contextmanager
def interactive(self):
"""Context managed interactive session. Expected to yield the same instance
"""
@abstractmethod
def text(self, msg: str):
"""Displays a messages that overwrites previous messages if they were created
during an interactive session.
If not supported or not on an interactive session, it is expected to fallback
to the UI print function.
Args:
msg (str): message to display
"""
@abstractmethod
def prompt(self, msg: str) -> str:
"""Displays a prompt to the user and waits for an answer"""
@abstractmethod
def hidden_prompt(self, msg: str) -> str:
"""Displays a prompt to the user and waits for an aswer. User input is not displayed
Args:
msg (str): message to use for the prompt
Returns:
str: user input
"""
```
#### File: cli/medperf/utils.py
```python
from __future__ import annotations
from pexpect import spawn
import logging
from typing import List, Tuple
from datetime import datetime
import hashlib
import os
from shutil import rmtree
import tarfile
import yaml
from pathlib import Path
from colorama import Fore, Style
import re
import medperf.config as config
from medperf.ui import UI
def storage_path(subpath: str):
"""Helper funciton that converts a path to storage-related path"""
return os.path.join(config.storage, subpath)
def get_file_sha1(path: str) -> str:
"""Calculates the sha1 hash for a given file.
Args:
path (str): Location of the file of interest.
Returns:
str: Calculated hash
"""
BUF_SIZE = 65536
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
def init_storage():
"""Builds the general medperf folder structure.
"""
parent = config.storage
data = storage_path(config.data_storage)
cubes = storage_path(config.cubes_storage)
results = storage_path(config.results_storage)
tmp = storage_path(config.tmp_storage)
dirs = [parent, data, cubes, results, tmp]
for dir in dirs:
if not os.path.isdir(dir):
logging.info(f"Creating {dir} directory")
os.mkdir(dir)
def cleanup():
"""Removes clutter and unused files from the medperf folder structure.
"""
if os.path.exists(storage_path(config.tmp_storage)):
logging.info("Removing temporary data storage")
rmtree(storage_path(config.tmp_storage), ignore_errors=True)
dsets = get_dsets()
prefix = config.tmp_reg_prefix
unreg_dsets = [dset for dset in dsets if dset.startswith(prefix)]
for dset in unreg_dsets:
logging.info("Removing unregistered dataset")
dset_path = os.path.join(storage_path(config.data_storage), dset)
if os.path.exists(dset_path):
rmtree(dset_path, ignore_errors=True)
def get_dsets() -> List[str]:
"""Retrieves the UID of all the datasets stored locally.
Returns:
List[str]: UIDs of prepared datasets.
"""
dsets = next(os.walk(storage_path(config.data_storage)))[1]
return dsets
def pretty_error(msg: str, ui: "UI", clean: bool = True, add_instructions=True):
"""Prints an error message with typer protocol and exits the script
Args:
msg (str): Error message to show to the user
clean (bool, optional):
Run the cleanup process before exiting. Defaults to True.
add_instructions (bool, optional):
Show additional instructions to the user. Defaults to True.
"""
logging.warning(
"MedPerf had to stop execution. See logs above for more information"
)
if msg[-1] != ".":
msg = msg + "."
if add_instructions:
msg += f" See logs at {config.log_file} for more information"
ui.print_error(msg)
if clean:
cleanup()
exit(1)
def cube_path(uid: int) -> str:
"""Gets the path for a given cube.
Args:
uid (int): Cube UID.
Returns:
str: Location of the cube folder structure.
"""
return os.path.join(storage_path(config.cubes_storage), str(uid))
def generate_tmp_datapath() -> Tuple[str, str]:
"""Builds a temporary folder for prepared but yet-to-register datasets.
Returns:
str: General temporary folder location
str: Specific data path for the temporary dataset
"""
dt = datetime.utcnow()
ts = str(int(datetime.timestamp(dt)))
tmp = config.tmp_reg_prefix + ts
out_path = os.path.join(storage_path(config.data_storage), tmp)
out_path = os.path.abspath(out_path)
out_datapath = os.path.join(out_path, "data")
if not os.path.isdir(out_datapath):
logging.info(f"Creating temporary dataset path: {out_datapath}")
os.makedirs(out_datapath)
return out_path, out_datapath
def check_cube_validity(cube: "Cube", ui: "UI"):
"""Helper function for pretty printing the cube validity process.
Args:
cube (Cube): Cube to check for validity
ui (UI): Instance of an UI implementation
"""
logging.info(f"Checking cube {cube.name} validity")
ui.text = "Checking cube MD5 hash..."
if not cube.is_valid():
pretty_error("MD5 hash doesn't match")
logging.info(f"Cube {cube.name} is valid")
ui.print(f"> {cube.name} MD5 hash check complete")
def untar_additional(add_filepath: str) -> str:
"""Untars and removes the additional_files.tar.gz file
Args:
add_filepath (str): Path where the additional_files.tar.gz file can be found.
Returns:
str: location where the untared files can be found.
"""
logging.info(f"Uncompressing additional_files.tar.gz at {add_filepath}")
addpath = str(Path(add_filepath).parent)
tar = tarfile.open(add_filepath)
tar.extractall(addpath)
tar.close()
os.remove(add_filepath)
return addpath
def approval_prompt(msg: str, ui: "UI") -> bool:
"""Helper function for prompting the user for things they have to explicitly approve.
Args:
msg (str): What message to ask the user for approval.
Returns:
bool: Whether the user explicitly approved or not.
"""
logging.info("Prompting for user's approval")
approval = None
while approval is None or approval not in ("y", "n"):
approval = ui.prompt(msg.strip() + " ").lower()
logging.info(f"User answered approval with {approval}")
return approval == "y"
def dict_pretty_print(in_dict: dict, ui: "UI"):
"""Helper function for distinctively printing dictionaries with yaml format.
Args:
in_dict (dict): dictionary to print
"""
logging.debug(f"Printing dictionary to the user: {in_dict}")
ui.print()
ui.print("=" * 20)
in_dict = {k: v for (k, v) in in_dict.items() if v is not None}
ui.print(yaml.dump(in_dict))
ui.print("=" * 20)
def combine_proc_sp_text(proc: spawn, ui: "UI") -> str:
"""Combines the output of a process and the spinner.
Joins any string captured from the process with the
spinner current text. Any strings ending with any other
character from the subprocess will be returned later.
Args:
proc (spawn): a pexpect spawned child
ui (UI): An instance of an UI implementation
Returns:
str: all non-carriage-return-ending string captured from proc
"""
static_text = ui.text
proc_out = ""
while proc.isalive():
line = byte = proc.read(1)
while byte and not re.match(b"[\r\n]", byte):
byte = proc.read(1)
line += byte
if not byte:
break
line = line.decode("utf-8", "ignore")
if line:
# add to proc_out list for logging
proc_out += line
ui.text = (
f"{static_text} {Fore.WHITE}{Style.DIM}{line.strip()}{Style.RESET_ALL}"
)
return proc_out
def get_folder_sha1(path: str) -> str:
"""Generates a hash for all the contents of the folder. This procedure
hashes all of the files in the folder, sorts them and then hashes that list.
Args:
path (str): Folder to hash
Returns:
str: sha1 hash of the whole folder
"""
hashes = []
for root, _, files in os.walk(path, topdown=False):
for file in files:
filepath = os.path.join(root, file)
hashes.append(get_file_sha1(filepath))
hashes = sorted(hashes)
sha1 = hashlib.sha1()
for hash in hashes:
sha1.update(hash.encode("utf-8"))
return sha1.hexdigest()
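# Example of the behavior described in the docstring above (illustrative only;
# the directories are hypothetical). Because per-file hashes are sorted before
# being combined, the result does not depend on os.walk ordering:
#
#   uid_a = get_folder_sha1("/tmp/prepared_dataset")
#   uid_b = get_folder_sha1("/tmp/prepared_dataset_copy")   # same contents -> same hash
#   assert uid_a == uid_b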
def results_path(benchmark_uid, model_uid, data_uid):
out_path = storage_path(config.results_storage)
bmark_uid = str(benchmark_uid)
model_uid = str(model_uid)
data_uid = str(data_uid)
out_path = os.path.join(out_path, bmark_uid, model_uid, data_uid)
out_path = os.path.join(out_path, config.results_filename)
return out_path
def results_ids(ui: UI):
results_storage = storage_path(config.results_storage)
results_ids = []
try:
bmk_uids = next(os.walk(results_storage))[1]
for bmk_uid in bmk_uids:
bmk_storage = os.path.join(results_storage, bmk_uid)
model_uids = next(os.walk(bmk_storage))[1]
for model_uid in model_uids:
bmk_model_storage = os.path.join(bmk_storage, model_uid)
data_uids = next(os.walk(bmk_model_storage))[1]
bmk_model_data_list = [
(bmk_uid, model_uid, data_uid) for data_uid in data_uids
]
results_ids += bmk_model_data_list
except StopIteration:
msg = "Couldn't iterate over the results directory"
logging.warning(msg)
pretty_error(msg, ui)
return results_ids
def setup_logger(logger, log_lvl):
fh = logging.FileHandler(config.log_file)
fh.setLevel(log_lvl)
logger.addHandler(fh)
def list_files(startpath):
tree_str = ""
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, "").count(os.sep)
indent = " " * 4 * (level)
tree_str += "{}{}/\n".format(indent, os.path.basename(root))
subindent = " " * 4 * (level + 1)
for f in files:
tree_str += "{}{}\n".format(subindent, f)
return tree_str
```
#### File: chexpert_prep/project/check.py
```python
import pandas as pd
import argparse
import os
import yaml
from utils import get_image_data_df
class Checker:
def __init__(self, data_path, data_file):
self.data_path = data_path
self.data_file = os.path.join(data_path, data_file)
self.df = pd.read_csv(self.data_file)
def run(self):
self.__check_na()
self.__check_images_data()
def __check_na(self):
na_series = self.df.isna().sum()
na_cols = ", ".join(na_series[na_series != 0].index)
assert na_series.sum() == 0, f"Some columns contain null values: {na_cols}"
def __check_images_data(self):
img_data = get_image_data_df(self.df, self.data_path)
assert img_data["width"].min() >= 320, "Image width is less than 320"
assert img_data["height"].min() >= 320, "Image width is less than 320"
assert img_data["min"].min() >= 0, "Image pixel range goes below 0"
assert (
img_data["max"].max() > 1
), "Image pixel is in float format. 8 byte format expected"
assert img_data["max"].max() <= 255, "Image pixel range goes beyond 255"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_path", "--data-path", type=str, required=True, help="prepared data path"
)
parser.add_argument(
"--params_file",
"--params-file",
type=str,
required=True,
help="Configuration file for the data-preparation step",
)
args = parser.parse_args()
with open(args.params_file, "r") as f:
params = yaml.full_load(f)
checker = Checker(args.data_path, params["output_datafile"])
checker.run()
```
#### File: chexpert_prep/project/utils.py
```python
import os
from PIL import Image
import numpy as np
def get_image_data_df(df, data_path):
img_df = df.apply(
lambda x: get_image_data(x["Path"], data_path), axis=1, result_type="expand"
)
img_df = img_df.rename(
{0: "width", 1: "height", 2: "min", 3: "max", 4: "mean", 5: "std"}, axis=1
)
return img_df
def get_image_data(img_path, data_path):
img_path = os.path.join(data_path, img_path)
with Image.open(img_path) as im:
img = np.array(im)
h, w = img.shape  # numpy arrays from PIL are (rows, cols) = (height, width)
min_val = img.min()
max_val = img.max()
mean_val = img.mean()
std_val = img.std()
return [w, h, min_val, max_val, mean_val, std_val]
```
#### File: metric/project/metric_utils.py
```python
import os
import matplotlib.pyplot as plt
# import cv2
from PIL import Image
import numpy as np
import nibabel as nib
# !pip install -q torch_snippets pytorch_model_summary
# from torch_snippets import *
from torchvision import transforms
# from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import vgg16_bn
import time
from typing import List
class CTScanDataset(Dataset):
"""
KAGGLE dataset.
Accredit to https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
"""
def __init__(self, data_dir, transform=None):
"""
Args:
data_dir: data folder directory
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.root_dir = data_dir
self.files = os.listdir(data_dir)
self.transform = transform
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# file_path = os.path.join(self.root_dir, self.files[idx])
with open(os.path.join(self.root_dir, self.files[idx]), "rb") as f:
data = np.load(f)
feature = data[:-1, :, :].astype("float32")
label = data[-1, :, :].astype("int8")
if self.transform:
feature = self.transform(feature)  # apply the optional transform to the feature volume
return feature, label
class ComboLoss(nn.Module):
def __init__(self, weight=None, size_average=True, alpha=0.5, ce_ratio=0.5):
super(ComboLoss, self).__init__()
self.alpha = alpha
self.ce_ratio = ce_ratio
def forward(self, inputs, targets, smooth=1):
e = 0.0000001
inputs = torch.sigmoid(inputs)
# flatten label and prediction tensors
inputs = inputs.contiguous().view(-1)
targets = targets.contiguous().view(-1)
# True Positives, False Positives & False Negatives
intersection = (inputs * targets).sum()
dice = (2.0 * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)
inputs = torch.clamp(inputs, e, 1.0 - e)
out = -(
self.alpha * (targets * torch.log(inputs))
+ ((1 - self.alpha) * (1.0 - targets) * torch.log(1.0 - inputs))
)
weighted_ce = out.mean(-1)
combo = (self.ce_ratio * weighted_ce) - ((1 - self.ce_ratio) * dice)
return combo
def conv(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
def up_conv(in_channels, out_channels):
return nn.Sequential(
nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2),
nn.ReLU(inplace=True),
)
# Calculate DICE score
def dice_coef(y_true, y_pred, smooth=0.0001):
y_true_f = y_true.flatten()
y_pred_f = y_pred.flatten()
# should y_pred be a label?
intersection = torch.sum(y_true_f * y_pred_f)
return (2.0 * intersection + smooth) / (
torch.sum(y_true_f) + torch.sum(y_pred_f) + smooth
)
# Calculate DICE score for multiple labels
def dice_coef_multilabel(y_true, y_pred, numLabels):
dice = 0
dice_classes = []
for index in range(numLabels):
dice_class = dice_coef(y_true[:, index, :], y_pred[:, index, :])
dice += dice_class
dice_classes.append(dice_class)
return dice / numLabels, dice_classes
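# Quick sanity-check sketch for the metrics above (illustrative only; tensor
# shapes are assumptions). A prediction identical to the target should yield a
# DICE score of ~1.0 for every label:
#
#   y = torch.zeros(2, 3, 16, 16)
#   y[:, 0] = 1.0                                   # everything labeled as class 0
#   score, per_class = dice_coef_multilabel(y, y, numLabels=3)
#   loss = ComboLoss(alpha=0.5, ce_ratio=0.5)(y, y)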
```
#### File: metric/project/mlcube.py
```python
import os
import yaml
import typer
import subprocess
app = typer.Typer()
def exec_python(cmd: str) -> None:
"""Execute a python script as a subprocess
Args:
cmd (str): command to run as would be written inside the terminal
"""
splitted_cmd = cmd.split()
process = subprocess.Popen(splitted_cmd, cwd=".")
process.wait()
@app.command("evaluate")
def evaluate(
labels: str = typer.Option(..., "--labels"),
predictions: str = typer.Option(..., "--predictions"),
parameters_file: str = typer.Option(..., "--parameters_file"),
output_path: str = typer.Option(..., "--output_path"),
):
labels_csv = labels
preds_csv = predictions
cmd = f"python3 app.py --labels_csv={labels_csv} --preds_csv={preds_csv} --parameters_file={parameters_file} --output_file={output_path}"
exec_python(cmd)
"""
@app.command("infer")
def infer(
labels: str = typer.Option(..., "--labels"),
predictions: str = typer.Option(..., "--predictions"),
parameters_file: str = typer.Option(..., "--parameters_file"),
output_path: str = typer.Option(..., "--output_path"),
):
with open(params_file, "r") as f:
params = yaml.safe_load(f)
#model_file = os.path.join(model_info, "unet_full.pth")
#names_file = os.path.join(data_path, "0024_49.npy")
#names_file = os.path.join(data_path)
#uppercase = params["uppercase"]
# data, additional files, output
#cmd = f"python3 app.py --data_p = {data_path} --model_info={model_file} --out={out_path}"
cmd = f"python3 app.py --labels_csv={labels_csv} --preds_csv={preds_csv} --parameters_file={parameters_file} --output_file={output_path}"
exec_python(cmd)
#if uppercase:
# cmd += f" --uppercase={uppercase}"
print("exec cmd")
exec_python(cmd)
"""
@app.command("hotfix")
def hotfix():
pass
if __name__ == "__main__":
print("Starting app")
app()
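# Illustrative invocation of the typer CLI defined above (all paths are
# hypothetical placeholders):
#
#   python3 mlcube.py evaluate \
#       --labels /workspace/labels.csv \
#       --predictions /workspace/predictions.csv \
#       --parameters_file /workspace/parameters.yaml \
#       --output_path /workspace/results.yaml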
```
#### File: server/benchmark/admin.py
```python
from django.contrib import admin
from .models import Benchmark
class BenchmarkAdmin(admin.ModelAdmin):
list_display = (
"name",
"description",
"docs_url",
"owner",
"is_valid",
"is_active",
"metadata",
"user_metadata",
"demo_dataset_tarball_url",
"demo_dataset_tarball_hash",
"demo_dataset_generated_uid",
"data_preparation_mlcube",
"reference_model_mlcube",
"data_evaluator_mlcube",
"dataset_list",
"model_list",
"approved_at",
"approval_status",
"created_at",
"modified_at",
)
def dataset_list(self, obj):
return ",".join([gp.dataset.name for gp in obj.benchmarkdataset_set.all()])
dataset_list.short_description = "Registered Datasets"
def model_list(self, obj):
return ",".join([gp.model_mlcube.name for gp in obj.benchmarkmodel_set.all()])
model_list.short_description = "Registered Models"
admin.site.register(Benchmark, BenchmarkAdmin)
```
#### File: server/benchmarkdataset/views.py
```python
from .models import BenchmarkDataset
from django.http import Http404
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework import status
from .permissions import IsAdmin, IsDatasetOwner, IsBenchmarkOwner
from .serializers import (
BenchmarkDatasetListSerializer,
DatasetApprovalSerializer,
)
class BenchmarkDatasetList(GenericAPIView):
permission_classes = [IsAdmin | IsBenchmarkOwner | IsDatasetOwner]
serializer_class = BenchmarkDatasetListSerializer
queryset = ""
def post(self, request, format=None):
"""
Associate a dataset to a benchmark
"""
serializer = BenchmarkDatasetListSerializer(
data=request.data, context={"request": request}
)
if serializer.is_valid():
serializer.save(initiated_by=request.user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class BenchmarkDatasetApproval(GenericAPIView):
serializer_class = BenchmarkDatasetListSerializer
queryset = ""
def get_object(self, pk):
try:
return BenchmarkDataset.objects.filter(dataset__id=pk)
except BenchmarkDataset.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
"""
Retrieve all benchmarks associated with a dataset
"""
benchmarkdataset = self.get_object(pk)
serializer = BenchmarkDatasetListSerializer(benchmarkdataset, many=True)
return Response(serializer.data)
class DatasetApproval(GenericAPIView):
permission_classes = [IsAdmin | IsBenchmarkOwner | IsDatasetOwner]
serializer_class = DatasetApprovalSerializer
queryset = ""
def get_object(self, dataset_id, benchmark_id):
try:
return BenchmarkDataset.objects.filter(
dataset__id=dataset_id, benchmark__id=benchmark_id
)
except BenchmarkDataset.DoesNotExist:
raise Http404
def get(self, request, pk, bid, format=None):
"""
Retrieve approval status of benchmark dataset associations
"""
benchmarkdataset = self.get_object(pk, bid)
serializer = DatasetApprovalSerializer(benchmarkdataset, many=True)
return Response(serializer.data)
def put(self, request, pk, bid, format=None):
"""
Update approval status of the last benchmark dataset association
"""
benchmarkdataset = self.get_object(pk, bid).order_by("-created_at").first()
serializer = DatasetApprovalSerializer(
benchmarkdataset, data=request.data, context={"request": request}
)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, bid, format=None):
"""
Delete a benchmark dataset association
"""
benchmarkdataset = self.get_object(pk, bid)
benchmarkdataset.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
```
#### File: server/benchmark/serializers.py
```python
from rest_framework import serializers
from django.utils import timezone
from .models import Benchmark
class BenchmarkSerializer(serializers.ModelSerializer):
class Meta:
model = Benchmark
fields = "__all__"
read_only_fields = ["owner", "approved_at", "approval_status"]
def validate(self, data):
owner = self.context["request"].user
pending_benchmarks = Benchmark.objects.filter(
owner=owner, approval_status="PENDING"
)
if len(pending_benchmarks) > 0:
raise serializers.ValidationError(
"User can own at most one pending benchmark"
)
return data
class BenchmarkApprovalSerializer(serializers.ModelSerializer):
class Meta:
model = Benchmark
read_only_fields = ["owner", "approved_at"]
fields = "__all__"
def update(self, instance, validated_data):
for k, v in validated_data.items():
setattr(instance, k, v)
if instance.approval_status != "PENDING":
instance.approved_at = timezone.now()
instance.save()
return instance
def validate(self, data):
owner = self.instance.owner
if "approval_status" in data:
if (
data["approval_status"] == "PENDING"
and self.instance.approval_status != "PENDING"
):
pending_benchmarks = Benchmark.objects.filter(
owner=owner, approval_status="PENDING"
)
if len(pending_benchmarks) > 0:
raise serializers.ValidationError(
"User can own at most one pending benchmark"
)
if (
data["approval_status"] != "PENDING"
and self.instance.state == "DEVELOPMENT"
):
raise serializers.ValidationError(
"User cannot approve or reject when benchmark is in development stage"
)
if self.instance.state == "OPERATION":
editable_fields = [
"is_valid",
"is_active",
"user_metadata",
"approval_status",
]
for k, v in data.items():
if k not in editable_fields:
if v != getattr(self.instance, k):
raise serializers.ValidationError(
"User cannot update non editable fields in Operation mode"
)
return data
```
#### File: server/dataset/models.py
```python
from django.db import models
from django.contrib.auth.models import User
class Dataset(models.Model):
DATASET_STATE = (
("DEVELOPMENT", "DEVELOPMENT"),
("OPERATION", "OPERATION"),
)
name = models.CharField(max_length=20)
description = models.CharField(max_length=20, blank=True)
location = models.CharField(max_length=100, blank=True)
owner = models.ForeignKey(User, on_delete=models.PROTECT)
input_data_hash = models.CharField(max_length=128)
generated_uid = models.CharField(max_length=128, unique=True)
split_seed = models.IntegerField()
data_preparation_mlcube = models.ForeignKey(
"mlcube.MlCube",
on_delete=models.PROTECT,
related_name="benchmark_preprocessor_mlcube",
)
is_valid = models.BooleanField(default=True)
state = models.CharField(
choices=DATASET_STATE, max_length=100, default="DEVELOPMENT"
)
generated_metadata = models.JSONField(default=dict, blank=True, null=True)
user_metadata = models.JSONField(default=dict, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Meta:
ordering = ["modified_at"]
```
#### File: server/dataset/serializers.py
```python
from rest_framework import serializers
from .models import Dataset
class DatasetSerializer(serializers.ModelSerializer):
class Meta:
model = Dataset
fields = "__all__"
read_only_fields = ["owner"]
class DatasetDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Dataset
fields = "__all__"
read_only_fields = ["owner"]
def validate(self, data):
if self.instance.state == "OPERATION":
editable_fields = ["is_valid", "user_metadata"]
for k, v in data.items():
if k not in editable_fields:
if v != getattr(self.instance, k):
raise serializers.ValidationError(
"User cannot update non editable fields in Operation mode"
)
return data
```
#### File: server/mlcube/models.py
```python
from django.db import models
from django.contrib.auth.models import User
class MlCube(models.Model):
MLCUBE_STATE = (
("DEVELOPMENT", "DEVELOPMENT"),
("OPERATION", "OPERATION"),
)
name = models.CharField(max_length=20, unique=True)
git_mlcube_url = models.CharField(max_length=256)
git_parameters_url = models.CharField(max_length=256)
tarball_url = models.CharField(max_length=256, blank=True)
tarball_hash = models.CharField(max_length=100, blank=True)
owner = models.ForeignKey(User, on_delete=models.PROTECT)
state = models.CharField(
choices=MLCUBE_STATE, max_length=100, default="DEVELOPMENT"
)
is_valid = models.BooleanField(default=True)
metadata = models.JSONField(default=dict, blank=True, null=True)
user_metadata = models.JSONField(default=dict, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "MlCubes"
ordering = ["modified_at"]
```
#### File: server/mlcube/views.py
```python
from django.http import Http404
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework import status
from .models import MlCube
from .serializers import MlCubeSerializer, MlCubeDetailSerializer
from .permissions import IsAdmin, IsMlCubeOwner
class MlCubeList(GenericAPIView):
serializer_class = MlCubeSerializer
queryset = ""
def get(self, request, format=None):
"""
List all mlcubes
"""
mlcubes = MlCube.objects.all()
serializer = MlCubeSerializer(mlcubes, many=True)
return Response(serializer.data)
def post(self, request, format=None):
"""
Creates a new mlcube
"""
serializer = MlCubeSerializer(data=request.data)
if serializer.is_valid():
serializer.save(owner=request.user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class MlCubeDetail(GenericAPIView):
serializer_class = MlCubeDetailSerializer
queryset = ""
def get_permissions(self):
if self.request.method == "PUT":
self.permission_classes = [IsAdmin | IsMlCubeOwner]
elif self.request.method == "DELETE":
self.permission_classes = [IsAdmin]
return super(self.__class__, self).get_permissions()
def get_object(self, pk):
try:
return MlCube.objects.get(pk=pk)
except MlCube.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
"""
Retrieve a mlcube instance.
"""
mlcube = self.get_object(pk)
serializer = MlCubeDetailSerializer(mlcube)
return Response(serializer.data)
def put(self, request, pk, format=None):
"""
Update a mlcube instance.
"""
mlcube = self.get_object(pk)
serializer = MlCubeDetailSerializer(mlcube, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
"""
Delete a mlcube instance.
"""
mlcube = self.get_object(pk)
mlcube.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
```
#### File: server/user/views.py
```python
from django.http import Http404
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework import status
from django.contrib.auth.models import User
from .serializers import UserSerializer
from .permissions import IsAdmin, IsOwnUser
class UserList(GenericAPIView):
permission_classes = [IsAdmin]
serializer_class = UserSerializer
queryset = ""
def get(self, request, format=None):
"""
List all users
"""
users = User.objects.all()
serializer = UserSerializer(users, many=True)
return Response(serializer.data)
def post(self, request, format=None):
"""
Creates a new user
"""
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserDetail(GenericAPIView):
serializer_class = UserSerializer
queryset = ""
def get_permissions(self):
if self.request.method == "PUT" or self.request.method == "GET":
self.permission_classes = [IsAdmin | IsOwnUser]
elif self.request.method == "DELETE":
self.permission_classes = [IsAdmin]
return super(self.__class__, self).get_permissions()
def get_object(self, pk):
try:
return User.objects.get(pk=pk)
except User.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
"""
Retrieve a user instance.
"""
user = self.get_object(pk)
serializer = UserSerializer(user)
return Response(serializer.data)
def put(self, request, pk, format=None):
"""
Update a user instance.
"""
user = self.get_object(pk)
serializer = UserSerializer(user, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
"""
Delete a user instance.
"""
user = self.get_object(pk)
user.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
``` |
{
"source": "johnugeorge/mlcube",
"score": 2
} |
#### File: mlcube_singularity/mlcube_singularity/singularity_run.py
```python
import os
import logging
import typing as t
from omegaconf import DictConfig, OmegaConf
from mlcube.shell import Shell
from mlcube.runner import (Runner, RunnerConfig)
__all__ = ['Config', 'SingularityRun']
from mlcube.validate import Validate
logger = logging.getLogger(__name__)
class Config(RunnerConfig):
""" Helper class to manage `singularity` environment configuration."""
DEFAULT = OmegaConf.create({
'runner': 'singularity',
'image': '${singularity.image}',
'image_dir': '${runtime.workspace}/.image',
'singularity': 'singularity',
'build_args': '--fakeroot',
'build_file': 'Singularity.recipe'
})
@staticmethod
def merge(mlcube: DictConfig) -> None:
mlcube.runner = OmegaConf.merge(mlcube.runner, mlcube.get('singularity', OmegaConf.create({})))
@staticmethod
def validate(mlcube: DictConfig) -> None:
validator = Validate(mlcube.runner, 'runner')
validator.check_unknown_keys(Config.DEFAULT.keys())\
.check_values(['image', 'image_dir', 'singularity'], str, blanks=False)
class SingularityRun(Runner):
CONFIG = Config
def __init__(self, mlcube: t.Union[DictConfig, t.Dict], task: t.Text) -> None:
super().__init__(mlcube, task)
def configure(self) -> None:
"""Build Singularity Image on a current host."""
s_cfg: DictConfig = self.mlcube.runner
# Get full path to a singularity image. By design, we compute it relative to {mlcube.root}/workspace.
image_uri: t.Text = os.path.join(s_cfg.image_dir, s_cfg.image)
if os.path.exists(image_uri):
logger.info("Image found (%s).", image_uri)
return
# Make sure a directory to store image exists. If paths are like "/opt/...", the call may fail.
os.makedirs(os.path.dirname(image_uri), exist_ok=True)
# Let's assume build context is the root MLCube directory
recipe_path: t.Text = self.mlcube.runtime.root
recipe_file: t.Text = os.path.join(recipe_path, s_cfg.build_file)
if not os.path.exists(recipe_file):
raise IOError(f"Singularity recipe not found: {recipe_file}")
Shell.run(
'cd', recipe_path, ';',
s_cfg.singularity, 'build', s_cfg.build_args, image_uri, s_cfg.build_file
)
def run(self) -> None:
""" """
image_uri: t.Text = os.path.join(self.mlcube.runner.image_dir, self.mlcube.runner.image)
if not os.path.exists(image_uri):
self.configure()
# Deal with user-provided workspace
Shell.sync_workspace(self.mlcube, self.task)
mounts, task_args = Shell.generate_mounts_and_args(self.mlcube, self.task)
logger.info(f"mounts={mounts}, task_args={task_args}")
volumes = Shell.to_cli_args(mounts, sep=':', parent_arg='--bind')
Shell.run(self.mlcube.runner.singularity, 'run', volumes, image_uri, ' '.join(task_args))
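# Sketch of the runner configuration this class expects after Config.merge has
# been applied, expressed as the merged OmegaConf node (illustrative only; the
# concrete image name is an assumption):
#
#   mlcube.runner = OmegaConf.create({
#       'runner': 'singularity',
#       'image': 'mlcube.sif',
#       'image_dir': '${runtime.workspace}/.image',
#       'singularity': 'singularity',
#       'build_args': '--fakeroot',
#       'build_file': 'Singularity.recipe',
#   })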
``` |
{
"source": "johnurbanik/ABM-SEIR",
"score": 3
} |
#### File: agentseir_vi/api_client/client.py
```python
import json
import requests
class APIClient:
"""
Simple API client for the Covid19 Tracking project API (https://covidtracking.com/data/api).
TODO: Incorporate pandas for JSON/CSV parsing so that data comes back in
a more standard, structured, usable format without all the extra columns.
For now, example of such code is available in notebooks/initialInvestigation
TODO: Extend this to query for specific states.
"""
ROUTES = {
'states': {'url': '/states', 'filter': 'state'},
'states_info': {'url': '/states/info', 'filter': 'state'},
'us': {'url': '/us'},
}
def __init__(self):
self._cache = {}
def clear(self):
"""Class instances cache all data indefinitely, call this to clear"""
self._cache = {}
def states(self, *states):
return self._get('states', states)
def states_info(self, *states):
return self._get('states_info', states)
def us(self):
return self._get('us')
def _get(self, key, filter_by=None):
route = self.ROUTES[key]
url = "https://covidtracking.com/api%s" % rout['url']
if key not in self._cache:
self._cache[key] = requests.get(url).json()
if filter_by:
fKey = route['filter']
return list(filter(lambda x: x[fKey] in filter_by, self._cache[key]))
else:
return self._cache[key]
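# Minimal usage sketch (illustrative only):
#
#   client = APIClient()
#   ny_and_ca = client.states("NY", "CA")   # current values for selected states
#   national = client.us()                  # US-level aggregates
#   client.clear()                          # drop the cached responses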
```
#### File: agentseir_vi/model/base.py
```python
import json
import mesa
from mesa import Agent, Model
from mesa.time import BaseScheduler
from mesa.datacollection import DataCollector
import jax.numpy as np
import jax.random as jrand
import numpyro as ny
import numpyro.distributions as dist
from agentseir_vi.model.utils import log_to_file
class Epidemic(BaseScheduler):
'''
A scheduler that steps through time for the agents of a model.
As opposed to modeling individual agents, as in standard mesa models, agents and their properties are
kept in a set of numpy arrays. This allows for use of numpyro and drastically improves the performance.
Thus, a scheduler over a model is used, and agent actions are controlled directly.
'''
def __init__(self, model):
# For now, just use super.
# If we want to change how time is stepped through (i.e. different phases)
# this would be where to do it.
self.PRNGKey = jrand.PRNGKey(0) # TODO: Better seed
self.params = model.params  # self.model is only set by super().__init__, so read params from the argument
super().__init__(model)
def step(self):
self.update_agent_states()  # Progress agents through the disease states
self.update_counts()  # Update the counts of the various states
if self.model.log_file:
self.model.write_logs() # Write some logs, if enabled
# NB: At some point, it may make sense to make agents move. For now, they're static.
############################# Transmission logic ###################################
# Get all the currently contagious agents, and have them infect new agents.
# TODO: include hospital transmission, vary transmissability by state.
contagious = np.asarray(
(self.model.epidemic_state == self.model.STATE_PRESYMPTOMATIC) |
(self.model.epidemic_state == self.model.STATE_ASYMPTOMATIC) |
(self.model.epidemic_state == self.model.STATE_SYMPTOMATIC)
).nonzero()
# For each contagious person, infect some of its neighbors based on their hygiene and the contagious person's social radius.
# Use jax.random instead of numpyro here to keep these deterministic.
# TODO: figure out a way to do this in a (more) vectorized manner. Probably some sort of kernel convolution method with each radius. Should also look into numpyro's scan.
for x,y in zip(*contagious):
radius = self.model.social_radius[x, y]
base_isolation = self.model.base_isolation[x, y]
nx, ny = np.meshgrid( # pylint: disable=unbalanced-tuple-unpacking
np.arange(x - radius, x + radius),
np.arange(y - radius, y + radius)
)
neighbor_inds = np.vstack([nx.ravel(), ny.ravel()])
# Higher base_isolation leads to less infection.
# TODO: modify isolation so that symptomatic agents isolate more.
infection_attempts = jrand.choice(
self.PRNGKey,
neighbor_inds,
shape=int(len(neighbor_inds) * (1 - base_isolation))
)
potentially_infected_hygiene = self.model.hygiene[infection_attempts[:, 0], infection_attempts[:, 1]]
susceptible = self.model.epidemic_state[infection_attempts[:, 0], infection_attempts[:, 1]] == self.model.STATE_SUSCEPTIBLE
indexer = jrand.bernoulli(
self.PRNGKey,
potentially_infected_hygiene.ravel(),
len(infection_attempts)
)
got_infected = np.zeros(self.model.epidemic_state.shape, dtype=np.bool_)
got_infected[potentially_infected_hygiene[indexer]] = True
# Set the date to become infectious
self.model.epidemic_state[got_infected & susceptible] = self.model.STATE_EXPOSED
self.model.date_infected[got_infected & susceptible] = self.time
self.model.date_contagious[got_infected & susceptible] = self.time + self.params.EXPOSED_PERIOD
def update_agent_states(self):
# Get all of the newly contagious agents, swap them to being contagious
newly_contagious = np.asarray(
(self.model.date_contagious <= self.time) &
(self.model.epidemic_state < self.model.STATE_PRESYMPTOMATIC)
).nonzero()
self.model.epidemic_state[newly_contagious] = self.model.STATE_PRESYMPTOMATIC
# Also set the time in which they will become symptomatic, recover, die, etc.
# This could also be done at transmission time.
self.model.date_symptomatic[newly_contagious] = self.time + self.params.INCUBATION_PERIOD
self.model.date_recovered[newly_contagious] = self.time + self.params.RECOVERY_PERIOD
self.model.date_hospitalized[newly_contagious] = self.time + self.params.INCUBATION_PERIOD + self.params.SYMPTOM_TO_HOSP_PERIOD
self.model.date_died[newly_contagious] = self.time + self.params.RECOVERY_PERIOD + self.params.SYMPTOM_TO_HOSP_PERIOD + self.params.HOSP_DEATH_PERIOD
# Progress presymptomatic to asymptomatic/symptomatic
newly_asymptomatic = np.asarray(
(self.model.epidemic_state == self.model.STATE_PRESYMPTOMATIC) &
(self.model.date_symptomatic <= self.time) &
~(self.params.SYMPTOMATIC)
).nonzero()
self.model.epidemic_state[newly_asymptomatic] = self.model.STATE_ASYMPTOMATIC
newly_symptomatic = np.asarray(
(self.model.epidemic_state == self.model.STATE_PRESYMPTOMATIC) &
(self.model.date_symptomatic <= self.time) &
            (self.params.SYMPTOMATIC)
).nonzero()
self.model.epidemic_state[newly_symptomatic] = self.model.STATE_SYMPTOMATIC
# Progress symptomatic to hospital
newly_hospitalized = np.asarray(
(self.model.will_be_hospitalized) &
            (self.model.date_hospitalized <= self.time) &
(self.model.epidemic_state == self.model.STATE_SYMPTOMATIC)
).nonzero()
self.model.epidemic_state[newly_hospitalized] = self.model.STATE_HOSPITALIZED
# Progress hospitalized to death
newly_dead = np.asarray(
(self.model.epidemic_state == self.model.STATE_HOSPITALIZED) &
(self.model.will_die) &
(self.model.date_died <= self.time)
).nonzero()
self.model.epidemic_state[newly_dead] = self.model.STATE_DEAD
# Progress recovered to recovered if they won't die
newly_recovered = np.asarray(
~(self.model.will_die) &
(self.model.date_recovered <= self.time) &
(self.model.epidemic_state < self.model.STATE_RECOVERED) # Small optimization?
).nonzero()
self.model.epidemic_state[newly_recovered] = self.model.STATE_RECOVERED
def update_counts(self):
self.model.susceptible_count[self.time] = (self.model.epidemic_state == self.model.STATE_SUSCEPTIBLE).sum()
self.model.exposed_count[self.time] = (self.model.epidemic_state == self.model.STATE_EXPOSED).sum()
self.model.presymptomatic_count[self.time] = (self.model.epidemic_state == self.model.STATE_PRESYMPTOMATIC).sum()
self.model.asymptomatic_count[self.time] = (self.model.epidemic_state == self.model.STATE_ASYMPTOMATIC).sum()
self.model.symptomatic_count[self.time] = (self.model.epidemic_state == self.model.STATE_SYMPTOMATIC).sum()
self.model.hospitalized_count[self.time] = (self.model.epidemic_state == self.model.STATE_HOSPITALIZED).sum()
self.model.infected_count[self.time] = self.model.presymptomatic_count[self.time] + self.model.asymptomatic_count[self.time] + self.model.symptomatic_count[self.time] + self.model.hospitalized_count[self.time]
self.model.recovered_count[self.time] = (self.model.epidemic_state == self.model.STATE_RECOVERED).sum()
self.model.died_count[self.time] = (self.model.epidemic_state == self.model.STATE_DEAD).sum()
class Population(Model):
'''
A population is a square grid of agents.
For now, they stay in the same place, and infect other agents around them
based on their risk tolerance, social radius, and their neighbors hygiene.
Additionally, agents modify their social radius based on sampling their neighborhood and their base_isolation.
'''
STATE_SUSCEPTIBLE = 0
STATE_EXPOSED = 1
STATE_PRESYMPTOMATIC = 2
STATE_SYMPTOMATIC = 3
STATE_ASYMPTOMATIC = 4
STATE_HOSPITALIZED = 5
STATE_DEAD = 6
STATE_RECOVERED = 7
def __init__(self, N, params, log_file=None):
'''
params: class or dict containing the global parameters for the model.
'''
self.log_file = log_file
self.params = params
self.num_steps = N
# Agents
# self.people = None # ID array for easy reference
self.age = None # Float 0-100
self.sex = None # Boolean, since we don't have good data for intersex people
self.risk_tolerance = None # Float, 0-1
self.risk_factors = None # Integer 0-5, number of co-morbidities
self.hygiene = None # Float, 0-1
'''
0 -> can stay home
1 -> private commuter
2 -> public transit commuter
3 -> essential worker
'''
self.worker_class = None
'''
0-> susceptible, 1-> exposed, 2-> presymptomatic,
3 -> symptomatic, 4-> asymptomatic, 5 -> hospitalized, 6 -> dead, 7 -> recovered
'''
self.epidemic_state = None
# self.location = None # Position on grid
self.social_radius = None # Int 0-10, how far out grid do they socialize with
self.base_isolation = None # How much attention they are paying to growth of epidemic locally
# Dates are all ints for the step number
self.date_infected = None
self.date_contagious = None
self.date_recovered = None
self.date_hospitalized = None
self.date_died = None
# Global params:
# self.lockdown_level = 0.0 # Float 0-1
# Counters
self.susceptible_count = np.zeros(shape=self.num_steps, dtype=np.int_)
self.exposed_count = np.zeros(shape=self.num_steps, dtype=np.int_)
self.presymptomatic_count = np.zeros(shape=self.num_steps, dtype=np.int_)
self.asymptomatic_count = np.zeros(shape=self.num_steps, dtype=np.int_)
self.symptomatic_count = np.zeros(shape=self.num_steps, dtype=np.int_)
self.hospitalized_count = np.zeros(shape=self.num_steps, dtype=np.int_)
self.infected_count = np.zeros(shape=self.num_steps, dtype=np.int_)
self.recovered_count = np.zeros(shape=self.num_steps, dtype=np.int_)
self.died_count = np.zeros(shape=self.num_steps, dtype=np.int_)
self.scheduler = Epidemic(self)
def init_agents(self, num_steps=1000, pop_size=(1e2, 1e2), initial_infections=2):
self.age = ny.sample('age', dist.Uniform(0, 100))
self.sex = ny.sample('sex', dist.Binomial(1, .5))
self.risk_tolerance = ny.sample('risk', dist.Beta(2, 5))
# self.risk_factors = ny.sample('health', dist.Binomial(5, .3))
self.hygiene = ny.sample('hygiene', dist.Beta(2, 5))
# self.worker_type = ny.sample('worker_type', dist.Categorical((.6, .1, .2, .1)))
        self.epidemic_state = ny.sample('state', dist.Binomial(1, initial_infections / (pop_size[0] * pop_size[1])))
self.social_radius = ny.sample('radius', dist.Binomial(10, .2))
self.base_isolation = ny.sample('base_isolation', dist.Beta(2, 2))
# TODO: make these depend on risk factors as well
self.will_be_hospitalized = ny.sample('hosp', dist.Binomial(1, self.params.HOSP_AGE_MAP[self.age]))
self.will_die = ny.sample('die', dist.Binomial(1, self.params.DEATH_MAP[self.age]))
# The lengths of the infection are handled on a per agent basis via scenarios, these are just placeholders.
self.date_infected = np.where(self.epidemic_state > 0, np.zeros(shape=pop_size), np.full(shape=pop_size, fill_value=np.inf))
self.date_contagious = np.where(self.epidemic_state > 0, np.ceil(self.params.EXPOSED_PERIOD), np.full(shape=pop_size, fill_value=np.inf))
self.date_symptomatic = np.full(shape=pop_size, fill_value=np.inf)
self.date_recovered = np.full(shape=pop_size, fill_value=np.inf)
self.date_hospitalized = np.full(shape=pop_size, fill_value=np.inf)
self.date_died = np.full(shape=pop_size, fill_value=np.inf)
def step(self):
self.scheduler.step()
def write_logs(self):
current_time = self.scheduler.time
data = dict(
            current_susceptible=int(self.susceptible_count[current_time]),
            current_exposed=int(self.exposed_count[current_time]),
            current_presymptomatic=int(self.presymptomatic_count[current_time]),
            current_asymptomatic=int(self.asymptomatic_count[current_time]),
            current_symptomatic=int(self.symptomatic_count[current_time]),
            current_hospitalized=int(self.hospitalized_count[current_time]),
            current_infected=int(self.infected_count[current_time]),
            current_recovered=int(self.recovered_count[current_time]),
            current_dead=int(self.died_count[current_time]),
total_contagious_count=int((self.date_contagious < current_time).sum()),
total_infected_count=int((self.date_infected < current_time).sum()),
total_hospitalized_count=int((self.date_hospitalized < current_time).sum()),
)
info = json.dumps(data)
log_to_file(self.log_file, info)
``` |
{
"source": "johnurbanik/prospector",
"score": 3
} |
#### File: prospector/pages/onboarding.py
```python
import streamlit as st
import pandas as pd
def onboarding(state):
st.title("20 Questions, Belief Distribution Edition")
st.write("---")
prompt = st.text_input("What is your Question?", state.q.prompt or "")
state.q.set_prompt(prompt) # For Validation
support_options = ['(-inf,inf)', '[a, inf)', '[a,b]']
support = st.selectbox("What type of support does your question have?", support_options, support_options.index(state.q.get_support_type()))
dtype_options = ['Numeric', 'Date']
state.q.dtype = st.selectbox("Are the values of your forecast domain dates or numbers?", dtype_options, dtype_options.index(state.q.dtype))
if support == support_options[0]:
neg_inf = True
bot_label = "What is the value you expect almost all results to be above"
else:
neg_inf = False
bot_label = "What is the minimum value that your question may occupy?"
if state.q.dtype == 'Numeric':
bottom = st.number_input(bot_label, value=0)
else:
bottom = pd.to_datetime(
st.date_input(
bot_label,
max_value=(pd.Timestamp.today() + pd.DateOffset(years=100)).to_pydatetime()
)
)
if support_options.index(support) in [0, 1]:
pos_inf = True
top_label = "What is the value you expect almost all results to be below"
else:
pos_inf = False
top_label = "What is the maximum value that your question may occupy?"
if state.q.dtype == 'Numeric':
top = st.number_input(top_label, value=100)
else:
top = pd.to_datetime(
st.date_input(
top_label,
value=(pd.Timestamp.today() + pd.DateOffset(years=10)).to_pydatetime(),
max_value=(pd.Timestamp.today() + pd.DateOffset(years=100)).to_pydatetime()
)
)
state.q.set_domain(bottom, top, neg_inf, pos_inf)
if st.button("Submit"):
state.page = "Dashboard"
state.q.initialize_program()
```
#### File: prospector/utilities/engine.py
```python
import streamlit as st
import numpy as np
from scipy.optimize import differential_evolution as dev
from scipy.optimize import Bounds, LinearConstraint, minimize
from mystic.coupler import and_
from mystic.solvers import diffev2, fmin_powell
from mystic.symbolic import generate_penalty, generate_constraint, generate_solvers, generate_conditions, replace_variables
from utilities.np_helper import shift
import json
# The evaluations are stochastic, so we can't cache for now.
# @st.cache(allow_output_mutation=True)
def evaluate(program):
if "objective" in program:
n = program["num_bins"]
# pen_statements = replace_variables(
# "\n".join([p["penalty"] for p in program["answers"]]),
# variables=[f"bin_{i}" for i in range(n)], markers="x"
# )
# pens = generate_penalty(generate_conditions(pen_statements, nvars=n))
# constraint_statements = replace_variables(
# "\n".join([p["constraint"] for p in program["constraints"]]),
# variables=[f"bin_{i}" for i in range(n)], markers="x"
# )
# cons = generate_constraint(generate_solvers(constraint_statements, nvars=n))
# # results = fmin_powell(
# # cost=program["objective"], x0=[1.0/n] * n,
# # bounds=program["bounds"],
# # constraints=cons,
# # penalty=pens, disp=False,
# # full_output=True
# # )
# results = diffev2(
# cost=program["objective"], x0=[(0.0, 1.0/n)] * n,
# npop=10*n, bounds=program["bounds"],
# scale=0.7, cross=0.5,
# maxiter=300,
# constraints=cons,
# penalty=pens, disp=False,
# full_output=True
# )
# st.write(results)
# return results
obj = [
lambda x: np.sum(x * np.ma.log(x).filled(0)),
# lambda x: np.nansum((shift(x, -1) - x)**2) * .5*n + np.nansum((shift(x, -2) - x)**2) * .25*n
]
pens = obj + [p['np_pen'] for p in program['answers']]
def obj_fun(z, pens):
return sum(f(z) for f in pens)
        return minimize(obj_fun, args=(pens,), x0=np.ones(n)/n, bounds=Bounds(np.zeros(n), np.ones(n)), constraints=LinearConstraint(np.ones(n), 1, 1))
else:
return ""
``` |
{
"source": "JohnUrban/pufferfish",
"score": 2
} |
#### File: pufferfish/pufferfish/CovBedClass.py
```python
import os
from collections import defaultdict
import numpy as np
import pandas as pd
##np.seterr(divide='raise', invalid='raise')
np.seterr(divide='ignore', invalid='raise')
##np.seterr(divide='ignore', invalid='ignore')
import rpy2.robjects as robjects
ksmooth = robjects.r['ksmooth']
kmeans = robjects.r['kmeans']
intvec = robjects.IntVector
fltvec = robjects.FloatVector
matrixr = robjects.r.matrix
r = robjects.r
from rpy2.robjects.packages import SignatureTranslatedAnonymousPackage as stap
from puffR import *
puffR = stap(puffRstring, 'puffR')
import sys, datetime
from scipy.stats import spearmanr
import pandas as pd ## 2020 - potentially begin converting to pandas code where better
#from scipy.stats.mstats import winsorize
def s50(counts, stages, x=[50]):
"""
    counts, stages, x are lists.
    Returns a dict mapping each value in x to its sX stage for the list of numbers "counts".
    Default: x=[50].
    Assumes all values in x are between 0 and 100.
    Interpretation: for x=50, returns the stage at which the cumulative count first reaches >= 50% of the total.
"""
n = len(counts)
x_to_stage = {e:0 for e in x}
count_sum = sum(counts)
total = 0
i=0
for e in sorted(x):
target = count_sum*e/100.0
while total < target and counts:
total += counts[i]
lastcount = counts[i]
laststage = stages[i]
i+=1
try:
x_to_stage[e] = laststage
except UnboundLocalError:
x_to_stage[e] = "."
return x_to_stage
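# Worked example for s50 (a sketch, not part of the original module):
#
#   s50(counts=[2, 4, 6, 8], stages=[1, 2, 3, 4])
#   # total = 20, so the x=50 target is 10.0; the running sum reaches 12 (>= 10)
#   # at stage 3, so this returns {50: 3}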
TMP_DIR = ".pufferfish_tmp_dir"
TMP_DIR = TMP_DIR[1:]
class CovBed(object):
def __init__(self,
covbedfile,
count_only=False,
replace=False,
replace_with='0',
replace_this='.',
stringcols=False):
## "replace" means if you see the "replace_this" character in the count column, make it "replace_with"
## Made to deal with "." --> 0 by default when replace used.
self.fopen = False
self.connection = None
self.file = covbedfile
self.stringcols = stringcols
self.start = {}
self.end = {}
self.count = {}
self.chromosomes = set([])
self.median = None
self.mean = None
self.sd = None
self.mad = None
self.rank = None
self.rankstd = None
self.sum = None
self.nbins = None
self.localmeds = None
self.count_only=count_only ## When False, start/end dicts are initialized, but remain empty: useful when comparing 2 bedgraphs of identical coords. See also MultiCovBed (though that currently requires a "stage file")
self._extract_data(replace, replace_with, replace_this)
def open(self):
if self.fopen:
self.close()
self.connection = open(self.file, 'r')
self.fopen = True
def close(self):
self.connection.close()
def _add_chromosome(self, chrom):
self.chromosomes.add(chrom)
self.start[chrom] = []
self.end[chrom] = []
self.count[chrom] = []
def _update_data(self, chrom, start, end, count):
if chrom not in self.chromosomes:
self._add_chromosome(chrom)
if not self.count_only: ## This allows the start/end dicts to be initialized, but remain empty
self.start[chrom].append(start)
self.end[chrom].append(end)
self.count[chrom].append(count)
def _finalize_data(self):
## convert set to list
self.chromosomes = sorted(list(self.chromosomes))
#convert lists to np arrays
for chrom in self.chromosomes:
self.start[chrom] = np.array(self.start[chrom])
self.end[chrom] = np.array(self.end[chrom])
self.count[chrom] = np.array(self.count[chrom])
def _extract_data(self, replace=False, replace_with='0', replace_this='.'):
self.open()
for line in self.connection:
chrom, start, end, count = line.strip().split()
if replace and count == replace_this:
count = replace_with
if self.stringcols:
self._update_data(chrom, start, end, float(count))
else:
self._update_data(chrom, int(start), int(end), float(count))
### JAN 9, 2018 -- I changed above int(float(count)) to float(count)
### At this point, I don't know what it might break...
## But now that I am using this more generally for signal data - not just counts - I need it as float
self._finalize_data()
self.close()
def get_mean(self):
if self.mean is None:
counts = np.concatenate(self.count.values())
self.mean = float(np.mean(counts))
self.sd = float(np.std(counts))
return self.mean
def get_sd(self):
if self.sd is None:
counts = np.concatenate(self.count.values())
self.mean = float(np.mean(counts))
self.sd = float(np.std(counts,ddof=1))
return self.sd
def _get_median(self):
counts = np.concatenate(self.count.values())
self.median = float(np.median(counts))
def _safe_median_(self, x, unsafe=0, safe=1, trimmedMeanFirst=True, meanFirst=True, extreme=0.25):
        ''' If the median is unsafe, make it safe.
            Optionally try checking and using the safety of the trimmed mean and/or mean first.'''
ans = np.median(x)
if ans == unsafe and trimmedMeanFirst:
sys.stderr.write('Unsafe 1\n')
            sort = sorted(x)
l = len(sort)
if l > 3: ## other wise, depending on extreme it will exclude none, or sides (equiv to median), or all
e = int(round(extreme*l))
if e < (l-e):
sort = sort[e:(l-e)]
ans = np.mean(sort)
if ans == unsafe and meanFirst:
sys.stderr.write('Unsafe 2\n')
ans = np.mean(x)
## Checks median or mean depending on safety of median and meanfirst option.
if ans == unsafe:
sys.stderr.write('Unsafe 3\n')
ans = safe
return ans
def _get_local_medians(self, halfwidth=10, besafe=True, unsafe=0, safe=1, trimmedMeanFirst=True, meanFirst=True, extreme=0.25):
''' halfwidth is the number of bins to each side.
First position, A, is halfwidth
Last position, Z, is L-halfwidth
Positions 0 to A get local median from 0-A+halfwidth+1.
Positions Z+1 to L get local median from Z-halfwidth to L.
All others get position, P-haldwidth to P+halfwidth+1.
Note: when besafe=True, if the median is the unsafe value, it is changed to a safe value.
when meanfirst=True and/or trimmedMeanFirst=True, it will check if the mean and/or trimmedMean gives a safe value, and use that.
trimmed mean takes precedent over mean; mean takes precedent over default safe value.
Use case: you likely plan to do local median normalization, but 0s will give ZeroDivError.
Pseudocounts are sometimes used to prevent this, as well as eliminating 0 bins.
This is just a catch/check.
        For trimming, the extreme proportion is subtracted from both ends of the sorted values before taking the mean (e.g. the mean of bins 5:14 inclusive for 25% of 20 bins).'''
self.localmeds = {}
for chrom in self.chromosomes:
counts = np.array(self.count[chrom])
L = len(self.count[chrom])
A = halfwidth
Z = L-halfwidth
self.localmeds[chrom] = np.zeros(L)
if halfwidth > L:
# Entire contig is just locally median norm'd
self.localmeds[chrom][0:L] = self._safe_median_( x = counts,
unsafe=unsafe, safe=safe,
trimmedMeanFirst=trimmedMeanFirst,
meanFirst=meanFirst, extreme=extreme )
else:
# Init positions
self.localmeds[chrom][0:A] = self._safe_median_( x = counts[0:(A+halfwidth+1)],
unsafe=unsafe, safe=safe,
trimmedMeanFirst=trimmedMeanFirst,
meanFirst=meanFirst, extreme=extreme )
# End positions
self.localmeds[chrom][(Z+1):L] = self._safe_median_( counts[(Z-halfwidth):L],
unsafe=unsafe, safe=safe,
trimmedMeanFirst=trimmedMeanFirst,
meanFirst=meanFirst, extreme=extreme )
if Z >= A:
''' If Z < A, then entire contig has already been median norm'd with end positions overwriting overlapping start positions.
If Z > A, then there is at least one position in the middle.'''
# Middle positions:
for i in range(A,Z+1):
self.localmeds[chrom][i] = self._safe_median_( x = counts[(i-halfwidth):(i+halfwidth+1)],
unsafe=unsafe, safe=safe,
trimmedMeanFirst=trimmedMeanFirst,
meanFirst=meanFirst, extreme=extreme )
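    # Window layout sketch for _get_local_medians (comment only, not original code):
    # with L = 100 bins on a contig and halfwidth = 10, A = 10 and Z = 90, so
    #   bins 0..9   take the median of bins 0..20   (left-edge window),
    #   bin  50     takes the median of bins 40..60 (full 21-bin window),
    #   bins 91..99 take the median of bins 80..99  (right-edge window).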
def _get_mad(self, relearn=False):
if self.median is None or relearn:
self._get_median()
counts = np.concatenate(self.count.values())
absdiffs = np.abs((counts - self.get_median()))
self.mad = float(np.median(absdiffs))
def _get_sum(self):
counts = np.concatenate(self.count.values())
self.sum = float(np.sum(counts))
def _get_nbins(self):
self.nbins = len(np.concatenate(self.count.values()))
def _rank_data(self):
counts = np.concatenate(self.count.values())
ranks = np.array(pd.Series(counts).rank())
assert len(counts) == len(ranks)
self.rankdict = {counts[i]:ranks[i] for i in range(len(counts))}
self.rank = {}
for chrom in self.count.keys():
self.rank[chrom] = [self.rankdict[e] for e in self.count[chrom]]
def _rank_standardize_data(self):
counts = np.concatenate(self.count.values())
ranks = np.array(pd.Series(counts).rank())
assert len(counts) == len(ranks)
highest = self.get_nbins()
lowest = 1.0
M = (lowest+highest)/2.0
self.rankdict = {counts[i]:((ranks[i]-M)/M) for i in range(len(counts))}
self.rankstd = {}
for chrom in self.count.keys():
self.rankstd[chrom] = [self.rankdict[e] for e in self.count[chrom]]
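    # Worked example for the rank standardization above (comment only, not original code):
    # with 5 bins total, M = (1 + 5) / 2 = 3, so ranks 1..5 map to
    # (rank - 3) / 3 = -0.667, -0.333, 0.0, 0.333, 0.667, i.e. centered on 0 and
    # approaching -1 and 1 as the number of bins grows.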
## counts = []
## for chrom in self.chromosomes:
## counts.append(self.count[chrom])
## self.median = float(np.median(counts))
def get_median(self, relearn=False):
if self.median is None or relearn:
self._get_median()
return self.median
def get_mad(self, relearn=False):
if self.mad is None or relearn:
self._get_mad()
return self.mad
def get_nbins(self):
if self.nbins is None:
self._get_nbins()
return self.nbins
def median_normalize_x(self, x, relearn=False):
#x is np.array
return x/self.get_median(relearn=relearn)
def robust_z_normalize_x(self, x, relearn=False):
#x is np.array
return (x-self.get_median(relearn=relearn))/self.get_mad(relearn=relearn)
def median_normalize_data(self, relearn=False):
if relearn:
self._get_median()
for chrom in self.chromosomes:
self.count[chrom] = self.median_normalize_x(self.count[chrom])
def local_median_normalize_data(self, halfwidth=10, relearn=False):
''' halfwidth is the number of bins to each side.
First position, A, is halfwidth
Last position, Z, is L-halfwidth
Positions 0 to A get local median from 0-A+halfwidth+1.
Positions Z+1 to L get local median from Z-halfwidth to L.
All others get position, P-haldwidth to P+halfwidth+1.'''
if relearn or self.localmeds is None:
self._get_local_medians(halfwidth=halfwidth)
for chrom in self.chromosomes:
self.count[chrom] = np.array(self.count[chrom]) / self.localmeds[chrom]
def robust_z_normalize_data(self, relearn=False):
if relearn:
self._get_median()
self._get_mad()
for chrom in self.chromosomes:
self.count[chrom] = self.robust_z_normalize_x(self.count[chrom])
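    # Numeric check for the robust z-score above (comment only, not original code):
    # with a global median of 10.0 and a MAD of 2.0, a bin with count 14.0 becomes
    # (14.0 - 10.0) / 2.0 = 2.0, i.e. two median absolute deviations above the median.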
def rank_normalize_data(self, relearn=False):
if self.rank is None or relearn:
self._rank_data()
for chrom in self.chromosomes:
self.count[chrom] = self.rank[chrom]
def rank_standardize_data(self, relearn=False):
if self.rankstd is None or relearn:
self._rank_standardize_data()
for chrom in self.chromosomes:
self.count[chrom] = self.rankstd[chrom]
def spxr_normalize_data(self, x=1e6, relearn=False):
''' '''
if self.sum is None or relearn:
self._get_sum()
scale_factor = float(x) / self.sum
self.scale_data(scale=scale_factor)
def scale_data(self, scale=1):
for chrom in self.chromosomes:
self.count[chrom] = scale*self.count[chrom]
def log2_data(self, scale=1):
for chrom in self.chromosomes:
self.count[chrom] = np.log2(self.count[chrom])
def log10_data(self, scale=1):
for chrom in self.chromosomes:
self.count[chrom] = np.log10(self.count[chrom])
def expanded_bdg(self, bdg):
##bdg is just what should be in the 4th column
string = ''
for chrom in self.chromosomes:
for i in range(len(self.start[chrom])):
string += ('\t').join([chrom, str(self.start[chrom][i]), str(self.end[chrom][i]), str(bdg[chrom][i])]) + "\n"
return string
def expanded_bdg_two_cols(self, bdg1, bdg2):
string = ''
for chrom in self.chromosomes:
for i in range(len( self.start[chrom] )):
string += ('\t').join([chrom, str(self.start[chrom][i]), str(self.end[chrom][i]), str(bdg1[chrom][i]), str(bdg2[chrom][i])]) + "\n"
return string
def collapsed_bdg(self, bdg):
##bdg is just what should be in the 4th column
string = ''
for chrom in self.chromosomes:
if len(self.start[chrom]) > 1:
#init
start = self.start[chrom][0]
value = bdg[chrom][0]
for i in range(1, len(self.start[chrom]) ):
if bdg[chrom][i] != value:
string += ('\t').join([chrom, str(start), str(self.end[chrom][i-1]), str(value)]) + "\n"
start = self.start[chrom][i]
value = bdg[chrom][i]
##finish chrom
string += ('\t').join([chrom, str(start), str(self.end[chrom][i]), str(value)]) + "\n"
else: #only 1 bin (very tiny contig)
                string += ('\t').join([chrom, str(self.start[chrom][0]), str(self.end[chrom][0]), str(bdg[chrom][0])]) + "\n"
return string
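    # Example of the collapsing above (comment only, not original code): three adjacent
    # 100-bp bins on "chr1" with values 2, 2, 5 are written as two variable-step lines,
    #   chr1    0    200    2
    #   chr1    200  300    5
    # instead of one line per bin as in expanded_bdg().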
def get_bdg(self, bdg, collapsed=False):
if not collapsed:
return self.expanded_bdg(bdg)
else:
return self.collapsed_bdg(bdg)
def filtered_bdg(self, relation = ">", value = 0, bdg=None):
##bdg is just what should be in the 4th column
## for this might typically be self.count
if bdg is None:
bdg = self.count
string = ''
if relation == "gt":
keep = lambda x: x > value
elif relation == "ge":
keep = lambda x: x >= value
elif relation == "lt":
keep = lambda x: x < value
elif relation == "le":
keep = lambda x: x <= value
elif relation == "eq":
keep = lambda x: x == value
elif relation == "ne":
keep = lambda x: x != value
for chrom in self.chromosomes:
for i in range(len(self.start[chrom])):
if keep(bdg[chrom][i]):
string += ('\t').join([chrom, str(self.start[chrom][i]), str(self.end[chrom][i]), str(bdg[chrom][i])]) + "\n"
return string
def __str__(self):
return self.get_bdg(self.count)
def get_chromosomes(self):
return self.chromosomes
def get_start_dict(self):
return self.start
def get_end_dict(self):
return self.end
def get_count_dict(self):
return self.count
def ksmooth_counts(self, bw=10000, rescueNaN=False, localWindow=5):
for chrom in self.chromosomes:
x = self.start[chrom]
y = self.count[chrom]
k = ksmooth(x = fltvec(x), y = fltvec(y), bandwidth = bw)
self.count[chrom] = np.array(k[1])
## RESCUE NANs
if rescueNaN:
# 1. Compute global mean of self.count[chrom]
mu = np.nanmean(self.count[chrom])
# 2. Calculate local means (pd and convert to np)
new = np.array(pd.DataFrame(self.count[chrom]).rolling(3, center=True, axis=0, min_periods=1).mean().transpose())[0]
# 3. Replace any NaN in new with global mean
new = np.where(np.isnan(new),mu,new)
# 4. Replace any NaN in self.count[chrom] w/ corresponding value in new
final = np.where(np.isnan(self.count[chrom]),new,self.count[chrom])
# 5. Overwrite self.count[chrom] - I know this can be done in #4, but felt like keeping it separate.
self.count[chrom] = final
def computeSkew(self):
        ''' Converts values, V, in counts to skew = 100 * (V[i]-V[i-1]) / (V[i]+V[i-1]).
            For i in 2:N. The first element is 0.'''
for chrom in self.chromosomes:
n = len(self.count[chrom])
Next = self.count[chrom][1:n]
Prev = self.count[chrom][:n-1]
Diff = (Next - Prev)*100.0 ## 100.0 to ensure floats
Sum = (Next + Prev)*1.0 ## 1.0 to ensure floats
Skew = [0.0] + list(Diff / Sum)
self.count[chrom] = np.array(Skew)
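    # Numeric check for the skew above (comment only, not original code): consecutive
    # bin values 4 then 6 give 100 * (6 - 4) / (6 + 4) = 20.0, while 6 then 4 give
    # -20.0; the first bin of every contig is set to 0.0.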
def computePercentChange(self):
        ''' Converts values, V, in counts to percent change = 100 * (V[i]-V[i-1]) / V[i-1].
            For i in 2:N. The first element is 0.'''
for chrom in self.chromosomes:
n = len(self.count[chrom])
Next = self.count[chrom][1:n]
Prev = self.count[chrom][:n-1]
Diff = (Next - Prev)*100.0 ## 100.0 to ensure floats, and make as Pct
PctChange = [0.0] + list(Diff / Prev)
self.count[chrom] = np.array(PctChange)
def computeSkewChange(self):
self.computeSkew()
for chrom in self.chromosomes:
n = len(self.count[chrom])
Next = self.count[chrom][1:n]
Prev = self.count[chrom][:n-1]
Diff = (Next - Prev)*1.0 ## 1.0 to ensure floats
            PctChange = [0.0] + list(Diff / 200.0) ## /200 b/c Skew was mult by 100 and I want to divide by 2 (similar to Hyrien segmentation of RFD)
self.count[chrom] = np.array(PctChange)
def computePercentChangeDerivative(self):
        ''' Applies computePercentChange twice, i.e. the percent change of the percent change.
            The first element of each contig is 0.'''
self.computePercentChange()
self.computePercentChange()
def normalize_to_other(self, other, pseudocount=0.01):
#other is another CovBed object with same bins from same genome
for chrom in self.chromosomes:
#print chrom
self.count[chrom] = (np.array(self.count[chrom])+pseudocount)/(np.array(other.count[chrom])+pseudocount)
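    # Numeric check for the ratio above (comment only, not original code): with the
    # default pseudocount of 0.01, a bin with 10 reads over a control bin with 5 reads
    # scores (10 + 0.01) / (5 + 0.01) ~= 2.0, and an empty bin over an empty control
    # bin scores 0.01 / 0.01 = 1.0 instead of raising a divide-by-zero error.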
def _opt_handle_zero_bins(self, other, chrom, setToControlDist=False, pseudoZeroBins=False, addMinOtherPlusOneToBoth=False):
t = np.array(self.count[chrom])
c = np.array(other.count[chrom])
if setToControlDist:
## Assumes we are working with Z scores
c = other.get_mad() * c + other.get_median()
t = other.get_mad() * t + other.get_median()
if pseudoZeroBins:
## slightly modify bins that have 0 in control (to avoid division)
g = c == 0
ng = c != 0
m = np.abs(c[ng]).min() ## /10.0
t[g] = t[g]+m
c[g] = c[g]+m
if addMinOtherPlusOneToBoth:
## Shift entire distro up -- not meant to be used with pseudoZeroBins, but won't throw error either.
m = np.abs(c.min()) + 1
t = t + m
c = c + m
return t, c
def pct_diff_from_other(self, other, setToControlDist=False, pseudoZeroBins=False, addMinOtherPlusOneToBoth=False):
#other is another CovBed object with same bins from same genome
#I'm not supporting pseudo counts at this point -- in favor of removing 0 bins from both samples
for chrom in self.chromosomes:
t, c = self._opt_handle_zero_bins(other, chrom, setToControlDist, pseudoZeroBins, addMinOtherPlusOneToBoth)
self.count[chrom] = 100.0*(t-c)/c
def pct_skew_given_other(self, other, setToControlDist=False, pseudoZeroBins=False, addMinOtherPlusOneToBoth=False):
#other is another CovBed object with same bins from same genome
#I'm not supporting pseudo counts at this point -- in favor of removing 0 bins from both samples
for chrom in self.chromosomes:
t, c = self._opt_handle_zero_bins(other, chrom, setToControlDist, pseudoZeroBins, addMinOtherPlusOneToBoth)
###self.count[chrom] = 100.0*(np.array(self.count[chrom]) - np.array(other.count[chrom]))/(np.abs(np.array(self.count[chrom])) + np.abs(np.array(other.count[chrom])))
self.count[chrom] = 100.0*(t - c)/(np.abs(t) + np.abs(c))
def subtract_other(self, other, pseudocount=0.01):
#other is another CovBed object with same bins from same genome
for chrom in self.chromosomes:
#print chrom
self.count[chrom] = np.array(self.count[chrom]) - np.array(other.count[chrom])
def normalize_with_glocalMedRatioNorm(self, other=None, pseudocount=0.01, globalweight=1, minlocalbins=3, minpropdata=0.1):
#other is another CovBed object with same bins from same genome
covd = self.create_local_medRatioNorm_dict(other=other,
pseudocount=pseudocount)
#print 'globalweight', globalweight
covmeds = self.get_params_from_local_medRatioNorm_dict_with_glocalweighting(covd=covd,
globalweight=globalweight,
minlocalbins=minlocalbins,
minpropdata=minpropdata)
for chrom in self.chromosomes:
norms = np.array(map(lambda x: covmeds[x], self.count[chrom]))
if other is not None:
self.count[chrom] = (1.0/norms)*(np.array(self.count[chrom])+pseudocount)/(np.array(other.count[chrom])+pseudocount)
else:
self.count[chrom] = norms*(np.array(self.count[chrom])+pseudocount)
print("INDEV")
#print(covd)
#print()
#print(covmeds)
#print()
#return (covd, covmeds)
def impute_zeros(self, bw):
'''When requiring mapq to be stringent, it leaves stretches of 0 that could benefit from being imputed.
The 0 score often causes a state change in HMMs and can also lead to very inflated scores in FE after pseudocount added (if numerator is non-zero - e.g. 10/0.1 = 100)
So this smooths the counts... and only uses the resulting smoothed values to substitute 0s.
This means in very long 0 regions (e.g. contigs with no coverage), the score will remain 0 as desired.'''
for chrom in self.chromosomes:
x = self.start[chrom]
y = self.count[chrom]
k = puffR.impute_zeros(x = fltvec(x), y = fltvec(y), bw = bw)
self.count[chrom] = np.array(k)
def global_winsorize_counts(self, floor=0.00001, ceiling=0.99999):
'''Set anything below the value of the floor quantile to that value.
Set anything above the value of the ceiling quantile to that value.'''
counts = np.concatenate(self.count.values())
wange = np.quantile(counts, [floor, ceiling])
for chrom in self.chromosomes:
self.count[chrom][self.count[chrom] < wange[0]] = wange[0]
self.count[chrom][self.count[chrom] > wange[1]] = wange[1]
def local_winsorize_counts(self, floor=0.0001, ceiling=0.9999):
'''Set anything below the value of the floor quantile to that value.
Set anything above the value of the ceiling quantile to that value.'''
counts = np.concatenate(self.count.values())
gwange = np.quantile(counts, [floor, ceiling])
for chrom in self.chromosomes:
            counts = np.array(self.count[chrom])
cwange = np.quantile(counts, [floor, ceiling])
pass
def create_local_medRatioNorm_dict(self, other=None, pseudocount=0.0):
#other is another CovBed object with same bins from same genome
covd = defaultdict(list)
for chrom in self.chromosomes:
if other is not None:
ratios = (np.array(self.count[chrom])+pseudocount)/(np.array(other.count[chrom])+pseudocount)
else:
ratios = np.array(self.count[chrom])+pseudocount
for i in range(len(self.count[chrom])):
latecov = self.count[chrom][i]
ratio = ratios[i]
covd[latecov].append(ratio)
return covd
def get_params_from_local_medRatioNorm_dict_with_globalweighting(self, covd, globalweight=30):
#covd = output from create_local_medRatioNorm_dict
#globalweight = how many global_med values to add to a local bin
global_med = [np.median(np.concatenate(covd.values()))]*globalweight
covmeds = dict()
for key in sorted(covd.keys()):
ratios = np.concatenate([global_med,covd[key]])
covmeds[key] = np.median(ratios)
return covmeds
def get_params_from_local_medRatioNorm_dict_with_glocalweighting(self, covd, globalweight=10, minlocalbins=3, minpropdata=0.1):
#covd = output from create_local_medRatioNorm_dict
#globalweight = how many global_med values to add to a local bin
N = len(np.concatenate(covd.values()))
pN = round(minpropdata*N)
#print 'pN', pN
global_med = [np.median(np.concatenate(covd.values()))]*globalweight
#print 'G', global_med
covmeds = dict()
latecovs = sorted(covd.keys())
halfbins = int((minlocalbins+1)//2) ## halves from odds rounded up
n = len(latecovs)
for i in range(n):
## FIRST SATISFY MIN BINS
if minlocalbins >=3:
l = max(0, i-halfbins)
r = min(i+halfbins, n-1)
nl = i-l
nr = r - i
if l == 0:
add = minlocalbins - nl
r += add
if r > n-1: ## CATCH -- this shouldn't happen
print "WARN: get_params_from_local_medRatioNorm_dict_with_glocalweighting #1"
r = n-1
if r == n-1:
sub = minlocalbins - nr
l -= sub
if l < 0: ## CATCH -- this shouldn't happen
print "WARN: get_params_from_local_medRatioNorm_dict_with_glocalweighting #2"
l = 0
bins = [latecovs[j] for j in range(l,r)]
else:
bins = [latecovs[i]] # [covd[j] for j in range(i,i+minlocalbins)]
nbins = len(bins)
#print "nbins post minbins", nbins
## SECOND, SATISFY MIN PROP OF DATA
direction = 'R'
localvals = list(np.concatenate([covd[latecov] for latecov in bins]))
nvals = len(localvals)
finiteloop=n
while nvals < pN:
if direction == 'R' and r <= n-1:
# use r before r change since it only went up to but did not include r before
                    localvals.extend(covd[latecovs[r]])
r += 1
direction = 'L'
nvals = len(localvals)
finiteloop=n
elif direction == 'L' and l > 0:
# use l AFTER l change since used l already
l-=1
                    localvals.extend(covd[latecovs[l]])
direction = 'R'
nvals = len(localvals)
finiteloop=n
else:
finiteloop-=1
if finiteloop <= 0:
print "WARN: get_params_from_local_medRatioNorm_dict_with_glocalweighting #3"
break
## GET GLOCAL MEDIAN RATIO
ratios = np.concatenate([global_med, localvals])
#print ratios
covmeds[latecovs[i]] = np.median(ratios)
return covmeds
class MultiCovBed(object):
## assumes all covbeds in list have same exact elements in 1st 3 colums and are all identically ordered
## this should be true if all are outputs from getcov using the same genome file (just different bams)
def __init__(self, stagefile):
# 'stagefile' is a FOFN-like file with 2 columns: 1=stage_integer,2=bincounts_filepaths
## where stage integer is time points -- e.g. 1,2,3,4,5
## stage_ints can be used on replicates (i.e. can use same int >1 time)
self.stagefile = stagefile
self.nfiles = 0
self.nbins = 0
self.covbeds = {}
self.stages = {}
self.stagelist = []
self.files = {}
self.count = {}
self.median = {}
self.start = {}
self.end = {}
self.chromosomes = set([])
self._parse_stagefile()
self._extract_data()
self.corscores = {}
self.rksmooth = None
self.smooth_corscores = {}
self.cor_states = {}
self.s50 = {}
self.a50 = {}
self.filtered = {'start':{}, 'end':{}, 'count':{k:{} for k in range(self.nfiles)}}
self.filteredchromosomes = []
self.ntest = None
######################
## INITIALIZATION
######################
def _parse_stagefile(self):
i = 0
with open(self.stagefile) as f:
for line in f:
stage, fname = line.strip().split()
self.stages[i] = int(stage)
self.files[i] = fname
self.stagelist.append(int(stage))
i+=1
self.nfiles = len(self.files.keys())
def _extract_data(self):
for i in sorted(self.files.keys()):
self.add_covbed(findex = i)
def add_covbed(self, findex):
covbed = CovBed(self.files[findex])
if not self.chromosomes:
self._initialize(covbed)
self.count[findex] = covbed.get_count_dict()
def _initialize(self, covbed):
self.start = covbed.get_start_dict()
self.end = covbed.get_end_dict()
self.chromosomes = sorted(list(covbed.get_chromosomes()))
######################
## Operations
######################
def _get_median(self, findex):
counts = np.concatenate(self.count[findex].values())
self.median[findex] = float(np.median(counts))
def get_median(self, findex, refresh=False):
if refresh:
self._get_median(findex)
try:
return self.median[findex]
except KeyError as e:
self._get_median(findex)
return self.median[findex]
def _refresh_medians(self):
## will re-calculate medians every time called
for findex in range(self.nfiles):
self._get_median(findex)
def normalize(self, x, denom):
#x is np.array
# denom is float
return x/denom
def normalize_findex_by_x(self,findex, x):
for chrom in self.chromosomes:
self.count[findex][chrom] = self.normalize(self.count[findex][chrom], x)
def nomalize_data_by_xdict(self,xdict):
for findex in range(self.nfiles):
self.normalize_findex_by_x(findex, xdict[findex])
def median_normalize_findex(self,findex, refresh=False):
self.normalize_findex_by_x(findex, x = self.get_median(findex, refresh))
def median_normalize_data(self, refresh=False):
for findex in range(self.nfiles):
self.median_normalize_findex(findex, refresh)
def ksmooth_counts(self, bw=10000):
for chrom in self.chromosomes:
for findex in range(self.nfiles):
x = self.start[chrom]
y = self.count[findex][chrom]
k = ksmooth(x = fltvec(x), y = fltvec(y), bandwidth = bw)
self.count[findex][chrom] = np.array(k[1])
def expanded_bdg(self, bdg):
string = ''
for chrom in self.chromosomes:
for i in range(len( self.start[chrom] )):
string += ('\t').join([chrom, str(self.start[chrom][i]), str(self.end[chrom][i]), str(bdg[chrom][i])]) + "\n"
return string
def expanded_bdg_two_cols(self, bdg1, bdg2):
string = ''
for chrom in self.chromosomes:
for i in range(len( self.start[chrom] )):
string += ('\t').join([chrom, str(self.start[chrom][i]), str(self.end[chrom][i]), str(bdg1[chrom][i]), str(bdg2[chrom][i])]) + "\n"
return string
def collapsed_bdg(self, bdg):
string = ''
        for chrom in self.chromosomes:
            if len(self.start[chrom]) > 1:
                #init
                start = self.start[chrom][0]
                value = bdg[chrom][0]
                for i in range(1, len(self.start[chrom]) ):
                    if bdg[chrom][i] != value:
                        string += ('\t').join([chrom, str(start), str(self.end[chrom][i-1]), str(value)]) + "\n"
                        start = self.start[chrom][i]
                        value = bdg[chrom][i]
                ##finish chrom
                string += ('\t').join([chrom, str(start), str(self.end[chrom][i]), str(value)]) + "\n"
            else: #only 1 bin (very tiny contig)
                string += ('\t').join([chrom, str(self.start[chrom][0]), str(self.end[chrom][0]), str(bdg[chrom][0])]) + "\n"
return string
def get_bdg(self, bdg, collapsed=False):
if not collapsed:
return self.expanded_bdg(bdg)
else:
return self.collapsed_bdg(bdg)
def find_slopes(self, stagelist=''):
if not stagelist:
stagelist = self.stagelist
self.slopes = {}
for chrom in self.chromosomes:
self.slopes[chrom] = []
for i in range(len(self.start[chrom])):
counts = [self.count[j][chrom][i] for j in range(self.nfiles)]
try:
slope = np.polyfit(x = stagelist, y = counts, deg = 1)[0]
except FloatingPointError:
slope = 0
self.slopes[chrom].append(slope)
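    # Numeric check for the per-bin slope above (comment only, not original code): a bin
    # with normalized counts [1, 3, 5, 7] across stages [1, 2, 3, 4] gets
    # np.polyfit(x=[1, 2, 3, 4], y=[1, 3, 5, 7], deg=1)[0] == 2.0, i.e. +2 per stage.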
def get_slope_bdg(self, collapsed=False):
##assumes self.slopes already present
return self.get_bdg(self.slopes, collapsed)
def cor_score(self, stagelist=''):
if not stagelist:
stagelist = self.stagelist
for chrom in self.chromosomes:
self.corscores[chrom] = []
for i in range(len(self.start[chrom])):
counts = [self.count[j][chrom][i] for j in range(self.nfiles)]
try:
## score = np.nan_to_num( np.corrcoef(x = [stagelist, counts])[0,1] )
score = np.corrcoef(x = [stagelist, counts])[0,1]
except FloatingPointError:
score = 0
self.corscores[chrom].append(score)
def get_corscore_bdg(self, collapsed=False):
if not self.corscores:
self.cor_score()
return self.get_bdg(self.corscores, collapsed)
def ksmooth_corscores(self, bw=10000):
if not self.corscores:
self.cor_score()
for chrom in self.chromosomes:
x = self.start[chrom]
y = self.corscores[chrom]
k = ksmooth(x = fltvec(x), y = fltvec(y), bandwidth = bw)
self.smooth_corscores[chrom] = np.array(k[1])
def get_smooth_corscore_bdg(self, collapsed=False):
return self.get_bdg(self.smooth_corscores, collapsed)
def get_cor_states(self, smoothed=False, emodel="normal"):
if not self.corscores:
self.cor_score()
if smoothed and not self.smooth_corscores:
sys.stderr.write("Smoothing cor scores with default bandwidth")
self.ksmooth_corscores()
if smoothed:
scores = self.smooth_corscores
else:
scores = self.corscores
for chrom in self.chromosomes:
## sys.stderr.write( chrom + "\n" )
v = puffR.viterbi_puff(emissions = puffR.emissions, transitions = puffR.transitions, initial = puffR.initial, states = intvec([1,2,3]), emitted_data = fltvec(scores[chrom]), emodel = emodel, logprobs=False)
## f = puffR.forward_puff(emissions = puffR.emissions, transitions = puffR.transitions, initial = puffR.initial, states = intvec([-1,0,1]), emitted_data = fltvec(scores[chrom]), emodel = emodel, logprobs=False)
## b = puffR.backward_puff(emissions = puffR.emissions, transitions = puffR.transitions, initial = puffR.initial, states = intvec([-1,0,1]), emitted_data = fltvec(scores[chrom]), emodel = emodel, logprobs=False)
self.cor_states[chrom] = np.array(list(v[0]))
def get_cor_state_bdg(self, collapsed=False):
return self.get_bdg(self.cor_states, collapsed)
def calc_s50(self, x=[50], stagelist=''):
if not stagelist:
stagelist = self.stagelist
for chrom in self.chromosomes:
self.s50[chrom] = []
for i in range(len(self.start[chrom])):
counts = [self.count[j][chrom][i] for j in range(self.nfiles)]
ans = s50(counts,stagelist,x=x)
ans = ans[x[0]]
self.s50[chrom].append(ans)
def get_s50_bdg(self):
if not self.s50:
self.calc_s50()
string = ''
for chrom in self.chromosomes:
for i in range(len( self.start[chrom] )):
if self.s50[chrom][i] != ".":
string += ('\t').join([chrom, str(self.start[chrom][i]), str(self.end[chrom][i]), str(self.s50[chrom][i])]) + "\n"
return string
def analyze_state0_bins(self):
## assumes median normalized (or otherwise)
if not self.corscores:
self.cor_score()
if not self.cor_states:
self.get_cor_states()
counts = {k:[] for k in range(self.nfiles)}
for chrom in self.chromosomes:
for i in range(len( self.start[chrom] )):
if self.cor_states[chrom][i] == 2:#states are 1,2,3 for -1,0,1
for j in range(self.nfiles):
counts[j].append( self.count[j][chrom][i] )
## mean would be skewed above 1 since even if the count of numbers < 1 equals the count of numbers > 1, the mean will be >1.
total_median = [np.median(counts.values())]
medians = {findex:np.median(counts[findex]) for findex in range(self.nfiles)}
self.state0_medians = {0:medians,1:total_median}
def _need_more_cycles(self):
for m in self.state0_medians[0].values():
if m != 1:
return True
return False
def _normalize_data_by_state0_median(self):
for findex in range(self.nfiles):
if self.state0_medians[0][findex] != 1:
self.normalize_findex_by_x(findex, self.state0_medians[0][findex])
def find_cn1(self, n_iter=10, max_empty_bin_pct=0.4, max_offending_samples_pct=0.4, verbose=True):
if verbose:
sys.stderr.write(str(datetime.datetime.now()) +": ..CN=1 - getting medians before and after filtering..\n")
self._refresh_medians() ## get medians for first time
if verbose:
sys.stderr.write(str(datetime.datetime.now()) +": ..CN=1 - medians before filtering were %s...\n" % (str(self.median)))
sys.stderr.write(str(datetime.datetime.now()) +": ..CN=1 - filtering..\n")
self.filter_null_contigs(max_empty_bin_pct, max_offending_samples_pct) ## filter bad contigs
self._refresh_medians() ## get medians after filtration
if verbose:
sys.stderr.write(str(datetime.datetime.now()) +": ..CN=1 - medians after filtering were %s...\n" % (str(self.median)))
sys.stderr.write(str(datetime.datetime.now()) +": ..CN=1 - median normalizing..\n")
self.median_normalize_data() # median norm
self._refresh_medians() ## get medians after normalization
if verbose:
sys.stderr.write(str(datetime.datetime.now()) +": ..CN=1 - medians (of all bins) after normalizing should be 1 and were %s...\n" % (str(self.median)))
sys.stderr.write(str(datetime.datetime.now()) +": ..CN=1 - getting bin correlations \n")
self.cor_score() # give correlation score to each bin
if verbose:
sys.stderr.write(str(datetime.datetime.now()) +": ..CN=1 - getting state path \n")
self.get_cor_states() # get state path through bins
self.analyze_state0_bins() # get median of 0-correlation (most likely cn=1) bins
for i in range(n_iter):
if verbose:
sys.stderr.write(str(datetime.datetime.now()) +": ..CN=1 iter %d starting 0-corr medians: %s...\n" % (i+1, str(self.state0_medians)))
if self._need_more_cycles(): # if 0-corr median from any sample != 1, re-normalize by 0-corr median, est new correlations, get updated statepath, get new 0corr medians
self._normalize_data_by_state0_median()
self.cor_score()
self.get_cor_states()
self.analyze_state0_bins()
else:
if verbose:
sys.stderr.write(str(datetime.datetime.now()) +": ..CN=1 iter %d canceled - 0-corr medians all = 1...\n" % (i+1))
break
def filter_null_contigs(self, max_empty_bin_pct=0.4, max_offending_samples_pct=0.4):
##default: if >40% of samples have >40% empty bins on a chrom, remove it
sample_thres = max_offending_samples_pct*self.nfiles
for chrom in self.chromosomes:
nbins = len(self.start[chrom])
bin_thres = max_empty_bin_pct*nbins
n_bad_samp = 0
for findex in range(self.nfiles):
n_bad_bins = len(self.count[findex][chrom][self.count[findex][chrom] == 0])
if n_bad_bins > bin_thres:
n_bad_samp += 1
if n_bad_samp > sample_thres:
self.filtered['start'][chrom] = self.start.pop(chrom)
self.filtered['end'][chrom] = self.end.pop(chrom)
for findex in range(self.nfiles):
counts = (self.count[findex]).pop(chrom)
self.filtered['count'][findex][chrom] = counts
self.filteredchromosomes.append(chrom)
self.chromosomes.remove(chrom)
def pct_state(self, state=3):
total = 0
nstate = 0
for chrom in self.chromosomes:
total += len(self.cor_states[chrom])
            nstate += int((self.cor_states[chrom] == state).sum())
return 100.0*nstate/total
def n_state(self, state=3):
nstate = 0
for chrom in self.chromosomes:
            nstate += int((self.cor_states[chrom] == state).sum())
return nstate
def eFDR1(self, stage=3, n_iter=10):
## shuffles stages -- not best way since many permutations uphold correlations...
## assumes find_cn1 already run
if self.ntest is None:
self.ntest = self.n_state(3)
controls = []
for i in range(n_iter):
stagelist = self.stagelist[:] #clone it
np.random.shuffle(stagelist)
sys.stderr.write(str(stagelist)+"\n") ## PRINT TEMP
self.cor_score(stagelist)
self.get_cor_states()
controls.append(self.n_state(3))
return self.ntest, controls, 100.0*np.array(controls)/self.ntest
def eFDR2(self):
##
pass
def pval(self):
# for each bin, shuffle scores, take cor, store -- get 1000 of these, p ~ n_gt_cor/N
# BH correct p-values - only keep bins with q < 0.1
## OR -- take state+ bins, combine counts in all for each stage, do this
## that would be less tests and it would not treat all bins independently - would treat regions independently
## hmmm.... maybe not worth doing this at all...
pass
def discretize_cor_values(self):
self.dcorscores = {}
for chrom in self.chromosomes:
self.dcorscores[chrom] = map(round, np.array(self.corscores[chrom])*4+5) #9-sided dice emission symbols 1-9 (for Rindexing) where 1-4 neg, 5=0, 6-9pos
######################
## Printing etc
######################
def get_counts_bdg(self):
string = ''
for chrom in self.chromosomes:
for i in range(len( self.start[chrom] )):
string += ('\t').join([chrom, str(self.start[chrom][i]), str(self.end[chrom][i])] + [str(self.count[j][chrom][i]) for j in range(self.nfiles)]) + "\n"
return string
def get_filtered_contigs_bdg(self):
string = ''
for chrom in self.filteredchromosomes:
for i in range(len( self.filtered['start'][chrom] )):
string += ('\t').join([chrom, str(self.filtered['start'][chrom][i]), str(self.filtered['end'][chrom][i])] + [str(self.filtered['count'][j][chrom][i]) for j in range(self.nfiles)]) + "\n"
return string
def __str__(self):
return self.get_counts_bdg()
## DOES NOT BEHAVE IN USEFUL WAY - i.e. results not more useful than s50 (probably less so).
## def calc_a50(self,x=[50]):
## ## ASSUMES COUNTS ARE MEDIAN NORMALIZED
## ## for all cn=1 areas, s50=3 -- i.e. 50% of normalized read counts seen halfway through
## ## a50 is trying to mask cn=1 to highlight when 50% of amplification is done
## ## it does this by subtracting 1 from the median normalized counts (and taking max of that or 0 to avoid negatives)
## ## --> and later ignoring anything that is "."
## ## this is experimental - not totally sure it will give what I want
## for chrom in self.chromosomes:
## self.a50[chrom] = []
## for i in range(len(self.start[chrom])):
## counts = [max([self.count[j][chrom][i]-1,0]) for j in range(self.nfiles)]
## ans = s50(counts,self.stagelist,x=x)
## ans = ans[x[0]]
## self.a50[chrom].append(ans)
##
## def get_a50_bdg(self):
## if not self.a50:
## self.calc_a50()
## string = ''
## for chrom in self.chromosomes:
## for i in range(len( self.start[chrom] )):
## if self.a50[chrom][i] != ".":
## string += ('\t').join([chrom, str(self.start[chrom][i]), str(self.end[chrom][i]), str(self.a50[chrom][i])]) + "\n"
#### if i > 20: break
#### if i > 20: break
## return string
```
#### File: pufferfish/pufferfish/findpuffs.py
```python
import sys, os, gzip
import datetime
from CovBedClass import *
import cPickle as pickle
from loaddata import load_pickle
def run(parser, args):
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..initializing...\n")
if args.input:
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..Loading files...\n")
f = MultiCovBed(args.input)
elif args.inpickle:
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..Un-pickling...\n")
f = load_pickle(args.inpickle)
f.filter_null_contigs(args.max_empty_bin_pct, args.max_offending_samples_pct) ## filter bad contigs
if args.smoothbeforemedian:
f.ksmooth_counts(bw=args.smoothbeforemedian) ## smooth before finding median
## if args.mediannorm:
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..median normalizing...\n")
f.median_normalize_data()
if args.smoothbeforecor:
f.ksmooth_counts(bw=args.smoothbeforecor) ## smooth counts before getting cor in bins - prob should not do both b4median and b4cor
## if args.calcbincors:
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..calc corscores...\n")
f.cor_score()
## if args.smoothcors:
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..ksmoothing corscores...\n")
f.ksmooth_corscores(bw=args.corsmoothbandwidth)
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..getting viterbi state paths...\n")
f.get_cor_states()
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..getting slopes in bins...\n")
f.find_slopes()
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..Pickling...\n")
if not args.outpickle.endswith('.gz'):
args.outpickle += '.gz'
if os.path.exists(args.outpickle):
os.remove(args.outpickle)
with gzip.open(args.outpickle,'wb') as pkfile:
pickle.dump(f, pkfile)
##sys.stderr.write(str(datetime.datetime.now()) +": ..printing meta-bdg with median norm scores from all files...\n")
##print f
##print f.get_corscor_bdg()
##sys.stderr.write(str(datetime.datetime.now()) +": ..printing ksmoothed corscores...\n")
##print f.get_smooth_corscor_bdg()
##sys.stderr.write(str(datetime.datetime.now()) +": ..printing viterbi state paths...\n")
##print f.get_cor_state_bdg()
##sys.stderr.write(str(datetime.datetime.now()) +": ..analyzing 0-state bins...\n")
##f.analyze_state0_bins()
##print f.state0_medians
```
#### File: pufferfish/pufferfish/generate.py
```python
import sys, datetime
from CovBedClass import *
from pk2txt import bdgmsg, newmsg
from normalize import protocol1, protocol2, protocol3, protocol4, protocol5, protocol6, normalize
from hmm_fxns_for_R import *
def run(parser, args):
bedgraph = CovBed(args.bedgraph)
if not args.quiet:
newmsg("finding state path")
## CONSTRUCT EMISSIONS PROBABILITY MATRIX FOR R
eprobs, nstates = help_get_emission_probs(args.mu, args.sigma, args.mu_scale)
## CONSTRUCT TRANSITIONS PROBABILITY MATRIX FOR R
tprobs = help_get_transition_probs(args.leave_special_state, args.leave_other, args.special_idx, nstates)
## CONSTRUCT INITIAL PROBABILITY MATRIX FOR R
iprobs = help_get_initial_probs(nstates, args.special_idx, args.init_special, args.initialprobs)
## HIDDEN MARKOV MODEL: Find most probable path through states
statepath, emitted_data = generate_hmmR(bedgraph, args.emodel, eprobs=eprobs, tprobs=tprobs, iprobs=iprobs)
##
if not args.quiet:
bdgmsg("state path", False)
sys.stdout.write(bedgraph.expanded_bdg_two_cols(emitted_data, statepath))
```
#### File: pufferfish/pufferfish/hmm_state_mean_correction.py
```python
import sys, argparse, pybedtools, scipy.stats
from collections import defaultdict
import numpy as np
from CovBedClass import *
from pk2txt import bdgmsg, newmsg
parser = argparse.ArgumentParser(description="""
Given:
(i) HMM STATE bedGraph from pufferfish
(ii) Target signal bedGraph to normalize
Find mean of each state.
Return (and/or):
(i) Normalized target bedGraph -- where each target value is divided by the mean of its HMM state
The desired outcome is that the target signal becomes centered on CN=1.
Downstream steps can then make the assumption of CN=1 for all bins.
(ii) Mean level bedGraph where each bin is assigned the mean of its state.
Can be used to:
- visually compare to original signal
- use awk on target bdg vs mean level bdg downstream to normalize, signif test, or other
""", formatter_class= argparse.RawTextHelpFormatter)
parser.add_argument('--signal', '-i', '-f',
type= str,
help='''Path to signal bedGraph that usually has cols: chr, start, end, signal-to-correct.
Can tell algo what col to look at.''')
parser.add_argument('--states', '-i2', '-f2',
type=str,
help='''Path to bedGraph that usually has cols: chr, start, end, HMM state.
Can tell algo what col to look at.
chr/start/end should be identical in values and sort order as --signal.''')
parser.add_argument('--signalcol', '-s',
type=int, default=4,
help='''1-based column that signal found in. Default = 4''')
parser.add_argument('--statecol', '-S',
type=int, default=4,
                    help='''1-based column that the HMM state is found in. Default = 4''')
parser.add_argument('--chrcol',
type=int, default=1,
help='''1-based column that chr/seq name found in. Default = 1''')
parser.add_argument('--startcol',
type=int, default=2,
help='''1-based column that start coordinate found in. Default = 2''')
parser.add_argument('--endcol',
type=int, default=3,
help='''1-based column that end coordinate found in. Default = 3''')
parser.add_argument('--levels',
type= str,
help='''By default, the CN normalized bedGraph is written to stdout.
Using this flag and providing a file name tells the program to also write
a bedGraph to that file name for the means of the states over each bin.
''')
parser.add_argument('--levels_only',
action='store_true', default=False,
help='''By default, the CN normalized bedGraph is written to stdout.
Using this flag tells the program to only return the levels bedGraph (to stdout by default if --levels not used).
''')
parser.add_argument('--normbdg',
type= str, default=False,
help='''By default, the CN normalized bedGraph is written to stdout.
This redirects it into a filename provided.
''')
parser.add_argument('-c', '--collapsed', action='store_true', default=False,
help='''Return collapsed variable-step bedGraph instead of expanded single-step bedGraph.
This is often a much smaller file.''')
parser.add_argument('-q', '--quiet', action='store_true', default=False,
help='''QUIET.''')
args = parser.parse_args()
sigcol = args.signalcol-1
statecol = args.statecol-1
chrcol = args.chrcol-1
startcol = args.startcol-1
endcol = args.endcol-1
##def run(parser, args):
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..Loading files...\n")
signal = CovBed(args.signal)
states = CovBed(args.states, count_only=True)
## FIRST GET STATE MEANS
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..Learning state means...\n")
## MEANS: Sum up data over each state and tally the number of times the state was observed.
statesum = defaultdict(float) ## Sum of signal over each state
stateobs = defaultdict(float) ## Number times state is observed.
for chrom in states.chromosomes:
numbins = len(states.count[chrom])
for i in range(numbins):
state = states.count[chrom][i]
emission = signal.count[chrom][i]
statesum[state] += emission
stateobs[state] += 1.0
## MEANS: Divide Sum of data over each state by number of times the state was observed.
statemeans = defaultdict(float)
for state, n in stateobs.items():  # .items() keeps this compatible with Python 3
statemeans[state] = statesum[state] / float(n)
## NORMALIZE BY STATE MEANS
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..Normalizing emissions to state means...\n")
## NORM: Create dictionary that contains chroms w/ lists of signal/statemean
normsig = defaultdict(list)
levels = defaultdict(list)
for chrom in signal.chromosomes:
numbins = len(signal.count[chrom])
for i in range(numbins):
state = states.count[chrom][i]
emission = signal.count[chrom][i]
statemean = statemeans[state]
levels[chrom].append( statemean )
normsig[chrom].append( emission / statemean )
## OUTPUT:
if not args.levels_only:
if args.normbdg:
normsigout = open(args.normbdg, 'w')
else:
normsigout = sys.stdout
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..Writing normalized signal bedGraph...\n")
normsigout.write(signal.get_bdg(normsig, args.collapsed))
if args.normbdg:
normsigout.close()
if args.levels or args.levels_only:
if args.levels:
levelsout = open(args.levels, 'w')
elif args.levels_only: ## "levels_only and levels" already taken care of by first condition
levelsout = sys.stdout
if not args.quiet:
sys.stderr.write(str(datetime.datetime.now()) +": ..Writing state level means bedGraph...\n")
levelsout.write(signal.get_bdg(levels, args.collapsed))
if args.levels:
levelsout.close()
```
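To make the state-mean correction concrete, here is a tiny self-contained example on made-up numbers (not tied to any real bedGraph): bins are grouped by their HMM state, each state's mean is computed, and every bin is divided by the mean of its state so the corrected track is centered near 1.

```python
from collections import defaultdict

signal = [10.0, 12.0, 40.0, 44.0, 11.0]  # toy per-bin signal values
states = [1, 1, 3, 3, 1]                 # toy per-bin HMM states

# Sum and count the signal per state, as in the script above.
statesum, stateobs = defaultdict(float), defaultdict(float)
for state, value in zip(states, signal):
    statesum[state] += value
    stateobs[state] += 1.0
statemeans = {s: statesum[s] / n for s, n in stateobs.items()}

levels = [statemeans[s] for s in states]                       # mean-level track
normsig = [v / statemeans[s] for s, v in zip(states, signal)]  # centered on ~1
print(levels)   # [11.0, 11.0, 42.0, 42.0, 11.0]
print(normsig)  # approximately [0.91, 1.09, 0.95, 1.05, 1.0]
```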
#### File: pufferfish/pufferfish/pufferfish_main.py
```python
import os.path
import sys
import argparse
#logger
import logging
logger = logging.getLogger('pufferfish')
# pufferfish imports
#import pufferfish.version
pfv = '0.0.0'
## FUNCTIONS
def run_subtool(parser, args):
## the function to be used in subparser.set_defaults(func=func)
## it selects the module that the sub-parser is made for
## then uses the "run" function in that module
if args.command == 'mapreads':
import mapreads as submodule
elif args.command == 'getcov':
import getcov as submodule
elif args.command == 'findpuffs':
import findpuffs as submodule
elif args.command == 'dump':
import pk2txt as submodule
elif args.command == 'puffcn':
import cn as submodule
elif args.command == 'puffcnpy':
import cnpy as submodule
elif args.command == 'summits':
import findsummits as submodule
elif args.command == 'normalize':
import normalize as submodule
elif args.command == 'hmm':
import generalhmm as submodule
elif args.command == 'generate':
import generate as submodule
elif args.command == 'filter':
import filterfish as submodule
elif args.command == 'help':
import helper as submodule
# run the chosen submodule
submodule.run(parser, args)
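## --- Illustrative note (not part of the original program) -------------------
## Example of the dispatch above, with hypothetical file names:
##   $ pufferfish puffcn -1 -l late.bedGraph -e early.bedGraph
##   -> args.command == 'puffcn', so 'import cn as submodule' and submodule.run(parser, args)
## -----------------------------------------------------------------------------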
## This subclass is used as parser_class for submodule sub-parsers
class ArgumentParserWithDefaults(argparse.ArgumentParser):
## Child/sub of argparse.ArgumentParser class (the super)
def __init__(self, *args, **kwargs):
super(ArgumentParserWithDefaults, self).__init__(*args, **kwargs)
## Add arguments that can be used by all sub-commands
self.add_argument("-q", "--quiet", help='''Do not output warnings to stderr.''',
action="store_true",
dest="quiet")
def main():
logging.basicConfig()
## Create top-level parser
parser = argparse.ArgumentParser(prog="pufferfish", description=''' PufferFish - HMM-based approach(es) to finding and analyzing developmentally regulated amplicons (genomic sites that are programmed to increase in copy number over time).''',
formatter_class=argparse.RawTextHelpFormatter)#ArgumentDefaultsHelpFormatter
parser.add_argument('-v', '--version', help='''Installed pufferfish version.''',
action='version',
version='%(prog)s ' + pfv)#str(pufferfish.version.__version__))
subparsers = parser.add_subparsers(title='[sub-commands]', dest='command',
parser_class=ArgumentParserWithDefaults)
## Create a sub-command parser for mapreads
parser_mapreads = subparsers.add_parser('mapreads',
help='''Depends on Bowtie2 and SAMtools.
Maps fasta/fastq files to genome (can provide bt2 index or fasta reference (which will first be converted to bt2 index).
Maps reads to genome and filters out unmapped reads before sorting and indexing.''')
parser_mapreads.add_argument('fastxfiles', metavar='fastxfiles', nargs='+', type=str,
help='''Paths to as many fasta/fastq files as you'd like to map. Can be gz or bz2.''')
parser_mapreads_reftype = parser_mapreads.add_mutually_exclusive_group(required=True)
parser_mapreads_reftype.add_argument('-b', '--bt2', type=str,
help='''Path to bt2 index prefix.''')
parser_mapreads_reftype.add_argument('-r', '--ref_fasta', type=str,
help='''Path to reference fasta. A bt2 index will be generated in same dir.''')
parser_mapreads.add_argument('--threads', '-p', type=int, default=1, help='''Number of parallel threads for bowtie2 to use. Default: 1.''')
parser_mapreads.add_argument('--dry', action='store_true', default=False, help='''Only writes out the commands that will be used if set.''')
parser_mapreads.set_defaults(func=run_subtool)
## ## Create a sub-command parser for filterdup
## parser_mapreads = subparsers.add_parser('filterdup',
## help='''Depends on Picard Tools 2.1.1, BEDtools, pybedtools, pysam.
##Remove optical duplicates and marks PCR duplicates.
##All PCR duplicates except K at a given site are removed.
##K is determined by a binomial calculation using a bin size and the number of reads in a given bin.
##Then any duplicates in that bin are subject to filtering down to K.
##1. Remove optical duplicates and mark PCR duplicates.
##2. Make bins
##3. Get read count in those bins
##4. For each bin, check if there are marked reads. If so, calculate K and filter.
##5. write remaining reads as you go...
##''')
## parser_filterdup.add_argument('bams', metavar='bams', nargs='+',
## type=str,
## help=''' Paths to BAM files that need duplicate filtering.''')
## parser_filterdup.add_argument('-g', '--genome', type=str,
## help='''Path to BEDtools genome file describing reference reads were mapped to.''')
## parser_filterdup.add_argument()
## parser_filterdup.add_argument('--dry', action='store_true', default=False, help='''Only writes out the commands that will be used if set.''')
##
## parser_filterdup.set_defaults(func=run_subtool)
##
## Create sub-command parser for getcov
## TODO add filterdup possibility from macs2... rm pcr dups
parser_getcov = subparsers.add_parser('getcov',
help=''' Depends on SAMtools and BEDtools.''')
parser_getcov.add_argument('bams', metavar='bams', nargs='+',
type=str,
help=''' Paths to as many bam files as you need to get coverage for.
Can include a file-of-filenames (FOFN) and tarballs as well.''')
parser_getcov.add_argument('-f','--filterdup', type=str,
help='''Provide /path/to/picard.jar (picard v2.1.1 or higher)''')
parser_getcov.add_argument('-g', '--genome', type=str, required=True,
help='''Path to file.genome as needed and defined by BEDTools. See "bedtools makewindows" or "bedtools coverage"''')
parser_getcov.add_argument('-w', '--window', type=str, default='500',
help='''Integer window size - will be counting in windows of this size. Default: 500.''')
parser_getcov.add_argument('-s', '--step', type=str, default='500',
help='''Integer step size - will slide window over this much. Default: 500.''')
parser_getcov.add_argument('-Q', '--mapq', type=str, default='0',
help='''Integer mapq cut-off - only include reads with mapping quality >= INT. Default: 0.''')
parser_getcov.add_argument('-m', '--picardmem', type=str, default='4g',
help='''Provide memory needed/available to Picard MarkDuplicates as integer_letter string, such as 500m, 1g, 2g, 64g, etc. Default: 4g.''')
parser_getcov.add_argument('--keepopt',action='store_true', default=False, help='''Optical duplicates are removed by default. This flag says to mark them instead.''')
parser_getcov.add_argument('--rmdup',action='store_true', default=False, help='''PCR duplicates are marked by default. This flag will result in removing them (as well as optical duplicates).''')
parser_getcov.add_argument('--dry',action='store_true', default=False, help='''Only writes out the commands that will be used if set.''')
parser_getcov.add_argument('--clean',action='store_true',default=False,help='''Remove intermediate files... Default: False.''')
parser_getcov.add_argument('--force',action='store_true',default=False,help='''Ignore assertions. Good for made-up filenames when debugging in dry-runs. Do not use this for real run. Default: False.''')
parser_getcov.set_defaults(func=run_subtool)
## Create sub-command parser for findpuffs
parser_findpuffs = subparsers.add_parser('findpuffs',
help='''Take in getcov bedGraphs, do stuff.''')
parser_findpuffs_input = parser_findpuffs.add_mutually_exclusive_group(required=True)
parser_findpuffs_input.add_argument('-i','--input', type=str,
help='''Input file -- a tab-sep file with 2 columns (stage number, filename) with a single line for all getcov bedGraph files you wish to include.
Example:
1\tstage1.bedGraph''')
parser_findpuffs_input.add_argument('-ip','--inpickle', type=str,
help='''Pickle file (e.g. data.pk) containing already processed getcov bedGraphs as MultiCovBed object.''')
parser_findpuffs.add_argument('-op','--outpickle', type=str, default='data.fp.pk',
help='''Name for output pickle file (e.g. data.fp.pk.gz) that will contain the MultiCovBed object made when this is run.
Pickled data is automatically gzipped. If .gz not at end of given filename, it will be added.
If the filename exists it will be erased and written over.
Default: data.fp.pk.gz''')
parser_findpuffs.add_argument('-s1', '--smoothbeforemedian', default=False, type=int,
help='''Smooth counts in bins before finding the median bin counts (and before median normalization).
Must provide integer window size to smooth in (should be longer than bin size)
-- e.g. if bin size = 500, smoothing bandwidth could be 10000.
One probably should not do both --smoothbeforemedian and --smoothbeforecor. Pick one or none.''')
parser_findpuffs.add_argument('-s2', '--smoothbeforecor', default=False, type=int,
help='''Smooth counts in bins after median normalization, but before finding correlations in each bin.
Must provide integer window size to smooth in (should be longer than bin size)
-- e.g. if bin size = 500, smoothing bandwidth could be 10000.
One probably should not do both --smoothbeforemedian and --smoothbeforecor. Pick one or none.''')
parser_findpuffs.add_argument('-bw', '--corsmoothbandwidth', type=int, default=15000,
help='''For smoothing correlation scores. Provide integer window size to smooth in (should be longer than bin size)
-- e.g. if bin size = 500, smoothing bandwidth could be 10000. Default: 15000.''')
parser_findpuffs.add_argument('-mep', '--max_empty_bin_pct', type=float, default=0.4,
help='''For filtering contigs out, contig is allowed to have up to X (proportion between 0 and 1) bins with 0 coverage. Default: 0.4.''')
parser_findpuffs.add_argument('-mop', '--max_offending_samples_pct', type=float, default=0.4,
help='''For filtering contigs out, contig is allowed to exceed max_empty_bin_pct in Y (proportion between 0 and 1) of the samples. Default: 0.4.''')
parser_findpuffs.set_defaults(func=run_subtool)
## Create sub-command for dump
parser_dump = subparsers.add_parser('dump',
help=''' Take in pickled object containing MultiCovBed object where statepath has been found.
Output bedGraph of statepath and/or BED file containing coordinates of states.''')
parser_dump.add_argument('-ip', '--inpickle', type=str, required=True,
help='''Path to input pickle.''')
parser_dump.add_argument('-p','--prefix', type=str, required=True,
help='''Output files with provided --prefix''')
parser_dump.add_argument('-c', '--counts', default=False, action='store_true',
help='''Output counts as expanded single-step, bedGraph-like file. Counts will be normalized.
Columns 4+ will have counts from files in order files were given.
Can use this with awk to create bedGraphs for each -- e.g. for i in {4..8}; do awk -v "i=$i" 'OFS="\t" {print $1,$2,$3,$i}' q30.w500.s500.default.counts.bedGraph > q30.w500.s500.default.counts.$i.bedGraph; done ''')
parser_dump.add_argument('-fc', '--filtered_counts', default=False, action='store_true',
help='''Output counts from filtered contigs as expanded single-step, bedGraph-like file. Counts will likely NOT be normalized (as filtering is done prior to median normalization).
Columns 4+ will have counts from files in order files were given.
Can use this with awk to create bedGraphs for each -- e.g. awk 'OFS="\t" {print $1,$2,$3,$4}' filtered_counts.bedGraph > file.filtered_counts.bedGraph ''')
parser_dump.add_argument('-vc','--viterbi_collapsed', default=False, action='store_true',
help='''Output viterbi statepath expanded single-step bedGraph with provided --prefix''')
parser_dump.add_argument('-ve','--viterbi_expanded', default=False, action='store_true',
help='''Output viterbi statepath collapsed varstep bedGraph with provided --prefix''')
parser_dump.add_argument('-cc','--correlations_collapsed', default=False, action='store_true',
help='''Output bin correlation scores as collapsed varstep bedGraph with provided --prefix''')
parser_dump.add_argument('-ce','--correlations_expanded', default=False, action='store_true',
help='''Output bin correlation scores as expanded single-step bedGraph with provided --prefix''')
parser_dump.add_argument('-scc','--smoothed_correlations_collapsed', default=False, action='store_true',
help='''Output smoothed bin correlation scores as collapsed varstep bedGraph with provided --prefix''')
parser_dump.add_argument('-sce','--smoothed_correlations_expanded', default=False, action='store_true',
help='''Output smoothed bin correlation scores as expanded single-step bedGraph with provided --prefix''')
parser_dump.add_argument('-sc','--slopes_collapsed', default=False, action='store_true',
help='''Output bin slopes as collapsed varstep bedGraph with provided --prefix''')
parser_dump.add_argument('-se','--slopes_expanded', default=False, action='store_true',
help='''Output bin slopes as expanded single-step bedGraph with provided --prefix''')
parser_dump.set_defaults(func=run_subtool)
## create sub-sommand for puffcn (puff copy number)
parser_puffcn = subparsers.add_parser('puffcn',
help = '''Given a latest-stage sample (where all or most puffs have grown) and an optional earliest stage sample
(for additional Fold-enrichment normalization), define whether a region is best explained by cn=1,2,4,8,16,32,64.
Ideally, this can give an idea of where replication forks approximately reach from each firing.''')
parser_puffcn.add_argument('-l','--latestage', type=str, required=True,
help='''Provide path to bedGraph (e.g. made from getcov) for a late stage sample.''')
parser_puffcn.add_argument('-e','--earlystage', type=str, required=False, default=False,
help=''' Optional: Provide path to bedGraph (e.g. made from getcov) for an early stage sample. This is used after smoothing and median normalization to further normalize the late-stage sample (e.g. can correct for sequencing biases)''')
parser_puffcn.add_argument('--replace', action='store_true', default=False,
help='''Turn on "replace" functionality. By default this will replace '.' in the count column of bedGraphs with '0'.
Use --replace_with and --replace_this to change.''')
parser_puffcn.add_argument('--replace_this', type=str, default='.',
help='''Used with --replace. Specify the character in count column to replace. Default = '.' ''')
parser_puffcn.add_argument('--replace_with', type=str, default='0',
help='''Used with --replace. Specify the character to replace the --replace_this character with.
Must be a string that can be converted to a float. Default = '0' ''')
## PROTOCOLS
parser_puffcn_protocol = parser_puffcn.add_mutually_exclusive_group(required=True)
parser_puffcn_protocol.add_argument('-1', '--protocol1', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are median normalized.
Then late stage is normalized to early stage if available.
Then the HMM is run.''')
parser_puffcn_protocol.add_argument('-2', '--protocol2', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first smoothed with bandwidth given by --bandwidth.
Then they are median normalized.
Then late stage is normalized to early stage if available.
Then the HMM is run.''')
parser_puffcn_protocol.add_argument('-3', '--protocol3', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first median normalized.
Then they are smoothed with bandwidth given by --bandwidth.
Then late stage is normalized to early stage if available.
Then the HMM is run.
Note: if early is not present, this is same as protocol 4.''')
parser_puffcn_protocol.add_argument('-4', '--protocol4', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first median normalized.
Then late stage is normalized to early stage if available.
Then late/early is smoothed with bandwidth given by --bandwidth.
Then the HMM is run.
Note: if early is not present, this is same as protocol 3.''')
parser_puffcn_protocol.add_argument('-5', '--protocol5', action='store_true', default=False,
help='''Late stage is normalized to early stage if available.
Then late/early is smoothed with bandwidth given by --bandwidth. (i.e. L/E -> smooth)
Then the HMM is run.
Note: if early is not present, this is same as protocol 6.''')
parser_puffcn_protocol.add_argument('-6', '--protocol6', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first smoothed with bandwidth given by --bandwidth.
Then late stage is normalized to early stage if available. (i.e. smooth -> L/E)
Then the HMM is run.
Note: if early is not present, this is same as protocol 5.''')
parser_puffcn_protocol.add_argument('-7', '--protocol7', action='store_true', default=False,
help='''Late stage is normalized to early stage if available.
Then the HMM is run.
Note: No median normalization or smoothing is performed. If only late is given, then this is just an identity/pass-through function.''')
parser_puffcn_protocol.add_argument('-8', '--protocol8', action='store_true', default=False,
help='''SKEW. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes Skew = (V[i]-V[i-1]) / (V[i]+V[i-1])''')
parser_puffcn_protocol.add_argument('-9', '--protocol9', action='store_true', default=False,
help='''PERCENT CHANGE. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes PercentChange = 100*(V[i]-V[i-1]) / V[i-1]''')
parser_puffcn_protocol.add_argument('-10', '--protocol10', action='store_true', default=False,
help='''SKEW CHANGE or SKEW DERIVATIVE. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes Skew = 100*(V[i]-V[i-1]) / V[i-1]''')
parser_puffcn_protocol.add_argument('-11', '--protocol11', action='store_true', default=False,
help='''PERCENT CHANGE DERIVATIVE. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes Skew = 100*(V[i]-V[i-1]) / V[i-1]''')
parser_puffcn_protocol.add_argument('-12', '--protocol12', action='store_true', default=False,
help=''' Median ratio normalization. Late is normalized to early. Then those ratios are median normalized.
This is similar to a global version of what's used in DEseq2 (or TMM for EdgeR).''')
parser_puffcn_protocol.add_argument('-13', '--protocol13', action='store_true', default=False,
help=''' Median ratio normalization with pre-smoothing. Late (and early if present) is smoothed. Late is normalized to early. Then those ratios are median normalized.
This is similar to a global version of what's used in DEseq2 (or TMM for EdgeR).''')
parser_puffcn_protocol.add_argument('-14', '--protocol14', action='store_true', default=False,
help=''' Median ratio normalization with post-smoothing. Late is normalized to early. Then those ratios are smoothed. Then median normalized.
This is similar to a global version of what's used in DEseq2 (or TMM for EdgeR).''')
parser_puffcn_protocol.add_argument('-15', '--protocol15', action='store_true', default=False,
help=''' Median ratio normalization with end-smoothing. Late is normalized to early. Then those ratios are median normalized. Then smoothed.
This is similar to a global version of what's used in DEseq2 (or TMM for EdgeR).''')
parser_puffcn_protocol.add_argument('-16', '--protocol16', action='store_true', default=False,
help=''' Glocal Median ratio normalization. Late is normalized to early. Then those ratios are median normalized based on coverage value in late.
This is similar to a coverage-based local version of what's used in DEseq2 (or TMM for EdgeR).''')
parser_puffcn_protocol.add_argument('-17', '--protocol17', action='store_true', default=False,
help=''' First normalizes late stage to median (X/Med), and scales it with --scalecov to make the median equal scalecov.
It does the same to early if present.
Then Late is normalized to Early (FE) using the given pseudocount (note: the pseudocount has the same ratio to the median in both samples).
It then does median ratio normalization.
It then returns it as is or logged if --log10 or --log2 specified''')
parser_puffcn_protocol.add_argument('-18', '--protocol18', action='store_true', default=False,
help='''Robust Z scores. If early (control) sample given, first is control norm (late/early, test/control) followed by robust Z. The fold-change calculation is not treated specially as it might be with other options. However, the robust Z should be the same or similar even if the t/c was not then median ratio normalized, for example, since that is just a scaling factor.''')
parser_puffcn_protocol.add_argument('-19', '--protocol19', action='store_true', default=False,
help='''Rank scores. If early (control) sample given, first is control norm (late/early, test/control) followed by ranking. The fold-change calculation is not treated specially as it might be with other options. However, the rank should be the same or similar even if the t/c was not then median ratio normalized, for example, since that is just a scaling factor.''')
parser_puffcn_protocol.add_argument('-20', '--protocol20', action='store_true', default=False,
help='''Robust Z score differences between two samples. Requires both late (test) and early (control) samples. Robust Z scores are calculated independently for each sample. Then R_control is subtracted from R_test = R_t - R_c.''')
parser_puffcn_protocol.add_argument('-21', '--protocol21', action='store_true', default=False,
help='''Rank score differences between two samples. Requires both late (test) and early (control) samples. Rank scores are calculated independently for each sample. Then R_control is subtracted from R_test = R_t - R_c.''')
parser_puffcn_protocol.add_argument('-22', '--protocol22', action='store_true', default=False,
help='''Signal Per Million Reads (or counts, or per million of whatever can be summed up in the 4th column). Bin_spmr = 1e6*Bin/Sum(Bins).
Use --SPXR to change scaling factor from 1e6 to X. If early (control) given, both are SPMR'd independently, then late/early (test/control).
When an early (control) sample is provided, you may also want to check the default pseudocount applied.''')
parser_puffcn_protocol.add_argument('-23', '--protocol23', action='store_true', default=False,
help='''Rank standardize scores. First rank, then subtract and divide by middle: (r-M)/M, where r is a bins rank, and M is the theoretical middle rank: M=(min+max)/2. If early (control) sample given, first is control norm (late/early, test/control) followed by ranking. The fold-change calculation is not treated specially as it might be with other options. However, the rank should be the same or similar even if the t/c was not then median ratio normalized, for example, since that is just a scaling factor.''')
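## --- Illustrative note (not part of the original program) -------------------
## Rough sketches of the simpler per-bin transforms described above, where V is
## the 4th-column vector; the real implementations live in the normalize module
## and may differ in details such as pseudocounts and NaN handling:
##   skew[i]      = (V[i] - V[i-1]) / (V[i] + V[i-1])                          # protocol 8
##   pctchange[i] = 100.0 * (V[i] - V[i-1]) / V[i-1]                           # protocol 9
##   spxr[i]      = SPXR * V[i] / sum(V)                                       # protocol 22
##   rankstd[i]   = (rank(V)[i] - M) / M, with M = (min(rank) + max(rank)) / 2 # protocol 23
## -----------------------------------------------------------------------------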
parser_puffcn.add_argument('--stringcols', action='store_true', default=False,
help='''Just treat columns other than 4 as strings...''')
parser_puffcn.add_argument('--log2', action='store_true', default=False,
help='''Return log2 values. Default = False.''')
parser_puffcn.add_argument('--log10', action='store_true', default=False,
help='''Return log10 values. Default = False.''')
parser_puffcn.add_argument('--scalecov', type=float, default=1,
help='''Multiply coverage by this as part of protocol 17.''')
parser_puffcn.add_argument('--SPXR', type=float, default=1e6,
help='''In essence, this is like --scalecov with a different default: 1e6.''')
parser_puffcn.add_argument('--pseudoZeroBins', action='store_true', default=False,
help='''Not to be confused with --pseudo. This option applies only to protocols 24-27 right now. It only need be used when there are zeros in the control (early) sample. In protocols 26 and 27, this is likely to happen from the robust z-score pre-processing. If an error is thrown, try --pseudoZeroBins or --addMinOtherPlusOneToBoth. --pseudoZeroBins adds min(abs(values))/10 to bins in both samples that were 0 in control (early). --addMinOtherPlusOneToBoth shifts both distributions up by min(control values)+1, setting the min control value to 1. These are not meant to be used together, but won't throw an error if they are.''')
parser_puffcn.add_argument('--addMinOtherPlusOneToBoth', action='store_true', default=False,
help='''This option applies only to protocols 24-27 right now. It only need be used when there are zeros in the control (early) sample. In protocols 26 and 27, this is likely to happen from the robust z-score pre-processing. If an error is thrown, try --pseudoZeroBins or --addMinOtherPlusOneToBoth. --pseudoZeroBins adds min(abs(values))/10 to bins in both samples that were 0 in control (early). --addMinOtherPlusOneToBoth shifts both distributions up by min(control values)+1, setting the min control value to 1. These are not meant to be used together, but won't throw an error if they are.''')
parser_puffcn.add_argument('-m', '--emodel', type=str, default='normal',
help='''Specify emissions model to assume for HMM. Options: normal, exponential, poisson, geometric, gamma, and discrete. Default: normal.
Note that all you ever need to do is give the expected means and standard deviations for each state from sampled data.
The normal model will use those directly. Poisson will use the means as lambda. Exponential will use the means as B w/ rate 1/B. Geometric will also use the means as 1/mu.
Gamma will estimate alpha and beta (shape and scale) parameters from the means and standard deviations - if you have A/B in mind, convert to mu and sigma by A*B and (A*B^2)^0.5.
Note that the exponential is same as the gamma when shape is set to 1, making mu and sigma equal B. Thus, if you do gamma w/ muscale=1, then you should get same as exponential.
"Discrete" is when you have a finite number of categories that occur at various frequencies like the sides of a coin or dice. This expects a nState X nSymbol matrix in --mu.
"Discrete" assumes the symbols emitted are sequential integers from 1:N where N = number states.''')
parser_puffcn.add_argument('-p', '--path', type=str, default='viterbi',
help='''Specify whether to take state path defined by viterbi or posterior decoding. Options: viterbi, posterior. Default: viterbi.''')
parser_puffcn.add_argument('-s', '--scale', action='store_true', default=False,
help='''Before going back into the HMM, re-scale counts back towards original magnitude by multiplying by median.
This will also scale the HMM state means by the median to cooperate.''')
parser_puffcn.add_argument('-c', '--collapsed', action='store_true', default=False,
help='''Return collapsed variable-step bedGraph instead of expanded single-step bedGraph.
This is often a much smaller file.''')
parser_puffcn.add_argument('-ps', '--pseudo', type=float, default=0.1,
help=''' Before normalizing late to early, add this pseudocount to all counts in order to avoid division by zero.
Should be between 0 and 1.
Should be small enough to not change other values much,
but big enough such that numbers divided by 0+pseudo do not become massive.
Default: 0.1.''')
parser_puffcn.add_argument('-bw', '--bandwidth', type=int, default=2500,
help=''' If kernel smoothing, specify bandwidth (int).
Bandwidth should be bigger when no early stage normalization to try to smooth out sequencing biases, mappability biases, etc.
Default: 2500.''')
parser_puffcn.add_argument('--endsmoothing', action='store_true', default=False,
help=''' Add smoothing to the absolute end of any of the protocols for more flexibility here. This comes after log-transformation steps, for example, which optionally comes at the end of any protocol.''')
parser_puffcn.add_argument('--replaceNaN', action='store_true', default=False,
help=''' If kernel smoothing, NaNs can be generated. This option replaces those with local averages (see --localwinsize, default=5 bins). In cases where local averages return NaN (very rare), it fills NaN with the global average for the given chrom/sequence (not the whole genome, so still local-ish).''')
parser_puffcn.add_argument('--localwinsize', type=int, default=5,
help=''' If kernel smoothing and/or using --replaceNaN, this specifies the number of bins to use (centered on the current bin, so odd numbers are preferable). Default = 5.''')
parser_puffcn.add_argument('--impute', type=int, default=False,
help=''' If imputing, specify bandwidth (int) for kernel smoothing.
This bandwidth is generally longer than the one you would provide for regular smoothing.
Only bins with a count of 0 will take on smoothed (imputed) values.
Try: 10000.
NOTE: In practice, this led to its own set of problems and I do not recommend using it in its current form.''')
parser_puffcn.add_argument('--counts', type=str, default=False,
help=''' Use this flag and specify an output prefix for the final normalized late stage bin counts bedGraph.''')
parser_puffcn.add_argument('--levels', action='store_true', default=False,
help=''' Use this flag to output levels bdg instead of state bdg.
Levels are the means 1,2,4,8,16,32,64.
These are obtained by 2**(state-1).''')
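## Illustrative note (not program code): with the default means, the state-to-level
## mapping above gives state 1 -> 2**0 = 1, state 4 -> 2**3 = 8, state 7 -> 2**6 = 64.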
parser_puffcn.add_argument('--mu', '--discreteEmat', type=str, default='1,2,4,8,16,32,64',
help=''' PuffCN has been optimized for mapping DNA puffs in the fungus fly.
The default state means were previously hard-coded.
This option allows some flexibility from the command-line to change the state means.
Default: 1,2,4,8,16,32,64
To change: Provide a comma-separated list of state means.
The number of states will be calculated from this list.
If changing state sigmas (used in normal model), it must have same number of states represented.
NOTE: If using exponential or geometric distribution, provide the expected mean RCN values of the states
as you would for normal or poisson models. This script will automatically take their inverses to work
in the exponential and geometric models.
NOTE2: For "--emodel discrete", --mu can be called as --discreteEmat for better readability at the command line.
Instead of a comma-sep list of means, provide comma-/semicolon-separated values to make up a nState X nSymbol matrix.
Example of a 3-state x 4 symbol matrix: "0.97,0.01,0.01,0.01;0.01,0.97,0.01,0.01;0.01,0.01,0.01,0.97".
That can be thought of as 3 4-sided dice.''')
parser_puffcn.add_argument('--sigma', type=str, default=None,
help=''' PuffCN has been optimized for mapping DNA puffs in the fungus fly.
The default state sigmas (stdevs) were previously hard-coded.
This option allows some flexibility from the command-line to change the state sigmas.
Default: if not changed, defaults to square root of state means (Poisson-like).
To change: Provide a comma-separated list of state sigmas.
Alternatively: Use --mu_scale (default False) with a scaling factor multiplied against the MUs.
The number of states is calculated from this state mean list, which defaults to 7.
If changing state sigmas (used in normal model), it must have same number of states represented as state means.''')
parser_puffcn.add_argument('--mu_scale', type=float, default=None,
help=''' See --sigma for more details on sigmas.
Use this to scale means (--mu) to use as stdevs (sigma) instead of taking square roots of means.
For example, --mu_scale 0.5 will use mu*0.5 as the stdev.''')
## parser_puffcn.add_argument('--changestate', type=float, default=0.001,
## help=''' PuffCN has been optimized for mapping DNA puffs in the fungus fly.
##The default transition probabilities were previously hard-coded.
##For now, there are two parameters for transition probabilities: changing states or staying in a state.
##They are shared by all states.
##This option allows some flexibility from the command-line to change the probability of changing states.
##Default: 0.001.
##NOTE: To ensure transition probabilities from state i to j sum to 1,
## the final transition probabilities will actually be X/sum(all trans i to j),
## where X is 0.001 by default or what user has given.
##''')
##
## parser_puffcn.add_argument('--samestate', type=float, default=0.999,
## help=''' PuffCN has been optimized for mapping DNA puffs in the fungus fly.
##The default transition probabilities were previously hard-coded.
##For now, there are two parameters for transition probabilities: changing states or staying in a state.
##They are shared by all states.
##This option allows some flexibility from the command-line to change the probability of staying in the current state (not changing).
##Default: 0.999.
##NOTE: To ensure transition probabilities from state i to j sum to 1,
## the final transition probabilities will actually be X/sum(all trans i to j),
## where X is 0.999 by default or what user has given.
##''')
parser_puffcn.add_argument('--special_idx', type=int, default=0,
help='''Only for use if you're very familiar with the program (and change defaults).
The default state means are 1,2,4,8,16,32,64.
The default index for the mean that represents copy number 1 is 0.
In this lingo - CN=1 is the special state, and the 0-based index of the special state in that list is 0.
If you were to change parameters that affect where the special state is in the list, make sure to change this index.
This index is only used to help construct initial probabilities and transition probabilies.
If understood, it can be used to designate any single special state (not necessarily the one that corresponds to CN=1).
The other parameters to use with this are:
--init_special (probability of starting in the special state (usually CN=1).
The probability of starting in another state (usually copy number variant states) defaults to (1-init_special)/(nstates-1).
--prob_leave_special
--prob_stay_special
--prob_other_to_special
--prob_other_to_other
--prob_other_to_self
Alternatively, an initial probability vector can be given with --initialprobs
''')
parser_puffcn.add_argument('--init_special', type=float, default=0.997,
help='''Probability of starting in the 'special state' (usually copy number = 1). Default: 0.997.
The probability of starting in another state (usually copy number variant states) defaults to (1-init_special)/(nstates-1).
''')
parser_puffcn.add_argument('--leave_special_state', type=float, default=0.001,
help='''Probability of leaving the 'special state' (usually copy number = 1).
Default: 0.001.
If the number is between 0 and 1, it will be assumed to be a probability.
If number given is > 1, then it will be treated as the average length (number of bins) of the special state.
For example, if 1000 is given, it will be 1/1000 = 0.001.
In terms of bp lengths, one would need to multiply n_bins * bin_length OR divide bp length by bin_length
Thus, if you want to see a change every 500 kb w/ 500 bp bins, then 500000/500 = 1000 bins -- which will be interpreted as 0.001.
Or as another example, if you expect to see a change every 2 Mb with 100 bp bins, then 2e6/1e2 = 2e4 = 20000 bins, interpreted as 0.00005.
The probability of staying in this state is the complement: 1-p
''')
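## --- Illustrative note (not part of the original program) -------------------
## Worked example of the length-to-probability conversion described above,
## assuming a change is expected roughly every 1 Mb with 500 bp bins:
##   n_bins  = 1e6 / 500 = 2000
##   p_leave = 1.0 / 2000 = 0.0005
## Any value > 1 passed to --leave_special_state is converted in this way.
## -----------------------------------------------------------------------------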
parser_puffcn.add_argument('--leave_other', type=str, default=None,
help='''Probability of leaving one of the other states.
This defaults to --leave_special_state making all transition probabilities out of states the same (0.001 by default).
To change, provide a probability of leaving (p).
If the first number is between 0 and 1, it will be assumed to be a probability.
If the first number given is > 1, then it will be treated as the average length (number of bins).
For example, if 1000 is given, it will be 1/1000 = 0.001.
If only 1 number is given, then that is assumed to be the probability of transitioning to all the other states.
You can also give a comma-separated pair of 2 probabilities:
prob of leaving to special state
prob of leaving to another 'non-special' state.
Make sure the probabilities sum to what you expect the overall probability of leaving the state is...
which should be p_to_special + p_to_nonspecial * (num_non_special-1) = p_to_special + p_to_nonspecial * (nstates-2)
For example, in a 7-state model:
0.001,0.0002 --> 0.001 + 0.0002 * 5 = 0.001 + 0.001 = 0.002
OR
0.001,0.001 --> 0.001 + 0.001 * 5 = 0.006
If the second number is > 1, the same rules apply as to the first number.
For other analyses, I've used:
0.00001,0.000000000001
OR
0.001,0.0000000001
The probability of staying in these states is the complement: 1-p1-p2
NOTE: the program forces the transition probabilities of a given state to sum to 1.
''')
parser_puffcn.add_argument('--initialprobs', type=str, default=None,
help='''PuffCN has been optimized for mapping DNA puffs in the fungus fly.
The default state means were previously hard-coded.
This option allows some flexibility from the command-line to change the state means.
Default: [0.997, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005]
The default will change with more or less states described w/ --mu and --sigma.
By default, the first state will start out as 0.997 as above, all other states will be (1-0.997)/n_other_states.
That behavior also changes with following parameters:
--special_idx -- determines which state (not necessarily the first) will be given the default 0.997 (or another value via --init_special)
--init_special (probability of starting in the special state (usually CN=1).
The probability of starting in another state (usually copy number variant states) defaults to (1-init_special)/(nstates-1).
--leave_special_state
--prob_other_to_special
--prob_other_to_other
--prob_other_to_self
To change the initial probs manually: Provide comma-separated list of initial probs -e.g.: '0.997,0.0005,0.0005,0.0005,0.0005,0.0005,0.0005'
This must have same number of states represented as state means (--mu; default 7).
''')
parser_puffcn.add_argument('--transprobs', type=str, default=None,
help='''Provide trans probs with semi-colon separated rows that have comma-separated values.
E.g. a 2-state t matrix where self-self is 0.99 and self-other is 0.01 looks like: 0.99,0.01;0.01,0.99
''')
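## --- Illustrative note (not part of the original program) -------------------
## The string format above can be parsed into a row-major matrix like so
## (sketch only; the actual parsing happens elsewhere in the pipeline):
##   s = "0.99,0.01;0.01,0.99"
##   tmatrix = [[float(x) for x in row.split(',')] for row in s.split(';')]
##   # -> [[0.99, 0.01], [0.01, 0.99]]
## -----------------------------------------------------------------------------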
##'0.997,0.0005,0.0005,0.0005,0.0005,0.0005,0.0005' [0.997, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005]
parser_puffcn.add_argument('--kmeans', type=int, default=None,
help='''PuffCN has been optimized for mapping DNA puffs in the fungus fly.
The default HMM parameters were previously hard-coded.
There are now other options available to tune the parameters.
This option is the first way to learn parameters.
It uses kmeans clustering of the data to estimate initial, transition, and emission probs.
For now, you need to make an assumption about k by providing an integer.
This model probably works best when you expect 2-3 states...
This option overrides all other parameter options (which will be ignored).
''')
parser_puffcn.add_argument('--iters', type=int, default=1,
help='''Number of iterations to run for updating parameters. Default: 1 (no updates).
''')
parser_puffcn.add_argument('--converge', type=float, default=1e-9,
help='''When running multiple iterations, stop iterating if the difference between the log likelihood of the current state path and that of the previous one is less than this value. Default = 1e-9.
''')
parser_puffcn.add_argument('--learnpseudo', type=float, default=1e-323,
help='''When learning state transitions from previous state path, zero counts can throw a wrench into the spokes.
This prevents that. Can be 1e-323 to Inf, but recommend <= 1.
Default pseudocount is basically the lowest non-zero number possible: 1e-323.
''')
parser_puffcn.add_argument('--emitpseudo', type=float, default=1e-7,
help='''Specifically for --emodel discrete.
When learning emission frequencies from the previous state path, zero counts can throw a wrench into the spokes.
This prevents that. Can be 1e-323 to Inf, but recommend <= 1.
Default pseudocount: 1e-7 (1 in 10 million).
''')
parser_puffcn.add_argument('--constrainEmit', action='store_true', default=False,
help='''When iterating, do not update the emission probabilities: constrain them to what was given to initialize.
''')
parser_puffcn.add_argument('--outpfx', type=str, default=None,
help='''Prefix for output. If not used, all output goes to stdout.
This is particularly useful when iters > 1, which outputs the statepath from each round.
If an out prefix is not specified and iters > 1, you will be warned/reminded in a stderr message that can be ignored if purposeful.
''')
parser_puffcn.set_defaults(func=run_subtool)
## create sub-command for summits
parser_summits = subparsers.add_parser('summits',
help=''' Find summits...''')
parser_summits.add_argument('-l','--latestage', type=str, required=True,
help='''Provide path to bedGraph (e.g. made from getcov) for a late stage sample.''')
parser_summits.add_argument('-e','--earlystage', type=str, required=False, default=False,
help=''' Optional: Provide path to bedGraph (e.g. made from getcov) for an early stage sample. This is used after smoothing and median normalization to further normalize the late-stage sample (e.g. can correct for sequencing biases)''')
parser_summits.add_argument('--replace', action='store_true', default=False,
help='''Turn on "replace" functionality. By default this will replace '.' in the count column of bedGraphs with '0'.
Use --replace_with and --replace_this to change.''')
parser_summits.add_argument('--replace_this', type=str, default='.',
help='''Used with --replace. Specify the character in count column to replace. Default = '.' ''')
parser_summits.add_argument('--replace_with', type=str, default='0',
help='''Used with --replace. Specify the character to replace the --replace_this character with.
Must be a string that can be converted to a float. Default = '0' ''')
parser_summits_protocol = parser_summits.add_mutually_exclusive_group(required=True)
parser_summits_protocol.add_argument('-1', '--protocol1', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are median normalized.
Then late stage is normalized to early stage if available.
Then the HMM is run.''')
parser_summits_protocol.add_argument('-2', '--protocol2', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first smoothed with bandwidth given by --bandwidth.
Then they are median normalized.
Then late stage is normalized to early stage if available.
Then the HMM is run.''')
parser_summits_protocol.add_argument('-3', '--protocol3', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first median normalized.
Then they are smoothed with bandwidth given by --bandwidth.
Then late stage is normalized to early stage if available.
Then the HMM is run.
Note: if early is not present, this is same as protocol 4.''')
parser_summits_protocol.add_argument('-4', '--protocol4', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first median normalized.
Then late stage is normalized to early stage if available.
Then late/early is smoothed with bandwidth given by --bandwidth.
Then the HMM is run.
Note: if early is not present, this is same as protocol 3.''')
parser_summits_protocol.add_argument('-5', '--protocol5', action='store_true', default=False,
help='''Late stage is normalized to early stage if available.
Then late/early is smoothed with bandwidth given by --bandwidth. (i.e. L/E -> smooth)
Then the HMM is run.
Note: if early is not present, this is same as protocol 6.''')
parser_summits_protocol.add_argument('-6', '--protocol6', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first smoothed with bandwidth given by --bandwidth.
Then late stage is normalized to early stage if available. (i.e. smooth -> L/E)
Then the HMM is run.
Note: if early is not present, this is same as protocol 5.''')
parser_summits_protocol.add_argument('-7', '--protocol7', action='store_true', default=False,
help='''Late stage is normalized to early stage if available.
Note: No median normalization or smoothing is performed. If only late is given, then this is just an identity/pass-through function.''')
parser_summits_protocol.add_argument('-8', '--protocol8', action='store_true', default=False,
help='''SKEW. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes Skew = (V[i]-V[i-1]) / (V[i]+V[i-1])''')
parser_summits_protocol.add_argument('-9', '--protocol9', action='store_true', default=False,
help='''PERCENT CHANGE. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes PercentChange = 100*(V[i]-V[i-1]) / V[i-1]''')
parser_summits_protocol.add_argument('-10', '--protocol10', action='store_true', default=False,
help='''SKEW CHANGE or SKEW DERIVATIVE. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes Skew = 100*(V[i]-V[i-1]) / V[i-1]''')
parser_summits_protocol.add_argument('-11', '--protocol11', action='store_true', default=False,
help='''PERCENT CHANGE DERIVATIVE. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes Skew = 100*(V[i]-V[i-1]) / V[i-1]''')
parser_summits.add_argument('--stringcols', action='store_true', default=False,
help='''Just treat columns other than 4 as strings...''')
parser_summits.add_argument('-ps', '--pseudo', type=float, default=0.1,
help=''' Before normalizing late to early, add this pseudocount to all counts in order to avoid division by zero.
Should be between 0 and 1.
Should be small enough to not change other values much,
but big enough such that numbers divided by 0+pseudo do not become massive.
Default: 0.1.''')
parser_summits.add_argument('-bw', '--bandwidth', type=int, default=2500,
help=''' If kernel smoothing, specify bandwidth (int).
Bandwidth should be bigger when no early stage normalization to try to smooth out sequencing biases, mappability biases, etc.
Default: 2500.''')
parser_summits.add_argument('--impute', type=int, default=False,
help=''' If imputing, specify bandwidth (int) for kernel smoothing.
This bandwidth is generally longer than the one you would provide for regular smoothing.
Only bins with a count of 0 will take on smoothed (imputed) values.
Try: 10000.''')
parser_summits_regions = parser_summits.add_mutually_exclusive_group(required=True)
parser_summits_regions.add_argument('--regions', type=str, default=False,
help = ''' Find summits in these regions - provide BED file.''')
parser_summits_regions.add_argument('--states', type=str, default=False,
help=''' Provide statepath bedGraph output by "cn" sub-command. Peak regions will be found automatically.''')
parser_summits.add_argument('--thresh_state', type=int, default=1,
help=''' Used with --states. Only consider regions with states higher than state given. Default: 1.''')
parser_summits.add_argument('--merge1', type=float, default=10e3,
help = '''Used with --states. After extracting only bins with higher state value than --thresh_state, merge bins if they are with in --merge1 bp from each other. Default: 10e3.''')
parser_summits.add_argument('--minwidth', type=float, default=50e3,
help = '''After extracting bins with states > --thresh_state and merging remaining bins that are within --merge1 bp of each other,
only keep merged regions > --minwidth.''')
parser_summits.add_argument('--merge2', type=float, default=40e3,
help = '''After (i) extracting bins with states > --thresh_state, (ii) merging remaining bins that are within --merge1 bp of each other,
(iii) retaining only merged regions > --minwidth, merge regions that are within --merge2 bp of each other.''')
parser_summits.add_argument('--max_state_thresh', type=int, default=2,
help = ''' After (i) extracting bins with states > --thresh_state, (ii) merging remaining bins that are within --merge1 bp of each other,
(iii) retaining only merged regions > --minwidth, (iv) merging filtered regions that are within --merge2 bp of each other,
only retain the remaining merged regions if their maximum state is > --max_state_thresh''')
parser_summits.set_defaults(func=run_subtool)
## create sub-command for normalize
parser_normalize = subparsers.add_parser('normalize',
help = '''Given a latest-stage sample (where all or most puffs have grown) and an optional earliest stage sample
(for additional Fold-enrichment normalization), just return the late-stage sample with normalized values as specified by protocol options below.''')
parser_normalize.add_argument('-l','--latestage', type=str, required=True,
help='''Provide path to bedGraph (e.g. made from getcov) for a late stage sample.''')
parser_normalize.add_argument('-e','--earlystage', type=str, required=False, default=False,
help=''' Optional: Provide path to bedGraph (e.g. made from getcov) for an early stage sample. This is used after smoothing and median normalization to further normalize the late-stage sample (e.g. can correct for sequencing biases)''')
parser_normalize.add_argument('--replace', action='store_true', default=False,
help='''Turn on "replace" functionality. By default this will replace '.' in the count column of bedGraphs with '0'.
Use --replace_with and --replace_this to change.''')
parser_normalize.add_argument('--replace_this', type=str, default='.',
help='''Used with --replace. Specify the character in count column to replace. Default = '.' ''')
parser_normalize.add_argument('--replace_with', type=str, default='0',
help='''Used with --replace. Specify the character to replace the --replace_this character with.
Must be a string that can be converted to a float. Default = '0' ''')
parser_normalize_protocol = parser_normalize.add_mutually_exclusive_group(required=True)
parser_normalize_protocol.add_argument('-1', '--protocol1', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are median normalized.
Then late stage is normalized to early stage if available.
Then the HMM is run.''')
parser_normalize_protocol.add_argument('-2', '--protocol2', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first smoothed with bandwidth given by --bandwidth.
Then they are median normalized.
Then late stage is normalized to early stage if available.
Then the HMM is run.''')
parser_normalize_protocol.add_argument('-3', '--protocol3', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first median normalized.
Then they are smoothed with bandwidth given by --bandwidth.
Then late stage is normalized to early stage if available.
Then the HMM is run.
Note: if early is not present, this is same as protocol 4.''')
parser_normalize_protocol.add_argument('-4', '--protocol4', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first median normalized.
Then late stage is normalized to early stage if available.
Then late/early is smoothed with bandwidth given by --bandwidth.
Then the HMM is run.
Note: if early is not present, this is same as protocol 3.''')
parser_normalize_protocol.add_argument('-5', '--protocol5', action='store_true', default=False,
help='''Late stage is normalized to early stage if available.
Then late/early is smoothed with bandwidth given by --bandwidth. (i.e. L/E -> smooth)
Then the HMM is run.
Note: if early is not present, this is same as protocol 6.''')
parser_normalize_protocol.add_argument('-6', '--protocol6', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first smoothed with bandwidth given by --bandwidth.
Then late stage is normalized to early stage if available. (i.e. smooth -> L/E)
Then the HMM is run.
Note: if early is not present, this is same as protocol 5.''')
parser_normalize_protocol.add_argument('-7', '--protocol7', action='store_true', default=False,
help='''Late stage is normalized to early stage if available.
Note: No median normalization or smoothing is performed. If only late is given, then this is just an identity/pass-through function.''')
parser_normalize_protocol.add_argument('-8', '--protocol8', action='store_true', default=False,
help='''SKEW. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes Skew = (V[i]-V[i-1]) / (V[i]+V[i-1])''')
parser_normalize_protocol.add_argument('-9', '--protocol9', action='store_true', default=False,
help='''PERCENT CHANGE. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes PercentChange = 100*(V[i]-V[i-1]) / V[i-1]''')
parser_normalize_protocol.add_argument('-10', '--protocol10', action='store_true', default=False,
help='''SKEW CHANGE or SKEW DERIVATIVE. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes Skew = 100*(V[i]-V[i-1]) / V[i-1]''')
parser_normalize_protocol.add_argument('-11', '--protocol11', action='store_true', default=False,
help='''PERCENT CHANGE DERIVATIVE. Only accepts one file (e.g. latestage).
For file with N rows, returns N-1 rows.
For each value, V[i], in 4th column, for i in 2:N, it computes Skew = 100*(V[i]-V[i-1]) / V[i-1]''')
parser_normalize_protocol.add_argument('-12', '--protocol12', action='store_true', default=False,
help=''' Median ratio normalization. Late is normalized to early. Then those ratios are median normalized.
This is similar to a global version of what's used in DEseq2 (or TMM for EdgeR).''')
parser_normalize_protocol.add_argument('-13', '--protocol13', action='store_true', default=False,
help=''' Median ratio normalization with pre-smoothing. Late (and early if present) is smoothed. Late is normalized to early. Then those ratios are median normalized.
This is similar to a global version of what's used in DEseq2 (or TMM for EdgeR).''')
parser_normalize_protocol.add_argument('-14', '--protocol14', action='store_true', default=False,
help=''' Median ratio normalization with post-smoothing. Late is normalized to early. Then those ratios are smoothed. Then median normalized.
This is similar to a global version of what's used in DEseq2 (or TMM for EdgeR).''')
parser_normalize_protocol.add_argument('-15', '--protocol15', action='store_true', default=False,
help=''' Median ratio normalization with end-smoothing. Late is normalized to early. Then those ratios are median normalized. Then smoothed.
This is similar to a global version of what's used in DEseq2 (or TMM for EdgeR).''')
parser_normalize_protocol.add_argument('-16', '--protocol16', action='store_true', default=False,
help=''' Glocal Median ratio normalization. Late is normalized to early. Then those ratios are median normalized based on coverage value in late.
This is similar to a coverage-based local version of what's used in DEseq2 (or TMM for EdgeR).''')
parser_normalize_protocol.add_argument('-17', '--protocol17', action='store_true', default=False,
help=''' First normalizes late stage to median (X/Med), and scales it with --scalecov to make the median equal scalecov.
It does the same to early if present.
Then Late is normalized to Early (FE) using the given pseudocount (note: the pseudocount given has the same ratio to the median in both samples).
It then does median ratio normalization.
It then returns it as is or logged if --log10 or --log2 specified''')
parser_normalize_protocol.add_argument('-18', '--protocol18', action='store_true', default=False,
help='''Robust Z scores. If early (control) sample given, first is control norm (late/early, test/control) followed by robust Z. The fold-change calculation is not treated specially as it might be with other options. However, the robust Z should be the same or similar even if the t/c was not then median ratio normalized, for example, since that is just a scaling factor.''')
parser_normalize_protocol.add_argument('-19', '--protocol19', action='store_true', default=False,
help='''Rank scores. If early (control) sample given, first is control norm (late/early, test/control) followed by ranking. The fold-change calculation is not treated specially as it might be with other options. However, the rank should be the same or similar even if the t/c was not then median ratio normalized, for example, since that is just a scaling factor.''')
parser_normalize_protocol.add_argument('-20', '--protocol20', action='store_true', default=False,
help='''Robust Z score differences between two samples. Requires both late (test) and early (control) samples. Robust Z scores are calculated independently for each sample. Then R_control is subtracted from R_test = R_t - R_c.''')
parser_normalize_protocol.add_argument('-21', '--protocol21', action='store_true', default=False,
help='''Rank score differences between two samples. Requires both late (test) and early (control) samples. Rank scores are calculated independently for each sample. Then R_control is subtracted from R_test = R_t - R_c.''')
parser_normalize_protocol.add_argument('-22', '--protocol22', action='store_true', default=False,
help='''Signal Per Million Reads (or counts, or per million of whatever can be summed in the 4th column). Bin_spmr = 1e6*Bin/Sum(Bins).
Use --SPXR to change scaling factor from 1e6 to X. If early (control) given, both are SPMR'd independently, then late/early (test/control).
When an early (control) sample is provided, you may also want to check the default pseudocount applied.''')
parser_normalize_protocol.add_argument('-23', '--protocol23', action='store_true', default=False,
help='''Rank standardize scores. First rank, then subtract and divide by middle: (r-M)/M, where r is a bin's rank, and M is the theoretical middle rank: M=(min+max)/2. If early (control) sample given, first is control norm (late/early, test/control) followed by ranking. The fold-change calculation is not treated specially as it might be with other options. However, the rank should be the same or similar even if the t/c was not then median ratio normalized, for example, since that is just a scaling factor.''')
parser_normalize_protocol.add_argument('-24', '--protocol24', action='store_true', default=False,
help='''Pct difference from early (control): 100*(T-C)/abs(C). Usually for T and C values >= 0, but abs(C) allows both pos and neg values. This requires both test (late) and control (early) samples. It assumes samples are pre-processed however you want. Other options below do pre-processing before this step.''')
parser_normalize_protocol.add_argument('-25', '--protocol25', action='store_true', default=False,
help='''Pct skew of late (test) vs early (control): 100*(T-C)/(abs(T)+abs(C)). Usually for T and C values >= 0, but (abs(T)+abs(C)) is an experimental way to allow both pos and neg values. This requires both test (late) and control (early) samples. It assumes samples are pre-processed however you want. Other options below do pre-processing before this step.''')
parser_normalize_protocol.add_argument('-26', '--protocol26', action='store_true', default=False,
help='''Test Pct difference from early (control) after both test and control samples are transformed into Robust Z-scores: 100*(R_t-R_c)/abs(R_c). ''')
parser_normalize_protocol.add_argument('-27', '--protocol27', action='store_true', default=False,
help='''Pct skew given late (test) and early (control) samples, after both test and control samples are transformed into Robust Z-scores: 100*(R_t-R_c)/(abs(R_t)+abs(R_c)).''')
parser_normalize_protocol.add_argument('-28', '--protocol28', action='store_true', default=False,
help='''Test Pct difference from early (control) after both test and control samples are transformed into Ranks: 100*(R_t-R_c)/abs(R_c). ''')
parser_normalize_protocol.add_argument('-29', '--protocol29', action='store_true', default=False,
help='''Pct skew given late (test) and early (control) samples, after both test and control samples are transformed into Ranks: 100*(R_t-R_c)/(abs(R_t)+abs(R_c)).''')
parser_normalize_protocol.add_argument('-30', '--protocol30', action='store_true', default=False,
help='''Median ratio normalization. Late is normalized to early. Then those ratios are locally median normalized -- physically local in the genome, with window size controlled by --halfwidth (default 10 bins to each side).
This is similar to what's used in DEseq2 (or TMM for EdgeR) -- on local genomic regions.
A motivation to do this is to allow for non-linear corrections of ChIP vs control over a range of copy numbers, as is seen in intrachromosomal DNA amplification for example.
The alternative is to use the global median ratio for everything, which assumes no distortion of ratios at different copy numbers.
Note that if the experiment is to map relative copy numbers compared to a control DNA sample, the GLOBAL approach is what you would want to use.
The local approach would make everything look like RCN=1 in that scenario.
The local approach is aimed at eliminating effects on the ratios due to local biases such as copy number in order to leave only peaks due to ChIP, for example.''')
parser_normalize.add_argument('--stringcols', action='store_true', default=False,
help='''Just treat columns other than 4 as strings...''')
parser_normalize.add_argument('--log2', action='store_true', default=False,
help='''Return log2 values. Default = False.''')
parser_normalize.add_argument('--log10', action='store_true', default=False,
help='''Return log10 values. Default = False.''')
parser_normalize.add_argument('--scalecov', type=float, default=1,
help='''Multiply coverage by this as part of protocol 17.''')
parser_normalize.add_argument('--SPXR', type=float, default=1e6,
help='''In essence, this is like --scalecov with a different default: 1e6.''')
parser_normalize.add_argument('--halfwidth', type=int, default=10,
help='''In local operations (only protocol30, local med ratios atm), this is how many bins on each side of a central position is used to calculate a statistic.
The total window size would therefore be:
window = LHS + position + RHS = halfwidth + 1 + halfwidth = halfwidth*2 + 1.
Default = 10.
Note that the default has different spans depending on the input bin size.
If using 500 bp bins, then 10 bins to each side equals 5 kb to each side (10.5 kb window), but just 1 kb (2.1 kb window) if using 100 bp bins.''')
parser_normalize.add_argument('--pseudoZeroBins', action='store_true', default=False,
help='''Not to be confused with --pseudo. This option applies only to protocols 24-27 right now. It only needs to be used when there are zeros in the control (early) sample. In protocols 26 and 27, this is likely to happen from the robust z-score pre-processing. If an error is thrown, try --pseudoZeroBins or --addMinOtherPlusOneToBoth. --pseudoZeroBins adds min(abs(nonzero control values)) to bins in both samples that were 0 in control (early). --addMinOtherPlusOneToBoth shifts both distributions up by min(control values)+1, setting the min control value to 1. Both use minimum values for each chrom/contig independently rather than a global min. This is intended to reduce the effects of the modifications, but may introduce its own issues. These are not meant to be used together, but won't throw an error if they are.''')
parser_normalize.add_argument('--addMinOtherPlusOneToBoth', action='store_true', default=False,
help='''This option applies only to protocols 24-27 right now. It only needs to be used when there are zeros in the control (early) sample. In protocols 26 and 27, this is likely to happen from the robust z-score pre-processing. If an error is thrown, try --pseudoZeroBins or --addMinOtherPlusOneToBoth. --pseudoZeroBins adds min(abs(nonzero control values)) to bins in both samples that were 0 in control (early). --addMinOtherPlusOneToBoth shifts both distributions up by min(control values)+1, setting the min control value to 1. These are not meant to be used together, but won't throw an error if they are. NOTE: Both use minimum values for each chrom/contig independently rather than a global min. This is intended to reduce the effects of the modifications, but may introduce its own issues.''')
parser_normalize.add_argument('--setToControlDist', action='store_true', default=False,
help='''This option applies only to protocols 24-27 right now. It resets the control RZ scores back to original, and scales the test R_z in the same way. Z_c = (X_c-med_c)/mad_c ; X_c = mad_c * Z_c + med_c ; X_t_c = mad_c * Z_t + med_c''')
parser_normalize.add_argument('-c', '--collapsed', action='store_true', default=False,
help='''Return collapsed variable-step bedGraph instead of expanded single-step bedGraph.
This is often a much smaller file.''')
parser_normalize.add_argument('-ps', '--pseudo', type=float, default=0.1,
help=''' Before normalizing late to early, add this pseudocount to all counts in order to avoid division by zero.
Should be between 0 and 1.
Should be small enough to not change other values much,
but big enough such that numbers divided by 0+pseudo do not become massive.
Default: 0.1.''')
parser_normalize.add_argument('-bw', '--bandwidth', type=int, default=2500,
help=''' If kernel smoothing, specify bandwidth (int).
Bandwidth should be bigger when there is no early stage normalization, to try to smooth out sequencing biases, mappability biases, etc.
Default: 2500.''')
parser_normalize.add_argument('--endsmoothing', action='store_true', default=False,
help=''' Add smoothing to the absolute end of any of the protocols for more flexibility here. This comes after log-transformation steps, for example, which optionally comes at the end of any protocol.''')
parser_normalize.add_argument('--replaceNaN', action='store_true', default=False,
help=''' If kernel smoothing, NaNs can be generated. This option replaces those with local averages (see --localwinsize, default=5 bins). In cases where local averages return NaN (very rare), it fills NaN with the global average for the given chrom/sequence (not the whole genome, so still local-ish).''')
parser_normalize.add_argument('--localwinsize', type=int, default=5,
help=''' If kernel smoothing and/or using --replaceNaN, this specifies the number of bins to use (centered on the current bin, so odd numbers are preferable). Default = 5.''')
parser_normalize.add_argument('--impute', type=int, default=False,
help=''' If imputing, specify bandwidth (int) for kernel smoothing.
This bandwidth is generally longer than the one you would provide for regular smoothing.
Only bins with a count of 0 will take on smoothed (imputed) values.
Try: 10000.''')
## parser_normalize.add_argument('--counts', type=str, default=False,
## help=''' Use this flag and specify an output prefix for the final normalized late stage bin counts bedGraph.''')
parser_normalize.set_defaults(func=run_subtool)
###########GENERATE
## create sub-command for generate
parser_generate = subparsers.add_parser('generate',
help = '''Generate emitted_data and statepath bedGraphs.''')
parser_generate.add_argument('-f','-b', '-i', '--bedgraph', type=str, required=True,
help='''Provide path to bedGraph that contains the intervals in first 3 columns to return with generated data.''')
parser_generate.add_argument('-m', '--emodel', type=str, default='normal',
help='''Specify emissions model to assume for HMM. Options: normal, exponential. Default: normal.''')
parser_generate.add_argument('--mu', type=str, default='1,2,4,8,16,32,64',
help=''' PuffCN has been optimized for mapping DNA puffs in the fungus fly.
The default state means were previously hard-coded.
This option allows some flexibility from the command-line to change the state means.
Default: 1,2,4,8,16,32,64
To change: Provide comma-separated list of state means.
The number of states will be calculated from this list.
If changing state sigmas (used in normal model), it must have same number of states represented.
NOTE: If using exponential or geometric distribution, provide the expected mean RCN values of the states
as you would for normal or poisson models. This script will automatically take their inverses to work
in the exponential and geometric models.''')
parser_generate.add_argument('--sigma', type=str, default=None,
help=''' PuffCN has been optimized for mapping DNA puffs in the fungus fly.
The default state sigmas (stdevs) were previously hard-coded.
This option allows some flexibility from the command-line to change the state sigmas.
Default: if not changed, defaults to square root of state means (Poisson-like).
To change: Provide comma-separated list of state sigmas.
Alternatively: Use --mu_scale (default False) with a scaling factor multiplied against the MUs.
The number of states is calculated from this state mean list, which defaults to 7.
If changing state sigmas (used in normal model), it must have same number of states represented as state means.''')
parser_generate.add_argument('--mu_scale', type=float, default=None,
help=''' See --sigma for more details on sigmas.
Use this to scale means (--mu) to use as stdevs (sigma) instead of taking square roots of means.
For example, --mu_scale 0.5 will use mu*0.5 as the stdev.''')
parser_generate.add_argument('--special_idx', type=int, default=0,
help='''Only for use if you're very familiar with the program (and change defaults).
The default state means are 1,2,4,8,16,32,64.
The default index for the mean that represents copy number 1 is 0.
In this lingo - CN=1 is the special state, and the 0-based index of the special state in that list is 0.
If you were to change parameters that affect where the special state is in the list, make sure to change this index.
This index is only used to help construct initial probabilities and transition probabilities.
If understood, it can be used to designate any single special state (not necessarily the one that corresponds to CN=1).
The other parameters to use with this are:
--init_special (probability of starting in the special state (usually CN=1)).
The probability of starting in another state (usually copy number variant states) defaults to (1-init_special)/(nstates-1).
--prob_leave_special
--prob_stay_special
--prob_other_to_special
--prob_other_to_other
--prob_other_to_self
Alternatively, an initial probability vector can be given with --initialprobs
''')
parser_generate.add_argument('--init_special', type=float, default=0.997,
help='''Probability of starting in the 'special state' (usually copy number = 1). Default: 0.997.
The probability of starting in another state (usually copy number variant states) defaults to (1-init_special)/(nstates-1).
''')
parser_generate.add_argument('--leave_special_state', type=float, default=0.001,
help='''Probability of leaving the 'special state' (usually copy number = 1).
Default: 0.001.
If the number is between 0 and 1, it will be assumed to be a probability.
If number given is > 1, then it will be treated as the average length (number of bins) of the special state.
For example, if 1000 is given, it will be 1/1000 = 0.001.
In terms of bp lengths, one would need to multiply n_bins * bin_length OR divide bp length by bin_length
Thus, if you want to see a change every 500 kb w/ 500 bp bins, then 500kb/500bp = 1000 bins -- which will be interpreted as 0.001.
Or as another example, if you expect to see a change every 2 Mb with 100 bp bins, then 2e6/1e2 = 2e4 = 20000 bins, interpreted as 0.00005.
The probability of staying in this state is the complement: 1-p
''')
parser_generate.add_argument('--leave_other', type=str, default=None,
help='''Probability of leaving one of the other states.
This defaults to --leave_special_state making all transition probabilities out of states the same (0.001 by default).
To change, provide a probability of leaving (p).
If the first number is between 0 and 1, it will be assumed to be a probability.
If the first number given is > 1, then it will be treated as the average length (number of bins).
For example, if 1000 is given, it will be 1/1000 = 0.001.
If only 1 number is given, then that is assumed to be the probability of transitioning to all the other states.
You can also give a comma-separated pair of 2 probabilities:
prob of leaving to special state
prob of leaving to another 'non-special' state.
Make sure the probabilities sum to what you expect the overall probability of leaving the state is...
which should be p_to_special + p_to_nonspecial * (num_non_special-1) = p_to_special + p_to_nonspecial * (nstates-2)
For example, in a 7-state model:
0.001,0.0002 --> 0.001 + 0.0002 * 5 = 0.001 + 0.001 = 0.002
OR
0.001,0.001 --> 0.001 + 0.001 * 5 = 0.006
If the second number is > 1, the same rules apply as to the first number.
For other analyses, I've used:
0.00001,0.000000000001
OR
0.001,0.0000000001
The probability of staying in these states is the complement: 1-p1-p2
NOTE: the program forces the transition probabilities of a given state to sum to 1.
''')
parser_generate.add_argument('--initialprobs', type=str, default=None,
help='''PuffCN has been optimized for mapping DNA puffs in the fungus fly.
The default initial state probabilities were previously hard-coded.
This option allows some flexibility from the command-line to change the initial probabilities.
Default: [0.997, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005]
The default will change with more or less states described w/ --mu and --sigma.
By default, the first state will start out as 0.997 as above, all other states will be (1-0.997)/n_other_states.
That behavior also changes with following parameters:
--special_idx -- determines which state (not necessarily first) will be given default 0.997 (OR other with --initcn1)
--init_special (probability of starting in the special state (usually CN=1)).
The probability of starting in another state (usually copy number variant states) defaults to (1-init_special)/(nstates-1).
--leave_special_state
--prob_other_to_special
--prob_other_to_other
--prob_other_to_self
To change the initial probs manually: Provide comma-separated list of initial probs -e.g.: '0.997,0.0005,0.0005,0.0005,0.0005,0.0005,0.0005'
This must have same number of states represented as state means (--mu; default 7).
''')
parser_generate.set_defaults(func=run_subtool)
## create sub-command for filter/filterfish
parser_filter = subparsers.add_parser('filter',
help = '''Given a latest-stage sample (where all or most puffs have grown) and an optional earliest stage sample
(for additional Fold-enrichment normalization), just return the late-stage sample with normalized values as specified by protocol options below.''')
parser_filter.add_argument('--counts', type=str, default=False,
help=''' Use this flag and specify an output prefix for the final normalized late stage bin counts bedGraph.''')
parser_filter_unit = parser_filter.add_mutually_exclusive_group()
parser_filter_unit.add_argument('-sd1','--stdev_above', action='store_true', default=False,
help='''Use value given as multiple of standard deviations above the mean.''')
parser_filter_unit.add_argument('-sd2','--stdev_below', action='store_true', default=False,
help='''Use value given as multiple of standard deviations BELOW the mean.''')
parser_filter_unit.add_argument('-mu','--mean', action='store_true', default=False,
help='''Use value given as multiple of the mean.''')
parser_filter.add_argument('-V','--value', type=float, required=True,
help='''Value to filter on -- a float. Required.''')
parser_filter.add_argument('-R','--relation', type=str, default=">",
help='''Relationship to value to filter on -- i.e. greater than, less than, etc. Accepted values are:
gt, ge, lt, le, eq, ne -- respectively representing the relations >, >=, <, <=, ==, !=''')
parser_filter.add_argument('-l','--latestage', type=str, required=True,
help='''Provide path to bedGraph (e.g. made from getcov) for a late stage sample.''')
parser_filter.add_argument('-e','--earlystage', type=str, required=False, default=False,
help=''' Optional: Provide path to bedGraph (e.g. made from getcov) for an early stage sample. This is used after smoothing and median normalization to further normalize the late-stage sample (e.g. can correct for sequencing biases)''')
parser_filter_protocol = parser_filter.add_mutually_exclusive_group(required=True)
parser_filter_protocol.add_argument('-s','--skipnorm', action='store_true', default=False,
help='''Use provided bedGraph (late option) directly -- skip any normalization procedure.''')
parser_filter_protocol.add_argument('-1', '--protocol1', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are median normalized.
Then late stage is normalized to early stage if available.''')
parser_filter_protocol.add_argument('-2', '--protocol2', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first smoothed with bandwidth given by --bandwidth.
Then they are median normalized.
Then late stage is normalized to early stage if available.''')
parser_filter_protocol.add_argument('-3', '--protocol3', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first median normalized.
Then they are smoothed with bandwidth given by --bandwidth.
Then late stage is normalized to early stage if available.
Note: if early is not present, this is same as protocol 4.''')
parser_filter_protocol.add_argument('-4', '--protocol4', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first median normalized.
Then late stage is normalized to early stage if available.
Then late/early is smoothed with bandwidth given by --bandwidth.
Note: if early is not present, this is same as protocol 3.''')
parser_filter_protocol.add_argument('-5', '--protocol5', action='store_true', default=False,
help='''Late stage is normalized to early stage if available.
Then late/early is smoothed with bandwidth given by --bandwidth. (i.e. L/E -> smooth).
Note: if early is not present, this is same as protocol 6.''')
parser_filter_protocol.add_argument('-6', '--protocol6', action='store_true', default=False,
help='''Late stage (and early stage if present) bin counts are first smoothed with bandwidth given by --bandwidth.
Then late stage is normalized to early stage if available. (i.e. smooth -> L/E).
Note: if early is not present, this is same as protocol 5.''')
parser_filter.add_argument('-c', '--collapsed', action='store_true', default=False,
help='''Return collapsed variable-step bedGraph instead of expanded single-step bedGraph.
This is often a much smaller file.''')
parser_filter.add_argument('-ps', '--pseudo', type=float, default=0.1,
help=''' Before normalizing late to early, add this pseudocount to all counts in order to avoid division by zero.
Should be between 0 and 1.
Should be small enough to not change other values much,
but big enough such that numbers divided by 0+pseudo do not become massive.
Default: 0.1.''')
parser_filter.add_argument('-bw', '--bandwidth', type=int, default=2500,
help=''' If kernel smoothing, specify bandwidth (int).
Bandwidth should be bigger when there is no early stage normalization, to try to smooth out sequencing biases, mappability biases, etc.
Default: 2500.''')
parser_filter.add_argument('--impute', type=int, default=False,
help=''' If imputing, specify bandwidth (int) for kernel smoothing.
This bandwidth is generally longer than the one you would provide for regular smoothing.
Only bins with a count of 0 will take on smoothed (imputed) values.
Try: 10000.''')
parser_filter.set_defaults(func=run_subtool)
## create sub-command for help
parser_help = subparsers.add_parser('help', help=''' Gives more extensive guidance on using pufferfish.''')
parser_help.set_defaults(func=run_subtool)
## parse the args and call the selected function
args = parser.parse_args()
## check if args.quiet set (a default to all sub-modules from ArgumentParserWithDefaults class)
if args.quiet:
logger.setLevel(logging.ERROR)
## attempt to run args.func (which calls run_subtool() for all), catch errors
try:
args.func(parser, args)
except IOError, e:
## often pipe will break for various reasons and will raise sigpipe error
## can import errno and do "if e.errno != errno.EPIPE:"
## but errno.EPIPE = 32 -- so just use 32 here
if e.errno != 32: ## ignore SIGPIPE
raise
## Run main when this script used from command-line
if __name__ == "__main__":
main()
## If needed parallelization
## Would have to parellize from here....
## from joblib import Parallel, delayed
## import time
## from glob import glob
## folder = "del"
## files = glob('{}/*.txt'.format(folder))
## def sleep(f):
## print f
## time.sleep(0.001)
## ## for f in files:
## ## sleep(f) #args.parallel
## Parallel(n_jobs=2)(delayed(sleep)(f) for f in files)
```
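The `--leave_special_state` and `--leave_other` help text above describes turning an expected run length (in bins) into a transition probability (p = 1/length) and forcing each state's outgoing probabilities to sum to 1. The sketch below illustrates the simplest case of that construction, with a single leave probability per state; the function and variable names are illustrative and not part of pufferfish, and the real option also accepts a comma-separated pair (to-special vs. to-other) that this sketch does not model.

```python
import numpy as np

def as_probability(x):
    # A value > 1 is treated as an average run length in bins: p = 1/length.
    return 1.0 / x if x > 1 else float(x)

def transition_matrix(nstates, special_idx=0, leave_special=0.001, leave_other=None):
    # One row per state; each row holds that state's outgoing probabilities and sums to 1.
    leave_other = leave_special if leave_other is None else leave_other
    T = np.zeros((nstates, nstates))
    for i in range(nstates):
        p_leave = as_probability(leave_special if i == special_idx else leave_other)
        T[i, :] = p_leave / (nstates - 1)  # spread the leave probability over the other states
        T[i, i] = 1.0 - p_leave            # probability of staying in state i
    return T

# Example: 7 states, expect to stay in the special (CN=1) state for ~1000 bins on average.
print(transition_matrix(7, special_idx=0, leave_special=1000))
```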
#### File: pufferfish/pufferfish/signalcorrect.py
```python
import sys, argparse, pybedtools, scipy.stats
from collections import defaultdict
import numpy as np
parser = argparse.ArgumentParser(description="""
Given gcmedian.txt and signal file,
Generate an updated signal file corrected for the GC median...
NOTE: if the GC median was obtained on FE, it can only be used on FE signal.
If the GC median was done on SPMR, it can only be used on SPMR signal.
Etc.
NOTE2: signal bins being corrected need to be same bins GC analyzed in.
""", formatter_class= argparse.RawTextHelpFormatter)
parser.add_argument('--signal', '-a', '-i', '-f',
type= str,
help='''Path to signal bedGraph that has cols: chr, start, end, GCprop, signal-to-correct. You can tell the algorithm which columns to look for with the options below.''')
parser.add_argument('--scalefactors', '-b',
type=str,
help='''Path to gc median parameter table.''')
parser.add_argument('--gccol', '-g',
type=int, default=6,
help='''Column GC proportions found in. Default = 6''')
parser.add_argument('--signalcol', '-s',
type=int, default=4,
help='''Column signal found in. Default = 4''')
parser.add_argument('--chrcol', '-c',
type=int, default=1,
help='''Column chr name found in. Default = 1''')
parser.add_argument('--startcol', '-S',
type=int, default=2,
help='''Column start coordinate found in. Default = 2''')
parser.add_argument('--endcol', '-E',
type=int, default=3,
help='''Column end coordinate found in. Default = 3''')
parser.add_argument('--mean', '-M',
action='store_true', default=False,
help='''Subtract GC mean from signal instead of GC median.''')
parser.add_argument('--control',
type= str, default=False,
help='''Give an output prefix to also output a bedGraph that contains the GC_median over each bin given the bin's GC content.
This can be used (1) in a browser, (2) as part of command-line subtraction, fold-enrichment and other operations w/ paste/awk and a signal file,
(3) downstream significance tests outside of this program, etc.''')
parser.add_argument('--mad',
type= str, default=False,
help='''Give an output prefix to also output a bedGraph that contains the median absolute deviation from each GC_median over each bin given the bin's GC content.
This can be used (1) in a browser, (2) as part of command-line operations w/ paste/awk and a signal and/or median file,
(3) downstream significance tests outside of this program, etc.''')
parser.add_argument('--zscore',
type= str, default=False,
help='''Give an output prefix to also output a bedGraph that contains the zscore over each bin.
The z-score is obtained by (bin_value - gc_mean)/gc_std_dev given the bin's GC content.''')
parser.add_argument('--robust_zscore',
type= str, default=False,
help='''Give an output prefix to also output a bedGraph that contains the zscore over each bin.
The z-score is obtained by (bin_value - gc_median)/(c*gc_mad) given the bin's GC content.
When MedianAD is 0, c*gc_mad = 1.253314*MeanAD
When MedianAD is not 0, c*gc_mad = 1.4826*MedianAD
According to Wikipedia -- this should use 1.4826
https://en.wikipedia.org/wiki/Median_absolute_deviation
According to IBM - 1.486:
See: https://www.ibm.com/support/knowledgecenter/en/SSWLVY_1.0.0/com.ibm.spss.analyticcatalyst.help/analytic_catalyst/modified_z.html
or 1.483 according to another source (which is 1.4826 rounded)...
or 1.4296 according to another (1.43 rounded)...
''')
parser.add_argument('--madunits',
type= str, default=False,
help='''Give an output prefix to also output a bedGraph that contains the zscore-like madunits over each bin.
The z-score-like madunits are obtained by (bin_value - gc_median)/gc_mad given the bin's GC content.
When MAD is 0, it attempts to replace MAD with the MeanAD from the median.
When both MAD and MeanAD are 0, it attempts to replace MAD with 0.67449*std_dev.
When all are 0 (should not happen) - it replaces MAD with 1.
This also differs from the robust_zscore because it does not scale the MAD.
The robust_zscore does scale MAD: (x-MED)/scaled_MAD.
''')
parser.add_argument('--medfe',
type= str, default=False,
help='''Give an output prefix to also output a bedGraph that contains the fold enrichment of bin/med_gc over each bin given the bin's GC content.
This can be used with smoothing to get long-range mean relative copy numbers.
Long-range RCNs over each bin can be used to scale the bin's value (bin/bin_RCN) before subtracting GC_med or getting z-score in subsequent steps.
When median is 0, it is changed to 1 for the denominator.
This may or may not give the desired effect.''')
parser.add_argument('--meanfe',
type= str, default=False,
help='''Give an output prefix to also output a bedGraph that contains the fold enrichment of bin/mean_gc over each bin given the bin's GC content.
This can be used with smoothing to get long-range mean relative copy numbers.
Long-range RCNs over each bin can be used to scale the bin's value (bin/bin_RCN) before subtracting GC_med or getting z-score in subsequent steps.
When mean is 0, it is changed to 1 for the denominator.
This may or may not give the desired effect.''')
parser.add_argument('--subtractmed',
type= str, default=False,
help='''Give an output prefix to also output a bedGraph that contains the median-subtracted scores of bin-med_gc over each bin given the bin's GC content.''')
parser.add_argument('--subtractmean',
type= str, default=False,
help='''Give an output prefix to also output a bedGraph that contains the mean-subtracted scores of bin-mean_gc over each bin given the bin's GC content.''')
args = parser.parse_args()
## could also use NS-seq medians to correct its own signal...
## could do that all in the first step -- either to treatment signal, FE signal, or both...
## if doing raw signal or raw SPMR -- will still need to normalize for copy number -- and/or do local lambda peak calling
def name_bdg(bdg):
if not bdg.endswith('.bedGraph'):
return bdg + '.bedGraph'
return bdg
gccol = args.gccol-1
sigcoltocorrect = args.signalcol-1
chrcol = args.chrcol-1
startcol = args.startcol-1
endcol = args.endcol-1
gcsub = 0
if args.mean:
gcsub = 1
## READ IN GC STATS INFORMATION
statdict = defaultdict(list)
with open(args.scalefactors) as table:
for row in table:
row = row.strip().split()
statdict[int(row[0])] = [float(e) for e in row]
## WRITE BEDGRAPHS
if args.control: ## WRITE MEDIAN CONTROL BEDGRAPH IF DESIRED (mean printed out if --mean specified)
conbdg = open(name_bdg(args.control),'w')
if args.mad: ## WRITE MAD BEDGRAPH IF DESIRED
madbdg = open(name_bdg(args.mad),'w')
if args.zscore:
zbdg = open(name_bdg(args.zscore), 'w')
if args.robust_zscore:
rzbdg = open(name_bdg(args.robust_zscore), 'w')
if args.madunits:
madunitbdg = open(name_bdg(args.madunits), 'w')
if args.medfe:
febdg = open(name_bdg(args.medfe), 'w')
if args.meanfe:
mufebdg = open(name_bdg(args.meanfe), 'w')
if args.subtractmed:
submedbdg = open(name_bdg(args.subtractmed), 'w')
if args.subtractmean:
submubdg = open(name_bdg(args.subtractmean), 'w')
with open(args.signal) as table:
for row in table:
row = row.strip().split()
gc = int(100.0*float(row[gccol]))
## if args.bdg:
## sig = float(row[sigcoltocorrect])
## newsig = sig - statdict[gc][gcsub]
## out = [row[chrcol], row[startcol], row[endcol], newsig]
## outmsg = ("\t").join([str(e) for e in out])
## sigbdg.write( outmsg + '\n' )
if args.control:
curr_gc_med = statdict[gc][gcsub]
out = [row[chrcol], row[startcol], row[endcol], curr_gc_med]
outmsg = ("\t").join([str(e) for e in out])
conbdg.write( outmsg + '\n' )
if args.mad:
curr_gc_mad = statdict[gc][3]
out = [row[chrcol], row[startcol], row[endcol], curr_gc_mad]
outmsg = ("\t").join([str(e) for e in out])
madbdg.write( outmsg + '\n' )
        if args.zscore: # z-score: (bin - gc_mean) / gc_std
sig = float(row[sigcoltocorrect])
mu = statdict[gc][1]
std = statdict[gc][2]
zscore = (sig - mu) / std
out = [row[chrcol], row[startcol], row[endcol], zscore]
outmsg = ("\t").join([str(e) for e in out])
zbdg.write( outmsg + '\n' )
if args.robust_zscore: #(bin - median)
sig = float(row[sigcoltocorrect])
med = statdict[gc][0]
mad = statdict[gc][3]
mad2 = statdict[gc][4]
if mad == 0:
denom = 1.253314*mad2
else:
denom = 1.4826*mad
zscore = (sig - med) / denom
out = [row[chrcol], row[startcol], row[endcol], zscore]
outmsg = ("\t").join([str(e) for e in out])
rzbdg.write( outmsg + '\n' )
if args.madunits: #(bin - median)
sig = float(row[sigcoltocorrect])
med = statdict[gc][0]
mad = statdict[gc][3]
mad2 = statdict[gc][4]
std = statdict[gc][2]
if mad != 0:
denom = mad
elif mad == 0 and mad2 != 0:
denom = mad2
elif mad == 0 and mad2 == 0 and std > 0:
denom = 0.67449 * std
else:
denom = 1
zscore = (sig - med) / denom
out = [row[chrcol], row[startcol], row[endcol], zscore]
outmsg = ("\t").join([str(e) for e in out])
madunitbdg.write( outmsg + '\n' )
if args.medfe:
sig = float(row[sigcoltocorrect])
med = statdict[gc][0]
if med == 0:
med = 1
fe = sig / med
out = [row[chrcol], row[startcol], row[endcol], fe]
outmsg = ("\t").join([str(e) for e in out])
febdg.write( outmsg + '\n' )
if args.meanfe:
sig = float(row[sigcoltocorrect])
mean = statdict[gc][1]
if mean == 0:
mean = 1
fe = sig / mean
out = [row[chrcol], row[startcol], row[endcol], fe]
outmsg = ("\t").join([str(e) for e in out])
mufebdg.write( outmsg + '\n' )
if args.subtractmed:
sig = float(row[sigcoltocorrect])
newsig = sig - statdict[gc][0]
out = [row[chrcol], row[startcol], row[endcol], newsig]
outmsg = ("\t").join([str(e) for e in out])
submedbdg.write( outmsg + '\n' )
if args.subtractmean:
sig = float(row[sigcoltocorrect])
newsig = sig - statdict[gc][1]
out = [row[chrcol], row[startcol], row[endcol], newsig]
outmsg = ("\t").join([str(e) for e in out])
submubdg.write( outmsg + '\n' )
if args.control:
conbdg.close()
if args.mad:
madbdg.close()
if args.zscore:
zbdg.close()
if args.robust_zscore:
rzbdg.close()
if args.madunits:
madunitbdg.close()
if args.medfe:
febdg.close()
if args.meanfe:
mufebdg.close()
if args.subtractmed:
submedbdg.close()
if args.subtractmean:
    submubdg.close()
### THIS WILL BE DELETED EVENTUALLY
## IT IS HERE FOR POSTERITY SO OLDER PIPELINES DO NOT GO BELLY UP
if not (args.control or args.mad or args.zscore or args.robust_zscore or args.madunits or args.medfe or args.meanfe or args.subtractmed or args.subtractmean):
## If bedGraph type not specified... then it must be an older pipeline OR the user wants the subtractmed bedGraph
with open(args.signal) as table:
for row in table:
row = row.strip().split()
gc = int(100.0*float(row[gccol]))
sig = float(row[sigcoltocorrect])
newsig = sig - statdict[gc][gcsub]
out = [row[chrcol], row[startcol], row[endcol], newsig]
print ("\t").join([str(e) for e in out])
```
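The `--robust_zscore` branch above centers each bin on the GC-stratified median and scales by 1.4826*MAD, falling back to 1.253314*MeanAD when the MAD is 0. Below is a minimal, self-contained version of that scaling rule on a plain vector; it drops the GC stratification and the function name is illustrative, not taken from the script.

```python
import numpy as np

def robust_z(values):
    # Center on the median and scale by a consistency-corrected MAD,
    # falling back to the mean absolute deviation when the MAD is 0.
    x = np.asarray(values, dtype=float)
    med = np.median(x)
    mad = np.median(np.abs(x - med))
    meanad = np.mean(np.abs(x - med))
    denom = 1.4826 * mad if mad != 0 else 1.253314 * meanad
    return (x - med) / denom

print(robust_z([1, 2, 2, 2, 3, 10]))
```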
#### File: pufferfish/switchblade/OEM.py
```python
import sys
import numpy as np
import argparse
##
## Date Created: October 02, 2013
## Author: <NAME>
##
## June 11,2014:: modified to take variable step csv --
##
## Future improvements to this script might include
## - writing the output file directly instead of building up and storing the oemDict
## I imagine this will help on the human genome
## - making it write out in correct order
## as of now, the order is scrambled b/c it loops over keys (chromosome names) of a dictionary
##
## September 2021::
## - modified to use argparse.
##############################################################################
''' ARGUMENT PARSER '''
##############################################################################
parser = argparse.ArgumentParser(description="""
DESCRIPTION
Origin Efficiency Metric.
Usage:
OEM.py -i inputCSV [-w W -p pseudocount -o outname.ext -H --csvformat]
INPUT:
CSV containing chromosome position and coverage for both strands.
OUTPUT:
bedGraph or CSV of OEM for each position.
input CSV: chr,pos,fwd,rev
outputFormat = either 'bdg' or 'csv'
bdg will return a bedGraph, a tab-delimited BED3 + score format: chr start end oem
Note that bedGraphs (and BED files) are 0-based, half open intervals
Thus, the first base of a sequence would be identified as: chr 0 1 score
i.e. start = 0, end = 1 -- but the end is not included, thus only base 0 is represented here
csv will return a CSV file similar to the input of format: chr,pos,oem
The CSV output (and input) is 1-based. Thus, base 1 of a sequence would be represented as: chr,1,oem
""", formatter_class= argparse.RawTextHelpFormatter)
parser_input = parser.add_mutually_exclusive_group()
parser_input.add_argument('-i', "--input",
type= str, default="-",
help='''input CSV: chr,pos,fwdcov,revcov. Use "-" or "stdin" or leave blank during piping.''')
parser.add_argument('-o', "--outname",
type= str, default="-",
help='''Defaults to stdout. Otherwise, specify a filename for output: e.g. output.bedGraph or output.csv. Use "-" or "stdout" or leave blank during piping. ''')
parser.add_argument('-H', '--hasheader', action='store_true', default=False,
help='''Use this flag if the inputCSV has the header line 'chr,pos,fwd_str,rev_str'.''')
parser.add_argument('-c', '--csvformat', action='store_true', default=False,
help='''Use this flag for CSV output formatting instead of bedGraph formatting.''')
parser.add_argument('-w', '--windowsize', type=int, default=10000,
help='''Window size to use. Default = 10000 (10 kb). windowSize is how big the window is to left of a position (for WL and CL calculations) and to the right (for WR and CR). This should be set to '10000' if unsure.''')
parser.add_argument('-p', '--pseudocount', type=int, default=0,
help='''Pseudocount to use. Default = 0. The pseudocount is used to prevent division by 0.
In 10kb bins on yeast genome it is unlikely to get a division-by-zero error.
This is not necessarily true for the human genome or smaller window sizes, thus a pseudocount can help.
Suggested pseudocount if needed is 1, but using sequencing depth and other knowledge to decide is better.''')
args = parser.parse_args()
##############################################################################
''' FUNCTIONS '''
##############################################################################
def okazakiFileReader(inputfile, header=True):
"""Takes in .csv a la <NAME>, returns Dict with that info """
# Open connection to input.csv
if inputfile in ('-','stdin'):
okazaki = sys.stdin
else:
okazaki = open(inputfile, "r")
# If header line present, store it
if header:
headerLine = okazaki.readline()[:-1].split(",")
# initialize dictionary for storing information from file
okazakiDict = {}
# Parse file line by line
for line in okazaki:
line = line[:-1].split(",")
chromosome, pos, fwd, rev = line[0], int(line[1]), float(line[2]), float(line[3])
try:
#okazakiDict[chromosome]
okazakiDict[chromosome]["pos"] += [pos]
okazakiDict[chromosome]["fwd"] += [fwd]
okazakiDict[chromosome]["rev"] += [rev]
except KeyError:
okazakiDict[chromosome] = {}
okazakiDict[chromosome]["pos"] = [pos]
okazakiDict[chromosome]["fwd"] = [fwd]
okazakiDict[chromosome]["rev"] = [rev]
    if inputfile not in ('-','stdin'):
        okazaki.close()
return okazakiDict
def OEMDict(okazakiDict, windowSize=500, pseudocount=0):
"""Takes in okazakiDict, returns oemDict"""
# chr names
chromosomes = okazakiDict.keys()
# initialize oemDict
oemDict = {}
# start loop over all chromosomes
for chromosome in chromosomes:
sys.stderr.write( "Processing chromosome: " + str(chromosome) + "\n")
chrLen = len(okazakiDict[chromosome]["pos"])
sys.stderr.write( "Chromosome Length = " + str(chrLen) + "\n" )
## GIVE DUMMY LINE AND STOP IF CHRLEN < MinLen == (w+1)*2
if chrLen < (windowSize+1)*2:
oemDict[chromosome] = {'pos':[1], 'oem':[0]}
continue
### CONTINUE IF CHRLEN >= MinLen == (w+1)*2
# Set the start and end position (end position is 10,000 bp less than last position) (JB)
start = windowSize-1
end = chrLen - windowSize ## make sure this works
# Calculate the first window (JB) -- float() used to avoid default 'int division'
WL = float(sum(okazakiDict[chromosome]['fwd'][0:start+1])) + pseudocount ## goes up to and includes start as in R
CL = float(sum(okazakiDict[chromosome]['rev'][0:start+1])) + pseudocount
WR = float(sum(okazakiDict[chromosome]['fwd'][start+1:start+1+windowSize])) + pseudocount ## starts at 1 above start as in R
CR = float(sum(okazakiDict[chromosome]['rev'][start+1:start+1+windowSize])) + pseudocount
# Set up the storage oem (JB)
pos = okazakiDict[chromosome]['pos'][start:end] ##+!
oemDict[chromosome] = {'pos':pos, 'oem':[0]*len(pos)}
oem = (WL / (WL + CL)) - (WR / (WR + CR))
oemDict[chromosome]['oem'][0] = oem
## Iterative steps
numPositions = len(range(start,end))
percentsToReport = range(0,101,10)
Pos = start
for i in range(1, numPositions):
# report progress
## -- since this script goes so fast, progress other than which chr is currently being worked on was not necessary to report
##percentComplete = int(100.0*(i+1)/numPositions) ## i+1 b/c initialization added in
##if percentComplete in percentsToReport:
##print str(percentComplete) + "% complete for chromosome: " + str(chromosome)
##percentsToReport[int(percentComplete/10.0)] = -1
# Update the pos
Pos += 1
# Update WL, CL, WR, CR
WL = WL + okazakiDict[chromosome]['fwd'][Pos] - okazakiDict[chromosome]['fwd'][Pos - windowSize]
CL = CL + okazakiDict[chromosome]['rev'][Pos] - okazakiDict[chromosome]['rev'][Pos - windowSize]
#WR = WR + okazakiDict[chromosome]['fwd'][Pos+windowSize-1] - okazakiDict[chromosome]['fwd'][Pos]
#CR = CR + okazakiDict[chromosome]['rev'][Pos+windowSize-1] - okazakiDict[chromosome]['rev'][Pos]
WR = WR + okazakiDict[chromosome]['fwd'][Pos+windowSize] - okazakiDict[chromosome]['fwd'][Pos]
CR = CR + okazakiDict[chromosome]['rev'][Pos+windowSize] - okazakiDict[chromosome]['rev'][Pos]
## I am considering making it directly centered on pos by having the first window end on pos and 2nd window start on pos
## No reason that shouldn't/cannot be done and it is more reflective of the pos
## This will require some changes in the other parts of script as well such as end position
# Store the oem
oem = (WL / (WL + CL)) - (WR / (WR + CR))
oemDict[chromosome]['oem'][i] = oem
return oemDict
def writeOEM2Bdg(oemDict, out):
"""Takes in oemDict and writes out a bedgraph
This will equate to chromosome, oemDict[chromosome]['pos'][i]-1, oemDict[chromosome]['pos'][i], oemDict[chromosome]['oem'][i]
Note that bedGraphs are 0-based, half-open intervals"""
chromosomes = oemDict.keys()
for chromosome in chromosomes:
numPositions = len(oemDict[chromosome]['pos'])
for i in range(numPositions):
chrom = str(chromosome)
start = str(oemDict[chromosome]['pos'][i]-1)
end = str(oemDict[chromosome]['pos'][i])
oem = str(oemDict[chromosome]['oem'][i])
out.writelines('\t'.join([str(e) for e in [chrom, start, end, oem]]) + "\n")
def writeOEM2CSV(oemDict, out):
"""Takes in oemDict and writes out a CSV similar to input CSV: chr,pos,oem
This will equate to chromosome, oemDict[chromosome]['pos'][i], oemDict[chromosome]['oem'][i]
Note that this outputCSV is a 1-based position as was the inputCSV
If you are to make a bedGraph from the commandline, it will require something like:
awk '{gsub(/,/,"\t"); print}' output.csv | awk '{print $1 "\t" $2-1 "\t" $2 "\t" $3}' > output.bedGraph"""
chromosomes = oemDict.keys()
for chromosome in chromosomes:
numPositions = len(oemDict[chromosome]['pos'])
for i in range(numPositions):
chrom = str(chromosome)
pos = str(oemDict[chromosome]['pos'][i])
oem = str(oemDict[chromosome]['oem'][i])
out.writelines(','.join([str(e) for e in [chrom, pos, oem]]) + "\n")
##############################################################################
''' EXECUTION '''
##############################################################################
##If run from the linux commandline then execute the following
#### See 6.1.1 here for more info on how this works: http://docs.python.org/2/tutorial/modules.html
if __name__ == "__main__":
#EXECUTE
okazakiDict = okazakiFileReader(args.input, args.hasheader)
oemDict = OEMDict(okazakiDict, args.windowsize, args.pseudocount)
## OUT
if args.outname in ("-","stdout"):
out = sys.stdout
else:
out = open(args.outname, 'w')
## WRITING OUT
if args.csvformat:
writeOEM2CSV(oemDict, out)
else:
writeOEM2Bdg(oemDict, out)
## QUIT
if args.outname in ("-","stdout"):
out.close()
quit()
``` |
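OEM.py above slides two adjacent windows along each chromosome and reports OEM = WL/(WL+CL) - WR/(WR+CR), where WL/CL are the summed forward/reverse counts in the left window and WR/CR in the right window. The loop below recomputes the same quantity on a toy array without the incremental window updates, to make the indexing explicit; it is a sketch, not the script's own code path.

```python
import numpy as np

def oem(fwd, rev, w, pseudo=0):
    # Left window: w positions up to and including i; right window: the next w positions.
    fwd, rev = np.asarray(fwd, float), np.asarray(rev, float)
    out = []
    for i in range(w - 1, len(fwd) - w):
        WL = fwd[i - w + 1:i + 1].sum() + pseudo
        CL = rev[i - w + 1:i + 1].sum() + pseudo
        WR = fwd[i + 1:i + 1 + w].sum() + pseudo
        CR = rev[i + 1:i + 1 + w].sum() + pseudo
        out.append(WL / (WL + CL) - WR / (WR + CR))
    return np.array(out)

# Toy strand-switch signal: the switch around the middle shows up as a deflection in the OEM.
fwd = [0, 0, 0, 0, 5, 5, 5, 5]
rev = [5, 5, 5, 5, 0, 0, 0, 0]
print(oem(fwd, rev, w=2))
```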
{
"source": "JohnUrban/squiggler",
"score": 2
} |
#### File: squiggler/squiggler/model_tools.py
```python
import sys
import h5py
import logging
import matplotlib.pyplot as plt
logger = logging.getLogger('squiggler')
import os, squiggler
# model header: kmer, level_mean, level_stdv, sd_mean, sd_stdv, weight
# older base caller also had "variant" between kmer and level_mean
def get_path(strand):
if strand == "template":
return "/Analyses/Basecall_2D_000/BaseCalled_template/Model"
elif strand == "complement":
return "/Analyses/Basecall_2D_000/BaseCalled_complement/Model"
def print_model_from_f5(f5, path):
## read = [e for e in f5["Analyses/EventDetection_000/Reads"]][0]
for event in f5[path]:
print ("\t").join([str(e) for e in event])
def print_model_attr_from_f5(f5, path):
## read = [e for e in f5["Analyses/EventDetection_000/Reads"]][0]
for attr in f5[path].attrs:
print ("\t").join([str(attr), str(f5[path].attrs[attr])])
def store_model_attr_from_f5(f5, path):
## read = [e for e in f5["Analyses/EventDetection_000/Reads"]][0]
attributes = {}
for attr in f5[path].attrs:
attributes[str(attr)] = float(f5[path].attrs[attr])
return attributes
def print_model_type_from_f5(f5):
try:
print f5["/Analyses/Basecall_2D_000/Configuration/general/"].attrs["model_type"]
except:
print "No model type found."
def read_model_f5(f5):
##kmer, level_mean, level_stdv, sd_mean, sd_stdv, weight
t = get_path("template")
c = get_path("complement")
strands = {1:t, 2:c}
model = {1:{}, 2:{}}
for strand in strands.keys():
for event in f5[strands[strand]]:
kmer = str(event[0])
level_mean = float(event[1])
level_stdv = float(event[2])
sd_mean = float(event[3])
sd_stdv = float(event[4])
weight = float(event[5])
model[strand][kmer] = [level_mean, level_stdv, sd_mean, sd_stdv, weight]
return model
def read_model_tsv(template_tsv, complement_tsv):
##kmer, level_mean, level_stdv, sd_mean, sd_stdv, weight
t = open(template_tsv, 'r')
c = open(complement_tsv, 'r')
strands = {1:t, 2:c}
model = {1:{}, 2:{}}
for strand in strands:
for event in strands[strand]:
event = event.strip().split()
kmer = str(event[0])
level_mean = float(event[1])
level_stdv = float(event[2])
sd_mean = float(event[3])
sd_stdv = float(event[4])
weight = float(event[5])
model[strand][kmer] = [level_mean, level_stdv, sd_mean, sd_stdv, weight]
strands[strand].close()
return model
def get_stored_model(model_type):
## model types: r7, r7.3
if model_type == "r7.3":
template_modelfh = os.path.join(squiggler.__path__[0], 'models', 'template_model_r7.3.tsv')
complement_modelfh = os.path.join(squiggler.__path__[0], 'models', 'complement_model_r7.3.tsv')
elif model_type == "r7":
template_modelfh = os.path.join(squiggler.__path__[0], 'models', 'template_model_r7.tsv')
complement_modelfh = os.path.join(squiggler.__path__[0], 'models', 'complement_model_r7.tsv')
return read_model_tsv(template_modelfh, complement_modelfh)
def run(parser, args):
f5 = h5py.File(args.fast5)
path = get_path(strand=args.type)
if args.get == "model":
if args.header:
## print ("\t").join(['#kmer', 'level_mean', 'level_stdv', 'sd_mean', 'sd_stdv','weight'])
print ("\t").join([h[0] for h in f5[path].dtype.descr])
print_model_from_f5(f5, path)
elif args.get == "attr":
print_model_attr_from_f5(f5, path)
elif args.get == "type":
print_model_type_from_f5(f5)
```
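`read_model_tsv` and `get_stored_model` above return a two-level dictionary: strand (1 = template, 2 = complement) mapping to k-mer mapping to [level_mean, level_stdv, sd_mean, sd_stdv, weight]. A hedged usage sketch follows; it assumes the squiggler package and its bundled r7.3 model TSVs are installed.

```python
# Sketch only: assumes the squiggler package and its bundled model TSVs are available.
from squiggler import model_tools

model = model_tools.get_stored_model("r7.3")
template = model[1]                      # strand 1 = template, 2 = complement
kmer = next(iter(template))              # pick an arbitrary k-mer
level_mean, level_stdv, sd_mean, sd_stdv, weight = template[kmer]
print("%s level_mean=%.2f level_stdv=%.2f" % (kmer, level_mean, level_stdv))
```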
#### File: squiggler/squiggler/plot_events.py
```python
import sys
import h5py
import numpy as np
import logging
import matplotlib.pyplot as plt
from hmm import *
logger = logging.getLogger('squiggler')
from events_tools import *
def run(parser, args):
f5 = h5py.File(args.fast5)
## if args.raw:
## basecalled = False
## elif args.basecalled:
## basecalled = True
## else:
## basecalled = is_basecalled(f5)
plot_events(f5, args.save)
``` |
{
"source": "johnusher/ardpifi",
"score": 3
} |
#### File: ardpifi/classifier/classify copy.py
```python
import base64
import os
import sys
import numpy as np
import tflite_runtime.interpreter as tflite
def main():
interpreter = tflite.Interpreter(model_path())
classifier = Classifier(interpreter)
for line in sys.stdin:
if not line.strip() or line.strip().startswith('#'):
continue
data = base64.b64decode(line.strip())
array = np.frombuffer(data, dtype=np.uint8).reshape((28, 28)).transpose()
output = classifier.classify(array)
print(output)
def model_path():
script_dir = os.path.dirname(__file__)
return os.path.join(script_dir, 'model.tflite')
# Order of letters should match the one in train.py
_LETTERS = ['Other'] + list('CDMNOS')
class Classifier:
def __init__(self, interpreter):
interpreter.allocate_tensors()
self._input_details = interpreter.get_input_details()
self._output_details = interpreter.get_output_details()
self._interpreter = interpreter
def classify(self, array):
# TODO: The data might have to be transposed.
# TODO: The image should be blurred a bit to be more similar to the
# training dataset.
# Get the values between 0 and 1.
normalized = array.astype(np.float32) / array.max()
# Stretch shape to [1, 28, 28, 1]
normalized = np.expand_dims(normalized, 2)
normalized = np.expand_dims(normalized, 0)
self._interpreter.set_tensor(self._input_details[0]['index'], normalized)
self._interpreter.invoke()
logits = self._interpreter.get_tensor(self._output_details[0]['index'])[0]
probs = _softmax(logits)
prob, letter = max(zip(probs, _LETTERS))
return prob, letter
def _softmax(x):
return np.exp(x) / sum(np.exp(x))
if __name__ == '__main__':
main()
``` |
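`main()` above expects one base64-encoded image per stdin line, decoding it into a flat 28*28 uint8 buffer before reshaping and transposing. A minimal sketch of how a caller could encode an image into that line format is below; the random array is purely illustrative.

```python
import base64
import numpy as np

# 28x28 grayscale image as uint8; its raw row-major bytes are base64-encoded, one image per line.
image = np.random.randint(0, 256, size=(28, 28), dtype=np.uint8)
line = base64.b64encode(image.tobytes()).decode('ascii')
print(line)  # pipe this line into the classifier script via stdin
```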
{
"source": "John-Uskglass/MultiProject",
"score": 3
} |
#### File: John-Uskglass/MultiProject/Main.py
```python
import time
#Custom imports #e.g. from file import class as name
#Constants # e.g. GENERIC_CONSTANT = "Status_Test_String"
THE_FINAL_COUNTDOWN = 10
#Code e.g. class Name():
class Rocket():
def __init__(self):
self.countdown = THE_FINAL_COUNTDOWN
self.run(self.countdown)
def run(self, countdown):
if countdown != 0:
print(f"{countdown} until liftoff!")
countdown -= 1
time.sleep(1)
self.run(countdown)
else:
print(f"{countdown}...\nblastoff!!!!!")
if __name__ == '__main__':
Rocket = Rocket()
print("Main Ran Perfectly")
``` |
{
"source": "JohnvandeVrugt/toonapilib4domoticz",
"score": 3
} |
#### File: toonapilib4domoticz/devices/device.py
```python
import Domoticz
import toonapilib
class Device:
_previous_value = ""
def __init__(self, name, unit, plugin_devices, toon):
self._name = name
self._unit = unit
self._plugin_devices = plugin_devices
self._toon = toon
self._previous_value = ""
@property
def plugin_devices(self):
return self._plugin_devices
@property
def exists(self):
return self._unit in self._plugin_devices
@property
def name(self):
return self._name
@property
def toon(self):
return self._toon
@property
def unit(self):
return self._unit
@property
def previous_value(self):
return self._previous_value
def set_previous_value(self, str_new_value):
self._previous_value = str_new_value
def create(self):
Domoticz.Log("Creating " + self.name + " as unit " + self.unit)
def on_command(self, unit, command, level, hue):
return
def update(self):
return
class DeviceCommandException(Exception):
"""An error occurred while issuing a command"""
class DeviceCreateException(Exception):
"""An error occurred while creating the device"""
class DeviceUpdateException(Exception):
"""An error occurred while updating the device"""
```
#### File: toonapilib4domoticz/devices/hotwater_active.py
```python
import Domoticz
from devices.configuration import config
from devices.device import Device
from devices.device import DeviceCreateException
from devices.device import DeviceUpdateException
class DeviceHotWaterActive(Device):
domoticz_device_type = 244
domoticz_subtype = 62
domoticz_switch_type = 0
domoticz_image = 9
def __init__(self, plugin_devices, toon):
super().__init__(config.STR_UNIT_HOT_WATER_ACTIVE,
config.STD_UNIT_HOT_WATER_ACTIVE,
plugin_devices,
toon)
def create(self):
if not super().exists:
try:
Domoticz.Log("Creating hot water active device " + self.name)
Domoticz.Device(Name=self.name, Unit=self.unit, Type=self.domoticz_device_type,
Subtype=self.domoticz_subtype, Switchtype=self.domoticz_switch_type,
Image=self.domoticz_image).Create()
except DeviceCreateException as ex:
Domoticz.Log("An error occurred creating " + self.name)
Domoticz.Log("Exception: " + str(ex))
elif config.debug:
Domoticz.Log("Unit " + str(self.unit) + " exists - nothing to do")
return self
def update(self):
super().update()
str_value = ""
try:
hot_water_on = 1 if self.toon.burner_state == "water_heating" else 0
str_value = str(hot_water_on)
if str_value != self.previous_value:
if config.debug:
Domoticz.Log("Update hot water active: " + str_value)
self.plugin_devices[self.unit].Update(hot_water_on, str(hot_water_on))
except DeviceUpdateException as ex:
Domoticz.Log("An error occurred updating " + self.name)
Domoticz.Log("Exception: " + str(ex))
self.set_previous_value(str_value)
``` |
{
"source": "johnvanhienen/gotrack",
"score": 4
} |
#### File: johnvanhienen/gotrack/common.py
```python
import datetime
import os
import csv
class Common:
def is_time_between(begin_time, end_time):
current_time = datetime.datetime.now().time()
if begin_time < end_time:
return current_time >= begin_time and current_time <= end_time
def formattime(triptime):
# Remove date
triptime = triptime.split('T')[-1]
# Remove timezone
triptime = triptime.split('+')[0]
return triptime
def writetofile(data, headers, filelocation):
file_exists = os.path.isfile(filelocation)
with open(filelocation, 'a+', newline='') as f:
writer = csv.writer(f, delimiter=';')
if not file_exists:
writer.writerow(headers)
writer.writerow(data)
``` |
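`formattime` above strips the date and timezone from an ISO-like timestamp, and `writetofile` appends a semicolon-delimited row, writing the header only when the file does not already exist. A brief usage sketch, assuming the module is importable as `common`; the file path and field values are illustrative.

```python
from common import Common

print(Common.formattime("2021-06-01T12:34:56+02:00"))  # -> 12:34:56
Common.writetofile(
    data=["line 400", "12:34:56", "12:41:03"],
    headers=["line", "departure", "arrival"],
    filelocation="/tmp/trips.csv",  # illustrative path
)
```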
{
"source": "john-vastola/ML-from-scratch-seminar",
"score": 2
} |
#### File: ML-from-scratch-seminar/Gaussian Process/bills_GP.py
```python
import autograd.numpy as np
import pylab as pl
np.random.seed(2837)
N = 21
f_true = lambda x: np.sin(x)
sig_obs = 0.2
X = np.linspace(-2*np.pi, 2*np.pi, N)
Y = f_true(X) + np.random.normal(0, sig_obs, len(X))
eta = 0.01
theta0 = 0.3, 0.3
X_pred = np.linspace(-np.pi, np.pi, 2*N) + 0.2
#l = 2.
# superseded by k = build_k(*theta0) below; left commented out because it
# references the commented-out length scale l
#k = lambda x1,x2 : 1.**2 * np.exp(-(x1-x2)**2 / l**2)
#k = lambda x1,x2 : 0.5**2 * np.exp( -np.sin(x1-x2) / l**2 )
def build_k(*theta):
k = lambda x1,x2 : theta[0]**2 * np.exp(-(x1-x2)**2 / theta[1]**2)
return k
class GP(object):
def __init__(self, k, sig2_obs=0.1**2):
self.X_obs = None
self.Y_obs = None
self.k = k
self.sig2_obs = sig2_obs
def build_Sig(self, k, X1, X2):
#Sig = np.zeros((len(X1),len(X2)))
Sig = k(X1[:,None], X2)
#for i,j in np.ndindex(*Sig.shape):
#Sig[i,j] = k(X1[i], X2[j])
return Sig
def set_obs_data(self, X, Y):
self.N = len(X)
self.X_obs = np.array(X)
self.Y_obs = np.array(Y)
self.Sig_obs = self.build_Sig(self.k, X, X) + self.sig2_obs * np.eye(self.N)
return self.Sig_obs
def set_predict_points(self, X):
self.M = len(X)
self.X_pred = np.array(X)
self.Sig_pred_internal = self.build_Sig(self.k, X, X)
self.Sig_cross = self.build_Sig(self.k, X, self.X_obs)
        # use a separate name for the gain matrix so self.M keeps the number
        # of prediction points assigned above
        gain = self.Sig_cross @ np.linalg.inv(self.Sig_obs)
        self.mu_pred = gain @ self.Y_obs
        self.Sig_pred = self.Sig_pred_internal - gain @ self.Sig_cross.T
def sample_prediction(self):
self.Y_pred = np.random.multivariate_normal(self.mu_pred, self.Sig_pred)
return self.Y_pred
def ll_obs_data(self, theta):
k = build_k(*theta)
X, Y = self.X_obs, self.Y_obs
Sig = self.build_Sig(k, X, X) + self.sig2_obs * np.eye(self.N)
ll = -0.5 * Y.T @ np.linalg.inv(Sig) @ Y - 0.5 * np.log(np.linalg.det(Sig))
return ll
def train(f_ll, theta0, eta, T):
from autograd import grad
f_grad = lambda theta: - grad(f_ll)(theta)
theta = np.array(theta0)
print(" > t=0, theta=%s" % str(theta))
for t in range(1, T+1):
theta -= eta * f_grad(theta)
print(" > t=%d, theta=%s" % (t, str(theta)))
return theta
# BEFORE LEARNING
k = build_k(*theta0)
gp = GP(k, sig2_obs=sig_obs**2)
Sig_obs = gp.set_obs_data(X, Y)
gp.set_predict_points(X_pred)
# PLOT TRAINING DATA AND PRE-LEARNING PREDICTION
pl.figure(1)
pl.plot(X, Y, 'bo', label="Data")
pl.plot(gp.X_pred, gp.mu_pred, 'b', lw=0.5)
#y = gp.sample_prediction()
#pl.plot(gp.X_pred, y, "or", ms=1.)
#y = gp.sample_prediction()
#pl.plot(gp.X_pred, y, "og", ms=1.)
# PLOT PRE-LEARNING COV MATRIX
pl.figure(2)
pl.imshow(Sig_obs)
pl.colorbar()
# TRAIN
theta = train(gp.ll_obs_data, theta0, eta, T=100)
# PLOT POST-LEARNING PREDICTION
pl.figure(1)
k2 = build_k(*theta)
gp2 = GP(k2, sig2_obs=sig_obs**2)
Sig_obs2 = gp2.set_obs_data(X, Y)
gp2.set_predict_points(X_pred)
pl.plot(gp2.X_pred, gp2.mu_pred, 'g', lw=1.)
# PLOT POST-LEARNING COV MATRIX
pl.figure(3)
pl.imshow(Sig_obs2)
pl.colorbar()
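# When the script runs non-interactively, the figures only render after
# show(); this call is harmless in interactive pylab sessions.
pl.show()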
``` |
{
"source": "JohnVCZ/Web-scraper",
"score": 3
} |
#### File: JohnVCZ/Web-scraper/Web_scraper.py
```python
from time import time
import requests
from bs4 import BeautifulSoup as BS
import csv
import sys
# training website
INPUT = "https://volby.cz/pls/ps2017nss/ps3?xjazyk=CZ"
def get_districts(web):
"""
Přijímá webovku a vrátí extrahovaný seznam odkazů na stránky volebních
okresů + základní webovku.
EG.: Ze stránky Volby.cz vrací [okres_1.cz, okres_2.cz, ... okres_N.cz].
:param web:
:return adr_okresu:
:return base_adr:
"""
base_adr = web[0:web.rfind("/")]
soup = suck_n_chop(web)
tab_lines = [tr.find_all("a") for tr in soup.find_all("tr")]
adr_okresu = [base_adr + "/" + group[2]["href"] for group in tab_lines
if len(group) != 0 if group[0].text[0:2] == "CZ"]
return adr_okresu, base_adr
def get_municipalities(web_list, base_adr):
"""
Přijímá seznam okresních webů + základní webovku a vrací seznam webů obcí.
EG.: Z webu brno-venkov.cz vrací [Opatovice.cz, Ostopovice.cz, ...].
:param web_list:
:param base_adr:
:return adr_obci:
"""
assert web_list != [], "Problém: seznam adres je prázdný."
adr_obci = []
for web in web_list:
soup = suck_n_chop(web)
tab_lines = [tr.find_all("a") for tr in soup.find_all("tr")]
        adr_obci += [base_adr + "/" + element["href"] for group in tab_lines
                     if len(group) != 0 for element in group
                     if element.text.isnumeric()]  # accumulate across districts instead of overwriting
return adr_obci
def extract_data(web_list):
"""
Přijímá cílové webovky (obce), cucá data a vrací je v seznamu.
EG.: return [[code, location ...], [62500, Brno ...], ...]
:param web_list:
:return db:
"""
assert web_list != [], "Problém: seznam adres je prázdný."
db = [(complete_first_line(suck_n_chop(web_list[0])))]
for web in web_list:
soup = suck_n_chop(web)
        # extract the municipality code
code = []
start = web.find("obec=") + 5
i = start
while web[i].isnumeric():
code.append(web[i])
i += 1
code = "".join(code)
        # build the row for the CSV
db.append(make_package(soup, code))
return db
def complete_first_line(soup):
"""
Přijímá soap webu, ze které získá jména všech polit. stran a vrátí
popisky osy X v CSV tabulce.
:param soup:
:return first_line:
"""
first_line = ["code", "location", "registered", "envelopes", "valid"]
tabs = soup.find_all("div", {"class": "t2_470"})
par_lines = [tr.find_all("td") for tab in tabs for tr in tab.find_all("tr")
if len(tr) == 11]
for party in range(len(par_lines)):
first_line.append(par_lines[party][1].text)
return first_line
def make_package(soup, code):
"""
Získá z poskytnuté polévky informace a vrátí je v balíku - seřazené jako
řádek v CSV. Zvenku také získá kód označující obec.
:param soup:
:param code:
:return package:
"""
# code
package = [code]
# location
for title in soup.find_all("h3"):
if title.text[1:6] == "Obec:":
package.append(title.text[7:].strip())
# registered
tab = soup.find("table")
lines = [tr for tr in tab.find_all("tr")]
inventory = [td for td in lines[2]]
package.append("".join(inventory[7].text.split("\xa0")))
# envelopes
package.append("".join(inventory[9].text.split("\xa0")))
# valid
package.append("".join(inventory[15].text.split("\xa0")))
# parties
tabs = soup.find_all("table")
par_lines = [tr.find_all("td") for tab in tabs[1:]
for tr in tab.find_all("tr") if len(tr) == 11]
for party in range(len(par_lines)):
package.append("".join(par_lines[party][2].text.split("\xa0")))
return package
def suck_n_chop(web):
"""
Vyšle požadavek, zkontroluje příjem a vrátí soap.
:param web:
:return BS(resp.text, "html.parser"):
"""
resp = requests.get(web)
assert resp.status_code == 200, "Problém se spojením: {}".format(resp.status_code)
return BS(resp.text, "html.parser")
def save_csv(db, file_name):
"""
Přijíme data o volbách a uloží data do csv souboru s přijatým názvem.
:param db:
:param file_name:
"""
with open(file_name + ".csv", "w", newline="") as file:
writer = csv.writer(file)
writer.writerows(db)
def main(CSV_NAME):
"""
Jedna funkce vládne všem, jedna jim všem káže...
-
200 obcí za 20 sec.
Všech cca 6300 obcí si žádá cca 13 minut!!!
"""
# fixtures
# web = "https://volby.cz/pls/ps2017nss"
# okresy = ["https://volby.cz/pls/ps2017nss/ps32?xjazyk=CZ&xkraj=11&xnumnuts=6203"]
# obce = ["https://volby.cz/pls/ps2017nss/ps311?xjazyk=CZ&xkraj=11&xobec=582786&xvyber=6202"]
# db = [["name", "age", "sex"], ["Ondrej", "23", "M"], ["Lucie", "21", "F"]]
time_1 = time()
okresy, web = get_districts(INPUT)
print("Nalezeno okresů:", len(okresy))
obce = get_municipalities(okresy, web)
print("Nalezeno obcí:", len(obce))
print("Zpracovávám. Čekejte prosím...")
db = extract_data(obce)
save_csv(db, CSV_NAME)
print("Úspěšně dokončeno a uloženo.")
time_2 = time()
time_total = int(time_2 - time_1)
print("TOTAL TIME:", time_total, "secs")
if __name__ == "__main__":
if len(sys.argv) != 2:
print("\nUSAGE: Web_scraper.py OutputFileName\n")
else:
        main(sys.argv[1])  # pass the filename string, not a one-element list
``` |
{
"source": "john-veillette/mne_ari",
"score": 2
} |
#### File: mne_ari/ari/ari.py
```python
from mne.stats.cluster_level import (
_find_clusters,
_setup_adjacency,
_reshape_clusters,
_cluster_indices_to_mask
)
import numpy as np
from .parametric import ARI
from .permutation import pARI
def all_resolutions_inference(X, alpha = .05, tail = 0, ari_type = 'parametric',
adjacency = None, n_permutations = 10000, thresholds = None,
seed = None, statfun = None, shift = 0):
'''
Implements all-resolutions inference as in [1] or [2].
Tries a range of cluster thresholds between chosen alpha and the Bonferroni
corrected threshold. You can manually specify thresholds to try if you want
Parameters
----------
    X: (n_observations, n_times, n_vertices) array for a one-sample/paired test
        or a list of two such arrays for an independent-samples test
    alpha: (float) the false discovery control level
    tail: 1 or 'greater', 0 or 'two-sided', -1 or 'less';
        ignored if statfun is provided.
    adjacency: defines neighbors in the data, as in
        mne.stats.spatio_temporal_cluster_1samp_test
    ari_type: (str) 'parametric' to perform ARI as in [1]
        and 'permutation' as in [2]
n_permutations: (int) number of permutations to perform
thresholds: (iterable) optional, manually specify cluster
inclusion thresholds to search over
shift: (float) shift for candidate critical vector family.
Corresponds to delta parameter in [2].
If statfun p-values are anti-conservative, increasing this can
increase power for detecting larger clusters (at the cost of
decreased power for smaller clusters). Therefore, this
corresponds to the minimum size cluster we're interested in
detecting. Default is 0 (interested in any sized cluster).
Only for permutation-based ARI, ignored otherwise.
statfun: a custom statistics function to compute p-values. Should take
an (n_observations, n_tests) array (or list of such arrays)
as input and return an (n_tests,) array of p-values. If this
argument is used, the tail argument is ignored.
Returns
----------
p_vals: (sample_shape array) the p-values from the mass univariate test
    true_discovery_proportions: (sample_shape array)
                            highest true discovery proportion
                            for each coordinate in p_vals
                            across all thresholds.
clusters: list of sample_shape boolean masks or empty list
clusters in which true positive proportion exceeds 1 - alpha
References
----------
[1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
All-Resolutions Inference for brain imaging.
Neuroimage. 2018 Nov 1;181:786-796.
doi: 10.1016/j.neuroimage.2018.07.060
[2] <NAME>, et al.
"Permutation-based true discovery proportions for fMRI cluster analysis."
arXiv preprint arXiv:2012.00368 (2020).
'''
# initialize ARI object, which computes p-value
if ari_type == 'parametric':
ari = ARI(X, alpha, tail, n_permutations, seed, statfun)
elif ari_type == 'permutation':
ari = pARI(X, alpha, tail, n_permutations, seed, statfun, shift)
else:
raise ValueError("type must be 'parametric' or 'permutation'.")
p_vals = ari.p_values
true_discovery_proportions = np.zeros_like(p_vals)
n_times = p_vals.shape[0]
n_tests = p_vals.size
# setup adjacency structure if needed
if adjacency is not None and adjacency is not False:
adjacency = _setup_adjacency(adjacency, n_tests, n_times)
# handle threshold arguments, construct default if needed
    if thresholds is None: # search a grid from alpha down to the smallest p-value
        thresholds = np.geomspace(alpha, np.min(p_vals), num = 1000)
elif thresholds == 'all':
thresholds = p_vals.flatten()
else: # verify user-input thresholds
if not hasattr(thresholds, '__iter__'):
thresholds = [thresholds]
for thres in thresholds:
# make sure cluster thresholds are valid p-values
assert(thres >= 0)
assert(thres <= 1)
for thres in thresholds:
if adjacency is None: # use lattice adjacency
clusters, _ = _find_clusters(p_vals, thres, -1)
else:
clusters, _ = _find_clusters(p_vals.flatten(), thres, -1, adjacency)
if clusters: # reshape to boolean mask
clusters = _cluster_indices_to_mask(clusters, n_tests)
clusters = _reshape_clusters(clusters, true_discovery_proportions.shape)
for clust in clusters:
# compute the true-positive proportion for this cluster
tdp = ari.true_discovery_proportion(clust)
# update results array if new TPF > old TPF
tdp_old = true_discovery_proportions[clust]
tdp_new = np.full_like(tdp_old, tdp)
tdps = np.stack([tdp_old, tdp_new], axis = 0)
true_discovery_proportions[clust] = tdps.max(axis = 0)
# get clusters where true discovery proportion exceeds threshold
clusters, _ = _find_clusters(true_discovery_proportions.flatten(), 1 - alpha, 1, adjacency)
if clusters:
clusters = _cluster_indices_to_mask(clusters, n_tests)
clusters = _reshape_clusters(clusters, true_discovery_proportions.shape)
return p_vals, true_discovery_proportions, clusters
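# --- Usage sketch (illustrative; not part of the original module) ---
# Synthetic one-sample data shaped (n_observations, n_times, n_vertices);
# leaving adjacency=None falls back to lattice adjacency as handled above.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(0.3, 1.0, size=(20, 10, 8))  # weak positive effect everywhere
    p_vals, tdp, clusters = all_resolutions_inference(X, alpha=0.05, tail=0)
    print(p_vals.shape, tdp.max(), len(clusters))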
```
#### File: mne_ari/ari/permutation.py
```python
from ._permutation import _permutation_1samp, _permutation_ind
import numpy as np
def _optimize_lambda(p, alpha, delta = 0):
'''
finds best lambda parameter given the permutation distribution of p-values
based on https://github.com/angeella/pARI/blob/master/src/lambdaCalibrate.cpp
but only supports Simes family
'''
b = p.shape[1] # number of permutations
mm = p.shape[0] # number of tests
T = np.empty(b)
idV = np.arange(1 + delta, 1 + mm)
deltaV = np.full(mm - delta, delta)
mV = np.full(mm - delta, mm)
## sort columns of p-vals
Y = np.sort(p, axis = 1)
# compute lambda for each permutation
for bb in range(b):
        lam = (mV - deltaV) * Y[delta:mm, bb] / ((idV - deltaV) * alpha)  # elementwise product (Simes family), not modulo
T[bb] = np.min(lam) # minimum over hypotheses
T = np.sort(T)
idx = np.floor(alpha * b).astype(int)
return T[idx]
def _get_critical_vector(p, alpha, lam, delta = 0):
'''
based on https://github.com/angeella/pARI/blob/master/R/criticalVector.R
but only support Simes family
'''
m = p.shape[0]
cc = np.arange(1, m + 1)
vf = np.vectorize(lambda x: ((x - delta) * alpha * lam) / (m - delta))
return vf(cc)
class pARI:
'''
A class that handles permuation-based All-Resolutions Inference as in [1].
[1] Andreella, Angela, et al.
"Permutation-based true discovery proportions for fMRI cluster analysis."
arXiv preprint arXiv:2012.00368 (2020).
'''
def __init__(self, X, alpha, tail = 0,
n_permutations = 10000, seed = None, statfun = None, shift = 0):
'''
uses permutation distribution to estimate best critical vector
'''
if tail == 0 or tail == 'two-sided':
self.alternative = 'two-sided'
elif tail == 1 or tail == 'greater':
self.alternative = 'greater'
elif tail == -1 or tail == 'less':
self.alternative = 'less'
else:
raise ValueError('Invalid input value for tail!')
self.alpha = alpha
assert(shift >= 0)
self.delta = shift # same default as pARIBrain, see [1]
if type(X) in [list, tuple]:
self.sample_shape = X[0][0].shape
X = [np.reshape(x, (x.shape[0], -1)) for x in X] # flatten samples
p = _permutation_ind(X, n_permutations, self.alternative, seed, statfun)
else:
self.sample_shape = X[0].shape
X = np.reshape(X, (X.shape[0], -1)) # flatten samples
p = _permutation_1samp(X, n_permutations, self.alternative, seed, statfun)
self.p = p[:, 0] # just the observed values
self.lam = _optimize_lambda(p, self.alpha, self.delta)
self.crit_vec = _get_critical_vector(p, self.alpha, self.lam, self.delta)
def true_discovery_proportion(self, mask):
'''
given a boolean mask, gives the true discovery proportion
for the specified cluster
'''
assert(mask.shape == self.sample_shape)
mask = mask.flatten()
m = mask.sum() # number of tests in mask
p = self.p # observed p-values
p_vec = p[mask] # p-values in subset
u = np.empty(m)
for i in range(m):
u[i] = np.sum(p_vec <= self.crit_vec[i]) - i
n_discoveries = np.max(u) # a lower bound
tdp = n_discoveries / m
try:
assert(tdp >= 0)
assert(tdp <= 1)
except:
raise Exception("Something weird happened," +
" and we got a TDP outside of the range [0, 1]." +
" Did you use a custom stat function?" +
" Are you sure your p-values make sense?")
return tdp
@property
def p_values(self):
return np.reshape(self.p, self.sample_shape)
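# --- Usage sketch (illustrative; not part of the original module) ---
# X is shaped (n_observations, *sample_shape); any boolean mask over
# sample_shape can be queried for a lower bound on its true discovery proportion.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(0.4, 1.0, size=(15, 12, 6))
    ari = pARI(X, alpha=0.05, tail=0, n_permutations=1000)
    mask = ari.p_values < 0.05
    if mask.any():
        print("TDP lower bound:", ari.true_discovery_proportion(mask))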
``` |
{
"source": "john-veillette/mne-ari",
"score": 2
} |
#### File: mne-ari/mne_ari/permutation.py
```python
from mne.utils import check_random_state
from typing import Iterable
import numpy as np
'''
This module provides mass univariate permutation tests for mutlidimensional
arrays. Note that the (univariate, not cluster-based) permutation test
provided in MNE as mne.stats.permutation_t_test applies a t-max correction
for multiple comparisons to its p-values, making those p-values unsuitable for
all-resolutions inference. Hence, we provide our own implementations here.
They're not the fastest permutation tests in the world with those for loops,
but they're pretty memory efficient since they never hold the full permutation
distribution in memory at once.
'''
def _compare(obs, perm, tail):
if tail == 1:
mask = (perm >= obs)
elif tail == -1:
mask = (perm <= obs)
return mask
def _permutation_test_1samp(X, n_permutations = 10000, tail = 0, seed = None):
rng = check_random_state(seed)
greater_ct = np.zeros_like(X[0])
lesser_ct = np.zeros_like(X[0])
obs = X.mean(0)
for i in range(n_permutations):
flips = rng.choice([-1, 1], size = X.shape[0])
broadcast_shape = [X.shape[0]] + (len(X.shape) - 1)*[1]
flips = np.reshape(flips, broadcast_shape)
perm_X = flips * X
perm_effect = perm_X.mean(0)
greater_ct += _compare(obs, perm_effect, 1)
lesser_ct += _compare(obs, perm_effect, -1)
if tail == 1:
p = (greater_ct + 1) / (n_permutations + 1)
elif tail == -1:
p = (lesser_ct + 1) / (n_permutations + 1)
elif tail == 0:
p1 = (greater_ct + 1) / (n_permutations + 1)
p2 = (lesser_ct + 1) / (n_permutations + 1)
p = 2 * np.stack([p1, p2], 0).min(0)
else:
raise ValueError("Cannot compute p-value with meaningless tail = %d."%tail)
return p
def _permutation_test_ind(X, n_permutations = 10000, tail = 0, seed = None):
rng = check_random_state(seed)
n1 = len(X[0])
X = np.concatenate(X, axis = 0)
greater_ct = np.zeros_like(X[0])
lesser_ct = np.zeros_like(X[0])
idxs = np.arange(len(X))
obs = X[:n1].mean(0) - X[n1:].mean(0)
for i in range(n_permutations):
rng.shuffle(idxs)
perm_X = X[idxs]
perm_effect = perm_X[:n1].mean(0) - perm_X[n1:].mean(0)
greater_ct += _compare(obs, perm_effect, 1)
lesser_ct += _compare(obs, perm_effect, -1)
if tail == 1:
p = (greater_ct + 1) / (n_permutations + 1)
elif tail == -1:
p = (lesser_ct + 1) / (n_permutations + 1)
elif tail == 0:
p1 = (greater_ct + 1) / (n_permutations + 1)
p2 = (lesser_ct + 1) / (n_permutations + 1)
p = 2 * np.stack([p1, p2], 0).min(0)
else:
raise ValueError("Cannot compute p-value with meaningless tail = %d."%tail)
return p
def permutation_test(X, **kwargs):
"""
Parameters
----------
X : array, shape (n_samples, n_tests) if one-sample;
list of 2 arrays if independent samples
Samples (observations) by number of tests (variables).
n_permutations : int, Number of permutations.
tail : -1 or 0 or 1 (default = 0)
If tail is 1, the alternative hypothesis is that the
mean of the data is greater than 0 (upper tailed test). If tail is 0,
the alternative hypothesis is that the mean of the data is different
than 0 (two tailed test). If tail is -1, the alternative hypothesis
is that the mean of the data is less than 0 (lower tailed test).
"""
if isinstance(X, list) or isinstance(X, tuple):
assert(len(X) == 2)
assert(X[0].shape[1:] == X[1].shape[1:])
return _permutation_test_ind(X, **kwargs)
else:
return _permutation_test_1samp(X, **kwargs)
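# --- Usage sketch (illustrative; not part of the original module) ---
# One-sample test on a (n_samples, n_times, n_channels) array, then an
# independent-samples test on a pair of such arrays.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(0.5, 1.0, size=(20, 10, 4))
    p_one = permutation_test(X, n_permutations=1000, tail=0)
    A = rng.normal(0.0, 1.0, size=(15, 10, 4))
    B = rng.normal(0.5, 1.0, size=(15, 10, 4))
    p_ind = permutation_test([A, B], n_permutations=1000, tail=0)
    print(p_one.shape, p_ind.shape)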
``` |
{
"source": "john-veillette/PsychRNN",
"score": 2
} |
#### File: PsychRNN/docs/conf.py
```python
project = 'PsychRNN'
copyright = '2020, <NAME>*, <NAME>*, <NAME>, <NAME>, <NAME> (* indicates equal contribution)'
author = '<NAME>*, <NAME>*, <NAME>, <NAME>, <NAME> (* indicates equal contribution)'
exec(open("../psychrnn/_version.py", "r").read()) # get __version__ variable
# The short X.Y version
version = ".".join(__version__.split(".")[0:-1])
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'nbsphinx',
'sphinx.ext.mathjax',
'sphinx_copybutton',
"sphinx_rtd_theme",
'sphinxcontrib.napoleon',
'autodocsumm',
]
# include autosummary by default
autodoc_default_options = {
'autosummary': True,
}
autodata_content = 'both'
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'notebooks/.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_sidebars = { '**': ['customtoc.html', 'localtoc.html','relations.html', 'searchbox.html', 'sourcelink.html'], }
nbsphinx_prolog = r"""
{% set docname = 'docs/' + env.doc2path(env.docname, base=None) %}
.. raw:: html
<div class="admonition note">
This page was generated from
<a class="reference external" href="https://github.com/murraylab/PsychRNN/blob/v{{ env.config.release|e }}/{{ docname|e }}">{{ docname|e }}</a>.
Interactive online version:
<a href="https://colab.research.google.com/github/murraylab/PsychRNN/blob/v{{ env.config.release|e }}/{{ docname|e }}"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>.
<script>
if (document.location.host) {
$(document.currentScript).replaceWith(
'<a class="reference external" ' +
'href="https://nbviewer.jupyter.org/url' +
(window.location.protocol == 'https:' ? 's/' : '/') +
window.location.host +
window.location.pathname.slice(0, -4) +
'ipynb">View in <em>nbviewer</em></a>.'
);
}
</script>
</div>
"""
# Taken from https://stackoverflow.com/questions/8821511/substitutions-in-sphinx-code-blocks
def ultimateReplace(app, docname, source):
result = source[0]
for key in app.config.ultimate_replacements:
result = result.replace(key, app.config.ultimate_replacements[key])
source[0] = result
ultimate_replacements = {
"{release}" : release
}
def setup(app):
app.add_config_value('ultimate_replacements', {}, True)
app.connect('source-read', ultimateReplace)
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = True
napoleon_use_keyword = False
napoleon_custom_sections = None
```
#### File: psychrnn/backend/curriculum.py
```python
from __future__ import division
from __future__ import print_function
from os import makedirs, path
import numpy as np
def default_metric(curriculum_params, input_data, correct_output, output_mask, output, epoch, losses, verbosity):
""" Default metric to use to evaluate performance when using Curriculum learning.
Advance is true if accuracy >= threshold, False otherwise.
Arguments:
curriculum_params (dict): Dictionary of the :class:`Curriculum` object parameters, containing the following keys:
:Dictionary Keys:
* **stop_training** (*bool*) -- True if the network has finished training and completed all stages.
* **stage** (*int*) -- Current training stage (initial stage is 0).
* **metric_values** (*list of [float, int]*) -- List of metric values and the stage at which each metric value was computed.
* **tasks** (*list of :class:`psychrnn.tasks.task.Task` objects*) -- List of tasks in the curriculum.
* **metric** (*function*) -- What metric function to use. :func:`default_metric` is an example of one in terms of inputs and outputs taken.
* **accuracies** (*list of functions with the signature of* :func:`psychrnn.tasks.task.Task.accuracy_function`) -- Accuracy function to use at each stage.
* **thresholds** (*list of float*) -- Thresholds for each stage that accuracy must reach to move to the next stage.
* **metric_epoch** (*int*) -- Calculate the metric / test if advance to the next stage every metric_epoch training epochs.
* **output_file** (*str*) -- Optional path for where to save out metric value and stage.
input_data (ndarray(dtype=float, shape =(:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` ))): Task inputs.
correct_output (ndarray(dtype=float, shape = (:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out`))): Correct (target) task output given input_data.
output_mask (ndarray(dtype=float, shape = (:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out`))): Output mask for the task. True when the network should aim to match the target output, False when the target output can be ignored.
output (ndarray(dtype=float, shape = (:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out`))): The network's output given input_data.
epoch (*int*): The epoch number in training.
losses (*list of float*): List of losses, computed during training.
verbosity (*bool*): Whether to print information as training progresses. If True, prints accuracy every time it is computed.
Returns:
tuple:
* **advance** (*bool*) -- True if the accuracy is >= the threshold for the current stage. False otherwise.
* **metric_value** (*float*) -- Value of the computed accuracy.
"""
accuracy = curriculum_params['accuracies'][curriculum_params['stage']](correct_output,output, output_mask)
threshold = curriculum_params['thresholds'][curriculum_params['stage']]
if verbosity:
print("Accuracy: " + str(accuracy))
return accuracy>=threshold, accuracy
class Curriculum(object):
""" Curriculum object.
Allows training on a sequence of tasks when Curriculum is passed into :func:`~psychrnn.backend.rnn.RNN.train`.
Arguments:
tasks (list of :class:`~psychrnn.tasks.task.Task` objects): List of tasks to use in the curriculum.
metric (function, optional): Function for calculating whether the stage advances and what the metric value is at each metric_epoch. Default: :func:`default_metric`.
:Arguments:
* **curriculum_params** (*dict*) -- Dictionary of the :class:`Curriculum` object parameters, containing the following keys:
:Dictionary Keys:
* **stop_training** (*bool*) -- True if the network has finished training and completed all stages.
* **stage** (*int*) -- Current training stage (initial stage is 0).
* **metric_values** (*list of [float, int]*) -- List of metric values and the stage at which each metric value was computed.
* **tasks** (*list of :class:`psychrnn.tasks.task.Task` objects*) -- List of tasks in the curriculum.
* **metric** (*function*) -- What metric function to use. :func:`default_metric` is an example of one in terms of inputs and outputs taken.
* **accuracies** (*list of functions*) -- Accuracy function to use at each stage.
* **thresholds** (*list of float*) -- Thresholds for each stage that accuracy must reach to move to the next stage.
* **metric_epoch** (*int*) -- Calculate the metric and test if the model should advance to the next stage every :data:`metric_epoch` training epochs.
                * **output_file** (*str*) -- Optional path for saving out the metric value and stage. If the .npz filename extension is not included, it will be appended.
* **input_data** (*ndarray(dtype=float, shape =(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` *))*) -- Task inputs.
* **correct_output** (*ndarray(dtype=float, shape = (*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` *))*) -- Correct (target) task output given input_data.
* **output_mask** (*ndarray(dtype=float, shape = (*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` *))*) -- Output mask for the task. True when the network should aim to match the target output, False when the target output can be ignored.
* **output** (*ndarray(dtype=float, shape = (*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` *))*) -- The network's output given input_data.
* **epoch** (*int*) -- The epoch number in training.
* **losses** (*list of float*) -- List of losses, computed during training.
* **verbosity** (*bool*) -- Whether to print information as training progresses.
:Returns:
*tuple*
* **advance** (*bool*) -- True if the the stage should be advanced. False otherwise.
* **metric_value** (*float*) -- Value of the computed metric.
accuracies (list of functions, optional): Optional list of functions to use to calculate network performance for the purposes of advancing tasks. Used by :func:`default_metric` to compute accuracy. Default: ``[tasks[i].accuracy_function for i in range(len(tasks))]``.
thresholds (list of float, optional): Optional list of thresholds. If metric = default_metric, accuracies must reach the threshold for a given stage in order to advance to the next stage. Default: ``[.9 for i in range(len(tasks))]``
metric_epoch (int): Calculate the metric and test if the model should advance to the next stage every :data:`metric_epoch` training epochs. Default: 10
output_file (str): Optional path for saving out the metric value and stage. If the .npz filename extension is not included, it will be appended. Default: None.
"""
def __init__(self, tasks, **kwargs):
self.stop_training = False
self.stage = 0
self.metric_values = []
# List of tasks that make up the curriculum
self.tasks = tasks
#Optional function with parameters as in default_metric that returns whether to advance stage, and the accuracy / metric value
self.metric = kwargs.get('metric', default_metric)
#Optional list of accuracy functions to use for each task
self.accuracies = kwargs.get('accuracies', [tasks[i].accuracy_function for i in range(len(tasks))])
assert len(self.accuracies)==len(self.tasks)
        # Optional list of accuracy cutoff values to use with each task
self.thresholds = kwargs.get('thresholds', [.9 for i in range(len(tasks))])
assert len(self.thresholds)==len(self.tasks)
# How often to check metric?
self.metric_epoch = kwargs.get('metric_epoch', 10)
# Optional path to save out metric value and stage to
self.output_file = kwargs.get('output_file', None)
if self.output_file is not None:
if path.dirname(self.output_file) != "" and not path.exists(path.dirname(self.output_file)):
makedirs(path.dirname(self.output_file))
def metric_test(self, input_data, correct_output, output_mask, test_output, epoch, losses, verbosity = False):
"""Evaluates whether to advance the stage to the next task or not.
Arguments:
input_data (ndarray(dtype=float, shape =(:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` ))): Task inputs.
correct_output (ndarray(dtype=float, shape = (:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out`))): Correct (target) task output given input_data.
output_mask (ndarray(dtype=float, shape = (:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out`))): Output mask for the task. True when the network should aim to match the target output, False when the target output can be ignored.
test_output (ndarray(dtype=float, shape = (:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out`))): The network's output given input_data.
epoch (*int*): The epoch number in training.
losses (*list of float*): List of losses, computed during training.
verbosity (*bool, optional*): Whether to print information as metric is computed and stages advanced. Default: False
Returns:
True if stage advances, False otherwise.
"""
advance, metric_value = self.metric(self.__dict__, input_data, correct_output, output_mask, test_output, epoch, losses, verbosity)
self.metric_values.append([metric_value, self.stage])
if advance:
self.stage+=1
if self.stage == len(self.tasks):
self.stop_training = True
if self.output_file is not None:
np.save(self.output_file, self.metric_values)
if verbosity:
print("Metric values saved in file: %s" % self.output_file)
if verbosity:
print("Stage " + str(self.stage))
return True
return False
def get_generator_function(self):
""" Return the generator function for the current task.
Returns:
:func:`psychrnn.tasks.task.Task.batch_generator` function: Task batch generator for the task at the current stage.
"""
return self.tasks[self.stage].batch_generator()
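# --- Usage sketch (illustrative; not part of the original file) ---
# Builds a two-stage curriculum from task classes defined elsewhere in this
# package; the timing and batch-size values are arbitrary example numbers. The
# resulting object would then be passed to RNN.train, as the class docstring
# describes.
if __name__ == "__main__":
    from psychrnn.tasks.delayed_discrim import DelayedDiscrimination
    from psychrnn.tasks.match_to_category import MatchToCategory
    tasks = [MatchToCategory(dt=10, tau=100, T=2000, N_batch=50),
             DelayedDiscrimination(dt=10, tau=100, T=2000, N_batch=50)]
    curriculum = Curriculum(tasks, thresholds=[.9, .9], metric_epoch=10)
    print(curriculum.get_generator_function())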
```
#### File: psychrnn/tasks/delayed_discrim.py
```python
from __future__ import division
from psychrnn.tasks.task import Task
import numpy as np
class DelayedDiscrimination(Task):
"""Delayed discrimination task.
    Following a fore period, the network receives a first stimulus, then a delay, and then a second stimulus. Each stimulus drives both noisy input channels; the second channel receives an inversely scaled version of the signal on the first channel. After the second stimulus, the network must respond by activating the output node that indicates whether the first stimulus was greater than or less than the second.
Takes two channels of noisy input (:attr:`N_in` = 2).
Two channel output (:attr:`N_out` = 2) with a one hot encoding (high value is 1, low value is .2).
Loosely based on `<NAME>., <NAME>., <NAME>., & <NAME>. (1999). Neuronal correlates of
parametric working memory in the prefrontal cortex. Nature, 399(6735), 470. <https://www.nature.com/articles/20939>`_
Args:
dt (float): The simulation timestep.
tau (float): The intrinsic time constant of neural state decay.
T (float): The trial length.
N_batch (int): The number of trials per training update.
onset_time (float, optional): Stimulus onset time in terms of trial length :data:`T`.
stim_duration_1 (float, optional): Stimulus 1 duration in terms of trial length :data:`T`.
delay_duration (float, optional): Delay duration in terms of trial length :data:`T`.
stim_duration_2 (float, optional): Stimulus 2 duration in terms of trial length :data:`T`.
decision_duration (float, optional): Decision duration in terms of trial length :data:`T`.
"""
def __init__(self, dt, tau, T, N_batch, onset_time = None, stim_duration_1 = None, delay_duration = None, stim_duration_2 = None, decision_duration = None):
super(DelayedDiscrimination,self).__init__(2, 2, dt, tau, T, N_batch)
self.onset_time = onset_time
self.stim_duration_1 = stim_duration_1
self.delay_duration = delay_duration
self.stim_duration_2 = stim_duration_2
self.decision_duration = decision_duration
self.frequency_pairs = [(18, 10), (22, 14), (26, 18), (30, 22), (34, 26)] # frequency pairs to select from
self.decision_options = ['>', '<'] # decision options to select from
self.lo = 0.2 # Low value for one hot encoding
self.hi = 1.0 # High value for one hot encoding
def _scale_p(self, f):
"""Scale frequency to be between .4 and 1.2."""
return 0.4 + 0.8 * (f - 10) / (34 - 10)
def _scale_n(self, f):
""" Scale frequency to be between .4 and 1.2, invert frequency ordering."""
return 0.4 + 0.8 * (34 - f) / (34 - 10)
def generate_trial_params(self, batch, trial):
"""Define parameters for each trial.
Implements :func:`~psychrnn.tasks.task.Task.generate_trial_params`.
Args:
batch (int): The batch number that this trial is part of.
trial (int): The trial number of the trial within the batch *batch*.
Returns:
dict: Dictionary of trial parameters including the following keys:
:Dictionary Keys:
            * **stimulus_1** (*float*) -- Start time for stimulus one. :data:`onset_time`.
            * **delay** (*float*) -- Start time for the delay. :data:`onset_time` + :data:`stimulus_duration_1`.
            * **stimulus_2** (*float*) -- Start time for stimulus two. :data:`onset_time` + :data:`stimulus_duration_1` + :data:`delay_duration`.
            * **decision** (*float*) -- Start time for the decision period. :data:`onset_time` + :data:`stimulus_duration_1` + :data:`delay_duration` + :data:`stimulus_duration_2`.
            * **end** (*float*) -- End of decision period. :data:`onset_time` + :data:`stimulus_duration_1` + :data:`delay_duration` + :data:`stimulus_duration_2` + :data:`decision_duration`.
            * **stim_noise** (*float*) -- Scales the stimulus noise. Set to .1.
* **f1** (*int*) -- Frequency of first stimulus.
* **f2** (*int*) -- Frequency of second stimulus.
* **choice** (*str*) -- Indicates whether :data:`f1` is '>' or '<' :data:`f2`.
"""
params = dict()
if self.onset_time is None:
onset_time = np.random.uniform(0, 1) * self.T / 8.0
else:
onset_time = self.onset_time
if self.stim_duration_1 is None:
stim_duration_1 = np.random.uniform(0, 1) * self.T / 4.0
else:
stim_duration_1 = self.stim_duration_1
if self.delay_duration is None:
delay_duration = np.random.uniform(0, 1) * self.T / 4.0
else:
delay_duration = self.delay_duration
if self.stim_duration_2 is None:
stim_duration_2 = np.random.uniform(0, 1) * self.T / 4.0
else:
stim_duration_2 = self.stim_duration_2
if self.decision_duration is None:
decision_duration = np.random.uniform(0, 1) * self.T / 8.0
else:
decision_duration = self.decision_duration
params['stimulus_1'] = onset_time
params['delay'] = onset_time + stim_duration_1
params['stimulus_2'] = onset_time + stim_duration_1 + delay_duration
params['decision'] = onset_time + stim_duration_1 + delay_duration + stim_duration_2
params['end'] = onset_time + stim_duration_1 + delay_duration + stim_duration_2 + decision_duration
params['stim_noise'] = 0.1
fpair = self.frequency_pairs[np.random.choice(len(self.frequency_pairs))]
gt_lt = np.random.choice(self.decision_options)
if gt_lt == '>':
f1, f2 = fpair
choice = 0
else:
f2, f1 = fpair
choice = 1
params['f1'] = f1
params['f2'] = f2
params['choice'] = choice
return params
def trial_function(self, t, params):
"""Compute the trial properties at :data:`time`.
Implements :func:`~psychrnn.tasks.task.Task.trial_function`.
Based on the :data:`params` compute the trial stimulus (x_t), correct output (y_t), and mask (mask_t) at :data:`time`.
Args:
time (int): The time within the trial (0 <= :data:`time` < :attr:`T`).
params (dict): The trial params produced by :func:`generate_trial_params`.
Returns:
tuple:
* **x_t** (*ndarray(dtype=float, shape=(*:attr:`N_in` *,))*) -- Trial input at :data:`time` given :data:`params`. First channel contains :data:`f1` during the first stimulus period, and :data:`f2` during the second stimulus period, scaled to be between .4 and 1.2. Second channel contains the frequencies but reverse scaled -- high frequencies correspond to low values and vice versa. Both channels have baseline noise.
* **y_t** (*ndarray(dtype=float, shape=(*:attr:`N_out` *,))*) -- Correct trial output at :data:`time` given :data:`params`. The correct output is encoded using one-hot encoding during the decision period.
* **mask_t** (*ndarray(dtype=bool, shape=(*:attr:`N_out` *,))*) -- True if the network should train to match the y_t, False if the network should ignore y_t when training. The mask is True for during the decision period and False otherwise.
"""
# ----------------------------------
# Initialize with noise
# ----------------------------------
x_t = np.sqrt(2*.01*np.sqrt(10)*np.sqrt(self.dt)*params['stim_noise']*params['stim_noise'])*np.random.randn(self.N_in)
y_t = np.zeros(self.N_out)
mask_t = np.zeros(self.N_out)
# ----------------------------------
# Retrieve parameters
# ----------------------------------
stimulus_1 = params['stimulus_1']
delay = params['delay']
stimulus_2 = params['stimulus_2']
decision = params['decision']
end = params['end']
f1 = params['f1']
f2 = params['f2']
choice = params['choice']
# ----------------------------------
# Compute values
# ----------------------------------
if stimulus_1 <= t < delay:
x_t[0] += self._scale_p(f1)
x_t[1] += self._scale_n(f1)
if stimulus_2 <= t < decision:
x_t[0] += self._scale_p(f2)
x_t[1] += self._scale_n(f2)
if decision <= t < end:
y_t[choice] = self.hi
y_t[1-choice] = self.lo
mask_t = np.ones(self.N_out)
return x_t, y_t, mask_t
def accuracy_function(self, correct_output, test_output, output_mask):
"""Calculates the accuracy of :data:`test_output`.
Implements :func:`~psychrnn.tasks.task.Task.accuracy_function`.
Takes the channel-wise mean of the masked output for each trial. Whichever channel has a greater mean is considered to be the network's "choice".
Returns:
float: 0 <= accuracy <= 1. Accuracy is equal to the ratio of trials in which the network made the correct choice as defined above.
"""
chosen = np.argmax(np.mean(test_output*output_mask, axis=1), axis = 1)
truth = np.argmax(np.mean(correct_output*output_mask, axis = 1), axis = 1)
return np.mean(np.equal(truth, chosen))
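# --- Usage sketch (illustrative; not part of the original file) ---
# Exercises only the methods defined above: draws one set of trial parameters
# and evaluates the trial at a time point inside the decision period. The
# timing and batch-size values are arbitrary example numbers.
if __name__ == "__main__":
    task = DelayedDiscrimination(dt=10, tau=100, T=2000, N_batch=64)
    params = task.generate_trial_params(batch=0, trial=0)
    t_mid = (params['decision'] + params['end']) / 2.0
    x_t, y_t, mask_t = task.trial_function(t_mid, params)
    print(params['f1'], params['f2'], params['choice'], y_t, mask_t)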
```
#### File: psychrnn/tasks/match_to_category.py
```python
from __future__ import division
from psychrnn.tasks.task import Task
import numpy as np
class MatchToCategory(Task):
""" Multidirectional decision-making task.
On each trial the network receives input from units representing different locations on a ring. Each input unit magnitude represents closeness to the angle of input. The network must determine which side of arbitrary category boundaries the input belongs to and respond accordingly.
Takes :attr:`N_in` channels of noisy input arranged in a ring with gaussian signal around the ring centered at 0 at the stimulus angle.
:attr:`N_out` channel output arranged as slices of a ring with a one hot encoding towards the correct category output based on the angular location of the gaussian input bump.
Loosely based on `Freedman, <NAME>., and <NAME>. "Experience-dependent representation of visual categories in parietal cortex." Nature 443.7107 (2006): 85-88. <https://www.nature.com/articles/nature05078>`_
Args:
dt (float): The simulation timestep.
tau (float): The intrinsic time constant of neural state decay.
T (float): The trial length.
N_batch (int): The number of trials per training update.
N_in (int, optional): The number of network inputs. Defaults to 16.
N_out (int, optional): The number of network outputs. Defaults to 2.
"""
def __init__(self, dt, tau, T, N_batch, N_in=16, N_out=2):
super(MatchToCategory,self).__init__(N_in, N_out, dt, tau, T, N_batch)
def _gaussian_input(self,angle, scale = 1):
""" Calculates angular gaussian pdf with mean at the :data:`angle` for N_in evenly arranged around the circumference of a unit circle.
Args:
angle (float): The angle on the input circle at which to center the gaussian.
scale (float, optional): The scale of the gaussian function. Defaults to 1.
Returns:
ndarray(dtype=float, shape=(:attr:`N_in`,)): Normal pdf value at (angle - angle of N_in channel).
"""
g = np.zeros(self.N_in)
map_g = np.linspace(0,2*np.pi,self.N_in)
for ii in range(self.N_in):
# Center N_in around angle, and truncate so -pi <= effective angle <= pi
effective_angle = map_g[ii] - angle
if effective_angle > np.pi:
effective_angle = -2*np.pi + effective_angle
elif effective_angle < -np.pi:
effective_angle = 2*np.pi + effective_angle
x = effective_angle/scale
g[ii] = np.exp(-x**2/2.0) / (np.sqrt(2*np.pi)*scale)
return g
def generate_trial_params(self, batch, trial):
"""Define parameters for each trial.
Implements :func:`~psychrnn.tasks.task.Task.generate_trial_params`.
Args:
batch (int): The batch number that this trial is part of.
trial (int): The trial number of the trial within the batch *batch*.
Returns:
dict: Dictionary of trial parameters including the following keys:
:Dictionary Keys:
* **angle** (*float*) -- Angle at which to center the gaussian. Randomly selected.
* **category** (*int*) -- Index of the N_out category channel that contains the :data:`angle`.
* **onset_time** (*float*) -- Stimulus onset time. Set to 200.
* **input_dur** (*float*) -- Stimulus duration. Set to 1000.
* **output_dur** (*float*) -- Output duration. The time given to make a choice. Set to 800.
                * **stim_noise** (*float*) -- Scales the stimulus noise. Set to .1.
"""
params = dict()
params['angle'] = 2*np.pi*np.random.rand()
params['category'] = int(params['angle']/(2*np.pi/self.N_out))
params['onset_time'] = 200
params['input_dur'] = 1000.
params['output_dur'] = 800.
params['stim_noise'] = .1
return params
def trial_function(self, t, params):
"""Compute the trial properties at :data:`time`.
Implements :func:`~psychrnn.tasks.task.Task.trial_function`.
Based on the :data:`params` compute the trial stimulus (x_t), correct output (y_t), and mask (mask_t) at :data:`time`.
Args:
time (int): The time within the trial (0 <= :data:`time` < :attr:`T`).
params (dict): The trial params produced by :func:`generate_trial_params`.
Returns:
tuple:
* **x_t** (*ndarray(dtype=float, shape=(*:attr:`N_in` *,))*) -- Trial input at :data:`time` given :data:`params`. For ``params['onset_time'] < time < params['onset_time'] + params['input_dur']`` , gaussian pdf with mean = angle and scale = 1 is added to each input channel based on the channel's angle.
* **y_t** (*ndarray(dtype=float, shape=(*:attr:`N_out` *,))*) -- Correct trial output at :data:`time` given :data:`params`. 1 in the :data:`params['category']` output channel during the output period defined by :data:`params['output_dur']`, 0 otherwise.
* **mask_t** (*ndarray(dtype=bool, shape=(*:attr:`N_out` *,))*) -- True if the network should train to match the y_t, False if the network should ignore y_t when training. True during the output period, False otherwise.
"""
# ----------------------------------
# Initialize with noise
# ----------------------------------
x_t = (1./(2*self.dt))*params['stim_noise']*np.random.randn(self.N_in)
y_t = np.zeros(self.N_out)
mask_t = np.zeros(self.N_out)
# ----------------------------------
# Retrieve parameters
# ----------------------------------
onset_time = params['onset_time']
input_dur = params['input_dur']
output_dur = params['output_dur']
angle = params['angle']
category = params['category']
if onset_time <= t < onset_time + input_dur:
x_t += self._gaussian_input(angle)
if onset_time + input_dur <= t < onset_time + input_dur + output_dur:
y_t[category] += 1.
mask_t = np.ones(self.N_out)
return x_t, y_t, mask_t
def accuracy_function(self, correct_output, test_output, output_mask):
"""Calculates the accuracy of :data:`test_output`.
Implements :func:`~psychrnn.tasks.task.Task.accuracy_function`.
Takes the channel-wise mean of the masked output for each trial. Whichever channel has a greater mean is considered to be the network's "choice".
Returns:
float: 0 <= accuracy <= 1. Accuracy is equal to the ratio of trials in which the network made the correct choice as defined above.
"""
chosen = np.argmax(np.mean(test_output*output_mask, axis=1), axis = 1)
truth = np.argmax(np.mean(correct_output*output_mask, axis = 1), axis = 1)
return np.mean(np.equal(truth, chosen))
```
#### File: test/backend/test_loss_functions.py
```python
import pytest
import tensorflow as tf
from psychrnn.backend.rnn import RNN
from pytest_mock import mocker
import sys
if sys.version_info[0] == 2:
from mock import patch
else:
from unittest.mock import patch
# clears tf graph after each test.
@pytest.fixture()
def tf_graph():
yield
tf.compat.v1.reset_default_graph()
def get_params():
params = {}
params['name'] = "test"
params['N_in'] = 2
params['N_rec'] = 50
params['N_out'] = 2
params['N_steps'] = 200
params['dt'] = 10
params['tau'] = 100
params['N_batch'] = 50
return params
def mean_squared_error(predictions, y, output_mask):
""" Mean squared error.
``loss = mean(square(output_mask * (predictions - y)))``
Args:
predictions (*tf.Tensor(dtype=float, shape =(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` *))*): Network output.
y (*tf.Tensor(dtype=float, shape =(*?, :attr:`N_steps`, :attr:`N_out` *))*): Target output.
output_mask (*tf.Tensor(dtype=float, shape =(*?, :attr:`N_steps`, :attr:`N_out` *))*): Output mask for :attr:`N_batch` trials. True when the network should aim to match the target output, False when the target output can be ignored.
Returns:
tf.Tensor(dtype=float): Mean squared error.
"""
return tf.reduce_mean(input_tensor=tf.square(output_mask * (predictions - y)))
@patch.object(RNN, '__abstractmethods__', set())
def test_custom_loss(tf_graph, mocker):
params = get_params()
params['loss_function'] = 'my_mean_squared_error'
rnn = RNN(params)
mocker.patch.object(RNN, 'forward_pass')
RNN.forward_pass.return_value = tf.fill([params['N_batch'], params['N_steps'], params['N_out']], float('nan')), tf.fill([params['N_batch'], params['N_steps'], params['N_rec']], float('nan'))
with pytest.raises(UserWarning) as excinfo:
rnn.build()
assert 'my_mean_squared_error' in str(excinfo.value)
rnn.destruct()
params['my_mean_squared_error'] = mean_squared_error
rnn = RNN(params)
mocker.patch.object(RNN, 'forward_pass')
RNN.forward_pass.return_value = tf.fill([params['N_batch'], params['N_steps'], params['N_out']], float('nan')), tf.fill([params['N_batch'], params['N_steps'], params['N_rec']], float('nan'))
rnn.build()
``` |
{
"source": "john-veillette/pymc-learn",
"score": 2
} |
#### File: pmlearn/gaussian_process/gpr.py
```python
import numpy as np
import pymc3 as pm
import theano
from ..exceptions import NotFittedError
from ..base import BayesianModel, BayesianRegressorMixin
from .kernels import RBF
class GaussianProcessRegressorMixin(BayesianRegressorMixin):
"""Mixin class for Gaussian Process Regression
"""
def predict(self, X, return_std=False):
""" Perform Prediction
Predicts values of new data with a trained Gaussian Process
Regression model
Parameters
----------
X : numpy array, shape [n_samples, n_features]
return_std : Boolean
Whether to return standard deviations with mean values.
Defaults to False.
"""
if self.trace is None:
raise NotFittedError('Run fit on the model before predict.')
num_samples = X.shape[0]
if self.cached_model is None:
self.cached_model = self.create_model()
self._set_shared_vars({'model_input': X,
'model_output': np.zeros(num_samples)})
with self.cached_model:
f_pred = self.gp.conditional("f_pred", X)
self.ppc = pm.sample_ppc(self.trace,
vars=[f_pred],
samples=2000)
if return_std:
return self.ppc['f_pred'].mean(axis=0), \
self.ppc['f_pred'].std(axis=0)
else:
return self.ppc['f_pred'].mean(axis=0)
class GaussianProcessRegressor(BayesianModel,
GaussianProcessRegressorMixin):
""" Gaussian Process Regression built using PyMC3.
Fit a Gaussian process model and estimate model parameters using
MCMC algorithms or Variational Inference algorithms
Parameters
----------
prior_mean : mean object
The mean specifying the mean function of the GP. If None is passed,
the mean "pm.gp.mean.Zero()" is used as default.
kernel : covariance function (kernel)
The function specifying the covariance of the GP. If None is passed,
the kernel "RBF()" is used as default.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from pmlearn.gaussian_process import GaussianProcessRegressor
>>> from pmlearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel()
>>> gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
>>> gpr.score(X, y) # doctest: +ELLIPSIS
0.3680...
>>> gpr.predict(X[:2,:], return_std=True) # doctest: +ELLIPSIS
(array([653.0..., 592.1...]), array([316.6..., 316.6...]))
Reference
----------
<NAME> Williams (2006). Gaussian Processes for Machine Learning.
"""
def __init__(self, prior_mean=None, kernel=None):
self.ppc = None
self.gp = None
self.num_training_samples = None
self.num_pred = None
self.prior_mean = prior_mean
self.kernel = kernel
super(GaussianProcessRegressor, self).__init__()
def create_model(self):
""" Creates and returns the PyMC3 model.
Note: The size of the shared variables must match the size of the
training data. Otherwise, setting the shared variables later will
raise an error. See http://docs.pymc.io/advanced_theano.html
Returns
----------
model: the PyMC3 model.
"""
model_input = theano.shared(np.zeros([self.num_training_samples,
self.num_pred]))
model_output = theano.shared(np.zeros(self.num_training_samples))
self.shared_vars = {
'model_input': model_input,
'model_output': model_output,
}
model = pm.Model()
with model:
length_scale = pm.Gamma('length_scale', alpha=2, beta=1,
shape=(1, self.num_pred))
signal_variance = pm.HalfCauchy('signal_variance', beta=5,
shape=1)
noise_variance = pm.HalfCauchy('noise_variance', beta=5,
shape=1)
if self.kernel is None:
cov_function = signal_variance ** 2 * RBF(
input_dim=self.num_pred,
ls=length_scale)
else:
cov_function = self.kernel
if self.prior_mean is None:
mean_function = pm.gp.mean.Zero()
else:
mean_function = pm.gp.mean.Constant(c=self.prior_mean)
self.gp = pm.gp.Latent(mean_func=mean_function,
cov_func=cov_function)
f = self.gp.prior('f', X=model_input.get_value())
y = pm.Normal('y', mu=f, sd=noise_variance, observed=model_output)
return model
def save(self, file_prefix):
params = {
'inference_type': self.inference_type,
'num_pred': self.num_pred,
'num_training_samples': self.num_training_samples
}
super(GaussianProcessRegressor, self).save(file_prefix, params)
def load(self, file_prefix):
params = super(GaussianProcessRegressor, self).load(
file_prefix, load_custom_params=True)
self.inference_type = params['inference_type']
self.num_pred = params['num_pred']
self.num_training_samples = params['num_training_samples']
class StudentsTProcessRegressor(BayesianModel,
GaussianProcessRegressorMixin):
""" StudentsT Process Regression built using PyMC3.
Fit a StudentsT process model and estimate model parameters using
MCMC algorithms or Variational Inference algorithms
Parameters
----------
prior_mean : mean object
The mean specifying the mean function of the StudentsT process.
If None is passed, the mean "pm.gp.mean.Zero()" is used as default.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from pmlearn.gaussian_process import StudentsTProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel()
>>> spr = StudentsTProcessRegressor(kernel=kernel).fit(X, y)
>>> spr.score(X, y) # doctest: +ELLIPSIS
0.3680...
>>> spr.predict(X[:2,:], return_std=True) # doctest: +ELLIPSIS
(array([653.0..., 592.1...]), array([316.6..., 316.6...]))
Reference
----------
Rasmussen and Williams (2006). Gaussian Processes for Machine Learning.
"""
def __init__(self, prior_mean=None, kernel=None):
self.ppc = None
self.gp = None
self.num_training_samples = None
self.num_pred = None
self.prior_mean = prior_mean
self.kernel = kernel
super(StudentsTProcessRegressor, self).__init__()
def create_model(self):
""" Creates and returns the PyMC3 model.
Note: The size of the shared variables must match the size of the
training data. Otherwise, setting the shared variables later will raise
an error. See http://docs.pymc.io/advanced_theano.html
Returns
----------
model : the PyMC3 model
"""
model_input = theano.shared(np.zeros([self.num_training_samples,
self.num_pred]))
model_output = theano.shared(np.zeros(self.num_training_samples))
self.shared_vars = {
'model_input': model_input,
'model_output': model_output,
}
self.gp = None
model = pm.Model()
with model:
length_scale = pm.Gamma('length_scale', alpha=2, beta=0.5,
shape=(1, self.num_pred))
signal_variance = pm.HalfCauchy('signal_variance', beta=2,
shape=1)
noise_variance = pm.HalfCauchy('noise_variance', beta=2,
shape=1)
degrees_of_freedom = pm.Gamma('degrees_of_freedom', alpha=2,
beta=0.1, shape=1)
if self.kernel is None:
cov_function = signal_variance ** 2 * RBF(
input_dim=self.num_pred,
ls=length_scale)
else:
cov_function = self.kernel
if self.prior_mean is None:
mean_function = pm.gp.mean.Zero()
else:
mean_function = pm.gp.mean.Constant(c=self.prior_mean)
self.gp = pm.gp.Latent(mean_func=mean_function,
cov_func=cov_function)
f = self.gp.prior('f', X=model_input.get_value())
y = pm.StudentT('y', mu=f, lam=1 / signal_variance,
nu=degrees_of_freedom, observed=model_output)
return model
def save(self, file_prefix):
params = {
'inference_type': self.inference_type,
'num_pred': self.num_pred,
'num_training_samples': self.num_training_samples
}
super(StudentsTProcessRegressor, self).save(file_prefix, params)
def load(self, file_prefix):
params = super(StudentsTProcessRegressor, self).load(
file_prefix, load_custom_params=True)
self.inference_type = params['inference_type']
self.num_pred = params['num_pred']
self.num_training_samples = params['num_training_samples']
class SparseGaussianProcessRegressor(BayesianModel,
GaussianProcessRegressorMixin):
""" Sparse Gaussian Process Regression built using PyMC3.
Fit a Sparse Gaussian process model and estimate model parameters using
MCMC algorithms or Variational Inference algorithms
Parameters
----------
prior_mean : mean object
The mean specifying the mean function of the GP. If None is passed,
the mean "pm.gp.mean.Zero()" is used as default.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from pmlearn.gaussian_process import SparseGaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel()
>>> sgpr = SparseGaussianProcessRegressor(kernel=kernel).fit(X, y)
>>> sgpr.score(X, y) # doctest: +ELLIPSIS
0.3680...
>>> sgpr.predict(X[:2,:], return_std=True) # doctest: +ELLIPSIS
(array([653.0..., 592.1...]), array([316.6..., 316.6...]))
Reference
----------
Rasmussen and Williams (2006). Gaussian Processes for Machine Learning.
"""
def __init__(self, prior_mean=None, kernel=None):
self.ppc = None
self.gp = None
self.num_training_samples = None
self.num_pred = None
self.prior_mean = prior_mean
self.kernel = kernel
super(SparseGaussianProcessRegressor, self).__init__()
def create_model(self):
""" Creates and returns the PyMC3 model.
Note: The size of the shared variables must match the size of the
training data. Otherwise, setting the shared variables later will
raise an error. See http://docs.pymc.io/advanced_theano.html
Returns
----------
model : the PyMC3 model
"""
model_input = theano.shared(np.zeros([self.num_training_samples,
self.num_pred]))
model_output = theano.shared(np.zeros(self.num_training_samples))
self.shared_vars = {
'model_input': model_input,
'model_output': model_output,
}
self.gp = None
model = pm.Model()
with model:
length_scale = pm.Gamma('length_scale', alpha=2, beta=1,
shape=(1, self.num_pred))
signal_variance = pm.HalfCauchy('signal_variance', beta=5,
shape=1)
noise_variance = pm.HalfCauchy('noise_variance', beta=5,
shape=1)
if self.kernel is None:
cov_function = signal_variance ** 2 * RBF(
input_dim=self.num_pred,
ls=length_scale)
else:
cov_function = self.kernel
if self.prior_mean is None:
mean_function = pm.gp.mean.Zero()
else:
mean_function = pm.gp.mean.Constant(c=self.prior_mean)
self.gp = pm.gp.MarginalSparse(mean_func=mean_function,
cov_func=cov_function,
approx="FITC")
# initialize 20 inducing points with K-means
# gp.util
Xu = pm.gp.util.kmeans_inducing_points(20,
X=model_input.get_value())
y = self.gp.marginal_likelihood('y',
X=model_input.get_value(),
Xu=Xu,
y=model_output.get_value(),
sigma=noise_variance)
return model
def save(self, file_prefix):
params = {
'inference_type': self.inference_type,
'num_pred': self.num_pred,
'num_training_samples': self.num_training_samples
}
super(SparseGaussianProcessRegressor, self).save(file_prefix, params)
def load(self, file_prefix):
params = super(SparseGaussianProcessRegressor, self).load(
file_prefix, load_custom_params=True)
self.inference_type = params['inference_type']
self.num_pred = params['num_pred']
self.num_training_samples = params['num_training_samples']
```
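Both regressors above share the same persistence contract: `save` stores the trace via the `BayesianModel` base class plus a small dict of custom params, and `load` restores those params onto a fresh instance. Below is a minimal round-trip sketch, assuming the `fit`/`predict` behaviour shown in the doctests; the toy data, the `model_dir` temporary directory, and the use of a directory path as `file_prefix` (as the test suites later in this document do) are illustrative assumptions, not part of the documented API.
```python
import tempfile
import numpy as np
from pmlearn.gaussian_process import SparseGaussianProcessRegressor

# Toy regression data; shapes match the shared variables set up in create_model.
rng = np.random.default_rng(42)
X = rng.normal(size=(200, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.normal(size=200)

sgpr = SparseGaussianProcessRegressor()
sgpr.fit(X, y)                      # variational inference by default, per the tests below

model_dir = tempfile.mkdtemp()
sgpr.save(model_dir)                # trace + {'inference_type', 'num_pred', 'num_training_samples'}

restored = SparseGaussianProcessRegressor()
restored.load(model_dir)            # restores the custom params written by save
preds, stds = restored.predict(X[:5], return_std=True)
```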
#### File: pmlearn/linear_model/logistic.py
```python
import numpy as np
import pymc3 as pm
import theano
import theano.tensor as tt
from .base import BayesianModel
from .base import BayesianLinearClassifierMixin
class LogisticRegression(BayesianModel, BayesianLinearClassifierMixin):
"""Bayesian Logistic Regression built using PyMC3
"""
def __init__(self):
super(LogisticRegression, self).__init__()
self.num_cats = None
def create_model(self):
"""
Creates and returns the PyMC3 model.
Note: The size of the shared variables must match the size of the
training data. Otherwise, setting the shared variables later will
raise an error. See http://docs.pymc.io/advanced_theano.html
Returns
----------
the PyMC3 model
"""
model_input = theano.shared(
np.zeros([self.num_training_samples, self.num_pred]))
model_output = theano.shared(
np.zeros(self.num_training_samples, dtype='int'))
model_cats = theano.shared(
np.zeros(self.num_training_samples, dtype='int'))
self.shared_vars = {
'model_input': model_input,
'model_output': model_output,
'model_cats': model_cats
}
model = pm.Model()
with model:
alpha = pm.Normal('alpha', mu=0, sd=100,
shape=(self.num_cats,))
betas = pm.Normal('beta', mu=0, sd=100,
shape=(self.num_cats, self.num_pred))
c = model_cats
linear_function = alpha[c] + tt.sum(betas[c] * model_input, 1)
p = pm.invlogit(linear_function)
y = pm.Bernoulli('y', p, observed=model_output)
return model
def save(self, file_prefix):
params = {
'inference_type': self.inference_type,
'num_cats': self.num_cats,
'num_pred': self.num_pred,
'num_training_samples': self.num_training_samples
}
super(LogisticRegression, self).save(file_prefix, params)
def load(self, file_prefix):
params = super(LogisticRegression, self).load(
file_prefix, load_custom_params=True)
self.inference_type = params['inference_type']
self.num_cats = params['num_cats']
self.num_pred = params['num_pred']
self.num_training_samples = params['num_training_samples']
class HierarchicalLogisticRegression(BayesianModel,
BayesianLinearClassifierMixin):
"""
Custom Hierarchical Logistic Regression built using PyMC3.
"""
def __init__(self):
super(HierarchicalLogisticRegression, self).__init__()
self.num_cats = None
def create_model(self):
"""
Creates and returns the PyMC3 model.
Note: The size of the shared variables must match the size of the
training data. Otherwise, setting the shared variables later will
raise an error. See http://docs.pymc.io/advanced_theano.html
Returns
----------
the PyMC3 model
"""
model_input = theano.shared(
np.zeros([self.num_training_samples, self.num_pred]))
model_output = theano.shared(
np.zeros(self.num_training_samples, dtype='int'))
model_cats = theano.shared(
np.zeros(self.num_training_samples, dtype='int'))
self.shared_vars = {
'model_input': model_input,
'model_output': model_output,
'model_cats': model_cats
}
model = pm.Model()
with model:
mu_alpha = pm.Normal('mu_alpha', mu=0, sd=100)
sigma_alpha = pm.HalfNormal('sigma_alpha', sd=100)
mu_beta = pm.Normal('mu_beta', mu=0, sd=100)
sigma_beta = pm.HalfNormal('sigma_beta', sd=100)
alpha = pm.Normal('alpha', mu=mu_alpha, sd=sigma_alpha,
shape=(self.num_cats,))
betas = pm.Normal('beta', mu=mu_beta, sd=sigma_beta,
shape=(self.num_cats, self.num_pred))
c = model_cats
linear_function = alpha[c] + tt.sum(betas[c] * model_input, 1)
p = pm.invlogit(linear_function)
y = pm.Bernoulli('y', p, observed=model_output)
return model
def save(self, file_prefix):
params = {
'inference_type': self.inference_type,
'num_cats': self.num_cats,
'num_pred': self.num_pred,
'num_training_samples': self.num_training_samples
}
super(HierarchicalLogisticRegression, self).save(file_prefix, params)
def load(self, file_prefix):
params = super(HierarchicalLogisticRegression,
self).load(file_prefix, load_custom_params=True)
self.inference_type = params['inference_type']
self.num_cats = params['num_cats']
self.num_pred = params['num_pred']
self.num_training_samples = params['num_training_samples']
```
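Both logistic models rely on the same fancy-indexing trick: `model_cats` holds one category index per training row, so `alpha[c]` selects a per-category intercept and `betas[c]` a per-category coefficient row, and the row-wise sum gives each sample its own linear predictor. The following plain NumPy sketch mirrors that computation; all names and shapes here are illustrative and not part of the pmlearn API.
```python
import numpy as np

# Shapes mirror the PyMC3 model above: alpha is (num_cats,), betas is
# (num_cats, num_pred), and cats holds one category index per sample.
num_cats, num_pred, n = 3, 4, 6
rng = np.random.default_rng(0)
alpha = rng.normal(size=num_cats)
betas = rng.normal(size=(num_cats, num_pred))
X = rng.normal(size=(n, num_pred))
cats = rng.integers(0, num_cats, size=n)

# alpha[cats] gives one intercept per row; betas[cats] selects one coefficient
# row per sample, so summing over axis 1 is a per-sample dot product --
# the same role as alpha[c] + tt.sum(betas[c] * model_input, 1) above.
linear = alpha[cats] + np.sum(betas[cats] * X, axis=1)
p = 1.0 / (1.0 + np.exp(-linear))   # same role as pm.invlogit
print(p.shape)                      # (6,)
```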
#### File: mixture/tests/test_dirichlet_process.py
```python
import unittest
import shutil
import tempfile
import numpy as np
# import pandas as pd
# import pymc3 as pm
# from pymc3 import summary
# from sklearn.mixture import BayesianGaussianMixture as skBayesianGaussianMixture
from sklearn.model_selection import train_test_split
from pmlearn.exceptions import NotFittedError
from pmlearn.mixture import DirichletProcessMixture
class DirichletProcessMixtureTestCase(unittest.TestCase):
def setUp(self):
self.num_truncate = 3
self.num_components = 3
self.num_pred = 1
self.num_training_samples = 100
self.pi = np.array([0.35, 0.4, 0.25])
self.means = np.array([0, 5, 10])
self.sigmas = np.array([0.5, 0.5, 1.0])
self.components = np.random.randint(0,
self.num_components,
self.num_training_samples)
X = np.random.normal(loc=self.means[self.components],
scale=self.sigmas[self.components])
X.shape = (self.num_training_samples, 1)
self.X_train, self.X_test = train_test_split(X, test_size=0.3)
self.test_DPMM = DirichletProcessMixture()
self.test_nuts_DPMM = DirichletProcessMixture()
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
# class DirichletProcessMixtureFitTestCase(DirichletProcessMixtureTestCase):
# def test_advi_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_DPMM.fit(self.X_train)
#
# self.assertEqual(self.num_pred, self.test_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_DPMM.num_components)
# self.assertEqual(self.num_truncate, self.test_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
# def test_nuts_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_nuts_DPMM.fit(self.X_train,
# inference_type='nuts',
# inference_args={'draws': 1000,
# 'chains': 2})
#
# self.assertEqual(self.num_pred, self.test_nuts_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_components)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_nuts_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_nuts_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_nuts_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_nuts_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_nuts_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_nuts_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
#
class DirichletProcessMixturePredictTestCase(DirichletProcessMixtureTestCase):
# def test_predict_returns_predictions(self):
# print('')
# self.test_DPMM.fit(self.X_train, self.y_train)
# preds = self.test_DPMM.predict(self.X_test)
# self.assertEqual(self.y_test.shape, preds.shape)
# def test_predict_returns_mean_predictions_and_std(self):
# print('')
# self.test_DPMM.fit(self.X_train, self.y_train)
# preds, stds = self.test_DPMM.predict(self.X_test, return_std=True)
# self.assertEqual(self.y_test.shape, preds.shape)
# self.assertEqual(self.y_test.shape, stds.shape)
def test_predict_raises_error_if_not_fit(self):
print('')
with self.assertRaises(NotFittedError) as no_fit_error:
test_DPMM = DirichletProcessMixture()
test_DPMM.predict(self.X_train)
expected = 'Run fit on the model before predict.'
self.assertEqual(str(no_fit_error.exception), expected)
# class DirichletProcessMixtureScoreTestCase(DirichletProcessMixtureTestCase):
# def test_score_matches_sklearn_performance(self):
# print('')
# skDPMM = skBayesianGaussianMixture(n_components=3)
# skDPMM.fit(self.X_train)
# skDPMM_score = skDPMM.score(self.X_test)
#
# self.test_DPMM.fit(self.X_train)
# test_DPMM_score = self.test_DPMM.score(self.X_test)
#
# self.assertAlmostEqual(skDPMM_score, test_DPMM_score, 0)
#
#
# class DirichletProcessMixtureSaveAndLoadTestCase(DirichletProcessMixtureTestCase):
# def test_save_and_load_work_correctly(self):
# print('')
# self.test_DPMM.fit(self.X_train)
# score1 = self.test_DPMM.score(self.X_test)
# self.test_DPMM.save(self.test_dir)
#
# DPMM2 = DirichletProcessMixture()
# DPMM2.load(self.test_dir)
#
# self.assertEqual(self.test_DPMM.inference_type, DPMM2.inference_type)
# self.assertEqual(self.test_DPMM.num_pred, DPMM2.num_pred)
# self.assertEqual(self.test_DPMM.num_training_samples,
# DPMM2.num_training_samples)
# self.assertEqual(self.test_DPMM.num_truncate, DPMM2.num_truncate)
#
# pd.testing.assert_frame_equal(summary(self.test_DPMM.trace),
# summary(DPMM2.trace))
#
# score2 = DPMM2.score(self.X_test)
# self.assertAlmostEqual(score1, score2, 0)
```
#### File: mixture/tests/test_gaussian_mixture.py
```python
import unittest
import shutil
import tempfile
import numpy as np
# import pandas as pd
# import pymc3 as pm
# from pymc3 import summary
# from sklearn.mixture import GaussianMixture as skGaussianMixture
from sklearn.model_selection import train_test_split
from pmlearn.exceptions import NotFittedError
from pmlearn.mixture import GaussianMixture
class GaussianMixtureTestCase(unittest.TestCase):
def setUp(self):
self.num_components = 3
self.num_pred = 1
self.num_training_samples = 100
self.pi = np.array([0.35, 0.4, 0.25])
self.means = np.array([0, 5, 10])
self.sigmas = np.array([0.5, 0.5, 1.0])
self.components = np.random.randint(0,
self.num_components,
self.num_training_samples)
X = np.random.normal(loc=self.means[self.components],
scale=self.sigmas[self.components])
X.shape = (self.num_training_samples, 1)
self.X_train, self.X_test = train_test_split(X, test_size=0.3)
self.test_GMM = GaussianMixture()
self.test_nuts_GMM = GaussianMixture()
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
# class GaussianMixtureFitTestCase(GaussianMixtureTestCase):
# def test_advi_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_GMM.fit(self.X_train)
#
# self.assertEqual(self.num_pred, self.test_GMM.num_pred)
# self.assertEqual(self.num_components, self.test_GMM.num_components)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_GMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_GMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_GMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_GMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_GMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_GMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_GMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_GMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_GMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
# def test_nuts_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_nuts_GMM.fit(self.X_train,
# inference_type='nuts')
#
# self.assertEqual(self.num_pred, self.test_nuts_GMM.num_pred)
# self.assertEqual(self.num_components, self.test_nuts_GMM.num_components)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_nuts_GMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_nuts_GMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_nuts_GMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_nuts_GMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_nuts_GMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_nuts_GMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_nuts_GMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_nuts_GMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_nuts_GMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
#
class GaussianMixturePredictTestCase(GaussianMixtureTestCase):
# def test_predict_returns_predictions(self):
# print('')
# self.test_GMM.fit(self.X_train, self.y_train)
# preds = self.test_GMM.predict(self.X_test)
# self.assertEqual(self.y_test.shape, preds.shape)
# def test_predict_returns_mean_predictions_and_std(self):
# print('')
# self.test_GMM.fit(self.X_train, self.y_train)
# preds, stds = self.test_GMM.predict(self.X_test, return_std=True)
# self.assertEqual(self.y_test.shape, preds.shape)
# self.assertEqual(self.y_test.shape, stds.shape)
def test_predict_raises_error_if_not_fit(self):
print('')
with self.assertRaises(NotFittedError) as no_fit_error:
test_GMM = GaussianMixture()
test_GMM.predict(self.X_train)
expected = 'Run fit on the model before predict.'
self.assertEqual(str(no_fit_error.exception), expected)
# class GaussianMixtureScoreTestCase(GaussianMixtureTestCase):
# def test_score_matches_sklearn_performance(self):
# print('')
# skGMM = skGaussianMixture(n_components=3)
# skGMM.fit(self.X_train)
# skGMM_score = skGMM.score(self.X_test)
#
# self.test_GMM.fit(self.X_train)
# test_GMM_score = self.test_GMM.score(self.X_test)
#
# self.assertAlmostEqual(skGMM_score, test_GMM_score, 0)
#
#
# class GaussianMixtureSaveAndLoadTestCase(GaussianMixtureTestCase):
# def test_save_and_load_work_correctly(self):
# print('')
# self.test_GMM.fit(self.X_train)
# score1 = self.test_GMM.score(self.X_test)
# self.test_GMM.save(self.test_dir)
#
# GMM2 = GaussianMixture()
# GMM2.load(self.test_dir)
#
# self.assertEqual(self.test_GMM.inference_type, GMM2.inference_type)
# self.assertEqual(self.test_GMM.num_pred, GMM2.num_pred)
# self.assertEqual(self.test_GMM.num_training_samples,
# GMM2.num_training_samples)
# pd.testing.assert_frame_equal(summary(self.test_GMM.trace),
# summary(GMM2.trace))
#
# score2 = GMM2.score(self.X_test)
# self.assertAlmostEqual(score1, score2, 0)
```
#### File: pmlearn/mixture/util.py
```python
import numpy as np
# import pymc3 as pm
from pymc3.math import logsumexp
# import theano
import theano.tensor as T
from theano.tensor.nlinalg import det
class logp_gmix(object):
def __init__(self, mus, pi, tau, num_training_samples):
self.mus = mus
self.pi = pi
self.tau = tau
self.num_training_samples = num_training_samples
def __call__(self, value):
def logp_normal(mu, tau, value):
# log probability of individual samples
k = tau.shape[0]
def delta(mu):
return value - mu
# delta = lambda mu: value - mu
return (-1 / 2.) * (k * T.log(2 * np.pi) + T.log(1. / det(tau)) +
(delta(mu).dot(tau) * delta(mu)).sum(axis=1))
logps = [T.log(self.pi[i]) + logp_normal(mu, self.tau, value)
for i, mu in enumerate(self.mus)]
return T.sum(logsumexp(T.stacklists(logps)[:, :self.num_training_samples],
axis=0))
# Log likelihood of normal distribution
# def logp_normal(mu, tau, value):
# # log probability of individual samples
# k = tau.shape[0]
#
# def delta(mu):
# return value - mu
# # delta = lambda mu: value - mu
# return (-1 / 2.) * (k * T.log(2 * np.pi) + T.log(1./det(tau)) +
# (delta(mu).dot(tau) * delta(mu)).sum(axis=1))
# Log likelihood of Gaussian mixture distribution
# def logp_gmix(mus, pi, tau):
# def logp_(value):
# logps = [T.log(pi[i]) + logp_normal(mu, tau, value)
# for i, mu in enumerate(mus)]
#
# return T.sum(logsumexp(T.stacklists(logps)[:, :self.num_training_samples], axis=0))
#
# return logp_
```
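`logp_gmix.__call__` evaluates the marginal log-likelihood of a Gaussian mixture: per-component multivariate-normal log densities (with `tau` as a shared precision matrix) are weighted by the log mixing proportions and combined with a log-sum-exp over components. Here is a NumPy/SciPy sketch of the same quantity on toy data; the data, the two-component parameters, and the shared precision are illustrative assumptions.
```python
import numpy as np
from scipy.special import logsumexp
from scipy.stats import multivariate_normal

# Toy 2-D data and a 2-component mixture; tau in the Theano code is a
# precision matrix, so its inverse is passed here as the covariance.
rng = np.random.default_rng(0)
value = rng.normal(size=(100, 2))
pi = np.array([0.3, 0.7])
mus = [np.zeros(2), np.ones(2)]
tau = np.eye(2) * 2.0
cov = np.linalg.inv(tau)

# log pi_i + log N(x | mu_i, tau^{-1}) for each component, stacked component-first,
# then log-sum-exp over components and summed over samples -- mirroring
# T.sum(logsumexp(T.stacklists(logps), axis=0)) in logp_gmix.
logps = np.stack([np.log(pi[i]) + multivariate_normal.logpdf(value, mean=mu, cov=cov)
                  for i, mu in enumerate(mus)])
total_logp = logsumexp(logps, axis=0).sum()
print(total_logp)
```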
#### File: pmlearn/neural_network/multilayer_perceptron.py
```python
import numpy as np
import pymc3 as pm
import theano
from ..base import BayesianModel, BayesianClassifierMixin
floatX = theano.config.floatX
class MLPClassifier(BayesianModel, BayesianClassifierMixin):
""" Multilayer perceptron classification built using PyMC3.
Fit a Multilayer perceptron classification model and estimate
model parameters using
MCMC algorithms or Variational Inference algorithms
Parameters
----------
Examples
--------
Reference
----------
http://twiecki.github.io/blog/2016/06/01/bayesian-deep-learning/
"""
def __init__(self, n_hidden=5):
self.n_hidden = n_hidden
self.num_training_samples = None
self.num_pred = None
self.total_size = None
super(MLPClassifier, self).__init__()
def create_model(self):
"""
Returns
-------
"""
model_input = theano.shared(np.zeros([self.num_training_samples,
self.num_pred]))
model_output = theano.shared(np.zeros(self.num_training_samples))
self.shared_vars = {
'model_input': model_input,
'model_output': model_output,
}
self.total_size = len(model_output.get_value())
# Initialize random weights between each layer
init_1 = np.random.randn(self.num_pred, self.n_hidden).astype(floatX)
init_2 = np.random.randn(self.n_hidden, self.n_hidden).astype(floatX)
init_out = np.random.randn(self.n_hidden).astype(floatX)
model = pm.Model()
with model:
# Weights from input to hidden layer
weights_in_1 = pm.Normal('w_in_1', 0, sd=1,
shape=(self.num_pred, self.n_hidden),
testval=init_1)
# Weights from 1st to 2nd layer
weights_1_2 = pm.Normal('w_1_2', 0, sd=1,
shape=(self.n_hidden, self.n_hidden),
testval=init_2)
# Weights from hidden layer to output
weights_2_out = pm.Normal('w_2_out', 0, sd=1,
shape=(self.n_hidden,),
testval=init_out)
# Build neural-network using tanh activation function
act_1 = pm.math.tanh(pm.math.dot(model_input, weights_in_1))
act_2 = pm.math.tanh(pm.math.dot(act_1, weights_1_2))
act_out = pm.math.sigmoid(pm.math.dot(act_2, weights_2_out))
# Binary classification -> Bernoulli likelihood
y = pm.Bernoulli('y',
act_out,
observed=model_output,
total_size=self.total_size)
return model
def save(self, file_prefix):
params = {
'inference_type': self.inference_type,
'num_pred': self.num_pred,
'num_training_samples': self.num_training_samples
}
super(MLPClassifier, self).save(file_prefix, params)
def load(self, file_prefix):
params = super(MLPClassifier, self).load(file_prefix,
load_custom_params=True)
self.inference_type = params['inference_type']
self.num_pred = params['num_pred']
self.num_training_samples = params['num_training_samples']
``` |
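The network defined in `create_model` is a fixed two-hidden-layer architecture: two `tanh` layers of `n_hidden` units followed by a sigmoid output feeding a Bernoulli likelihood. A deterministic NumPy forward pass with the same shapes makes the wiring explicit; the random weights below stand in for posterior samples and are purely illustrative.
```python
import numpy as np

def forward_pass(X, w_in_1, w_1_2, w_2_out):
    """Deterministic forward pass matching the shapes in MLPClassifier.create_model."""
    act_1 = np.tanh(X @ w_in_1)                      # (n, num_pred) @ (num_pred, n_hidden)
    act_2 = np.tanh(act_1 @ w_1_2)                   # (n, n_hidden) @ (n_hidden, n_hidden)
    return 1.0 / (1.0 + np.exp(-(act_2 @ w_2_out)))  # sigmoid output, shape (n,)

n, num_pred, n_hidden = 8, 3, 5
rng = np.random.default_rng(1)
probs = forward_pass(rng.normal(size=(n, num_pred)),
                     rng.normal(size=(num_pred, n_hidden)),
                     rng.normal(size=(n_hidden, n_hidden)),
                     rng.normal(size=n_hidden))
print(probs.shape)   # (8,) -- one Bernoulli probability per sample
```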
{
"source": "JohnVict0r/tsuru-django-sample",
"score": 2
} |
#### File: blog/posts/tests.py
```python
from django import test
from django.contrib import admin as django_admin
from django.db import models as django_models
from blog.posts import models
class PostTestCase(test.TestCase):
model = models.Post
def assert_has_field(self, name):
self.assertIn(name, self.model._meta.get_all_field_names())
def _field(self, name):
return self.model._meta.get_field_by_name(name)[0]
def test_should_have_title(self):
self.assert_has_field("title")
def test_title_should_be_CharField(self):
self.assertIsInstance(self._field("title"), django_models.CharField)
def test_title_should_have_at_most_500_characters(self):
self.assertEqual(500, self._field("title").max_length)
def test_should_have_body(self):
self.assert_has_field("body")
def test_body_should_be_a_TextField(self):
self.assertIsInstance(self._field("body"), django_models.TextField)
class PostAdminTestCase(test.TestCase):
def test_Post_is_registered_within_django_admin(self):
django_admin.autodiscover()
self.assertIn(models.Post, django_admin.site._registry)
``` |
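These tests call `_meta.get_all_field_names()` and `_meta.get_field_by_name()`, which belong to the old Django `_meta` API (deprecated in Django 1.8 and removed in 1.10). Below is a sketch of equivalent helpers on the modern `_meta` API, assuming the same `Post` model; the helper names are illustrative, not part of the repository.
```python
from blog.posts import models

def field_names(model=models.Post):
    # Modern replacement for _meta.get_all_field_names()
    return [f.name for f in model._meta.get_fields()]

def field(name, model=models.Post):
    # Modern replacement for _meta.get_field_by_name(name)[0]
    return model._meta.get_field(name)
```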